summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/ca/restapi/CertificateAuthorityApiHandler.java2
-rw-r--r--bundle-plugin/src/main/java/com/yahoo/container/plugin/mojo/GenerateOsgiManifestMojo.java12
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/derived/SummaryMap.java9
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/processing/MatchedElementsOnlyResolver.java64
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/processing/Processing.java1
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/documentmodel/SummaryTransform.java5
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/ml/ConvertedModel.java18
-rw-r--r--config-model/src/main/javacc/SDParser.jj3
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/derived/SummaryMapTestCase.java39
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/processing/MatchedElementsOnlyResolverTestCase.java151
-rw-r--r--configd/src/apps/cmd/main.cpp10
-rw-r--r--configdefinitions/src/vespa/configserver.def1
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java4
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/FileDistributionMaintainer.java5
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java5
-rw-r--r--container-dependencies-enforcer/pom.xml16
-rw-r--r--container-dependency-versions/pom.xml79
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/fastsearch/SortDataHitSorter.java2
-rw-r--r--container-search/src/main/java/com/yahoo/search/grouping/result/Group.java2
-rw-r--r--container-search/src/main/java/com/yahoo/search/grouping/result/HitList.java9
-rw-r--r--container-search/src/main/java/com/yahoo/search/grouping/result/HitRenderer.java3
-rw-r--r--container-search/src/main/java/com/yahoo/search/grouping/vespa/HitConverter.java4
-rw-r--r--container-search/src/main/java/com/yahoo/search/grouping/vespa/ResultBuilder.java13
-rw-r--r--container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java44
-rw-r--r--container-search/src/main/java/com/yahoo/search/result/HitOrderer.java4
-rw-r--r--container-search/src/main/java/com/yahoo/search/yql/YqlParser.java2
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/ApplicationIdSource.java16
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/BuildService.java1
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Application.java246
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java290
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java38
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Instance.java128
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/LockedApplication.java204
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/LockedInstance.java288
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ApplicationList.java245
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/DeploymentJobs.java4
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/InstanceList.java234
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/JobList.java1
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/TenantAndApplicationId.java98
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/athenz/impl/AthenzFacade.java12
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java252
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java36
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java71
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/Versions.java38
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmer.java84
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/BillingMaintainer.java7
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ClusterInfoMaintainer.java25
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirer.java27
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporter.java71
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java57
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporter.java51
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OutstandingChangeDeployer.java11
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/RotationStatusUpdater.java19
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/RoutingPolicies.java2
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/Upgrader.java24
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java612
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java108
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java149
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java60
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/deployment/DeploymentApiHandler.java10
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/filter/SignatureFilter.java6
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepository.java78
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/security/AccessControl.java6
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/security/CloudAccessControl.java3
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VersionStatus.java78
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VespaVersion.java22
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java363
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java84
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/BuildJob.java13
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTester.java135
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java656
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalDeploymentTester.java65
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java114
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/TestConfigSerializerTest.java4
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmerTest.java4
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ClusterInfoMaintainerTest.java14
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java24
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporterTest.java42
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java18
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/JobRunnerTest.java13
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporterTest.java104
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OutstandingChangeDeployerTest.java40
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RotationStatusUpdaterTest.java2
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RoutingPoliciesTest.java49
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java465
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java250
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/InstanceSerializerTest.java27
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/OldCuratorDb.java12
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/complete-application.json970
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/complete-instance.json534
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerControllerTester.java27
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java135
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelperTest.java54
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/delete-with-active-deployments.json2
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/BadgeApiTest.java6
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/DeploymentApiTest.java9
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/filter/SignatureFilterTest.java8
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepositoryTest.java56
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java51
-rw-r--r--dist/vespa.spec12
-rw-r--r--docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/CreateContainerCommandImpl.java4
-rw-r--r--docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/CreateContainerCommandImplTest.java1
-rw-r--r--flags/src/main/java/com/yahoo/vespa/flags/Flags.java12
-rw-r--r--jdisc_http_service/src/main/java/com/yahoo/jdisc/http/server/jetty/HealthCheckProxyHandler.java1
-rw-r--r--maven-plugins/pom.xml3
-rw-r--r--model-integration/src/main/java/ai/vespa/rankingexpression/importer/operations/Argument.java2
-rw-r--r--model-integration/src/main/java/ai/vespa/rankingexpression/importer/operations/Const.java2
-rw-r--r--model-integration/src/main/java/ai/vespa/rankingexpression/importer/operations/Constant.java2
-rw-r--r--model-integration/src/main/java/ai/vespa/rankingexpression/importer/operations/IntermediateOperation.java2
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileSnapshot.java83
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileWriter.java24
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixPath.java2
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileSnapshotTest.java64
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java19
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java6
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/NodePatcher.java4
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java12
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/SerializationTest.java14
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node1-os-upgrade-complete.json5
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/dockerhost1-with-firmware-data.json5
-rw-r--r--parent/pom.xml314
-rw-r--r--searchcore/CMakeLists.txt6
-rw-r--r--searchcore/src/apps/fdispatch/.gitignore1
-rw-r--r--searchcore/src/apps/fdispatch/CMakeLists.txt15
-rw-r--r--searchcore/src/apps/fdispatch/fdispatch.cpp208
-rw-r--r--searchcore/src/apps/vespa-proton-cmd/vespa-proton-cmd.cpp9
-rw-r--r--searchcore/src/tests/fdispatch/fnet_search/.gitignore2
-rw-r--r--searchcore/src/tests/fdispatch/fnet_search/CMakeLists.txt18
-rw-r--r--searchcore/src/tests/fdispatch/fnet_search/search_coverage_test.cpp141
-rw-r--r--searchcore/src/tests/fdispatch/fnet_search/search_path_test.cpp123
-rw-r--r--searchcore/src/tests/fdispatch/randomrow/.gitignore1
-rw-r--r--searchcore/src/tests/fdispatch/randomrow/CMakeLists.txt10
-rw-r--r--searchcore/src/tests/fdispatch/randomrow/randomrow_test.cpp99
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/.gitignore1
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/OWNERS2
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/common/CMakeLists.txt9
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/common/appcontext.cpp56
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/common/appcontext.h39
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/common/properties.h19
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/common/rpc.cpp92
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/common/rpc.h51
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/common/search.cpp221
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/common/search.h393
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/common/stdincl.h12
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/common/timestat.cpp116
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/common/timestat.h215
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/program/.gitignore13
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/program/CMakeLists.txt11
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/program/description.html3
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/program/docsumadapter.cpp114
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/program/docsumadapter.h52
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/program/engineadapter.cpp77
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/program/engineadapter.h46
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/program/fdispatch.cpp397
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/program/fdispatch.h111
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/program/rpc.cpp113
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/program/rpc.h28
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/program/searchadapter.cpp109
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/program/searchadapter.h48
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/.gitignore3
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/CMakeLists.txt21
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/child_info.h22
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/configdesc.cpp344
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/configdesc.h374
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/dataset_base.cpp294
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/dataset_base.h227
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/datasetcollection.cpp266
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/datasetcollection.h87
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/engine_base.cpp417
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/engine_base.h195
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/fnet_dataset.cpp144
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/fnet_dataset.h66
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/fnet_engine.cpp231
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/fnet_engine.h112
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/fnet_search.cpp1572
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/fnet_search.h375
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/mergehits.cpp283
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/mergehits.h158
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/nodemanager.cpp438
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/nodemanager.h89
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/plain_dataset.cpp560
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/plain_dataset.h216
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/poss_count.h16
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/query.cpp122
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/query.h75
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/querycacheutil.cpp156
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/querycacheutil.h151
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/rowstate.cpp90
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/rowstate.h60
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/search_path.cpp114
-rw-r--r--searchcore/src/vespa/searchcore/fdispatch/search/search_path.h58
-rw-r--r--searchcore/src/vespa/searchcore/proton/docsummary/docsumcontext.cpp7
-rw-r--r--searchcore/src/vespa/searchcore/proton/docsummary/docsumcontext.h1
-rw-r--r--searchlib/src/main/java/com/yahoo/searchlib/aggregation/AggregationResult.java45
-rw-r--r--searchlib/src/main/java/com/yahoo/searchlib/aggregation/FS4Hit.java5
-rw-r--r--searchlib/src/main/java/com/yahoo/searchlib/aggregation/Group.java60
-rw-r--r--searchlib/src/main/java/com/yahoo/searchlib/aggregation/Hit.java5
-rw-r--r--searchlib/src/main/java/com/yahoo/searchlib/aggregation/HitsAggregationResult.java29
-rw-r--r--searchlib/src/test/java/com/yahoo/searchlib/aggregation/GroupingTestCase.java1
-rw-r--r--searchlib/src/tests/attribute/attribute_test.cpp41
-rw-r--r--searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp5
-rwxr-xr-xsearchlib/src/tests/docstore/logdatastore/logdatastore_test.sh1
-rw-r--r--searchlib/src/tests/queryeval/same_element/same_element_test.cpp1
-rw-r--r--searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp23
-rw-r--r--searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.h1
-rw-r--r--searchlib/src/vespa/searchlib/attribute/enumattribute.hpp1
-rw-r--r--searchlib/src/vespa/searchlib/attribute/loadedenumvalue.h1
-rw-r--r--searchlib/src/vespa/searchlib/attribute/postingchange.cpp30
-rw-r--r--searchlib/src/vespa/searchlib/attribute/postingchange.h1
-rw-r--r--searchlib/src/vespa/searchlib/attribute/postinglistattribute.cpp1
-rw-r--r--searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp11
-rw-r--r--searchsummary/src/tests/docsummary/attribute_combiner/CMakeLists.txt2
-rw-r--r--searchsummary/src/tests/docsummary/attribute_combiner/attribute_combiner_test.cpp126
-rw-r--r--searchsummary/src/tests/docsummary/positionsdfw_test.cpp3
-rw-r--r--searchsummary/src/tests/docsummary/slime_summary/slime_summary_test.cpp3
-rw-r--r--searchsummary/src/vespa/searchsummary/docsummary/array_attribute_combiner_dfw.cpp52
-rw-r--r--searchsummary/src/vespa/searchsummary/docsummary/array_attribute_combiner_dfw.h5
-rw-r--r--searchsummary/src/vespa/searchsummary/docsummary/attribute_combiner_dfw.cpp15
-rw-r--r--searchsummary/src/vespa/searchsummary/docsummary/attribute_combiner_dfw.h8
-rw-r--r--searchsummary/src/vespa/searchsummary/docsummary/docsumconfig.cpp8
-rw-r--r--searchsummary/src/vespa/searchsummary/docsummary/docsumstate.cpp15
-rw-r--r--searchsummary/src/vespa/searchsummary/docsummary/docsumstate.h6
-rw-r--r--searchsummary/src/vespa/searchsummary/docsummary/struct_map_attribute_combiner_dfw.cpp58
-rw-r--r--searchsummary/src/vespa/searchsummary/docsummary/struct_map_attribute_combiner_dfw.h5
-rw-r--r--storage/src/tests/bucketdb/bucketmanagertest.cpp2
-rw-r--r--storage/src/tests/distributor/btree_bucket_database_test.cpp45
-rw-r--r--storage/src/tests/distributor/distributortestutil.cpp1
-rw-r--r--storage/src/tests/distributor/simplemaintenancescannertest.cpp4
-rw-r--r--storage/src/vespa/storage/bucketdb/btree_bucket_database.cpp4
-rw-r--r--storage/src/vespa/storage/bucketdb/btree_bucket_database.h8
-rw-r--r--storage/src/vespa/storage/bucketdb/bucketdatabase.h1
-rw-r--r--storage/src/vespa/storage/bucketdb/mapbucketdatabase.h1
-rw-r--r--storage/src/vespa/storage/config/stor-distributormanager.def6
-rw-r--r--storage/src/vespa/storage/distributor/distributor.cpp9
-rw-r--r--storage/src/vespa/storage/distributor/distributor.h1
-rw-r--r--storage/src/vespa/storage/distributor/distributor_bucket_space.cpp10
-rw-r--r--storage/src/vespa/storage/distributor/distributor_bucket_space.h2
-rw-r--r--storage/src/vespa/storage/distributor/distributor_bucket_space_repo.cpp6
-rw-r--r--storage/src/vespa/storage/distributor/distributor_bucket_space_repo.h2
-rw-r--r--storage/src/vespa/storage/storageserver/distributornode.cpp3
-rw-r--r--storage/src/vespa/storage/storageserver/distributornode.h4
-rw-r--r--storageserver/src/vespa/storageserver/app/distributorprocess.cpp26
-rw-r--r--storageserver/src/vespa/storageserver/app/distributorprocess.h3
-rw-r--r--vespa-athenz/src/main/java/com/yahoo/vespa/athenz/utils/SiaUtils.java7
-rw-r--r--vespajlib/src/main/java/com/yahoo/vespa/objects/Identifiable.java2
-rw-r--r--vespajlib/src/main/java/com/yahoo/vespa/objects/Selectable.java11
-rw-r--r--vespalib/src/vespa/vespalib/datastore/unique_store_builder.h1
-rw-r--r--vespalib/src/vespa/vespalib/datastore/unique_store_entry_base.h12
-rw-r--r--vsm/src/vespa/vsm/vsm/vsm-adapter.cpp7
-rw-r--r--vsm/src/vespa/vsm/vsm/vsm-adapter.h1
-rw-r--r--zkfacade/src/main/java/com/yahoo/vespa/curator/Curator.java1
-rw-r--r--zookeeper-command-line-client/pom.xml5
-rw-r--r--zookeeper-command-line-client/src/main/resources/log4j-vespa.properties11
253 files changed, 6467 insertions, 13784 deletions
diff --git a/athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/ca/restapi/CertificateAuthorityApiHandler.java b/athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/ca/restapi/CertificateAuthorityApiHandler.java
index b2120f24160..28b6c6c0939 100644
--- a/athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/ca/restapi/CertificateAuthorityApiHandler.java
+++ b/athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/ca/restapi/CertificateAuthorityApiHandler.java
@@ -8,6 +8,7 @@ import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.container.jdisc.HttpResponse;
import com.yahoo.container.jdisc.LoggingRequestHandler;
import com.yahoo.container.jdisc.secretstore.SecretStore;
+import com.yahoo.restapi.ErrorResponse;
import com.yahoo.restapi.Path;
import com.yahoo.restapi.SlimeJsonResponse;
import com.yahoo.security.KeyUtils;
@@ -16,7 +17,6 @@ import com.yahoo.slime.Slime;
import com.yahoo.vespa.config.SlimeUtils;
import com.yahoo.vespa.hosted.ca.Certificates;
import com.yahoo.vespa.hosted.ca.instance.InstanceIdentity;
-import com.yahoo.vespa.hosted.provision.restapi.v2.ErrorResponse;
import com.yahoo.yolean.Exceptions;
import java.io.IOException;
diff --git a/bundle-plugin/src/main/java/com/yahoo/container/plugin/mojo/GenerateOsgiManifestMojo.java b/bundle-plugin/src/main/java/com/yahoo/container/plugin/mojo/GenerateOsgiManifestMojo.java
index 04f2428d284..c23e55a9eec 100644
--- a/bundle-plugin/src/main/java/com/yahoo/container/plugin/mojo/GenerateOsgiManifestMojo.java
+++ b/bundle-plugin/src/main/java/com/yahoo/container/plugin/mojo/GenerateOsgiManifestMojo.java
@@ -76,9 +76,6 @@ public class GenerateOsgiManifestMojo extends AbstractMojo {
@Parameter(alias = "X-JDisc-Privileged-Activator")
private String jdiscPrivilegedActivator = null;
- @Parameter(alias = "X-Config-Models")
- private String configModels = null;
-
@Parameter(alias = "Import-Package")
private String importPackage = null;
@@ -88,9 +85,6 @@ public class GenerateOsgiManifestMojo extends AbstractMojo {
@Parameter(alias = "Main-Class")
private String mainClass = null;
- @Parameter(alias = "X-Jersey-Binding")
- private String jerseyBinding = null;
-
public void execute() throws MojoExecutionException {
try {
Artifacts.ArtifactSet artifactSet = Artifacts.getArtifacts(project);
@@ -179,8 +173,8 @@ public class GenerateOsgiManifestMojo extends AbstractMojo {
Set<String> exportedPackagesFromProvidedDeps) {
Set<String> overlappingProjectPackages = Sets.intersection(projectPackages.definedPackages(), exportedPackagesFromProvidedDeps);
if (! overlappingProjectPackages.isEmpty()) {
- getLog().warn("Project classes use the following packages that are already defined in provided scoped dependencies: "
- + overlappingProjectPackages);
+ getLog().warn("This project defines packages that are also defined in provided scoped dependencies " +
+ "(overlapping packages are strongly discouraged): " + overlappingProjectPackages);
}
}
@@ -237,8 +231,6 @@ public class GenerateOsgiManifestMojo extends AbstractMojo {
Pair.of("Main-Class", mainClass), //
Pair.of("X-JDisc-Application", discApplicationClass), //
Pair.of("X-JDisc-Preinstall-Bundle", trimWhitespace(Optional.ofNullable(discPreInstallBundle))), //
- Pair.of("X-Config-Models", configModels), //
- Pair.of("X-Jersey-Binding", jerseyBinding), //
Pair.of("WebInfUrl", webInfUrl), //
Pair.of("Import-Package", importPackage), //
Pair.of("Export-Package", exportPackage))) {
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/derived/SummaryMap.java b/config-model/src/main/java/com/yahoo/searchdefinition/derived/SummaryMap.java
index 71bf05dbb6e..74de63eae3a 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/derived/SummaryMap.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/derived/SummaryMap.java
@@ -45,7 +45,10 @@ public class SummaryMap extends Derived implements SummarymapConfig.Producer {
if (summaryField.getTransform()==SummaryTransform.ATTRIBUTE ||
summaryField.getTransform()==SummaryTransform.DISTANCE ||
summaryField.getTransform()==SummaryTransform.GEOPOS ||
- summaryField.getTransform()==SummaryTransform.POSITIONS) {
+ summaryField.getTransform()==SummaryTransform.POSITIONS ||
+ summaryField.getTransform()==SummaryTransform.MATCHED_ELEMENTS_FILTER ||
+ summaryField.getTransform()==SummaryTransform.MATCHED_ATTRIBUTE_ELEMENTS_FILTER)
+ {
resultTransforms.put(summaryField.getName(),new FieldResultTransform(summaryField.getName(),
summaryField.getTransform(),
summaryField.getSingleSource()));
@@ -99,7 +102,9 @@ public class SummaryMap extends Derived implements SummarymapConfig.Producer {
frt.getTransform().equals(SummaryTransform.DISTANCE) ||
frt.getTransform().equals(SummaryTransform.GEOPOS) ||
frt.getTransform().equals(SummaryTransform.POSITIONS) ||
- frt.getTransform().equals(SummaryTransform.TEXTEXTRACTOR))
+ frt.getTransform().equals(SummaryTransform.TEXTEXTRACTOR) ||
+ frt.getTransform().equals(SummaryTransform.MATCHED_ELEMENTS_FILTER) ||
+ frt.getTransform().equals(SummaryTransform.MATCHED_ATTRIBUTE_ELEMENTS_FILTER))
{
oB.arguments(frt.getArgument());
} else {
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/processing/MatchedElementsOnlyResolver.java b/config-model/src/main/java/com/yahoo/searchdefinition/processing/MatchedElementsOnlyResolver.java
new file mode 100644
index 00000000000..8bb834d4697
--- /dev/null
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/processing/MatchedElementsOnlyResolver.java
@@ -0,0 +1,64 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.searchdefinition.processing;
+
+import com.yahoo.config.application.api.DeployLogger;
+import com.yahoo.searchdefinition.RankProfileRegistry;
+import com.yahoo.searchdefinition.Search;
+import com.yahoo.vespa.documentmodel.DocumentSummary;
+import com.yahoo.vespa.documentmodel.SummaryField;
+import com.yahoo.vespa.documentmodel.SummaryTransform;
+import com.yahoo.vespa.model.container.search.QueryProfiles;
+
+import static com.yahoo.searchdefinition.document.ComplexAttributeFieldUtils.isComplexFieldWithOnlyStructFieldAttributes;
+import static com.yahoo.searchdefinition.document.ComplexAttributeFieldUtils.isSupportedComplexField;
+
+/**
+ * Iterates all summary fields with 'matched-elements-only' and adjusts transform (if all struct-fields are attributes)
+ * and validates that the field type is supported.
+ *
+ * @author geirst
+ */
+public class MatchedElementsOnlyResolver extends Processor {
+
+ public MatchedElementsOnlyResolver(Search search, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) {
+ super(search, deployLogger, rankProfileRegistry, queryProfiles);
+ }
+
+ @Override
+ public void process(boolean validate, boolean documentsOnly) {
+ for (var entry : search.getSummaries().entrySet()) {
+ var summary = entry.getValue();
+ for (var field : summary.getSummaryFields()) {
+ if (field.getTransform().equals(SummaryTransform.MATCHED_ELEMENTS_FILTER)) {
+ processSummaryField(summary, field, validate);
+ }
+ }
+ }
+ }
+
+ private void processSummaryField(DocumentSummary summary, SummaryField field, boolean validate) {
+ var sourceField = search.getField(field.getSingleSource());
+ if (sourceField != null) {
+ if (isSupportedComplexField(sourceField)) {
+ if (isComplexFieldWithOnlyStructFieldAttributes(sourceField)) {
+ field.setTransform(SummaryTransform.MATCHED_ATTRIBUTE_ELEMENTS_FILTER);
+ }
+ } else if (validate) {
+ fail(summary, field, "'matched-elements-only' is not supported for this field type. " +
+ "Supported field types are array of simple struct, map of primitive type to simple struct, " +
+ "and map of primitive type to primitive type");
+ }
+ }
+ // else case is handled in SummaryFieldsMustHaveValidSource
+ }
+
+ private void fail(DocumentSummary summary, SummaryField field, String msg) {
+ throw new IllegalArgumentException(formatError(search, summary, field, msg));
+ }
+
+ private String formatError(Search search, DocumentSummary summary, SummaryField field, String msg) {
+ return "For search '" + search.getName() + "', document summary '" + summary.getName()
+ + "', summary field '" + field.getName() + "': " + msg;
+ }
+
+}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/processing/Processing.java b/config-model/src/main/java/com/yahoo/searchdefinition/processing/Processing.java
index 47433479588..b0ba8e30f06 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/processing/Processing.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/processing/Processing.java
@@ -46,6 +46,7 @@ public class Processing {
SummaryConsistency::new,
SummaryNamesFieldCollisions::new,
SummaryFieldsMustHaveValidSource::new,
+ MatchedElementsOnlyResolver::new,
AddAttributeTransformToSummaryOfImportedFields::new,
MakeDefaultSummaryTheSuperSet::new,
Bolding::new,
diff --git a/config-model/src/main/java/com/yahoo/vespa/documentmodel/SummaryTransform.java b/config-model/src/main/java/com/yahoo/vespa/documentmodel/SummaryTransform.java
index 4f85dd0c84e..4279003bb9d 100644
--- a/config-model/src/main/java/com/yahoo/vespa/documentmodel/SummaryTransform.java
+++ b/config-model/src/main/java/com/yahoo/vespa/documentmodel/SummaryTransform.java
@@ -20,7 +20,9 @@ public enum SummaryTransform {
SUMMARYFEATURES("summaryfeatures"),
TEXTEXTRACTOR("textextractor"),
GEOPOS("geopos"),
- ATTRIBUTECOMBINER("attributecombiner");
+ ATTRIBUTECOMBINER("attributecombiner"),
+ MATCHED_ELEMENTS_FILTER("matchedelementsfilter"),
+ MATCHED_ATTRIBUTE_ELEMENTS_FILTER("matchedattributeelementsfilter");
private String name;
@@ -88,6 +90,7 @@ public enum SummaryTransform {
case RANKFEATURES:
case SUMMARYFEATURES:
case ATTRIBUTECOMBINER:
+ case MATCHED_ATTRIBUTE_ELEMENTS_FILTER:
return true;
default:
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/ml/ConvertedModel.java b/config-model/src/main/java/com/yahoo/vespa/model/ml/ConvertedModel.java
index 877b1ac72a9..4acb47df179 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/ml/ConvertedModel.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/ml/ConvertedModel.java
@@ -409,6 +409,24 @@ public class ConvertedModel {
return reduceBatchDimensionExpression(tensorFunction, typeContext);
}
}
+ // Modify any renames in expression to disregard batch dimension
+ else if (children.size() == 1 && children.get(0) instanceof TensorFunctionNode) {
+ TensorFunction childFunction = (((TensorFunctionNode) children.get(0)).function());
+ TensorType childType = childFunction.type(typeContext);
+ Rename rename = (Rename) tensorFunction;
+ List<String> from = new ArrayList<>();
+ List<String> to = new ArrayList<>();
+ for (TensorType.Dimension dimension : childType.dimensions()) {
+ int i = rename.fromDimensions().indexOf(dimension.name());
+ if (i < 0) {
+ throw new IllegalArgumentException("Rename does not contain dimension '" +
+ dimension + "' in child expression type: " + childType);
+ }
+ from.add(rename.fromDimensions().get(i));
+ to.add(rename.toDimensions().get(i));
+ }
+ return new TensorFunctionNode(new Rename(childFunction, from, to));
+ }
}
}
if (node instanceof ReferenceNode) {
diff --git a/config-model/src/main/javacc/SDParser.jj b/config-model/src/main/javacc/SDParser.jj
index db22e73268c..e560d78a116 100644
--- a/config-model/src/main/javacc/SDParser.jj
+++ b/config-model/src/main/javacc/SDParser.jj
@@ -241,6 +241,7 @@ TOKEN :
| < FULL: "full" >
| < STATIC: "static" >
| < DYNAMIC: "dynamic" >
+| < MATCHEDELEMENTSONLY: "matched-elements-only" >
| < SSCONTEXTUAL: "contextual" >
| < SSOVERRIDE: "override" >
| < SSTITLE: "title" >
@@ -1312,6 +1313,7 @@ SummaryInFieldOperation summaryInFieldShort(FieldOperationContainer field) :
<COLON> ( <DYNAMIC> { op.setTransform(SummaryTransform.DYNAMICTEASER);
op.addSource(name);
}
+ | <MATCHEDELEMENTSONLY> { op.setTransform(SummaryTransform.MATCHED_ELEMENTS_FILTER); }
| (<FULL> | <STATIC>) { op.setTransform(SummaryTransform.NONE); } )
{ return op; }
}
@@ -1362,6 +1364,7 @@ Object summaryItem(SummaryInFieldLongOperation field) : { }
Object summaryTransform(SummaryInFieldOperation field) : { }
{
( <DYNAMIC> { field.setTransform(SummaryTransform.DYNAMICTEASER); }
+ | <MATCHEDELEMENTSONLY> { field.setTransform(SummaryTransform.MATCHED_ELEMENTS_FILTER); }
| (<FULL> | <STATIC>) { field.setTransform(SummaryTransform.NONE); } )
{ return null; }
}
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/derived/SummaryMapTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/derived/SummaryMapTestCase.java
index b03db1d7f2e..3f05fc67831 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/derived/SummaryMapTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/derived/SummaryMapTestCase.java
@@ -16,6 +16,7 @@ import org.junit.Test;
import java.io.IOException;
import java.util.Iterator;
+import static com.yahoo.config.model.test.TestUtil.joinLines;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
@@ -150,4 +151,42 @@ public class SummaryMapTestCase extends SearchDefinitionTestCase {
}
}
+ @Test
+ public void source_field_is_passed_as_argument_in_matched_elements_filter_transforms() throws ParseException {
+ assertOverride(joinLines("field my_field type map<string, string> {",
+ " indexing: summary",
+ " summary: matched-elements-only",
+ " struct-field key { indexing: attribute }",
+ "}"), "my_field", SummaryTransform.MATCHED_ELEMENTS_FILTER.getName());
+
+ assertOverride(joinLines("field my_field type map<string, string> {",
+ " indexing: summary",
+ " summary: matched-elements-only",
+ " struct-field key { indexing: attribute }",
+ " struct-field value { indexing: attribute }",
+ "}"), "my_field", SummaryTransform.MATCHED_ATTRIBUTE_ELEMENTS_FILTER.getName());
+ }
+
+ private void assertOverride(String fieldContent, String expFieldName, String expCommand) throws ParseException {
+ var summaryMap = new SummaryMap(buildSearch(fieldContent));
+ var cfgBuilder = new SummarymapConfig.Builder();
+ summaryMap.getConfig(cfgBuilder);
+ var cfg = new SummarymapConfig(cfgBuilder);
+ var override = cfg.override(0);
+ assertEquals(expFieldName, override.field());
+ assertEquals(expCommand, override.command());
+ assertEquals(expFieldName, override.arguments());
+ }
+
+ private Search buildSearch(String field) throws ParseException {
+ var builder = new SearchBuilder(new RankProfileRegistry());
+ builder.importString(joinLines("search test {",
+ " document test {",
+ field,
+ " }",
+ "}"));
+ builder.build();
+ return builder.getSearch();
+ }
+
}
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/processing/MatchedElementsOnlyResolverTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/processing/MatchedElementsOnlyResolverTestCase.java
new file mode 100644
index 00000000000..3b6918c04ae
--- /dev/null
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/processing/MatchedElementsOnlyResolverTestCase.java
@@ -0,0 +1,151 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.searchdefinition.processing;
+
+import com.yahoo.searchdefinition.RankProfileRegistry;
+import com.yahoo.searchdefinition.Search;
+import com.yahoo.searchdefinition.SearchBuilder;
+import com.yahoo.searchdefinition.parser.ParseException;
+import com.yahoo.vespa.documentmodel.SummaryField;
+import com.yahoo.vespa.documentmodel.SummaryTransform;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import static com.yahoo.config.model.test.TestUtil.joinLines;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * @author geirst
+ */
+public class MatchedElementsOnlyResolverTestCase {
+
+ @Rule
+ public final ExpectedException exceptionRule = ExpectedException.none();
+
+ @Test
+ public void complex_field_with_some_struct_field_attributes_gets_default_transform() throws ParseException {
+ assertSummaryField(joinLines("field my_field type map<string, string> {",
+ " indexing: summary",
+ " summary: matched-elements-only",
+ " struct-field key { indexing: attribute }",
+ "}"),
+ "my_field", SummaryTransform.MATCHED_ELEMENTS_FILTER);
+
+ assertSummaryField(joinLines("field my_field type map<string, elem> {",
+ " indexing: summary",
+ " summary: matched-elements-only",
+ " struct-field key { indexing: attribute }",
+ "}"),
+ "my_field", SummaryTransform.MATCHED_ELEMENTS_FILTER);
+
+ assertSummaryField(joinLines("field my_field type array<elem> {",
+ " indexing: summary",
+ " summary: matched-elements-only",
+ " struct-field name { indexing: attribute }",
+ "}"),
+ "my_field", SummaryTransform.MATCHED_ELEMENTS_FILTER);
+ }
+
+ @Test
+ public void complex_field_with_only_struct_field_attributes_gets_attribute_transform() throws ParseException {
+ assertSummaryField(joinLines("field my_field type map<string, string> {",
+ " indexing: summary",
+ " summary: matched-elements-only",
+ " struct-field key { indexing: attribute }",
+ " struct-field value { indexing: attribute }",
+ "}"),
+ "my_field", SummaryTransform.MATCHED_ATTRIBUTE_ELEMENTS_FILTER);
+
+ assertSummaryField(joinLines("field my_field type map<string, elem> {",
+ " indexing: summary",
+ " summary: matched-elements-only",
+ " struct-field key { indexing: attribute }",
+ " struct-field value.name { indexing: attribute }",
+ " struct-field value.weight { indexing: attribute }",
+ "}"),
+ "my_field", SummaryTransform.MATCHED_ATTRIBUTE_ELEMENTS_FILTER);
+
+ assertSummaryField(joinLines("field my_field type array<elem> {",
+ " indexing: summary",
+ " summary: matched-elements-only",
+ " struct-field name { indexing: attribute }",
+ " struct-field weight { indexing: attribute }",
+ "}"),
+ "my_field", SummaryTransform.MATCHED_ATTRIBUTE_ELEMENTS_FILTER);
+ }
+
+ @Test
+ public void explicit_summary_field_can_use_filter_transform_with_reference_to_source_field() throws ParseException {
+ String documentSummary = joinLines("document-summary my_summary {",
+ " summary my_filter_field type map<string, string> {",
+ " source: my_field",
+ " matched-elements-only",
+ " }",
+ "}");
+ {
+ var search = buildSearch(joinLines("field my_field type map<string, string> {",
+ " indexing: summary",
+ " struct-field key { indexing: attribute }",
+ "}"),
+ documentSummary);
+ assertSummaryField(search.getSummaryField("my_filter_field"),
+ SummaryTransform.MATCHED_ELEMENTS_FILTER, "my_field");
+ assertSummaryField(search.getSummaryField("my_field"),
+ SummaryTransform.NONE, "my_field");
+ }
+ {
+ var search = buildSearch(joinLines("field my_field type map<string, string> {",
+ " indexing: summary",
+ " struct-field key { indexing: attribute }",
+ " struct-field value { indexing: attribute }",
+ "}"),
+ documentSummary);
+ assertSummaryField(search.getSummaryField("my_filter_field"),
+ SummaryTransform.MATCHED_ATTRIBUTE_ELEMENTS_FILTER, "my_field");
+ assertSummaryField(search.getSummaryField("my_field"),
+ SummaryTransform.ATTRIBUTECOMBINER, "my_field");
+ }
+ }
+
+ @Test
+ public void unsupported_field_type_throws() throws ParseException {
+ exceptionRule.expect(IllegalArgumentException.class);
+ exceptionRule.expectMessage("For search 'test', document summary 'default', summary field 'my_field': " +
+ "'matched-elements-only' is not supported for this field type. " +
+ "Supported field types are array of simple struct, map of primitive type to simple struct, and map of primitive type to primitive type");
+ buildSearch(joinLines("field my_field type string {",
+ " indexing: summary",
+ " summary: matched-elements-only",
+ "}"));
+ }
+
+ private void assertSummaryField(String fieldContent, String fieldName, SummaryTransform expTransform) throws ParseException {
+ var search = buildSearch(fieldContent);
+ assertSummaryField(search.getSummaryField(fieldName), expTransform, fieldName);
+ }
+
+ private void assertSummaryField(SummaryField field, SummaryTransform expTransform, String expSourceField) {
+ assertEquals(expTransform, field.getTransform());
+ assertEquals(expSourceField, field.getSingleSource());
+ }
+
+ private Search buildSearch(String field) throws ParseException {
+ return buildSearch(field, "");
+ }
+
+ private Search buildSearch(String field, String summary) throws ParseException {
+ var builder = new SearchBuilder(new RankProfileRegistry());
+ builder.importString(joinLines("search test {",
+ " document test {",
+ " struct elem {",
+ " field name type string {}",
+ " field weight type int {}",
+ " }",
+ field,
+ " }",
+ summary,
+ "}"));
+ builder.build();
+ return builder.getSearch();
+ }
+}
diff --git a/configd/src/apps/cmd/main.cpp b/configd/src/apps/cmd/main.cpp
index 53edaf80dba..49767ab47aa 100644
--- a/configd/src/apps/cmd/main.cpp
+++ b/configd/src/apps/cmd/main.cpp
@@ -4,6 +4,7 @@
#include <unistd.h>
#include <vespa/vespalib/util/signalhandler.h>
+#include <vespa/vespalib/util/exception.h>
#include <vespa/fnet/frt/supervisor.h>
#include <vespa/fnet/frt/target.h>
#include <vespa/fnet/frt/rpcrequest.h>
@@ -64,8 +65,13 @@ int
Cmd::run(const char *cmd, const char *arg)
{
int retval = 0;
- initRPC("tcp/localhost:19097");
-
+ try {
+ initRPC("tcp/localhost:19097");
+ } catch (vespalib::Exception &e) {
+ fprintf(stderr, "vespa-sentinel-cmd: exception in network initialization: %s\n",
+ e.what());
+ return 2;
+ }
FRT_RPCRequest *req = _server->supervisor().AllocRPCRequest();
req->SetMethodName(cmd);
diff --git a/configdefinitions/src/vespa/configserver.def b/configdefinitions/src/vespa/configserver.def
index e7e626f3d22..2c5d3ec60da 100644
--- a/configdefinitions/src/vespa/configserver.def
+++ b/configdefinitions/src/vespa/configserver.def
@@ -53,6 +53,7 @@ ztsUrl string default=""
maintainerIntervalMinutes int default=30
# TODO: Default set to a high value (1 year) => maintainer will not run, change when maintainer verified out in prod
tenantsMaintainerIntervalMinutes int default=525600
+keepUnusedFileReferencesHours int default=48
# Bootstrapping
# How long bootstrapping can take before giving up (in seconds)
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
index 3a9e5a55191..2734e5ad6b8 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
@@ -409,7 +409,7 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye
return fileDistributionStatus.status(getApplication(applicationId), timeout);
}
- public Set<String> deleteUnusedFiledistributionReferences(File fileReferencesPath) {
+ public Set<String> deleteUnusedFiledistributionReferences(File fileReferencesPath, Duration keepFileReferences) {
if (!fileReferencesPath.isDirectory()) throw new RuntimeException(fileReferencesPath + " is not a directory");
Set<String> fileReferencesInUse = new HashSet<>();
@@ -429,7 +429,7 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye
fileReferencesOnDisk.addAll(Arrays.stream(filesOnDisk).map(File::getName).collect(Collectors.toSet()));
log.log(LogLevel.DEBUG, "File references on disk (in " + fileReferencesPath + "): " + fileReferencesOnDisk);
- Instant instant = Instant.now().minus(Duration.ofDays(14));
+ Instant instant = Instant.now().minus(keepFileReferences);
Set<String> fileReferencesToDelete = fileReferencesOnDisk
.stream()
.filter(fileReference -> ! fileReferencesInUse.contains(fileReference))
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/FileDistributionMaintainer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/FileDistributionMaintainer.java
index c6accd04896..8388159ba07 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/FileDistributionMaintainer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/FileDistributionMaintainer.java
@@ -20,6 +20,7 @@ public class FileDistributionMaintainer extends Maintainer {
private final ApplicationRepository applicationRepository;
private final File fileReferencesDir;
+ private final ConfigserverConfig configserverConfig;
FileDistributionMaintainer(ApplicationRepository applicationRepository,
Curator curator,
@@ -27,11 +28,13 @@ public class FileDistributionMaintainer extends Maintainer {
ConfigserverConfig configserverConfig) {
super(applicationRepository, curator, interval);
this.applicationRepository = applicationRepository;
+ this.configserverConfig = configserverConfig;
this.fileReferencesDir = new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir()));
}
@Override
protected void maintain() {
- applicationRepository.deleteUnusedFiledistributionReferences(fileReferencesDir);
+ applicationRepository.deleteUnusedFiledistributionReferences(fileReferencesDir,
+ Duration.ofHours(configserverConfig.keepUnusedFileReferencesHours()));
}
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java
index 5731caff5a7..0802db23ea7 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java
@@ -3,14 +3,11 @@ package com.yahoo.vespa.config.server;
import com.google.common.io.Files;
import com.yahoo.cloud.config.ConfigserverConfig;
-import com.yahoo.component.Version;
-import com.yahoo.component.Vtag;
import com.yahoo.config.application.api.ApplicationMetaData;
import com.yahoo.config.model.application.provider.FilesApplicationPackage;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ApplicationName;
import com.yahoo.config.provision.Deployment;
-import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.Provisioner;
import com.yahoo.config.provision.TenantName;
@@ -224,7 +221,7 @@ public class ApplicationRepositoryTest {
PrepareParams prepareParams = new PrepareParams.Builder().applicationId(applicationId()).ignoreValidationErrors(true).build();
deployApp(new File("src/test/apps/app"), prepareParams);
- Set<String> toBeDeleted = applicationRepository.deleteUnusedFiledistributionReferences(fileReferencesDir);
+ Set<String> toBeDeleted = applicationRepository.deleteUnusedFiledistributionReferences(fileReferencesDir, Duration.ofHours(48));
assertEquals(Collections.singleton("foo"), toBeDeleted);
assertFalse(filereferenceDir.exists());
assertTrue(filereferenceDir2.exists());
diff --git a/container-dependencies-enforcer/pom.xml b/container-dependencies-enforcer/pom.xml
index 9a4e3704ecd..92407aa9c68 100644
--- a/container-dependencies-enforcer/pom.xml
+++ b/container-dependencies-enforcer/pom.xml
@@ -68,7 +68,7 @@
</excludes>
<includes>
<include>com.yahoo.vespa</include>
- <include>aopalliance:aopalliance:[1.0]:jar:provided</include>
+ <include>aopalliance:aopalliance:[${aopalliance.version}]:jar:provided</include>
<include>com.fasterxml.jackson.core:jackson-annotations:[${jackson2.version}]:jar:provided</include>
<include>com.fasterxml.jackson.core:jackson-core:[${jackson2.version}]:jar:provided</include>
<include>com.fasterxml.jackson.core:jackson-databind:[${jackson-databind.version}]:jar:provided</include>
@@ -89,18 +89,18 @@
<include>com.sun.activation:javax.activation:[1.2.0]:jar:provided</include>
<include>com.sun.xml.bind:jaxb-core:[${jaxb.version}]:jar:provided</include>
<include>com.sun.xml.bind:jaxb-impl:[${jaxb.version}]:jar:provided</include>
- <include>commons-daemon:commons-daemon:[1.0.3]:jar:provided</include>
+ <include>commons-daemon:commons-daemon:[${commons-daemon.version}]:jar:provided</include>
<include>commons-logging:commons-logging:[1.1.1]:jar:provided</include>
<include>javax.annotation:javax.annotation-api:[${javax.annotation-api.version}]:jar:provided</include>
- <include>javax.inject:javax.inject:[1]:jar:provided</include>
- <include>javax.servlet:javax.servlet-api:[3.1.0]:jar:provided</include>
+ <include>javax.inject:javax.inject:[${javax.inject.version}]:jar:provided</include>
+ <include>javax.servlet:javax.servlet-api:[${javax.servlet-api.version}]:jar:provided</include>
<include>javax.validation:validation-api:[${javax.validation-api.version}]:jar:provided</include>
<include>javax.ws.rs:javax.ws.rs-api:[${javax.ws.rs-api.version}]:jar:provided</include>
<include>javax.xml.bind:jaxb-api:[${jaxb.version}]:jar:provided</include>
<include>net.jcip:jcip-annotations:[1.0]:jar:provided</include>
- <include>net.jpountz.lz4:lz4:[1.3.0]:jar:provided</include>
+ <include>net.jpountz.lz4:lz4:[${lz4.version}]:jar:provided</include>
<include>org.apache.felix:org.apache.felix.framework:[${felix.version}]:jar:provided</include>
- <include>org.apache.felix:org.apache.felix.log:[1.0.1]:jar:provided</include>
+ <include>org.apache.felix:org.apache.felix.log:[${felix.log.version}]:jar:provided</include>
<include>org.apache.felix:org.apache.felix.main:[${felix.version}]:jar:provided</include>
<include>org.bouncycastle:bcpkix-jdk15on:[${bouncycastle.version}]:jar:provided</include>
<include>org.bouncycastle:bcprov-jdk15on:[${bouncycastle.version}]:jar:provided</include>
@@ -125,13 +125,13 @@
<include>org.glassfish.jersey.media:jersey-media-json-jackson:[${jersey2.version}]:jar:provided</include>
<include>org.glassfish.jersey.media:jersey-media-multipart:[${jersey2.version}]:jar:provided</include>
<include>org.javassist:javassist:[${javassist.version}]:jar:provided</include>
- <include>org.json:json:[20090211]:jar:provided</include>
+ <include>org.json:json:[${org.json.version}]:jar:provided</include>
<include>org.jvnet.mimepull:mimepull:[${mimepull.version}]:jar:provided</include>
<include>org.slf4j:jcl-over-slf4j:[${slf4j.version}]:jar:provided</include>
<include>org.slf4j:log4j-over-slf4j:[${slf4j.version}]:jar:provided</include>
<include>org.slf4j:slf4j-api:[${slf4j.version}]:jar:provided</include>
<include>org.slf4j:slf4j-jdk14:[${slf4j.version}]:jar:provided</include>
- <include>xml-apis:xml-apis:[1.4.01]:jar:provided</include>
+ <include>xml-apis:xml-apis:[${xml-apis.version}]:jar:provided</include>
</includes>
</bannedDependencies>
</rules>
diff --git a/container-dependency-versions/pom.xml b/container-dependency-versions/pom.xml
index b377e7203a9..44ee98ae521 100644
--- a/container-dependency-versions/pom.xml
+++ b/container-dependency-versions/pom.xml
@@ -34,39 +34,12 @@
<url>git@github.com:vespa-engine/vespa.git</url>
</scm>
-
- <!-- TODO: add pluginManagement for bundle-plugin and/or compiler-plugin?
- <build>
- <pluginManagement>
- <plugins>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-compiler-plugin</artifactId>
- <version>3.6.1</version>
- <configuration>
- <source>1.8</source>
- <target>1.8</target>
- <optimize>true</optimize>
- </configuration>
- </plugin>
- <plugin>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>bundle-plugin</artifactId>
- <version>${project.version}</version>
- <configuration>
- <configGenVersion>${project.version}</configGenVersion>
- </configuration>
- </plugin>
- </plugins>
- </pluginManagement>
- </build>
- -->
<dependencyManagement>
<dependencies>
<dependency>
<groupId>aopalliance</groupId>
<artifactId>aopalliance</artifactId>
- <version>1.0</version>
+ <version>${aopalliance.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
@@ -144,12 +117,12 @@
<dependency>
<groupId>commons-daemon</groupId>
<artifactId>commons-daemon</artifactId>
- <version>1.0.3</version>
+ <version>${commons-daemon.version}</version>
</dependency>
<dependency>
- <!-- This version is exported by jdisc via jcl-over-slf4j. -->
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
+ <!-- This version is exported by jdisc via jcl-over-slf4j. -->
<version>1.1.1</version>
</dependency>
<dependency>
@@ -160,12 +133,12 @@
<dependency>
<groupId>javax.inject</groupId>
<artifactId>javax.inject</artifactId>
- <version>1</version>
+ <version>${javax.inject.version}</version>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId>
- <version>3.1.0</version>
+ <version>${javax.servlet-api.version}</version>
</dependency>
<dependency>
<groupId>javax.validation</groupId>
@@ -213,7 +186,7 @@
<dependency>
<groupId>net.jpountz.lz4</groupId>
<artifactId>lz4</artifactId>
- <version>1.3.0</version>
+ <version>${lz4.version}</version>
</dependency>
<dependency>
<groupId>org.apache.felix</groupId>
@@ -223,7 +196,7 @@
<dependency>
<groupId>org.apache.felix</groupId>
<artifactId>org.apache.felix.log</artifactId>
- <version>1.0.1</version>
+ <version>${felix.log.version}</version>
</dependency>
<dependency>
<groupId>org.apache.felix</groupId>
@@ -348,7 +321,7 @@
<dependency>
<groupId>org.json</groupId>
<artifactId>json</artifactId>
- <version>20090211</version>
+ <version>${org.json.version}</version>
</dependency>
<dependency>
<groupId>org.jvnet.mimepull</groupId>
@@ -378,7 +351,7 @@
<dependency>
<groupId>xml-apis</groupId>
<artifactId>xml-apis</artifactId>
- <version>1.4.01</version>
+ <version>${xml-apis.version}</version>
</dependency>
<!-- NOTE: The dependencies below are not provided from the jdisc container runtime, but had to be moved
@@ -442,15 +415,46 @@
</dependencies>
</dependencyManagement>
+ <profiles>
+ <profile>
+ <id>check-dependency-properties</id>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>versions-maven-plugin</artifactId>
+ <version>${versions-maven-plugin.version}</version>
+ <executions>
+ <execution>
+ <phase>verify</phase>
+ <goals>
+ <goal>display-property-updates</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+ </profile>
+ </profiles>
+
<properties>
+ <aopalliance.version>1.0</aopalliance.version>
<bouncycastle.version>1.58</bouncycastle.version>
+ <commons-daemon.version>1.0.3</commons-daemon.version>
<felix.version>6.0.3</felix.version>
+ <felix.log.version>1.0.1</felix.log.version>
<findbugs.version>1.3.9</findbugs.version>
<guava.version>20.0</guava.version>
<guice.version>3.0</guice.version>
+ <javax.inject.version>1</javax.inject.version>
+ <javax.servlet-api.version>3.1.0</javax.servlet-api.version>
<jaxb.version>2.3.0</jaxb.version>
<jetty.version>9.4.20.v20190813</jetty.version>
+ <lz4.version>1.3.0</lz4.version>
+ <org.json.version>20090211</org.json.version>
<slf4j.version>1.7.5</slf4j.version>
+ <xml-apis.version>1.4.01</xml-apis.version>
<!-- These must be kept in sync with version used by current jersey2.version. -->
<!-- MUST be updated each time jersey2 is upgraded! -->
@@ -466,6 +470,9 @@
<javax.ws.rs-api.version>2.0.1</javax.ws.rs-api.version>
<jersey2.version>2.25</jersey2.version>
<mimepull.version>1.9.6</mimepull.version>
+
+ <!-- Not a dependency. Only included to allow the versions-maven-plugin to check for updates of itself -->
+ <versions-maven-plugin.version>2.7</versions-maven-plugin.version>
</properties>
</project>
diff --git a/container-search/src/main/java/com/yahoo/prelude/fastsearch/SortDataHitSorter.java b/container-search/src/main/java/com/yahoo/prelude/fastsearch/SortDataHitSorter.java
index ee64632d40a..93a21476f35 100644
--- a/container-search/src/main/java/com/yahoo/prelude/fastsearch/SortDataHitSorter.java
+++ b/container-search/src/main/java/com/yahoo/prelude/fastsearch/SortDataHitSorter.java
@@ -10,6 +10,7 @@ import java.util.Comparator;
import java.util.List;
public class SortDataHitSorter {
+
public static void sort(HitGroup hitGroup, List<Hit> hits) {
var sorting = hitGroup.getQuery().getRanking().getSorting();
var fallbackOrderer = hitGroup.getOrderer();
@@ -61,4 +62,5 @@ public class SortDataHitSorter {
return fallback.compare(left, right);
}
}
+
}
diff --git a/container-search/src/main/java/com/yahoo/search/grouping/result/Group.java b/container-search/src/main/java/com/yahoo/search/grouping/result/Group.java
index 2a1e3199d7d..30bf0ff4bb0 100644
--- a/container-search/src/main/java/com/yahoo/search/grouping/result/Group.java
+++ b/container-search/src/main/java/com/yahoo/search/grouping/result/Group.java
@@ -14,7 +14,6 @@ import com.yahoo.search.result.Relevance;
*/
public class Group extends HitGroup {
- private static final long serialVersionUID = 2122928012157537800L;
private final GroupId groupId;
/**
@@ -80,4 +79,5 @@ public class Group extends HitGroup {
}
return null;
}
+
}
diff --git a/container-search/src/main/java/com/yahoo/search/grouping/result/HitList.java b/container-search/src/main/java/com/yahoo/search/grouping/result/HitList.java
index 03e0f7e6d59..653fc4cb978 100644
--- a/container-search/src/main/java/com/yahoo/search/grouping/result/HitList.java
+++ b/container-search/src/main/java/com/yahoo/search/grouping/result/HitList.java
@@ -6,20 +6,21 @@ import com.yahoo.search.grouping.GroupingRequest;
import com.yahoo.search.result.Hit;
/**
- * <p>This class represents a labeled hit list in the grouping result model. It is contained in {@link Group}, and
+ * This class represents a labeled hit list in the grouping result model. It is contained in {@link Group}, and
* contains one or more {@link Hit hits} itself, making this the parent of leaf nodes in the hierarchy of grouping
- * results. Use the {@link GroupingRequest#getResultGroup(Result)} to retrieve grouping results.</p>
+ * results. Use the {@link GroupingRequest#getResultGroup(Result)} to retrieve grouping results.
*
* @author Simon Thoresen Hult
*/
public class HitList extends AbstractList {
/**
- * <p>Constructs a new instance of this class.</p>
+ * Constructs a new instance of this class.
*
- * @param label The label to assign to this.
+ * @param label the label to assign to this
*/
public HitList(String label) {
super("hitlist", label);
}
+
}
diff --git a/container-search/src/main/java/com/yahoo/search/grouping/result/HitRenderer.java b/container-search/src/main/java/com/yahoo/search/grouping/result/HitRenderer.java
index 3907f87a276..da115807fc2 100644
--- a/container-search/src/main/java/com/yahoo/search/grouping/result/HitRenderer.java
+++ b/container-search/src/main/java/com/yahoo/search/grouping/result/HitRenderer.java
@@ -11,7 +11,7 @@ import java.util.Arrays;
import java.util.Map;
/**
- * This is a helper class for rendering grouping results.
+ * A helper for rendering grouping results.
*
* @author Simon Thoresen Hult
*/
@@ -94,4 +94,5 @@ public abstract class HitRenderer {
private static void renderContinuation(String id, Continuation continuation, XMLWriter writer) {
writer.openTag(TAG_CONTINUATION).attribute(TAG_CONTINUATION_ID, id).content(continuation, false).closeTag();
}
+
}
diff --git a/container-search/src/main/java/com/yahoo/search/grouping/vespa/HitConverter.java b/container-search/src/main/java/com/yahoo/search/grouping/vespa/HitConverter.java
index e8f4d566028..5ea0e3a0838 100644
--- a/container-search/src/main/java/com/yahoo/search/grouping/vespa/HitConverter.java
+++ b/container-search/src/main/java/com/yahoo/search/grouping/vespa/HitConverter.java
@@ -44,7 +44,8 @@ class HitConverter implements ResultBuilder.HitConverter {
}
private Hit convertFs4Hit(String summaryClass, FS4Hit groupHit) {
- FastHit hit = new FastHit(groupHit.getGlobalId().getRawId(), new Relevance(groupHit.getRank()),
+ FastHit hit = new FastHit(groupHit.getGlobalId().getRawId(),
+ new Relevance(groupHit.getRank()),
groupHit.getPath(), groupHit.getDistributionKey());
hit.setFillable();
hit.setSearcherSpecificMetaData(searcher, summaryClass);
@@ -72,4 +73,5 @@ class HitConverter implements ResultBuilder.HitConverter {
}
return ret;
}
+
}
diff --git a/container-search/src/main/java/com/yahoo/search/grouping/vespa/ResultBuilder.java b/container-search/src/main/java/com/yahoo/search/grouping/vespa/ResultBuilder.java
index 2402be27b9c..f45617d1cd7 100644
--- a/container-search/src/main/java/com/yahoo/search/grouping/vespa/ResultBuilder.java
+++ b/container-search/src/main/java/com/yahoo/search/grouping/vespa/ResultBuilder.java
@@ -285,6 +285,7 @@ class ResultBuilder {
page.putContinuations(hitList.continuations());
return hitList;
}
+
}
private class GroupListBuilder {
@@ -341,6 +342,7 @@ class ResultBuilder {
}
return ret;
}
+
}
private class PageInfo {
@@ -381,16 +383,17 @@ class ResultBuilder {
}
}
}
+
}
/**
- * Defines a helper interface to convert Vespa style grouping hits into corresponding instances of {@link Hit}. It
- * is an interface to simplify testing.
- *
- * @author Simon Thoresen Hult
+ * Defines a helper interface to convert Vespa style grouping hits into corresponding instances of {@link Hit}.
+ * It is an interface to simplify testing.
*/
public interface HitConverter {
- public com.yahoo.search.result.Hit toSearchHit(String summaryClass, com.yahoo.searchlib.aggregation.Hit hit);
+ com.yahoo.search.result.Hit toSearchHit(String summaryClass, com.yahoo.searchlib.aggregation.Hit hit);
+
}
+
}
diff --git a/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java b/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java
index 64db1cf0062..1c58081e4f1 100644
--- a/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java
+++ b/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java
@@ -394,27 +394,29 @@ public class SearchHandler extends LoggingRequestHandler {
log.log(LogLevel.DEBUG, () -> error.getDetailedMessage());
return new Result(query, error);
} catch (IllegalArgumentException e) {
- ErrorMessage error = ErrorMessage.createBadRequest("Invalid search request [" + request + "]: "
- + Exceptions.toMessageString(e));
- log.log(LogLevel.DEBUG, () -> error.getDetailedMessage());
- return new Result(query, error);
- } catch (LinkageError e) {
- // Should have been an Exception in an OSGi world - typical bundle dependency issue problem
- ErrorMessage error = ErrorMessage.createErrorInPluginSearcher(
- "Error executing " + searchChain + "]: " + Exceptions.toMessageString(e), e);
- log(request, query, e);
- return new Result(query, error);
- } catch (StackOverflowError e) { // Also recoverable
- ErrorMessage error = ErrorMessage.createErrorInPluginSearcher(
- "Error executing " + searchChain + "]: " + Exceptions.toMessageString(e), e);
+ if ("Comparison method violates its general contract!".equals(e.getMessage())) {
+ // This is an error in application components or Vespa code
+ log(request, query, e);
+ return new Result(query, ErrorMessage.createUnspecifiedError("Failed searching: " +
+ Exceptions.toMessageString(e), e));
+ }
+ else {
+ ErrorMessage error = ErrorMessage.createBadRequest("Invalid search request [" + request + "]: "
+ + Exceptions.toMessageString(e));
+ log.log(LogLevel.DEBUG, () -> error.getDetailedMessage());
+ return new Result(query, error);
+ }
+ } catch (LinkageError | StackOverflowError e) {
+ // LinkageError should have been an Exception in an OSGi world - typical bundle dependency issue problem
+ // StackOverflowError is recoverable
+ ErrorMessage error = ErrorMessage.createErrorInPluginSearcher("Error executing " + searchChain + "]: " +
+ Exceptions.toMessageString(e), e);
log(request, query, e);
return new Result(query, error);
} catch (Exception e) {
- Result result = new Result(query);
log(request, query, e);
- result.hits().addError(
- ErrorMessage.createUnspecifiedError("Failed searching: " + Exceptions.toMessageString(e), e));
- return result;
+ return new Result(query, ErrorMessage.createUnspecifiedError("Failed searching: " +
+ Exceptions.toMessageString(e), e));
}
}
@@ -448,12 +450,10 @@ public class SearchHandler extends LoggingRequestHandler {
private void log(String request, Query query, Throwable e) {
// Attempted workaround for missing stack traces
if (e.getStackTrace().length == 0) {
- log.log(LogLevel.ERROR,
- "Failed executing " + query.toDetailString() + " [" + request
- + "], received exception with no context", e);
+ log.log(LogLevel.ERROR, "Failed executing " + query.toDetailString() +
+ " [" + request + "], received exception with no context", e);
} else {
- log.log(LogLevel.ERROR,
- "Failed executing " + query.toDetailString() + " [" + request + "]", e);
+ log.log(LogLevel.ERROR, "Failed executing " + query.toDetailString() + " [" + request + "]", e);
}
}
diff --git a/container-search/src/main/java/com/yahoo/search/result/HitOrderer.java b/container-search/src/main/java/com/yahoo/search/result/HitOrderer.java
index 331a9ab5852..848666d628b 100644
--- a/container-search/src/main/java/com/yahoo/search/result/HitOrderer.java
+++ b/container-search/src/main/java/com/yahoo/search/result/HitOrderer.java
@@ -9,15 +9,13 @@ import java.util.List;
*
* @author bratseth
*/
-
public abstract class HitOrderer {
/** Orders the given list of hits */
public abstract void order(List<Hit> hits);
/**
- * Returns the Comparator that this HitOrderer uses internally to
- * sort hits. Returns null if no Comparator is used.
+ * Returns the Comparator that this HitOrderer uses internally to sort hits. Returns null if no Comparator is used.
* <p>
* This default implementation returns null.
*
diff --git a/container-search/src/main/java/com/yahoo/search/yql/YqlParser.java b/container-search/src/main/java/com/yahoo/search/yql/YqlParser.java
index b660d90072b..dfc603846fd 100644
--- a/container-search/src/main/java/com/yahoo/search/yql/YqlParser.java
+++ b/container-search/src/main/java/com/yahoo/search/yql/YqlParser.java
@@ -1039,7 +1039,7 @@ public class YqlParser implements Parser {
assertHasOperator(ast, ExpressionOperator.CONTAINS);
String field = getIndex(ast.getArgument(0));
if (userQuery != null && indexFactsSession.getIndex(field).isAttribute()) {
- userQuery.trace("Field '" + field + "' is an attribute, 'contains' will only match exactly", 1);
+ userQuery.trace("Field '" + field + "' is an attribute, 'contains' will only match exactly", 2);
}
return instantiateLeafItem(field, ast.<OperatorNode<ExpressionOperator>> getArgument(1));
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/ApplicationIdSource.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/ApplicationIdSource.java
new file mode 100644
index 00000000000..0562ec91fb1
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/ApplicationIdSource.java
@@ -0,0 +1,16 @@
+package com.yahoo.vespa.hosted.controller.api.integration;
+
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.TenantName;
+
+import java.util.List;
+
+public interface ApplicationIdSource {
+
+ /** Returns a list of all known application instance IDs. */
+ List<ApplicationId> listApplications();
+
+ /** Returns a list of all known application instance IDs for the given tenant. */
+ List<ApplicationId> listApplications(TenantName tenant);
+
+}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/BuildService.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/BuildService.java
index 600db544552..0f5b7f154e1 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/BuildService.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/BuildService.java
@@ -8,6 +8,7 @@ import java.util.Objects;
/**
* @author jonmv
*/
+// TODO jonmv: Remove this.
public interface BuildService {
/**
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Application.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Application.java
new file mode 100644
index 00000000000..83fb71422cb
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Application.java
@@ -0,0 +1,246 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller;
+
+import com.google.common.collect.ImmutableMap;
+import com.yahoo.component.Version;
+import com.yahoo.config.application.api.DeploymentSpec;
+import com.yahoo.config.application.api.ValidationOverrides;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.InstanceName;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
+import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueId;
+import com.yahoo.vespa.hosted.controller.api.integration.organization.User;
+import com.yahoo.vespa.hosted.controller.application.ApplicationActivity;
+import com.yahoo.vespa.hosted.controller.application.Change;
+import com.yahoo.vespa.hosted.controller.application.Deployment;
+import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
+import com.yahoo.vespa.hosted.controller.metric.ApplicationMetrics;
+import com.yahoo.vespa.hosted.controller.tenant.Tenant;
+
+import java.time.Instant;
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.OptionalInt;
+import java.util.OptionalLong;
+import java.util.stream.Collectors;
+
+/**
+ * An application. Belongs to a {@link Tenant}, and may have many {@link Instance}s.
+ *
+ * This is immutable.
+ *
+ * @author jonmv
+ */
+public class Application {
+
+ private final TenantAndApplicationId id;
+ private final Instant createdAt;
+ private final DeploymentSpec deploymentSpec;
+ private final ValidationOverrides validationOverrides;
+ private final OptionalLong projectId;
+ private final boolean internal;
+ private final Change change;
+ private final Change outstandingChange;
+ private final Optional<IssueId> deploymentIssueId;
+ private final Optional<IssueId> ownershipIssueId;
+ private final Optional<User> owner;
+ private final OptionalInt majorVersion;
+ private final ApplicationMetrics metrics;
+ private final Optional<String> pemDeployKey;
+ private final Map<InstanceName, Instance> instances;
+
+ /** Creates an empty application. */
+ public Application(TenantAndApplicationId id, Instant now) {
+ this(id, now, DeploymentSpec.empty, ValidationOverrides.empty, Change.empty(), Change.empty(),
+ Optional.empty(), Optional.empty(), Optional.empty(), OptionalInt.empty(),
+ new ApplicationMetrics(0, 0), Optional.empty(), OptionalLong.empty(), true, List.of());
+ }
+
+ // DO NOT USE! For serialization purposes, only.
+ public Application(TenantAndApplicationId id, Instant createdAt, DeploymentSpec deploymentSpec, ValidationOverrides validationOverrides,
+ Change change, Change outstandingChange, Optional<IssueId> deploymentIssueId, Optional<IssueId> ownershipIssueId, Optional<User> owner,
+ OptionalInt majorVersion, ApplicationMetrics metrics, Optional<String> pemDeployKey, OptionalLong projectId,
+ boolean internal, Collection<Instance> instances) {
+ this.id = Objects.requireNonNull(id, "id cannot be null");
+ this.createdAt = Objects.requireNonNull(createdAt, "instant of creation cannot be null");
+ this.deploymentSpec = Objects.requireNonNull(deploymentSpec, "deploymentSpec cannot be null");
+ this.validationOverrides = Objects.requireNonNull(validationOverrides, "validationOverrides cannot be null");
+ this.change = Objects.requireNonNull(change, "change cannot be null");
+ this.outstandingChange = Objects.requireNonNull(outstandingChange, "outstandingChange cannot be null");
+ this.deploymentIssueId = Objects.requireNonNull(deploymentIssueId, "deploymentIssueId cannot be null");
+ this.ownershipIssueId = Objects.requireNonNull(ownershipIssueId, "ownershipIssueId cannot be null");
+ this.owner = Objects.requireNonNull(owner, "owner cannot be null");
+ this.majorVersion = Objects.requireNonNull(majorVersion, "majorVersion cannot be null");
+ this.metrics = Objects.requireNonNull(metrics, "metrics cannot be null");
+ this.pemDeployKey = Objects.requireNonNull(pemDeployKey, "pemDeployKey cannot be null");
+ this.projectId = Objects.requireNonNull(projectId, "projectId cannot be null");
+ this.internal = internal;
+ this.instances = ImmutableMap.copyOf((Iterable<Map.Entry<InstanceName, Instance>>)
+ instances.stream()
+ .map(instance -> Map.entry(instance.name(), instance))
+ .sorted(Comparator.comparing(Map.Entry::getKey))
+ ::iterator);
+ }
+
+ /** Returns an aggregate application, from the given instances, if at least one. */
+ public static Optional<Application> aggregate(List<Instance> instances) {
+ if (instances.isEmpty())
+ return Optional.empty();
+
+ Instance base = instances.stream()
+ .filter(instance -> instance.id().instance().isDefault())
+ .findFirst()
+ .orElse(instances.iterator().next());
+
+ return Optional.of(new Application(TenantAndApplicationId.from(base.id()), base.createdAt(), base.deploymentSpec(),
+ base.validationOverrides(), base.change(), base.outstandingChange(),
+ base.deploymentJobs().issueId(), base.ownershipIssueId(), base.owner(),
+ base.majorVersion(), base.metrics(), base.pemDeployKey(),
+ base.deploymentJobs().projectId(), base.deploymentJobs().deployedInternally(), instances));
+ }
+
+    /** Returns an old Instance representation of this and the given instance, for serialization. */
+ public Instance legacy(InstanceName instance) {
+ Instance base = require(instance);
+
+ return new Instance(base.id(), createdAt, deploymentSpec, validationOverrides, base.deployments(),
+ new DeploymentJobs(projectId, base.deploymentJobs().jobStatus().values(), deploymentIssueId, internal),
+ change, outstandingChange, ownershipIssueId, owner,
+ majorVersion, metrics, pemDeployKey, base.rotations(), base.rotationStatus());
+ }
+
+ public TenantAndApplicationId id() { return id; }
+
+ public Instant createdAt() { return createdAt; }
+
+ /**
+ * Returns the last deployed deployment spec of this application,
+ * or the empty deployment spec if it has never been deployed
+ */
+ public DeploymentSpec deploymentSpec() { return deploymentSpec; }
+
+ /** Returns the project id of this application, if it has any. */
+ public OptionalLong projectId() { return projectId; }
+
+ /** Returns whether this application is run on the internal deployment pipeline. */
+ // TODO jonmv: Remove, as will be always true.
+ public boolean internal() { return internal; }
+
+ /**
+ * Returns the last deployed validation overrides of this application,
+ * or the empty validation overrides if it has never been deployed
+ * (or was deployed with an empty/missing validation overrides)
+ */
+ public ValidationOverrides validationOverrides() { return validationOverrides; }
+
+ /** Returns the instances of this application */
+ public Map<InstanceName, Instance> instances() { return instances; }
+
+ /** Returns the instance with the given name, if it exists. */
+ public Optional<Instance> get(InstanceName instance) { return Optional.ofNullable(instances.get(instance)); }
+
+ /** Returns the instance with the given name, or throws. */
+ public Instance require(InstanceName instance) {
+ return get(instance).orElseThrow(() -> new IllegalArgumentException("Unknown instance '" + instance + "'"));
+ }
+
+ /**
+ * Returns base change for this application, i.e., the change that is deployed outside block windows.
+ * This is empty when no change is currently under deployment.
+ */
+ public Change change() { return change; }
+
+ /**
+     * Returns the outstanding change of this (in the source repository), which
+     * has currently not started deploying (because a deployment is (or was) already in progress).
+ */
+ public Change outstandingChange() { return outstandingChange; }
+
+ /** Returns ID of any open deployment issue filed for this */
+ public Optional<IssueId> deploymentIssueId() {
+ return deploymentIssueId;
+ }
+
+ /** Returns ID of the last ownership issue filed for this */
+ public Optional<IssueId> ownershipIssueId() {
+ return ownershipIssueId;
+ }
+
+ public Optional<User> owner() {
+ return owner;
+ }
+
+ /**
+ * Overrides the system major version for this application. This override takes effect if the deployment
+ * spec does not specify a major version.
+ */
+ public OptionalInt majorVersion() { return majorVersion; }
+
+ /** Returns metrics for this */
+ public ApplicationMetrics metrics() {
+ return metrics;
+ }
+
+ /** Returns activity for this */
+ public ApplicationActivity activity() {
+ return ApplicationActivity.from(instances.values().stream()
+ .flatMap(instance -> instance.deployments().values().stream())
+ .collect(Collectors.toUnmodifiableList()));
+ }
+
+ public Map<InstanceName, List<Deployment>> productionDeployments() {
+ return instances.values().stream()
+ .collect(Collectors.toUnmodifiableMap(Instance::name,
+ instance -> List.copyOf(instance.productionDeployments().values())));
+ }
+ /**
+ * Returns the oldest platform version this has deployed in a permanent zone (not test or staging).
+ *
+ * This is unfortunately quite similar to {@link ApplicationController#oldestInstalledPlatform(TenantAndApplicationId)},
+ * but this checks only what the controller has deployed to the production zones, while that checks the node repository
+ * to see what's actually installed on each node. Thus, this is the right choice for, e.g., target Vespa versions for
+ * new deployments, while that is the right choice for version to compile against.
+ */
+ public Optional<Version> oldestDeployedPlatform() {
+ return productionDeployments().values().stream().flatMap(List::stream)
+ .map(Deployment::version)
+ .min(Comparator.naturalOrder());
+ }
+
+ /**
+ * Returns the oldest application version this has deployed in a permanent zone (not test or staging).
+ */
+ public Optional<ApplicationVersion> oldestDeployedApplication() {
+ return productionDeployments().values().stream().flatMap(List::stream)
+ .map(Deployment::applicationVersion)
+ .min(Comparator.naturalOrder());
+ }
+
+ public Optional<String> pemDeployKey() { return pemDeployKey; }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (! (o instanceof Application)) return false;
+
+ Application that = (Application) o;
+
+ return id.equals(that.id);
+ }
+
+ @Override
+ public int hashCode() {
+ return id.hashCode();
+ }
+
+ @Override
+ public String toString() {
+ return "application '" + id + "'";
+ }
+
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
index b6a647b434e..7c80593dee8 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
@@ -9,9 +9,8 @@ import com.yahoo.config.application.api.ValidationOverrides;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ApplicationName;
import com.yahoo.config.provision.ClusterSpec;
-import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.TenantName;
-import com.yahoo.config.provision.zone.ZoneApi;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.athenz.api.AthenzDomain;
import com.yahoo.vespa.athenz.api.AthenzIdentity;
@@ -57,6 +56,7 @@ import com.yahoo.vespa.hosted.controller.application.JobList;
import com.yahoo.vespa.hosted.controller.application.JobStatus;
import com.yahoo.vespa.hosted.controller.application.JobStatus.JobRun;
import com.yahoo.vespa.hosted.controller.application.SystemApplication;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.athenz.impl.AthenzFacade;
import com.yahoo.vespa.hosted.controller.concurrent.Once;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTrigger;
@@ -149,20 +149,35 @@ public class ApplicationController {
// Update serialization format of all applications
Once.after(Duration.ofMinutes(1), () -> {
+ curator.clearInstanceRoot();
Instant start = clock.instant();
int count = 0;
- for (Instance instance : curator.readInstances()) {
- lockIfPresent(instance.id(), this::store);
+ for (Application application : curator.readApplications()) {
+ lockApplicationIfPresent(application.id(), this::store);
count++;
}
log.log(Level.INFO, String.format("Wrote %d applications in %s", count,
Duration.between(start, clock.instant())));
});
+
+ // TODO jonmv: Do the above for applications as well when they split writes.
+ }
+
+    /** Returns the application with the given id, or empty if it is not present */
+ public Optional<Application> getApplication(TenantAndApplicationId id) {
+ return curator.readApplication(id);
}
/** Returns the application with the given id, or null if it is not present */
- public Optional<Instance> get(ApplicationId id) {
- return curator.readInstance(id);
+ // TODO jonmv: remove
+ public Optional<Application> getApplication(ApplicationId id) {
+ return getApplication(TenantAndApplicationId.from(id));
+ }
+
+    /** Returns the instance with the given id, or empty if it is not present */
+ // TODO jonmv: remove or inline
+ public Optional<Instance> getInstance(ApplicationId id) {
+ return getApplication(id).flatMap(application -> application.get(id.instance()));
}
/**
@@ -170,18 +185,28 @@ public class ApplicationController {
*
* @throws IllegalArgumentException if it does not exist
*/
- public Instance require(ApplicationId id) {
- return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
+ public Application requireApplication(TenantAndApplicationId id) {
+ return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
+ }
+
+ /**
+ * Returns the instance with the given id
+ *
+ * @throws IllegalArgumentException if it does not exist
+ */
+    // TODO jonmv: remove or inline
+ public Instance requireInstance(ApplicationId id) {
+ return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
}
/** Returns a snapshot of all applications */
- public List<Instance> asList() {
- return sort(curator.readInstances());
+ public List<Application> asList() {
+ return curator.readApplications();
}
- /** Returns all applications of a tenant */
- public List<Instance> asList(TenantName tenant) {
- return sort(curator.readInstances(tenant));
+ /** Returns a snapshot of all applications of a tenant */
+ public List<Application> asList(TenantName tenant) {
+ return curator.readApplications(tenant);
}
public ArtifactRepository artifacts() { return artifactRepository; }
@@ -189,13 +214,17 @@ public class ApplicationController {
public ApplicationStore applicationStore() { return applicationStore; }
/** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */
- public Version oldestInstalledPlatform(ApplicationId id) {
- return get(id).flatMap(application -> application.productionDeployments().keySet().stream()
- .flatMap(zone -> configServer.nodeRepository().list(zone, id, EnumSet.of(active, reserved)).stream())
- .map(Node::currentVersion)
- .filter(version -> ! version.isEmpty())
- .min(naturalOrder()))
- .orElse(controller.systemVersion());
+ public Version oldestInstalledPlatform(TenantAndApplicationId id) {
+ return requireApplication(id).instances().values().stream()
+ .flatMap(instance -> instance.productionDeployments().keySet().stream()
+ .flatMap(zone -> configServer.nodeRepository().list(zone,
+ id.instance(instance.name()),
+ EnumSet.of(active, reserved))
+ .stream())
+ .map(Node::currentVersion)
+ .filter(version -> ! version.isEmpty()))
+ .min(naturalOrder())
+ .orElse(controller.systemVersion());
}
/** Change the global endpoint status for given deployment */
@@ -234,20 +263,21 @@ public class ApplicationController {
*
* @throws IllegalArgumentException if the application already exists
*/
- public Instance createApplication(ApplicationId id, Optional<Credentials> credentials) {
+ // TODO jonmv: split in create application and create instance
+ public Application createApplication(ApplicationId id, Optional<Credentials> credentials) {
if (id.instance().isTester())
throw new IllegalArgumentException("'" + id + "' is a tester application!");
- try (Lock lock = lock(id)) {
+ try (Lock lock = lock(TenantAndApplicationId.from(id))) {
// Validate only application names which do not already exist.
- if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
+ if (getApplication(TenantAndApplicationId.from(id)).isEmpty())
com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
Optional<Tenant> tenant = controller.tenants().get(id.tenant());
if (tenant.isEmpty())
throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
- if (get(id).isPresent())
+ if (getInstance(id).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
- if (get(dashToUnderscore(id)).isPresent()) // VESPA-1945
+ if (getInstance(dashToUnderscore(id)).isPresent()) // VESPA-1945
throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
if (tenant.get().type() != Tenant.Type.user) {
if (credentials.isEmpty())
@@ -256,7 +286,11 @@ public class ApplicationController {
if ( ! id.instance().isTester()) // Only store the application permits for non-user applications.
accessControl.createApplication(id, credentials.get());
}
- LockedInstance application = new LockedInstance(new Instance(id, clock.instant()), lock);
+ List<Instance> instances = getApplication(TenantAndApplicationId.from(id)).map(application -> application.instances().values())
+ .map(ArrayList::new)
+ .orElse(new ArrayList<>());
+ instances.add(new Instance(id, clock.instant()));
+ LockedApplication application = new LockedApplication(Application.aggregate(instances).get(), lock);
store(application);
log.info("Created " + application);
return application.get();
@@ -280,8 +314,9 @@ public class ApplicationController {
if (applicationId.instance().isTester())
throw new IllegalArgumentException("'" + applicationId + "' is a tester application!");
+ // TODO jonmv: Change this to create instances on demand.
Tenant tenant = controller.tenants().require(applicationId.tenant());
- if (tenant.type() == Tenant.Type.user && get(applicationId).isEmpty())
+ if (tenant.type() == Tenant.Type.user && getInstance(applicationId).isEmpty())
createApplication(applicationId, Optional.empty());
try (Lock deploymentLock = lockForDeployment(applicationId, zone)) {
@@ -291,8 +326,9 @@ public class ApplicationController {
Set<ContainerEndpoint> endpoints;
Optional<ApplicationCertificate> applicationCertificate;
- try (Lock lock = lock(applicationId)) {
- LockedInstance application = new LockedInstance(require(applicationId), lock);
+ try (Lock lock = lock(TenantAndApplicationId.from(applicationId))) {
+ LockedApplication application = new LockedApplication(requireApplication(TenantAndApplicationId.from(applicationId)), lock);
+ InstanceName instance = applicationId.instance();
boolean manuallyDeployed = options.deployDirectly || zone.environment().isManuallyDeployed();
boolean preferOldestVersion = options.deployCurrentVersion;
@@ -302,14 +338,15 @@ public class ApplicationController {
applicationVersion = applicationVersionFromDeployer.orElse(ApplicationVersion.unknown);
applicationPackage = applicationPackageFromDeployer.orElseThrow(
() -> new IllegalArgumentException("Application package must be given when deploying to " + zone));
- platformVersion = options.vespaVersion.map(Version::new).orElse(applicationPackage.deploymentSpec().majorVersion()
- .flatMap(this::lastCompatibleVersion)
- .orElseGet(controller::systemVersion));
+ platformVersion = options.vespaVersion.map(Version::new)
+ .orElse(applicationPackage.deploymentSpec().majorVersion()
+ .flatMap(this::lastCompatibleVersion)
+ .orElseGet(controller::systemVersion));
}
else {
JobType jobType = JobType.from(controller.system(), zone)
.orElseThrow(() -> new IllegalArgumentException("No job is known for " + zone + "."));
- Optional<JobStatus> job = Optional.ofNullable(application.get().deploymentJobs().jobStatus().get(jobType));
+ Optional<JobStatus> job = Optional.ofNullable(application.get().require(instance).deploymentJobs().jobStatus().get(jobType));
if ( job.isEmpty()
|| job.get().lastTriggered().isEmpty()
|| job.get().lastCompleted().isPresent() && job.get().lastCompleted().get().at().isAfter(job.get().lastTriggered().get().at()))
@@ -320,32 +357,33 @@ public class ApplicationController {
applicationVersion = preferOldestVersion ? triggered.sourceApplication().orElse(triggered.application())
: triggered.application();
- applicationPackage = getApplicationPackage(application.get(), applicationVersion);
+ applicationPackage = getApplicationPackage(applicationId, application.get().internal(), applicationVersion);
applicationPackage = withTesterCertificate(applicationPackage, applicationId, jobType);
- validateRun(application.get(), zone, platformVersion, applicationVersion);
+ validateRun(application.get(), instance, zone, platformVersion, applicationVersion);
}
// TODO jonmv: Remove this when all packages are validated upon submission, as in ApplicationApiHandler.submit(...).
verifyApplicationIdentityConfiguration(applicationId.tenant(), applicationPackage, deployingIdentity);
- // Assign and register endpoints
- application = withRotation(application, zone);
- endpoints = registerEndpointsInDns(application.get(), zone);
+ if (zone.environment().isProduction()) // Assign and register endpoints
+ application = withRotation(application, instance);
+
+ endpoints = registerEndpointsInDns(application.get().deploymentSpec(), application.get().require(applicationId.instance()), zone);
+
if (controller.zoneRegistry().zones().directlyRouted().ids().contains(zone)) {
- // Get application certificate (provisions a new certificate if missing)
- List<? extends ZoneApi> zones = controller.zoneRegistry().zones().all().zones();
- applicationCertificate = getApplicationCertificate(application.get());
+ // Provisions a new certificate if missing
+ applicationCertificate = getApplicationCertificate(application.get().require(instance));
} else {
applicationCertificate = Optional.empty();
}
- // Update application with information from application package
+ // TODO jonmv: REMOVE! This is now irrelevant for non-CD-test deployments and non-unit tests.
if ( ! preferOldestVersion
- && ! application.get().deploymentJobs().deployedInternally()
- && ! zone.environment().isManuallyDeployed())
- // TODO(jvenstad): Store only on submissions
+ && ! application.get().internal()
+ && ! zone.environment().isManuallyDeployed()) {
storeWithUpdatedConfig(application, applicationPackage);
+ }
} // Release application lock while doing the deployment, which is a lengthy task.
// Carry out deployment without holding the application lock.
@@ -353,9 +391,10 @@ public class ApplicationController {
ActivateResult result = deploy(applicationId, applicationPackage, zone, options, endpoints,
applicationCertificate.orElse(null));
- lockOrThrow(applicationId, application ->
- store(application.withNewDeployment(zone, applicationVersion, platformVersion, clock.instant(),
- warningsFrom(result))));
+ lockApplicationOrThrow(TenantAndApplicationId.from(applicationId), application ->
+ store(application.with(applicationId.instance(),
+ instance -> instance.withNewDeployment(zone, applicationVersion, platformVersion,
+ clock.instant(), warningsFrom(result)))));
return result;
}
}
@@ -374,19 +413,19 @@ public class ApplicationController {
}
/** Fetches the requested application package from the artifact store(s). */
- public ApplicationPackage getApplicationPackage(Instance instance, ApplicationVersion version) {
+ public ApplicationPackage getApplicationPackage(ApplicationId id, boolean internal, ApplicationVersion version) {
try {
- return instance.deploymentJobs().deployedInternally()
- ? new ApplicationPackage(applicationStore.get(instance.id(), version))
- : new ApplicationPackage(artifactRepository.getApplicationPackage(instance.id(), version.id()));
+ return internal
+ ? new ApplicationPackage(applicationStore.get(id, version))
+ : new ApplicationPackage(artifactRepository.getApplicationPackage(id, version.id()));
}
catch (RuntimeException e) { // If application has switched deployment pipeline, artifacts stored prior to the switch are in the other artifact store.
try {
- log.info("Fetching application package for " + instance.id() + " from alternate repository; it is now deployed "
- + (instance.deploymentJobs().deployedInternally() ? "internally" : "externally") + "\nException was: " + Exceptions.toMessageString(e));
- return instance.deploymentJobs().deployedInternally()
- ? new ApplicationPackage(artifactRepository.getApplicationPackage(instance.id(), version.id()))
- : new ApplicationPackage(applicationStore.get(instance.id(), version));
+ log.info("Fetching application package for " + id + " from alternate repository; it is now deployed "
+ + (internal ? "internally" : "externally") + "\nException was: " + Exceptions.toMessageString(e));
+ return internal
+ ? new ApplicationPackage(artifactRepository.getApplicationPackage(id, version.id()))
+ : new ApplicationPackage(applicationStore.get(id, version));
}
catch (RuntimeException s) { // If this fails, too, the first failure is most likely the relevant one.
e.addSuppressed(s);
@@ -396,7 +435,7 @@ public class ApplicationController {
}
/** Stores the deployment spec and validation overrides from the application package, and runs cleanup. */
- public LockedInstance storeWithUpdatedConfig(LockedInstance application, ApplicationPackage applicationPackage) {
+ public void storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) {
deploymentSpecValidator.validate(applicationPackage.deploymentSpec());
application = application.with(applicationPackage.deploymentSpec());
@@ -405,13 +444,14 @@ public class ApplicationController {
// Delete zones not listed in DeploymentSpec, if allowed
// We do this at deployment time for externally built applications, and at submission time
// for internally built ones, to be able to return a validation failure message when necessary
- application = withoutDeletedDeployments(application);
-
- // Clean up deployment jobs that are no longer referenced by deployment spec
- application = withoutUnreferencedDeploymentJobs(application);
+ for (InstanceName instanceName : application.get().instances().keySet()) {
+ application = withoutDeletedDeployments(application, instanceName);
+ // Clean up deployment jobs that are no longer referenced by deployment spec
+ DeploymentSpec deploymentSpec = application.get().deploymentSpec();
+ application = application.with(instanceName, instance -> withoutUnreferencedDeploymentJobs(deploymentSpec, instance));
+ }
store(application);
- return application;
}
/** Deploy a system application to given zone */
@@ -459,13 +499,13 @@ public class ApplicationController {
}
/** Makes sure the application has a global rotation, if eligible. */
- private LockedInstance withRotation(LockedInstance application, ZoneId zone) {
- if (zone.environment() == Environment.prod) {
- try (RotationLock rotationLock = rotationRepository.lock()) {
- var rotations = rotationRepository.getOrAssignRotations(application.get(), rotationLock);
- application = application.with(rotations);
- store(application); // store assigned rotation even if deployment fails
- }
+ private LockedApplication withRotation(LockedApplication application, InstanceName instanceName) {
+ try (RotationLock rotationLock = rotationRepository.lock()) {
+ var rotations = rotationRepository.getOrAssignRotations(application.get().deploymentSpec(),
+ application.get().require(instanceName),
+ rotationLock);
+ application = application.with(instanceName, instance -> instance.with(rotations));
+ store(application); // store assigned rotation even if deployment fails
}
return application;
}
@@ -475,9 +515,9 @@ public class ApplicationController {
*
* @return the registered endpoints
*/
- private Set<ContainerEndpoint> registerEndpointsInDns(Instance instance, ZoneId zone) {
+ private Set<ContainerEndpoint> registerEndpointsInDns(DeploymentSpec deploymentSpec, Instance instance, ZoneId zone) {
var containerEndpoints = new HashSet<ContainerEndpoint>();
- var registerLegacyNames = instance.deploymentSpec().globalServiceId().isPresent();
+ var registerLegacyNames = deploymentSpec.globalServiceId().isPresent();
for (var assignedRotation : instance.rotations()) {
var names = new ArrayList<String>();
var endpoints = instance.endpointsIn(controller.system(), assignedRotation.endpointId())
@@ -566,16 +606,17 @@ public class ApplicationController {
return new ActivateResult(new RevisionId("0"), prepareResponse, 0);
}
- private LockedInstance withoutDeletedDeployments(LockedInstance application) {
- List<Deployment> deploymentsToRemove = application.get().productionDeployments().values().stream()
- .filter(deployment -> ! application.get().deploymentSpec().includes(deployment.zone().environment(),
- Optional.of(deployment.zone().region())))
- .collect(Collectors.toList());
+ private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) {
+ DeploymentSpec deploymentSpec = application.get().deploymentSpec();
+ List<Deployment> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream()
+ .filter(deployment -> ! deploymentSpec.includes(deployment.zone().environment(),
+ Optional.of(deployment.zone().region())))
+ .collect(Collectors.toList());
if (deploymentsToRemove.isEmpty()) return application;
if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
- throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get() +
+ throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) +
" is deployed in " +
deploymentsToRemove.stream()
.map(deployment -> deployment.zone().region().value())
@@ -585,20 +626,19 @@ public class ApplicationController {
" in deployment.xml. " +
ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));
- LockedInstance applicationWithRemoval = application;
for (Deployment deployment : deploymentsToRemove)
- applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone());
- return applicationWithRemoval;
+ application = deactivate(application, instance, deployment.zone());
+ return application;
}
- private LockedInstance withoutUnreferencedDeploymentJobs(LockedInstance application) {
- for (JobType job : JobList.from(application.get()).production().mapToList(JobStatus::type)) {
+ private Instance withoutUnreferencedDeploymentJobs(DeploymentSpec deploymentSpec, Instance instance) {
+ for (JobType job : JobList.from(instance).production().mapToList(JobStatus::type)) {
ZoneId zone = job.zone(controller.system());
- if (application.get().deploymentSpec().includes(zone.environment(), Optional.of(zone.region())))
+ if (deploymentSpec.includes(zone.environment(), Optional.of(zone.region())))
continue;
- application = application.withoutDeploymentJob(job);
+ instance = instance.withoutDeploymentJob(job);
}
- return application;
+ return instance;
}
private DeployOptions withVersion(Version version, DeployOptions options) {
@@ -610,7 +650,7 @@ public class ApplicationController {
/** Returns the endpoints of the deployment, or empty if the request fails */
public List<URI> getDeploymentEndpoints(DeploymentId deploymentId) {
- if ( ! get(deploymentId.applicationId())
+ if ( ! getInstance(deploymentId.applicationId())
.map(application -> application.deployments().containsKey(deploymentId.zoneId()))
.orElse(deploymentId.applicationId().instance().isTester()))
throw new NotExistsException("Deployment", deploymentId.toString());
@@ -629,7 +669,7 @@ public class ApplicationController {
/** Returns the non-empty endpoints per cluster in the given deployment, or empty if endpoints can't be found. */
public Map<ClusterSpec.Id, URI> clusterEndpoints(DeploymentId id) {
- if ( ! get(id.applicationId())
+ if ( ! getInstance(id.applicationId())
.map(application -> application.deployments().containsKey(id.zoneId()))
.orElse(id.applicationId().instance().isTester()))
throw new NotExistsException("Deployment", id.toString());
@@ -671,15 +711,16 @@ public class ApplicationController {
throw new IllegalArgumentException("Could not delete application '" + tenantName + "." + applicationName + "': No credentials provided");
// Find all instances of the application
- List<ApplicationId> instances = asList(tenantName).stream()
- .map(Instance::id)
- .filter(id -> id.application().equals(applicationName))
- .collect(Collectors.toList());
+ TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
+ List<ApplicationId> instances = requireApplication(id).instances().keySet().stream()
+ .map(id::instance)
+ .collect(Collectors.toUnmodifiableList());
if (instances.size() > 1)
throw new IllegalArgumentException("Could not delete application; more than one instance present: " + instances);
// TODO: Make this one transaction when database is moved to ZooKeeper
- instances.forEach(id -> deleteInstance(id, credentials));
+ for (ApplicationId instance : instances)
+ deleteInstance(instance, credentials);
}
/**
@@ -693,22 +734,22 @@ public class ApplicationController {
if (tenant.type() != Tenant.Type.user && credentials.isEmpty())
throw new IllegalArgumentException("Could not delete application '" + applicationId + "': No credentials provided");
- if (controller.applications().get(applicationId).isEmpty()) {
+ if (getInstance(applicationId).isEmpty())
throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found");
- }
- lockOrThrow(applicationId, application -> {
- if ( ! application.get().deployments().isEmpty())
+ lockApplicationOrThrow(TenantAndApplicationId.from(applicationId), application -> {
+ if ( ! application.get().require(applicationId.instance()).deployments().isEmpty())
throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " +
- application.get().deployments().keySet().stream().map(ZoneId::toString)
+ application.get().require(applicationId.instance()).deployments().keySet().stream().map(ZoneId::toString)
.sorted().collect(Collectors.joining(", ")));
- curator.removeInstance(applicationId);
+ curator.removeApplication(applicationId);
applicationStore.removeAll(applicationId);
applicationStore.removeAll(TesterId.of(applicationId));
- application.get().rotations().forEach(assignedRotation -> {
- var endpoints = application.get().endpointsIn(controller.system(), assignedRotation.endpointId());
+ Instance instance = application.get().require(applicationId.instance());
+ instance.rotations().forEach(assignedRotation -> {
+ var endpoints = instance.endpointsIn(controller.system(), assignedRotation.endpointId());
endpoints.asList().stream()
.map(Endpoint::dnsName)
.forEach(name -> {
@@ -720,10 +761,7 @@ public class ApplicationController {
});
- if ( tenant.type() != Tenant.Type.user
- && controller.applications().asList(applicationId.tenant()).stream()
- .map(application -> application.id().application())
- .noneMatch(applicationId.application()::equals))
+ if (tenant.type() != Tenant.Type.user && getApplication(applicationId).isEmpty())
// TODO jonmv: Implementations ignore the instance — refactor to provide tenant and application names only.
accessControl.deleteApplication(applicationId, credentials.get());
}
@@ -733,8 +771,8 @@ public class ApplicationController {
*
* @param application a locked application to store
*/
- public void store(LockedInstance application) {
- curator.writeInstance(application.get());
+ public void store(LockedApplication application) {
+ curator.writeApplication(application.get());
}
/**
@@ -743,9 +781,9 @@ public class ApplicationController {
* @param applicationId ID of the application to lock and get.
* @param action Function which acts on the locked application.
*/
- public void lockIfPresent(ApplicationId applicationId, Consumer<LockedInstance> action) {
+ public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
try (Lock lock = lock(applicationId)) {
- get(applicationId).map(application -> new LockedInstance(application, lock)).ifPresent(action);
+ getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
}
}
@@ -756,9 +794,9 @@ public class ApplicationController {
* @param action Function which acts on the locked application.
* @throws IllegalArgumentException when application does not exist.
*/
- public void lockOrThrow(ApplicationId applicationId, Consumer<LockedInstance> action) {
+ public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
try (Lock lock = lock(applicationId)) {
- action.accept(new LockedInstance(require(applicationId), lock));
+ action.accept(new LockedApplication(requireApplication(applicationId), lock));
}
}
@@ -787,8 +825,9 @@ public class ApplicationController {
}
/** Deactivate application in the given zone */
- public void deactivate(ApplicationId application, ZoneId zone) {
- lockOrThrow(application, lockedInstance -> store(deactivate(lockedInstance, zone)));
+ public void deactivate(ApplicationId id, ZoneId zone) {
+ lockApplicationOrThrow(TenantAndApplicationId.from(id),
+ application -> store(deactivate(application, id.instance(), zone)));
}
/**
@@ -796,15 +835,15 @@ public class ApplicationController {
*
* @return the application with the deployment in the given zone removed
*/
- private LockedInstance deactivate(LockedInstance application, ZoneId zone) {
+ private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) {
try {
- configServer.deactivate(new DeploymentId(application.get().id(), zone));
+ configServer.deactivate(new DeploymentId(application.get().id().instance(instanceName), zone));
} catch (NotFoundException ignored) {
// ok; already gone
} finally {
- routingPolicies.refresh(application.get().id(), application.get().deploymentSpec(), zone);
+ routingPolicies.refresh(application.get().id().instance(instanceName), application.get().deploymentSpec(), zone);
}
- return application.withoutDeploymentIn(zone);
+ return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone));
}
public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }
@@ -820,7 +859,7 @@ public class ApplicationController {
* Any operation which stores an application need to first acquire this lock, then read, modify
* and store the application, and finally release (close) the lock.
*/
- Lock lock(ApplicationId application) {
+ Lock lock(TenantAndApplicationId application) {
return curator.lock(application);
}
@@ -832,14 +871,14 @@ public class ApplicationController {
}
/** Verify that we don't downgrade an existing production deployment. */
- private void validateRun(Instance instance, ZoneId zone, Version platformVersion, ApplicationVersion applicationVersion) {
- Deployment deployment = instance.deployments().get(zone);
+ private void validateRun(Application application, InstanceName instance, ZoneId zone, Version platformVersion, ApplicationVersion applicationVersion) {
+ Deployment deployment = application.require(instance).deployments().get(zone);
if ( zone.environment().isProduction() && deployment != null
- && ( platformVersion.compareTo(deployment.version()) < 0 && ! instance.change().isPinned()
+ && ( platformVersion.compareTo(deployment.version()) < 0 && ! application.change().isPinned()
|| applicationVersion.compareTo(deployment.applicationVersion()) < 0))
- throw new IllegalArgumentException(String.format("Rejecting deployment of %s to %s, as the requested versions (platform: %s, application: %s)" +
+ throw new IllegalArgumentException(String.format("Rejecting deployment of application %s to %s, as the requested versions (platform: %s, application: %s)" +
" are older than the currently deployed (platform: %s, application: %s).",
- instance, zone, platformVersion, applicationVersion, deployment.version(), deployment.applicationVersion()));
+ application.id().instance(instance), zone, platformVersion, applicationVersion, deployment.version(), deployment.applicationVersion()));
}
/** Returns the rotation repository, used for managing global rotation assignments */
@@ -851,11 +890,6 @@ public class ApplicationController {
return routingPolicies;
}
- /** Sort given list of applications by application ID */
- private static List<Instance> sort(List<Instance> instances) {
- return instances.stream().sorted(Comparator.comparing(Instance::id)).collect(Collectors.toList());
- }
-
/**
* Verifies that the application can be deployed to the tenant, following these rules:
*
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java
index 8ed94d2ead5..786eccc2e24 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java
@@ -5,12 +5,15 @@ import com.google.inject.Inject;
import com.yahoo.component.AbstractComponent;
import com.yahoo.component.Version;
import com.yahoo.component.Vtag;
+import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.CloudName;
import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.SystemName;
+import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.zone.ZoneApi;
import com.yahoo.vespa.curator.Lock;
import com.yahoo.vespa.flags.FlagSource;
+import com.yahoo.vespa.hosted.controller.api.integration.ApplicationIdSource;
import com.yahoo.vespa.hosted.controller.api.integration.ServiceRegistry;
import com.yahoo.vespa.hosted.controller.api.integration.maven.MavenRepository;
import com.yahoo.vespa.hosted.controller.api.integration.user.Roles;
@@ -34,6 +37,7 @@ import com.yahoo.vespa.serviceview.bindings.ApplicationView;
import java.time.Clock;
import java.util.LinkedHashMap;
+import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
@@ -56,7 +60,7 @@ import java.util.stream.Stream;
*
* @author bratseth
*/
-public class Controller extends AbstractComponent {
+public class Controller extends AbstractComponent implements ApplicationIdSource {
private static final Logger log = Logger.getLogger(Controller.class.getName());
@@ -271,22 +275,6 @@ public class Controller extends AbstractComponent {
return auditLogger;
}
- /** Returns all other roles the given tenant role implies. */
- public Set<Role> impliedRoles(TenantRole role) {
- return Stream.concat(Roles.tenantRoles(role.tenant()).stream(),
- applications().asList(role.tenant()).stream()
- .flatMap(application -> Roles.applicationRoles(application.id().tenant(), application.id().application()).stream()))
- .filter(role::implies)
- .collect(Collectors.toUnmodifiableSet());
- }
-
- /** Returns all other roles the given application role implies. */
- public Set<Role> impliedRoles(ApplicationRole role) {
- return Roles.applicationRoles(role.tenant(), role.application()).stream()
- .filter(role::implies)
- .collect(Collectors.toUnmodifiableSet());
- }
-
private Set<CloudName> clouds() {
return zoneRegistry.zones().all().zones().stream()
.map(ZoneApi::getCloudName)
@@ -297,4 +285,20 @@ public class Controller extends AbstractComponent {
return vespaVersion.map(v -> v.versionNumber().toFullString()).orElse("unknown");
}
+ @Override
+ public List<ApplicationId> listApplications() {
+ return applications().asList().stream()
+ .flatMap(application -> application.instances().keySet().stream()
+ .map(application.id()::instance))
+ .collect(Collectors.toUnmodifiableList());
+ }
+
+ @Override
+ public List<ApplicationId> listApplications(TenantName tenant) {
+ return applications().asList(tenant).stream()
+ .flatMap(application -> application.instances().keySet().stream()
+ .map(application.id()::instance))
+ .collect(Collectors.toUnmodifiableList());
+ }
+
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Instance.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Instance.java
index 60d74e3719d..c9ec355a3b9 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Instance.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Instance.java
@@ -6,25 +6,34 @@ import com.yahoo.component.Version;
import com.yahoo.config.application.api.DeploymentSpec;
import com.yahoo.config.application.api.ValidationOverrides;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueId;
import com.yahoo.vespa.hosted.controller.api.integration.organization.User;
import com.yahoo.vespa.hosted.controller.application.ApplicationActivity;
import com.yahoo.vespa.hosted.controller.application.AssignedRotation;
import com.yahoo.vespa.hosted.controller.application.Change;
+import com.yahoo.vespa.hosted.controller.application.ClusterInfo;
+import com.yahoo.vespa.hosted.controller.application.ClusterUtilization;
import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
+import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics;
import com.yahoo.vespa.hosted.controller.application.EndpointId;
import com.yahoo.vespa.hosted.controller.application.EndpointList;
+import com.yahoo.vespa.hosted.controller.application.JobStatus;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.metric.ApplicationMetrics;
import com.yahoo.vespa.hosted.controller.rotation.RotationStatus;
import java.time.Instant;
import java.util.Collections;
import java.util.Comparator;
+import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
@@ -68,6 +77,18 @@ public class Instance {
Optional.empty(), Collections.emptyList(), RotationStatus.EMPTY);
}
+ /** Creates an empty instance*/
+ public Instance(ApplicationId id, List<Deployment> deployments, DeploymentJobs deploymentJobs,
+ List<AssignedRotation> rotations, RotationStatus rotationStatus) {
+ this(id,
+ Instant.EPOCH, DeploymentSpec.empty, ValidationOverrides.empty,
+ deployments.stream().collect(Collectors.toMap(Deployment::zone, Function.identity())),
+ deploymentJobs,
+ Change.empty(), Change.empty(), Optional.empty(), Optional.empty(), OptionalInt.empty(),
+ new ApplicationMetrics(0, 0), Optional.empty(),
+ rotations, rotationStatus);
+ }
+
/** Used from persistence layer: Do not use */
public Instance(ApplicationId id, Instant createdAt, DeploymentSpec deploymentSpec, ValidationOverrides validationOverrides,
List<Deployment> deployments, DeploymentJobs deploymentJobs, Change change,
@@ -102,8 +123,113 @@ public class Instance {
this.rotationStatus = Objects.requireNonNull(rotationStatus, "rotationStatus cannot be null");
}
+ public Instance withJobPause(JobType jobType, OptionalLong pausedUntil) {
+ return new Instance(id, createdAt, deploymentSpec, validationOverrides, deployments,
+ deploymentJobs.withPause(jobType, pausedUntil), change, outstandingChange,
+ ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
+ rotations, rotationStatus);
+ }
+
+ public Instance withJobCompletion(JobType jobType, JobStatus.JobRun completion,
+ Optional<DeploymentJobs.JobError> jobError) {
+ return new Instance(id, createdAt, deploymentSpec, validationOverrides, deployments,
+ deploymentJobs.withCompletion(jobType, completion, jobError),
+ change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics,
+ pemDeployKey, rotations, rotationStatus);
+ }
+
+ public Instance withJobTriggering(JobType jobType, JobStatus.JobRun job) {
+ return new Instance(id, createdAt, deploymentSpec, validationOverrides, deployments,
+ deploymentJobs.withTriggering(jobType, job), change, outstandingChange,
+ ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
+ rotations, rotationStatus);
+ }
+
+ public Instance withNewDeployment(ZoneId zone, ApplicationVersion applicationVersion, Version version,
+ Instant instant, Map<DeploymentMetrics.Warning, Integer> warnings) {
+ // Use info from previous deployment if available, otherwise create a new one.
+ Deployment previousDeployment = deployments.getOrDefault(zone, new Deployment(zone, applicationVersion,
+ version, instant));
+ Deployment newDeployment = new Deployment(zone, applicationVersion, version, instant,
+ previousDeployment.clusterUtils(),
+ previousDeployment.clusterInfo(),
+ previousDeployment.metrics().with(warnings),
+ previousDeployment.activity());
+ return with(newDeployment);
+ }
+
+ public Instance withClusterUtilization(ZoneId zone, Map<ClusterSpec.Id, ClusterUtilization> clusterUtilization) {
+ Deployment deployment = deployments.get(zone);
+ if (deployment == null) return this; // No longer deployed in this zone.
+ return with(deployment.withClusterUtils(clusterUtilization));
+ }
+
+ public Instance withClusterInfo(ZoneId zone, Map<ClusterSpec.Id, ClusterInfo> clusterInfo) {
+ Deployment deployment = deployments.get(zone);
+ if (deployment == null) return this; // No longer deployed in this zone.
+ return with(deployment.withClusterInfo(clusterInfo));
+
+ }
+
+ public Instance recordActivityAt(Instant instant, ZoneId zone) {
+ Deployment deployment = deployments.get(zone);
+ if (deployment == null) return this;
+ return with(deployment.recordActivityAt(instant));
+ }
+
+ public Instance with(ZoneId zone, DeploymentMetrics deploymentMetrics) {
+ Deployment deployment = deployments.get(zone);
+ if (deployment == null) return this; // No longer deployed in this zone.
+ return with(deployment.withMetrics(deploymentMetrics));
+ }
+
+ public Instance withoutDeploymentIn(ZoneId zone) {
+ Map<ZoneId, Deployment> deployments = new LinkedHashMap<>(this.deployments);
+ deployments.remove(zone);
+ return with(deployments);
+ }
+
+ public Instance withoutDeploymentJob(JobType jobType) {
+ return new Instance(id, createdAt, deploymentSpec, validationOverrides, deployments,
+ deploymentJobs.without(jobType), change, outstandingChange,
+ ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
+ rotations, rotationStatus);
+ }
+
+ public Instance with(ApplicationMetrics metrics) {
+ return new Instance(id, createdAt, deploymentSpec, validationOverrides, deployments,
+ deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
+ metrics, pemDeployKey, rotations, rotationStatus);
+ }
+
+ public Instance with(List<AssignedRotation> assignedRotations) {
+ return new Instance(id, createdAt, deploymentSpec, validationOverrides, deployments,
+ deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
+ metrics, pemDeployKey, assignedRotations, rotationStatus);
+ }
+
+ public Instance with(RotationStatus rotationStatus) {
+ return new Instance(id, createdAt, deploymentSpec, validationOverrides, deployments,
+ deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
+ metrics, pemDeployKey, rotations, rotationStatus);
+ }
+
+ private Instance with(Deployment deployment) {
+ Map<ZoneId, Deployment> deployments = new LinkedHashMap<>(this.deployments);
+ deployments.put(deployment.zone(), deployment);
+ return with(deployments);
+ }
+
+ private Instance with(Map<ZoneId, Deployment> deployments) {
+ return new Instance(id, createdAt, deploymentSpec, validationOverrides, deployments,
+ deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
+ metrics, pemDeployKey, rotations, rotationStatus);
+ }
+
public ApplicationId id() { return id; }
+ public InstanceName name() { return id.instance(); }
+
public Instant createdAt() { return createdAt; }
/**
@@ -174,7 +300,7 @@ public class Instance {
/**
* Returns the oldest platform version this has deployed in a permanent zone (not test or staging).
*
- * This is unfortunately quite similar to {@link ApplicationController#oldestInstalledPlatform(ApplicationId)},
+ * This is unfortunately quite similar to {@link ApplicationController#oldestInstalledPlatform(TenantAndApplicationId)},
* but this checks only what the controller has deployed to the production zones, while that checks the node repository
* to see what's actually installed on each node. Thus, this is the right choice for, e.g., target Vespa versions for
* new deployments, while that is the right choice for version to compile against.
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/LockedApplication.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/LockedApplication.java
new file mode 100644
index 00000000000..9aba8921860
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/LockedApplication.java
@@ -0,0 +1,204 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller;
+
+import com.yahoo.component.Version;
+import com.yahoo.config.application.api.DeploymentSpec;
+import com.yahoo.config.application.api.ValidationOverrides;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.InstanceName;
+import com.yahoo.config.provision.zone.ZoneId;
+import com.yahoo.searchlib.rankingexpression.rule.Function;
+import com.yahoo.vespa.curator.Lock;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
+import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueId;
+import com.yahoo.vespa.hosted.controller.api.integration.organization.User;
+import com.yahoo.vespa.hosted.controller.application.AssignedRotation;
+import com.yahoo.vespa.hosted.controller.application.Change;
+import com.yahoo.vespa.hosted.controller.application.ClusterInfo;
+import com.yahoo.vespa.hosted.controller.application.ClusterUtilization;
+import com.yahoo.vespa.hosted.controller.application.Deployment;
+import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
+import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics;
+import com.yahoo.vespa.hosted.controller.application.JobStatus;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
+import com.yahoo.vespa.hosted.controller.metric.ApplicationMetrics;
+import com.yahoo.vespa.hosted.controller.rotation.RotationStatus;
+
+import java.time.Instant;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.OptionalInt;
+import java.util.OptionalLong;
+import java.util.function.BinaryOperator;
+import java.util.function.UnaryOperator;
+
+/**
+ * An application that has been locked for modification. Provides methods for modifying an application's fields.
+ *
+ * @author jonmv
+ */
+public class LockedApplication {
+
+ private final Lock lock;
+ private final TenantAndApplicationId id;
+ private final Instant createdAt;
+ private final DeploymentSpec deploymentSpec;
+ private final ValidationOverrides validationOverrides;
+ private final Change change;
+ private final Change outstandingChange;
+ private final Optional<IssueId> deploymentIssueId;
+ private final Optional<IssueId> ownershipIssueId;
+ private final Optional<User> owner;
+ private final OptionalInt majorVersion;
+ private final ApplicationMetrics metrics;
+ private final Optional<String> pemDeployKey;
+ private final OptionalLong projectId;
+ private final boolean internal;
+ private final Map<InstanceName, Instance> instances;
+
+ /**
+ * Used to create a locked application
+ *
+ * @param application The application to lock.
+ * @param lock The lock for the application.
+ */
+ LockedApplication(Application application, Lock lock) {
+ this(Objects.requireNonNull(lock, "lock cannot be null"), application.id(), application.createdAt(),
+ application.deploymentSpec(), application.validationOverrides(), application.change(),
+ application.outstandingChange(), application.deploymentIssueId(), application.ownershipIssueId(),
+ application.owner(), application.majorVersion(), application.metrics(), application.pemDeployKey(),
+ application.projectId(), application.internal(), application.instances());
+ }
+
+ private LockedApplication(Lock lock, TenantAndApplicationId id, Instant createdAt, DeploymentSpec deploymentSpec,
+ ValidationOverrides validationOverrides, Change change, Change outstandingChange,
+ Optional<IssueId> deploymentIssueId, Optional<IssueId> ownershipIssueId, Optional<User> owner,
+ OptionalInt majorVersion, ApplicationMetrics metrics, Optional<String> pemDeployKey,
+ OptionalLong projectId, boolean internal, Map<InstanceName, Instance> instances) {
+ this.lock = lock;
+ this.id = id;
+ this.createdAt = createdAt;
+ this.deploymentSpec = deploymentSpec;
+ this.validationOverrides = validationOverrides;
+ this.change = change;
+ this.outstandingChange = outstandingChange;
+ this.deploymentIssueId = deploymentIssueId;
+ this.ownershipIssueId = ownershipIssueId;
+ this.owner = owner;
+ this.majorVersion = majorVersion;
+ this.metrics = metrics;
+ this.pemDeployKey = pemDeployKey;
+ this.projectId = projectId;
+ this.internal = internal;
+ this.instances = Map.copyOf(instances);
+ }
+
+ /** Returns a read-only copy of this */
+ public Application get() {
+ return new Application(id, createdAt, deploymentSpec, validationOverrides, change, outstandingChange,
+ deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
+ projectId, internal, instances.values());
+ }
+
+ public LockedApplication with(InstanceName instance, UnaryOperator<Instance> modification) {
+ var instances = new HashMap<>(this.instances);
+ instances.put(instance, modification.apply(instances.get(instance)));
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, change, outstandingChange,
+ deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
+ projectId, internal, instances);
+ }
+
+ private LockedApplication without(InstanceName instance) {
+ var instances = new HashMap<>(this.instances);
+ instances.remove(instance);
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, change, outstandingChange,
+ deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
+ projectId, internal, instances);
+ }
+
+ public LockedApplication withBuiltInternally(boolean builtInternally) {
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, change, outstandingChange,
+ deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
+ projectId, builtInternally, instances);
+ }
+
+ public LockedApplication withProjectId(OptionalLong projectId) {
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, change, outstandingChange,
+ deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
+ projectId, internal, instances);
+ }
+
+ public LockedApplication withDeploymentIssueId(IssueId issueId) {
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, change, outstandingChange,
+ Optional.ofNullable(issueId), ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
+ projectId, internal, instances);
+ }
+
+ public LockedApplication with(DeploymentSpec deploymentSpec) {
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, change, outstandingChange,
+ deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
+ projectId, internal, instances);
+ }
+
+ public LockedApplication with(ValidationOverrides validationOverrides) {
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, change, outstandingChange,
+ deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
+ projectId, internal, instances);
+ }
+
+ public LockedApplication withChange(Change change) {
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, change, outstandingChange,
+ deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
+ projectId, internal, instances);
+ }
+
+ public LockedApplication withOutstandingChange(Change outstandingChange) {
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, change, outstandingChange,
+ deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
+ projectId, internal, instances);
+ }
+
+ public LockedApplication withOwnershipIssueId(IssueId issueId) {
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, change, outstandingChange,
+ deploymentIssueId, Optional.of(issueId), owner, majorVersion, metrics, pemDeployKey,
+ projectId, internal, instances);
+ }
+
+ public LockedApplication withOwner(User owner) {
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, change, outstandingChange,
+ deploymentIssueId, ownershipIssueId, Optional.of(owner), majorVersion, metrics, pemDeployKey,
+ projectId, internal, instances);
+ }
+
+ /** Set a major version for this, or set to null to remove any major version override */
+ public LockedApplication withMajorVersion(Integer majorVersion) {
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, change, outstandingChange,
+ deploymentIssueId, ownershipIssueId, owner,
+ majorVersion == null ? OptionalInt.empty() : OptionalInt.of(majorVersion),
+ metrics, pemDeployKey, projectId, internal, instances);
+ }
+
+ public LockedApplication with(ApplicationMetrics metrics) {
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, change, outstandingChange,
+ deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
+ projectId, internal, instances);
+ }
+
+ public LockedApplication withPemDeployKey(String pemDeployKey) {
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, change, outstandingChange,
+ deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics, Optional.ofNullable(pemDeployKey),
+ projectId, internal, instances);
+ }
+
+ @Override
+ public String toString() {
+ return "application '" + id + "'";
+ }
+
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/LockedInstance.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/LockedInstance.java
deleted file mode 100644
index 26eacbbf4f4..00000000000
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/LockedInstance.java
+++ /dev/null
@@ -1,288 +0,0 @@
-// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller;
-
-import com.yahoo.component.Version;
-import com.yahoo.config.application.api.DeploymentSpec;
-import com.yahoo.config.application.api.ValidationOverrides;
-import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.config.provision.ClusterSpec;
-import com.yahoo.config.provision.zone.ZoneId;
-import com.yahoo.vespa.curator.Lock;
-import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
-import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
-import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueId;
-import com.yahoo.vespa.hosted.controller.api.integration.organization.User;
-import com.yahoo.vespa.hosted.controller.application.AssignedRotation;
-import com.yahoo.vespa.hosted.controller.application.Change;
-import com.yahoo.vespa.hosted.controller.application.ClusterInfo;
-import com.yahoo.vespa.hosted.controller.application.ClusterUtilization;
-import com.yahoo.vespa.hosted.controller.application.Deployment;
-import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
-import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics;
-import com.yahoo.vespa.hosted.controller.application.JobStatus;
-import com.yahoo.vespa.hosted.controller.metric.ApplicationMetrics;
-import com.yahoo.vespa.hosted.controller.rotation.RotationStatus;
-
-import java.time.Instant;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Optional;
-import java.util.OptionalInt;
-import java.util.OptionalLong;
-
-/**
- * An application that has been locked for modification. Provides methods for modifying an application's fields.
- *
- * @author mpolden
- * @author jonmv
- */
-public class LockedInstance {
-
- private final Lock lock;
- private final ApplicationId id;
- private final Instant createdAt;
- private final DeploymentSpec deploymentSpec;
- private final ValidationOverrides validationOverrides;
- private final Map<ZoneId, Deployment> deployments;
- private final DeploymentJobs deploymentJobs;
- private final Change change;
- private final Change outstandingChange;
- private final Optional<IssueId> ownershipIssueId;
- private final Optional<User> owner;
- private final OptionalInt majorVersion;
- private final ApplicationMetrics metrics;
- private final Optional<String> pemDeployKey;
- private final List<AssignedRotation> rotations;
- private final RotationStatus rotationStatus;
-
- /**
- * Used to create a locked application
- *
- * @param instance The application to lock.
- * @param lock The lock for the application.
- */
- LockedInstance(Instance instance, Lock lock) {
- this(Objects.requireNonNull(lock, "lock cannot be null"), instance.id(), instance.createdAt(),
- instance.deploymentSpec(), instance.validationOverrides(),
- instance.deployments(),
- instance.deploymentJobs(), instance.change(), instance.outstandingChange(),
- instance.ownershipIssueId(), instance.owner(), instance.majorVersion(), instance.metrics(),
- instance.pemDeployKey(), instance.rotations(), instance.rotationStatus());
- }
-
- private LockedInstance(Lock lock, ApplicationId id, Instant createdAt,
- DeploymentSpec deploymentSpec, ValidationOverrides validationOverrides,
- Map<ZoneId, Deployment> deployments, DeploymentJobs deploymentJobs, Change change,
- Change outstandingChange, Optional<IssueId> ownershipIssueId, Optional<User> owner,
- OptionalInt majorVersion, ApplicationMetrics metrics, Optional<String> pemDeployKey,
- List<AssignedRotation> rotations, RotationStatus rotationStatus) {
- this.lock = lock;
- this.id = id;
- this.createdAt = createdAt;
- this.deploymentSpec = deploymentSpec;
- this.validationOverrides = validationOverrides;
- this.deployments = deployments;
- this.deploymentJobs = deploymentJobs;
- this.change = change;
- this.outstandingChange = outstandingChange;
- this.ownershipIssueId = ownershipIssueId;
- this.owner = owner;
- this.majorVersion = majorVersion;
- this.metrics = metrics;
- this.pemDeployKey = pemDeployKey;
- this.rotations = rotations;
- this.rotationStatus = rotationStatus;
- }
-
- /** Returns a read-only copy of this */
- public Instance get() {
- return new Instance(id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change,
- outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
- rotations, rotationStatus);
- }
-
- public LockedInstance withBuiltInternally(boolean builtInternally) {
- return new LockedInstance(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
- deploymentJobs.withBuiltInternally(builtInternally), change, outstandingChange,
- ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
- rotations, rotationStatus);
- }
-
- public LockedInstance withProjectId(OptionalLong projectId) {
- return new LockedInstance(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
- deploymentJobs.withProjectId(projectId), change, outstandingChange,
- ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
- rotations, rotationStatus);
- }
-
- public LockedInstance withDeploymentIssueId(IssueId issueId) {
- return new LockedInstance(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
- deploymentJobs.with(issueId), change, outstandingChange,
- ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
- rotations, rotationStatus);
- }
-
- public LockedInstance withJobPause(JobType jobType, OptionalLong pausedUntil) {
- return new LockedInstance(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
- deploymentJobs.withPause(jobType, pausedUntil), change, outstandingChange,
- ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
- rotations, rotationStatus);
- }
-
- public LockedInstance withJobCompletion(long projectId, JobType jobType, JobStatus.JobRun completion,
- Optional<DeploymentJobs.JobError> jobError) {
- return new LockedInstance(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
- deploymentJobs.withCompletion(projectId, jobType, completion, jobError),
- change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics,
- pemDeployKey, rotations, rotationStatus);
- }
-
- public LockedInstance withJobTriggering(JobType jobType, JobStatus.JobRun job) {
- return new LockedInstance(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
- deploymentJobs.withTriggering(jobType, job), change, outstandingChange,
- ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
- rotations, rotationStatus);
- }
-
- public LockedInstance withNewDeployment(ZoneId zone, ApplicationVersion applicationVersion, Version version,
- Instant instant, Map<DeploymentMetrics.Warning, Integer> warnings) {
- // Use info from previous deployment if available, otherwise create a new one.
- Deployment previousDeployment = deployments.getOrDefault(zone, new Deployment(zone, applicationVersion,
- version, instant));
- Deployment newDeployment = new Deployment(zone, applicationVersion, version, instant,
- previousDeployment.clusterUtils(),
- previousDeployment.clusterInfo(),
- previousDeployment.metrics().with(warnings),
- previousDeployment.activity());
- return with(newDeployment);
- }
-
- public LockedInstance withClusterUtilization(ZoneId zone, Map<ClusterSpec.Id, ClusterUtilization> clusterUtilization) {
- Deployment deployment = deployments.get(zone);
- if (deployment == null) return this; // No longer deployed in this zone.
- return with(deployment.withClusterUtils(clusterUtilization));
- }
-
- public LockedInstance withClusterInfo(ZoneId zone, Map<ClusterSpec.Id, ClusterInfo> clusterInfo) {
- Deployment deployment = deployments.get(zone);
- if (deployment == null) return this; // No longer deployed in this zone.
- return with(deployment.withClusterInfo(clusterInfo));
-
- }
-
- public LockedInstance recordActivityAt(Instant instant, ZoneId zone) {
- Deployment deployment = deployments.get(zone);
- if (deployment == null) return this;
- return with(deployment.recordActivityAt(instant));
- }
-
- public LockedInstance with(ZoneId zone, DeploymentMetrics deploymentMetrics) {
- Deployment deployment = deployments.get(zone);
- if (deployment == null) return this; // No longer deployed in this zone.
- return with(deployment.withMetrics(deploymentMetrics));
- }
-
- public LockedInstance withoutDeploymentIn(ZoneId zone) {
- Map<ZoneId, Deployment> deployments = new LinkedHashMap<>(this.deployments);
- deployments.remove(zone);
- return with(deployments);
- }
-
- public LockedInstance withoutDeploymentJob(JobType jobType) {
- return new LockedInstance(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
- deploymentJobs.without(jobType), change, outstandingChange,
- ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
- rotations, rotationStatus);
- }
-
- public LockedInstance with(DeploymentSpec deploymentSpec) {
- return new LockedInstance(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
- deploymentJobs, change, outstandingChange,
- ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
- rotations, rotationStatus);
- }
-
- public LockedInstance with(ValidationOverrides validationOverrides) {
- return new LockedInstance(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
- deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
- metrics, pemDeployKey, rotations, rotationStatus);
- }
-
- public LockedInstance withChange(Change change) {
- return new LockedInstance(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
- deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
- metrics, pemDeployKey, rotations, rotationStatus);
- }
-
- public LockedInstance withOutstandingChange(Change outstandingChange) {
- return new LockedInstance(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
- deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
- metrics, pemDeployKey, rotations, rotationStatus);
- }
-
- public LockedInstance withOwnershipIssueId(IssueId issueId) {
- return new LockedInstance(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
- deploymentJobs, change, outstandingChange, Optional.ofNullable(issueId), owner,
- majorVersion, metrics, pemDeployKey, rotations, rotationStatus);
- }
-
- public LockedInstance withOwner(User owner) {
- return new LockedInstance(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
- deploymentJobs, change, outstandingChange, ownershipIssueId,
- Optional.ofNullable(owner), majorVersion, metrics, pemDeployKey,
- rotations, rotationStatus);
- }
-
- /** Set a major version for this, or set to null to remove any major version override */
- public LockedInstance withMajorVersion(Integer majorVersion) {
- return new LockedInstance(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
- deploymentJobs, change, outstandingChange, ownershipIssueId, owner,
- majorVersion == null ? OptionalInt.empty() : OptionalInt.of(majorVersion),
- metrics, pemDeployKey, rotations, rotationStatus);
- }
-
- public LockedInstance with(ApplicationMetrics metrics) {
- return new LockedInstance(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
- deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
- metrics, pemDeployKey, rotations, rotationStatus);
- }
-
- public LockedInstance withPemDeployKey(String pemDeployKey) {
- return new LockedInstance(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
- deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
- metrics, Optional.ofNullable(pemDeployKey), rotations, rotationStatus);
- }
-
- public LockedInstance with(List<AssignedRotation> assignedRotations) {
- return new LockedInstance(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
- deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
- metrics, pemDeployKey, assignedRotations, rotationStatus);
- }
-
- public LockedInstance with(RotationStatus rotationStatus) {
- return new LockedInstance(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
- deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
- metrics, pemDeployKey, rotations, rotationStatus);
- }
-
- /** Don't expose non-leaf sub-objects. */
- private LockedInstance with(Deployment deployment) {
- Map<ZoneId, Deployment> deployments = new LinkedHashMap<>(this.deployments);
- deployments.put(deployment.zone(), deployment);
- return with(deployments);
- }
-
- private LockedInstance with(Map<ZoneId, Deployment> deployments) {
- return new LockedInstance(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
- deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
- metrics, pemDeployKey, rotations, rotationStatus);
- }
-
- @Override
- public String toString() {
- return "application '" + id + "'";
- }
-
-}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ApplicationList.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ApplicationList.java
new file mode 100644
index 00000000000..6a15e49134a
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ApplicationList.java
@@ -0,0 +1,245 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.application;
+
+import com.google.common.collect.ImmutableList;
+import com.yahoo.component.Version;
+import com.yahoo.config.application.api.DeploymentSpec.UpgradePolicy;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.vespa.hosted.controller.Application;
+import com.yahoo.vespa.hosted.controller.ApplicationController;
+import com.yahoo.vespa.hosted.controller.Instance;
+
+import java.time.Instant;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Optional;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+
+/**
+ * A list of applications which can be filtered in various ways.
+ *
+ * @author jonmv
+ */
+public class ApplicationList {
+
+ private final List<Application> list;
+
+ private ApplicationList(List<Application> applications) {
+ this.list = applications;
+ }
+
+ // ----------------------------------- Factories
+
+ public static ApplicationList from(Collection<Application> applications) {
+ return new ApplicationList(List.copyOf(applications));
+ }
+
+ public static ApplicationList from(Collection<ApplicationId> ids, ApplicationController applications) {
+ return new ApplicationList(ids.stream()
+ .map(TenantAndApplicationId::from)
+ .distinct()
+ .map(applications::requireApplication)
+ .collect(Collectors.toUnmodifiableList()));
+ }
+
+ // ----------------------------------- Accessors
+
+ /** Returns the applications in this as an immutable list */
+ public List<Application> asList() { return list; }
+
+ /** Returns the ids of the applications in this as an immutable list */
+ public List<TenantAndApplicationId> idList() { return list.stream().map(Application::id).collect(Collectors.toUnmodifiableList()); }
+
+ public boolean isEmpty() { return list.isEmpty(); }
+
+ public int size() { return list.size(); }
+
+ // ----------------------------------- Filters
+
+ /** Returns the subset of applications which are upgrading (to any version), not considering block windows. */
+ public ApplicationList upgrading() {
+ return filteredOn(application -> application.change().platform().isPresent());
+ }
+
+ /** Returns the subset of applications which are currently upgrading to the given version */
+ public ApplicationList upgradingTo(Version version) {
+ return filteredOn(application -> isUpgradingTo(version, application));
+ }
+
+ /** Returns the subset of applications which are not pinned to a certain Vespa version. */
+ public ApplicationList unpinned() {
+ return filteredOn(application -> ! application.change().isPinned());
+ }
+
+ /** Returns the subset of applications which are currently not upgrading to the given version */
+ public ApplicationList notUpgradingTo(Version version) {
+ return notUpgradingTo(Collections.singletonList(version));
+ }
+
+ /** Returns the subset of applications which are currently not upgrading to any of the given versions */
+ public ApplicationList notUpgradingTo(Collection<Version> versions) {
+ return filteredOn(application -> versions.stream().noneMatch(version -> isUpgradingTo(version, application)));
+ }
+
+ /**
+ * Returns the subset of applications which are currently not upgrading to the given version,
+ * or returns all if no version is specified
+ */
+ public ApplicationList notUpgradingTo(Optional<Version> version) {
+ if (version.isEmpty()) return this;
+ return notUpgradingTo(version.get());
+ }
+
+ /** Returns the subset of applications which have changes left to deploy; blocked, or deploying */
+ public ApplicationList withChanges() {
+ return filteredOn(application -> application.change().hasTargets() || application.outstandingChange().hasTargets());
+ }
+
+ /** Returns the subset of applications which are currently not deploying a change */
+ public ApplicationList notDeploying() {
+ return filteredOn(application -> ! application.change().hasTargets());
+ }
+
+ /** Returns the subset of applications which currently does not have any failing jobs */
+ public ApplicationList notFailing() {
+ return filteredOn(application -> application.instances().values().stream()
+ .noneMatch(instance -> instance.deploymentJobs().hasFailures()));
+ }
+
+ /** Returns the subset of applications which currently have failing jobs */
+ public ApplicationList failing() {
+ return filteredOn(application -> application.instances().values().stream()
+ .anyMatch(instance -> instance.deploymentJobs().hasFailures()));
+ }
+
+ /** Returns the subset of applications which have been failing an upgrade to the given version since the given instant */
+ public ApplicationList failingUpgradeToVersionSince(Version version, Instant threshold) {
+ return filteredOn(application -> application.instances().values().stream()
+ .anyMatch(instance -> failingUpgradeToVersionSince(instance, version, threshold)));
+ }
+
+ /** Returns the subset of applications which have been failing an application change since the given instant */
+ public ApplicationList failingApplicationChangeSince(Instant threshold) {
+ return filteredOn(application -> application.instances().values().stream()
+ .anyMatch(instance -> failingApplicationChangeSince(instance, threshold)));
+ }
+
+ /** Returns the subset of applications which currently does not have any failing jobs on the given version */
+ public ApplicationList notFailingOn(Version version) {
+ return filteredOn(application -> application.instances().values().stream()
+ .noneMatch(instance -> failingOn(version, instance)));
+ }
+
+ /** Returns the subset of applications which have at least one production deployment */
+ public ApplicationList withProductionDeployment() {
+ return filteredOn(application -> application.instances().values().stream()
+ .anyMatch(instance -> instance.productionDeployments().size() > 0));
+ }
+
+ /** Returns the subset of applications which started failing on the given version */
+ public ApplicationList startedFailingOn(Version version) {
+ return filteredOn(application -> application.instances().values().stream()
+ .anyMatch(instance -> ! JobList.from(instance).firstFailing().on(version).isEmpty()));
+ }
+
+ /** Returns the subset of applications which has the given upgrade policy */
+ public ApplicationList with(UpgradePolicy policy) {
+ return filteredOn(application -> application.deploymentSpec().upgradePolicy() == policy);
+ }
+
+ /** Returns the subset of applications which does not have the given upgrade policy */
+ public ApplicationList without(UpgradePolicy policy) {
+ return filteredOn(application -> application.deploymentSpec().upgradePolicy() != policy);
+ }
+
+ /** Returns the subset of applications which have at least one deployment on a lower version than the given one */
+ public ApplicationList onLowerVersionThan(Version version) {
+ return filteredOn(application -> application.instances().values().stream()
+ .flatMap(instance -> instance.productionDeployments().values().stream())
+ .anyMatch(deployment -> deployment.version().isBefore(version)));
+ }
+
+ /** Returns the subset of applications which have a project ID */
+ public ApplicationList withProjectId() {
+ return filteredOn(application -> application.projectId().isPresent());
+ }
+
+ /** Returns the subset of applications that are allowed to upgrade at the given time */
+ public ApplicationList canUpgradeAt(Instant instant) {
+ return filteredOn(application -> application.deploymentSpec().canUpgradeAt(instant));
+ }
+
+ /** Returns the subset of applications that have at least one assigned rotation */
+ public ApplicationList hasRotation() {
+ return filteredOn(application -> application.instances().values().stream()
+ .anyMatch(instance -> ! instance.rotations().isEmpty()));
+ }
+
+ /**
+ * Returns the subset of applications that aren't pinned to an earlier major version than the given one.
+ *
+ * @param targetMajorVersion the target major version which the returned applications allow upgrading to
+ * @param defaultMajorVersion the default major version to assume for applications not specifying one
+ */
+ public ApplicationList allowMajorVersion(int targetMajorVersion, int defaultMajorVersion) {
+ return filteredOn(application -> targetMajorVersion <= application.deploymentSpec().majorVersion()
+ .orElse(application.majorVersion()
+ .orElse(defaultMajorVersion)));
+ }
+
+ /** Returns the first n applications in this (or all, if there are fewer than n). */
+ public ApplicationList first(int n) {
+ if (list.size() < n) return this;
+ return new ApplicationList(list.subList(0, n));
+ }
+
+ // ----------------------------------- Sorting
+
+ /**
+ * Returns this list sorted by increasing deployed version.
+ * If multiple versions are deployed the oldest is used.
+ * Applications without any deployments are ordered first.
+ */
+ public ApplicationList byIncreasingDeployedVersion() {
+ return new ApplicationList(list.stream()
+ .sorted(Comparator.comparing(application -> application.oldestDeployedPlatform()
+ .orElse(Version.emptyVersion)))
+ .collect(Collectors.toUnmodifiableList()));
+ }
+
+ // ----------------------------------- Internal helpers
+
+ private static boolean isUpgradingTo(Version version, Application application) {
+ return application.change().platform().equals(Optional.of(version));
+ }
+
+ private static boolean failingOn(Version version, Instance instance) {
+ return ! JobList.from(instance)
+ .failing()
+ .lastCompleted().on(version)
+ .isEmpty();
+ }
+
+ private static boolean failingUpgradeToVersionSince(Instance instance, Version version, Instant threshold) {
+ return ! JobList.from(instance)
+ .not().failingApplicationChange()
+ .firstFailing().before(threshold)
+ .lastCompleted().on(version)
+ .isEmpty();
+ }
+
+ private static boolean failingApplicationChangeSince(Instance instance, Instant threshold) {
+ return ! JobList.from(instance)
+ .failingApplicationChange()
+ .firstFailing().before(threshold)
+ .isEmpty();
+ }
+
+ private ApplicationList filteredOn(Predicate<Application> condition) {
+ return new ApplicationList(list.stream().filter(condition).collect(Collectors.toUnmodifiableList()));
+ }
+
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/DeploymentJobs.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/DeploymentJobs.java
index 84d0615bc48..066ba0fbda5 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/DeploymentJobs.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/DeploymentJobs.java
@@ -55,13 +55,13 @@ public class DeploymentJobs {
}
/** Return a new instance with the given completion */
- public DeploymentJobs withCompletion(long projectId, JobType jobType, JobStatus.JobRun completion, Optional<JobError> jobError) {
+ public DeploymentJobs withCompletion(JobType jobType, JobStatus.JobRun completion, Optional<JobError> jobError) {
Map<JobType, JobStatus> status = new LinkedHashMap<>(this.status);
status.compute(jobType, (type, job) -> {
if (job == null) job = JobStatus.initial(jobType);
return job.withCompletion(completion, jobError);
});
- return new DeploymentJobs(jobType == JobType.component ? OptionalLong.of(projectId) : this.projectId, status, issueId, builtInternally);
+ return new DeploymentJobs(projectId, status, issueId, builtInternally);
}
public DeploymentJobs withTriggering(JobType jobType, JobStatus.JobRun jobRun) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/InstanceList.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/InstanceList.java
deleted file mode 100644
index 809e0a8f0fb..00000000000
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/InstanceList.java
+++ /dev/null
@@ -1,234 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.application;
-
-import com.google.common.collect.ImmutableList;
-import com.yahoo.component.Version;
-import com.yahoo.config.application.api.DeploymentSpec.UpgradePolicy;
-import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.vespa.hosted.controller.Instance;
-import com.yahoo.vespa.hosted.controller.ApplicationController;
-
-import java.time.Instant;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.List;
-import java.util.Optional;
-import java.util.stream.Stream;
-
-/**
- * A list of applications which can be filtered in various ways.
- *
- * @author bratseth
- */
-// TODO jvenstad: Make an AbstractFilteringList based on JobList and let this extend it for free not()s?
-public class InstanceList {
-
- private final ImmutableList<Instance> list;
-
- private InstanceList(Iterable<Instance> applications) {
- this.list = ImmutableList.copyOf(applications);
- }
-
- // ----------------------------------- Factories
-
- public static InstanceList from(Iterable<Instance> applications) {
- return new InstanceList(applications);
- }
-
- public static InstanceList from(Collection<ApplicationId> ids, ApplicationController applications) {
- return listOf(ids.stream().map(applications::require));
- }
-
- // ----------------------------------- Accessors
-
- /** Returns the applications in this as an immutable list */
- public List<Instance> asList() { return list; }
-
- /** Returns the ids of the applications in this as an immutable list */
- public List<ApplicationId> idList() { return ImmutableList.copyOf(list.stream().map(Instance::id)::iterator); }
-
- public boolean isEmpty() { return list.isEmpty(); }
-
- public int size() { return list.size(); }
-
- // ----------------------------------- Filters
-
- /** Returns the subset of applications which are upgrading (to any version), not considering block windows. */
- public InstanceList upgrading() {
- return listOf(list.stream().filter(application -> application.change().platform().isPresent()));
- }
-
- /** Returns the subset of applications which are currently upgrading to the given version */
- public InstanceList upgradingTo(Version version) {
- return listOf(list.stream().filter(application -> isUpgradingTo(version, application)));
- }
-
- /** Returns the subset of applications which are not pinned to a certain Vespa version. */
- public InstanceList unpinned() {
- return listOf(list.stream().filter(application -> ! application.change().isPinned()));
- }
-
- /** Returns the subset of applications which are currently not upgrading to the given version */
- public InstanceList notUpgradingTo(Version version) {
- return notUpgradingTo(Collections.singletonList(version));
- }
-
- /** Returns the subset of applications which are currently not upgrading to any of the given versions */
- public InstanceList notUpgradingTo(Collection<Version> versions) {
- return listOf(list.stream().filter(application -> versions.stream().noneMatch(version -> isUpgradingTo(version, application))));
- }
-
- /**
- * Returns the subset of applications which are currently not upgrading to the given version,
- * or returns all if no version is specified
- */
- public InstanceList notUpgradingTo(Optional<Version> version) {
- if (version.isEmpty()) return this;
- return notUpgradingTo(version.get());
- }
-
- /** Returns the subset of applications which have changes left to deploy; blocked, or deploying */
- public InstanceList withChanges() {
- return listOf(list.stream().filter(application -> application.change().hasTargets() || application.outstandingChange().hasTargets()));
- }
-
- /** Returns the subset of applications which are currently not deploying a change */
- public InstanceList notDeploying() {
- return listOf(list.stream().filter(application -> ! application.change().hasTargets()));
- }
-
- /** Returns the subset of applications which currently does not have any failing jobs */
- public InstanceList notFailing() {
- return listOf(list.stream().filter(application -> ! application.deploymentJobs().hasFailures()));
- }
-
- /** Returns the subset of applications which currently have failing jobs */
- public InstanceList failing() {
- return listOf(list.stream().filter(application -> application.deploymentJobs().hasFailures()));
- }
-
- /** Returns the subset of applications which have been failing an upgrade to the given version since the given instant */
- public InstanceList failingUpgradeToVersionSince(Version version, Instant threshold) {
- return listOf(list.stream().filter(application -> failingUpgradeToVersionSince(application, version, threshold)));
- }
-
- /** Returns the subset of applications which have been failing an application change since the given instant */
- public InstanceList failingApplicationChangeSince(Instant threshold) {
- return listOf(list.stream().filter(application -> failingApplicationChangeSince(application, threshold)));
- }
-
- /** Returns the subset of applications which currently does not have any failing jobs on the given version */
- public InstanceList notFailingOn(Version version) {
- return listOf(list.stream().filter(application -> ! failingOn(version, application)));
- }
-
- /** Returns the subset of applications which have at least one production deployment */
- public InstanceList hasDeployment() {
- return listOf(list.stream().filter(a -> !a.productionDeployments().isEmpty()));
- }
-
- /** Returns the subset of applications which started failing on the given version */
- public InstanceList startedFailingOn(Version version) {
- return listOf(list.stream().filter(application -> ! JobList.from(application).firstFailing().on(version).isEmpty()));
- }
-
- /** Returns the subset of applications which has the given upgrade policy */
- public InstanceList with(UpgradePolicy policy) {
- return listOf(list.stream().filter(a -> a.deploymentSpec().upgradePolicy() == policy));
- }
-
- /** Returns the subset of applications which does not have the given upgrade policy */
- public InstanceList without(UpgradePolicy policy) {
- return listOf(list.stream().filter(a -> a.deploymentSpec().upgradePolicy() != policy));
- }
-
- /** Returns the subset of applications which have at least one deployment on a lower version than the given one */
- public InstanceList onLowerVersionThan(Version version) {
- return listOf(list.stream()
- .filter(a -> a.productionDeployments().values().stream()
- .anyMatch(d -> d.version().isBefore(version))));
- }
-
- /** Returns the subset of applications which have a project ID */
- public InstanceList withProjectId() {
- return listOf(list.stream().filter(a -> a.deploymentJobs().projectId().isPresent()));
- }
-
- /** Returns the subset of applications which have at least one production deployment */
- public InstanceList hasProductionDeployment() {
- return listOf(list.stream().filter(a -> ! a.productionDeployments().isEmpty()));
- }
-
- /** Returns the subset of applications that are allowed to upgrade at the given time */
- public InstanceList canUpgradeAt(Instant instant) {
- return listOf(list.stream().filter(a -> a.deploymentSpec().canUpgradeAt(instant)));
- }
-
- /** Returns the subset of applications that have at least one assigned rotation */
- public InstanceList hasRotation() {
- return listOf(list.stream().filter(a -> !a.rotations().isEmpty()));
- }
-
- /**
- * Returns the subset of applications that hasn't pinned to an an earlier major version than the given one.
- *
- * @param targetMajorVersion the target major version which applications returned allows upgrading to
- * @param defaultMajorVersion the default major version to assume for applications not specifying one
- */
- public InstanceList allowMajorVersion(int targetMajorVersion, int defaultMajorVersion) {
- return listOf(list.stream().filter(a -> a.deploymentSpec().majorVersion().orElse(a.majorVersion().orElse(defaultMajorVersion))
- >= targetMajorVersion));
- }
-
- /** Returns the first n application in this (or all, if there are less than n). */
- public InstanceList first(int n) {
- if (list.size() < n) return this;
- return new InstanceList(list.subList(0, n));
- }
-
- // ----------------------------------- Sorting
-
- /**
- * Returns this list sorted by increasing deployed version.
- * If multiple versions are deployed the oldest is used.
- * Applications without any deployments are ordered first.
- */
- public InstanceList byIncreasingDeployedVersion() {
- return listOf(list.stream().sorted(Comparator.comparing(application -> application.oldestDeployedPlatform().orElse(Version.emptyVersion))));
- }
-
- // ----------------------------------- Internal helpers
-
- private static boolean isUpgradingTo(Version version, Instance instance) {
- return instance.change().platform().equals(Optional.of(version));
- }
-
- private static boolean failingOn(Version version, Instance instance) {
- return ! JobList.from(instance)
- .failing()
- .lastCompleted().on(version)
- .isEmpty();
- }
-
- private static boolean failingUpgradeToVersionSince(Instance instance, Version version, Instant threshold) {
- return ! JobList.from(instance)
- .not().failingApplicationChange()
- .firstFailing().before(threshold)
- .lastCompleted().on(version)
- .isEmpty();
- }
-
- private static boolean failingApplicationChangeSince(Instance instance, Instant threshold) {
- return ! JobList.from(instance)
- .failingApplicationChange()
- .firstFailing().before(threshold)
- .isEmpty();
- }
-
- /** Convenience converter from a stream to an ApplicationList */
- private static InstanceList listOf(Stream<Instance> applications) {
- return from(applications::iterator);
- }
-
-}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/JobList.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/JobList.java
index 9ae2ed94d2a..5c2df9f039b 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/JobList.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/JobList.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.hosted.controller.application;
import com.google.common.collect.ImmutableList;
import com.yahoo.component.Version;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/TenantAndApplicationId.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/TenantAndApplicationId.java
new file mode 100644
index 00000000000..0b537535315
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/TenantAndApplicationId.java
@@ -0,0 +1,98 @@
+package com.yahoo.vespa.hosted.controller.application;
+
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ApplicationName;
+import com.yahoo.config.provision.InstanceName;
+import com.yahoo.config.provision.TenantName;
+
+import java.util.Objects;
+
+/**
+ * Tenant and application name pair.
+ *
+ * TODO jonmv: rename to ApplicationId if ApplicationId is renamed.
+ */
+public class TenantAndApplicationId implements Comparable<TenantAndApplicationId> {
+
+    private final TenantName tenant;
+    private final ApplicationName application;
+
+    private TenantAndApplicationId(TenantName tenant, ApplicationName application) {
+        requireNonBlank(tenant.value(), "Tenant name");
+        requireNonBlank(application.value(), "Application name");
+        this.tenant = tenant;
+        this.application = application;
+    }
+
+    public static TenantAndApplicationId from(TenantName tenant, ApplicationName application) {
+        return new TenantAndApplicationId(tenant, application);
+    }
+
+    public static TenantAndApplicationId from(String tenant, String application) {
+        return from(TenantName.from(tenant), ApplicationName.from(application));
+    }
+
+    /** Inverse of {@link #serialized()}; expects exactly one ':' separator. */
+    public static TenantAndApplicationId fromSerialized(String value) {
+        String[] parts = value.split(":");
+        if (parts.length != 2)
+            throw new IllegalArgumentException("Serialized value should be '<tenant>:<application>', but was '" + value + "'");
+
+        return from(parts[0], parts[1]);
+    }
+
+    public static TenantAndApplicationId from(ApplicationId id) {
+        return from(id.tenant(), id.application());
+    }
+
+    public ApplicationId defaultInstance() {
+        return instance(InstanceName.defaultName());
+    }
+
+    public ApplicationId instance(InstanceName instance) {
+        return ApplicationId.from(tenant, application, instance);
+    }
+
+    public String serialized() {
+        return tenant.value() + ":" + application.value();
+    }
+
+    public TenantName tenant() {
+        return tenant;
+    }
+
+    public ApplicationName application() {
+        return application;
+    }
+
+    @Override
+    public boolean equals(Object other) {
+        if (this == other) return true;
+        if (other == null || getClass() != other.getClass()) return false;
+        TenantAndApplicationId that = (TenantAndApplicationId) other;
+        return tenant.equals(that.tenant) &&
+               application.equals(that.application);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(tenant, application);
+    }
+
+    @Override
+    public int compareTo(TenantAndApplicationId other) {
+        int tenantComparison = tenant.compareTo(other.tenant);
+        return tenantComparison != 0 ? tenantComparison : application.compareTo(other.application);
+    }
+
+    @Override
+    public String toString() {
+        return tenant.value() + "." + application.value();
+    }
+
+    private static void requireNonBlank(String value, String name) {
+        Objects.requireNonNull(value, name + " cannot be null");
+        if (value.isBlank()) // was 'name.isBlank()': checked the label, so blank values were accepted
+            throw new IllegalArgumentException(name + " cannot be blank");
+    }
+
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/athenz/impl/AthenzFacade.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/athenz/impl/AthenzFacade.java
index c9234b87a8b..91f9e2d56d7 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/athenz/impl/AthenzFacade.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/athenz/impl/AthenzFacade.java
@@ -16,7 +16,7 @@ import com.yahoo.vespa.athenz.api.OktaAccessToken;
import com.yahoo.vespa.athenz.client.zms.RoleAction;
import com.yahoo.vespa.athenz.client.zms.ZmsClient;
import com.yahoo.vespa.athenz.client.zts.ZtsClient;
-import com.yahoo.vespa.hosted.controller.Instance;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.api.integration.athenz.AthenzClientFactory;
import com.yahoo.vespa.hosted.controller.api.integration.athenz.ApplicationAction;
import com.yahoo.vespa.hosted.controller.security.AccessControl;
@@ -91,7 +91,7 @@ public class AthenzFacade implements AccessControl {
}
@Override
- public Tenant updateTenant(TenantSpec tenantSpec, Credentials credentials, List<Tenant> existing, List<Instance> instances) {
+ public Tenant updateTenant(TenantSpec tenantSpec, Credentials credentials, List<Tenant> existing, List<Application> applications) {
AthenzTenantSpec spec = (AthenzTenantSpec) tenantSpec;
AthenzCredentials athenzCredentials = (AthenzCredentials) credentials;
AthenzDomain newDomain = spec.domain();
@@ -121,12 +121,12 @@ public class AthenzFacade implements AccessControl {
else { // Delete and recreate tenant, and optionally application, resources in Athenz otherwise.
log("createTenancy(tenantDomain=%s, service=%s)", newDomain, service);
zmsClient.createTenancy(newDomain, service, athenzCredentials.token());
- for (Instance instance : instances)
- createApplication(newDomain, instance.id().application(), athenzCredentials.token());
+ for (Application application : applications)
+ createApplication(newDomain, application.id().application(), athenzCredentials.token());
log("deleteTenancy(tenantDomain=%s, service=%s)", oldDomain, service);
- for (Instance instance : instances)
- deleteApplication(oldDomain, instance.id().application(), athenzCredentials.token());
+ for (Application application : applications)
+ deleteApplication(oldDomain, application.id().application(), athenzCredentials.token());
zmsClient.deleteTenancy(oldDomain, service, athenzCredentials.token());
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java
index 7ccb4163284..1bba1baa91b 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java
@@ -5,6 +5,7 @@ import com.yahoo.config.application.api.DeploymentSpec;
import com.yahoo.config.application.api.DeploymentSpec.Step;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.log.LogLevel;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.ApplicationController;
import com.yahoo.vespa.hosted.controller.Controller;
@@ -14,12 +15,13 @@ import com.yahoo.vespa.hosted.controller.api.integration.BuildService.JobState;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.config.provision.zone.ZoneId;
-import com.yahoo.vespa.hosted.controller.application.InstanceList;
+import com.yahoo.vespa.hosted.controller.application.ApplicationList;
import com.yahoo.vespa.hosted.controller.application.Change;
import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs.JobReport;
import com.yahoo.vespa.hosted.controller.application.JobStatus;
import com.yahoo.vespa.hosted.controller.application.JobStatus.JobRun;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import java.time.Clock;
import java.time.Duration;
@@ -37,9 +39,11 @@ import java.util.OptionalLong;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.logging.Logger;
+import java.util.stream.Collectors;
import java.util.stream.Stream;
import static com.yahoo.vespa.hosted.controller.api.integration.BuildService.BuildJob;
+import static com.yahoo.vespa.hosted.controller.api.integration.BuildService.JobState.disabled;
import static com.yahoo.vespa.hosted.controller.api.integration.BuildService.JobState.idle;
import static com.yahoo.vespa.hosted.controller.api.integration.BuildService.JobState.queued;
import static com.yahoo.vespa.hosted.controller.api.integration.BuildService.JobState.running;
@@ -57,7 +61,7 @@ import static java.util.stream.Collectors.toList;
/**
* Responsible for scheduling deployment jobs in a build system and keeping
- * {@link Instance#change()} in sync with what is scheduled.
+ * {@link Application#change()} in sync with what is scheduled.
*
* This class is multi-thread safe.
*
@@ -98,23 +102,25 @@ public class DeploymentTrigger {
report.jobType(),
report.applicationId(),
report.projectId()));
- if (applications().get(report.applicationId()).isEmpty()) {
+ if (applications().getInstance(report.applicationId()).isEmpty()) {
log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
"': Unknown application '" + report.applicationId() + "'");
return;
}
- applications().lockOrThrow(report.applicationId(), application -> {
+ applications().lockApplicationOrThrow(TenantAndApplicationId.from(report.applicationId()), application -> {
JobRun triggering;
+ // TODO jonmv: Remove this, and replace with a simple application version counter.
if (report.jobType() == component) {
ApplicationVersion applicationVersion = report.version().get();
- triggering = JobRun.triggering(applications().oldestInstalledPlatform(report.applicationId()), applicationVersion,
+ triggering = JobRun.triggering(applications().oldestInstalledPlatform(TenantAndApplicationId.from(report.applicationId())),
+ applicationVersion,
Optional.empty(), Optional.empty(), "Application commit", clock.instant());
if (report.success()) {
if (acceptNewApplicationVersion(application.get())) {
application = application.withChange(application.get().change().with(applicationVersion))
.withOutstandingChange(Change.empty());
- if (application.get().deploymentJobs().deployedInternally())
+ if (application.get().internal())
for (Run run : jobs.active())
if (run.id().application().equals(report.applicationId()))
jobs.abort(run.id());
@@ -122,9 +128,11 @@ public class DeploymentTrigger {
else
application = application.withOutstandingChange(Change.of(applicationVersion));
}
+ application = application.withProjectId(OptionalLong.of(report.projectId()));
}
else {
- Optional<JobStatus> status = application.get().deploymentJobs().statusOf(report.jobType());
+ Optional<JobStatus> status = application.get().require(report.applicationId().instance())
+ .deploymentJobs().statusOf(report.jobType());
triggering = status.filter(job -> job.lastTriggered().isPresent()
&& job.lastCompleted()
.map(completion -> ! completion.at().isAfter(job.lastTriggered().get().at()))
@@ -135,12 +143,12 @@ public class DeploymentTrigger {
.orElse("never")))
.lastTriggered().get();
}
- application = application.withJobCompletion(report.projectId(),
- report.jobType(),
- triggering.completion(report.buildNumber(), clock.instant()),
- report.jobError());
- application = application.withChange(remainingChange(application.get()));
- applications().store(application);
+
+ application = application.with(report.applicationId().instance(),
+ instance -> instance.withJobCompletion(report.jobType(),
+ triggering.completion(report.buildNumber(), clock.instant()),
+ report.jobError()));
+ applications().store(application.withChange(remainingChange(application.get())));
});
}
@@ -185,8 +193,8 @@ public class DeploymentTrigger {
public boolean trigger(Job job) {
log.log(LogLevel.DEBUG, String.format("Triggering %s: %s", job, job.triggering));
try {
- applications().lockOrThrow(job.applicationId(), application -> {
- if (application.get().deploymentJobs().deployedInternally())
+ applications().lockApplicationOrThrow(TenantAndApplicationId.from(job.applicationId()), application -> {
+ if (application.get().internal())
jobs.start(job.applicationId(), job.jobType, new Versions(job.triggering.platform(),
job.triggering.application(),
job.triggering.sourcePlatform(),
@@ -194,35 +202,37 @@ public class DeploymentTrigger {
else
buildService.trigger(job);
- applications().store(application.withJobTriggering(job.jobType, job.triggering));
+ applications().store(application.with(job.applicationId().instance(),
+ instance -> instance.withJobTriggering(job.jobType, job.triggering)));
});
return true;
}
catch (RuntimeException e) {
log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
- applications().lockOrThrow(job.applicationId(), application ->
+ applications().lockApplicationOrThrow(TenantAndApplicationId.from(job.applicationId()), application ->
applications().store(application.withProjectId(OptionalLong.empty())));
return false;
}
}
- /** Force triggering of a job for given application. */
+ /** Force triggering of a job for given instance. */
public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) {
- Instance instance = applications().require(applicationId);
+ Application application = applications().requireApplication(TenantAndApplicationId.from(applicationId));
+ Instance instance = application.require(applicationId.instance());
if (jobType == component) {
- if (instance.deploymentJobs().deployedInternally())
+ if (application.internal())
throw new IllegalArgumentException(applicationId + " has no component job we can trigger.");
- buildService.trigger(BuildJob.of(applicationId, instance.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
+ buildService.trigger(BuildJob.of(applicationId, application.projectId().getAsLong(), jobType.jobName()));
return singletonList(component);
}
- Versions versions = Versions.from(instance.change(), instance, deploymentFor(instance, jobType),
+ Versions versions = Versions.from(application.change(), application, deploymentFor(instance, jobType),
controller.systemVersion());
String reason = "Job triggered manually by " + user;
return (jobType.isProduction() && ! isTested(instance, versions)
- ? testJobs(instance, versions, reason, clock.instant(), __ -> true).stream()
- : Stream.of(deploymentJob(instance, versions, instance.change(), jobType, reason, clock.instant())))
+ ? testJobs(application.deploymentSpec(), application.change(), instance, versions, reason, clock.instant(), __ -> true).stream()
+ : Stream.of(deploymentJob(instance, versions, application.change(), jobType, reason, clock.instant())))
.peek(this::trigger)
.map(Job::jobType).collect(toList());
}
@@ -232,21 +242,22 @@ public class DeploymentTrigger {
if (until.isAfter(clock.instant().plus(maxPause)))
throw new IllegalArgumentException("Pause only allowed for up to " + maxPause);
- applications().lockOrThrow(id, application ->
- applications().store(application.withJobPause(jobType, OptionalLong.of(until.toEpochMilli()))));
+ applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application ->
+ applications().store(application.with(id.instance(),
+ instance -> instance.withJobPause(jobType, OptionalLong.of(until.toEpochMilli())))));
}
/** Triggers a change of this application, unless it already has a change. */
- public void triggerChange(ApplicationId applicationId, Change change) {
- applications().lockOrThrow(applicationId, application -> {
+ public void triggerChange(TenantAndApplicationId applicationId, Change change) {
+ applications().lockApplicationOrThrow(applicationId, application -> {
if ( ! application.get().change().hasTargets())
forceChange(applicationId, change);
});
}
/** Overrides the given application's platform and application changes with any contained in the given change. */
- public void forceChange(ApplicationId applicationId, Change change) {
- applications().lockOrThrow(applicationId, application -> {
+ public void forceChange(TenantAndApplicationId applicationId, Change change) {
+ applications().lockApplicationOrThrow(applicationId, application -> {
if (change.application().isPresent())
application = application.withOutstandingChange(Change.empty());
applications().store(application.withChange(change.onTopOf(application.get().change())));
@@ -254,8 +265,8 @@ public class DeploymentTrigger {
}
/** Cancels the indicated part of the given application's change. */
- public void cancelChange(ApplicationId applicationId, ChangesToCancel cancellation) {
- applications().lockOrThrow(applicationId, application -> {
+ public void cancelChange(TenantAndApplicationId applicationId, ChangesToCancel cancellation) {
+ applications().lockApplicationOrThrow(applicationId, application -> {
Change change;
switch (cancellation) {
case ALL: change = Change.empty(); break;
@@ -294,7 +305,14 @@ public class DeploymentTrigger {
/** Returns the set of all jobs which have changes to propagate from the upstream steps. */
private List<Job> computeReadyJobs() {
- return InstanceList.from(applications().asList())
+ ApplicationList applications = ApplicationList.from(applications().asList());
+ applications = applications.withProjectId();
+ applications = applications.withChanges();
+ var jobs = applications.idList().stream()
+ .map(this::computeReadyJobs)
+ .flatMap(Collection::stream)
+ .collect(Collectors.toList());
+ return ApplicationList.from(applications().asList())
.withProjectId()
.withChanges()
.idList().stream()
@@ -306,70 +324,74 @@ public class DeploymentTrigger {
/**
* Finds the next step to trigger for the given application, if any, and returns these as a list.
*/
- private List<Job> computeReadyJobs(ApplicationId id) {
+ private List<Job> computeReadyJobs(TenantAndApplicationId id) {
List<Job> jobs = new ArrayList<>();
- applications().get(id).ifPresent(application -> {
- Change change = application.change();
- Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest)
- .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)),
- application.deploymentJobs().statusOf(stagingTest)
- .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)));
- String reason = "New change available";
- List<Job> testJobs = null; // null means "uninitialised", while empty means "don't run any jobs".
- DeploymentSteps steps = steps(application.deploymentSpec());
-
- if (change.hasTargets()) {
- for (Step step : steps.production()) {
- List<JobType> stepJobs = steps.toJobs(step);
- List<JobType> remainingJobs = stepJobs.stream().filter(job -> ! isComplete(change, application, job)).collect(toList());
- if ( ! remainingJobs.isEmpty()) { // Change is incomplete; trigger remaining jobs if ready, or their test jobs if untested.
- for (JobType job : remainingJobs) {
- Versions versions = Versions.from(change, application, deploymentFor(application, job),
- controller.systemVersion());
- if (isTested(application, versions)) {
- if (completedAt.isPresent() && canTrigger(job, versions, application, stepJobs)) {
- jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get()));
+ applications().getApplication(id).ifPresent(application -> {
+ for (Instance instance : application.instances().values()) {
+ Change change = application.change();
+ Optional<Instant> completedAt = max(instance.deploymentJobs().statusOf(systemTest)
+ .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)),
+ instance.deploymentJobs().statusOf(stagingTest)
+ .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)));
+ String reason = "New change available";
+ List<Job> testJobs = null; // null means "uninitialised", while empty means "don't run any jobs".
+ DeploymentSteps steps = steps(application.deploymentSpec());
+
+ if (change.hasTargets()) {
+ for (Step step : steps.production()) {
+ List<JobType> stepJobs = steps.toJobs(step);
+ List<JobType> remainingJobs = stepJobs.stream().filter(job -> ! isComplete(change, change, instance, job)).collect(toList());
+ if (!remainingJobs.isEmpty()) { // Change is incomplete; trigger remaining jobs if ready, or their test jobs if untested.
+ for (JobType job : remainingJobs) {
+ Versions versions = Versions.from(change, application, deploymentFor(instance, job),
+ controller.systemVersion());
+ if (isTested(instance, versions)) {
+ if (completedAt.isPresent() && canTrigger(job, versions, instance, application.deploymentSpec(), stepJobs)) {
+ jobs.add(deploymentJob(instance, versions, change, job, reason, completedAt.get()));
+ }
+ if ( ! alreadyTriggered(instance, versions) && testJobs == null) {
+ testJobs = emptyList();
+ }
}
- if ( ! alreadyTriggered(application, versions) && testJobs == null) {
- testJobs = emptyList();
+ else if (testJobs == null) {
+ testJobs = testJobs(application.deploymentSpec(),
+ change, instance, versions,
+ String.format("Testing deployment for %s (%s)",
+ job.jobName(), versions.toString()),
+ completedAt.orElseGet(clock::instant));
}
}
- else if (testJobs == null) {
- testJobs = testJobs(application, versions,
- String.format("Testing deployment for %s (%s)",
- job.jobName(), versions.toString()),
- completedAt.orElseGet(clock::instant));
- }
+ completedAt = Optional.empty();
}
- completedAt = Optional.empty();
- }
- else { // All jobs are complete; find the time of completion of this step.
- if (stepJobs.isEmpty()) { // No jobs means this is a delay step.
- Duration delay = ((DeploymentSpec.Delay) step).duration();
- completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> !at.isAfter(clock.instant()));
- reason += " after a delay of " + delay;
- }
- else {
- completedAt = stepJobs.stream().map(job -> application.deploymentJobs().statusOf(job).get().lastCompleted().get().at()).max(naturalOrder());
- reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
+ else { // All jobs are complete; find the time of completion of this step.
+ if (stepJobs.isEmpty()) { // No jobs means this is a delay step.
+ Duration delay = ((DeploymentSpec.Delay) step).duration();
+ completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> !at.isAfter(clock.instant()));
+ reason += " after a delay of " + delay;
+ }
+ else {
+ completedAt = stepJobs.stream().map(job -> instance.deploymentJobs().statusOf(job).get().lastCompleted().get().at()).max(naturalOrder());
+ reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
+ }
}
}
}
+ if (testJobs == null) { // If nothing to test, but outstanding commits, test those.
+ testJobs = testJobs(application.deploymentSpec(), change, instance,
+ Versions.from(application.outstandingChange().onTopOf(change),
+ application,
+ steps.sortedDeployments(instance.productionDeployments().values()).stream().findFirst(),
+ controller.systemVersion()),
+ "Testing last changes outside prod", clock.instant());
+ }
+ jobs.addAll(testJobs);
}
- if (testJobs == null) { // If nothing to test, but outstanding commits, test those.
- testJobs = testJobs(application, Versions.from(application.outstandingChange().onTopOf(application.change()),
- application,
- steps.sortedDeployments(application.productionDeployments().values()).stream().findFirst(),
- controller.systemVersion()),
- "Testing last changes outside prod", clock.instant());
- }
- jobs.addAll(testJobs);
});
return Collections.unmodifiableList(jobs);
}
/** Returns whether given job should be triggered */
- private boolean canTrigger(JobType job, Versions versions, Instance instance, List<JobType> parallelJobs) {
+ private boolean canTrigger(JobType job, Versions versions, Instance instance, DeploymentSpec deploymentSpec, List<JobType> parallelJobs) {
if (jobStateOf(instance, job) != idle) return false;
// Are we already running jobs which are not in the set which can run in parallel with this?
@@ -378,12 +400,12 @@ public class DeploymentTrigger {
// Are there another suspended deployment such that we shouldn't simultaneously change this?
if (job.isProduction() && isSuspendedInAnotherZone(instance, job.zone(controller.system()))) return false;
- return triggerAt(clock.instant(), job, versions, instance);
+ return triggerAt(clock.instant(), job, versions, instance, deploymentSpec);
}
/** Returns whether given job should be triggered */
- private boolean canTrigger(JobType job, Versions versions, Instance instance) {
- return canTrigger(job, versions, instance, null);
+ private boolean canTrigger(JobType job, Versions versions, Instance instance, DeploymentSpec deploymentSpec) {
+ return canTrigger(job, versions, instance, deploymentSpec, null);
}
private boolean isSuspendedInAnotherZone(Instance instance, ZoneId zone) {
@@ -396,7 +418,7 @@ public class DeploymentTrigger {
}
/** Returns whether the given job can trigger at the given instant */
- public boolean triggerAt(Instant instant, JobType job, Versions versions, Instance instance) {
+ public boolean triggerAt(Instant instant, JobType job, Versions versions, Instance instance, DeploymentSpec deploymentSpec) {
Optional<JobStatus> jobStatus = instance.deploymentJobs().statusOf(job);
if (jobStatus.isEmpty()) return true;
if (jobStatus.get().pausedUntil().isPresent() && jobStatus.get().pausedUntil().getAsLong() > clock.instant().toEpochMilli()) return false;
@@ -404,7 +426,7 @@ public class DeploymentTrigger {
if (jobStatus.get().lastCompleted().isEmpty()) return true; // Never completed
if (jobStatus.get().firstFailing().isEmpty()) return true; // Should not happen as firstFailing should be set for an unsuccessful job
if ( ! versions.targetsMatch(jobStatus.get().lastCompleted().get())) return true; // Always trigger as targets have changed
- if (instance.deploymentSpec().upgradePolicy() == DeploymentSpec.UpgradePolicy.canary) return true; // Don't throttle canaries
+ if (deploymentSpec.upgradePolicy() == DeploymentSpec.UpgradePolicy.canary) return true; // Don't throttle canaries
Instant firstFailing = jobStatus.get().firstFailing().get().at();
Instant lastCompleted = jobStatus.get().lastCompleted().get().at();
@@ -442,13 +464,11 @@ public class DeploymentTrigger {
}
private JobState jobStateOf(Instance instance, JobType jobType) {
- if (instance.deploymentJobs().deployedInternally()) {
+ if (controller.applications().requireApplication(TenantAndApplicationId.from(instance.id())).internal()) {
Optional<Run> run = controller.jobController().last(instance.id(), jobType);
return run.isPresent() && ! run.get().hasEnded() ? JobState.running : JobState.idle;
}
- return buildService.stateOf(BuildJob.of(instance.id(),
- instance.deploymentJobs().projectId().getAsLong(),
- jobType.jobName()));
+ return buildService.stateOf(BuildJob.of(instance.id(), 0, jobType.jobName()));
}
// ---------- Completion logic ----------
@@ -465,7 +485,7 @@ public class DeploymentTrigger {
* Additionally, if the application is pinned to a Vespa version, and the given change has a (this) platform,
* the deployment for the job must be on the pinned version.
*/
- public boolean isComplete(Change change, Instance instance, JobType jobType) {
+ public boolean isComplete(Change change, Change fullChange, Instance instance, JobType jobType) {
Optional<Deployment> existingDeployment = deploymentFor(instance, jobType);
if ( change.isPinned()
&& change.platform().isPresent()
@@ -474,10 +494,10 @@ public class DeploymentTrigger {
return instance.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
.map(job -> change.platform().map(job.platform()::equals).orElse(true)
- && change.application().map(job.application()::equals).orElse(true))
+ && change.application().map(job.application()::equals).orElse(true))
.orElse(false)
|| jobType.isProduction()
- && existingDeployment.map(deployment -> ! isUpgrade(change, deployment) && isDowngrade(instance.change(), deployment))
+ && existingDeployment.map(deployment -> ! isUpgrade(change, deployment) && isDowngrade(fullChange, deployment))
.orElse(false);
}
@@ -514,25 +534,28 @@ public class DeploymentTrigger {
// ---------- Change management o_O ----------
- private boolean acceptNewApplicationVersion(Instance instance) {
- if ( ! instance.deploymentSpec().canChangeRevisionAt(clock.instant())) return false;
- if (instance.change().application().isPresent()) return true; // Replacing a previous application change is ok.
- if (instance.deploymentJobs().hasFailures()) return true; // Allow changes to fix upgrade problems.
- return instance.change().platform().isEmpty();
+ private boolean acceptNewApplicationVersion(Application application) {
+ if ( ! application.deploymentSpec().canChangeRevisionAt(clock.instant())) return false;
+ if (application.change().application().isPresent()) return true; // Replacing a previous application change is ok.
+ for (Instance instance : application.instances().values())
+ if (instance.deploymentJobs().hasFailures()) return true; // Allow changes to fix upgrade problems.
+ return application.change().platform().isEmpty();
}
- private Change remainingChange(Instance instance) {
- DeploymentSteps steps = steps(instance.deploymentSpec());
+ private Change remainingChange(Application application) {
+ DeploymentSteps steps = steps(application.deploymentSpec());
List<JobType> jobs = steps.production().isEmpty()
? steps.testJobs()
: steps.productionJobs();
- Change change = instance.change();
- if (jobs.stream().allMatch(job -> isComplete(instance.change().withoutApplication(), instance, job)))
- change = change.withoutPlatform();
+ Change change = application.change();
+ for (Instance instance : application.instances().values()) {
+ if (jobs.stream().allMatch(job -> isComplete(application.change().withoutApplication(), application.change(), instance, job)))
+ change = change.withoutPlatform();
- if (jobs.stream().allMatch(job -> isComplete(instance.change().withoutPlatform(), instance, job)))
- change = change.withoutApplication();
+ if (jobs.stream().allMatch(job -> isComplete(application.change().withoutPlatform(), application.change(), instance, job)))
+ change = change.withoutApplication();
+ }
return change;
}
@@ -542,20 +565,23 @@ public class DeploymentTrigger {
/**
* Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested.
*/
- private List<Job> testJobs(Instance instance, Versions versions, String reason, Instant availableSince) {
- return testJobs(instance, versions, reason, availableSince, jobType -> canTrigger(jobType, versions, instance));
+ private List<Job> testJobs(DeploymentSpec deploymentSpec, Change change, Instance instance, Versions versions,
+ String reason, Instant availableSince) {
+ return testJobs(deploymentSpec, change, instance, versions, reason, availableSince,
+ jobType -> canTrigger(jobType, versions, instance, deploymentSpec));
}
/**
* Returns the list of test jobs that need to succeed on the given versions for it to be considered tested, filtered by the given condition.
*/
- private List<Job> testJobs(Instance instance, Versions versions, String reason, Instant availableSince, Predicate<JobType> condition) {
+ private List<Job> testJobs(DeploymentSpec deploymentSpec, Change change, Instance instance, Versions versions,
+ String reason, Instant availableSince, Predicate<JobType> condition) {
List<Job> jobs = new ArrayList<>();
- for (JobType jobType : steps(instance.deploymentSpec()).testJobs()) {
+ for (JobType jobType : steps(deploymentSpec).testJobs()) {
Optional<JobRun> completion = successOn(instance, jobType, versions)
.filter(run -> versions.sourcesMatchIfPresent(run) || jobType == systemTest);
if (completion.isEmpty() && condition.test(jobType))
- jobs.add(deploymentJob(instance, versions, instance.change(), jobType, reason, availableSince));
+ jobs.add(deploymentJob(instance, versions, change, jobType, reason, availableSince));
}
return jobs;
}
@@ -585,7 +611,7 @@ public class DeploymentTrigger {
private Job(Instance instance, JobRun triggering, JobType jobType, Instant availableSince,
boolean isRetry, boolean isApplicationUpgrade) {
- super(instance.id(), instance.deploymentJobs().projectId().getAsLong(), jobType.jobName());
+ super(instance.id(), 0L, jobType.jobName());
this.jobType = jobType;
this.triggering = triggering;
this.availableSince = availableSince;
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
index a3c8459343b..1828a189cad 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
@@ -19,8 +19,9 @@ import com.yahoo.security.KeyUtils;
import com.yahoo.security.SignatureAlgorithm;
import com.yahoo.security.X509CertificateBuilder;
import com.yahoo.security.X509CertificateUtils;
-import com.yahoo.vespa.hosted.controller.Controller;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
+import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.api.ActivateResult;
import com.yahoo.vespa.hosted.controller.api.application.v4.model.DeployOptions;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
@@ -37,6 +38,7 @@ import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs.JobReport;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.yolean.Exceptions;
import javax.security.auth.x500.X500Principal;
@@ -294,13 +296,13 @@ public class InternalStepRunner implements StepRunner {
return Optional.of(running);
}
}
- else if (timedOut(deployment.get(), endpointTimeout)) {
+ else if (timedOut(id, deployment.get(), endpointTimeout)) {
logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
}
- if (timedOut(deployment.get(), installationTimeout)) {
+ if (timedOut(id, deployment.get(), installationTimeout)) {
logger.log(INFO, "Installation failed to complete within " + installationTimeout.toMinutes() + " minutes!");
return Optional.of(installationFailed);
}
@@ -326,13 +328,13 @@ public class InternalStepRunner implements StepRunner {
return Optional.of(running);
}
}
- else if (timedOut(deployment.get(), endpointTimeout)) {
+ else if (timedOut(id, deployment.get(), endpointTimeout)) {
logger.log(WARNING, "Tester failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
return Optional.of(error);
}
}
- if (timedOut(deployment.get(), testerTimeout)) {
+ if (timedOut(id, deployment.get(), testerTimeout)) {
logger.log(WARNING, "Installation of tester failed to complete within " + testerTimeout.toMinutes() + " minutes of real deployment!");
return Optional.of(error);
}
@@ -443,14 +445,14 @@ public class InternalStepRunner implements StepRunner {
logger.log("Attempting to find endpoints ...");
var endpoints = controller.applications().clusterEndpoints(id.application(), zones);
- if ( ! endpoints.containsKey(id.type().zone(controller.system())) && timedOut(deployment.get(), endpointTimeout)) {
+ if ( ! endpoints.containsKey(id.type().zone(controller.system())) && timedOut(id, deployment.get(), endpointTimeout)) {
logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
return Optional.of(error);
}
logEndpoints(endpoints, logger);
Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
- if (testerEndpoint.isEmpty() && timedOut(deployment.get(), endpointTimeout)) {
+ if (testerEndpoint.isEmpty() && timedOut(id, deployment.get(), endpointTimeout)) {
logger.log(WARNING, "Endpoints for the tester container vanished again, while it was still active!");
return Optional.of(error);
}
@@ -573,9 +575,9 @@ public class InternalStepRunner implements StepRunner {
/** Sends a mail with a notification of a failed run, if one should be sent. */
private void sendNotification(Run run, DualLogger logger) {
- Instance instance = controller.applications().require(run.id().application());
- Notifications notifications = instance.deploymentSpec().notifications();
- boolean newCommit = instance.change().application()
+ Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application()));
+ Notifications notifications = application.deploymentSpec().notifications();
+ boolean newCommit = application.change().application()
.map(run.versions().targetApplication()::equals)
.orElse(false);
When when = newCommit ? failingCommit : failing;
@@ -611,8 +613,8 @@ public class InternalStepRunner implements StepRunner {
/** Returns the real application with the given id. */
private Instance application(ApplicationId id) {
- controller.applications().lockOrThrow(id, __ -> { }); // Memory fence.
- return controller.applications().require(id);
+ controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { }); // Memory fence.
+ return controller.applications().requireInstance(id);
}
/**
@@ -622,7 +624,13 @@ public class InternalStepRunner implements StepRunner {
* to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry,
* and the given default installation timeout, minus one minute, is used as a timeout threshold.
*/
- private boolean timedOut(Deployment deployment, Duration defaultTimeout) {
+ private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) {
+ // TODO jonmv: This is a workaround for new deployment writes not yet being visible in spite of Curator locking.
+ // TODO Investigate what's going on here, and remove this workaround.
+ Run run = controller.jobController().run(id).get();
+ if (run.start().isAfter(deployment.at()))
+ return false;
+
Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone())
.filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0)
.orElse(defaultTimeout);
@@ -632,7 +640,7 @@ public class InternalStepRunner implements StepRunner {
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */
private ApplicationPackage testerPackage(RunId id) {
ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication();
- DeploymentSpec spec = controller.applications().require(id.application()).deploymentSpec();
+ DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec();
ZoneId zone = id.type().zone(controller.system());
boolean useTesterCertificate = controller.system().isPublic() && id.type().isTest();
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java
index 6d535254970..0ecce359a02 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java
@@ -10,9 +10,10 @@ import com.yahoo.vespa.flags.BooleanFlag;
import com.yahoo.vespa.flags.FetchVector;
import com.yahoo.vespa.flags.FlagSource;
import com.yahoo.vespa.flags.Flags;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.Controller;
-import com.yahoo.vespa.hosted.controller.LockedInstance;
+import com.yahoo.vespa.hosted.controller.LockedApplication;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.api.integration.LogEntry;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.NotFoundException;
@@ -26,6 +27,7 @@ import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
import com.yahoo.vespa.hosted.controller.application.JobStatus;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.persistence.BufferedLogStore;
import com.yahoo.vespa.hosted.controller.persistence.CuratorDb;
@@ -98,7 +100,7 @@ public class JobController {
/** Rewrite all job data with the newest format. */
public void updateStorage() {
- for (ApplicationId id : applications())
+ for (ApplicationId id : instances())
for (JobType type : jobs(id)) {
locked(id, type, runs -> { // runs is not modified here, and is written as it was.
curator.readLastRun(id, type).ifPresent(curator::writeLastRun);
@@ -151,7 +153,7 @@ public class JobController {
return run;
ZoneId zone = id.type().zone(controller.system());
- Optional<Deployment> deployment = Optional.ofNullable(controller.applications().require(id.application())
+ Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application())
.deployments().get(zone));
if (deployment.isEmpty() || deployment.get().at().isBefore(run.start()))
return run;
@@ -193,10 +195,11 @@ public class JobController {
locked(id, run -> run.with(testerCertificate));
}
- /** Returns a list of all application which have registered. */
- public List<ApplicationId> applications() {
+ /** Returns a list of all instances of applications which have registered. */
+ public List<ApplicationId> instances() {
return copyOf(controller.applications().asList().stream()
- .filter(application -> application.deploymentJobs().deployedInternally())
+ .filter(Application::internal)
+ .flatMap(application -> application.instances().values().stream())
.map(Instance::id)
.iterator());
}
@@ -236,12 +239,12 @@ public class JobController {
/** Returns a list of all active runs. */
public List<Run> active() {
- return copyOf(applications().stream()
- .flatMap(id -> Stream.of(JobType.values())
+ return copyOf(instances().stream()
+ .flatMap(id -> Stream.of(JobType.values())
.map(type -> last(id, type))
.flatMap(Optional::stream)
.filter(run -> ! run.hasEnded()))
- .iterator());
+ .iterator());
}
/** Changes the status of the given step, for the given run, provided it is still active. */
@@ -281,8 +284,8 @@ public class JobController {
public ApplicationVersion submit(ApplicationId id, SourceRevision revision, String authorEmail, long projectId,
ApplicationPackage applicationPackage, byte[] testPackageBytes) {
AtomicReference<ApplicationVersion> version = new AtomicReference<>();
- controller.applications().lockOrThrow(id, application -> {
- if ( ! application.get().deploymentJobs().deployedInternally())
+ controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { // TODO jonmv: change callers
+ if ( ! application.get().internal())
application = registered(application);
long run = nextBuild(id);
@@ -309,17 +312,19 @@ public class JobController {
}
/** Registers the given application, copying necessary application packages, and returns the modified version. */
- private LockedInstance registered(LockedInstance application) {
- // TODO jvenstad: Remove when there are no more SDv3 pipelines.
- // Copy all current packages to the new application store
- application.get().productionDeployments().values().stream()
- .map(Deployment::applicationVersion)
- .distinct()
- .forEach(appVersion -> {
- byte[] content = controller.applications().artifacts().getApplicationPackage(application.get().id(), appVersion.id());
- controller.applications().applicationStore().put(application.get().id(), appVersion, content);
- });
- // Make sure any ongoing upgrade is cancelled, since future jobs will require the tester artifact.
+ private LockedApplication registered(LockedApplication application) {
+ for (Instance instance : application.get().instances().values()) {
+ // TODO jvenstad: Remove when everyone has migrated off SDv3 pipelines. Real soon now!
+ // Copy all current packages to the new application store
+ instance.productionDeployments().values().stream()
+ .map(Deployment::applicationVersion)
+ .distinct()
+ .forEach(appVersion -> {
+ byte[] content = controller.applications().artifacts().getApplicationPackage(instance.id(), appVersion.id());
+ controller.applications().applicationStore().put(instance.id(), appVersion, content);
+ });
+ }
+ // Make sure any ongoing upgrade is cancelled, since future jobs will require the tester artifact.
return application.withChange(application.get().change().withoutPlatform().withoutApplication())
.withBuiltInternally(true);
}
@@ -329,8 +334,8 @@ public class JobController {
if ( ! type.environment().isManuallyDeployed() && versions.targetApplication().isUnknown())
throw new IllegalArgumentException("Target application must be a valid reference.");
- controller.applications().lockIfPresent(id, application -> {
- if ( ! application.get().deploymentJobs().deployedInternally())
+ controller.applications().lockApplicationIfPresent(TenantAndApplicationId.from(id), application -> {
+ if ( ! application.get().internal())
throw new IllegalArgumentException(id + " is not built here!");
locked(id, type, __ -> {
@@ -346,8 +351,8 @@ public class JobController {
/** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */
public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) {
- controller.applications().lockOrThrow(id, application -> {
- if ( ! application.get().deploymentJobs().deployedInternally())
+ controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
+ if ( ! application.get().internal())
controller.applications().store(registered(application));
});
if ( ! type.environment().isManuallyDeployed())
@@ -383,7 +388,7 @@ public class JobController {
/** Unregisters the given application and makes all associated data eligible for garbage collection. */
public void unregister(ApplicationId id) {
- controller.applications().lockIfPresent(id, application -> {
+ controller.applications().lockApplicationIfPresent(TenantAndApplicationId.from(id), application -> { // TODO jonmv: change callers.
controller.applications().store(application.withBuiltInternally(false));
jobs(id).forEach(type -> last(id, type).ifPresent(last -> abort(last.id())));
});
@@ -391,7 +396,7 @@ public class JobController {
/** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */
public void collectGarbage() {
- Set<ApplicationId> applicationsToBuild = new HashSet<>(applications());
+ Set<ApplicationId> applicationsToBuild = new HashSet<>(instances());
curator.applicationsWithJobs().stream()
.filter(id -> ! applicationsToBuild.contains(id))
.forEach(id -> {
@@ -436,7 +441,7 @@ public class JobController {
/** Returns a URI which points at a badge showing current status for all jobs for the given application. */
public URI overviewBadge(ApplicationId id) {
- DeploymentSteps steps = new DeploymentSteps(controller.applications().require(id).deploymentSpec(), controller::system);
+ DeploymentSteps steps = new DeploymentSteps(controller.applications().requireApplication(TenantAndApplicationId.from(id)).deploymentSpec(), controller::system);
return badges.overview(id,
steps.jobs().stream()
.map(type -> last(id, type))
@@ -467,13 +472,13 @@ public class JobController {
/** Returns a set containing the zone of the deployment tested in the given run, and all production zones for the application. */
public Set<ZoneId> testedZoneAndProductionZones(ApplicationId id, JobType type) {
return Stream.concat(Stream.of(type.zone(controller.system())),
- controller.applications().require(id).productionDeployments().keySet().stream())
+ controller.applications().requireInstance(id).productionDeployments().keySet().stream())
.collect(Collectors.toSet());
}
// TODO jvenstad: Find a more appropriate way of doing this, at least when this is the only build service.
private long nextBuild(ApplicationId id) {
- return 1 + controller.applications().require(id).deploymentJobs()
+ return 1 + controller.applications().requireInstance(id).deploymentJobs()
.statusOf(JobType.component)
.flatMap(JobStatus::lastCompleted)
.map(JobStatus.JobRun::id)
@@ -481,8 +486,8 @@ public class JobController {
}
private void prunePackages(ApplicationId id) {
- controller.applications().lockIfPresent(id, application -> {
- application.get().productionDeployments().values().stream()
+ controller.applications().lockApplicationIfPresent(TenantAndApplicationId.from(id), application -> {
+ application.get().require(id.instance()).productionDeployments().values().stream()
.map(Deployment::applicationVersion)
.min(Comparator.comparingLong(applicationVersion -> applicationVersion.buildNumber().getAsLong()))
.ifPresent(oldestDeployed -> {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/Versions.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/Versions.java
index 70424cf9813..385eaae8215 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/Versions.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/Versions.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.hosted.controller.deployment;
import com.yahoo.component.Version;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
@@ -112,40 +113,43 @@ public class Versions {
}
/** Create versions using change contained in application */
- public static Versions from(Instance instance, Version defaultPlatformVersion) {
- return from(instance.change(), instance, Optional.empty(), defaultPlatformVersion);
+ public static Versions from(Application application, Version defaultPlatformVersion) {
+ return from(application.change(), application, Optional.empty(), defaultPlatformVersion);
}
/** Create versions using given change and application */
- public static Versions from(Change change, Instance instance, Optional<Deployment> deployment,
+ public static Versions from(Change change, Application application, Optional<Deployment> deployment,
Version defaultPlatformVersion) {
- return new Versions(targetPlatform(instance, change, deployment, defaultPlatformVersion),
- targetApplication(instance, change, deployment),
+ return new Versions(targetPlatform(application, change, deployment, defaultPlatformVersion),
+ targetApplication(application, change, deployment),
deployment.map(Deployment::version),
deployment.map(Deployment::applicationVersion));
}
- private static Version targetPlatform(Instance instance, Change change, Optional<Deployment> deployment,
+ private static Version targetPlatform(Application application, Change change, Optional<Deployment> deployment,
Version defaultVersion) {
if (change.isPinned() && change.platform().isPresent())
return change.platform().get();
return max(change.platform(), deployment.map(Deployment::version))
- .orElseGet(() -> instance.oldestDeployedPlatform().orElse(defaultVersion));
+ .orElseGet(() -> application.oldestDeployedPlatform().orElse(defaultVersion));
}
- private static ApplicationVersion targetApplication(Instance instance, Change change,
+ private static ApplicationVersion targetApplication(Application application, Change change,
Optional<Deployment> deployment) {
return max(change.application(), deployment.map(Deployment::applicationVersion))
- .orElseGet(() -> defaultApplicationVersion(instance));
- }
-
- private static ApplicationVersion defaultApplicationVersion(Instance instance) {
- return instance.oldestDeployedApplication()
- .orElseGet(() -> Optional.ofNullable(instance.deploymentJobs().jobStatus().get(JobType.component))
- .flatMap(JobStatus::lastSuccess)
- .map(JobStatus.JobRun::application)
- .orElse(ApplicationVersion.unknown));
+ .orElseGet(() -> defaultApplicationVersion(application));
+ }
+
+ private static ApplicationVersion defaultApplicationVersion(Application application) {
+ return application.oldestDeployedApplication()
+ .or(() -> application.instances().values().stream()
+ .flatMap(instance -> instance.deploymentJobs().statusOf(JobType.component)
+ .flatMap(JobStatus::lastSuccess)
+ .map(JobStatus.JobRun::application)
+ .stream())
+ .findAny())
+ .orElse(ApplicationVersion.unknown);
}
private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmer.java
index d94004eda2b..39faffcb869 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmer.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.hosted.controller.maintenance;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.zone.ZoneId;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.ApplicationController;
import com.yahoo.vespa.hosted.controller.Controller;
@@ -10,7 +11,8 @@ import com.yahoo.vespa.hosted.controller.api.integration.organization.Applicatio
import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueId;
import com.yahoo.vespa.hosted.controller.api.integration.organization.OwnershipIssues;
import com.yahoo.vespa.hosted.controller.api.integration.organization.User;
-import com.yahoo.vespa.hosted.controller.application.InstanceList;
+import com.yahoo.vespa.hosted.controller.application.ApplicationList;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.tenant.Tenant;
import com.yahoo.vespa.hosted.controller.tenant.UserTenant;
import com.yahoo.yolean.Exceptions;
@@ -47,13 +49,13 @@ public class ApplicationOwnershipConfirmer extends Maintainer {
/** File an ownership issue with the owners of all applications we know about. */
private void confirmApplicationOwnerships() {
- InstanceList.from(controller().applications().asList())
- .withProjectId()
- .hasProductionDeployment()
- .asList()
- .stream()
- .filter(application -> application.createdAt().isBefore(controller().clock().instant().minus(Duration.ofDays(90))))
- .forEach(application -> {
+ ApplicationList.from(controller().applications().asList())
+ .withProjectId()
+ .withProductionDeployment()
+ .asList()
+ .stream()
+ .filter(application -> application.createdAt().isBefore(controller().clock().instant().minus(Duration.ofDays(90))))
+ .forEach(application -> {
try {
Tenant tenant = tenantOf(application.id());
tenant.contact().ifPresent(contact -> { // TODO jvenstad: Makes sense to require, and run this only in main?
@@ -71,25 +73,27 @@ public class ApplicationOwnershipConfirmer extends Maintainer {
}
- private ApplicationSummary summaryOf(ApplicationId application) {
- var app = applications.require(application);
+ private ApplicationSummary summaryOf(TenantAndApplicationId application) {
+ var app = applications.requireApplication(application);
var metrics = new HashMap<ZoneId, ApplicationSummary.Metric>();
- for (var kv : app.deployments().entrySet()) {
- var zone = kv.getKey();
- var deploymentMetrics = kv.getValue().metrics();
- metrics.put(zone, new ApplicationSummary.Metric(deploymentMetrics.documentCount(),
- deploymentMetrics.queriesPerSecond(),
- deploymentMetrics.writesPerSecond()));
- }
- return new ApplicationSummary(app.id(), app.activity().lastQueried(), app.activity().lastWritten(), metrics);
+ for (Instance instance : app.instances().values())
+ for (var kv : instance.deployments().entrySet()) {
+ var zone = kv.getKey();
+ var deploymentMetrics = kv.getValue().metrics();
+ metrics.put(zone, new ApplicationSummary.Metric(deploymentMetrics.documentCount(),
+ deploymentMetrics.queriesPerSecond(),
+ deploymentMetrics.writesPerSecond()));
+ }
+ // TODO jonmv: Default instance should really be replaced with something better.
+ return new ApplicationSummary(app.id().defaultInstance(), app.activity().lastQueried(), app.activity().lastWritten(), metrics);
}
/** Escalate ownership issues which have not been closed before a defined amount of time has passed. */
private void ensureConfirmationResponses() {
- for (Instance instance : controller().applications().asList())
- instance.ownershipIssueId().ifPresent(issueId -> {
+ for (Application application : controller().applications().asList())
+ application.ownershipIssueId().ifPresent(issueId -> {
try {
- Tenant tenant = tenantOf(instance.id());
+ Tenant tenant = tenantOf(application.id());
ownershipIssues.ensureResponse(issueId, tenant.type() == Tenant.Type.athenz ? tenant.contact() : Optional.empty());
}
catch (RuntimeException e) {
@@ -99,28 +103,26 @@ public class ApplicationOwnershipConfirmer extends Maintainer {
}
private void updateConfirmedApplicationOwners() {
- InstanceList.from(controller().applications().asList())
- .withProjectId()
- .hasProductionDeployment()
- .asList()
- .stream()
- .filter(application -> application.ownershipIssueId().isPresent())
- .forEach(application -> {
- IssueId ownershipIssueId = application.ownershipIssueId().get();
- ownershipIssues.getConfirmedOwner(ownershipIssueId).ifPresent(owner -> {
- controller().applications().lockIfPresent(application.id(), lockedApplication ->
- controller().applications().store(lockedApplication.withOwner(owner)));
- });
- });
+ ApplicationList.from(controller().applications().asList())
+ .withProjectId()
+ .withProductionDeployment()
+ .asList()
+ .stream()
+ .filter(application -> application.ownershipIssueId().isPresent())
+ .forEach(application -> {
+ IssueId ownershipIssueId = application.ownershipIssueId().get();
+ ownershipIssues.getConfirmedOwner(ownershipIssueId).ifPresent(owner -> {
+ controller().applications().lockApplicationIfPresent(application.id(), lockedApplication ->
+ controller().applications().store(lockedApplication.withOwner(owner)));
+ });
+ });
}
- private User determineAssignee(Tenant tenant, Instance instance) {
- return instance.owner().orElse(
- tenant instanceof UserTenant ? userFor(tenant) : null
- );
+ private User determineAssignee(Tenant tenant, Application application) {
+ return application.owner().orElse(tenant instanceof UserTenant ? userFor(tenant) : null);
}
- private Tenant tenantOf(ApplicationId applicationId) {
+ private Tenant tenantOf(TenantAndApplicationId applicationId) {
return controller().tenants().get(applicationId.tenant())
.orElseThrow(() -> new IllegalStateException("No tenant found for application " + applicationId));
}
@@ -129,8 +131,8 @@ public class ApplicationOwnershipConfirmer extends Maintainer {
return User.from(tenant.name().value().replaceFirst(Tenant.userPrefix, ""));
}
- protected void store(IssueId issueId, ApplicationId applicationId) {
- controller().applications().lockIfPresent(applicationId, application ->
+ protected void store(IssueId issueId, TenantAndApplicationId applicationId) {
+ controller().applications().lockApplicationIfPresent(applicationId, application ->
controller().applications().store(application.withOwnershipIssueId(issueId)));
}
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/BillingMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/BillingMaintainer.java
index 37d19986cef..2ab1b4c93ed 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/BillingMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/BillingMaintainer.java
@@ -28,10 +28,9 @@ public class BillingMaintainer extends Maintainer {
.filter(tenant -> tenant instanceof CloudTenant)
.map(tenant -> (CloudTenant) tenant)
.forEach(cloudTenant -> controller().applications().asList(cloudTenant.name())
- .stream()
- .forEach(application -> {
- billing.handleBilling(application.id(), cloudTenant.billingInfo().customerId());
- })
+ .forEach(application -> {
+ billing.handleBilling(application.id().defaultInstance(), cloudTenant.billingInfo().customerId());
+ })
);
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ClusterInfoMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ClusterInfoMaintainer.java
index c005cec598c..3b76aba94ba 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ClusterInfoMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ClusterInfoMaintainer.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.hosted.controller.maintenance;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.HostName;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
@@ -72,16 +73,20 @@ public class ClusterInfoMaintainer extends Maintainer {
@Override
protected void maintain() {
- for (Instance instance : controller().applications().asList()) {
- for (Deployment deployment : instance.deployments().values()) {
- DeploymentId deploymentId = new DeploymentId(instance.id(), deployment.zone());
- try {
- var nodes = nodeRepository.list(deploymentId.zoneId(), deploymentId.applicationId());
- Map<ClusterSpec.Id, ClusterInfo> clusterInfo = getClusterInfo(nodes);
- controller().applications().lockIfPresent(instance.id(), lockedApplication ->
- controller.applications().store(lockedApplication.withClusterInfo(deployment.zone(), clusterInfo)));
- } catch (Exception e) {
- log.log(Level.WARNING, "Failing getting cluster information for " + deploymentId, e);
+ for (Application application : controller().applications().asList()) {
+ for (Instance instance : application.instances().values()) {
+ for (Deployment deployment : instance.deployments().values()) {
+ DeploymentId deploymentId = new DeploymentId(instance.id(), deployment.zone());
+ try {
+ var nodes = nodeRepository.list(deploymentId.zoneId(), deploymentId.applicationId());
+ Map<ClusterSpec.Id, ClusterInfo> clusterInfo = getClusterInfo(nodes);
+ controller().applications().lockApplicationIfPresent(application.id(), lockedApplication ->
+ controller.applications().store(lockedApplication.with(instance.name(),
+ locked -> locked.withClusterInfo(deployment.zone(), clusterInfo))));
+ }
+ catch (Exception e) {
+ log.log(Level.WARNING, "Failing getting cluster information for " + deploymentId, e);
+ }
}
}
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirer.java
index 44e0c9fd393..7756e6b23a7 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirer.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.maintenance;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.application.Deployment;
@@ -23,20 +24,20 @@ public class DeploymentExpirer extends Maintainer {
@Override
protected void maintain() {
- for (Instance instance : controller().applications().asList()) {
- for (Deployment deployment : instance.deployments().values()) {
- if (!isExpired(deployment)) continue;
-
- try {
- log.log(Level.INFO, "Expiring deployment of " + instance.id() + " in " + deployment.zone());
- controller().applications().deactivate(instance.id(), deployment.zone());
- } catch (Exception e) {
- log.log(Level.WARNING, "Could not expire " + deployment + " of " + instance +
- ": " + Exceptions.toMessageString(e) + ". Retrying in " +
- maintenanceInterval());
+ for (Application application : controller().applications().asList())
+ for (Instance instance : application.instances().values())
+ for (Deployment deployment : instance.deployments().values()) {
+ if ( ! isExpired(deployment)) continue;
+
+ try {
+ log.log(Level.INFO, "Expiring deployment of " + instance.id() + " in " + deployment.zone());
+ controller().applications().deactivate(instance.id(), deployment.zone());
+ } catch (Exception e) {
+ log.log(Level.WARNING, "Could not expire " + deployment + " of " + instance +
+ ": " + Exceptions.toMessageString(e) + ". Retrying in " +
+ maintenanceInterval());
+ }
}
- }
- }
}
/** Returns whether given deployment has expired according to its TTL */
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporter.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporter.java
index 11c210adc48..f139001aa28 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporter.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporter.java
@@ -4,12 +4,13 @@ package com.yahoo.vespa.hosted.controller.maintenance;
import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.SystemName;
-import com.yahoo.vespa.hosted.controller.Instance;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.api.integration.organization.DeploymentIssues;
import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueId;
import com.yahoo.vespa.hosted.controller.api.integration.organization.User;
-import com.yahoo.vespa.hosted.controller.application.InstanceList;
+import com.yahoo.vespa.hosted.controller.application.ApplicationList;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.tenant.Tenant;
import com.yahoo.yolean.Exceptions;
@@ -50,10 +51,10 @@ public class DeploymentIssueReporter extends Maintainer {
}
/** Returns the applications to maintain issue status for. */
- private List<Instance> applications() {
- return InstanceList.from(controller().applications().asList())
- .withProjectId()
- .asList();
+ private List<Application> applications() {
+ return ApplicationList.from(controller().applications().asList())
+ .withProjectId()
+ .asList();
}
/**
@@ -61,18 +62,18 @@ public class DeploymentIssueReporter extends Maintainer {
* and store the issue id for the filed issues. Also, clear the issueIds of applications
* where deployment has not failed for this amount of time.
*/
- private void maintainDeploymentIssues(List<Instance> instances) {
- Set<ApplicationId> failingApplications = InstanceList.from(instances)
- .failingApplicationChangeSince(controller().clock().instant().minus(maxFailureAge))
- .asList().stream()
- .map(Instance::id)
- .collect(Collectors.toSet());
-
- for (Instance instance : instances)
- if (failingApplications.contains(instance.id()))
- fileDeploymentIssueFor(instance.id());
+ private void maintainDeploymentIssues(List<Application> applications) {
+ Set<TenantAndApplicationId> failingApplications = ApplicationList.from(applications)
+ .failingApplicationChangeSince(controller().clock().instant().minus(maxFailureAge))
+ .asList().stream()
+ .map(Application::id)
+ .collect(Collectors.toSet());
+
+ for (Application application : applications)
+ if (failingApplications.contains(application.id()))
+ fileDeploymentIssueFor(application.id());
else
- store(instance.id(), null);
+ store(application.id(), null);
}
/**
@@ -80,7 +81,7 @@ public class DeploymentIssueReporter extends Maintainer {
* applications that have been failing the upgrade to the system version for
* longer than the set grace period, or update this list if the issue already exists.
*/
- private void maintainPlatformIssue(List<Instance> instances) {
+ private void maintainPlatformIssue(List<Application> applications) {
if (controller().system() == SystemName.cd)
return;
@@ -89,21 +90,25 @@ public class DeploymentIssueReporter extends Maintainer {
if ((controller().versionStatus().version(systemVersion).confidence() != broken))
return;
- if (InstanceList.from(instances)
- .failingUpgradeToVersionSince(systemVersion, controller().clock().instant().minus(upgradeGracePeriod))
- .isEmpty())
+ if (ApplicationList.from(applications)
+ .failingUpgradeToVersionSince(systemVersion, controller().clock().instant().minus(upgradeGracePeriod))
+ .isEmpty())
return;
- List<ApplicationId> failingApplications = InstanceList.from(instances)
- .failingUpgradeToVersionSince(systemVersion, controller().clock().instant())
- .idList();
+ List<ApplicationId> failingApplications = ApplicationList.from(applications)
+ .failingUpgradeToVersionSince(systemVersion, controller().clock().instant())
+ .idList()
+ .stream()
+ .map(TenantAndApplicationId::defaultInstance)
+ .collect(Collectors.toUnmodifiableList());
+ // TODO jonmv: Send only tenant and application, here and elsewhere in this.
deploymentIssues.fileUnlessOpen(failingApplications, systemVersion);
}
- private Tenant ownerOf(ApplicationId applicationId) {
+ private Tenant ownerOf(TenantAndApplicationId applicationId) {
return controller().tenants().get(applicationId.tenant())
- .orElseThrow(() -> new IllegalStateException("No tenant found for application " + applicationId));
+ .orElseThrow(() -> new IllegalStateException("No tenant found for application " + applicationId));
}
private User userFor(Tenant tenant) {
@@ -111,13 +116,13 @@ public class DeploymentIssueReporter extends Maintainer {
}
/** File an issue for applicationId, if it doesn't already have an open issue associated with it. */
- private void fileDeploymentIssueFor(ApplicationId applicationId) {
+ private void fileDeploymentIssueFor(TenantAndApplicationId applicationId) {
try {
Tenant tenant = ownerOf(applicationId);
tenant.contact().ifPresent(contact -> {
User assignee = tenant.type() == Tenant.Type.user ? userFor(tenant) : null;
- Optional<IssueId> ourIssueId = controller().applications().require(applicationId).deploymentJobs().issueId();
- IssueId issueId = deploymentIssues.fileUnlessOpen(ourIssueId, applicationId, assignee, contact);
+ Optional<IssueId> ourIssueId = controller().applications().requireApplication(applicationId).deploymentIssueId();
+ IssueId issueId = deploymentIssues.fileUnlessOpen(ourIssueId, applicationId.defaultInstance(), assignee, contact);
store(applicationId, issueId);
});
}
@@ -127,8 +132,8 @@ public class DeploymentIssueReporter extends Maintainer {
}
/** Escalate issues for which there has been no activity for a certain amount of time. */
- private void escalateInactiveDeploymentIssues(Collection<Instance> instances) {
- instances.forEach(application -> application.deploymentJobs().issueId().ifPresent(issueId -> {
+ private void escalateInactiveDeploymentIssues(Collection<Application> applications) {
+ applications.forEach(application -> application.deploymentIssueId().ifPresent(issueId -> {
try {
Tenant tenant = ownerOf(application.id());
deploymentIssues.escalateIfInactive(issueId,
@@ -141,8 +146,8 @@ public class DeploymentIssueReporter extends Maintainer {
}));
}
- private void store(ApplicationId id, IssueId issueId) {
- controller().applications().lockIfPresent(id, application ->
+ private void store(TenantAndApplicationId id, IssueId issueId) {
+ controller().applications().lockApplicationIfPresent(id, application ->
controller().applications().store(application.withDeploymentIssueId(issueId)));
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java
index d77856a1661..162e46e19d2 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.hosted.controller.maintenance;
import com.yahoo.config.provision.SystemName;
import com.yahoo.log.LogLevel;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.ApplicationController;
import com.yahoo.vespa.hosted.controller.Controller;
@@ -41,40 +42,40 @@ public class DeploymentMetricsMaintainer extends Maintainer {
@Override
protected void maintain() {
AtomicInteger failures = new AtomicInteger(0);
+ AtomicInteger attempts = new AtomicInteger(0);
AtomicReference<Exception> lastException = new AtomicReference<>(null);
- List<Instance> instanceList = applications.asList();
// Run parallel stream inside a custom ForkJoinPool so that we can control the number of threads used
ForkJoinPool pool = new ForkJoinPool(applicationsToUpdateInParallel);
pool.submit(() ->
- instanceList.parallelStream().forEach(application -> {
- applications.lockIfPresent(application.id(), locked ->
- applications.store(locked.with(controller().metrics().getApplicationMetrics(application.id()))));
+ applications.asList().parallelStream().forEach(application -> {
+ for (Instance instance : application.instances().values())
+ for (Deployment deployment : instance.deployments().values()) {
+ attempts.incrementAndGet();
+ try {
+ if (deployment.version().getMajor() < 7) continue;
+ var collectedMetrics = controller().metrics().getDeploymentMetrics(instance.id(), deployment.zone());
+ var now = controller().clock().instant();
+ applications.lockApplicationIfPresent(application.id(), locked -> {
+ Deployment existingDeployment = locked.get().require(instance.name()).deployments().get(deployment.zone());
+ if (existingDeployment == null) return; // Deployment removed since we started collecting metrics
+ DeploymentMetrics newMetrics = existingDeployment.metrics()
+ .withQueriesPerSecond(collectedMetrics.queriesPerSecond())
+ .withWritesPerSecond(collectedMetrics.writesPerSecond())
+ .withDocumentCount(collectedMetrics.documentCount())
+ .withQueryLatencyMillis(collectedMetrics.queryLatencyMillis())
+ .withWriteLatencyMillis(collectedMetrics.writeLatencyMillis())
+ .at(now);
+ applications.store(locked.with(instance.name(),
+ lockedInstance -> lockedInstance.with(existingDeployment.zone(), newMetrics)
+ .recordActivityAt(now, existingDeployment.zone())));
- for (Deployment deployment : application.deployments().values()) {
- try {
- if (deployment.version().getMajor() < 7) continue;
- var collectedMetrics = controller().metrics().getDeploymentMetrics(application.id(), deployment.zone());
- var now = controller().clock().instant();
- applications.lockIfPresent(application.id(), locked -> {
- Deployment existingDeployment = locked.get().deployments().get(deployment.zone());
- if (existingDeployment == null) return; // Deployment removed since we started collecting metrics
- DeploymentMetrics newMetrics = existingDeployment.metrics()
- .withQueriesPerSecond(collectedMetrics.queriesPerSecond())
- .withWritesPerSecond(collectedMetrics.writesPerSecond())
- .withDocumentCount(collectedMetrics.documentCount())
- .withQueryLatencyMillis(collectedMetrics.queryLatencyMillis())
- .withWriteLatencyMillis(collectedMetrics.writeLatencyMillis())
- .at(now);
- applications.store(locked.with(existingDeployment.zone(), newMetrics)
- .recordActivityAt(now, existingDeployment.zone()));
-
- });
- } catch (Exception e) {
- failures.incrementAndGet();
- lastException.set(e);
+ });
+ } catch (Exception e) {
+ failures.incrementAndGet();
+ lastException.set(e);
+ }
}
- }
})
);
pool.shutdown();
@@ -84,7 +85,7 @@ public class DeploymentMetricsMaintainer extends Maintainer {
log.log(LogLevel.WARNING,
String.format("Failed to gather metrics for %d/%d applications. Retrying in %s. Last error: %s",
failures.get(),
- instanceList.size(),
+ attempts.get(),
maintenanceInterval(),
Exceptions.toMessageString(lastException.get())));
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporter.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporter.java
index ba3b2118e9f..79ababd20d3 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporter.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporter.java
@@ -4,10 +4,10 @@ package com.yahoo.vespa.hosted.controller.maintenance;
import com.google.common.collect.ImmutableMap;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.jdisc.Metric;
-import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.Controller;
+import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
-import com.yahoo.vespa.hosted.controller.application.InstanceList;
+import com.yahoo.vespa.hosted.controller.application.ApplicationList;
import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics;
import com.yahoo.vespa.hosted.controller.application.JobList;
@@ -61,24 +61,26 @@ public class MetricsReporter extends Maintainer {
}
private void reportDeploymentMetrics() {
- InstanceList applications = InstanceList.from(controller().applications().asList())
- .hasProductionDeployment();
+ List<Instance> instances = ApplicationList.from(controller().applications().asList())
+ .withProductionDeployment().asList().stream()
+ .flatMap(application -> application.instances().values().stream())
+ .collect(Collectors.toUnmodifiableList());
- metric.set(DEPLOYMENT_FAIL_METRIC, deploymentFailRatio(applications) * 100, metric.createContext(Collections.emptyMap()));
+ metric.set(DEPLOYMENT_FAIL_METRIC, deploymentFailRatio(instances) * 100, metric.createContext(Collections.emptyMap()));
- averageDeploymentDurations(applications, clock.instant()).forEach((application, duration) -> {
+ averageDeploymentDurations(instances, clock.instant()).forEach((application, duration) -> {
metric.set(DEPLOYMENT_AVERAGE_DURATION, duration.getSeconds(), metric.createContext(dimensions(application)));
});
- deploymentsFailingUpgrade(applications).forEach((application, failingJobs) -> {
+ deploymentsFailingUpgrade(instances).forEach((application, failingJobs) -> {
metric.set(DEPLOYMENT_FAILING_UPGRADES, failingJobs, metric.createContext(dimensions(application)));
});
- deploymentWarnings(applications).forEach((application, warnings) -> {
+ deploymentWarnings(instances).forEach((application, warnings) -> {
metric.set(DEPLOYMENT_WARNINGS, warnings, metric.createContext(dimensions(application)));
});
- for (Instance instance : applications.asList())
+ for (Instance instance : instances)
instance.deploymentJobs().statusOf(JobType.component)
.flatMap(JobStatus::lastSuccess)
.flatMap(run -> run.application().buildTime())
@@ -92,24 +94,21 @@ public class MetricsReporter extends Maintainer {
metric.createContext(Map.of()));
}
- private static double deploymentFailRatio(InstanceList instanceList) {
- List<Instance> instances = instanceList.asList();
- if (instances.isEmpty()) return 0;
-
- return (double) instances.stream().filter(a -> a.deploymentJobs().hasFailures()).count() /
- (double) instances.size();
+ private static double deploymentFailRatio(List<Instance> instances) {
+ return instances.stream()
+ .mapToInt(instance -> instance.deploymentJobs().hasFailures() ? 1 : 0)
+ .average().orElse(0);
}
- private static Map<ApplicationId, Duration> averageDeploymentDurations(InstanceList applications, Instant now) {
- return applications.asList().stream()
- .collect(Collectors.toMap(Instance::id,
- application -> averageDeploymentDuration(application, now)));
+ private static Map<ApplicationId, Duration> averageDeploymentDurations(List<Instance> instances, Instant now) {
+ return instances.stream()
+ .collect(Collectors.toMap(Instance::id,
+ instance -> averageDeploymentDuration(instance, now)));
}
- private static Map<ApplicationId, Integer> deploymentsFailingUpgrade(InstanceList applications) {
- return applications.asList()
- .stream()
- .collect(Collectors.toMap(Instance::id, MetricsReporter::deploymentsFailingUpgrade));
+ private static Map<ApplicationId, Integer> deploymentsFailingUpgrade(List<Instance> instances) {
+ return instances.stream()
+ .collect(Collectors.toMap(Instance::id, MetricsReporter::deploymentsFailingUpgrade));
}
private static int deploymentsFailingUpgrade(Instance instance) {
@@ -134,9 +133,9 @@ public class MetricsReporter extends Maintainer {
.orElse(Duration.ZERO);
}
- private static Map<ApplicationId, Integer> deploymentWarnings(InstanceList applications) {
- return applications.asList().stream()
- .collect(Collectors.toMap(Instance::id, a -> maxWarningCountOf(a.deployments().values())));
+ private static Map<ApplicationId, Integer> deploymentWarnings(List<Instance> instances) {
+ return instances.stream()
+ .collect(Collectors.toMap(Instance::id, a -> maxWarningCountOf(a.deployments().values())));
}
private static int maxWarningCountOf(Collection<Deployment> deployments) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OutstandingChangeDeployer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OutstandingChangeDeployer.java
index 047905c0841..95e1c53f10c 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OutstandingChangeDeployer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OutstandingChangeDeployer.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.maintenance;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.Controller;
@@ -19,11 +20,11 @@ public class OutstandingChangeDeployer extends Maintainer {
@Override
protected void maintain() {
- for (Instance instance : controller().applications().asList()) {
- if (instance.outstandingChange().hasTargets()
- && instance.deploymentSpec().canChangeRevisionAt(controller().clock().instant())) {
- controller().applications().deploymentTrigger().triggerChange(instance.id(),
- instance.outstandingChange());
+ for (Application application : controller().applications().asList()) {
+ if (application.outstandingChange().hasTargets()
+ && application.deploymentSpec().canChangeRevisionAt(controller().clock().instant())) {
+ controller().applications().deploymentTrigger().triggerChange(application.id(),
+ application.outstandingChange());
}
}
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/RotationStatusUpdater.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/RotationStatusUpdater.java
index 6c7d4cab908..e047051558b 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/RotationStatusUpdater.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/RotationStatusUpdater.java
@@ -3,11 +3,12 @@ package com.yahoo.vespa.hosted.controller.maintenance;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.log.LogLevel;
-import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.ApplicationController;
import com.yahoo.vespa.hosted.controller.Controller;
+import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.api.integration.routing.GlobalRoutingService;
-import com.yahoo.vespa.hosted.controller.application.InstanceList;
+import com.yahoo.vespa.hosted.controller.application.ApplicationList;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.rotation.RotationId;
import com.yahoo.vespa.hosted.controller.rotation.RotationState;
import com.yahoo.vespa.hosted.controller.rotation.RotationStatus;
@@ -42,16 +43,22 @@ public class RotationStatusUpdater extends Maintainer {
@Override
protected void maintain() {
var failures = new AtomicInteger(0);
+ var attempts = new AtomicInteger(0);
var lastException = new AtomicReference<Exception>(null);
- var applicationList = InstanceList.from(applications.asList()).hasRotation();
+ var instancesWithRotations = ApplicationList.from(applications.asList()).hasRotation().asList().stream()
+ .flatMap(application -> application.instances().values().stream())
+ .filter(instance -> ! instance.rotations().isEmpty());
// Run parallel stream inside a custom ForkJoinPool so that we can control the number of threads used
var pool = new ForkJoinPool(applicationsToUpdateInParallel);
pool.submit(() -> {
- applicationList.asList().parallelStream().forEach(application -> {
+ instancesWithRotations.parallel().forEach(instance -> {
+ attempts.incrementAndGet();
try {
- applications.lockIfPresent(application.id(), (app) -> applications.store(app.with(getStatus(app.get()))));
+ RotationStatus status = getStatus(instance);
+ applications.lockApplicationIfPresent(TenantAndApplicationId.from(instance.id()), app ->
+ applications.store(app.with(instance.name(), locked -> locked.with(status))));
} catch (Exception e) {
failures.incrementAndGet();
lastException.set(e);
@@ -64,7 +71,7 @@ public class RotationStatusUpdater extends Maintainer {
if (lastException.get() != null) {
log.log(LogLevel.WARNING, String.format("Failed to get global routing status of %d/%d applications. Retrying in %s. Last error: ",
failures.get(),
- applicationList.size(),
+ attempts.get(),
maintenanceInterval()),
lastException.get());
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/RoutingPolicies.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/RoutingPolicies.java
index b2006c4c1f4..d6080bcda6c 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/RoutingPolicies.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/RoutingPolicies.java
@@ -51,7 +51,7 @@ public class RoutingPolicies {
}
}
- /** Read all known routing policies for given application */
+ /** Read all known routing policies for given instance */
public Set<RoutingPolicy> get(ApplicationId application) {
return db.readRoutingPolicies(application);
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/Upgrader.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/Upgrader.java
index c4ca94e169d..73b135aa912 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/Upgrader.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/Upgrader.java
@@ -4,9 +4,9 @@ package com.yahoo.vespa.hosted.controller.maintenance;
import com.yahoo.component.Version;
import com.yahoo.config.application.api.DeploymentSpec.UpgradePolicy;
import com.yahoo.vespa.curator.Lock;
-import com.yahoo.vespa.hosted.controller.Instance;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Controller;
-import com.yahoo.vespa.hosted.controller.application.InstanceList;
+import com.yahoo.vespa.hosted.controller.application.ApplicationList;
import com.yahoo.vespa.hosted.controller.application.Change;
import com.yahoo.vespa.hosted.controller.persistence.CuratorDb;
import com.yahoo.vespa.hosted.controller.versions.VespaVersion;
@@ -70,7 +70,7 @@ public class Upgrader extends Maintainer {
cancelUpgradesOf(applications().with(UpgradePolicy.conservative).upgrading().failing().notUpgradingTo(conservativeTargets), reason);
// Schedule the right upgrades
- InstanceList applications = applications();
+ ApplicationList applications = applications();
canaryTarget.ifPresent(target -> upgrade(applications.with(UpgradePolicy.canary), target));
defaultTargets.forEach(target -> upgrade(applications.with(UpgradePolicy.defaultPolicy), target));
conservativeTargets.forEach(target -> upgrade(applications.with(UpgradePolicy.conservative), target));
@@ -90,12 +90,12 @@ public class Upgrader extends Maintainer {
}
/** Returns a list of all applications, except those which are pinned — these should not be manipulated by the Upgrader */
- private InstanceList applications() {
- return InstanceList.from(controller().applications().asList()).unpinned();
+ private ApplicationList applications() {
+ return ApplicationList.from(controller().applications().asList()).unpinned();
}
- private void upgrade(InstanceList applications, Version version) {
- applications = applications.hasProductionDeployment();
+ private void upgrade(ApplicationList applications, Version version) {
+ applications = applications.withProductionDeployment();
applications = applications.onLowerVersionThan(version);
applications = applications.allowMajorVersion(version.getMajor(), targetMajorVersion().orElse(version.getMajor()));
applications = applications.notDeploying(); // wait with applications deploying an application change or already upgrading
@@ -103,15 +103,15 @@ public class Upgrader extends Maintainer {
applications = applications.canUpgradeAt(controller().clock().instant()); // wait with applications that are currently blocking upgrades
applications = applications.byIncreasingDeployedVersion(); // start with lowest versions
applications = applications.first(numberOfApplicationsToUpgrade()); // throttle upgrades
- for (Instance instance : applications.asList())
- controller().applications().deploymentTrigger().triggerChange(instance.id(), Change.of(version));
+ for (Application application : applications.asList())
+ controller().applications().deploymentTrigger().triggerChange(application.id(), Change.of(version));
}
- private void cancelUpgradesOf(InstanceList applications, String reason) {
+ private void cancelUpgradesOf(ApplicationList applications, String reason) {
if (applications.isEmpty()) return;
log.info("Cancelling upgrading of " + applications.asList().size() + " applications: " + reason);
- for (Instance instance : applications.asList())
- controller().applications().deploymentTrigger().cancelChange(instance.id(), PLATFORM);
+ for (Application application : applications.asList())
+ controller().applications().deploymentTrigger().cancelChange(application.id(), PLATFORM);
}
/** Returns the number of applications to upgrade in this run */
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java
new file mode 100644
index 00000000000..1a2524fa05e
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java
@@ -0,0 +1,612 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.persistence;
+
+import com.yahoo.component.Version;
+import com.yahoo.config.application.api.DeploymentSpec;
+import com.yahoo.config.application.api.ValidationOverrides;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.InstanceName;
+import com.yahoo.config.provision.zone.ZoneId;
+import com.yahoo.slime.ArrayTraverser;
+import com.yahoo.slime.Cursor;
+import com.yahoo.slime.Inspector;
+import com.yahoo.slime.ObjectTraverser;
+import com.yahoo.slime.Slime;
+import com.yahoo.vespa.hosted.controller.Application;
+import com.yahoo.vespa.hosted.controller.Instance;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.SourceRevision;
+import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueId;
+import com.yahoo.vespa.hosted.controller.api.integration.organization.User;
+import com.yahoo.vespa.hosted.controller.application.AssignedRotation;
+import com.yahoo.vespa.hosted.controller.application.Change;
+import com.yahoo.vespa.hosted.controller.application.ClusterInfo;
+import com.yahoo.vespa.hosted.controller.application.ClusterUtilization;
+import com.yahoo.vespa.hosted.controller.application.Deployment;
+import com.yahoo.vespa.hosted.controller.application.DeploymentActivity;
+import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
+import com.yahoo.vespa.hosted.controller.application.DeploymentJobs.JobError;
+import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics;
+import com.yahoo.vespa.hosted.controller.application.EndpointId;
+import com.yahoo.vespa.hosted.controller.application.JobStatus;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
+import com.yahoo.vespa.hosted.controller.metric.ApplicationMetrics;
+import com.yahoo.vespa.hosted.controller.rotation.RotationId;
+import com.yahoo.vespa.hosted.controller.rotation.RotationState;
+import com.yahoo.vespa.hosted.controller.rotation.RotationStatus;
+
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.OptionalInt;
+import java.util.OptionalLong;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+/**
+ * Serializes {@link Application}s to/from slime.
+ * This class is multithread safe.
+ *
+ * @author jonmv
+ */
+public class ApplicationSerializer {
+
+ // WARNING: Since there are multiple servers in a ZooKeeper cluster and they upgrade one by one
+ // (and rewrite all nodes on startup), changes to the serialized format must be made
+ // such that what is serialized on version N+1 can be read by version N:
+ // - ADDING FIELDS: Always ok
+ // - REMOVING FIELDS: Stop reading the field first. Stop writing it on a later version.
+ // - CHANGING THE FORMAT OF A FIELD: Don't do it bro.
+
+ // Application fields
+ private static final String idField = "id";
+ private static final String createdAtField = "createdAt";
+ private static final String deploymentSpecField = "deploymentSpecField";
+ private static final String validationOverridesField = "validationOverrides";
+ private static final String instancesField = "instances";
+ private static final String deployingField = "deployingField";
+ private static final String projectIdField = "projectId";
+ private static final String builtInternallyField = "builtInternally";
+ private static final String pinnedField = "pinned";
+ private static final String outstandingChangeField = "outstandingChangeField";
+ private static final String deploymentIssueField = "deploymentIssueId";
+ private static final String ownershipIssueIdField = "ownershipIssueId";
+ private static final String ownerField = "confirmedOwner";
+ private static final String majorVersionField = "majorVersion";
+ private static final String writeQualityField = "writeQuality";
+ private static final String queryQualityField = "queryQuality";
+ private static final String pemDeployKeyField = "pemDeployKeys";
+ private static final String assignedRotationClusterField = "clusterId";
+ private static final String assignedRotationRotationField = "rotationId";
+ private static final String applicationCertificateField = "applicationCertificate";
+
+ // Instance fields
+ private static final String instanceNameField = "instanceName";
+ private static final String deploymentsField = "deployments";
+ private static final String deploymentJobsField = "deploymentJobs";
+ private static final String assignedRotationsField = "assignedRotations";
+ private static final String assignedRotationEndpointField = "endpointId";
+
+ // Deployment fields
+ private static final String zoneField = "zone";
+ private static final String environmentField = "environment";
+ private static final String regionField = "region";
+ private static final String deployTimeField = "deployTime";
+ private static final String applicationBuildNumberField = "applicationBuildNumber";
+ private static final String applicationPackageRevisionField = "applicationPackageRevision";
+ private static final String sourceRevisionField = "sourceRevision";
+ private static final String repositoryField = "repositoryField";
+ private static final String branchField = "branchField";
+ private static final String commitField = "commitField";
+ private static final String authorEmailField = "authorEmailField";
+ private static final String compileVersionField = "compileVersion";
+ private static final String buildTimeField = "buildTime";
+ private static final String lastQueriedField = "lastQueried";
+ private static final String lastWrittenField = "lastWritten";
+ private static final String lastQueriesPerSecondField = "lastQueriesPerSecond";
+ private static final String lastWritesPerSecondField = "lastWritesPerSecond";
+
+ // DeploymentJobs fields
+ private static final String jobStatusField = "jobStatus";
+
+ // JobStatus field
+ private static final String jobTypeField = "jobType";
+ private static final String errorField = "jobError";
+ private static final String lastTriggeredField = "lastTriggered";
+ private static final String lastCompletedField = "lastCompleted";
+ private static final String firstFailingField = "firstFailing";
+ private static final String lastSuccessField = "lastSuccess";
+ private static final String pausedUntilField = "pausedUntil";
+
+ // JobRun fields
+ private static final String jobRunIdField = "id";
+ private static final String versionField = "version";
+ private static final String revisionField = "revision";
+ private static final String sourceVersionField = "sourceVersion";
+ private static final String sourceApplicationField = "sourceRevision";
+ private static final String reasonField = "reason";
+ private static final String atField = "at";
+
+ // ClusterInfo fields
+ private static final String clusterInfoField = "clusterInfo";
+ private static final String clusterInfoFlavorField = "flavor";
+ private static final String clusterInfoCostField = "cost";
+ private static final String clusterInfoCpuField = "flavorCpu";
+ private static final String clusterInfoMemField = "flavorMem";
+ private static final String clusterInfoDiskField = "flavorDisk";
+ private static final String clusterInfoTypeField = "clusterType";
+ private static final String clusterInfoHostnamesField = "hostnames";
+
+ // ClusterUtils fields
+ private static final String clusterUtilsField = "clusterUtils";
+ private static final String clusterUtilsCpuField = "cpu";
+ private static final String clusterUtilsMemField = "mem";
+ private static final String clusterUtilsDiskField = "disk";
+ private static final String clusterUtilsDiskBusyField = "diskbusy";
+
+ // Deployment metrics fields
+ private static final String deploymentMetricsField = "metrics";
+ private static final String deploymentMetricsQPSField = "queriesPerSecond";
+ private static final String deploymentMetricsWPSField = "writesPerSecond";
+ private static final String deploymentMetricsDocsField = "documentCount";
+ private static final String deploymentMetricsQueryLatencyField = "queryLatencyMillis";
+ private static final String deploymentMetricsWriteLatencyField = "writeLatencyMillis";
+ private static final String deploymentMetricsUpdateTime = "lastUpdated";
+ private static final String deploymentMetricsWarningsField = "warnings";
+
+ // RotationStatus fields
+ private static final String rotationStatusField = "rotationStatus2";
+ private static final String rotationIdField = "rotationId";
+ private static final String rotationStateField = "state";
+ private static final String statusField = "status";
+
+ // ------------------ Serialization
+
+ public Slime toSlime(Application application) {
+ Slime slime = new Slime();
+ Cursor root = slime.setObject();
+ root.setString(idField, application.id().serialized());
+ root.setLong(createdAtField, application.createdAt().toEpochMilli());
+ root.setString(deploymentSpecField, application.deploymentSpec().xmlForm());
+ root.setString(validationOverridesField, application.validationOverrides().xmlForm());
+ application.projectId().ifPresent(projectId -> root.setLong(projectIdField, projectId));
+ application.deploymentIssueId().ifPresent(jiraIssueId -> root.setString(deploymentIssueField, jiraIssueId.value()));
+ application.ownershipIssueId().ifPresent(issueId -> root.setString(ownershipIssueIdField, issueId.value()));
+ root.setBool(builtInternallyField, application.internal());
+ toSlime(application.change(), root, deployingField);
+ toSlime(application.outstandingChange(), root, outstandingChangeField);
+ application.owner().ifPresent(owner -> root.setString(ownerField, owner.username()));
+ application.majorVersion().ifPresent(majorVersion -> root.setLong(majorVersionField, majorVersion));
+ root.setDouble(queryQualityField, application.metrics().queryServiceQuality());
+ root.setDouble(writeQualityField, application.metrics().writeServiceQuality());
+ deployKeysToSlime(application.pemDeployKey().stream(), root.setArray(pemDeployKeyField));
+ instancesToSlime(application, root.setArray(instancesField));
+ return slime;
+ }
+
+ private void instancesToSlime(Application application, Cursor array) {
+ for (Instance instance : application.instances().values()) {
+ Cursor instanceObject = array.addObject();
+ instanceObject.setString(instanceNameField, instance.name().value());
+ deploymentsToSlime(instance.deployments().values(), instanceObject.setArray(deploymentsField));
+ toSlime(instance.deploymentJobs(), instanceObject.setObject(deploymentJobsField));
+ assignedRotationsToSlime(instance.rotations(), instanceObject, assignedRotationsField);
+ toSlime(instance.rotationStatus(), instanceObject.setArray(rotationStatusField));
+ }
+ }
+
+ private void deployKeysToSlime(Stream<String> pemDeployKeys, Cursor array) {
+ pemDeployKeys.forEach(array::addString);
+ }
+ private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) {
+ for (Deployment deployment : deployments)
+ deploymentToSlime(deployment, array.addObject());
+ }
+
+ private void deploymentToSlime(Deployment deployment, Cursor object) {
+ zoneIdToSlime(deployment.zone(), object.setObject(zoneField));
+ object.setString(versionField, deployment.version().toString());
+ object.setLong(deployTimeField, deployment.at().toEpochMilli());
+ toSlime(deployment.applicationVersion(), object.setObject(applicationPackageRevisionField));
+ clusterInfoToSlime(deployment.clusterInfo(), object);
+ clusterUtilsToSlime(deployment.clusterUtils(), object);
+ deploymentMetricsToSlime(deployment.metrics(), object);
+ deployment.activity().lastQueried().ifPresent(instant -> object.setLong(lastQueriedField, instant.toEpochMilli()));
+ deployment.activity().lastWritten().ifPresent(instant -> object.setLong(lastWrittenField, instant.toEpochMilli()));
+ deployment.activity().lastQueriesPerSecond().ifPresent(value -> object.setDouble(lastQueriesPerSecondField, value));
+ deployment.activity().lastWritesPerSecond().ifPresent(value -> object.setDouble(lastWritesPerSecondField, value));
+ }
+
+ private void deploymentMetricsToSlime(DeploymentMetrics metrics, Cursor object) {
+ Cursor root = object.setObject(deploymentMetricsField);
+ root.setDouble(deploymentMetricsQPSField, metrics.queriesPerSecond());
+ root.setDouble(deploymentMetricsWPSField, metrics.writesPerSecond());
+ root.setDouble(deploymentMetricsDocsField, metrics.documentCount());
+ root.setDouble(deploymentMetricsQueryLatencyField, metrics.queryLatencyMillis());
+ root.setDouble(deploymentMetricsWriteLatencyField, metrics.writeLatencyMillis());
+ metrics.instant().ifPresent(instant -> root.setLong(deploymentMetricsUpdateTime, instant.toEpochMilli()));
+ if (!metrics.warnings().isEmpty()) {
+ Cursor warningsObject = root.setObject(deploymentMetricsWarningsField);
+ metrics.warnings().forEach((warning, count) -> warningsObject.setLong(warning.name(), count));
+ }
+ }
+
+ private void clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) {
+ Cursor root = object.setObject(clusterInfoField);
+ for (Map.Entry<ClusterSpec.Id, ClusterInfo> entry : clusters.entrySet()) {
+ toSlime(entry.getValue(), root.setObject(entry.getKey().value()));
+ }
+ }
+
+ private void toSlime(ClusterInfo info, Cursor object) {
+ object.setString(clusterInfoFlavorField, info.getFlavor());
+ object.setLong(clusterInfoCostField, info.getFlavorCost());
+ object.setDouble(clusterInfoCpuField, info.getFlavorCPU());
+ object.setDouble(clusterInfoMemField, info.getFlavorMem());
+ object.setDouble(clusterInfoDiskField, info.getFlavorDisk());
+ object.setString(clusterInfoTypeField, info.getClusterType().name());
+ Cursor array = object.setArray(clusterInfoHostnamesField);
+ for (String host : info.getHostnames()) {
+ array.addString(host);
+ }
+ }
+
+ private void clusterUtilsToSlime(Map<ClusterSpec.Id, ClusterUtilization> clusters, Cursor object) {
+ Cursor root = object.setObject(clusterUtilsField);
+ for (Map.Entry<ClusterSpec.Id, ClusterUtilization> entry : clusters.entrySet()) {
+ toSlime(entry.getValue(), root.setObject(entry.getKey().value()));
+ }
+ }
+
+ private void toSlime(ClusterUtilization utils, Cursor object) {
+ object.setDouble(clusterUtilsCpuField, utils.getCpu());
+ object.setDouble(clusterUtilsMemField, utils.getMemory());
+ object.setDouble(clusterUtilsDiskField, utils.getDisk());
+ object.setDouble(clusterUtilsDiskBusyField, utils.getDiskBusy());
+ }
+
+ private void zoneIdToSlime(ZoneId zone, Cursor object) {
+ object.setString(environmentField, zone.environment().value());
+ object.setString(regionField, zone.region().value());
+ }
+
+ private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
+ if (applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent()) {
+ object.setLong(applicationBuildNumberField, applicationVersion.buildNumber().getAsLong());
+ toSlime(applicationVersion.source().get(), object.setObject(sourceRevisionField));
+ applicationVersion.authorEmail().ifPresent(email -> object.setString(authorEmailField, email));
+ applicationVersion.compileVersion().ifPresent(version -> object.setString(compileVersionField, version.toString()));
+ applicationVersion.buildTime().ifPresent(time -> object.setLong(buildTimeField, time.toEpochMilli()));
+ }
+ }
+
+ private void toSlime(SourceRevision sourceRevision, Cursor object) {
+ object.setString(repositoryField, sourceRevision.repository());
+ object.setString(branchField, sourceRevision.branch());
+ object.setString(commitField, sourceRevision.commit());
+ }
+
+ private void toSlime(DeploymentJobs deploymentJobs, Cursor cursor) {
+ jobStatusToSlime(deploymentJobs.jobStatus().values(), cursor.setArray(jobStatusField));
+ }
+
+ private void jobStatusToSlime(Collection<JobStatus> jobStatuses, Cursor jobStatusArray) {
+ for (JobStatus jobStatus : jobStatuses)
+ toSlime(jobStatus, jobStatusArray.addObject());
+ }
+
+ private void toSlime(JobStatus jobStatus, Cursor object) {
+ object.setString(jobTypeField, jobStatus.type().jobName());
+ if (jobStatus.jobError().isPresent())
+ object.setString(errorField, jobStatus.jobError().get().name());
+
+ jobStatus.lastTriggered().ifPresent(run -> jobRunToSlime(run, object, lastTriggeredField));
+ jobStatus.lastCompleted().ifPresent(run -> jobRunToSlime(run, object, lastCompletedField));
+ jobStatus.lastSuccess().ifPresent(run -> jobRunToSlime(run, object, lastSuccessField));
+ jobStatus.firstFailing().ifPresent(run -> jobRunToSlime(run, object, firstFailingField));
+ jobStatus.pausedUntil().ifPresent(until -> object.setLong(pausedUntilField, until));
+ }
+
+ private void jobRunToSlime(JobStatus.JobRun jobRun, Cursor parent, String jobRunObjectName) {
+ Cursor object = parent.setObject(jobRunObjectName);
+ object.setLong(jobRunIdField, jobRun.id());
+ object.setString(versionField, jobRun.platform().toString());
+ toSlime(jobRun.application(), object.setObject(revisionField));
+ jobRun.sourcePlatform().ifPresent(version -> object.setString(sourceVersionField, version.toString()));
+ jobRun.sourceApplication().ifPresent(version -> toSlime(version, object.setObject(sourceApplicationField)));
+ object.setString(reasonField, jobRun.reason());
+ object.setLong(atField, jobRun.at().toEpochMilli());
+ }
+
+ private void toSlime(Change deploying, Cursor parentObject, String fieldName) {
+ if (deploying.isEmpty()) return;
+
+ Cursor object = parentObject.setObject(fieldName);
+ if (deploying.platform().isPresent())
+ object.setString(versionField, deploying.platform().get().toString());
+ if (deploying.application().isPresent())
+ toSlime(deploying.application().get(), object);
+ if (deploying.isPinned())
+ object.setBool(pinnedField, true);
+ }
+
+ private void toSlime(RotationStatus status, Cursor array) {
+ status.asMap().forEach((rotationId, zoneStatus) -> {
+ Cursor rotationObject = array.addObject();
+ rotationObject.setString(rotationIdField, rotationId.asString());
+ Cursor statusArray = rotationObject.setArray(statusField);
+ zoneStatus.forEach((zone, state) -> {
+ Cursor statusObject = statusArray.addObject();
+ zoneIdToSlime(zone, statusObject);
+ statusObject.setString(rotationStateField, state.name());
+ });
+ });
+ }
+
+ private void assignedRotationsToSlime(List<AssignedRotation> rotations, Cursor parent, String fieldName) {
+ var rotationsArray = parent.setArray(fieldName);
+ for (var rotation : rotations) {
+ var object = rotationsArray.addObject();
+ object.setString(assignedRotationEndpointField, rotation.endpointId().id());
+ object.setString(assignedRotationRotationField, rotation.rotationId().asString());
+ object.setString(assignedRotationClusterField, rotation.clusterId().value());
+ }
+ }
+
+ // ------------------ Deserialization
+
+ public Application fromSlime(Slime slime) {
+ Inspector root = slime.get();
+
+ TenantAndApplicationId id = TenantAndApplicationId.fromSerialized(root.field(idField).asString());
+ Instant createdAt = Instant.ofEpochMilli(root.field(createdAtField).asLong());
+ DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString(), false);
+ ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString());
+ Change deploying = changeFromSlime(root.field(deployingField));
+ Change outstandingChange = changeFromSlime(root.field(outstandingChangeField));
+ Optional<IssueId> deploymentIssueId = Serializers.optionalString(root.field(deploymentIssueField)).map(IssueId::from);
+ Optional<IssueId> ownershipIssueId = Serializers.optionalString(root.field(ownershipIssueIdField)).map(IssueId::from);
+ Optional<User> owner = Serializers.optionalString(root.field(ownerField)).map(User::from);
+ OptionalInt majorVersion = Serializers.optionalInteger(root.field(majorVersionField));
+ ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(),
+ root.field(writeQualityField).asDouble());
+ List<String> pemDeployKeys = pemDeployKeysFromSlime(root.field(pemDeployKeyField));
+ List<Instance> instances = instancesFromSlime(id, deploymentSpec, root.field(instancesField));
+ OptionalLong projectId = Serializers.optionalLong(root.field(projectIdField));
+ boolean builtInternally = root.field(builtInternallyField).asBool();
+
+ return new Application(id, createdAt, deploymentSpec, validationOverrides, deploying, outstandingChange,
+ deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics,
+ pemDeployKeys.stream().findFirst(), projectId, builtInternally, instances);
+ }
+
+ private List<Instance> instancesFromSlime(TenantAndApplicationId id, DeploymentSpec deploymentSpec, Inspector field) {
+ List<Instance> instances = new ArrayList<>();
+ field.traverse((ArrayTraverser) (name, object) -> {
+ InstanceName instanceName = InstanceName.from(object.field(instanceNameField).asString());
+ List<Deployment> deployments = deploymentsFromSlime(object.field(deploymentsField));
+ DeploymentJobs deploymentJobs = deploymentJobsFromSlime(object.field(deploymentJobsField));
+ List<AssignedRotation> assignedRotations = assignedRotationsFromSlime(deploymentSpec, object);
+ RotationStatus rotationStatus = rotationStatusFromSlime(object);
+ instances.add(new Instance(id.instance(instanceName),
+ deployments,
+ deploymentJobs,
+ assignedRotations,
+ rotationStatus));
+ });
+ return instances;
+ }
+
+ private List<String> pemDeployKeysFromSlime(Inspector array) {
+ List<String> keys = new ArrayList<>();
+ array.traverse((ArrayTraverser) (__, key) -> keys.add(key.asString()));
+ return keys;
+ }
+ private List<Deployment> deploymentsFromSlime(Inspector array) {
+ List<Deployment> deployments = new ArrayList<>();
+ array.traverse((ArrayTraverser) (int i, Inspector item) -> deployments.add(deploymentFromSlime(item)));
+ return deployments;
+ }
+
+ private Deployment deploymentFromSlime(Inspector deploymentObject) {
+ return new Deployment(zoneIdFromSlime(deploymentObject.field(zoneField)),
+ applicationVersionFromSlime(deploymentObject.field(applicationPackageRevisionField)),
+ Version.fromString(deploymentObject.field(versionField).asString()),
+ Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()),
+ clusterUtilsMapFromSlime(deploymentObject.field(clusterUtilsField)),
+ clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField)),
+ deploymentMetricsFromSlime(deploymentObject.field(deploymentMetricsField)),
+ DeploymentActivity.create(Serializers.optionalInstant(deploymentObject.field(lastQueriedField)),
+ Serializers.optionalInstant(deploymentObject.field(lastWrittenField)),
+ Serializers.optionalDouble(deploymentObject.field(lastQueriesPerSecondField)),
+ Serializers.optionalDouble(deploymentObject.field(lastWritesPerSecondField))));
+ }
+
+ private DeploymentMetrics deploymentMetricsFromSlime(Inspector object) {
+ Optional<Instant> instant = object.field(deploymentMetricsUpdateTime).valid() ?
+ Optional.of(Instant.ofEpochMilli(object.field(deploymentMetricsUpdateTime).asLong())) :
+ Optional.empty();
+ return new DeploymentMetrics(object.field(deploymentMetricsQPSField).asDouble(),
+ object.field(deploymentMetricsWPSField).asDouble(),
+ object.field(deploymentMetricsDocsField).asDouble(),
+ object.field(deploymentMetricsQueryLatencyField).asDouble(),
+ object.field(deploymentMetricsWriteLatencyField).asDouble(),
+ instant,
+ deploymentWarningsFrom(object.field(deploymentMetricsWarningsField)));
+ }
+
+ private Map<DeploymentMetrics.Warning, Integer> deploymentWarningsFrom(Inspector object) {
+ Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>();
+ object.traverse((ObjectTraverser) (name, value) -> warnings.put(DeploymentMetrics.Warning.valueOf(name),
+ (int) value.asLong()));
+ return Collections.unmodifiableMap(warnings);
+ }
+
+ private RotationStatus rotationStatusFromSlime(Inspector parentObject) {
+ var object = parentObject.field(rotationStatusField);
+ var statusMap = new LinkedHashMap<RotationId, Map<ZoneId, RotationState>>();
+ object.traverse((ArrayTraverser) (idx, statusObject) -> statusMap.put(new RotationId(statusObject.field(rotationIdField).asString()),
+ singleRotationStatusFromSlime(statusObject.field(statusField))));
+ return RotationStatus.from(statusMap);
+ }
+
+ private Map<ZoneId, RotationState> singleRotationStatusFromSlime(Inspector object) {
+ if (!object.valid()) {
+ return Collections.emptyMap();
+ }
+ Map<ZoneId, RotationState> rotationStatus = new LinkedHashMap<>();
+ object.traverse((ArrayTraverser) (idx, statusObject) -> {
+ var zone = zoneIdFromSlime(statusObject);
+ var status = RotationState.valueOf(statusObject.field(rotationStateField).asString());
+ rotationStatus.put(zone, status);
+ });
+ return Collections.unmodifiableMap(rotationStatus);
+ }
+
+ private Map<ClusterSpec.Id, ClusterInfo> clusterInfoMapFromSlime (Inspector object) {
+ Map<ClusterSpec.Id, ClusterInfo> map = new HashMap<>();
+ object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterInfoFromSlime(value)));
+ return map;
+ }
+
+ private Map<ClusterSpec.Id, ClusterUtilization> clusterUtilsMapFromSlime(Inspector object) {
+ Map<ClusterSpec.Id, ClusterUtilization> map = new HashMap<>();
+ object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterUtililzationFromSlime(value)));
+ return map;
+ }
+
+ private ClusterUtilization clusterUtililzationFromSlime(Inspector object) {
+ double cpu = object.field(clusterUtilsCpuField).asDouble();
+ double mem = object.field(clusterUtilsMemField).asDouble();
+ double disk = object.field(clusterUtilsDiskField).asDouble();
+ double diskBusy = object.field(clusterUtilsDiskBusyField).asDouble();
+
+ return new ClusterUtilization(mem, cpu, disk, diskBusy);
+ }
+
+ private ClusterInfo clusterInfoFromSlime(Inspector inspector) {
+ String flavor = inspector.field(clusterInfoFlavorField).asString();
+ int cost = (int)inspector.field(clusterInfoCostField).asLong();
+ String type = inspector.field(clusterInfoTypeField).asString();
+ double flavorCpu = inspector.field(clusterInfoCpuField).asDouble();
+ double flavorMem = inspector.field(clusterInfoMemField).asDouble();
+ double flavorDisk = inspector.field(clusterInfoDiskField).asDouble();
+
+ List<String> hostnames = new ArrayList<>();
+ inspector.field(clusterInfoHostnamesField).traverse((ArrayTraverser)(int index, Inspector value) -> hostnames.add(value.asString()));
+ return new ClusterInfo(flavor, cost, flavorCpu, flavorMem, flavorDisk, ClusterSpec.Type.from(type), hostnames);
+ }
+
+ private ZoneId zoneIdFromSlime(Inspector object) {
+ return ZoneId.from(object.field(environmentField).asString(), object.field(regionField).asString());
+ }
+
+ private ApplicationVersion applicationVersionFromSlime(Inspector object) {
+ if ( ! object.valid()) return ApplicationVersion.unknown;
+ OptionalLong applicationBuildNumber = Serializers.optionalLong(object.field(applicationBuildNumberField));
+ Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField));
+ if (sourceRevision.isEmpty() || applicationBuildNumber.isEmpty()) {
+ return ApplicationVersion.unknown;
+ }
+ Optional<String> authorEmail = Serializers.optionalString(object.field(authorEmailField));
+ Optional<Version> compileVersion = Serializers.optionalString(object.field(compileVersionField)).map(Version::fromString);
+ Optional<Instant> buildTime = Serializers.optionalInstant(object.field(buildTimeField));
+
+ if (authorEmail.isEmpty())
+ return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong());
+
+ if (compileVersion.isEmpty() || buildTime.isEmpty())
+ return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get());
+
+ return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get(),
+ compileVersion.get(), buildTime.get());
+ }
+
+ private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) {
+ if ( ! object.valid()) return Optional.empty();
+ return Optional.of(new SourceRevision(object.field(repositoryField).asString(),
+ object.field(branchField).asString(),
+ object.field(commitField).asString()));
+ }
+
+ private DeploymentJobs deploymentJobsFromSlime(Inspector object) {
+ List<JobStatus> jobStatusList = jobStatusListFromSlime(object.field(jobStatusField));
+
+ return new DeploymentJobs(OptionalLong.empty(), jobStatusList, Optional.empty(), false); // WARNING: Unused variables.
+ }
+
+ private Change changeFromSlime(Inspector object) {
+ if ( ! object.valid()) return Change.empty();
+ Inspector versionFieldValue = object.field(versionField);
+ Change change = Change.empty();
+ if (versionFieldValue.valid())
+ change = Change.of(Version.fromString(versionFieldValue.asString()));
+ if (object.field(applicationBuildNumberField).valid())
+ change = change.with(applicationVersionFromSlime(object));
+ if (object.field(pinnedField).asBool())
+ change = change.withPin();
+ return change;
+ }
+
+ private List<JobStatus> jobStatusListFromSlime(Inspector array) {
+ List<JobStatus> jobStatusList = new ArrayList<>();
+ array.traverse((ArrayTraverser) (int i, Inspector item) -> jobStatusFromSlime(item).ifPresent(jobStatusList::add));
+ return jobStatusList;
+ }
+
+ private Optional<JobStatus> jobStatusFromSlime(Inspector object) {
+ // if the job type has since been removed, ignore it
+ Optional<JobType> jobType =
+ JobType.fromOptionalJobName(object.field(jobTypeField).asString());
+ if (jobType.isEmpty()) return Optional.empty();
+
+ Optional<JobError> jobError = Optional.empty();
+ if (object.field(errorField).valid())
+ jobError = Optional.of(JobError.valueOf(object.field(errorField).asString()));
+
+ return Optional.of(new JobStatus(jobType.get(),
+ jobError,
+ jobRunFromSlime(object.field(lastTriggeredField)),
+ jobRunFromSlime(object.field(lastCompletedField)),
+ jobRunFromSlime(object.field(firstFailingField)),
+ jobRunFromSlime(object.field(lastSuccessField)),
+ Serializers.optionalLong(object.field(pausedUntilField))));
+ }
+
+ private Optional<JobStatus.JobRun> jobRunFromSlime(Inspector object) {
+ if ( ! object.valid()) return Optional.empty();
+ return Optional.of(new JobStatus.JobRun(object.field(jobRunIdField).asLong(),
+ new Version(object.field(versionField).asString()),
+ applicationVersionFromSlime(object.field(revisionField)),
+ Serializers.optionalString(object.field(sourceVersionField)).map(Version::fromString),
+ Optional.of(object.field(sourceApplicationField)).filter(Inspector::valid).map(this::applicationVersionFromSlime),
+ object.field(reasonField).asString(),
+ Instant.ofEpochMilli(object.field(atField).asLong())));
+ }
+
+ private List<AssignedRotation> assignedRotationsFromSlime(DeploymentSpec deploymentSpec, Inspector root) {
+ var assignedRotations = new LinkedHashMap<EndpointId, AssignedRotation>();
+
+ root.field(assignedRotationsField).traverse((ArrayTraverser) (idx, inspector) -> {
+ var clusterId = new ClusterSpec.Id(inspector.field(assignedRotationClusterField).asString());
+ var endpointId = EndpointId.of(inspector.field(assignedRotationEndpointField).asString());
+ var rotationId = new RotationId(inspector.field(assignedRotationRotationField).asString());
+ var regions = deploymentSpec.endpoints().stream()
+ .filter(endpoint -> endpoint.endpointId().equals(endpointId.id()))
+ .flatMap(endpoint -> endpoint.regions().stream())
+ .collect(Collectors.toSet());
+ assignedRotations.putIfAbsent(endpointId, new AssignedRotation(clusterId, endpointId, rotationId, regions));
+ });
+
+ return List.copyOf(assignedRotations.values());
+ }
+
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java
index d439a84af70..2680160b1cb 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java
@@ -6,6 +6,7 @@ import com.google.inject.Inject;
import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.HostName;
+import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.path.Path;
@@ -13,11 +14,13 @@ import com.yahoo.slime.Slime;
import com.yahoo.vespa.config.SlimeUtils;
import com.yahoo.vespa.curator.Curator;
import com.yahoo.vespa.curator.Lock;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.api.integration.certificates.ApplicationCertificate;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
import com.yahoo.vespa.hosted.controller.application.RoutingPolicy;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.auditlog.AuditLog;
import com.yahoo.vespa.hosted.controller.deployment.Run;
import com.yahoo.vespa.hosted.controller.deployment.Step;
@@ -85,6 +88,7 @@ public class CuratorDb {
private final ControllerVersionSerializer controllerVersionSerializer = new ControllerVersionSerializer();
private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer();
private final TenantSerializer tenantSerializer = new TenantSerializer();
+ private final ApplicationSerializer applicationSerializer = new ApplicationSerializer();
private final InstanceSerializer instanceSerializer = new InstanceSerializer();
private final RunSerializer runSerializer = new RunSerializer();
private final OsVersionSerializer osVersionSerializer = new OsVersionSerializer();
@@ -135,6 +139,10 @@ public class CuratorDb {
return lock(lockPath(name), defaultLockTimeout.multipliedBy(2));
}
+ public Lock lock(TenantAndApplicationId id) {
+ return lock(lockPath(id), defaultLockTimeout.multipliedBy(2));
+ }
+
public Lock lock(ApplicationId id) {
return lock(lockPath(id), defaultLockTimeout.multipliedBy(2));
}
@@ -331,45 +339,97 @@ public class CuratorDb {
curator.delete(tenantPath(name));
}
- // -------------- Application ---------------------------------------------
+ // -------------- Applications ---------------------------------------------
+
+ public void writeApplication(Application application) {
+ curator.set(applicationPath(application.id()), asJson(applicationSerializer.toSlime(application)));
+ for (InstanceName name : application.instances().keySet()) {
+ curator.set(oldApplicationPath(application.id().instance(name)),
+ asJson(instanceSerializer.toSlime(application.legacy(name))));
+ }
+ }
+
+ public Optional<Application> readApplication(TenantAndApplicationId application) {
+ List<Instance> instances = readInstances(id -> TenantAndApplicationId.from(id).equals(application));
+ return Application.aggregate(instances);
+ }
+
+ public List<Application> readApplications() {
+ return readApplications(ignored -> true);
+ }
- public void writeInstance(Instance instance) {
- curator.set(applicationPath(instance.id()), asJson(instanceSerializer.toSlime(instance)));
- curator.set(instancePath(instance.id()), asJson(instanceSerializer.toSlime(instance)));
+ public List<Application> readApplications(TenantName name) {
+ return readApplications(application -> application.tenant().equals(name));
}
- public Optional<Instance> readInstance(ApplicationId application) {
- return readSlime(applicationPath(application)).map(instanceSerializer::fromSlime);
+ private Stream<TenantAndApplicationId> readTenantAndApplicationIds() {
+ return readInstanceIds().map(TenantAndApplicationId::from).distinct();
}
- public List<Instance> readInstances() {
- return readInstances(ignored -> true);
+ private List<Application> readApplications(Predicate<TenantAndApplicationId> applicationFilter) {
+ return readTenantAndApplicationIds().filter(applicationFilter)
+ .sorted()
+ .map(this::readApplication)
+ .flatMap(Optional::stream)
+ .collect(Collectors.toUnmodifiableList());
}
- public List<Instance> readInstances(TenantName name) {
- return readInstances(application -> application.tenant().equals(name));
+ private Optional<Instance> readInstance(ApplicationId application) {
+ return readSlime(oldApplicationPath(application)).map(instanceSerializer::fromSlime);
}
private Stream<ApplicationId> readInstanceIds() {
return curator.getChildren(applicationRoot).stream()
.filter(id -> id.split(":").length == 3)
- .distinct()
.map(ApplicationId::fromSerializedForm);
}
- private List<Instance> readInstances(Predicate<ApplicationId> instanceFilter) {
- return readInstanceIds().filter(instanceFilter)
+ private List<Instance> readInstances(Predicate<ApplicationId> applicationFilter) {
+ return readInstanceIds().filter(applicationFilter)
+ .sorted()
.map(this::readInstance)
.flatMap(Optional::stream)
.collect(Collectors.toUnmodifiableList());
}
- public void removeInstance(ApplicationId id) {
+ public void removeApplication(ApplicationId id) {
// WARNING: This is part of a multi-step data move operation, so don't touch!!!
- curator.delete(applicationPath(id));
- curator.delete(instancePath(id));
+ curator.delete(oldApplicationPath(id));
+ if (readApplication(TenantAndApplicationId.from(id)).isEmpty())
+ curator.delete(applicationPath(TenantAndApplicationId.from(id)));
+ }
+
+ public void clearInstanceRoot() {
+ curator.delete(instanceRoot);
}
+ /**
+ * Migration plan:
+ *
+ * Add filter for reading only Instance from old application path RELEASED
+ * Write Instance to instance and old application path RELEASED
+ *
+ * Lock on application level for instance mutations MERGED
+ *
+ * Write Instance to instance and application and old application paths DONE TO CHANGE DONE
+ * Read Instance from instance path DONE TO REMOVE DONE
+ * Duplicate Application from Instance, with helper classes DONE
+ * Write Application to instance and application and old application paths DONE TO CHANGE DONE
+ * Read Application from instance path DONE TO REMOVE DONE
+ * Use Application where applicable DONE !!!
+ * Lock instances and application on same level: tenant + application DONE TO CHANGE DONE
+ * When reading an application, read all instances, and aggregate them DONE
+ * Write application with instances to application path DONE
+ * Write all instances of an application to old application path DONE
+ * Remove everything under instance root DONE
+ *
+ * Read Application with instances from application path (with filter)
+ * Stop locking applications on instance level
+ *
+ * Stop writing Instance to old application path
+ * Remove unused parts of Instance (Used only for legacy serialization)
+ */
+
// -------------- Job Runs ------------------------------------------------
public void writeLastRun(Run run) {
@@ -505,10 +565,14 @@ public class CuratorDb {
.append(tenant.value());
}
- private Path lockPath(ApplicationId application) {
+ private Path lockPath(TenantAndApplicationId application) {
return lockPath(application.tenant())
- .append(application.application().value())
- .append(application.instance().value());
+ .append(application.application().value());
+ }
+
+ private Path lockPath(ApplicationId instance) {
+ return lockPath(TenantAndApplicationId.from(instance))
+ .append(instance.instance().value());
}
private Path lockPath(ApplicationId application, ZoneId zone) {
@@ -585,7 +649,11 @@ public class CuratorDb {
return tenantRoot.append(name.value());
}
- private static Path applicationPath(ApplicationId application) {
+ private static Path applicationPath(TenantAndApplicationId id) {
+ return applicationRoot.append(id.serialized());
+ }
+
+ private static Path oldApplicationPath(ApplicationId application) {
return applicationRoot.append(application.serializedForm());
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
index 123da447757..4b7414a42a6 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
@@ -9,6 +9,7 @@ import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ApplicationName;
import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.container.jdisc.HttpRequest;
@@ -26,6 +27,7 @@ import com.yahoo.vespa.athenz.api.AthenzUser;
import com.yahoo.vespa.athenz.client.zms.ZmsClientException;
import com.yahoo.vespa.config.SlimeUtils;
import com.yahoo.vespa.hosted.controller.AlreadyExistsException;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.NotExistsException;
@@ -63,6 +65,7 @@ import com.yahoo.vespa.hosted.controller.application.Endpoint;
import com.yahoo.vespa.hosted.controller.application.JobStatus;
import com.yahoo.vespa.hosted.controller.application.RoutingPolicy;
import com.yahoo.vespa.hosted.controller.application.SystemApplication;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTrigger;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTrigger.ChangesToCancel;
import com.yahoo.vespa.hosted.controller.deployment.TestConfigSerializer;
@@ -260,8 +263,8 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
}
private HttpResponse handlePATCH(Path path, HttpRequest request) {
- if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), "default", request);
- if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
+ if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
+ if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
@@ -348,24 +351,25 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
TenantName tenant = TenantName.from(tenantName);
Slime slime = new Slime();
Cursor array = slime.setArray();
- for (Instance instance : controller.applications().asList(tenant)) {
- if (applicationName.isPresent() && ! instance.id().application().toString().equals(applicationName.get()))
- continue;
- toSlime(instance, array.addObject(), request);
+ for (Application application : controller.applications().asList(tenant)) {
+ if (applicationName.map(application.id().application().value()::equals).orElse(true))
+ for (InstanceName instance : application.instances().keySet())
+ toSlime(application.id().instance(instance), array.addObject(), request);
}
return new SlimeJsonResponse(slime);
}
private HttpResponse application(String tenantName, String applicationName, String instanceName, HttpRequest request) {
Slime slime = new Slime();
- toSlime(slime.setObject(), getApplication(tenantName, applicationName, instanceName), request);
+ toSlime(slime.setObject(), getInstance(tenantName, applicationName, instanceName),
+ getApplication(tenantName, applicationName, instanceName), request);
return new SlimeJsonResponse(slime);
}
- private HttpResponse patchApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
+ private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
Inspector requestObject = toSlime(request.getData()).get();
StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
- controller.applications().lockOrThrow(ApplicationId.from(tenantName, applicationName, instanceName), application -> {
+ controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
Inspector majorVersionField = requestObject.field("majorVersion");
if (majorVersionField.valid()) {
Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
@@ -379,15 +383,20 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
application = application.withPemDeployKey(pemDeployKey);
messageBuilder.add("Set pem deploy key to " + (pemDeployKey == null ? "empty" : pemDeployKey));
}
-
controller.applications().store(application);
});
return new MessageResponse(messageBuilder.toString());
}
- private Instance getApplication(String tenantName, String applicationName, String instanceName) {
+ private Application getApplication(String tenantName, String applicationName, String instanceName) {
+ ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
+ return controller.applications().getApplication(applicationId)
+ .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
+ }
+
+ private Instance getInstance(String tenantName, String applicationName, String instanceName) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
- return controller.applications().get(applicationId)
+ return controller.applications().getInstance(applicationId)
.orElseThrow(() -> new NotExistsException(applicationId + " not found"));
}
@@ -475,7 +484,7 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
}
- private void toSlime(Cursor object, Instance instance, HttpRequest request) {
+ private void toSlime(Cursor object, Instance instance, Application application, HttpRequest request) {
object.setString("tenant", instance.id().tenant().value());
object.setString("application", instance.id().application().value());
object.setString("instance", instance.id().instance().value());
@@ -490,25 +499,24 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
.map(run -> run.application().source())
.ifPresent(source -> sourceRevisionToSlime(source, object.setObject("source")));
- instance.deploymentJobs().projectId()
- .ifPresent(id -> object.setLong("projectId", id));
+ application.projectId().ifPresent(id -> object.setLong("projectId", id));
// Currently deploying change
- if ( ! instance.change().isEmpty()) {
- toSlime(object.setObject("deploying"), instance.change());
+ if ( ! application.change().isEmpty()) {
+ toSlime(object.setObject("deploying"), application.change());
}
// Outstanding change
- if ( ! instance.outstandingChange().isEmpty()) {
- toSlime(object.setObject("outstandingChange"), instance.outstandingChange());
+ if ( ! application.outstandingChange().isEmpty()) {
+ toSlime(object.setObject("outstandingChange"), application.outstandingChange());
}
// Jobs sorted according to deployment spec
List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
- .steps(instance.deploymentSpec())
+ .steps(application.deploymentSpec())
.sortedJobs(instance.deploymentJobs().jobStatus().values());
- object.setBool("deployedInternally", instance.deploymentJobs().deployedInternally());
+ object.setBool("deployedInternally", application.internal());
Cursor deploymentsArray = object.setArray("deploymentJobs");
for (JobStatus job : jobStatus) {
Cursor jobObject = deploymentsArray.addObject();
@@ -523,7 +531,7 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
// Change blockers
Cursor changeBlockers = object.setArray("changeBlockers");
- instance.deploymentSpec().changeBlocker().forEach(changeBlocker -> {
+ application.deploymentSpec().changeBlocker().forEach(changeBlocker -> {
Cursor changeBlockerObject = changeBlockers.addObject();
changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
@@ -535,9 +543,9 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
});
// Compile version. The version that should be used when building an application
- object.setString("compileVersion", compileVersion(instance.id()).toFullString());
+ object.setString("compileVersion", compileVersion(application.id()).toFullString());
- instance.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
+ application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
// Rotation
Cursor globalRotationsArray = object.setArray("globalRotations");
@@ -565,7 +573,7 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
// Deployments sorted according to deployment spec
List<Deployment> deployments = controller.applications().deploymentTrigger()
- .steps(instance.deploymentSpec())
+ .steps(application.deploymentSpec())
.sortedDeployments(instance.deployments().values());
Cursor instancesArray = object.setArray("instances");
for (Deployment deployment : deployments) {
@@ -600,28 +608,28 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
}
}
- instance.pemDeployKey().ifPresent(key -> object.setString("pemDeployKey", key));
+ application.pemDeployKey().ifPresent(key -> object.setString("pemDeployKey", key));
// Metrics
Cursor metricsObject = object.setObject("metrics");
- metricsObject.setDouble("queryServiceQuality", instance.metrics().queryServiceQuality());
- metricsObject.setDouble("writeServiceQuality", instance.metrics().writeServiceQuality());
+ metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
+ metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
// Activity
Cursor activity = object.setObject("activity");
- instance.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
- instance.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
- instance.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
- instance.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
+ application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
+ application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
+ application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
+ application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
- instance.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
- instance.owner().ifPresent(owner -> object.setString("owner", owner.username()));
- instance.deploymentJobs().issueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
+ application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
+ application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
+ application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
- Instance instance = controller.applications().get(id)
+ Instance instance = controller.applications().getInstance(id)
.orElseThrow(() -> new NotExistsException(id + " not found"));
DeploymentId deploymentId = new DeploymentId(instance.id(),
@@ -675,7 +683,7 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
.ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));
- controller.applications().require(deploymentId.applicationId()).deploymentJobs().projectId()
+ controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId())).projectId()
.ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
sourceRevisionToSlime(deployment.applicationVersion().source(), response);
@@ -741,7 +749,7 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
* If no known version is applicable, the newest version at least as old as the oldest platform is selected,
* among all versions released for this system. If no such versions exists, throws an IllegalStateException.
*/
- private Version compileVersion(ApplicationId id) {
+ private Version compileVersion(TenantAndApplicationId id) {
Version oldestPlatform = controller.applications().oldestInstalledPlatform(id);
return controller.versionStatus().versions().stream()
.filter(version -> version.confidence().equalOrHigherThan(VespaVersion.Confidence.low))
@@ -760,7 +768,7 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
}
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
- Instance instance = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
+ Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
ZoneId zone = ZoneId.from(environment, region);
Deployment deployment = instance.deployments().get(zone);
if (deployment == null) {
@@ -805,7 +813,7 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
- Instance instance = controller.applications().require(applicationId);
+ Instance instance = controller.applications().requireInstance(applicationId);
ZoneId zone = ZoneId.from(environment, region);
RotationId rotation = findRotationId(instance, endpointId);
Deployment deployment = instance.deployments().get(zone);
@@ -883,7 +891,7 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
}
private HttpResponse deploying(String tenant, String application, String instance, HttpRequest request) {
- Instance app = controller.applications().require(ApplicationId.from(tenant, application, instance));
+ Application app = controller.applications().requireApplication(TenantAndApplicationId.from(tenant, application));
Slime slime = new Slime();
Cursor root = slime.setObject();
if ( ! app.change().isEmpty()) {
@@ -968,10 +976,10 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
Optional<Credentials> credentials = controller.tenants().require(id.tenant()).type() == Tenant.Type.user
? Optional.empty()
: Optional.of(accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest()));
- Instance instance = controller.applications().createApplication(id, credentials);
+ Application application = controller.applications().createApplication(id, credentials);
Slime slime = new Slime();
- toSlime(instance, slime.setObject(), request);
+ toSlime(id, slime.setObject(), request);
return new SlimeJsonResponse(slime);
}
catch (ZmsClientException e) { // TODO: Push conversion down
@@ -986,9 +994,9 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
request = controller.auditLogger().log(request);
String versionString = readToString(request.getData());
- ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
+ TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
StringBuilder response = new StringBuilder();
- controller.applications().lockOrThrow(id, application -> {
+ controller.applications().lockApplicationOrThrow(id, application -> {
Version version = Version.fromString(versionString);
if (version.equals(Version.emptyVersion))
version = controller.systemVersion();
@@ -1013,10 +1021,10 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
controller.auditLogger().log(request);
- ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
+ TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
StringBuilder response = new StringBuilder();
- controller.applications().lockOrThrow(id, application -> {
- Change change = Change.of(application.get().deploymentJobs().statusOf(JobType.component).get().lastSuccess().get().application());
+ controller.applications().lockApplicationOrThrow(id, application -> {
+ Change change = Change.of(application.get().require(InstanceName.from(instanceName)).deploymentJobs().statusOf(JobType.component).get().lastSuccess().get().application());
controller.applications().deploymentTrigger().forceChange(id, change);
response.append("Triggered " + change + " for " + id);
});
@@ -1025,9 +1033,9 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
- ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
+ TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
StringBuilder response = new StringBuilder();
- controller.applications().lockOrThrow(id, application -> {
+ controller.applications().lockApplicationOrThrow(id, application -> {
Change change = application.get().change();
if (change.isEmpty()) {
response.append("No deployment in progress for " + application + " at this time");
@@ -1037,7 +1045,7 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase());
controller.applications().deploymentTrigger().cancelChange(id, cancel);
response.append("Changed deployment from '" + change + "' to '" +
- controller.applications().require(id).change() + "' for " + application);
+ controller.applications().requireApplication(id).change() + "' for " + application);
});
return new MessageResponse(response.toString());
@@ -1122,6 +1130,7 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
.map(ApplicationPackage::new);
+ Optional<Application> application = controller.applications().getApplication(TenantAndApplicationId.from(applicationId));
Inspector sourceRevision = deployOptions.field("sourceRevision");
Inspector buildNumber = deployOptions.field("buildNumber");
@@ -1135,7 +1144,9 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
buildNumber.asLong()));
- applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
+ applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
+ application.get().internal(),
+ applicationVersion.get()));
}
boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
@@ -1144,7 +1155,7 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
if (deployDirectly && applicationPackage.isEmpty() && applicationVersion.isEmpty() && vespaVersion.isEmpty()) {
// Redeploy the existing deployment with the same versions.
- Optional<Deployment> deployment = controller.applications().get(applicationId)
+ Optional<Deployment> deployment = controller.applications().getInstance(applicationId)
.map(Instance::deployments)
.flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));
@@ -1157,7 +1168,9 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
applicationVersion = Optional.of(version);
vespaVersion = Optional.of(deployment.get().version());
- applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
+ applicationPackage = Optional.of(controller.applications().getApplicationPackage(applicationId,
+ application.get().internal(),
+ applicationVersion.get()));
}
// TODO: get rid of the json object
@@ -1213,7 +1226,7 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
}
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
- Instance instance = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
+ Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
// Attempt to deactivate application even if the deployment is not known by the controller
DeploymentId deploymentId = new DeploymentId(instance.id(), ZoneId.from(environment, region));
@@ -1226,7 +1239,7 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
try {
DeploymentJobs.JobReport report = toJobReport(tenant, application, toSlime(request.getData()).get());
if ( report.jobType() == JobType.component
- && controller.applications().require(report.applicationId()).deploymentJobs().deployedInternally())
+ && controller.applications().requireApplication(TenantAndApplicationId.from(report.applicationId())).internal())
throw new IllegalArgumentException(report.applicationId() + " is set up to be deployed from internally, and no " +
"longer accepts submissions from Screwdriver v3 jobs. If you need to revert " +
"to the old pipeline, please file a ticket at yo/vespa-support and request this.");
@@ -1305,14 +1318,12 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
Cursor applicationArray = object.setArray("applications");
- for (Instance instance : controller.applications().asList(tenant.name())) {
- if ( ! instance.id().instance().isTester()) {
+ for (Application application : controller.applications().asList(tenant.name()))
+ for (Instance instance : application.instances().values())
if (recurseOverApplications(request))
- toSlime(applicationArray.addObject(), instance, request);
+ toSlime(applicationArray.addObject(), instance, application, request);
else
- toSlime(instance, applicationArray.addObject(), request);
- }
- }
+ toSlime(instance.id(), applicationArray.addObject(), request);
}
// A tenant has different content when in a list ... antipattern, but not solvable before application/v5
@@ -1391,14 +1402,14 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
return Joiner.on("/").join(elements);
}
- private void toSlime(Instance instance, Cursor object, HttpRequest request) {
- object.setString("tenant", instance.id().tenant().value());
- object.setString("application", instance.id().application().value());
- object.setString("instance", instance.id().instance().value());
+ private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
+ object.setString("tenant", id.tenant().value());
+ object.setString("application", id.application().value());
+ object.setString("instance", id.instance().value());
object.setString("url", withPath("/application/v4" +
- "/tenant/" + instance.id().tenant().value() +
- "/application/" + instance.id().application().value() +
- "/instance/" + instance.id().instance().value(),
+ "/tenant/" + id.tenant().value() +
+ "/application/" + id.application().value() +
+ "/instance/" + id.instance().value(),
request.getUri()).toString());
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java
index 697e95e75b3..90a4ecdef9a 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java
@@ -10,6 +10,7 @@ import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.container.jdisc.HttpResponse;
import com.yahoo.slime.Cursor;
import com.yahoo.slime.Slime;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.NotExistsException;
@@ -22,6 +23,7 @@ import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.Change;
import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.JobStatus;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentSteps;
import com.yahoo.vespa.hosted.controller.deployment.JobController;
import com.yahoo.vespa.hosted.controller.deployment.Run;
@@ -69,17 +71,18 @@ class JobControllerApiHandlerHelper {
* @return Response with all job types that have recorded runs for the application _and_ the status for the last run of that type
*/
static HttpResponse jobTypeResponse(Controller controller, ApplicationId id, URI baseUriForJobs) {
- Instance instance = controller.applications().require(id);
- Change change = instance.change();
- DeploymentSteps steps = new DeploymentSteps(instance.deploymentSpec(), controller::system);
+ Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id));
+ Instance instance = application.require(id.instance());
+ Change change = application.change();
+ DeploymentSteps steps = new DeploymentSteps(application.deploymentSpec(), controller::system);
// The logic for pending runs imitates DeploymentTrigger logic; not good, but the trigger wiring must be re-written to reuse :S
Map<JobType, Versions> pendingProduction =
steps.productionJobs().stream()
- .filter(type -> ! controller.applications().deploymentTrigger().isComplete(change, instance, type))
+ .filter(type -> ! controller.applications().deploymentTrigger().isComplete(change, change, instance, type))
.collect(Collectors.toMap(type -> type,
type -> Versions.from(change,
- instance,
+ application,
Optional.ofNullable(instance.deployments().get(type.zone(controller.system()))),
controller.systemVersion()),
(v1, v2) -> { throw new IllegalStateException("Entries '" + v1 + "' and '" + v2 + "' have the same key!"); },
@@ -97,8 +100,8 @@ class JobControllerApiHandlerHelper {
Cursor lastVersionsObject = responseObject.setObject("lastVersions");
if (instance.deploymentJobs().statusOf(component).flatMap(JobStatus::lastSuccess).isPresent()) {
- lastPlatformToSlime(lastVersionsObject.setObject("platform"), controller, instance, change, steps);
- lastApplicationToSlime(lastVersionsObject.setObject("application"), instance, change, steps, controller);
+ lastPlatformToSlime(lastVersionsObject.setObject("platform"), controller, application, instance, change, steps);
+ lastApplicationToSlime(lastVersionsObject.setObject("application"), application, instance, change, steps, controller);
}
Cursor deployingObject = responseObject.setObject("deploying");
@@ -128,6 +131,7 @@ class JobControllerApiHandlerHelper {
steps.jobs().forEach(type -> {
jobTypeToSlime(jobsObject.setObject(shortNameOf(type, controller.system())),
controller,
+ application,
instance,
type,
steps,
@@ -152,11 +156,11 @@ class JobControllerApiHandlerHelper {
return new SlimeJsonResponse(slime);
}
- private static void lastPlatformToSlime(Cursor lastPlatformObject, Controller controller, Instance instance, Change change, DeploymentSteps steps) {
+ private static void lastPlatformToSlime(Cursor lastPlatformObject, Controller controller, Application application, Instance instance, Change change, DeploymentSteps steps) {
VespaVersion lastVespa = controller.versionStatus().version(controller.systemVersion());
VespaVersion.Confidence targetConfidence = Map.of(defaultPolicy, normal,
conservative, high)
- .getOrDefault(instance.deploymentSpec().upgradePolicy(), broken);
+ .getOrDefault(application.deploymentSpec().upgradePolicy(), broken);
for (VespaVersion version : controller.versionStatus().versions())
if ( ! version.versionNumber().isAfter(controller.systemVersion())
&& version.confidence().equalOrHigherThan(targetConfidence))
@@ -165,39 +169,39 @@ class JobControllerApiHandlerHelper {
Version lastPlatform = lastVespa.versionNumber();
lastPlatformObject.setString("platform", lastPlatform.toString());
lastPlatformObject.setLong("at", lastVespa.committedAt().toEpochMilli());
- long completed = steps.productionJobs().stream().filter(type -> controller.applications().deploymentTrigger().isComplete(Change.of(lastPlatform), instance, type)).count();
+ long completed = steps.productionJobs().stream().filter(type -> controller.applications().deploymentTrigger().isComplete(Change.of(lastPlatform), change, instance, type)).count();
if (Optional.of(lastPlatform).equals(change.platform()))
lastPlatformObject.setString("deploying", completed + " of " + steps.productionJobs().size() + " complete");
else if (completed == steps.productionJobs().size())
lastPlatformObject.setString("completed", completed + " of " + steps.productionJobs().size() + " complete");
- else if ( ! instance.deploymentSpec().canUpgradeAt(controller.clock().instant())) {
- lastPlatformObject.setString("blocked", instance.deploymentSpec().changeBlocker().stream()
- .filter(blocker -> blocker.blocksVersions())
- .filter(blocker -> blocker.window().includes(controller.clock().instant()))
- .findAny().map(blocker -> blocker.window().toString()).get());
+ else if ( ! application.deploymentSpec().canUpgradeAt(controller.clock().instant())) {
+ lastPlatformObject.setString("blocked", application.deploymentSpec().changeBlocker().stream()
+ .filter(blocker -> blocker.blocksVersions())
+ .filter(blocker -> blocker.window().includes(controller.clock().instant()))
+ .findAny().map(blocker -> blocker.window().toString()).get());
}
else
lastPlatformObject.setString("pending",
- instance.change().isEmpty()
+ application.change().isEmpty()
? "Waiting for upgrade slot"
: "Waiting for " + instance.change() + " to complete");
}
- private static void lastApplicationToSlime(Cursor lastApplicationObject, Instance instance, Change change, DeploymentSteps steps, Controller controller) {
+ private static void lastApplicationToSlime(Cursor lastApplicationObject, Application application, Instance instance, Change change, DeploymentSteps steps, Controller controller) {
long completed;
ApplicationVersion lastApplication = instance.deploymentJobs().statusOf(component).flatMap(JobStatus::lastSuccess).get().application();
applicationVersionToSlime(lastApplicationObject.setObject("application"), lastApplication);
lastApplicationObject.setLong("at", instance.deploymentJobs().statusOf(component).flatMap(JobStatus::lastSuccess).get().at().toEpochMilli());
- completed = steps.productionJobs().stream().filter(type -> controller.applications().deploymentTrigger().isComplete(Change.of(lastApplication), instance, type)).count();
+ completed = steps.productionJobs().stream().filter(type -> controller.applications().deploymentTrigger().isComplete(Change.of(lastApplication), change, instance, type)).count();
if (Optional.of(lastApplication).equals(change.application()))
lastApplicationObject.setString("deploying", completed + " of " + steps.productionJobs().size() + " complete");
else if (completed == steps.productionJobs().size())
lastApplicationObject.setString("completed", completed + " of " + steps.productionJobs().size() + " complete");
- else if ( ! instance.deploymentSpec().canChangeRevisionAt(controller.clock().instant())) {
- lastApplicationObject.setString("blocked", instance.deploymentSpec().changeBlocker().stream()
- .filter(blocker -> blocker.blocksRevisions())
- .filter(blocker -> blocker.window().includes(controller.clock().instant()))
- .findAny().map(blocker -> blocker.window().toString()).get());
+ else if ( ! application.deploymentSpec().canChangeRevisionAt(controller.clock().instant())) {
+ lastApplicationObject.setString("blocked", application.deploymentSpec().changeBlocker().stream()
+ .filter(blocker -> blocker.blocksRevisions())
+ .filter(blocker -> blocker.window().includes(controller.clock().instant()))
+ .findAny().map(blocker -> blocker.window().toString()).get());
}
else
lastApplicationObject.setString("pending", "Waiting for current deployment to complete");
@@ -220,7 +224,7 @@ class JobControllerApiHandlerHelper {
deploymentObject.setString("status", pendingProduction.containsKey(type) ? "pending" : "completed");
}
- private static void jobTypeToSlime(Cursor jobObject, Controller controller, Instance instance, JobType type, DeploymentSteps steps,
+ private static void jobTypeToSlime(Cursor jobObject, Controller controller, Application application, Instance instance, JobType type, DeploymentSteps steps,
Map<JobType, Versions> pendingProduction, Map<JobType, Run> running, URI baseUriForJob) {
instance.deploymentJobs().statusOf(type).ifPresent(status -> status.pausedUntil().ifPresent(until ->
jobObject.setLong("pausedUntil", until)));
@@ -244,7 +248,7 @@ class JobControllerApiHandlerHelper {
Cursor runObject = runArray.addObject();
runObject.setString("status", "pending");
versionsToSlime(runObject, versions);
- if ( ! controller.applications().deploymentTrigger().triggerAt(controller.clock().instant(), type, versions, instance))
+ if ( ! controller.applications().deploymentTrigger().triggerAt(controller.clock().instant(), type, versions, instance, application.deploymentSpec()))
runObject.setObject("tasks").setString("cooldown", "failed");
else
runObject.setObject("tasks").setString("capacity", "running");
@@ -262,7 +266,7 @@ class JobControllerApiHandlerHelper {
Cursor pendingObject = runObject.setObject("tasks");
if (instance.deploymentJobs().statusOf(type).map(status -> status.pausedUntil().isPresent()).orElse(false))
pendingObject.setString("paused", "pending");
- else if ( ! controller.applications().deploymentTrigger().triggerAt(controller.clock().instant(), type, versions, instance))
+ else if ( ! controller.applications().deploymentTrigger().triggerAt(controller.clock().instant(), type, versions, instance, application.deploymentSpec()))
pendingObject.setString("cooldown", "failed");
else {
int pending = 0;
@@ -281,8 +285,8 @@ class JobControllerApiHandlerHelper {
break;
for (JobType stepType : steps.toJobs(step)) {
if (pendingProduction.containsKey(stepType)) {
- Versions jobVersions = Versions.from(instance.change(),
- instance,
+ Versions jobVersions = Versions.from(application.change(),
+ application,
Optional.ofNullable(instance.deployments().get(stepType.zone(controller.system()))),
controller.systemVersion());
pendingObject.setString(shortNameOf(stepType, controller.system()), statusOf(controller, instance.id(), stepType, jobVersions));
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/deployment/DeploymentApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/deployment/DeploymentApiHandler.java
index b2bc6d72044..86310ca2f6b 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/deployment/DeploymentApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/deployment/DeploymentApiHandler.java
@@ -18,6 +18,7 @@ import com.yahoo.vespa.hosted.controller.application.JobStatus;
import com.yahoo.restapi.ErrorResponse;
import com.yahoo.restapi.SlimeJsonResponse;
import com.yahoo.restapi.Uri;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.restapi.application.EmptyResponse;
import com.yahoo.vespa.hosted.controller.versions.VespaVersion;
import com.yahoo.yolean.Exceptions;
@@ -97,7 +98,7 @@ public class DeploymentApiHandler extends LoggingRequestHandler {
Cursor failingArray = versionObject.setArray("failingApplications");
for (ApplicationId id : version.statistics().failing()) {
- controller.applications().get(id).ifPresent(application -> {
+ controller.applications().getInstance(id).ifPresent(application -> {
firstFailingOn(version.versionNumber(), application).ifPresent(firstFailing -> {
Cursor applicationObject = failingArray.addObject();
toSlime(applicationObject, application, request);
@@ -108,7 +109,7 @@ public class DeploymentApiHandler extends LoggingRequestHandler {
Cursor productionArray = versionObject.setArray("productionApplications");
for (ApplicationId id : version.statistics().production()) {
- controller.applications().get(id).ifPresent(application -> {
+ controller.applications().getInstance(id).ifPresent(application -> {
int successes = productionSuccessesFor(version.versionNumber(), application);
if (successes == 0) return; // Just upgraded to a newer version.
Cursor applicationObject = productionArray.addObject();
@@ -120,7 +121,7 @@ public class DeploymentApiHandler extends LoggingRequestHandler {
Cursor runningArray = versionObject.setArray("deployingApplications");
for (ApplicationId id : version.statistics().deploying()) {
- controller.applications().get(id).ifPresent(application -> {
+ controller.applications().getInstance(id).ifPresent(application -> {
lastDeployingTo(version.versionNumber(), application).ifPresent(lastDeploying -> {
Cursor applicationObject = runningArray.addObject();
toSlime(applicationObject, application, request);
@@ -140,7 +141,8 @@ public class DeploymentApiHandler extends LoggingRequestHandler {
instance.id().tenant().value() +
"/application/" +
instance.id().application().value()).toString());
- object.setString("upgradePolicy", toString(instance.deploymentSpec().upgradePolicy()));
+ object.setString("upgradePolicy", toString(controller.applications().requireApplication(TenantAndApplicationId.from(instance.id()))
+ .deploymentSpec().upgradePolicy()));
}
private static String toString(DeploymentSpec.UpgradePolicy upgradePolicy) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/filter/SignatureFilter.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/filter/SignatureFilter.java
index 9d7fa68807e..e86a5d16452 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/filter/SignatureFilter.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/filter/SignatureFilter.java
@@ -10,10 +10,12 @@ import com.yahoo.config.provision.TenantName;
import com.yahoo.jdisc.http.filter.DiscFilterRequest;
import com.yahoo.jdisc.http.filter.security.base.JsonSecurityRequestFilterBase;
import com.yahoo.log.LogLevel;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.api.role.Role;
import com.yahoo.vespa.hosted.controller.api.role.SecurityContext;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.yolean.Exceptions;
import java.security.Principal;
@@ -45,8 +47,8 @@ public class SignatureFilter extends JsonSecurityRequestFilterBase {
&& request.getHeader("X-Authorization") != null)
try {
ApplicationId id = ApplicationId.fromSerializedForm(request.getHeader("X-Key-Id"));
- boolean verified = controller.applications().get(id)
- .flatMap(Instance::pemDeployKey)
+ boolean verified = controller.applications().getApplication(TenantAndApplicationId.from(id))
+ .flatMap(Application::pemDeployKey)
.map(key -> new RequestVerifier(key, controller.clock()))
.map(verifier -> verifier.verify(Method.valueOf(request.getMethod()),
request.getUri(),
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepository.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepository.java
index 988df9c6a66..a16ca5cb201 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepository.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepository.java
@@ -1,10 +1,14 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.rotation;
+import com.yahoo.config.application.api.DeploymentSpec;
import com.yahoo.config.application.api.Endpoint;
+import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.RegionName;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.ApplicationController;
import com.yahoo.vespa.hosted.controller.application.AssignedRotation;
@@ -73,23 +77,24 @@ public class RotationRepository {
* If a rotation is already assigned to the application, that rotation will be returned.
* If no rotation is assigned, return an available rotation. The caller is responsible for assigning the rotation.
*
- * @param instance The application requesting a rotation
+ * @param deploymentSpec The deployment spec for the application
+ * @param instance The instance requesting a rotation
* @param lock Lock which must be acquired by the caller
*/
- public Rotation getOrAssignRotation(Instance instance, RotationLock lock) {
- if (! instance.rotations().isEmpty()) {
+ public Rotation getOrAssignRotation(DeploymentSpec deploymentSpec, Instance instance, RotationLock lock) {
+ if ( ! instance.rotations().isEmpty()) {
return allRotations.get(instance.rotations().get(0).rotationId());
}
- if (instance.deploymentSpec().globalServiceId().isEmpty()) {
+ if (deploymentSpec.globalServiceId().isEmpty()) {
throw new IllegalArgumentException("global-service-id is not set in deployment spec");
}
- long productionZones = instance.deploymentSpec().zones().stream()
- .filter(zone -> zone.deploysTo(Environment.prod))
- .count();
+ long productionZones = deploymentSpec.zones().stream()
+ .filter(zone -> zone.deploysTo(Environment.prod))
+ .count();
if (productionZones < 2) {
throw new IllegalArgumentException("global-service-id is set but less than 2 prod zones are defined");
}
- return findAvailableRotation(instance, lock);
+ return findAvailableRotation(instance.id(), lock);
}
/**
@@ -99,27 +104,28 @@ public class RotationRepository {
* If rotations are not assigned, a new assignment will be created taking new rotations from the repository.
* This method supports both global-service-id as well as the new endpoints tag.
*
- * @param instance The application requesting rotations.
+ * @param deploymentSpec The deployment spec of the application
+ * @param instance The application requesting rotations
* @param lock Lock which by acquired by the caller
- * @return List of rotation assignments - either new or existing.
+ * @return List of rotation assignments - either new or existing
*/
- public List<AssignedRotation> getOrAssignRotations(Instance instance, RotationLock lock) {
- if (instance.deploymentSpec().globalServiceId().isPresent() && ! instance.deploymentSpec().endpoints().isEmpty()) {
+ public List<AssignedRotation> getOrAssignRotations(DeploymentSpec deploymentSpec, Instance instance, RotationLock lock) {
+ if (deploymentSpec.globalServiceId().isPresent() && ! deploymentSpec.endpoints().isEmpty()) {
throw new IllegalArgumentException("Cannot provision rotations with both global-service-id and 'endpoints'");
}
// Support the older case of setting global-service-id
- if (instance.deploymentSpec().globalServiceId().isPresent()) {
- final var regions = instance.deploymentSpec().zones().stream()
- .filter(zone -> zone.environment().isProduction())
- .flatMap(zone -> zone.region().stream())
- .collect(Collectors.toSet());
+ if (deploymentSpec.globalServiceId().isPresent()) {
+ final var regions = deploymentSpec.zones().stream()
+ .filter(zone -> zone.environment().isProduction())
+ .flatMap(zone -> zone.region().stream())
+ .collect(Collectors.toSet());
- final var rotation = getOrAssignRotation(instance, lock);
+ final var rotation = getOrAssignRotation(deploymentSpec, instance, lock);
return List.of(
new AssignedRotation(
- new ClusterSpec.Id(instance.deploymentSpec().globalServiceId().get()),
+ new ClusterSpec.Id(deploymentSpec.globalServiceId().get()),
EndpointId.default_(),
rotation.id(),
regions
@@ -127,20 +133,20 @@ public class RotationRepository {
);
}
- final Map<EndpointId, AssignedRotation> existingAssignments = existingEndpointAssignments(instance);
- final Map<EndpointId, AssignedRotation> updatedAssignments = assignRotationsToEndpoints(instance, existingAssignments, lock);
+ final Map<EndpointId, AssignedRotation> existingAssignments = existingEndpointAssignments(deploymentSpec, instance);
+ final Map<EndpointId, AssignedRotation> updatedAssignments = assignRotationsToEndpoints(deploymentSpec, existingAssignments, lock);
existingAssignments.putAll(updatedAssignments);
return List.copyOf(existingAssignments.values());
}
- private Map<EndpointId, AssignedRotation> assignRotationsToEndpoints(Instance instance, Map<EndpointId, AssignedRotation> existingAssignments, RotationLock lock) {
+ private Map<EndpointId, AssignedRotation> assignRotationsToEndpoints(DeploymentSpec deploymentSpec, Map<EndpointId, AssignedRotation> existingAssignments, RotationLock lock) {
final var availableRotations = new ArrayList<>(availableRotations(lock).values());
- final var neededRotations = instance.deploymentSpec().endpoints().stream()
- .filter(Predicate.not(endpoint -> existingAssignments.containsKey(EndpointId.of(endpoint.endpointId()))))
- .collect(Collectors.toSet());
+ final var neededRotations = deploymentSpec.endpoints().stream()
+ .filter(Predicate.not(endpoint -> existingAssignments.containsKey(EndpointId.of(endpoint.endpointId()))))
+ .collect(Collectors.toSet());
if (neededRotations.size() > availableRotations.size()) {
throw new IllegalStateException("Hosted Vespa ran out of rotations, unable to assign rotation: need " + neededRotations.size() + ", have " + availableRotations.size());
@@ -165,17 +171,17 @@ public class RotationRepository {
);
}
- private Map<EndpointId, AssignedRotation> existingEndpointAssignments(Instance instance) {
+ private Map<EndpointId, AssignedRotation> existingEndpointAssignments(DeploymentSpec deploymentSpec, Instance instance) {
//
// Get the regions that has been configured for an endpoint. Empty set if the endpoint
// is no longer mentioned in the configuration file.
//
final Function<EndpointId, Set<RegionName>> configuredRegionsForEndpoint = endpointId -> {
- return instance.deploymentSpec().endpoints().stream()
- .filter(endpoint -> endpointId.id().equals(endpoint.endpointId()))
- .map(Endpoint::regions)
- .findFirst()
- .orElse(Set.of());
+ return deploymentSpec.endpoints().stream()
+ .filter(endpoint -> endpointId.id().equals(endpoint.endpointId()))
+ .map(Endpoint::regions)
+ .findFirst()
+ .orElse(Set.of());
};
//
@@ -210,8 +216,8 @@ public class RotationRepository {
*/
public Map<RotationId, Rotation> availableRotations(@SuppressWarnings("unused") RotationLock lock) {
List<RotationId> assignedRotations = applications.asList().stream()
- .filter(application -> ! application.rotations().isEmpty())
- .flatMap(application -> application.rotations().stream())
+ .flatMap(application -> application.instances().values().stream())
+ .flatMap(instance -> instance.rotations().stream())
.map(AssignedRotation::rotationId)
.collect(Collectors.toList());
Map<RotationId, Rotation> unassignedRotations = new LinkedHashMap<>(this.allRotations);
@@ -219,15 +225,15 @@ public class RotationRepository {
return Collections.unmodifiableMap(unassignedRotations);
}
- private Rotation findAvailableRotation(Instance instance, RotationLock lock) {
+ private Rotation findAvailableRotation(ApplicationId id, RotationLock lock) {
Map<RotationId, Rotation> availableRotations = availableRotations(lock);
if (availableRotations.isEmpty()) {
- throw new IllegalStateException("Unable to assign global rotation to " + instance.id()
+ throw new IllegalStateException("Unable to assign global rotation to " + id
+ " - no rotations available");
}
// Return first available rotation
RotationId rotation = availableRotations.keySet().iterator().next();
- log.info(String.format("Offering %s to application %s", rotation, instance.id()));
+ log.info(String.format("Offering %s to application %s", rotation, id));
return allRotations.get(rotation);
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/security/AccessControl.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/security/AccessControl.java
index 6ed32feae28..77ccce873fe 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/security/AccessControl.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/security/AccessControl.java
@@ -2,7 +2,7 @@ package com.yahoo.vespa.hosted.controller.security;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.TenantName;
-import com.yahoo.vespa.hosted.controller.Instance;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.tenant.Tenant;
import java.util.List;
@@ -33,10 +33,10 @@ public interface AccessControl {
* @param tenantSpec specification for the tenant to update
* @param credentials the credentials for the entity requesting the update
* @param existing list of existing tenants, to check for conflicts
- * @param instances list of applications this tenant already owns
+ * @param applications list of applications this tenant already owns
* @return the updated tenant, for keeping
*/
- Tenant updateTenant(TenantSpec tenantSpec, Credentials credentials, List<Tenant> existing, List<Instance> instances);
+ Tenant updateTenant(TenantSpec tenantSpec, Credentials credentials, List<Tenant> existing, List<Application> applications);
/**
* Deletes access control for the given tenant.
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/security/CloudAccessControl.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/security/CloudAccessControl.java
index b53388d7aa6..33529c342a3 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/security/CloudAccessControl.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/security/CloudAccessControl.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.hosted.controller.security;
import com.google.inject.Inject;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.TenantName;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.api.integration.organization.BillingInfo;
import com.yahoo.vespa.hosted.controller.api.integration.user.Roles;
@@ -44,7 +45,7 @@ public class CloudAccessControl implements AccessControl {
}
@Override
- public Tenant updateTenant(TenantSpec tenantSpec, Credentials credentials, List<Tenant> existing, List<Instance> instances) {
+ public Tenant updateTenant(TenantSpec tenantSpec, Credentials credentials, List<Tenant> existing, List<Application> applications) {
throw new UnsupportedOperationException("Update is not supported here, as it would entail changing the tenant name.");
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VersionStatus.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VersionStatus.java
index c6c66eab1b4..9dc6b86e4be 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VersionStatus.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VersionStatus.java
@@ -8,10 +8,11 @@ import com.yahoo.component.Version;
import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.zone.ZoneApi;
import com.yahoo.log.LogLevel;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node;
-import com.yahoo.vespa.hosted.controller.application.InstanceList;
+import com.yahoo.vespa.hosted.controller.application.ApplicationList;
import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.JobList;
import com.yahoo.vespa.hosted.controller.application.SystemApplication;
@@ -189,51 +190,50 @@ public class VersionStatus {
}
private static Collection<DeploymentStatistics> computeDeploymentStatistics(Set<Version> infrastructureVersions,
- List<Instance> instances) {
+ List<Application> instances) {
Map<Version, DeploymentStatistics> versionMap = new HashMap<>();
for (Version infrastructureVersion : infrastructureVersions) {
versionMap.put(infrastructureVersion, DeploymentStatistics.empty(infrastructureVersion));
}
- InstanceList instanceList = InstanceList.from(instances)
- .hasProductionDeployment();
- for (Instance instance : instanceList.asList()) {
- // Note that each version deployed on this application in production exists
- // (ignore non-production versions)
- for (Deployment deployment : instance.productionDeployments().values()) {
- versionMap.computeIfAbsent(deployment.version(), DeploymentStatistics::empty);
- }
+ for (Application application : ApplicationList.from(instances).withProductionDeployment().asList())
+ for (Instance instance : application.instances().values()) {
+ // Note that each version deployed on this application in production exists
+ // (ignore non-production versions)
+ for (Deployment deployment : instance.productionDeployments().values()) {
+ versionMap.computeIfAbsent(deployment.version(), DeploymentStatistics::empty);
+ }
- // List versions which have failing jobs, versions which are in production, and versions for which there are running deployment jobs
-
- // Failing versions
- JobList.from(instance)
- .failing()
- .not().failingApplicationChange()
- .not().failingBecause(outOfCapacity)
- .mapToList(job -> job.lastCompleted().get().platform())
- .forEach(version -> versionMap
- .put(version, versionMap.getOrDefault(version, DeploymentStatistics.empty(version))
- .withFailing(instance.id())));
-
- // Succeeding versions
- JobList.from(instance)
- .lastSuccess().present()
- .production()
- .mapToList(job -> job.lastSuccess().get().platform())
- .forEach(version -> versionMap
- .put(version, versionMap.getOrDefault(version, DeploymentStatistics.empty(version))
- .withProduction(instance.id())));
-
- // Deploying versions
- JobList.from(instance)
- .upgrading()
- .mapToList(job -> job.lastTriggered().get().platform())
- .forEach(version -> versionMap
- .put(version, versionMap.getOrDefault(version, DeploymentStatistics.empty(version))
- .withDeploying(instance.id())));
- }
+ // List versions which have failing jobs, versions which are in production, and versions for which there are running deployment jobs
+
+ // Failing versions
+ JobList.from(instance)
+ .failing()
+ .not().failingApplicationChange()
+ .not().failingBecause(outOfCapacity)
+ .mapToList(job -> job.lastCompleted().get().platform())
+ .forEach(version -> versionMap
+ .put(version, versionMap.getOrDefault(version, DeploymentStatistics.empty(version))
+ .withFailing(instance.id())));
+
+ // Succeeding versions
+ JobList.from(instance)
+ .lastSuccess().present()
+ .production()
+ .mapToList(job -> job.lastSuccess().get().platform())
+ .forEach(version -> versionMap
+ .put(version, versionMap.getOrDefault(version, DeploymentStatistics.empty(version))
+ .withProduction(instance.id())));
+
+ // Deploying versions
+ JobList.from(instance)
+ .upgrading()
+ .mapToList(job -> job.lastTriggered().get().platform())
+ .forEach(version -> versionMap
+ .put(version, versionMap.getOrDefault(version, DeploymentStatistics.empty(version))
+ .withDeploying(instance.id())));
+ }
return versionMap.values();
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VespaVersion.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VespaVersion.java
index aaf216f805b..dc0b2c12d5c 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VespaVersion.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VespaVersion.java
@@ -5,7 +5,7 @@ import com.google.common.collect.ImmutableSet;
import com.yahoo.component.Version;
import com.yahoo.config.provision.HostName;
import com.yahoo.vespa.hosted.controller.Controller;
-import com.yahoo.vespa.hosted.controller.application.InstanceList;
+import com.yahoo.vespa.hosted.controller.application.ApplicationList;
import java.time.Instant;
import java.time.ZoneOffset;
@@ -49,12 +49,12 @@ public class VespaVersion implements Comparable<VespaVersion> {
public static Confidence confidenceFrom(DeploymentStatistics statistics, Controller controller) {
// 'production on this': All deployment jobs upgrading to this version have completed without failure
- InstanceList productionOnThis = InstanceList.from(statistics.production(), controller.applications())
- .notUpgradingTo(statistics.version())
- .notFailing();
- InstanceList failingOnThis = InstanceList.from(statistics.failing(), controller.applications());
- InstanceList all = InstanceList.from(controller.applications().asList())
- .hasDeployment();
+ ApplicationList productionOnThis = ApplicationList.from(statistics.production(), controller.applications())
+ .notUpgradingTo(statistics.version())
+ .notFailing();
+ ApplicationList failingOnThis = ApplicationList.from(statistics.failing(), controller.applications());
+ ApplicationList all = ApplicationList.from(controller.applications().asList())
+ .withProductionDeployment();
// 'broken' if any Canary fails
if ( ! failingOnThis.with(UpgradePolicy.canary).isEmpty())
@@ -162,10 +162,10 @@ public class VespaVersion implements Comparable<VespaVersion> {
}
private static boolean nonCanaryApplicationsBroken(Version version,
- InstanceList failingOnThis,
- InstanceList productionOnThis) {
- InstanceList failingNonCanaries = failingOnThis.without(UpgradePolicy.canary).startedFailingOn(version);
- InstanceList productionNonCanaries = productionOnThis.without(UpgradePolicy.canary);
+ ApplicationList failingOnThis,
+ ApplicationList productionOnThis) {
+ ApplicationList failingNonCanaries = failingOnThis.without(UpgradePolicy.canary).startedFailingOn(version);
+ ApplicationList productionNonCanaries = productionOnThis.without(UpgradePolicy.canary);
if (productionNonCanaries.size() + failingNonCanaries.size() == 0) return false;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java
index 57994e181c5..5d82225c9d5 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java
@@ -7,7 +7,8 @@ import com.yahoo.config.application.api.DeploymentSpec;
import com.yahoo.config.application.api.ValidationId;
import com.yahoo.config.application.api.ValidationOverrides;
import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.config.provision.ApplicationName;
+import com.yahoo.config.provision.AthenzDomain;
+import com.yahoo.config.provision.AthenzService;
import com.yahoo.config.provision.CloudName;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.InstanceName;
@@ -15,6 +16,7 @@ import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.zone.ZoneId;
+import com.yahoo.slime.JsonFormat;
import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.hosted.controller.api.application.v4.model.DeployOptions;
@@ -22,31 +24,44 @@ import com.yahoo.vespa.hosted.controller.api.application.v4.model.EndpointStatus
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.api.integration.certificates.ApplicationCertificate;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.SourceRevision;
import com.yahoo.vespa.hosted.controller.api.integration.dns.Record;
+import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueId;
+import com.yahoo.vespa.hosted.controller.api.integration.organization.User;
import com.yahoo.vespa.hosted.controller.api.integration.routing.RoutingEndpoint;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.AssignedRotation;
+import com.yahoo.vespa.hosted.controller.application.Change;
import com.yahoo.vespa.hosted.controller.application.Deployment;
+import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs.JobError;
import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics;
import com.yahoo.vespa.hosted.controller.application.JobStatus;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
import com.yahoo.vespa.hosted.controller.deployment.BuildJob;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
import com.yahoo.vespa.hosted.controller.integration.ZoneApiMock;
+import com.yahoo.vespa.hosted.controller.metric.ApplicationMetrics;
+import com.yahoo.vespa.hosted.controller.persistence.InstanceSerializer;
import com.yahoo.vespa.hosted.controller.persistence.MockCuratorDb;
import com.yahoo.vespa.hosted.controller.persistence.OldMockCuratorDb;
import com.yahoo.vespa.hosted.controller.rotation.RotationId;
import com.yahoo.vespa.hosted.controller.rotation.RotationLock;
+import com.yahoo.vespa.hosted.controller.rotation.RotationState;
+import com.yahoo.vespa.hosted.controller.rotation.RotationStatus;
import org.junit.Test;
+import java.io.IOException;
import java.time.Duration;
import java.time.Instant;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Optional;
+import java.util.OptionalInt;
+import java.util.OptionalLong;
import java.util.Set;
import java.util.function.Function;
import java.util.function.Supplier;
@@ -57,6 +72,7 @@ import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobTy
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.productionUsWest1;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.stagingTest;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.systemTest;
+import static java.nio.charset.StandardCharsets.UTF_8;
import static java.time.temporal.ChronoUnit.MILLIS;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@@ -85,41 +101,42 @@ public class ControllerTest {
// staging job - succeeding
Version version1 = tester.defaultPlatformVersion();
- Instance app1 = tester.createApplication("app1", "tenant1", 1, 11L);
+ Application app1 = tester.createApplication("app1", "tenant1", 1, 11L);
+ Instance instance = tester.defaultInstance(app1.id());
tester.jobCompletion(component).application(app1).uploadArtifact(applicationPackage).submit();
assertEquals("Application version is known from completion of initial job",
ApplicationVersion.from(BuildJob.defaultSourceRevision, BuildJob.defaultBuildNumber),
- tester.controller().applications().require(app1.id()).change().application().get());
- tester.deployAndNotify(app1, applicationPackage, true, systemTest);
- tester.deployAndNotify(app1, applicationPackage, true, stagingTest);
- assertEquals(4, applications.require(app1.id()).deploymentJobs().jobStatus().size());
+ tester.application(app1.id()).change().application().get());
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, stagingTest);
+ assertEquals(4, tester.defaultInstance(app1.id()).deploymentJobs().jobStatus().size());
- ApplicationVersion applicationVersion = tester.controller().applications().require(app1.id()).change().application().get();
+ ApplicationVersion applicationVersion = tester.application(app1.id()).change().application().get();
assertFalse("Application version has been set during deployment", applicationVersion.isUnknown());
assertStatus(JobStatus.initial(stagingTest)
.withTriggering(version1, applicationVersion, Optional.empty(),"", tester.clock().instant().truncatedTo(MILLIS))
- .withCompletion(42, Optional.empty(), tester.clock().instant().truncatedTo(MILLIS)), app1.id(), tester.controller());
+ .withCompletion(42, Optional.empty(), tester.clock().instant().truncatedTo(MILLIS)), app1.id().defaultInstance(), tester.controller());
// Causes first deployment job to be triggered
assertStatus(JobStatus.initial(productionUsWest1)
- .withTriggering(version1, applicationVersion, Optional.empty(), "", tester.clock().instant().truncatedTo(MILLIS)), app1.id(), tester.controller());
+ .withTriggering(version1, applicationVersion, Optional.empty(), "", tester.clock().instant().truncatedTo(MILLIS)), app1.id().defaultInstance(), tester.controller());
tester.clock().advance(Duration.ofSeconds(1));
// production job (failing) after deployment
- tester.deploy(productionUsWest1, app1, applicationPackage);
- tester.deployAndNotify(app1, applicationPackage, false, productionUsWest1);
- assertEquals(4, applications.require(app1.id()).deploymentJobs().jobStatus().size());
+ tester.deploy(productionUsWest1, instance.id(), applicationPackage);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), false, productionUsWest1);
+ assertEquals(4, tester.defaultInstance(app1.id()).deploymentJobs().jobStatus().size());
JobStatus expectedJobStatus = JobStatus.initial(productionUsWest1)
.withTriggering(version1, applicationVersion, Optional.empty(), "", tester.clock().instant().truncatedTo(MILLIS)) // Triggered first without application version info
.withCompletion(42, Optional.of(JobError.unknown), tester.clock().instant().truncatedTo(MILLIS))
.withTriggering(version1,
applicationVersion,
- Optional.of(tester.application(app1.id()).deployments().get(productionUsWest1.zone(main))),
+ Optional.of(tester.defaultInstance(app1.id()).deployments().get(productionUsWest1.zone(main))),
"",
tester.clock().instant().truncatedTo(MILLIS)); // Re-triggering (due to failure) has application version info
- assertStatus(expectedJobStatus, app1.id(), tester.controller());
+ assertStatus(expectedJobStatus, app1.id().defaultInstance(), tester.controller());
// Simulate restart
tester.restartController();
@@ -127,40 +144,38 @@ public class ControllerTest {
applications = tester.controller().applications();
assertNotNull(tester.controller().tenants().get(TenantName.from("tenant1")));
- assertNotNull(applications.get(ApplicationId.from(TenantName.from("tenant1"),
- ApplicationName.from("application1"),
- InstanceName.from("default"))));
- assertEquals(4, applications.require(app1.id()).deploymentJobs().jobStatus().size());
+ assertNotNull(tester.defaultInstance(app1.id()));
+ assertEquals(4, tester.defaultInstance(app1.id()).deploymentJobs().jobStatus().size());
tester.clock().advance(Duration.ofHours(1));
// system and staging test job - succeeding
tester.jobCompletion(component).application(app1).nextBuildNumber().uploadArtifact(applicationPackage).submit();
- applicationVersion = tester.application("app1").change().application().get();
- tester.deployAndNotify(app1, applicationPackage, true, systemTest);
+ applicationVersion = tester.defaultInstance("app1").change().application().get();
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, systemTest);
assertStatus(JobStatus.initial(systemTest)
- .withTriggering(version1, applicationVersion, Optional.of(tester.application(app1.id()).deployments().get(productionUsWest1.zone(main))), "", tester.clock().instant().truncatedTo(MILLIS))
+ .withTriggering(version1, applicationVersion, Optional.of(tester.defaultInstance(app1.id()).deployments().get(productionUsWest1.zone(main))), "", tester.clock().instant().truncatedTo(MILLIS))
.withCompletion(42, Optional.empty(), tester.clock().instant().truncatedTo(MILLIS)),
- app1.id(), tester.controller());
+ app1.id().defaultInstance(), tester.controller());
tester.clock().advance(Duration.ofHours(1)); // Stop retrying
tester.jobCompletion(productionUsWest1).application(app1).unsuccessful().submit();
- tester.deployAndNotify(app1, applicationPackage, true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, stagingTest);
// production job succeeding now
expectedJobStatus = expectedJobStatus
- .withTriggering(version1, applicationVersion, Optional.of(tester.application(app1.id()).deployments().get(productionUsWest1.zone(main))), "", tester.clock().instant().truncatedTo(MILLIS))
+ .withTriggering(version1, applicationVersion, Optional.of(tester.defaultInstance(app1.id()).deployments().get(productionUsWest1.zone(main))), "", tester.clock().instant().truncatedTo(MILLIS))
.withCompletion(42, Optional.empty(), tester.clock().instant().truncatedTo(MILLIS));
- tester.deployAndNotify(app1, applicationPackage, true, productionUsWest1);
- assertStatus(expectedJobStatus, app1.id(), tester.controller());
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsWest1);
+ assertStatus(expectedJobStatus, app1.id().defaultInstance(), tester.controller());
// causes triggering of next production job
assertStatus(JobStatus.initial(productionUsEast3)
.withTriggering(version1, applicationVersion, Optional.empty(), "", tester.clock().instant().truncatedTo(MILLIS)),
- app1.id(), tester.controller());
- tester.deployAndNotify(app1, applicationPackage, true, productionUsEast3);
+ app1.id().defaultInstance(), tester.controller());
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsEast3);
- assertEquals(5, applications.get(app1.id()).get().deploymentJobs().jobStatus().size());
+ assertEquals(5, tester.defaultInstance(app1.id()).deploymentJobs().jobStatus().size());
// Production zone for which there is no JobType is not allowed.
applicationPackage = new ApplicationPackageBuilder()
@@ -168,7 +183,7 @@ public class ControllerTest {
.region("deep-space-9")
.build();
try {
- tester.controller().jobController().submit(app1.id(), BuildJob.defaultSourceRevision, "a@b",
+ tester.controller().jobController().submit(app1.id().defaultInstance(), BuildJob.defaultSourceRevision, "a@b",
2, applicationPackage, new byte[0]);
fail("Expected exception due to illegal deployment spec.");
}
@@ -183,7 +198,8 @@ public class ControllerTest {
.build();
tester.jobCompletion(component).application(app1).nextBuildNumber().nextBuildNumber().uploadArtifact(applicationPackage).submit();
try {
- tester.deploy(systemTest, app1, applicationPackage);
+ assertTrue(tester.instance(instance.id()).deployments().containsKey(ZoneId.from("prod", "us-west-1")));
+ tester.deploy(systemTest, instance.id(), applicationPackage);
fail("Expected exception due to illegal production deployment removal");
}
catch (IllegalArgumentException e) {
@@ -192,8 +208,8 @@ public class ControllerTest {
e.getMessage());
}
assertNotNull("Zone was not removed",
- applications.require(app1.id()).deployments().get(productionUsWest1.zone(main)));
- JobStatus jobStatus = applications.require(app1.id()).deploymentJobs().jobStatus().get(productionUsWest1);
+ tester.defaultInstance(app1.id()).deployments().get(productionUsWest1.zone(main)));
+ JobStatus jobStatus = tester.defaultInstance(app1.id()).deploymentJobs().jobStatus().get(productionUsWest1);
assertNotNull("Deployment job was not removed", jobStatus);
assertEquals(42, jobStatus.lastCompleted().get().id());
assertEquals("New change available", jobStatus.lastCompleted().get().reason());
@@ -206,15 +222,15 @@ public class ControllerTest {
.region("us-east-3")
.build();
tester.jobCompletion(component).application(app1).nextBuildNumber(2).uploadArtifact(applicationPackage).submit();
- tester.deployAndNotify(app1, applicationPackage, true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, systemTest);
assertNull("Zone was removed",
- applications.require(app1.id()).deployments().get(productionUsWest1.zone(main)));
- assertNull("Deployment job was removed", applications.require(app1.id()).deploymentJobs().jobStatus().get(productionUsWest1));
+ tester.defaultInstance(app1.id()).deployments().get(productionUsWest1.zone(main)));
+ assertNull("Deployment job was removed", tester.defaultInstance(app1.id()).deploymentJobs().jobStatus().get(productionUsWest1));
}
@Test
public void testDeploymentApplicationVersion() {
- Instance app = tester.createApplication("app1", "tenant1", 1, 11L);
+ Application app = tester.createApplication("app1", "tenant1", 1, 11L);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-west-1")
@@ -225,11 +241,11 @@ public class ControllerTest {
ApplicationVersion applicationVersion = ApplicationVersion.from(source, 101);
runDeployment(tester, app.id(), applicationVersion, applicationPackage, source,101);
assertEquals("Artifact is downloaded twice in staging and once for other zones", 5,
- tester.controllerTester().serviceRegistry().artifactRepositoryMock().hits(app.id(), applicationVersion.id()));
+ tester.controllerTester().serviceRegistry().artifactRepositoryMock().hits(app.id().defaultInstance(), applicationVersion.id()));
// Application is upgraded. This makes deployment orchestration pick the last successful application version in
// zones which do not have permanent deployments, e.g. test and staging
- runUpgrade(tester, app.id(), applicationVersion);
+ runUpgrade(tester, app.id().defaultInstance(), applicationVersion);
}
@Test
@@ -282,7 +298,7 @@ public class ControllerTest {
@Test
public void testDnsAliasRegistration() {
- Instance instance = tester.createApplication("app1", "tenant1", 1, 1L);
+ Application application = tester.createApplication("app1", "tenant1", 1, 1L);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
@@ -291,14 +307,14 @@ public class ControllerTest {
.region("us-central-1") // Two deployments should result in each DNS alias being registered once
.build();
- tester.deployCompletely(instance, applicationPackage);
- Collection<Deployment> deployments = tester.application(instance.id()).deployments().values();
+ tester.deployCompletely(application, applicationPackage);
+ Collection<Deployment> deployments = tester.defaultInstance(application.id()).deployments().values();
assertFalse(deployments.isEmpty());
for (Deployment deployment : deployments) {
assertEquals("Rotation names are passed to config server in " + deployment.zone(),
Set.of("rotation-id-01",
"app1--tenant1.global.vespa.oath.cloud"),
- tester.configServer().rotationNames().get(new DeploymentId(instance.id(), deployment.zone())));
+ tester.configServer().rotationNames().get(new DeploymentId(application.id().defaultInstance(), deployment.zone())));
}
tester.flushDnsRequests();
@@ -312,7 +328,7 @@ public class ControllerTest {
@Test
public void testDnsAliasRegistrationLegacy() {
- Instance instance = tester.createApplication("app1", "tenant1", 1, 1L);
+ Application application = tester.createApplication("app1", "tenant1", 1, 1L);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
@@ -321,8 +337,8 @@ public class ControllerTest {
.region("us-central-1") // Two deployments should result in each DNS alias being registered once
.build();
- tester.deployCompletely(instance, applicationPackage);
- Collection<Deployment> deployments = tester.application(instance.id()).deployments().values();
+ tester.deployCompletely(application, applicationPackage);
+ Collection<Deployment> deployments = tester.defaultInstance(application.id()).deployments().values();
assertFalse(deployments.isEmpty());
for (Deployment deployment : deployments) {
assertEquals("Rotation names are passed to config server in " + deployment.zone(),
@@ -330,7 +346,7 @@ public class ControllerTest {
"app1--tenant1.global.vespa.oath.cloud",
"app1.tenant1.global.vespa.yahooapis.com",
"app1--tenant1.global.vespa.yahooapis.com"),
- tester.configServer().rotationNames().get(new DeploymentId(instance.id(), deployment.zone())));
+ tester.configServer().rotationNames().get(new DeploymentId(application.id().defaultInstance(), deployment.zone())));
}
tester.flushDnsRequests();
assertEquals(3, tester.controllerTester().nameService().records().size());
@@ -353,7 +369,7 @@ public class ControllerTest {
@Test
public void testDnsAliasRegistrationWithEndpoints() {
- Instance instance = tester.createApplication("app1", "tenant1", 1, 1L);
+ Application application = tester.createApplication("app1", "tenant1", 1, 1L);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
@@ -365,8 +381,8 @@ public class ControllerTest {
.region("us-central-1")
.build();
- tester.deployCompletely(instance, applicationPackage);
- Collection<Deployment> deployments = tester.application(instance.id()).deployments().values();
+ tester.deployCompletely(application, applicationPackage);
+ Collection<Deployment> deployments = tester.defaultInstance(application.id()).deployments().values();
assertFalse(deployments.isEmpty());
var notWest = Set.of(
@@ -379,7 +395,7 @@ public class ControllerTest {
for (Deployment deployment : deployments) {
assertEquals("Rotation names are passed to config server in " + deployment.zone(),
ZoneId.from("prod.us-west-1").equals(deployment.zone()) ? west : notWest,
- tester.configServer().rotationNames().get(new DeploymentId(instance.id(), deployment.zone())));
+ tester.configServer().rotationNames().get(new DeploymentId(application.id().defaultInstance(), deployment.zone())));
}
tester.flushDnsRequests();
@@ -408,7 +424,7 @@ public class ControllerTest {
@Test
public void testDnsAliasRegistrationWithChangingZones() {
- Instance instance = tester.createApplication("app1", "tenant1", 1, 1L);
+ Application application = tester.createApplication("app1", "tenant1", 1, 1L);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
@@ -417,16 +433,16 @@ public class ControllerTest {
.region("us-central-1")
.build();
- tester.deployCompletely(instance, applicationPackage);
+ tester.deployCompletely(application, applicationPackage);
assertEquals(
Set.of("rotation-id-01", "app1--tenant1.global.vespa.oath.cloud"),
- tester.configServer().rotationNames().get(new DeploymentId(instance.id(), ZoneId.from("prod", "us-west-1")))
+ tester.configServer().rotationNames().get(new DeploymentId(application.id().defaultInstance(), ZoneId.from("prod", "us-west-1")))
);
assertEquals(
Set.of("rotation-id-01", "app1--tenant1.global.vespa.oath.cloud"),
- tester.configServer().rotationNames().get(new DeploymentId(instance.id(), ZoneId.from("prod", "us-central-1")))
+ tester.configServer().rotationNames().get(new DeploymentId(application.id().defaultInstance(), ZoneId.from("prod", "us-central-1")))
);
@@ -437,24 +453,24 @@ public class ControllerTest {
.region("us-central-1")
.build();
- tester.deployCompletely(instance, applicationPackage2, BuildJob.defaultBuildNumber + 1);
+ tester.deployCompletely(application, applicationPackage2, BuildJob.defaultBuildNumber + 1);
assertEquals(
Set.of("rotation-id-01", "app1--tenant1.global.vespa.oath.cloud"),
- tester.configServer().rotationNames().get(new DeploymentId(instance.id(), ZoneId.from("prod", "us-west-1")))
+ tester.configServer().rotationNames().get(new DeploymentId(application.id().defaultInstance(), ZoneId.from("prod", "us-west-1")))
);
assertEquals(
Set.of(),
- tester.configServer().rotationNames().get(new DeploymentId(instance.id(), ZoneId.from("prod", "us-central-1")))
+ tester.configServer().rotationNames().get(new DeploymentId(application.id().defaultInstance(), ZoneId.from("prod", "us-central-1")))
);
- assertEquals(Set.of(RegionName.from("us-west-1")), tester.application(instance.id()).rotations().get(0).regions());
+ assertEquals(Set.of(RegionName.from("us-west-1")), tester.defaultInstance(application.id()).rotations().get(0).regions());
}
@Test
public void testUnassignRotations() {
- Instance instance = tester.createApplication("app1", "tenant1", 1, 1L);
+ Application application = tester.createApplication("app1", "tenant1", 1, 1L);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
@@ -463,7 +479,7 @@ public class ControllerTest {
.region("us-central-1") // Two deployments should result in each DNS alias being registered once
.build();
- tester.deployCompletely(instance, applicationPackage);
+ tester.deployCompletely(application, applicationPackage);
ApplicationPackage applicationPackage2 = new ApplicationPackageBuilder()
.environment(Environment.prod)
@@ -471,17 +487,17 @@ public class ControllerTest {
.region("us-central-1") // Two deployments should result in each DNS alias being registered once
.build();
- tester.deployCompletely(instance, applicationPackage2, BuildJob.defaultBuildNumber + 1);
+ tester.deployCompletely(application, applicationPackage2, BuildJob.defaultBuildNumber + 1);
assertEquals(
List.of(AssignedRotation.fromStrings("qrs", "default", "rotation-id-01", Set.of())),
- tester.application(instance.id()).rotations()
+ tester.defaultInstance(application.id()).rotations()
);
assertEquals(
Set.of(),
- tester.configServer().rotationNames().get(new DeploymentId(instance.id(), ZoneId.from("prod", "us-west-1")))
+ tester.configServer().rotationNames().get(new DeploymentId(application.id().defaultInstance(), ZoneId.from("prod", "us-west-1")))
);
}
@@ -489,7 +505,7 @@ public class ControllerTest {
public void testUpdatesExistingDnsAlias() {
// Application 1 is deployed and deleted
{
- Instance app1 = tester.createApplication("app1", "tenant1", 1, 1L);
+ Application app1 = tester.createApplication("app1", "tenant1", 1, 1L);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.endpoint("default", "foo")
@@ -511,9 +527,9 @@ public class ControllerTest {
.allow(ValidationId.deploymentRemoval)
.build();
tester.jobCompletion(component).application(app1).nextBuildNumber().uploadArtifact(applicationPackage).submit();
- tester.deployAndNotify(app1, applicationPackage, true, systemTest);
- tester.applications().deactivate(app1.id(), ZoneId.from(Environment.test, RegionName.from("us-east-1")));
- tester.applications().deactivate(app1.id(), ZoneId.from(Environment.staging, RegionName.from("us-east-3")));
+ tester.deployAndNotify(tester.defaultInstance(app1.id()).id(), Optional.of(applicationPackage), true, systemTest);
+ tester.applications().deactivate(app1.id().defaultInstance(), ZoneId.from(Environment.test, RegionName.from("us-east-1")));
+ tester.applications().deactivate(app1.id().defaultInstance(), ZoneId.from(Environment.staging, RegionName.from("us-east-3")));
tester.applications().deleteApplication(app1.id().tenant(), app1.id().application(), tester.controllerTester().credentialsFor(app1.id()));
try (RotationLock lock = tester.applications().rotationRepository().lock()) {
assertTrue("Rotation is unassigned",
@@ -535,7 +551,7 @@ public class ControllerTest {
// Application 2 is deployed and assigned same rotation as application 1 had before deletion
{
- Instance app2 = tester.createApplication("app2", "tenant2", 2, 1L);
+ Application app2 = tester.createApplication("app2", "tenant2", 2, 1L);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.endpoint("default", "foo")
@@ -554,7 +570,7 @@ public class ControllerTest {
// Application 1 is recreated, deployed and assigned a new rotation
{
tester.buildService().clear();
- Instance app1 = tester.createApplication("app1", "tenant1", 1, 1L);
+ Application app1 = tester.createApplication("app1", "tenant1", 1, 1L);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.endpoint("default", "foo")
@@ -562,8 +578,7 @@ public class ControllerTest {
.region("us-central-1")
.build();
tester.deployCompletely(app1, applicationPackage);
- app1 = tester.applications().require(app1.id());
- assertEquals("rotation-id-02", app1.rotations().get(0).rotationId().asString());
+ assertEquals("rotation-id-02", tester.defaultInstance(app1.id()).rotations().get(0).rotationId().asString());
// DNS records are created for the newly assigned rotation
assertEquals(2, tester.controllerTester().nameService().records().size());
@@ -592,25 +607,25 @@ public class ControllerTest {
.build();
// Create application
- Instance app = tester.createApplication("app1", "tenant1", 1, 2L);
+ Application app = tester.createApplication("app1", "tenant1", 1, 2L);
// Direct deploy is allowed when deployDirectly is true
ZoneId zone = ZoneId.from("prod", "cd-us-central-1");
// Same options as used in our integration tests
DeployOptions options = new DeployOptions(true, Optional.empty(), false,
false);
- tester.controller().applications().deploy(app.id(), zone, Optional.of(applicationPackage), options);
+ tester.controller().applications().deploy(app.id().defaultInstance(), zone, Optional.of(applicationPackage), options);
assertTrue("Application deployed and activated",
- tester.controllerTester().configServer().application(app.id(), zone).get().activated());
+ tester.controllerTester().configServer().application(app.id().defaultInstance(), zone).get().activated());
assertTrue("No job status added",
- tester.applications().require(app.id()).deploymentJobs().jobStatus().isEmpty());
+ tester.applications().requireInstance(app.id().defaultInstance()).deploymentJobs().jobStatus().isEmpty());
Version seven = Version.fromString("7.2");
tester.upgradeSystem(seven);
- tester.controller().applications().deploy(app.id(), zone, Optional.of(applicationPackage), options);
- assertEquals(six, tester.application(app.id()).deployments().get(zone).version());
+ tester.controller().applications().deploy(app.id().defaultInstance(), zone, Optional.of(applicationPackage), options);
+ assertEquals(six, tester.defaultInstance(app.id()).deployments().get(zone).version());
}
@Test
@@ -622,21 +637,21 @@ public class ControllerTest {
.build();
// Create application
- Instance app = tester.createApplication("app1", "tenant1", 1, 2L);
+ Application app = tester.createApplication("app1", "tenant1", 1, 2L);
ZoneId zone = ZoneId.from("dev", "us-east-1");
// Deploy
- tester.controller().applications().deploy(app.id(), zone, Optional.of(applicationPackage), DeployOptions.none());
+ tester.controller().applications().deploy(app.id().defaultInstance(), zone, Optional.of(applicationPackage), DeployOptions.none());
assertTrue("Application deployed and activated",
- tester.controllerTester().configServer().application(app.id(), zone).get().activated());
+ tester.controllerTester().configServer().application(app.id().defaultInstance(), zone).get().activated());
assertTrue("No job status added",
- tester.applications().require(app.id()).deploymentJobs().jobStatus().isEmpty());
- assertEquals("DeploymentSpec is not persisted", DeploymentSpec.empty, tester.applications().require(app.id()).deploymentSpec());
+ tester.defaultInstance(app.id()).deploymentJobs().jobStatus().isEmpty());
+ assertEquals("DeploymentSpec is not persisted", DeploymentSpec.empty, tester.application(app.id()).deploymentSpec());
}
@Test
public void testSuspension() {
- Instance app = tester.createApplication("app1", "tenant1", 1, 11L);
+ Application app = tester.createApplication("app1", "tenant1", 1, 11L);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-west-1")
@@ -647,8 +662,8 @@ public class ControllerTest {
ApplicationVersion applicationVersion = ApplicationVersion.from(source, 101);
runDeployment(tester, app.id(), applicationVersion, applicationPackage, source,101);
- DeploymentId deployment1 = new DeploymentId(app.id(), ZoneId.from(Environment.prod, RegionName.from("us-west-1")));
- DeploymentId deployment2 = new DeploymentId(app.id(), ZoneId.from(Environment.prod, RegionName.from("us-east-3")));
+ DeploymentId deployment1 = new DeploymentId(app.id().defaultInstance(), ZoneId.from(Environment.prod, RegionName.from("us-west-1")));
+ DeploymentId deployment2 = new DeploymentId(app.id().defaultInstance(), ZoneId.from(Environment.prod, RegionName.from("us-east-3")));
assertFalse(tester.configServer().isSuspended(deployment1));
assertFalse(tester.configServer().isSuspended(deployment2));
tester.configServer().setSuspended(deployment1, true);
@@ -660,7 +675,7 @@ public class ControllerTest {
// second time will not fail
@Test
public void testDeletingApplicationThatHasAlreadyBeenDeleted() {
- Instance app = tester.createApplication("app2", "tenant1", 1, 12L);
+ Application app = tester.createApplication("app2", "tenant1", 1, 12L);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-east-3")
@@ -668,33 +683,33 @@ public class ControllerTest {
.build();
ZoneId zone = ZoneId.from("prod", "us-west-1");
- tester.controller().applications().deploy(app.id(), zone, Optional.of(applicationPackage), DeployOptions.none());
- tester.controller().applications().deactivate(app.id(), ZoneId.from(Environment.prod, RegionName.from("us-west-1")));
- tester.controller().applications().deactivate(app.id(), ZoneId.from(Environment.prod, RegionName.from("us-west-1")));
+ tester.controller().applications().deploy(app.id().defaultInstance(), zone, Optional.of(applicationPackage), DeployOptions.none());
+ tester.controller().applications().deactivate(app.id().defaultInstance(), ZoneId.from(Environment.prod, RegionName.from("us-west-1")));
+ tester.controller().applications().deactivate(app.id().defaultInstance(), ZoneId.from(Environment.prod, RegionName.from("us-west-1")));
}
@Test
public void testDeployApplicationPackageWithApplicationDir() {
- Instance instance = tester.createApplication("app1", "tenant1", 1, 1L);
+ Application application = tester.createApplication("app1", "tenant1", 1, 1L);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-west-1")
.build(true);
- tester.deployCompletely(instance, applicationPackage);
+ tester.deployCompletely(application, applicationPackage);
}
@Test
public void testDeployApplicationWithWarnings() {
- Instance instance = tester.createApplication("app1", "tenant1", 1, 1L);
+ Application application = tester.createApplication("app1", "tenant1", 1, 1L);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-west-1")
.build();
ZoneId zone = ZoneId.from("prod", "us-west-1");
int warnings = 3;
- tester.configServer().generateWarnings(new DeploymentId(instance.id(), zone), warnings);
- tester.deployCompletely(instance, applicationPackage);
- assertEquals(warnings, tester.applications().require(instance.id()).deployments().get(zone)
+ tester.configServer().generateWarnings(new DeploymentId(application.id().defaultInstance(), zone), warnings);
+ tester.deployCompletely(application, applicationPackage);
+ assertEquals(warnings, tester.applications().requireInstance(application.id().defaultInstance()).deployments().get(zone)
.metrics().warnings().get(DeploymentMetrics.Warning.all).intValue());
}
@@ -710,7 +725,8 @@ public class ControllerTest {
.build();
// Deploy app1 in production
tester.deployCompletely(app1, applicationPackage);
- var cert = certificate.apply(app1);
+ Instance instance1 = tester.defaultInstance(app1.id());
+ var cert = certificate.apply(instance1);
assertTrue("Provisions certificate in " + Environment.prod, cert.isPresent());
assertEquals(List.of(
"vznqtz7a5ygwjkbhhj7ymxvlrekgt4l6g.vespa.oath.cloud",
@@ -724,21 +740,22 @@ public class ControllerTest {
"*.app1.tenant1.us-central-1.vespa.oath.cloud",
"app1.tenant1.eu-west-1.vespa.oath.cloud",
"*.app1.tenant1.eu-west-1.vespa.oath.cloud"
- ), tester.controllerTester().serviceRegistry().applicationCertificateMock().dnsNamesOf(app1.id()));
+ ), tester.controllerTester().serviceRegistry().applicationCertificateMock().dnsNamesOf(app1.id().defaultInstance()));
// Next deployment reuses certificate
tester.deployCompletely(app1, applicationPackage, BuildJob.defaultBuildNumber + 1);
- assertEquals(cert, certificate.apply(app1));
+ assertEquals(cert, certificate.apply(instance1));
// Create app2
var app2 = tester.createApplication("app2", "tenant2", 3, 4L);
+ Instance instance2 = tester.defaultInstance(app2.id());
ZoneId zone = ZoneId.from("dev", "us-east-1");
// Deploy app2 in dev
- tester.controller().applications().deploy(app2.id(), zone, Optional.of(applicationPackage), DeployOptions.none());
+ tester.controller().applications().deploy(app2.id().defaultInstance(), zone, Optional.of(applicationPackage), DeployOptions.none());
assertTrue("Application deployed and activated",
- tester.controllerTester().configServer().application(app2.id(), zone).get().activated());
- assertFalse("Does not provision certificate in " + Environment.dev, certificate.apply(app2).isPresent());
+ tester.controllerTester().configServer().application(app2.id().defaultInstance(), zone).get().activated());
+ assertFalse("Does not provision certificate in " + Environment.dev, certificate.apply(instance2).isPresent());
}
@Test
@@ -777,47 +794,109 @@ public class ControllerTest {
@Test
- public void testInstanceDataMigration() {
+ public void testInstanceDataMigration() throws IOException {
+ /*
+ Set up initial state, using old DB:
+ Create two instances of an application; the default will be the base.
+
+ Read, modify and write the application using the new DB.
+ Verify results using both old and new DBs.
+ */
+
+ ApplicationPackage applicationPackage = new ApplicationPackageBuilder().allow(ValidationId.contentClusterRemoval)
+ .athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"))
+ .endpoint("endpoint", "container", "us-east-1")
+ .region("us-east-1")
+ .build();
+ ApplicationId defaultId = ApplicationId.from("t1", "a1", "default");
+ Instance old1 = new Instance(defaultId,
+ Instant.ofEpochMilli(123),
+ applicationPackage.deploymentSpec(),
+ applicationPackage.validationOverrides(),
+ List.of(new Deployment(ZoneId.from("prod", "us-east-1"),
+ ApplicationVersion.from(new SourceRevision("repo", "branch", "commit"), 3),
+ Version.fromString("7.8.9"),
+ Instant.ofEpochMilli(321))),
+ new DeploymentJobs(OptionalLong.of(72),
+ List.of(new JobStatus(JobType.productionAwsUsEast1a,
+ Optional.empty(),
+ Optional.of(new JobStatus.JobRun(32,
+ Version.fromString("7.8.9"),
+ ApplicationVersion.unknown,
+ Optional.empty(),
+ Optional.empty(),
+ "make the job great again",
+ Instant.ofEpochMilli(200))),
+ Optional.empty(),
+ Optional.empty(),
+ Optional.empty(),
+ OptionalLong.empty())),
+ Optional.of(IssueId.from("issue")),
+ true),
+ Change.of(Version.fromString("9")),
+ Change.empty(),
+ Optional.of(IssueId.from("tissue")),
+ Optional.of(User.from("user")),
+ OptionalInt.of(3),
+ new ApplicationMetrics(2, 3),
+ Optional.of("key"),
+ List.of(AssignedRotation.fromStrings("container", "endpoint", "rot13", List.of("us-east-1"))),
+ RotationStatus.from(Map.of(new RotationId("rot13"), Map.of(ZoneId.from("prod", "us-east-1"), RotationState.in))));
+
+ Instance old2 = new Instance(ApplicationId.from("t1", "a1", "i1"),
+ Instant.ofEpochMilli(400));
+
+
+ InstanceSerializer instanceSerializer = new InstanceSerializer();
+ String old1Serialized = new String(JsonFormat.toJsonBytes(instanceSerializer.toSlime(old1)), UTF_8);
+
MockCuratorDb newDb = new MockCuratorDb();
OldMockCuratorDb oldDb = new OldMockCuratorDb(newDb.curator());
- Instance instance1 = new Instance(ApplicationId.from("tenant1", "application1", "instance1"), Instant.ofEpochMilli(1));
- Instance instance2 = new Instance(ApplicationId.from("tenant2", "application2", "instance2"), Instant.ofEpochMilli(2));
-
- oldDb.writeInstance(instance1);
- newDb.writeInstance(instance2);
-
- assertEquals(instance1, oldDb.readInstance(instance1.id()).orElseThrow());
- assertEquals(instance1, newDb.readInstance(instance1.id()).orElseThrow());
-
- assertEquals(instance2, oldDb.readInstance(instance2.id()).orElseThrow());
- assertEquals(instance2, newDb.readInstance(instance2.id()).orElseThrow());
-
- assertEquals(List.of(instance1, instance2), oldDb.readInstances());
- assertEquals(List.of(instance1, instance2), newDb.readInstances());
-
- instance1 = new Instance(instance1.id(), Instant.ofEpochMilli(3));
- oldDb.writeInstance(instance1);
- assertEquals(instance1, oldDb.readInstance(instance1.id()).orElseThrow());
- assertEquals(instance1, newDb.readInstance(instance1.id()).orElseThrow());
-
- instance2 = new Instance(instance2.id(), Instant.ofEpochMilli(4));
- newDb.writeInstance(instance2);
- assertEquals(instance2, oldDb.readInstance(instance2.id()).orElseThrow());
- assertEquals(instance2, newDb.readInstance(instance2.id()).orElseThrow());
+ oldDb.writeInstance(old1);
+ oldDb.writeInstance(old2);
+
+ Application application = newDb.readApplication(TenantAndApplicationId.from("t1", "a1")).orElseThrow();
+ Instance new1 = application.legacy(InstanceName.defaultName());
+ String new1Serialized = new String(JsonFormat.toJsonBytes(instanceSerializer.toSlime(new1)), UTF_8);
+ assertEquals(old1Serialized, new1Serialized);
+
+ LockedApplication locked = new LockedApplication(application, newDb.lock(application.id()));
+ newDb.writeApplication(locked.with(new ApplicationMetrics(8, 9)).get());
+ Instance mod1 = oldDb.readInstance(old1.id()).orElseThrow();
+ Instance mod2 = oldDb.readInstance(old2.id()).orElseThrow();
+
+ old1 = old1.with(new ApplicationMetrics(8, 9));
+ old1Serialized = new String(JsonFormat.toJsonBytes(instanceSerializer.toSlime(old1)), UTF_8);
+ String mod1Serialized = new String(JsonFormat.toJsonBytes(instanceSerializer.toSlime(mod1)), UTF_8);
+ assertEquals(old1Serialized, mod1Serialized);
+
+ assertEquals(old1.createdAt(), mod2.createdAt());
+ assertEquals(old1.change(), mod2.change());
+ assertEquals(old1.outstandingChange(), mod2.outstandingChange());
+ assertEquals(old1.deploymentSpec(), mod2.deploymentSpec());
+ assertEquals(old2.deployments(), mod2.deployments());
+ assertEquals(old2.deploymentJobs().jobStatus(), mod2.deploymentJobs().jobStatus());
+
+ newDb.removeApplication(old1.id());
+ assertEquals(1, newDb.readApplication(application.id()).orElseThrow().instances().size());
+ newDb.removeApplication(old2.id());
+ assertTrue(newDb.readApplication(application.id()).isEmpty());
+ assertTrue(oldDb.readInstance(old1.id()).isEmpty());
+ assertTrue(oldDb.readInstance(old2.id()).isEmpty());
}
private void runUpgrade(DeploymentTester tester, ApplicationId application, ApplicationVersion version) {
Version next = Version.fromString("6.2");
tester.upgradeSystem(next);
- runDeployment(tester, tester.applications().require(application), version, Optional.of(next), Optional.empty());
+ runDeployment(tester, tester.applications().requireInstance(application), version, Optional.of(next), Optional.empty());
}
- private void runDeployment(DeploymentTester tester, ApplicationId application, ApplicationVersion version,
+ private void runDeployment(DeploymentTester tester, TenantAndApplicationId id, ApplicationVersion version,
ApplicationPackage applicationPackage, SourceRevision sourceRevision, long buildNumber) {
- Instance app = tester.applications().require(application);
+ Instance instance = tester.defaultInstance(id);
tester.jobCompletion(component)
- .application(app)
+ .application(tester.application(id))
.buildNumber(buildNumber)
.sourceRevision(sourceRevision)
.uploadArtifact(applicationPackage)
@@ -825,13 +904,13 @@ public class ControllerTest {
ApplicationVersion change = ApplicationVersion.from(sourceRevision, buildNumber);
assertEquals(change.id(), tester.controller().applications()
- .require(application)
+ .requireApplication(id)
.change().application().get().id());
- runDeployment(tester, app, version, Optional.empty(), Optional.of(applicationPackage));
+ runDeployment(tester, instance, version, Optional.empty(), Optional.of(applicationPackage));
}
private void assertStatus(JobStatus expectedStatus, ApplicationId id, Controller controller) {
- Instance app = controller.applications().get(id).get();
+ Instance app = controller.applications().getInstance(id).get();
JobStatus existingStatus = app.deploymentJobs().jobStatus().get(expectedStatus.type());
assertNotNull("Status of type " + expectedStatus.type() + " is present", existingStatus);
assertEquals(expectedStatus, existingStatus);
@@ -842,31 +921,31 @@ public class ControllerTest {
Version vespaVersion = upgrade.orElseGet(tester::defaultPlatformVersion);
// Deploy in test
- tester.deployAndNotify(app, applicationPackage, true, systemTest);
- tester.deployAndNotify(app, applicationPackage, true, stagingTest);
+ tester.deployAndNotify(app.id(), applicationPackage, true, systemTest);
+ tester.deployAndNotify(app.id(), applicationPackage, true, stagingTest);
JobStatus expected = JobStatus.initial(stagingTest)
- .withTriggering(vespaVersion, version, Optional.ofNullable(tester.application(app.id()).deployments().get(productionUsWest1.zone(main))), "",
+ .withTriggering(vespaVersion, version, Optional.ofNullable(tester.instance(app.id()).deployments().get(productionUsWest1.zone(main))), "",
tester.clock().instant().truncatedTo(MILLIS))
.withCompletion(42, Optional.empty(), tester.clock().instant().truncatedTo(MILLIS));
assertStatus(expected, app.id(), tester.controller());
// Deploy in production
expected = JobStatus.initial(productionUsWest1)
- .withTriggering(vespaVersion, version, Optional.ofNullable(tester.application(app.id()).deployments().get(productionUsWest1.zone(main))), "",
+ .withTriggering(vespaVersion, version, Optional.ofNullable(tester.instance(app.id()).deployments().get(productionUsWest1.zone(main))), "",
tester.clock().instant().truncatedTo(MILLIS))
.withCompletion(42, Optional.empty(), tester.clock().instant().truncatedTo(MILLIS));
- tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
+ tester.deployAndNotify(app.id(), applicationPackage, true, productionUsWest1);
assertStatus(expected, app.id(), tester.controller());
expected = JobStatus.initial(productionUsEast3)
- .withTriggering(vespaVersion, version, Optional.ofNullable(tester.application(app.id()).deployments().get(productionUsEast3.zone(main))), "",
+ .withTriggering(vespaVersion, version, Optional.ofNullable(tester.instance(app.id()).deployments().get(productionUsEast3.zone(main))), "",
tester.clock().instant().truncatedTo(MILLIS))
.withCompletion(42, Optional.empty(), tester.clock().instant().truncatedTo(MILLIS));
- tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
+ tester.deployAndNotify(app.id(), applicationPackage, true, productionUsEast3);
assertStatus(expected, app.id(), tester.controller());
// Verify deployed version
- app = tester.controller().applications().require(app.id());
+ app = tester.controller().applications().requireInstance(app.id());
for (Deployment deployment : app.productionDeployments().values()) {
assertEquals(version, deployment.applicationVersion());
upgrade.ifPresent(v -> assertEquals(v, deployment.version()));
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java
index 6744bdea985..82d9690f7d7 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java
@@ -14,7 +14,6 @@ import com.yahoo.vespa.athenz.api.AthenzPrincipal;
import com.yahoo.vespa.athenz.api.AthenzUser;
import com.yahoo.vespa.athenz.api.OktaAccessToken;
import com.yahoo.vespa.curator.Lock;
-import com.yahoo.vespa.curator.mock.MockCurator;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.hosted.controller.api.application.v4.model.DeployOptions;
import com.yahoo.vespa.hosted.controller.api.identifiers.Property;
@@ -27,6 +26,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.dns.RecordName;
import com.yahoo.vespa.hosted.controller.api.integration.organization.Contact;
import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockMavenRepository;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.athenz.impl.AthenzFacade;
import com.yahoo.vespa.hosted.controller.api.integration.athenz.AthenzClientFactoryMock;
import com.yahoo.vespa.hosted.controller.api.integration.athenz.AthenzDbMock;
@@ -45,6 +45,7 @@ import com.yahoo.vespa.hosted.controller.versions.VersionStatus;
import com.yahoo.vespa.hosted.rotation.config.RotationsConfig;
import java.util.Arrays;
+import java.util.List;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.function.Consumer;
@@ -53,6 +54,7 @@ import java.util.logging.Handler;
import java.util.logging.Logger;
import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
/**
* Convenience methods for controller tests.
@@ -126,8 +128,11 @@ public final class ControllerTester {
.ifPresent(configureFunc);
}
- public static BuildService.BuildJob buildJob(Instance instance, JobType jobType) {
- return BuildService.BuildJob.of(instance.id(), instance.deploymentJobs().projectId().getAsLong(), jobType.jobName());
+ public static BuildService.BuildJob buildJob(ApplicationId id, JobType jobType) {
+ if (jobType == JobType.component)
+ throw new AssertionError("Not supposed to happen");
+
+ return BuildService.BuildJob.of(id, 0, jobType.jobName());
}
public Controller controller() { return controller; }
@@ -157,37 +162,33 @@ public final class ControllerTester {
}
/** Creates the given tenant and application and deploys it */
- public Instance createAndDeploy(String tenantName, String domainName, String applicationName, Environment environment, long projectId, Long propertyId) {
- return createAndDeploy(tenantName, domainName, applicationName, toZone(environment), projectId, propertyId);
+ public void createAndDeploy(String tenantName, String domainName, String applicationName, Environment environment, long projectId, Long propertyId) {
+ createAndDeploy(tenantName, domainName, applicationName, toZone(environment), projectId, propertyId);
}
/** Creates the given tenant and application and deploys it */
- public Instance createAndDeploy(String tenantName, String domainName, String applicationName,
+ public void createAndDeploy(String tenantName, String domainName, String applicationName,
String instanceName, ZoneId zone, long projectId, Long propertyId) {
- TenantName tenant = createTenant(tenantName, domainName, propertyId);
- Instance instance = createApplication(tenant, applicationName, instanceName, projectId);
- deploy(instance, zone);
- return instance;
+ throw new AssertionError("Not supposed to use this");
}
/** Creates the given tenant and application and deploys it */
- public Instance createAndDeploy(String tenantName, String domainName, String applicationName, ZoneId zone, long projectId, Long propertyId) {
- return createAndDeploy(tenantName, domainName, applicationName, "default", zone, projectId, propertyId);
+ public void createAndDeploy(String tenantName, String domainName, String applicationName, ZoneId zone, long projectId, Long propertyId) {
+ createAndDeploy(tenantName, domainName, applicationName, "default", zone, projectId, propertyId);
}
/** Creates the given tenant and application and deploys it */
- public Instance createAndDeploy(String tenantName, String domainName, String applicationName, Environment environment, long projectId) {
- return createAndDeploy(tenantName, domainName, applicationName, environment, projectId, null);
+ public void createAndDeploy(String tenantName, String domainName, String applicationName, Environment environment, long projectId) {
+ createAndDeploy(tenantName, domainName, applicationName, environment, projectId, null);
}
/** Create application from slime */
- public Instance createApplication(Slime slime) {
- InstanceSerializer serializer = new InstanceSerializer();
- Instance instance = serializer.fromSlime(slime);
- try (Lock lock = controller().applications().lock(instance.id())) {
- controller().applications().store(new LockedInstance(instance, lock));
+ public void createApplication(Slime slime) {
+ Instance instance = new InstanceSerializer().fromSlime(slime);
+ Application application = Application.aggregate(List.of(instance)).get();
+ try (Lock lock = controller().applications().lock(application.id())) {
+ controller().applications().store(new LockedApplication(application, lock));
}
- return instance;
}
public ZoneId toZone(Environment environment) {
@@ -207,7 +208,7 @@ public final class ControllerTester {
return domain;
}
- public Optional<AthenzDomain> domainOf(ApplicationId id) {
+ public Optional<AthenzDomain> domainOf(TenantAndApplicationId id) {
Tenant tenant = controller().tenants().require(id.tenant());
return tenant.type() == Tenant.Type.athenz ? Optional.of(((AthenzTenant) tenant).domain()) : Optional.empty();
}
@@ -235,50 +236,47 @@ public final class ControllerTester {
return createTenant(tenantName, domainName, propertyId, Optional.empty());
}
- public Optional<Credentials> credentialsFor(ApplicationId id) {
+ public Optional<Credentials> credentialsFor(TenantAndApplicationId id) {
return domainOf(id).map(domain -> new AthenzCredentials(new AthenzPrincipal(new AthenzUser("user")),
domain,
new OktaAccessToken("okta-token")));
}
- public Instance createApplication(TenantName tenant, String applicationName, String instanceName, long projectId) {
+ public Application createApplication(TenantName tenant, String applicationName, String instanceName, long projectId) {
ApplicationId applicationId = ApplicationId.from(tenant.value(), applicationName, instanceName);
- controller().applications().createApplication(applicationId, credentialsFor(applicationId));
- controller().applications().lockOrThrow(applicationId, lockedInstance ->
- controller().applications().store(lockedInstance.withProjectId(OptionalLong.of(projectId))));
- return controller().applications().require(applicationId);
+ controller().applications().createApplication(applicationId, credentialsFor(TenantAndApplicationId.from(applicationId)));
+ controller().applications().lockApplicationOrThrow(TenantAndApplicationId.from(applicationId), application ->
+ controller().applications().store(application.withProjectId(OptionalLong.of(projectId))));
+ Application application = controller().applications().requireApplication(TenantAndApplicationId.from(applicationId));
+ assertTrue(application.projectId().isPresent());
+ return application;
}
- public void deploy(Instance instance, ZoneId zone) {
- deploy(instance, zone, new ApplicationPackage(new byte[0]));
+ public void deploy(ApplicationId id, ZoneId zone) {
+ deploy(id, zone, new ApplicationPackage(new byte[0]));
}
- public void deploy(Instance instance, ZoneId zone, ApplicationPackage applicationPackage) {
- deploy(instance, zone, applicationPackage, false);
+ public void deploy(ApplicationId id, ZoneId zone, ApplicationPackage applicationPackage) {
+ deploy(id, zone, applicationPackage, false);
}
- public void deploy(Instance instance, ZoneId zone, ApplicationPackage applicationPackage, boolean deployCurrentVersion) {
- deploy(instance, zone, Optional.of(applicationPackage), deployCurrentVersion);
+ public void deploy(ApplicationId id, ZoneId zone, ApplicationPackage applicationPackage, boolean deployCurrentVersion) {
+ deploy(id, zone, Optional.of(applicationPackage), deployCurrentVersion);
}
- public void deploy(Instance instance, ZoneId zone, Optional<ApplicationPackage> applicationPackage, boolean deployCurrentVersion) {
- deploy(instance, zone, applicationPackage, deployCurrentVersion, Optional.empty());
+ public void deploy(ApplicationId id, ZoneId zone, Optional<ApplicationPackage> applicationPackage, boolean deployCurrentVersion) {
+ deploy(id, zone, applicationPackage, deployCurrentVersion, Optional.empty());
}
- public void deploy(Instance instance, ZoneId zone, Optional<ApplicationPackage> applicationPackage, boolean deployCurrentVersion, Optional<Version> version) {
- controller().applications().deploy(instance.id(),
+ public void deploy(ApplicationId id, ZoneId zone, Optional<ApplicationPackage> applicationPackage, boolean deployCurrentVersion, Optional<Version> version) {
+ controller().applications().deploy(id,
zone,
applicationPackage,
new DeployOptions(false, version, false, deployCurrentVersion));
}
public Supplier<Instance> application(ApplicationId application) {
- return () -> controller().applications().require(application);
- }
-
- /** Used by ApplicationSerializerTest to avoid breaking encapsulation. Should not be used by anything else */
- public static LockedInstance writable(Instance instance) {
- return new LockedInstance(instance, new Lock("/test", new MockCurator()));
+ return () -> controller().applications().requireInstance(application);
}
private static Controller createController(CuratorDb curator, RotationsConfig rotationsConfig,
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/BuildJob.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/BuildJob.java
index 7b2aba296a4..ce733d60a77 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/BuildJob.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/BuildJob.java
@@ -2,7 +2,7 @@
package com.yahoo.vespa.hosted.controller.deployment;
import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.vespa.hosted.controller.Instance;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.SourceRevision;
@@ -51,12 +51,11 @@ public class BuildJob {
return this;
}
- public BuildJob application(Instance instance) {
- this.applicationId = instance.id();
- if (instance.deploymentJobs().projectId().isPresent()) {
- this.projectId = instance.deploymentJobs().projectId().getAsLong();
- }
- return this;
+ public BuildJob application(Application application) {
+ if (application.projectId().isPresent())
+ this.projectId = application.projectId().getAsLong();
+
+ return application(application.id().defaultInstance());
}
public BuildJob application(ApplicationId applicationId) {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTester.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTester.java
index 02b45b77769..5dc6fb183a2 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTester.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTester.java
@@ -4,9 +4,11 @@ package com.yahoo.vespa.hosted.controller.deployment;
import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.zone.ZoneApi;
import com.yahoo.test.ManualClock;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.ApplicationController;
import com.yahoo.vespa.hosted.controller.Controller;
@@ -18,6 +20,7 @@ import com.yahoo.vespa.hosted.controller.application.Change;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
import com.yahoo.vespa.hosted.controller.application.JobStatus;
import com.yahoo.vespa.hosted.controller.application.SystemApplication;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.integration.ConfigServerMock;
import com.yahoo.vespa.hosted.controller.maintenance.JobControl;
import com.yahoo.vespa.hosted.controller.maintenance.NameServiceDispatcher;
@@ -112,12 +115,20 @@ public class DeploymentTester {
public ConfigServerMock configServer() { return tester.serviceRegistry().configServerMock(); }
- public Instance application(String name) {
- return application(ApplicationId.from("tenant1", name, "default"));
+ public Application application(TenantAndApplicationId id) {
+ return controller().applications().requireApplication(id);
}
- public Instance application(ApplicationId application) {
- return controller().applications().require(application);
+ public Instance defaultInstance(String name) {
+ return instance(ApplicationId.from("tenant1", name, "default"));
+ }
+
+ public Instance defaultInstance(TenantAndApplicationId application) {
+ return controller().applications().requireApplication(application).require(InstanceName.defaultName());
+ }
+
+ public Instance instance(ApplicationId application) {
+ return controller().applications().requireInstance(application);
}
/** Re-compute and write version status */
@@ -169,11 +180,11 @@ public class DeploymentTester {
return configServer().initialVersion();
}
- public Instance createApplication(String applicationName, String tenantName, long projectId, long propertyId) {
+ public Application createApplication(String applicationName, String tenantName, long projectId, long propertyId) {
return createApplication("default", applicationName, tenantName, projectId, propertyId);
}
- public Instance createApplication(String instanceName, String applicationName, String tenantName, long projectId, long propertyId) {
+ public Application createApplication(String instanceName, String applicationName, String tenantName, long projectId, long propertyId) {
TenantName tenant = tester.createTenant(tenantName, UUID.randomUUID().toString(), propertyId);
return tester.createApplication(tenant, applicationName, instanceName, projectId);
}
@@ -191,134 +202,138 @@ public class DeploymentTester {
}
/** Simulate the full lifecycle of an application deployment as declared in given application package */
- public Instance createAndDeploy(String applicationName, int projectId, ApplicationPackage applicationPackage) {
+ public Application createAndDeploy(String applicationName, int projectId, ApplicationPackage applicationPackage) {
TenantName tenant = tester.createTenant("tenant1", "domain1", 1L);
return createAndDeploy(tenant, applicationName, projectId, applicationPackage);
}
/** Simulate the full lifecycle of an application deployment as declared in given application package */
- public Instance createAndDeploy(TenantName tenant, String applicationName, int projectId, ApplicationPackage applicationPackage) {
- Instance instance = tester.createApplication(tenant, applicationName, "default", projectId);
- deployCompletely(instance, applicationPackage);
- return applications().require(instance.id());
+ public Application createAndDeploy(TenantName tenant, String applicationName, int projectId, ApplicationPackage applicationPackage) {
+ Application application = tester.createApplication(tenant, applicationName, "default", projectId);
+ deployCompletely(application, applicationPackage);
+ return applications().requireApplication(application.id());
}
/** Simulate the full lifecycle of an application deployment to prod.us-west-1 with the given upgrade policy */
- public Instance createAndDeploy(String applicationName, int projectId, String upgradePolicy) {
+ public Application createAndDeploy(String applicationName, int projectId, String upgradePolicy) {
return createAndDeploy(applicationName, projectId, applicationPackage(upgradePolicy));
}
/** Simulate the full lifecycle of an application deployment to prod.us-west-1 with the given upgrade policy */
- public Instance createAndDeploy(TenantName tenant, String applicationName, int projectId, String upgradePolicy) {
- return createAndDeploy(tenant, applicationName, projectId, applicationPackage(upgradePolicy));
+ public void createAndDeploy(TenantName tenant, String applicationName, int projectId, String upgradePolicy) {
+ createAndDeploy(tenant, applicationName, projectId, applicationPackage(upgradePolicy));
}
/** Deploy application completely using the given application package */
- public void deployCompletely(Instance instance, ApplicationPackage applicationPackage) {
- deployCompletely(instance, applicationPackage, BuildJob.defaultBuildNumber);
+ public void deployCompletely(Application application, ApplicationPackage applicationPackage) {
+ deployCompletely(application, applicationPackage, BuildJob.defaultBuildNumber);
}
- public void completeDeploymentWithError(Instance instance, ApplicationPackage applicationPackage, long buildNumber, JobType failOnJob) {
- jobCompletion(JobType.component).application(instance)
+ public void completeDeploymentWithError(Application application, ApplicationPackage applicationPackage, long buildNumber, JobType failOnJob) {
+ jobCompletion(JobType.component).application(application)
.buildNumber(buildNumber)
.uploadArtifact(applicationPackage)
.submit();
- completeDeployment(instance, applicationPackage, Optional.ofNullable(failOnJob));
+ completeDeployment(application, applicationPackage, Optional.ofNullable(failOnJob));
}
- public void deployCompletely(Instance instance, ApplicationPackage applicationPackage, long buildNumber) {
- completeDeploymentWithError(instance, applicationPackage, buildNumber, null);
+ public void deployCompletely(Application application, ApplicationPackage applicationPackage, long buildNumber) {
+ completeDeploymentWithError(application, applicationPackage, buildNumber, null);
}
- private void completeDeployment(Instance instance, ApplicationPackage applicationPackage, Optional<JobType> failOnJob) {
- assertTrue(instance.id() + " has pending changes to deploy", applications().require(instance.id()).change().hasTargets());
+ private void completeDeployment(Application application, ApplicationPackage applicationPackage, Optional<JobType> failOnJob) {
+ assertTrue(application.id() + " has pending changes to deploy", applications().requireApplication(application.id()).change().hasTargets());
DeploymentSteps steps = controller().applications().deploymentTrigger().steps(applicationPackage.deploymentSpec());
List<JobType> jobs = steps.jobs();
+ // TODO jonmv: Change to list instances here.
for (JobType job : jobs) {
boolean failJob = failOnJob.map(j -> j.equals(job)).orElse(false);
- deployAndNotify(instance, applicationPackage, ! failJob, job);
+ deployAndNotify(application.id().defaultInstance(), applicationPackage, ! failJob, job);
if (failJob) {
break;
}
}
if (failOnJob.isPresent()) {
- assertTrue(applications().require(instance.id()).change().hasTargets());
- assertTrue(applications().require(instance.id()).deploymentJobs().hasFailures());
+ assertTrue(applications().requireApplication(application.id()).change().hasTargets());
+ assertTrue(defaultInstance(application.id()).deploymentJobs().hasFailures());
} else {
- assertFalse(applications().require(instance.id()).change().hasTargets());
+ assertFalse(applications().requireApplication(application.id()).change().hasTargets());
}
if (updateDnsAutomatically) {
flushDnsRequests();
}
}
- public void completeUpgrade(Instance instance, Version version, String upgradePolicy) {
- completeUpgrade(instance, version, applicationPackage(upgradePolicy));
+ public void completeUpgrade(Application application, Version version, String upgradePolicy) {
+ completeUpgrade(application, version, applicationPackage(upgradePolicy));
}
- public void completeUpgrade(Instance instance, Version version, ApplicationPackage applicationPackage) {
- assertTrue(instance + " has a change", applications().require(instance.id()).change().hasTargets());
- assertEquals(Change.of(version), applications().require(instance.id()).change());
- completeDeployment(instance, applicationPackage, Optional.empty());
+ public void completeUpgrade(Application application, Version version, ApplicationPackage applicationPackage) {
+ assertTrue(application + " has a change", applications().requireApplication(application.id()).change().hasTargets());
+ assertEquals(Change.of(version), applications().requireApplication(application.id()).change());
+ completeDeployment(application, applicationPackage, Optional.empty());
}
- public void completeUpgradeWithError(Instance instance, Version version, String upgradePolicy, JobType failOnJob) {
- completeUpgradeWithError(instance, version, applicationPackage(upgradePolicy), Optional.of(failOnJob));
+ public void completeUpgradeWithError(Application application, Version version, String upgradePolicy, JobType failOnJob) {
+ completeUpgradeWithError(application, version, applicationPackage(upgradePolicy), Optional.of(failOnJob));
}
- public void completeUpgradeWithError(Instance instance, Version version, ApplicationPackage applicationPackage, JobType failOnJob) {
- completeUpgradeWithError(instance, version, applicationPackage, Optional.of(failOnJob));
+ public void completeUpgradeWithError(Application application, Version version, ApplicationPackage applicationPackage, JobType failOnJob) {
+ completeUpgradeWithError(application, version, applicationPackage, Optional.of(failOnJob));
}
- private void completeUpgradeWithError(Instance instance, Version version, ApplicationPackage applicationPackage, Optional<JobType> failOnJob) {
- assertTrue(applications().require(instance.id()).change().hasTargets());
- assertEquals(Change.of(version), applications().require(instance.id()).change());
- completeDeployment(instance, applicationPackage, failOnJob);
+ private void completeUpgradeWithError(Application application, Version version, ApplicationPackage applicationPackage, Optional<JobType> failOnJob) {
+ assertTrue(applications().requireApplication(application.id()).change().hasTargets());
+ assertEquals(Change.of(version), applications().requireApplication(application.id()).change());
+ completeDeployment(application, applicationPackage, failOnJob);
}
- public void deploy(JobType job, Instance instance, ApplicationPackage applicationPackage) {
- deploy(job, instance, Optional.of(applicationPackage), false);
+ public void deploy(JobType job, ApplicationId id, ApplicationPackage applicationPackage) {
+ deploy(job, id, Optional.of(applicationPackage), false);
}
- public void deploy(JobType job, Instance instance, ApplicationPackage applicationPackage,
+ public void deploy(JobType job, ApplicationId id, ApplicationPackage applicationPackage,
boolean deployCurrentVersion) {
- deploy(job, instance, Optional.of(applicationPackage), deployCurrentVersion);
+ deploy(job, id, Optional.of(applicationPackage), deployCurrentVersion);
}
- public void deploy(JobType job, Instance instance, Optional<ApplicationPackage> applicationPackage,
+ public void deploy(JobType job, ApplicationId id, Optional<ApplicationPackage> applicationPackage,
boolean deployCurrentVersion) {
- tester.deploy(instance, job.zone(controller().system()), applicationPackage, deployCurrentVersion);
+ tester.deploy(id, job.zone(controller().system()), applicationPackage, deployCurrentVersion);
}
- public void deployAndNotify(Instance instance, String upgradePolicy, boolean success, JobType job) {
- deployAndNotify(instance, applicationPackage(upgradePolicy), success, job);
+ public void deployAndNotify(Instance i, String upgradePolicy, boolean success, JobType job) {
+ deployAndNotify(i.id(), applicationPackage(upgradePolicy), success, job);
}
- public void deployAndNotify(Instance instance, ApplicationPackage applicationPackage, boolean success, JobType job) {
- deployAndNotify(instance, Optional.of(applicationPackage), success, job);
+ public void deployAndNotify(ApplicationId id, ApplicationPackage applicationPackage, boolean success, JobType job) {
+ deployAndNotify(id, Optional.of(applicationPackage), success, job);
}
- public void deployAndNotify(Instance instance, boolean success, JobType job) {
- deployAndNotify(instance, Optional.empty(), success, job);
+ public void deployAndNotify(Instance i, boolean success, JobType job) {
+ deployAndNotify(i.id(), Optional.empty(), success, job);
+ }
+ public void deployAndNotify(ApplicationId id, boolean success, JobType job) {
+ deployAndNotify(id, Optional.empty(), success, job);
}
- public void deployAndNotify(Instance instance, Optional<ApplicationPackage> applicationPackage, boolean success, JobType job) {
+ public void deployAndNotify(ApplicationId id, Optional<ApplicationPackage> applicationPackage, boolean success, JobType job) {
if (success) {
// Staging deploys twice, once with current version and once with new version
if (job == JobType.stagingTest) {
- deploy(job, instance, applicationPackage, true);
+ deploy(job, id, applicationPackage, true);
}
- deploy(job, instance, applicationPackage, false);
+ deploy(job, id, applicationPackage, false);
}
// Deactivate test deployments after deploy. This replicates the behaviour of the tenant pipeline
if (job.isTest()) {
- controller().applications().deactivate(instance.id(), job.zone(controller().system()));
+ controller().applications().deactivate(id, job.zone(controller().system()));
}
- jobCompletion(job).application(instance).success(success).submit();
+ jobCompletion(job).application(id).success(success).submit();
}
public Optional<JobStatus.JobRun> firstFailing(Instance instance, JobType job) {
- return tester.controller().applications().require(instance.id())
+ return tester.controller().applications().requireInstance(instance.id())
.deploymentJobs().jobStatus().get(job).firstFailing();
}
@@ -350,7 +365,7 @@ public class DeploymentTester {
}
private boolean isRunning(JobType job, ApplicationId application) {
- return buildService().jobs().contains(ControllerTester.buildJob(application(application), job));
+ return buildService().jobs().contains(ControllerTester.buildJob(instance(application).id(), job));
}
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
index 7cb520f0fd9..5441ed6aec0 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
@@ -3,12 +3,13 @@ package com.yahoo.vespa.hosted.controller.deployment;
import com.yahoo.component.Version;
import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.slime.Slime;
import com.yahoo.vespa.config.SlimeUtils;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
-import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.api.integration.BuildService;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
@@ -19,9 +20,11 @@ import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockBuildService;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.Change;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.maintenance.JobControl;
import com.yahoo.vespa.hosted.controller.maintenance.ReadyJobsTrigger;
import com.yahoo.vespa.hosted.controller.versions.VespaVersion;
+import org.junit.Ignore;
import org.junit.Test;
import java.nio.file.Files;
@@ -68,7 +71,8 @@ public class DeploymentTriggerTest {
@Test
public void testTriggerFailing() {
- Instance app = tester.createApplication("app1", "tenant1", 1, 1L);
+ Application app = tester.createApplication("app1", "tenant1", 1, 1L);
+ Instance instance = tester.defaultInstance(app.id());
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.environment(Environment.prod)
@@ -80,37 +84,41 @@ public class DeploymentTriggerTest {
// Deploy completely once
tester.jobCompletion(component).application(app).uploadArtifact(applicationPackage).submit();
- tester.deployAndNotify(app, applicationPackage, true, JobType.systemTest);
- tester.deployAndNotify(app, applicationPackage, true, JobType.stagingTest);
- tester.deployAndNotify(app, applicationPackage, true, JobType.productionUsWest1);
+ tester.deployAndNotify(instance.id(), applicationPackage, true, JobType.systemTest);
+ tester.deployAndNotify(instance.id(), applicationPackage, true, JobType.stagingTest);
+ tester.deployAndNotify(instance.id(), applicationPackage, true, JobType.productionUsWest1);
// New version is released
version = Version.fromString("6.3");
tester.upgradeSystem(version);
// staging-test times out and is retried
- tester.buildService().remove(buildJob(app, stagingTest));
+ tester.buildService().remove(buildJob(app.id().defaultInstance(), stagingTest));
tester.readyJobTrigger().maintain();
assertEquals("Retried dead job", 2, tester.buildService().jobs().size());
- tester.assertRunning(stagingTest, app.id());
- tester.deployAndNotify(app, applicationPackage, true, stagingTest);
+ tester.assertRunning(stagingTest, app.id().defaultInstance());
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, stagingTest);
// system-test is now the only running job -- production jobs haven't started yet, since it is unfinished.
- tester.assertRunning(systemTest, app.id());
+ tester.assertRunning(systemTest, app.id().defaultInstance());
assertEquals(1, tester.buildService().jobs().size());
// system-test fails and is retried
- tester.deployAndNotify(app, applicationPackage, false, JobType.systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), false, JobType.systemTest);
assertEquals("Job is retried on failure", 1, tester.buildService().jobs().size());
- tester.deployAndNotify(app, applicationPackage, true, JobType.systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.systemTest);
- tester.assertRunning(productionUsWest1, app.id());
+ tester.assertRunning(productionUsWest1, app.id().defaultInstance());
}
@Test
+ @Ignore
+ // TODO jonmv: Re-enable, but changed, when instances are orchestrated.
public void testIndependentInstances() {
- Instance instance1 = tester.createApplication("instance1", "app", "tenant", 1, 1L);
- Instance instance2 = tester.createApplication("instance2", "app", "tenant", 2, 1L);
+ Application app1 = tester.createApplication("instance1", "app", "tenant", 1, 1L);
+ Application app2 = tester.createApplication("instance2", "app", "tenant", 2, 1L);
+ Instance instance1 = tester.instance(app1.id().instance(InstanceName.from("instance1")));
+ Instance instance2 = tester.instance(app2.id().instance(InstanceName.from("instance2")));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.upgradePolicy("default")
.environment(Environment.prod)
@@ -121,27 +129,27 @@ public class DeploymentTriggerTest {
tester.upgradeSystem(version);
// Deploy completely once
- tester.jobCompletion(component).application(instance1).uploadArtifact(applicationPackage).submit();
- tester.deployAndNotify(instance1, applicationPackage, true, JobType.systemTest);
- tester.deployAndNotify(instance1, applicationPackage, true, JobType.stagingTest);
- tester.deployAndNotify(instance1, applicationPackage, true, JobType.productionUsWest1);
+ tester.jobCompletion(component).application(app1).application(instance1.id()).uploadArtifact(applicationPackage).submit();
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), true, JobType.systemTest);
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), true, JobType.stagingTest);
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), true, JobType.productionUsWest1);
- tester.jobCompletion(component).application(instance2).uploadArtifact(applicationPackage).submit();
- tester.deployAndNotify(instance2, applicationPackage, true, JobType.systemTest);
- tester.deployAndNotify(instance2, applicationPackage, true, JobType.stagingTest);
- tester.deployAndNotify(instance2, applicationPackage, true, JobType.productionUsWest1);
+ tester.jobCompletion(component).application(app2).application(instance2.id()).uploadArtifact(applicationPackage).submit();
+ tester.deployAndNotify(instance2.id(), Optional.of(applicationPackage), true, JobType.systemTest);
+ tester.deployAndNotify(instance2.id(), Optional.of(applicationPackage), true, JobType.stagingTest);
+ tester.deployAndNotify(instance2.id(), Optional.of(applicationPackage), true, JobType.productionUsWest1);
// New version is released
Version newVersion = Version.fromString("6.3");
tester.upgradeSystem(newVersion);
// instance1 upgrades, but not instance 2
- tester.deployAndNotify(instance1, applicationPackage, true, JobType.systemTest);
- tester.deployAndNotify(instance1, applicationPackage, true, JobType.stagingTest);
- tester.deployAndNotify(instance1, applicationPackage, true, JobType.productionUsWest1);
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), true, JobType.systemTest);
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), true, JobType.stagingTest);
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), true, JobType.productionUsWest1);
- Version instance1Version = tester.application(instance1.id()).deployments().get(JobType.productionUsWest1.zone(main)).version();
- Version instance2Version = tester.application(instance2.id()).deployments().get(JobType.productionUsWest1.zone(main)).version();
+ Version instance1Version = tester.defaultInstance(app1.id()).deployments().get(JobType.productionUsWest1.zone(main)).version();
+ Version instance2Version = tester.defaultInstance(app2.id()).deployments().get(JobType.productionUsWest1.zone(main)).version();
assertEquals(newVersion, instance1Version);
assertEquals(version, instance2Version);
@@ -152,26 +160,27 @@ public class DeploymentTriggerTest {
InternalDeploymentTester iTester = new InternalDeploymentTester();
DeploymentTester tester = iTester.tester();
- Instance app = iTester.app();
+ Instance instance = iTester.instance();
+ Application application = tester.application(TenantAndApplicationId.from(instance.id()));
ApplicationPackage applicationPackage = InternalDeploymentTester.applicationPackage;
- tester.jobCompletion(component).application(app).uploadArtifact(applicationPackage).submit();
- tester.deployAndNotify(app, true, systemTest);
- tester.deployAndNotify(app, true, stagingTest);
- tester.assertRunning(productionUsCentral1, app.id());
+ tester.jobCompletion(component).application(application).uploadArtifact(applicationPackage).submit();
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, stagingTest);
+ tester.assertRunning(productionUsCentral1, instance.id());
// Jobs run externally are not affected.
- tester.jobCompletion(component).application(app).nextBuildNumber().uploadArtifact(applicationPackage).submit();
- tester.assertRunning(productionUsCentral1, app.id());
-
- tester.applications().deploymentTrigger().cancelChange(app.id(), ALL);
- tester.deployAndNotify(app, false, systemTest);
- tester.deployAndNotify(app, false, stagingTest);
- tester.deployAndNotify(app, false, productionUsCentral1);
- assertEquals(Change.empty(), tester.application(app.id()).change());
- tester.assertNotRunning(systemTest, app.id());
- tester.assertNotRunning(stagingTest, app.id());
- tester.assertNotRunning(productionUsCentral1, app.id());
+ tester.jobCompletion(component).application(application).nextBuildNumber().uploadArtifact(applicationPackage).submit();
+ tester.assertRunning(productionUsCentral1, instance.id());
+
+ tester.applications().deploymentTrigger().cancelChange(application.id(), ALL);
+ tester.deployAndNotify(instance.id(), Optional.empty(), false, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), false, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), false, productionUsCentral1);
+ assertEquals(Change.empty(), tester.instance(instance.id()).change());
+ tester.assertNotRunning(systemTest, instance.id());
+ tester.assertNotRunning(stagingTest, instance.id());
+ tester.assertNotRunning(productionUsCentral1, instance.id());
RunId id = iTester.newRun(productionUsCentral1);
assertTrue(iTester.jobs().active(id).isPresent());
@@ -194,7 +203,7 @@ public class DeploymentTriggerTest {
tester.readyJobTrigger().maintain();
iTester.runJob(JobType.productionUsWest1);
iTester.runJob(JobType.productionUsEast3);
- assertEquals(Change.empty(), iTester.app().change());
+ assertEquals(Change.empty(), iTester.instance().change());
tester.upgradeSystem(new Version("8.9"));
iTester.runJob(JobType.systemTest);
@@ -213,7 +222,8 @@ public class DeploymentTriggerTest {
public void deploymentSpecDecidesTriggerOrder() {
TenantName tenant = tester.controllerTester().createTenant("tenant1", "domain1", 1L);
MockBuildService mockBuildService = tester.buildService();
- Instance instance = tester.controllerTester().createApplication(tenant, "app1", "default", 1L);
+ Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L);
+ Instance instance = tester.defaultInstance(application.id());
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-east-3")
@@ -222,21 +232,22 @@ public class DeploymentTriggerTest {
.build();
// Component job finishes
- tester.jobCompletion(component).application(instance).uploadArtifact(applicationPackage).submit();
+ tester.jobCompletion(component).application(application).uploadArtifact(applicationPackage).submit();
// Application is deployed to all test environments and declared zones
- tester.deployAndNotify(instance, applicationPackage, true, JobType.systemTest);
- tester.deployAndNotify(instance, applicationPackage, true, JobType.stagingTest);
- tester.deployAndNotify(instance, applicationPackage, true, JobType.productionUsEast3);
- tester.deployAndNotify(instance, applicationPackage, true, JobType.productionUsCentral1);
- tester.deployAndNotify(instance, applicationPackage, true, JobType.productionUsWest1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.productionUsEast3);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.productionUsCentral1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.productionUsWest1);
assertTrue("All jobs consumed", mockBuildService.jobs().isEmpty());
}
@Test
public void deploymentsSpecWithDelays() {
MockBuildService mockBuildService = tester.buildService();
- Instance instance = tester.createApplication("app1", "tenant1", 1, 1L);
+ Application application = tester.createApplication("app1", "tenant1", 1, 1L);
+ Instance instance = tester.defaultInstance(application.id());
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
@@ -249,11 +260,11 @@ public class DeploymentTriggerTest {
.build();
// Component job finishes
- tester.jobCompletion(component).application(instance).uploadArtifact(applicationPackage).submit();
+ tester.jobCompletion(component).application(application).uploadArtifact(applicationPackage).submit();
// Test jobs pass
- tester.deployAndNotify(instance, applicationPackage, true, JobType.systemTest);
- tester.deployAndNotify(instance, applicationPackage, true, JobType.stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.stagingTest);
tester.deploymentTrigger().triggerReadyJobs();
// No jobs have started yet, as 30 seconds have not yet passed.
@@ -272,7 +283,7 @@ public class DeploymentTriggerTest {
tester.assertRunning(productionUsWest1, instance.id());
// us-west-1 completes
- tester.deployAndNotify(instance, applicationPackage, true, productionUsWest1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsWest1);
// Delayed trigger does nothing as not enough time has passed after us-west-1 completion
tester.deploymentTrigger().triggerReadyJobs();
@@ -286,7 +297,7 @@ public class DeploymentTriggerTest {
// 4 minutes pass, us-central-1 is triggered
tester.clock().advance(Duration.ofMinutes(1));
tester.deploymentTrigger().triggerReadyJobs();
- tester.deployAndNotify(instance, applicationPackage, true, JobType.productionUsCentral1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.productionUsCentral1);
assertTrue("All jobs consumed", mockBuildService.jobs().isEmpty());
// Delayed trigger job runs again, with nothing to trigger
@@ -297,7 +308,8 @@ public class DeploymentTriggerTest {
@Test
public void deploymentSpecWithParallelDeployments() {
- Instance instance = tester.createApplication("app1", "tenant1", 1, 1L);
+ Application application = tester.createApplication("app1", "tenant1", 1, 1L);
+ Instance instance = tester.defaultInstance(application.id());
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
@@ -307,66 +319,67 @@ public class DeploymentTriggerTest {
.build();
// Component job finishes
- tester.jobCompletion(component).application(instance).uploadArtifact(applicationPackage).submit();
+ tester.jobCompletion(component).application(application).uploadArtifact(applicationPackage).submit();
// Test jobs pass
- tester.deployAndNotify(instance, applicationPackage, true, JobType.systemTest);
- tester.deployAndNotify(instance, applicationPackage, true, JobType.stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.stagingTest);
// Deploys in first region
assertEquals(1, tester.buildService().jobs().size());
- tester.deployAndNotify(instance, applicationPackage, true, JobType.productionUsCentral1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.productionUsCentral1);
// Deploys in two regions in parallel
assertEquals(2, tester.buildService().jobs().size());
tester.assertRunning(productionUsEast3, instance.id());
tester.assertRunning(productionUsWest1, instance.id());
- tester.deploy(JobType.productionUsWest1, instance, applicationPackage, false);
- tester.jobCompletion(JobType.productionUsWest1).application(instance).submit();
+ tester.deploy(JobType.productionUsWest1, instance.id(), applicationPackage, false);
+ tester.jobCompletion(JobType.productionUsWest1).application(application).submit();
assertEquals("One job still running.", JobType.productionUsEast3.jobName(), tester.buildService().jobs().get(0).jobName());
- tester.deploy(JobType.productionUsEast3, instance, applicationPackage, false);
- tester.jobCompletion(JobType.productionUsEast3).application(instance).submit();
+ tester.deploy(JobType.productionUsEast3, instance.id(), applicationPackage, false);
+ tester.jobCompletion(JobType.productionUsEast3).application(application).submit();
// Last region completes
assertEquals(1, tester.buildService().jobs().size());
- tester.deployAndNotify(instance, applicationPackage, true, JobType.productionEuWest1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.productionEuWest1);
assertTrue("All jobs consumed", tester.buildService().jobs().isEmpty());
}
@Test
public void testNoOtherChangesDuringSuspension() {
// Application is deployed in 3 regions:
- Instance instance = tester.createApplication("app1", "tenant1", 1, 1L);
+ Application application = tester.createApplication("app1", "tenant1", 1, 1L);
+ Instance instance = tester.defaultInstance(application.id());
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-central-1")
.parallel("us-west-1", "us-east-3")
.build();
tester.jobCompletion(component)
- .application(instance)
+ .application(application)
.uploadArtifact(applicationPackage)
.submit();
- tester.deployAndNotify(instance, applicationPackage, true, JobType.systemTest);
- tester.deployAndNotify(instance, applicationPackage, true, JobType.stagingTest);
- tester.deployAndNotify(instance, applicationPackage, true, JobType.productionUsCentral1);
- tester.deployAndNotify(instance, applicationPackage, true, JobType.productionUsWest1);
- tester.deployAndNotify(instance, applicationPackage, true, JobType.productionUsEast3);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.productionUsCentral1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.productionUsWest1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.productionUsEast3);
// The first production zone is suspended:
tester.configServer().setSuspended(new DeploymentId(instance.id(), JobType.productionUsCentral1.zone(tester.controller().system())), true);
// A new change needs to be pushed out, but should not go beyond the suspended zone:
tester.jobCompletion(component)
- .application(instance)
+ .application(application)
.nextBuildNumber()
.sourceRevision(new SourceRevision("repository1", "master", "cafed00d"))
.uploadArtifact(applicationPackage)
.submit();
- tester.deployAndNotify(instance, applicationPackage, true, JobType.systemTest);
- tester.deployAndNotify(instance, applicationPackage, true, JobType.stagingTest);
- tester.deployAndNotify(instance, applicationPackage, true, JobType.productionUsCentral1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.productionUsCentral1);
tester.triggerUntilQuiescence();
tester.assertNotRunning(JobType.productionUsEast3, instance.id());
tester.assertNotRunning(JobType.productionUsWest1, instance.id());
@@ -374,8 +387,8 @@ public class DeploymentTriggerTest {
// The zone is unsuspended so jobs start:
tester.configServer().setSuspended(new DeploymentId(instance.id(), JobType.productionUsCentral1.zone(tester.controller().system())), false);
tester.triggerUntilQuiescence();
- tester.deployAndNotify(instance, applicationPackage, true, JobType.productionUsWest1);
- tester.deployAndNotify(instance, applicationPackage, true, JobType.productionUsEast3);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.productionUsWest1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.productionUsEast3);
}
@Test
@@ -385,31 +398,33 @@ public class DeploymentTriggerTest {
.parallel("us-east-3", "us-west-1")
.build();
- Instance app = tester.createApplication("app1", "tenant1", 1, 11L);
+ Application app = tester.createApplication("app1", "tenant1", 1, 11L);
+ Instance instance = tester.defaultInstance(app.id());
tester.jobCompletion(component).application(app).uploadArtifact(applicationPackage).submit();
// Test environments pass
- tester.deployAndNotify(app, applicationPackage, true, JobType.systemTest);
- tester.deployAndNotify(app, applicationPackage, true, JobType.stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.stagingTest);
// Last declared job completes first
- tester.deploy(JobType.productionUsWest1, app, applicationPackage);
+ tester.deploy(JobType.productionUsWest1, instance.id(), applicationPackage);
tester.jobCompletion(JobType.productionUsWest1).application(app).submit();
assertTrue("Change is present as not all jobs are complete",
- tester.applications().require(app.id()).change().hasTargets());
+ tester.applications().requireApplication(app.id()).change().hasTargets());
// All jobs complete
- tester.deploy(JobType.productionUsEast3, app, applicationPackage);
+ tester.deploy(JobType.productionUsEast3, instance.id(), applicationPackage);
tester.jobCompletion(JobType.productionUsEast3).application(app).submit();
assertFalse("Change has been deployed",
- tester.applications().require(app.id()).change().hasTargets());
+ tester.applications().requireApplication(app.id()).change().hasTargets());
}
@Test
public void testSuccessfulDeploymentApplicationPackageChanged() {
TenantName tenant = tester.controllerTester().createTenant("tenant1", "domain1", 1L);
MockBuildService mockBuildService = tester.buildService();
- Instance instance = tester.controllerTester().createApplication(tenant, "app1", "default", 1L);
+ Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L);
+ Instance instance = tester.defaultInstance(application.id());
ApplicationPackage previousApplicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-east-3")
@@ -425,16 +440,16 @@ public class DeploymentTriggerTest {
.build();
// Component job finishes
- tester.jobCompletion(component).application(instance).uploadArtifact(newApplicationPackage).submit();
+ tester.jobCompletion(component).application(application).uploadArtifact(newApplicationPackage).submit();
// Application is deployed to all test environments and declared zones
- tester.deployAndNotify(instance, newApplicationPackage, true, JobType.systemTest);
- tester.deploy(JobType.stagingTest, instance, previousApplicationPackage, true);
- tester.deployAndNotify(instance, newApplicationPackage, true, JobType.stagingTest);
- tester.deployAndNotify(instance, newApplicationPackage, true, JobType.productionUsEast3);
- tester.deployAndNotify(instance, newApplicationPackage, true, JobType.productionUsCentral1);
- tester.deployAndNotify(instance, newApplicationPackage, true, JobType.productionUsWest1);
- tester.deployAndNotify(instance, newApplicationPackage, true, JobType.productionEuWest1);
+ tester.deployAndNotify(instance.id(), Optional.of(newApplicationPackage), true, JobType.systemTest);
+ tester.deploy(JobType.stagingTest, instance.id(), previousApplicationPackage, true);
+ tester.deployAndNotify(instance.id(), Optional.of(newApplicationPackage), true, JobType.stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(newApplicationPackage), true, JobType.productionUsEast3);
+ tester.deployAndNotify(instance.id(), Optional.of(newApplicationPackage), true, JobType.productionUsCentral1);
+ tester.deployAndNotify(instance.id(), Optional.of(newApplicationPackage), true, JobType.productionUsWest1);
+ tester.deployAndNotify(instance.id(), Optional.of(newApplicationPackage), true, JobType.productionEuWest1);
assertTrue("All jobs consumed", mockBuildService.jobs().isEmpty());
}
@@ -457,7 +472,8 @@ public class DeploymentTriggerTest {
.region("us-central-1")
.region("us-east-3");
- Instance app = tester.createAndDeploy("app1", 1, applicationPackageBuilder.build());
+ Application app = tester.createAndDeploy("app1", 1, applicationPackageBuilder.build());
+ Instance instance = tester.defaultInstance(app.id());
tester.clock().advance(Duration.ofHours(1)); // --------------- Enter block window: 18:30
@@ -478,12 +494,12 @@ public class DeploymentTriggerTest {
.sourceRevision(new SourceRevision("repository1", "master", "cafed00d"))
.uploadArtifact(changedApplication)
.submit();
- assertTrue(tester.applications().require(app.id()).outstandingChange().hasTargets());
- tester.deployAndNotify(app, changedApplication, true, systemTest);
- tester.deployAndNotify(app, changedApplication, true, stagingTest);
+ assertTrue(tester.applications().requireApplication(app.id()).outstandingChange().hasTargets());
+ tester.deployAndNotify(instance.id(), Optional.of(changedApplication), true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(changedApplication), true, stagingTest);
tester.outstandingChangeDeployer().run();
- assertTrue(tester.applications().require(app.id()).outstandingChange().hasTargets());
+ assertTrue(tester.applications().requireApplication(app.id()).outstandingChange().hasTargets());
readyJobsTrigger.run();
assertEquals(emptyList(), tester.buildService().jobs());
@@ -491,10 +507,10 @@ public class DeploymentTriggerTest {
tester.clock().advance(Duration.ofHours(2)); // ---------------- Exit block window: 20:30
tester.outstandingChangeDeployer().run();
- assertFalse(tester.applications().require(app.id()).outstandingChange().hasTargets());
+ assertFalse(tester.applications().requireApplication(app.id()).outstandingChange().hasTargets());
tester.deploymentTrigger().triggerReadyJobs(); // Schedules staging test for the blocked production job(s)
- assertEquals(singletonList(buildJob(app, productionUsWest1)), tester.buildService().jobs());
+ assertEquals(singletonList(buildJob(instance.id(), productionUsWest1)), tester.buildService().jobs());
}
@Test
@@ -506,7 +522,8 @@ public class DeploymentTriggerTest {
.region("us-west-1")
.region("us-east-3")
.build();
- Instance instance = tester.createAndDeploy("app1", 1, applicationPackage);
+ Application application = tester.createAndDeploy("app1", 1, applicationPackage);
+ Instance instance = tester.defaultInstance(application.id());
// Application on (6.1, 1.0.42)
Version v1 = Version.fromString("6.1");
@@ -514,74 +531,75 @@ public class DeploymentTriggerTest {
// Application is mid-upgrade when block window begins, and has an outstanding change.
Version v2 = Version.fromString("6.2");
tester.upgradeSystem(v2);
- tester.jobCompletion(component).application(instance).nextBuildNumber().uploadArtifact(applicationPackage).submit();
+ tester.jobCompletion(component).application(application).nextBuildNumber().uploadArtifact(applicationPackage).submit();
- tester.deployAndNotify(instance, applicationPackage, true, stagingTest);
- tester.deployAndNotify(instance, applicationPackage, true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, systemTest);
// Entering block window will keep the outstanding change in place.
tester.clock().advance(Duration.ofHours(1));
tester.outstandingChangeDeployer().run();
- tester.deployAndNotify(instance, applicationPackage, true, productionUsWest1);
- assertEquals(BuildJob.defaultBuildNumber, tester.application(instance.id()).deploymentJobs().jobStatus()
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsWest1);
+ assertEquals(BuildJob.defaultBuildNumber, tester.defaultInstance(application.id()).deploymentJobs().jobStatus()
.get(productionUsWest1).lastSuccess().get().application().buildNumber().getAsLong());
- assertEquals((BuildJob.defaultBuildNumber + 1), tester.application(instance.id()).outstandingChange().application().get().buildNumber().getAsLong());
+ assertEquals((BuildJob.defaultBuildNumber + 1), tester.defaultInstance(application.id()).outstandingChange().application().get().buildNumber().getAsLong());
tester.readyJobTrigger().maintain();
// Platform upgrade keeps rolling, since it has already deployed in a production zone, and tests for the new revision have also started.
assertEquals(3, tester.buildService().jobs().size());
- tester.deployAndNotify(instance, applicationPackage, true, productionUsEast3);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsEast3);
assertEquals(2, tester.buildService().jobs().size());
// Upgrade is done, and outstanding change rolls out when block window ends.
- assertEquals(Change.empty(), tester.application(instance.id()).change());
- assertFalse(tester.application(instance.id()).change().hasTargets());
- assertTrue(tester.application(instance.id()).outstandingChange().hasTargets());
+ assertEquals(Change.empty(), tester.defaultInstance(application.id()).change());
+ assertFalse(tester.defaultInstance(application.id()).change().hasTargets());
+ assertTrue(tester.defaultInstance(application.id()).outstandingChange().hasTargets());
- tester.deployAndNotify(instance, applicationPackage, true, stagingTest);
- tester.deployAndNotify(instance, applicationPackage, true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, systemTest);
tester.clock().advance(Duration.ofHours(1));
tester.outstandingChangeDeployer().run();
- assertTrue(tester.application(instance.id()).change().hasTargets());
- assertFalse(tester.application(instance.id()).outstandingChange().hasTargets());
+ assertTrue(tester.defaultInstance(application.id()).change().hasTargets());
+ assertFalse(tester.defaultInstance(application.id()).outstandingChange().hasTargets());
tester.readyJobTrigger().run();
- tester.deployAndNotify(instance, applicationPackage, true, productionUsWest1);
- tester.deployAndNotify(instance, applicationPackage, true, productionUsEast3);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsWest1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsEast3);
- assertFalse(tester.application(instance.id()).change().hasTargets());
- assertFalse(tester.application(instance.id()).outstandingChange().hasTargets());
+ assertFalse(tester.defaultInstance(application.id()).change().hasTargets());
+ assertFalse(tester.defaultInstance(application.id()).outstandingChange().hasTargets());
}
@Test
public void testJobPause() {
- Instance app = tester.createAndDeploy("app", 3, "default");
+ Application app = tester.createAndDeploy("app", 3, "default");
+ Instance instance = tester.defaultInstance(app.id());
tester.upgradeSystem(new Version("9.8.7"));
- tester.applications().deploymentTrigger().pauseJob(app.id(), productionUsWest1, tester.clock().instant().plus(Duration.ofSeconds(1)));
- tester.applications().deploymentTrigger().pauseJob(app.id(), productionUsEast3, tester.clock().instant().plus(Duration.ofSeconds(3)));
+ tester.applications().deploymentTrigger().pauseJob(instance.id(), productionUsWest1, tester.clock().instant().plus(Duration.ofSeconds(1)));
+ tester.applications().deploymentTrigger().pauseJob(instance.id(), productionUsEast3, tester.clock().instant().plus(Duration.ofSeconds(3)));
// us-west-1 does not trigger when paused.
- tester.deployAndNotify(app, true, systemTest);
- tester.deployAndNotify(app, true, stagingTest);
- tester.assertNotRunning(productionUsWest1, app.id());
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, stagingTest);
+ tester.assertNotRunning(productionUsWest1, instance.id());
// us-west-1 triggers when no longer paused, but does not retry when paused again.
tester.clock().advance(Duration.ofMillis(1500));
tester.readyJobTrigger().run();
- tester.assertRunning(productionUsWest1, app.id());
- tester.applications().deploymentTrigger().pauseJob(app.id(), productionUsWest1, tester.clock().instant().plus(Duration.ofSeconds(1)));
- tester.deployAndNotify(app, false, productionUsWest1);
- tester.assertNotRunning(productionUsWest1, app.id());
+ tester.assertRunning(productionUsWest1, instance.id());
+ tester.applications().deploymentTrigger().pauseJob(instance.id(), productionUsWest1, tester.clock().instant().plus(Duration.ofSeconds(1)));
+ tester.deployAndNotify(instance.id(), Optional.empty(), false, productionUsWest1);
+ tester.assertNotRunning(productionUsWest1, instance.id());
tester.clock().advance(Duration.ofMillis(1000));
tester.readyJobTrigger().run();
- tester.deployAndNotify(app, true, productionUsWest1);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, productionUsWest1);
// us-east-3 does not automatically trigger when paused, but does when forced.
- tester.assertNotRunning(productionUsEast3, app.id());
- tester.deploymentTrigger().forceTrigger(app.id(), productionUsEast3, "mrTrigger");
- tester.assertRunning(productionUsEast3, app.id());
- assertFalse(tester.application(app.id()).deploymentJobs().jobStatus().get(productionUsEast3).pausedUntil().isPresent());
+ tester.assertNotRunning(productionUsEast3, instance.id());
+ tester.deploymentTrigger().forceTrigger(instance.id(), productionUsEast3, "mrTrigger");
+ tester.assertRunning(productionUsEast3, instance.id());
+ assertFalse(tester.instance(instance.id()).deploymentJobs().jobStatus().get(productionUsEast3).pausedUntil().isPresent());
}
@Test
@@ -589,71 +607,74 @@ public class DeploymentTriggerTest {
ReadyJobsTrigger readyJobsTrigger = new ReadyJobsTrigger(tester.controller(),
Duration.ofHours(1),
new JobControl(tester.controllerTester().curator()));
- Instance app = tester.createAndDeploy("default0", 3, "default");
+ Application app = tester.createAndDeploy("default0", 3, "default");
// Store that we are upgrading but don't start the system-tests job
- tester.controller().applications().lockOrThrow(app.id(), locked -> {
+ tester.controller().applications().lockApplicationOrThrow(app.id(), locked -> {
tester.controller().applications().store(locked.withChange(Change.of(Version.fromString("6.2"))));
});
assertEquals(0, tester.buildService().jobs().size());
readyJobsTrigger.run();
- tester.assertRunning(systemTest, app.id());
- tester.assertRunning(stagingTest, app.id());
+ tester.assertRunning(systemTest, app.id().defaultInstance());
+ tester.assertRunning(stagingTest, app.id().defaultInstance());
}
@Test
public void applicationVersionIsNotDowngraded() {
- Instance instance = tester.createApplication("app1", "tenant1", 1, 1L);
- Supplier<Instance> app = () -> tester.application(instance.id());
+ Application application = tester.createApplication("app1", "tenant1", 1, 1L);
+ Instance instance = tester.defaultInstance(application.id());
+ Supplier<Instance> app = () -> tester.defaultInstance(application.id());
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-central-1")
.region("eu-west-1")
.build();
- tester.deployCompletely(instance, applicationPackage);
+ tester.deployCompletely(application, applicationPackage);
// productionUsCentral1 fails after deployment, causing a mismatch between deployed and successful state.
- tester.completeDeploymentWithError(instance, applicationPackage, BuildJob.defaultBuildNumber + 1, productionUsCentral1);
+ tester.completeDeploymentWithError(application, applicationPackage, BuildJob.defaultBuildNumber + 1, productionUsCentral1);
// deployAndNotify doesn't actually deploy if the job fails, so we need to do that manually.
- tester.deployAndNotify(instance, false, productionUsCentral1);
- tester.deploy(productionUsCentral1, instance, Optional.empty(), false);
+ tester.deployAndNotify(instance.id(), Optional.empty(), false, productionUsCentral1);
+ tester.deploy(productionUsCentral1, instance.id(), Optional.empty(), false);
ApplicationVersion appVersion1 = ApplicationVersion.from(BuildJob.defaultSourceRevision, BuildJob.defaultBuildNumber + 1);
assertEquals(appVersion1, app.get().deployments().get(ZoneId.from("prod.us-central-1")).applicationVersion());
// Verify the application change is not removed when change is cancelled.
- tester.deploymentTrigger().cancelChange(instance.id(), PLATFORM);
+ tester.deploymentTrigger().cancelChange(application.id(), PLATFORM);
assertEquals(Change.of(appVersion1), app.get().change());
// Now cancel the change as is done through the web API.
- tester.deploymentTrigger().cancelChange(instance.id(), ALL);
+ tester.deploymentTrigger().cancelChange(application.id(), ALL);
assertEquals(Change.empty(), app.get().change());
// A new version is released, which should now deploy the currently deployed application version to avoid downgrades.
Version version1 = new Version("6.2");
tester.upgradeSystem(version1);
- tester.jobCompletion(productionUsCentral1).application(instance).unsuccessful().submit();
- tester.deployAndNotify(instance, true, systemTest);
- tester.deployAndNotify(instance, true, stagingTest);
- tester.deployAndNotify(instance, false, productionUsCentral1);
+ tester.jobCompletion(productionUsCentral1).application(application).unsuccessful().submit();
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), false, productionUsCentral1);
// The last job has a different target, and the tests need to run again.
// These may now start, since the first job has been triggered once, and thus is verified already.
- tester.deployAndNotify(instance, true, systemTest);
- tester.deployAndNotify(instance, true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, stagingTest);
// Finally, the two production jobs complete, in order.
- tester.deployAndNotify(instance, true, productionUsCentral1);
- tester.deployAndNotify(instance, true, productionEuWest1);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, productionUsCentral1);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, productionEuWest1);
assertEquals(appVersion1, app.get().deployments().get(ZoneId.from("prod.us-central-1")).applicationVersion());
}
@Test
public void stepIsCompletePreciselyWhenItShouldBe() {
- Instance instance1 = tester.createApplication("app1", "tenant1", 1, 1L);
- Instance instance2 = tester.createApplication("app2", "tenant2", 2, 2L);
- Supplier<Instance> app1 = () -> tester.application(instance1.id());
+ Application application1 = tester.createApplication("app1", "tenant1", 1, 1L);
+ Application application2 = tester.createApplication("app2", "tenant2", 2, 2L);
+ Instance instance1 = tester.defaultInstance(application1.id());
+ Instance instance2 = tester.defaultInstance(application2.id());
+ Supplier<Instance> app1 = () -> tester.defaultInstance(application1.id());
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-central-1")
@@ -663,23 +684,23 @@ public class DeploymentTriggerTest {
// System upgrades to version0 and applications deploy on that version
Version version0 = Version.fromString("7.0");
tester.upgradeSystem(version0);
- tester.deployCompletely(instance1, applicationPackage);
- tester.deployCompletely(instance2, applicationPackage);
+ tester.deployCompletely(application1, applicationPackage);
+ tester.deployCompletely(application2, applicationPackage);
// version1 is released and application1 skips upgrading to that version
Version version1 = Version.fromString("7.1");
tester.upgradeSystem(version1);
// Deploy application2 to keep this version present in the system
- tester.deployCompletely(instance2, applicationPackage);
- tester.applications().deploymentTrigger().cancelChange(instance1.id(), ALL);
+ tester.deployCompletely(application2, applicationPackage);
+ tester.applications().deploymentTrigger().cancelChange(application1.id(), ALL);
tester.buildService().clear(); // Clear stale build jobs for cancelled change
// version2 is released and application1 starts upgrading
Version version2 = Version.fromString("7.2");
tester.upgradeSystem(version2);
- tester.completeUpgradeWithError(instance1, version2, applicationPackage, productionUsCentral1);
- tester.deploy(productionUsCentral1, instance1, applicationPackage);
- tester.deployAndNotify(instance1, applicationPackage, false, productionUsCentral1);
+ tester.completeUpgradeWithError(application1, version2, applicationPackage, productionUsCentral1);
+ tester.deploy(productionUsCentral1, instance1.id(), applicationPackage);
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), false, productionUsCentral1);
assertEquals(version2, app1.get().deployments().get(productionUsCentral1.zone(main)).version());
// version2 becomes broken and upgrade targets latest non-broken
@@ -687,25 +708,25 @@ public class DeploymentTriggerTest {
tester.computeVersionStatus();
tester.upgrader().maintain(); // Cancel upgrades to broken version
assertEquals("Change becomes latest non-broken version", Change.of(version1), app1.get().change());
- tester.deployAndNotify(instance1, applicationPackage, false, productionUsCentral1);
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), false, productionUsCentral1);
Instant triggered = app1.get().deploymentJobs().jobStatus().get(productionUsCentral1).lastTriggered().get().at();
tester.clock().advance(Duration.ofHours(1));
// version1 proceeds 'til the last job, where it fails; us-central-1 is skipped, as current change is strictly dominated by what's deployed there.
- tester.deployAndNotify(instance1, applicationPackage, true, systemTest);
- tester.deployAndNotify(instance1, applicationPackage, true, stagingTest);
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), true, systemTest);
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), true, stagingTest);
assertEquals(triggered, app1.get().deploymentJobs().jobStatus().get(productionUsCentral1).lastTriggered().get().at());
- tester.deployAndNotify(instance1, applicationPackage, false, productionEuWest1);
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), false, productionEuWest1);
//Eagerly triggered system and staging tests complete.
- tester.deployAndNotify(instance1, applicationPackage, true, systemTest);
- tester.deployAndNotify(instance1, applicationPackage, true, stagingTest);
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), true, systemTest);
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), true, stagingTest);
// Roll out a new application version, which gives a dual change -- this should trigger us-central-1, but only as long as it hasn't yet deployed there.
- tester.jobCompletion(component).application(instance1).nextBuildNumber().uploadArtifact(applicationPackage).submit();
- tester.deployAndNotify(instance1, applicationPackage, false, productionEuWest1);
- tester.deployAndNotify(instance1, applicationPackage, true, systemTest);
- tester.deployAndNotify(instance1, applicationPackage, true, stagingTest);
+ tester.jobCompletion(component).application(application1).nextBuildNumber().uploadArtifact(applicationPackage).submit();
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), false, productionEuWest1);
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), true, systemTest);
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), true, stagingTest);
tester.assertRunning(productionUsCentral1, instance1.id());
assertEquals(version2, app1.get().deployments().get(productionUsCentral1.zone(main)).version());
@@ -713,8 +734,8 @@ public class DeploymentTriggerTest {
assertNotEquals(triggered, app1.get().deploymentJobs().jobStatus().get(productionUsCentral1).lastTriggered().get().at());
// Change has a higher application version than what is deployed -- deployment should trigger.
- tester.deployAndNotify(instance1, applicationPackage, false, productionUsCentral1);
- tester.deploy(productionUsCentral1, instance1, applicationPackage);
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), false, productionUsCentral1);
+ tester.deploy(productionUsCentral1, instance1.id(), applicationPackage);
assertEquals(version2, app1.get().deployments().get(productionUsCentral1.zone(main)).version());
assertEquals(43, app1.get().deployments().get(productionUsCentral1.zone(main)).applicationVersion().buildNumber().getAsLong());
@@ -725,64 +746,65 @@ public class DeploymentTriggerTest {
tester.assertNotRunning(productionUsCentral1, instance1.id());
// Last job has a different deployment target, so tests need to run again.
- tester.deployAndNotify(instance1, true, systemTest);
- tester.deployAndNotify(instance1, true, stagingTest);
- tester.deployAndNotify(instance1, applicationPackage, true, productionEuWest1);
+ tester.deployAndNotify(instance1.id(), Optional.empty(), true, systemTest);
+ tester.deployAndNotify(instance1.id(), Optional.empty(), true, stagingTest);
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), true, productionEuWest1);
assertFalse(app1.get().change().hasTargets());
assertFalse(app1.get().deploymentJobs().jobStatus().get(productionUsCentral1).isSuccess());
}
@Test
public void eachDeployTargetIsTested() {
- Instance instance = tester.createApplication("app1", "tenant1", 1, 1L);
- Supplier<Instance> app = () -> tester.application(instance.id());
+ Application application = tester.createApplication("app1", "tenant1", 1, 1L);
+ Instance instance = tester.defaultInstance(application.id());
+ Supplier<Instance> app = () -> tester.defaultInstance(application.id());
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.parallel("eu-west-1", "us-east-3")
.build();
// Application version 42 and platform version 6.1.
- tester.deployCompletely(instance, applicationPackage);
+ tester.deployCompletely(application, applicationPackage);
// Success in first prod zone, change cancelled between triggering and deployment to two parallel zones.
// One of the parallel zones get a deployment, but both fail their jobs.
Version v1 = new Version("6.1");
Version v2 = new Version("6.2");
tester.upgradeSystem(v2);
- tester.deployAndNotify(instance, true, systemTest);
- tester.deployAndNotify(instance, true, stagingTest);
- tester.deploymentTrigger().cancelChange(instance.id(), PLATFORM);
- tester.deploy(productionEuWest1, instance, applicationPackage);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, stagingTest);
+ tester.deploymentTrigger().cancelChange(application.id(), PLATFORM);
+ tester.deploy(productionEuWest1, instance.id(), applicationPackage);
assertEquals(v2, app.get().deployments().get(productionEuWest1.zone(main)).version());
assertEquals(v1, app.get().deployments().get(productionUsEast3.zone(main)).version());
// New application version should run system and staging tests against both 6.1 and 6.2, in no particular order.
- tester.jobCompletion(component).application(instance).nextBuildNumber().uploadArtifact(applicationPackage).submit();
+ tester.jobCompletion(component).application(application).nextBuildNumber().uploadArtifact(applicationPackage).submit();
Version firstTested = app.get().deploymentJobs().jobStatus().get(systemTest).lastTriggered().get().platform();
assertEquals(firstTested, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().platform());
- tester.deployAndNotify(instance, true, systemTest);
- tester.deployAndNotify(instance, true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, stagingTest);
// Tests are not re-triggered, because the deployments that were tested have not yet been triggered on the tested versions.
assertEquals(firstTested, app.get().deploymentJobs().jobStatus().get(systemTest).lastTriggered().get().platform());
assertEquals(firstTested, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().platform());
// Finish old runs of the production jobs, which fail.
- tester.deployAndNotify(instance, applicationPackage, false, productionEuWest1);
- tester.deployAndNotify(instance, applicationPackage, false, productionUsEast3);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), false, productionEuWest1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), false, productionUsEast3);
tester.triggerUntilQuiescence();
// New upgrade is already tested for one of the jobs, which has now been triggered, and tests may run for the other job.
assertNotEquals(firstTested, app.get().deploymentJobs().jobStatus().get(systemTest).lastTriggered().get().platform());
assertNotEquals(firstTested, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().platform());
- tester.deployAndNotify(instance, true, systemTest);
- tester.deployAndNotify(instance, true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, stagingTest);
// Both jobs fail again, and must be re-triggered -- this is ok, as they are both already triggered on their current targets.
- tester.deployAndNotify(instance, false, productionEuWest1);
- tester.deployAndNotify(instance, false, productionUsEast3);
- tester.deployAndNotify(instance, true, productionUsEast3);
- tester.deployAndNotify(instance, true, productionEuWest1);
+ tester.deployAndNotify(instance.id(), Optional.empty(), false, productionEuWest1);
+ tester.deployAndNotify(instance.id(), Optional.empty(), false, productionUsEast3);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, productionUsEast3);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, productionEuWest1);
assertFalse(app.get().change().hasTargets());
assertEquals(43, app.get().deploymentJobs().jobStatus().get(productionEuWest1).lastSuccess().get().application().buildNumber().getAsLong());
assertEquals(43, app.get().deploymentJobs().jobStatus().get(productionUsEast3).lastSuccess().get().application().buildNumber().getAsLong());
@@ -790,69 +812,71 @@ public class DeploymentTriggerTest {
@Test
public void eachDifferentUpgradeCombinationIsTested() {
- Instance instance = tester.createApplication("app1", "tenant1", 1, 1L);
- Supplier<Instance> app = () -> tester.application(instance.id());
+ Application application = tester.createApplication("app1", "tenant1", 1, 1L);
+ Instance instance = tester.defaultInstance(application.id());
+ Supplier<Instance> app = () -> tester.defaultInstance(application.id());
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-central-1")
.parallel("eu-west-1", "us-east-3")
.build();
// Application version 42 and platform version 6.1.
- tester.deployCompletely(instance, applicationPackage);
+ tester.deployCompletely(application, applicationPackage);
// Application partially upgrades, then a new version is released.
Version v1 = new Version("6.1");
Version v2 = new Version("6.2");
tester.upgradeSystem(v2);
- tester.deployAndNotify(instance, true, systemTest);
- tester.deployAndNotify(instance, true, stagingTest);
- tester.deployAndNotify(instance, true, productionUsCentral1);
- tester.deployAndNotify(instance, true, productionEuWest1);
- tester.deployAndNotify(instance, false, productionUsEast3);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, productionUsCentral1);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, productionEuWest1);
+ tester.deployAndNotify(instance.id(), Optional.empty(), false, productionUsEast3);
assertEquals(v2, app.get().deployments().get(ZoneId.from("prod", "us-central-1")).version());
assertEquals(v2, app.get().deployments().get(ZoneId.from("prod", "eu-west-1")).version());
assertEquals(v1, app.get().deployments().get(ZoneId.from("prod", "us-east-3")).version());
Version v3 = new Version("6.3");
tester.upgradeSystem(v3);
- tester.deployAndNotify(instance, false, productionUsEast3);
+ tester.deployAndNotify(instance.id(), Optional.empty(), false, productionUsEast3);
// See that sources for staging are: first v2, then v1.
- tester.deployAndNotify(instance, true, systemTest);
- tester.deployAndNotify(instance, true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, stagingTest);
assertEquals(v2, app.get().deploymentJobs().jobStatus().get(stagingTest).lastSuccess().get().sourcePlatform().get());
- tester.deployAndNotify(instance, true, productionUsCentral1);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, productionUsCentral1);
assertEquals(v1, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().sourcePlatform().get());
- tester.deployAndNotify(instance, true, stagingTest);
- tester.deployAndNotify(instance, true, productionEuWest1);
- tester.deployAndNotify(instance, true, productionUsEast3);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, productionEuWest1);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, productionUsEast3);
}
@Test
public void retriesFailingJobs() {
- Instance instance = tester.createApplication("app1", "tenant1", 1, 1L);
+ Application application = tester.createApplication("app1", "tenant1", 1, 1L);
+ Instance instance = tester.defaultInstance(application.id());
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-central-1")
.build();
// Deploy completely on default application and platform versions
- tester.deployCompletely(instance, applicationPackage);
+ tester.deployCompletely(application, applicationPackage);
// New application change is deployed and fails in system-test for a while
- tester.jobCompletion(component).application(instance).nextBuildNumber().uploadArtifact(applicationPackage).submit();
- tester.deployAndNotify(instance, false, systemTest);
- tester.deployAndNotify(instance, true, stagingTest);
+ tester.jobCompletion(component).application(application).nextBuildNumber().uploadArtifact(applicationPackage).submit();
+ tester.deployAndNotify(instance.id(), Optional.empty(), false, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, stagingTest);
// Retries immediately in the first minute after failing
tester.clock().advance(Duration.ofSeconds(59));
- tester.jobCompletion(systemTest).application(instance).unsuccessful().submit();
+ tester.jobCompletion(systemTest).application(application).unsuccessful().submit();
tester.readyJobTrigger().maintain();
tester.assertRunning(systemTest, instance.id());
// Stops immediate retry after failing for 1 minute
tester.clock().advance(Duration.ofSeconds(1));
- tester.jobCompletion(systemTest).application(instance).unsuccessful().submit();
+ tester.jobCompletion(systemTest).application(application).unsuccessful().submit();
tester.readyJobTrigger().maintain();
tester.assertNotRunning(systemTest, instance.id());
@@ -863,7 +887,7 @@ public class DeploymentTriggerTest {
// Retries less frequently after 1 hour of failure
tester.clock().advance(Duration.ofMinutes(50));
- tester.jobCompletion(systemTest).application(instance).unsuccessful().submit();
+ tester.jobCompletion(systemTest).application(application).unsuccessful().submit();
tester.readyJobTrigger().maintain();
tester.assertNotRunning(systemTest, instance.id());
@@ -873,15 +897,15 @@ public class DeploymentTriggerTest {
tester.assertRunning(systemTest, instance.id());
// Still fails and is not retried
- tester.jobCompletion(systemTest).application(instance).unsuccessful().submit();
+ tester.jobCompletion(systemTest).application(application).unsuccessful().submit();
tester.readyJobTrigger().maintain();
tester.assertNotRunning(systemTest, instance.id());
// Another application change is deployed and fixes system-test. Change is triggered immediately as target changes
- tester.jobCompletion(component).application(instance).nextBuildNumber(2).uploadArtifact(applicationPackage).submit();
- tester.deployAndNotify(instance, true, systemTest);
- tester.deployAndNotify(instance, true, stagingTest);
- tester.deployAndNotify(instance, true, productionUsCentral1);
+ tester.jobCompletion(component).application(application).nextBuildNumber(2).uploadArtifact(applicationPackage).submit();
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, productionUsCentral1);
assertTrue("Deployment completed", tester.buildService().jobs().isEmpty());
}
@@ -895,11 +919,12 @@ public class DeploymentTriggerTest {
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
- Instance app = tester.createApplication("app1", "tenant1", 1, 11L);
+ Application app = tester.createApplication("app1", "tenant1", 1, 11L);
+ Instance instance = tester.defaultInstance(app.id());
tester.jobCompletion(component).application(app).uploadArtifact(applicationPackage).submit();
- tester.deployAndNotify(app, applicationPackage, true, JobType.systemTest);
- tester.deployAndNotify(app, applicationPackage, true, JobType.stagingTest);
- tester.deployAndNotify(app, applicationPackage, true, JobType.productionUsEast3);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.productionUsEast3);
// New version is released
version = Version.fromString("6.3");
@@ -909,14 +934,14 @@ public class DeploymentTriggerTest {
tester.readyJobTrigger().maintain();
// Test environments pass
- tester.deployAndNotify(app, applicationPackage, true, JobType.systemTest);
- tester.deployAndNotify(app, applicationPackage, true, JobType.stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.stagingTest);
// Production job fails and is retried
tester.clock().advance(Duration.ofSeconds(1)); // Advance time so that we can detect jobs in progress
- tester.deployAndNotify(app, applicationPackage, false, JobType.productionUsEast3);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), false, JobType.productionUsEast3);
assertEquals("Production job is retried", 1, tester.buildService().jobs().size());
- assertEquals("Application has pending upgrade to " + version, version, tester.application(app.id()).change().platform().get());
+ assertEquals("Application has pending upgrade to " + version, version, tester.defaultInstance(app.id()).change().platform().get());
// Another version is released, which cancels any pending upgrades to lower versions
version = Version.fromString("6.4");
@@ -924,24 +949,24 @@ public class DeploymentTriggerTest {
tester.upgrader().maintain();
tester.jobCompletion(JobType.productionUsEast3).application(app).unsuccessful().submit();
assertEquals("Application starts upgrading to new version", 2, tester.buildService().jobs().size());
- assertEquals("Application has pending upgrade to " + version, version, tester.application(app.id()).change().platform().get());
+ assertEquals("Application has pending upgrade to " + version, version, tester.defaultInstance(app.id()).change().platform().get());
// Failure re-deployer did not retry failing job for prod.us-east-3, since it no longer had an available change
assertFalse("Job is not retried", tester.buildService().jobs().stream()
.anyMatch(j -> j.jobName().equals(JobType.productionUsEast3.jobName())));
// Test environments pass
- tester.deployAndNotify(app, applicationPackage, true, JobType.systemTest);
- tester.deployAndNotify(app, applicationPackage, true, JobType.stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.stagingTest);
// Production job fails again, and is retried
- tester.deployAndNotify(app, applicationPackage, false, JobType.productionUsEast3);
- assertEquals("Job is retried", Collections.singletonList(ControllerTester.buildJob(app, productionUsEast3)), tester.buildService().jobs());
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), false, JobType.productionUsEast3);
+ assertEquals("Job is retried", Collections.singletonList(buildJob(instance.id(), productionUsEast3)), tester.buildService().jobs());
// Production job finally succeeds
- tester.deployAndNotify(app, applicationPackage, true, JobType.productionUsEast3);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.productionUsEast3);
assertTrue("All jobs consumed", tester.buildService().jobs().isEmpty());
- assertFalse("No failures", tester.application(app.id()).deploymentJobs().hasFailures());
+ assertFalse("No failures", tester.defaultInstance(app.id()).deploymentJobs().hasFailures());
}
@Test
@@ -954,11 +979,12 @@ public class DeploymentTriggerTest {
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
- Instance app = tester.createApplication("app1", "tenant1", 1, 11L);
+ Application app = tester.createApplication("app1", "tenant1", 1, 11L);
+ Instance instance = tester.defaultInstance(app.id());
tester.jobCompletion(component).application(app).uploadArtifact(applicationPackage).submit();
- tester.deployAndNotify(app, applicationPackage, true, JobType.systemTest);
- tester.deployAndNotify(app, applicationPackage, true, JobType.stagingTest);
- tester.deployAndNotify(app, applicationPackage, true, JobType.productionUsEast3);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.productionUsEast3);
// New version is released
version = Version.fromString("6.3");
@@ -966,29 +992,30 @@ public class DeploymentTriggerTest {
assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.upgrader().maintain();
tester.readyJobTrigger().maintain();
- assertEquals("Application has pending upgrade to " + version, version, tester.application(app.id()).change().platform().get());
+ assertEquals("Application has pending upgrade to " + version, version, tester.defaultInstance(app.id()).change().platform().get());
// system-test fails and is left with a retry
- tester.deployAndNotify(app, applicationPackage, false, JobType.systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), false, JobType.systemTest);
// Another version is released
version = Version.fromString("6.4");
tester.upgradeSystem(version);
assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
- tester.buildService().remove(ControllerTester.buildJob(app, systemTest));
+ tester.buildService().remove(buildJob(instance.id(), systemTest));
tester.upgrader().maintain();
tester.readyJobTrigger().maintain();
- assertEquals("Application has pending upgrade to " + version, version, tester.application(app.id()).change().platform().get());
+ assertEquals("Application has pending upgrade to " + version, version, tester.defaultInstance(app.id()).change().platform().get());
// Cancellation of outdated version and triggering on a new version is done by the upgrader.
- assertEquals(version, tester.application(app.id()).deploymentJobs().jobStatus().get(systemTest).lastTriggered().get().platform());
+ assertEquals(version, tester.defaultInstance(app.id()).deploymentJobs().jobStatus().get(systemTest).lastTriggered().get().platform());
}
@Test
public void testUpdatesFailingJobStatus() {
// Setup application
- Instance app = tester.createApplication("app1", "foo", 1, 1L);
+ Application app = tester.createApplication("app1", "foo", 1, 1L);
+ Instance instance = tester.defaultInstance(app.id());
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-west-1")
@@ -997,39 +1024,39 @@ public class DeploymentTriggerTest {
// Initial failure
Instant initialFailure = tester.clock().instant().truncatedTo(MILLIS);
tester.jobCompletion(component).application(app).uploadArtifact(applicationPackage).submit();
- tester.deployAndNotify(app, applicationPackage, false, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), false, systemTest);
assertEquals("Failure age is right at initial failure",
- initialFailure, tester.firstFailing(app, systemTest).get().at());
+ initialFailure, tester.firstFailing(instance, systemTest).get().at());
// Failure again -- failingSince should remain the same
tester.clock().advance(Duration.ofMillis(1000));
- tester.deployAndNotify(app, applicationPackage, false, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), false, systemTest);
assertEquals("Failure age is right at second consecutive failure",
- initialFailure, tester.firstFailing(app, systemTest).get().at());
+ initialFailure, tester.firstFailing(instance, systemTest).get().at());
// Success resets failingSince
tester.clock().advance(Duration.ofMillis(1000));
- tester.deployAndNotify(app, applicationPackage, true, systemTest);
- assertFalse(tester.firstFailing(app, systemTest).isPresent());
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, systemTest);
+ assertFalse(tester.firstFailing(instance, systemTest).isPresent());
// Complete deployment
- tester.deployAndNotify(app, applicationPackage, true, stagingTest);
- tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsWest1);
// Two repeated failures again.
// Initial failure
tester.clock().advance(Duration.ofMillis(1000));
initialFailure = tester.clock().instant().truncatedTo(MILLIS);
tester.jobCompletion(component).application(app).nextBuildNumber().uploadArtifact(applicationPackage).submit();
- tester.deployAndNotify(app, applicationPackage, false, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), false, systemTest);
assertEquals("Failure age is right at initial failure",
- initialFailure, tester.firstFailing(app, systemTest).get().at());
+ initialFailure, tester.firstFailing(instance, systemTest).get().at());
// Failure again -- failingSince should remain the same
tester.clock().advance(Duration.ofMillis(1000));
- tester.deployAndNotify(app, applicationPackage, false, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), false, systemTest);
assertEquals("Failure age is right at second consecutive failure",
- initialFailure, tester.firstFailing(app, systemTest).get().at());
+ initialFailure, tester.firstFailing(instance, systemTest).get().at());
}
@Test
@@ -1058,22 +1085,24 @@ public class DeploymentTriggerTest {
.build();
Version version1 = tester.controller().versionStatus().systemVersion().get().versionNumber();
- Instance app1 = tester.createApplication("application1", "tenant1", 1, 1L);
+ Application app1 = tester.createApplication("application1", "tenant1", 1, 1L);
+ Instance instance1 = tester.defaultInstance(app1.id());
// First deployment: An application change
tester.jobCompletion(component).application(app1).uploadArtifact(applicationPackage).submit();
- tester.deployAndNotify(app1, applicationPackage, true, systemTest);
- tester.deployAndNotify(app1, applicationPackage, true, stagingTest);
- tester.deployAndNotify(app1, applicationPackage, true, productionUsWest1);
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), true, systemTest);
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), true, stagingTest);
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), true, productionUsWest1);
app1 = tester.application(app1.id());
assertEquals("First deployment gets system version", version1, app1.oldestDeployedPlatform().get());
assertEquals(version1, tester.configServer().lastPrepareVersion().get());
+ instance1 = tester.instance(instance1.id());
// Unexpected deployment
- tester.deploy(productionUsWest1, app1, applicationPackage);
- // applications are immutable, so any change to one, including deployment changes, would give rise to a new instance.
- assertEquals("Unexpected deployment is ignored", app1, tester.application(app1.id()));
+ tester.deploy(productionUsWest1, instance1.id(), applicationPackage);
+ // instances are immutable, so any change to one, including deployment changes, would give rise to a new instance.
+ assertEquals("Unexpected deployment is ignored", instance1, tester.defaultInstance(app1.id()));
// Application change after a new system version, and a region added
Version version2 = new Version(version1.getMajor(), version1.getMinor() + 1);
@@ -1086,16 +1115,16 @@ public class DeploymentTriggerTest {
.region("us-east-3")
.build();
tester.jobCompletion(component).application(app1).nextBuildNumber().uploadArtifact(applicationPackage).submit();
- tester.deployAndNotify(app1, applicationPackage, true, systemTest);
- tester.deployAndNotify(app1, applicationPackage, true, stagingTest);
- tester.deployAndNotify(app1, applicationPackage, true, productionUsWest1);
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), true, systemTest);
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), true, stagingTest);
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), true, productionUsWest1);
app1 = tester.application(app1.id());
assertEquals("Application change preserves version", version1, app1.oldestDeployedPlatform().get());
assertEquals(version1, tester.configServer().lastPrepareVersion().get());
// A deployment to the new region gets the same version
- tester.deployAndNotify(app1, applicationPackage, true, productionUsEast3);
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), true, productionUsEast3);
app1 = tester.application(app1.id());
assertEquals("Application change preserves version", version1, app1.oldestDeployedPlatform().get());
assertEquals(version1, tester.configServer().lastPrepareVersion().get());
@@ -1104,10 +1133,10 @@ public class DeploymentTriggerTest {
// Version upgrade changes system version
tester.deploymentTrigger().triggerChange(app1.id(), Change.of(version2));
tester.deploymentTrigger().triggerReadyJobs();
- tester.deployAndNotify(app1, applicationPackage, true, systemTest);
- tester.deployAndNotify(app1, applicationPackage, true, stagingTest);
- tester.deployAndNotify(app1, applicationPackage, true, productionUsWest1);
- tester.deployAndNotify(app1, applicationPackage, true, productionUsEast3);
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), true, systemTest);
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), true, stagingTest);
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), true, productionUsWest1);
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), true, productionUsEast3);
app1 = tester.application(app1.id());
assertEquals("Version upgrade changes version", version2, app1.oldestDeployedPlatform().get());
@@ -1124,22 +1153,25 @@ public class DeploymentTriggerTest {
long project1 = 1;
long project2 = 2;
long project3 = 3;
- Instance app1 = tester.createApplication("app1", "tenant1", project1, 1L);
- Instance app2 = tester.createApplication("app2", "tenant2", project2, 1L);
- Instance app3 = tester.createApplication("app3", "tenant3", project3, 1L);
+ Application app1 = tester.createApplication("app1", "tenant1", project1, 1L);
+ Application app2 = tester.createApplication("app2", "tenant2", project2, 1L);
+ Application app3 = tester.createApplication("app3", "tenant3", project3, 1L);
+ Instance instance1 = tester.defaultInstance(app1.id());
+ Instance instance2 = tester.defaultInstance(app2.id());
+ Instance instance3 = tester.defaultInstance(app3.id());
MockBuildService mockBuildService = tester.buildService();
// all applications: system-test completes successfully with some time in between, to determine trigger order.
tester.jobCompletion(component).application(app2).uploadArtifact(applicationPackage).submit();
- tester.deployAndNotify(app2, applicationPackage, true, systemTest);
+ tester.deployAndNotify(instance2.id(), Optional.of(applicationPackage), true, systemTest);
tester.clock().advance(Duration.ofMinutes(1));
tester.jobCompletion(component).application(app1).uploadArtifact(applicationPackage).submit();
- tester.deployAndNotify(app1, applicationPackage, true, systemTest);
+ tester.deployAndNotify(instance1.id(), Optional.of(applicationPackage), true, systemTest);
tester.clock().advance(Duration.ofMinutes(1));
tester.jobCompletion(component).application(app3).uploadArtifact(applicationPackage).submit();
- tester.deployAndNotify(app3, applicationPackage, true, systemTest);
+ tester.deployAndNotify(instance3.id(), Optional.of(applicationPackage), true, systemTest);
// all applications: staging test jobs queued
assertEquals(3, mockBuildService.jobs().size());
@@ -1151,30 +1183,30 @@ public class DeploymentTriggerTest {
assertJobsInOrder(jobs, tester.buildService().jobs());
tester.triggerUntilQuiescence();
- jobs.add(buildJob(app2, stagingTest));
- jobs.add(buildJob(app1, stagingTest));
- jobs.add(buildJob(app3, stagingTest));
+ jobs.add(buildJob(instance2.id(), stagingTest));
+ jobs.add(buildJob(instance1.id(), stagingTest));
+ jobs.add(buildJob(instance3.id(), stagingTest));
assertJobsInOrder(jobs, tester.buildService().jobs());
// Remove the jobs for app1 and app2, and then let app3 fail with outOfCapacity.
// All three jobs are now eligible, but the one for app3 should trigger first as an outOfCapacity-retry.
- tester.buildService().remove(buildJob(app1, stagingTest));
- tester.buildService().remove(buildJob(app2, stagingTest));
- jobs.remove(buildJob(app1, stagingTest));
- jobs.remove(buildJob(app2, stagingTest));
+ tester.buildService().remove(buildJob(instance1.id(), stagingTest));
+ tester.buildService().remove(buildJob(instance2.id(), stagingTest));
+ jobs.remove(buildJob(instance1.id(), stagingTest));
+ jobs.remove(buildJob(instance2.id(), stagingTest));
tester.jobCompletion(stagingTest).application(app3).error(DeploymentJobs.JobError.outOfCapacity).submit();
assertJobsInOrder(jobs, tester.buildService().jobs());
tester.triggerUntilQuiescence();
- jobs.add(buildJob(app2, stagingTest));
- jobs.add(buildJob(app1, stagingTest));
+ jobs.add(buildJob(instance2.id(), stagingTest));
+ jobs.add(buildJob(instance1.id(), stagingTest));
assertJobsInOrder(jobs, tester.buildService().jobs());
// Finish deployment for apps 2 and 3, then release a new version, leaving only app1 with an application upgrade.
- tester.deployAndNotify(app2, applicationPackage, true, stagingTest);
- tester.deployAndNotify(app2, applicationPackage, true, productionUsEast3);
- tester.deployAndNotify(app3, applicationPackage, true, stagingTest);
- tester.deployAndNotify(app3, applicationPackage, true, productionUsEast3);
+ tester.deployAndNotify(instance2.id(), Optional.of(applicationPackage), true, stagingTest);
+ tester.deployAndNotify(instance2.id(), Optional.of(applicationPackage), true, productionUsEast3);
+ tester.deployAndNotify(instance3.id(), Optional.of(applicationPackage), true, stagingTest);
+ tester.deployAndNotify(instance3.id(), Optional.of(applicationPackage), true, productionUsEast3);
tester.upgradeSystem(new Version("6.2"));
// app1 also gets a new application change, so its time of availability is after the version upgrade.
@@ -1182,29 +1214,29 @@ public class DeploymentTriggerTest {
tester.buildService().clear();
tester.jobCompletion(component).application(app1).nextBuildNumber().uploadArtifact(applicationPackage).submit();
jobs.clear();
- jobs.add(buildJob(app1, stagingTest));
- jobs.add(buildJob(app1, systemTest));
+ jobs.add(buildJob(instance1.id(), stagingTest));
+ jobs.add(buildJob(instance1.id(), systemTest));
// Tests for app1 trigger before the others since it carries an application upgrade.
assertJobsInOrder(jobs, tester.buildService().jobs());
// Let the test jobs start, remove everything except system test for app3, which fails with outOfCapacity again.
tester.triggerUntilQuiescence();
- tester.buildService().remove(buildJob(app1, systemTest));
- tester.buildService().remove(buildJob(app2, systemTest));
- tester.buildService().remove(buildJob(app1, stagingTest));
- tester.buildService().remove(buildJob(app2, stagingTest));
- tester.buildService().remove(buildJob(app3, stagingTest));
+ tester.buildService().remove(buildJob(instance1.id(), systemTest));
+ tester.buildService().remove(buildJob(instance2.id(), systemTest));
+ tester.buildService().remove(buildJob(instance1.id(), stagingTest));
+ tester.buildService().remove(buildJob(instance2.id(), stagingTest));
+ tester.buildService().remove(buildJob(instance3.id(), stagingTest));
tester.jobCompletion(systemTest).application(app3).error(DeploymentJobs.JobError.outOfCapacity).submit();
jobs.clear();
- jobs.add(buildJob(app1, stagingTest));
- jobs.add(buildJob(app3, systemTest));
+ jobs.add(buildJob(instance1.id(), stagingTest));
+ jobs.add(buildJob(instance3.id(), systemTest));
assertJobsInOrder(jobs, tester.buildService().jobs());
tester.triggerUntilQuiescence();
- jobs.add(buildJob(app2, stagingTest));
- jobs.add(buildJob(app1, systemTest));
- jobs.add(buildJob(app3, stagingTest));
- jobs.add(buildJob(app2, systemTest));
+ jobs.add(buildJob(instance2.id(), stagingTest));
+ jobs.add(buildJob(instance1.id(), systemTest));
+ jobs.add(buildJob(instance3.id(), stagingTest));
+ jobs.add(buildJob(instance2.id(), systemTest));
assertJobsInOrder(jobs, tester.buildService().jobs());
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalDeploymentTester.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalDeploymentTester.java
index b6315bc6780..bcc6fcfda44 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalDeploymentTester.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalDeploymentTester.java
@@ -5,12 +5,14 @@ import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.AthenzDomain;
import com.yahoo.config.provision.AthenzService;
+import com.yahoo.config.provision.InstanceName;
import com.yahoo.log.LogLevel;
import com.yahoo.security.KeyAlgorithm;
import com.yahoo.security.KeyUtils;
import com.yahoo.security.SignatureAlgorithm;
import com.yahoo.security.X509CertificateBuilder;
import com.yahoo.test.ManualClock;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.ApplicationController;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
@@ -24,6 +26,7 @@ import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
import com.yahoo.vespa.hosted.controller.api.integration.athenz.AthenzDbMock;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.integration.ConfigServerMock;
import com.yahoo.vespa.hosted.controller.api.integration.routing.RoutingGeneratorMock;
import com.yahoo.vespa.hosted.controller.maintenance.JobControl;
@@ -68,8 +71,9 @@ public class InternalDeploymentTester {
.emailAddress("b@a")
.trust(generateCertificate())
.build();
- public static final ApplicationId appId = ApplicationId.from("tenant", "application", "default");
- public static final TesterId testerId = TesterId.of(appId);
+ public static final TenantAndApplicationId appId = TenantAndApplicationId.from("tenant", "application");
+ public static final ApplicationId instanceId = appId.defaultInstance();
+ public static final TesterId testerId = TesterId.of(instanceId);
public static final String athenzDomain = "domain";
private final DeploymentTester tester;
@@ -86,12 +90,13 @@ public class InternalDeploymentTester {
public ConfigServerMock configServer() { return tester.configServer(); }
public ApplicationController applications() { return tester.applications(); }
public ManualClock clock() { return tester.clock(); }
- public Instance app() { return tester.application(appId); }
+ public Application application() { return tester.application(appId); }
+ public Instance instance() { return tester.instance(instanceId); }
public InternalDeploymentTester() {
tester = new DeploymentTester();
- tester.controllerTester().createApplication(tester.controllerTester().createTenant(appId.tenant().value(), athenzDomain, 1L),
- appId.application().value(),
+ tester.controllerTester().createApplication(tester.controllerTester().createTenant(instanceId.tenant().value(), athenzDomain, 1L),
+ instanceId.application().value(),
"default",
1);
jobs = tester.controller().jobController();
@@ -115,7 +120,7 @@ public class InternalDeploymentTester {
* Submits a new application, and returns the version of the new submission.
*/
public ApplicationVersion newSubmission() {
- return jobs.submit(appId, BuildJob.defaultSourceRevision, "a@b", 2,
+ return jobs.submit(instanceId, BuildJob.defaultSourceRevision, "a@b", 2,
tester.controller().system().isPublic() ? publicCdApplicationPackage : applicationPackage, new byte[0]);
}
@@ -145,10 +150,10 @@ public class InternalDeploymentTester {
public ApplicationVersion deployNewSubmission() {
ApplicationVersion applicationVersion = newSubmission();
- assertFalse(app().deployments().values().stream()
- .anyMatch(deployment -> deployment.applicationVersion().equals(applicationVersion)));
- assertEquals(applicationVersion, app().change().application().get());
- assertFalse(app().change().platform().isPresent());
+ assertFalse(instance().deployments().values().stream()
+ .anyMatch(deployment -> deployment.applicationVersion().equals(applicationVersion)));
+ assertEquals(applicationVersion, instance().change().application().get());
+ assertFalse(instance().change().platform().isPresent());
runJob(JobType.systemTest);
runJob(JobType.stagingTest);
@@ -164,28 +169,28 @@ public class InternalDeploymentTester {
*/
public void deployNewPlatform(Version version) {
tester.upgradeSystem(version);
- assertFalse(app().deployments().values().stream()
- .anyMatch(deployment -> deployment.version().equals(version)));
- assertEquals(version, app().change().platform().get());
- assertFalse(app().change().application().isPresent());
+ assertFalse(instance().deployments().values().stream()
+ .anyMatch(deployment -> deployment.version().equals(version)));
+ assertEquals(version, instance().change().platform().get());
+ assertFalse(instance().change().application().isPresent());
runJob(JobType.systemTest);
runJob(JobType.stagingTest);
runJob(JobType.productionUsCentral1);
runJob(JobType.productionUsWest1);
runJob(JobType.productionUsEast3);
- assertTrue(app().productionDeployments().values().stream()
- .allMatch(deployment -> deployment.version().equals(version)));
+ assertTrue(instance().productionDeployments().values().stream()
+ .allMatch(deployment -> deployment.version().equals(version)));
assertTrue(tester.configServer().nodeRepository()
- .list(JobType.productionAwsUsEast1a.zone(tester.controller().system()), appId).stream()
+ .list(JobType.productionAwsUsEast1a.zone(tester.controller().system()), instanceId).stream()
.allMatch(node -> node.currentVersion().equals(version)));
assertTrue(tester.configServer().nodeRepository()
- .list(JobType.productionUsEast3.zone(tester.controller().system()), appId).stream()
+ .list(JobType.productionUsEast3.zone(tester.controller().system()), instanceId).stream()
.allMatch(node -> node.currentVersion().equals(version)));
assertTrue(tester.configServer().nodeRepository()
- .list(JobType.productionUsEast3.zone(tester.controller().system()), appId).stream()
+ .list(JobType.productionUsEast3.zone(tester.controller().system()), instanceId).stream()
.allMatch(node -> node.currentVersion().equals(version)));
- assertFalse(app().change().hasTargets());
+ assertFalse(instance().change().hasTargets());
}
/**
@@ -201,15 +206,15 @@ public class InternalDeploymentTester {
assertNotSame(aborted, run.status());
ZoneId zone = type.zone(tester.controller().system());
- DeploymentId deployment = new DeploymentId(appId, zone);
+ DeploymentId deployment = new DeploymentId(instanceId, zone);
// First steps are always deployments.
runner.run();
if (type == JobType.stagingTest) { // Do the initial deployment and installation of the real application.
assertEquals(unfinished, jobs.active(run.id()).get().steps().get(Step.installInitialReal));
- tester.configServer().convergeServices(appId, zone);
- setEndpoints(appId, zone);
+ tester.configServer().convergeServices(instanceId, zone);
+ setEndpoints(instanceId, zone);
run.versions().sourcePlatform().ifPresent(version -> tester.configServer().nodeRepository().doUpgrade(deployment, Optional.empty(), version));
runner.run();
assertEquals(Step.Status.succeeded, jobs.active(run.id()).get().steps().get(Step.installInitialReal));
@@ -219,12 +224,12 @@ public class InternalDeploymentTester {
tester.configServer().nodeRepository().doUpgrade(deployment, Optional.empty(), run.versions().targetPlatform());
runner.run();
assertEquals(unfinished, jobs.active(run.id()).get().steps().get(Step.installReal));
- tester.configServer().convergeServices(appId, zone);
+ tester.configServer().convergeServices(instanceId, zone);
runner.run();
if ( ! (run.versions().sourceApplication().isPresent() && type.isProduction())
&& type != JobType.stagingTest) {
assertEquals(unfinished, jobs.active(run.id()).get().steps().get(Step.installReal));
- setEndpoints(appId, zone);
+ setEndpoints(instanceId, zone);
}
runner.run();
if (type.environment().isManuallyDeployed()) {
@@ -253,10 +258,10 @@ public class InternalDeploymentTester {
runner.run();
assertTrue(jobs.run(run.id()).get().hasEnded());
assertFalse(jobs.run(run.id()).get().hasFailed());
- assertEquals(type.isProduction(), app().deployments().containsKey(zone));
+ assertEquals(type.isProduction(), instance().deployments().containsKey(zone));
assertTrue(tester.configServer().nodeRepository().list(zone, testerId.id()).isEmpty());
- if ( ! app().deployments().containsKey(zone))
+ if ( ! instance().deployments().containsKey(zone))
routing.removeEndpoints(deployment);
routing.removeEndpoints(new DeploymentId(testerId.id(), zone));
}
@@ -264,9 +269,9 @@ public class InternalDeploymentTester {
public RunId startSystemTestTests() {
RunId id = newRun(JobType.systemTest);
runner.run();
- tester.configServer().convergeServices(appId, JobType.systemTest.zone(tester.controller().system()));
+ tester.configServer().convergeServices(instanceId, JobType.systemTest.zone(tester.controller().system()));
tester.configServer().convergeServices(testerId.id(), JobType.systemTest.zone(tester.controller().system()));
- setEndpoints(appId, JobType.systemTest.zone(tester.controller().system()));
+ setEndpoints(instanceId, JobType.systemTest.zone(tester.controller().system()));
setEndpoints(testerId.id(), JobType.systemTest.zone(tester.controller().system()));
runner.run();
assertEquals(unfinished, jobs.run(id).get().steps().get(Step.endTests));
@@ -277,7 +282,7 @@ public class InternalDeploymentTester {
* Creates and submits a new application, and then starts the job of the given type.
*/
public RunId newRun(JobType type) {
- assertFalse(app().deploymentJobs().deployedInternally()); // Use this only once per test.
+ assertFalse(instance().deploymentJobs().deployedInternally()); // Use this only once per test.
newSubmission();
tester.readyJobTrigger().maintain();
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java
index 9dcfdb00036..034fd8a64b5 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java
@@ -50,7 +50,7 @@ import java.util.concurrent.Future;
import static com.yahoo.vespa.hosted.controller.api.integration.LogEntry.Type.error;
import static com.yahoo.vespa.hosted.controller.api.integration.LogEntry.Type.info;
import static com.yahoo.vespa.hosted.controller.api.integration.LogEntry.Type.warning;
-import static com.yahoo.vespa.hosted.controller.deployment.InternalDeploymentTester.appId;
+import static com.yahoo.vespa.hosted.controller.deployment.InternalDeploymentTester.instanceId;
import static com.yahoo.vespa.hosted.controller.deployment.InternalDeploymentTester.applicationPackage;
import static com.yahoo.vespa.hosted.controller.deployment.InternalDeploymentTester.publicCdApplicationPackage;
import static com.yahoo.vespa.hosted.controller.deployment.InternalDeploymentTester.testerId;
@@ -90,10 +90,10 @@ public class InternalStepRunnerTest {
@Test
public void canSwitchFromScrewdriverAndBackAgain() {
// Deploys a default application package with default build number.
- tester.tester().deployCompletely(tester.app(), InternalDeploymentTester.applicationPackage);
- tester.setEndpoints(appId, JobType.productionUsCentral1.zone(system()));
- tester.setEndpoints(appId, JobType.productionUsWest1.zone(system()));
- tester.setEndpoints(appId, JobType.productionUsEast3.zone(system()));
+ tester.tester().deployCompletely(tester.application(), InternalDeploymentTester.applicationPackage);
+ tester.setEndpoints(instanceId, JobType.productionUsCentral1.zone(system()));
+ tester.setEndpoints(instanceId, JobType.productionUsWest1.zone(system()));
+ tester.setEndpoints(instanceId, JobType.productionUsEast3.zone(system()));
// Let application have an ongoing upgrade when it switches (but kill the jobs, as the tester assumes they aren't running).
tester.tester().upgradeSystem(new Version("7.1"));
@@ -104,13 +104,13 @@ public class InternalStepRunnerTest {
tester.deployNewPlatform(new Version("7.2"));
- tester.jobs().unregister(appId);
+ tester.jobs().unregister(instanceId);
try {
- tester.tester().deployCompletely(tester.app(), InternalDeploymentTester.applicationPackage, BuildJob.defaultBuildNumber + 1);
+ tester.tester().deployCompletely(tester.application(), InternalDeploymentTester.applicationPackage, BuildJob.defaultBuildNumber + 1);
throw new IllegalStateException("Component job should get even again with build numbers to produce a change.");
}
catch (AssertionError expected) { }
- tester.tester().deployCompletely(tester.app(), InternalDeploymentTester.applicationPackage, BuildJob.defaultBuildNumber + 2);
+ tester.tester().deployCompletely(tester.application(), InternalDeploymentTester.applicationPackage, BuildJob.defaultBuildNumber + 2);
}
@Test
@@ -133,8 +133,8 @@ public class InternalStepRunnerTest {
tester.runner().run();
assertEquals(unfinished, tester.jobs().run(id).get().steps().get(Step.installInitialReal));
- tester.setEndpoints(appId, JobType.stagingTest.zone(system()));
- tester.configServer().convergeServices(appId, JobType.stagingTest.zone(system()));
+ tester.setEndpoints(instanceId, JobType.stagingTest.zone(system()));
+ tester.configServer().convergeServices(instanceId, JobType.stagingTest.zone(system()));
tester.configServer().setConfigChangeActions(new ConfigChangeActions(Collections.emptyList(),
singletonList(new RefeedAction("Refeed",
false,
@@ -151,7 +151,7 @@ public class InternalStepRunnerTest {
public void restartsServicesAndWaitsForRestartAndReboot() {
RunId id = tester.newRun(JobType.productionUsCentral1);
ZoneId zone = id.type().zone(system());
- HostName host = tester.configServer().hostFor(appId, zone);
+ HostName host = tester.configServer().hostFor(instanceId, zone);
tester.setEndpoints(testerId.id(), JobType.productionUsCentral1.zone(system()));
tester.runner().run();
@@ -168,11 +168,11 @@ public class InternalStepRunnerTest {
tester.runner().run();
assertEquals(succeeded, tester.jobs().run(id).get().steps().get(Step.deployReal));
- tester.configServer().convergeServices(appId, zone);
+ tester.configServer().convergeServices(instanceId, zone);
assertEquals(unfinished, tester.jobs().run(id).get().steps().get(Step.installReal));
- tester.configServer().nodeRepository().doRestart(new DeploymentId(appId, zone), Optional.of(host));
- tester.configServer().nodeRepository().requestReboot(new DeploymentId(appId, zone), Optional.of(host));
+ tester.configServer().nodeRepository().doRestart(new DeploymentId(instanceId, zone), Optional.of(host));
+ tester.configServer().nodeRepository().requestReboot(new DeploymentId(instanceId, zone), Optional.of(host));
tester.runner().run();
assertEquals(unfinished, tester.jobs().run(id).get().steps().get(Step.installReal));
@@ -187,67 +187,67 @@ public class InternalStepRunnerTest {
// Tester fails to show up for staging tests, and the real deployment for system tests.
tester.setEndpoints(testerId.id(), JobType.systemTest.zone(system()));
- tester.setEndpoints(appId, JobType.stagingTest.zone(system()));
+ tester.setEndpoints(instanceId, JobType.stagingTest.zone(system()));
tester.runner().run();
- tester.configServer().convergeServices(appId, JobType.stagingTest.zone(system()));
+ tester.configServer().convergeServices(instanceId, JobType.stagingTest.zone(system()));
tester.runner().run();
- tester.configServer().convergeServices(appId, JobType.systemTest.zone(system()));
+ tester.configServer().convergeServices(instanceId, JobType.systemTest.zone(system()));
tester.configServer().convergeServices(testerId.id(), JobType.systemTest.zone(system()));
- tester.configServer().convergeServices(appId, JobType.stagingTest.zone(system()));
+ tester.configServer().convergeServices(instanceId, JobType.stagingTest.zone(system()));
tester.configServer().convergeServices(testerId.id(), JobType.stagingTest.zone(system()));
tester.runner().run();
tester.clock().advance(InternalStepRunner.endpointTimeout.plus(Duration.ofSeconds(1)));
tester.runner().run();
- assertEquals(failed, tester.jobs().last(appId, JobType.systemTest).get().steps().get(Step.installReal));
- assertEquals(failed, tester.jobs().last(appId, JobType.stagingTest).get().steps().get(Step.installTester));
+ assertEquals(failed, tester.jobs().last(instanceId, JobType.systemTest).get().steps().get(Step.installReal));
+ assertEquals(failed, tester.jobs().last(instanceId, JobType.stagingTest).get().steps().get(Step.installTester));
}
@Test
public void installationFailsIfDeploymentExpires() {
tester.newRun(JobType.systemTest);
tester.runner().run();
- tester.configServer().convergeServices(appId, JobType.systemTest.zone(system()));
- tester.setEndpoints(appId, JobType.systemTest.zone(system()));
+ tester.configServer().convergeServices(instanceId, JobType.systemTest.zone(system()));
+ tester.setEndpoints(instanceId, JobType.systemTest.zone(system()));
tester.runner().run();
- assertEquals(succeeded, tester.jobs().last(appId, JobType.systemTest).get().steps().get(Step.installReal));
+ assertEquals(succeeded, tester.jobs().last(instanceId, JobType.systemTest).get().steps().get(Step.installReal));
- tester.applications().deactivate(appId, JobType.systemTest.zone(system()));
+ tester.applications().deactivate(instanceId, JobType.systemTest.zone(system()));
tester.runner().run();
- assertEquals(failed, tester.jobs().last(appId, JobType.systemTest).get().steps().get(Step.installTester));
- assertTrue(tester.jobs().last(appId, JobType.systemTest).get().hasEnded());
- assertTrue(tester.jobs().last(appId, JobType.systemTest).get().hasFailed());
+ assertEquals(failed, tester.jobs().last(instanceId, JobType.systemTest).get().steps().get(Step.installTester));
+ assertTrue(tester.jobs().last(instanceId, JobType.systemTest).get().hasEnded());
+ assertTrue(tester.jobs().last(instanceId, JobType.systemTest).get().hasFailed());
}
@Test
public void startTestsFailsIfDeploymentExpires() {
tester.newRun(JobType.systemTest);
tester.runner().run();
- tester.configServer().convergeServices(appId, JobType.systemTest.zone(system()));
+ tester.configServer().convergeServices(instanceId, JobType.systemTest.zone(system()));
tester.configServer().convergeServices(testerId.id(), JobType.systemTest.zone(system()));
tester.runner().run();
- tester.applications().deactivate(appId, JobType.systemTest.zone(system()));
+ tester.applications().deactivate(instanceId, JobType.systemTest.zone(system()));
tester.runner().run();
- assertEquals(unfinished, tester.jobs().last(appId, JobType.systemTest).get().steps().get(Step.startTests));
+ assertEquals(unfinished, tester.jobs().last(instanceId, JobType.systemTest).get().steps().get(Step.startTests));
}
@Test
public void alternativeEndpointsAreDetected() {
tester.newRun(JobType.systemTest);
tester.runner().run();;
- tester.configServer().convergeServices(appId, JobType.systemTest.zone(system()));
+ tester.configServer().convergeServices(instanceId, JobType.systemTest.zone(system()));
tester.configServer().convergeServices(testerId.id(), JobType.systemTest.zone(system()));
- assertEquals(unfinished, tester.jobs().last(appId, JobType.systemTest).get().steps().get(Step.installReal));
- assertEquals(unfinished, tester.jobs().last(appId, JobType.systemTest).get().steps().get(Step.installTester));
-
- tester.tester().controller().curator().writeRoutingPolicies(appId, Set.of(new RoutingPolicy(appId,
- ClusterSpec.Id.from("default"),
- JobType.systemTest.zone(system()),
- HostName.from("host"),
- Optional.empty(),
- emptySet())));
+ assertEquals(unfinished, tester.jobs().last(instanceId, JobType.systemTest).get().steps().get(Step.installReal));
+ assertEquals(unfinished, tester.jobs().last(instanceId, JobType.systemTest).get().steps().get(Step.installTester));
+
+ tester.tester().controller().curator().writeRoutingPolicies(instanceId, Set.of(new RoutingPolicy(instanceId,
+ ClusterSpec.Id.from("default"),
+ JobType.systemTest.zone(system()),
+ HostName.from("host"),
+ Optional.empty(),
+ emptySet())));
tester.tester().controller().curator().writeRoutingPolicies(testerId.id(), Set.of(new RoutingPolicy(testerId.id(),
ClusterSpec.Id.from("default"),
JobType.systemTest.zone(system()),
@@ -255,8 +255,8 @@ public class InternalStepRunnerTest {
Optional.empty(),
emptySet())));
tester.runner().run();;
- assertEquals(succeeded, tester.jobs().last(appId, JobType.systemTest).get().steps().get(Step.installReal));
- assertEquals(succeeded, tester.jobs().last(appId, JobType.systemTest).get().steps().get(Step.installTester));
+ assertEquals(succeeded, tester.jobs().last(instanceId, JobType.systemTest).get().steps().get(Step.installReal));
+ assertEquals(succeeded, tester.jobs().last(instanceId, JobType.systemTest).get().steps().get(Step.installTester));
}
@Test
@@ -303,12 +303,12 @@ public class InternalStepRunnerTest {
assertEquals(URI.create(tester.routing().endpoints(new DeploymentId(testerId.id(), JobType.systemTest.zone(system()))).get(0).endpoint()),
tester.cloud().testerUrl());
Inspector configObject = SlimeUtils.jsonToSlime(tester.cloud().config()).get();
- assertEquals(appId.serializedForm(), configObject.field("application").asString());
+ assertEquals(instanceId.serializedForm(), configObject.field("application").asString());
assertEquals(JobType.systemTest.zone(system()).value(), configObject.field("zone").asString());
assertEquals(system().value(), configObject.field("system").asString());
assertEquals(1, configObject.field("endpoints").children());
assertEquals(1, configObject.field("endpoints").field(JobType.systemTest.zone(system()).value()).entries());
- configObject.field("endpoints").field(JobType.systemTest.zone(system()).value()).traverse((ArrayTraverser) (__, endpoint) -> assertEquals(tester.routing().endpoints(new DeploymentId(appId, JobType.systemTest.zone(system()))).get(0).endpoint(), endpoint.asString()));
+ configObject.field("endpoints").field(JobType.systemTest.zone(system()).value()).traverse((ArrayTraverser) (__, endpoint) -> assertEquals(tester.routing().endpoints(new DeploymentId(instanceId, JobType.systemTest.zone(system()))).get(0).endpoint(), endpoint.asString()));
long lastId = tester.jobs().details(id).get().lastId().getAsLong();
tester.cloud().add(new LogEntry(0, Instant.ofEpochMilli(123), info, "Ready!"));
@@ -336,36 +336,36 @@ public class InternalStepRunnerTest {
@Test
public void deployToDev() {
ZoneId zone = JobType.devUsEast1.zone(system());
- tester.jobs().deploy(appId, JobType.devUsEast1, Optional.empty(), applicationPackage);
+ tester.jobs().deploy(instanceId, JobType.devUsEast1, Optional.empty(), applicationPackage);
tester.runner().run();
- RunId id = tester.jobs().last(appId, JobType.devUsEast1).get().id();
+ RunId id = tester.jobs().last(instanceId, JobType.devUsEast1).get().id();
assertEquals(unfinished, tester.jobs().run(id).get().steps().get(Step.installReal));
Version version = new Version("7.8.9");
Future<?> concurrentDeployment = Executors.newSingleThreadExecutor().submit(() -> {
- tester.jobs().deploy(appId, JobType.devUsEast1, Optional.of(version), applicationPackage);
+ tester.jobs().deploy(instanceId, JobType.devUsEast1, Optional.of(version), applicationPackage);
});
while ( ! concurrentDeployment.isDone())
tester.runner().run();
- assertEquals(id.number() + 1, tester.jobs().last(appId, JobType.devUsEast1).get().id().number());
+ assertEquals(id.number() + 1, tester.jobs().last(instanceId, JobType.devUsEast1).get().id().number());
ApplicationPackage otherPackage = new ApplicationPackageBuilder().region("us-central-1").build();
- tester.jobs().deploy(appId, JobType.perfUsEast3, Optional.empty(), otherPackage);
+ tester.jobs().deploy(instanceId, JobType.perfUsEast3, Optional.empty(), otherPackage);
tester.runner().run(); // Job run order determined by JobType enum order per application.
- tester.configServer().convergeServices(appId, zone);
- tester.setEndpoints(appId, zone);
+ tester.configServer().convergeServices(instanceId, zone);
+ tester.setEndpoints(instanceId, zone);
assertEquals(unfinished, tester.jobs().run(id).get().steps().get(Step.installReal));
- assertEquals(applicationPackage.hash(), tester.configServer().application(appId, zone).get().applicationPackage().hash());
- assertEquals(otherPackage.hash(), tester.configServer().application(appId, JobType.perfUsEast3.zone(system())).get().applicationPackage().hash());
+ assertEquals(applicationPackage.hash(), tester.configServer().application(instanceId, zone).get().applicationPackage().hash());
+ assertEquals(otherPackage.hash(), tester.configServer().application(instanceId, JobType.perfUsEast3.zone(system())).get().applicationPackage().hash());
- tester.configServer().setVersion(appId, zone, version);
+ tester.configServer().setVersion(instanceId, zone, version);
tester.runner().run();
assertEquals(1, tester.jobs().active().size());
- assertEquals(version, tester.tester().application(appId).deployments().get(zone).version());
+ assertEquals(version, tester.tester().instance(instanceId).deployments().get(zone).version());
try {
- tester.jobs().deploy(appId, JobType.productionApNortheast1, Optional.empty(), applicationPackage);
+ tester.jobs().deploy(instanceId, JobType.productionApNortheast1, Optional.empty(), applicationPackage);
fail("Deployments outside dev should not be allowed.");
}
catch (IllegalArgumentException expected) { }
@@ -416,7 +416,7 @@ public class InternalStepRunnerTest {
List<X509Certificate> trusted = new ArrayList<>(publicCdApplicationPackage.trustedCertificates());
trusted.add(tester.jobs().run(id).get().testerCertificate().get());
- assertEquals(trusted, tester.configServer().application(appId, id.type().zone(system())).get().applicationPackage().trustedCertificates());
+ assertEquals(trusted, tester.configServer().application(instanceId, id.type().zone(system())).get().applicationPackage().trustedCertificates());
tester.clock().advance(InternalStepRunner.certificateTimeout.plus(Duration.ofSeconds(1)));
tester.runner().run();
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/TestConfigSerializerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/TestConfigSerializerTest.java
index d4550ecc338..2ddef642065 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/TestConfigSerializerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/TestConfigSerializerTest.java
@@ -14,7 +14,7 @@ import java.nio.file.Paths;
import java.util.List;
import java.util.Map;
-import static com.yahoo.vespa.hosted.controller.deployment.InternalDeploymentTester.appId;
+import static com.yahoo.vespa.hosted.controller.deployment.InternalDeploymentTester.instanceId;
import static org.junit.Assert.assertEquals;
/**
@@ -25,7 +25,7 @@ public class TestConfigSerializerTest {
@Test
public void testConfig() throws IOException {
ZoneId zone = JobType.systemTest.zone(SystemName.PublicCd);
- byte[] json = new TestConfigSerializer(SystemName.PublicCd).configJson(appId,
+ byte[] json = new TestConfigSerializer(SystemName.PublicCd).configJson(instanceId,
JobType.systemTest,
Map.of(zone, Map.of(ClusterSpec.Id.from("ai"),
URI.create("https://server/"))),
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmerTest.java
index 98e8eff508e..ec98fbf69cb 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmerTest.java
@@ -44,12 +44,12 @@ public class ApplicationOwnershipConfirmerTest {
Optional<Contact> contact = Optional.of(tester.controllerTester().serviceRegistry().contactRetrieverMock().contact());
TenantName property = tester.controllerTester().createTenant("property", "domain", 1L, contact);
tester.createAndDeploy(property, "application", 1, "default");
- Supplier<Instance> propertyApp = () -> tester.controller().applications().require(ApplicationId.from("property", "application", "default"));
+ Supplier<Instance> propertyApp = () -> tester.controller().applications().requireInstance(ApplicationId.from("property", "application", "default"));
UserTenant user = UserTenant.create("by-user", contact);
tester.controller().tenants().createUser(user);
tester.createAndDeploy(user.name(), "application", 2, "default");
- Supplier<Instance> userApp = () -> tester.controller().applications().require(ApplicationId.from("by-user", "application", "default"));
+ Supplier<Instance> userApp = () -> tester.controller().applications().requireInstance(ApplicationId.from("by-user", "application", "default"));
assertFalse("No issue is initially stored for a new application.", propertyApp.get().ownershipIssueId().isPresent());
assertFalse("No issue is initially stored for a new application.", userApp.get().ownershipIssueId().isPresent());
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ClusterInfoMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ClusterInfoMaintainerTest.java
index 72b16d76864..9739a24af01 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ClusterInfoMaintainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ClusterInfoMaintainerTest.java
@@ -4,9 +4,9 @@ package com.yahoo.vespa.hosted.controller.maintenance;
import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterSpec;
-import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.NodeType;
+import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node;
@@ -29,21 +29,23 @@ public class ClusterInfoMaintainerTest {
@Test
public void maintain() {
- ApplicationId app = tester.createAndDeploy("tenant1", "domain1", "app1",
- Environment.dev, 123).id();
+ tester.createTenant("tenant1", "domain123", 321L);
+ ApplicationId app = tester.createApplication(TenantName.from("tenant1"), "app1", "default", 123).id().defaultInstance();
+ ZoneId zone = ZoneId.from("dev", "us-east-1");
+ tester.deploy(app, zone);
// Precondition: no cluster info attached to the deployments
- Deployment deployment = tester.controller().applications().get(app).get().deployments().values().stream()
+ Deployment deployment = tester.controller().applications().getInstance(app).get().deployments().values().stream()
.findFirst()
.get();
assertEquals(0, deployment.clusterInfo().size());
- addNodes(ZoneId.from("dev", "us-east-1"));
+ addNodes(zone);
ClusterInfoMaintainer maintainer = new ClusterInfoMaintainer(tester.controller(), Duration.ofHours(1),
new JobControl(new MockCuratorDb()));
maintainer.maintain();
- deployment = tester.controller().applications().get(app).get().deployments().values().stream()
+ deployment = tester.controller().applications().getInstance(app).get().deployments().values().stream()
.findFirst()
.get();
assertEquals(2, deployment.clusterInfo().size());
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java
index b2ac44bf23b..d43526ce5fa 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java
@@ -4,6 +4,7 @@ package com.yahoo.vespa.hosted.controller.maintenance;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.zone.ZoneId;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.Deployment;
@@ -33,11 +34,14 @@ public class DeploymentExpirerTest {
);
DeploymentExpirer expirer = new DeploymentExpirer(tester.controller(), Duration.ofDays(10),
new JobControl(new MockCuratorDb()));
- Instance devApp = tester.createApplication("app1", "tenant1", 123L, 1L);
- Instance prodApp = tester.createApplication("app2", "tenant2", 456L, 2L);
+ Application devApp = tester.createApplication("app1", "tenant1", 123L, 1L);
+ Application prodApp = tester.createApplication("app2", "tenant2", 456L, 2L);
+
+ Instance devInstance = tester.defaultInstance(devApp.id());
+ Instance prodInstance = tester.defaultInstance(prodApp.id());
// Deploy dev
- tester.controllerTester().deploy(devApp, tester.controllerTester().toZone(Environment.dev));
+ tester.controllerTester().deploy(devInstance.id(), tester.controllerTester().toZone(Environment.dev));
// Deploy prod
ApplicationPackage prodAppPackage = new ApplicationPackageBuilder()
@@ -45,23 +49,23 @@ public class DeploymentExpirerTest {
.build();
tester.deployCompletely(prodApp, prodAppPackage);
- assertEquals(1, permanentDeployments(devApp).size());
- assertEquals(1, permanentDeployments(prodApp).size());
+ assertEquals(1, permanentDeployments(devInstance).size());
+ assertEquals(1, permanentDeployments(prodInstance).size());
// Not expired at first
expirer.maintain();
- assertEquals(1, permanentDeployments(devApp).size());
- assertEquals(1, permanentDeployments(prodApp).size());
+ assertEquals(1, permanentDeployments(devInstance).size());
+ assertEquals(1, permanentDeployments(prodInstance).size());
// The dev application is removed
tester.clock().advance(Duration.ofDays(15));
expirer.maintain();
- assertEquals(0, permanentDeployments(devApp).size());
- assertEquals(1, permanentDeployments(prodApp).size());
+ assertEquals(0, permanentDeployments(devInstance).size());
+ assertEquals(1, permanentDeployments(prodInstance).size());
}
private List<Deployment> permanentDeployments(Instance instance) {
- return tester.controller().applications().get(instance.id()).get().deployments().values().stream()
+ return tester.controller().applications().getInstance(instance.id()).get().deployments().values().stream()
.filter(deployment -> deployment.zone().environment() != Environment.test &&
deployment.zone().environment() != Environment.staging)
.collect(Collectors.toList());
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporterTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporterTest.java
index d20ccdf5963..3c1fb2fffd0 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporterTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporterTest.java
@@ -4,11 +4,13 @@ package com.yahoo.vespa.hosted.controller.maintenance;
import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Environment;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.api.integration.organization.Contact;
import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueId;
import com.yahoo.vespa.hosted.controller.api.integration.stubs.LoggingDeploymentIssues;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
import com.yahoo.vespa.hosted.controller.persistence.MockCuratorDb;
@@ -78,24 +80,24 @@ public class DeploymentIssueReporterTest {
tester.controllerTester().createTenant("tenant3", "domain3", 1L, contact);
// Create and deploy one application for each of three tenants.
- Instance app1 = tester.createApplication("application1", "tenant1", projectId1, propertyId1);
- Instance app2 = tester.createApplication("application2", "tenant2", projectId2, propertyId2);
- Instance app3 = tester.createApplication("application3", "tenant3", projectId3, propertyId3);
+ Application app1 = tester.createApplication("application1", "tenant1", projectId1, propertyId1);
+ Application app2 = tester.createApplication("application2", "tenant2", projectId2, propertyId2);
+ Application app3 = tester.createApplication("application3", "tenant3", projectId3, propertyId3);
// NOTE: All maintenance should be idempotent within a small enough time interval, so maintain is called twice in succession throughout.
// apps 1 and 3 have one failure each.
tester.jobCompletion(component).application(app1).uploadArtifact(applicationPackage).submit();
- tester.deployAndNotify(app1, applicationPackage, true, systemTest);
- tester.deployAndNotify(app1, applicationPackage, false, stagingTest);
+ tester.deployAndNotify(app1.id().defaultInstance(), applicationPackage, true, systemTest);
+ tester.deployAndNotify(app1.id().defaultInstance(), applicationPackage, false, stagingTest);
// app2 is successful, but will fail later.
tester.deployCompletely(app2, canaryPackage);
tester.jobCompletion(component).application(app3).uploadArtifact(applicationPackage).submit();
- tester.deployAndNotify(app3, applicationPackage, true, systemTest);
- tester.deployAndNotify(app3, applicationPackage, true, stagingTest);
- tester.deployAndNotify(app3, applicationPackage, false, productionUsWest1);
+ tester.deployAndNotify(app3.id().defaultInstance(), applicationPackage, true, systemTest);
+ tester.deployAndNotify(app3.id().defaultInstance(), applicationPackage, true, stagingTest);
+ tester.deployAndNotify(app3.id().defaultInstance(), applicationPackage, false, productionUsWest1);
reporter.maintain();
reporter.maintain();
@@ -130,7 +132,7 @@ public class DeploymentIssueReporterTest {
// app3 fixes their problems, but the ticket for app3 is left open; see the resolved ticket is not escalated when another escalation period has passed.
- tester.deployAndNotify(app3, applicationPackage, true, productionUsWest1);
+ tester.deployAndNotify(app3.id().defaultInstance(), applicationPackage, true, productionUsWest1);
tester.clock().advance(maxInactivity.plus(Duration.ofDays(1)));
reporter.maintain();
@@ -142,7 +144,7 @@ public class DeploymentIssueReporterTest {
// app3 now has a new failure past max failure age; see that a new issue is filed.
tester.jobCompletion(component).application(app3).nextBuildNumber().uploadArtifact(applicationPackage).submit();
- tester.deployAndNotify(app3, applicationPackage, false, systemTest);
+ tester.deployAndNotify(app3.id().defaultInstance(), applicationPackage, false, systemTest);
tester.clock().advance(maxInactivity.plus(maxFailureAge));
reporter.maintain();
@@ -175,7 +177,7 @@ public class DeploymentIssueReporterTest {
class MockDeploymentIssues extends LoggingDeploymentIssues {
- private final Map<ApplicationId, IssueId> applicationIssues = new HashMap<>();
+ private final Map<TenantAndApplicationId, IssueId> applicationIssues = new HashMap<>();
private final Map<IssueId, Integer> issueLevels = new HashMap<>();
MockDeploymentIssues() {
@@ -191,24 +193,24 @@ public class DeploymentIssueReporterTest {
@Override
protected IssueId fileIssue(ApplicationId applicationId) {
IssueId issueId = super.fileIssue(applicationId);
- applicationIssues.put(applicationId, issueId);
+ applicationIssues.put(TenantAndApplicationId.from(applicationId), issueId);
return issueId;
}
- void closeFor(ApplicationId applicationId) {
- issueUpdates.remove(applicationIssues.remove(applicationId));
+ void closeFor(TenantAndApplicationId id) {
+ issueUpdates.remove(applicationIssues.remove(id));
}
- void touchFor(ApplicationId applicationId) {
- issueUpdates.put(applicationIssues.get(applicationId), tester.clock().instant());
+ void touchFor(TenantAndApplicationId id) {
+ issueUpdates.put(applicationIssues.get(id), tester.clock().instant());
}
- boolean isOpenFor(ApplicationId applicationId) {
- return applicationIssues.containsKey(applicationId);
+ boolean isOpenFor(TenantAndApplicationId id) {
+ return applicationIssues.containsKey(id);
}
- int escalationLevelFor(ApplicationId applicationId) {
- return issueLevels.getOrDefault(applicationIssues.get(applicationId), 0);
+ int escalationLevelFor(TenantAndApplicationId id) {
+ return issueLevels.getOrDefault(applicationIssues.get(id), 0);
}
int size() {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java
index c4a835cd7e7..cf33b354139 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java
@@ -37,10 +37,10 @@ public class DeploymentMetricsMaintainerTest {
@Test
public void updates_metrics() {
var application = tester.createApplication("app1", "tenant1", 123L, 1L);
- deploy(application, Version.fromString("7.1"));
+ deploy(application.id().defaultInstance(), Version.fromString("7.1"));
DeploymentMetricsMaintainer maintainer = maintainer(tester.controller());
- Supplier<Instance> app = () -> tester.application(application.id());
+ Supplier<Instance> app = () -> tester.defaultInstance(application.id());
Supplier<Deployment> deployment = () -> app.get().deployments().values().stream().findFirst().get();
// No metrics gathered yet
@@ -51,7 +51,7 @@ public class DeploymentMetricsMaintainerTest {
assertFalse("Never received any writes", deployment.get().activity().lastWritten().isPresent());
// Only get application metrics for old version
- deploy(app.get(), Version.fromString("6.3.3"));
+ deploy(application.id().defaultInstance(), Version.fromString("6.3.3"));
maintainer.maintain();
assertEquals(0, app.get().metrics().queryServiceQuality(), 0);
assertEquals(0, deployment.get().metrics().documentCount(), 0);
@@ -60,13 +60,13 @@ public class DeploymentMetricsMaintainerTest {
assertFalse("Never received any writes", deployment.get().activity().lastWritten().isPresent());
// Metrics are gathered and saved to application
- deploy(app.get(), Version.fromString("7.5.5"));
+ deploy(application.id().defaultInstance(), Version.fromString("7.5.5"));
var metrics0 = Map.of(ClusterMetrics.QUERIES_PER_SECOND, 1D,
ClusterMetrics.FEED_PER_SECOND, 2D,
ClusterMetrics.DOCUMENT_COUNT, 3D,
ClusterMetrics.QUERY_LATENCY, 4D,
ClusterMetrics.FEED_LATENCY, 5D);
- setMetrics(application.id(), metrics0);
+ setMetrics(application.id().defaultInstance(), metrics0);
maintainer.maintain();
Instant t1 = tester.clock().instant().truncatedTo(MILLIS);
assertEquals(0.0, app.get().metrics().queryServiceQuality(), Double.MIN_VALUE);
@@ -96,7 +96,7 @@ public class DeploymentMetricsMaintainerTest {
var metrics1 = new HashMap<>(metrics0);
metrics1.put(ClusterMetrics.QUERIES_PER_SECOND, 0D);
metrics1.put(ClusterMetrics.FEED_PER_SECOND, 5D);
- setMetrics(application.id(), metrics1);
+ setMetrics(application.id().defaultInstance(), metrics1);
maintainer.maintain();
assertEquals(t2, deployment.get().activity().lastQueried().get());
assertEquals(t3, deployment.get().activity().lastWritten().get());
@@ -107,7 +107,7 @@ public class DeploymentMetricsMaintainerTest {
tester.clock().advance(Duration.ofHours(1));
var metrics2 = new HashMap<>(metrics1);
metrics2.put(ClusterMetrics.FEED_PER_SECOND, 0D);
- setMetrics(application.id(), metrics2);
+ setMetrics(application.id().defaultInstance(), metrics2);
maintainer.maintain();
assertEquals(t2, deployment.get().activity().lastQueried().get());
assertEquals(t3, deployment.get().activity().lastWritten().get());
@@ -127,8 +127,8 @@ public class DeploymentMetricsMaintainerTest {
return new DeploymentMetricsMaintainer(controller, Duration.ofDays(1), new JobControl(controller.curator()));
}
- private void deploy(Instance instance, Version version) {
- tester.controllerTester().deploy(instance,
+ private void deploy(ApplicationId id, Version version) {
+ tester.controllerTester().deploy(id,
ZoneId.from(Environment.dev, RegionName.from("us-east-1")),
Optional.of(new ApplicationPackage(new byte[0])),
false,
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/JobRunnerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/JobRunnerTest.java
index d0896b5e60b..ff245e2e488 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/JobRunnerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/JobRunnerTest.java
@@ -8,6 +8,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationV
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.SourceRevision;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
import com.yahoo.vespa.hosted.controller.deployment.JobController;
import com.yahoo.vespa.hosted.controller.deployment.Run;
@@ -83,7 +84,7 @@ public class JobRunnerTest {
JobRunner runner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
phasedExecutor(phaser), stepRunner);
- ApplicationId id = tester.createApplication("real", "tenant", 1, 1L).id();
+ ApplicationId id = tester.createApplication("real", "tenant", 1, 1L).id().defaultInstance();
jobs.submit(id, versions.targetApplication().source().get(), "a@b", 2, applicationPackage, new byte[0]);
jobs.start(id, systemTest, versions);
@@ -114,7 +115,7 @@ public class JobRunnerTest {
JobRunner runner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
inThreadExecutor(), mappedRunner(outcomes));
- ApplicationId id = tester.createApplication("real", "tenant", 1, 1L).id();
+ ApplicationId id = tester.createApplication("real", "tenant", 1, 1L).id().defaultInstance();
jobs.submit(id, versions.targetApplication().source().get(), "a@b", 2, applicationPackage, new byte[0]);
Supplier<Run> run = () -> jobs.last(id, systemTest).get();
@@ -202,7 +203,7 @@ public class JobRunnerTest {
JobRunner runner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
Executors.newFixedThreadPool(32), waitingRunner(barrier));
- ApplicationId id = tester.createApplication("real", "tenant", 1, 1L).id();
+ ApplicationId id = tester.createApplication("real", "tenant", 1, 1L).id().defaultInstance();
jobs.submit(id, versions.targetApplication().source().get(), "a@b", 2, applicationPackage, new byte[0]);
RunId runId = new RunId(id, systemTest, 1);
@@ -217,7 +218,7 @@ public class JobRunnerTest {
// Thread is still trying to deploy tester -- delete application, and see all data is garbage collected.
assertEquals(Collections.singletonList(runId), jobs.active().stream().map(run -> run.id()).collect(Collectors.toList()));
- tester.controllerTester().controller().applications().deleteApplication(id.tenant(), id.application(), tester.controllerTester().credentialsFor(id));
+ tester.controllerTester().controller().applications().deleteApplication(id.tenant(), id.application(), tester.controllerTester().credentialsFor(TenantAndApplicationId.from(id)));
assertEquals(Collections.emptyList(), jobs.active());
assertEquals(runId, jobs.last(id, systemTest).get().id());
@@ -238,7 +239,7 @@ public class JobRunnerTest {
JobRunner runner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
inThreadExecutor(), (id, step) -> Optional.of(running));
- ApplicationId id = tester.createApplication("real", "tenant", 1, 1L).id();
+ ApplicationId id = tester.createApplication("real", "tenant", 1, 1L).id().defaultInstance();
jobs.submit(id, versions.targetApplication().source().get(), "a@b", 2, applicationPackage, new byte[0]);
for (int i = 0; i < jobs.historyLength(); i++) {
@@ -266,7 +267,7 @@ public class JobRunnerTest {
JobRunner runner = new JobRunner(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()),
inThreadExecutor(), mappedRunner(outcomes));
- ApplicationId id = tester.createApplication("real", "tenant", 1, 1L).id();
+ ApplicationId id = tester.createApplication("real", "tenant", 1, 1L).id().defaultInstance();
jobs.submit(id, versions.targetApplication().source().get(), "a@b", 2, applicationPackage, new byte[0]);
jobs.start(id, systemTest, versions);
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporterTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporterTest.java
index 46e6d9c3f50..4fc952b0b15 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporterTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporterTest.java
@@ -2,9 +2,10 @@
package com.yahoo.vespa.hosted.controller.maintenance;
import com.yahoo.component.Version;
+import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.zone.ZoneId;
-import com.yahoo.vespa.hosted.controller.Instance;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
@@ -18,6 +19,7 @@ import com.yahoo.vespa.hosted.controller.persistence.MockCuratorDb;
import org.junit.Test;
import java.time.Duration;
+import java.util.Optional;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.component;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.productionUsWest1;
@@ -46,10 +48,10 @@ public class MetricsReporterTest {
assertEquals(0.0, metrics.getMetric(MetricsReporter.DEPLOYMENT_FAIL_METRIC));
// Deploy all apps successfully
- Instance app1 = tester.createApplication("app1", "tenant1", 1, 11L);
- Instance app2 = tester.createApplication("app2", "tenant1", 2, 22L);
- Instance app3 = tester.createApplication("app3", "tenant1", 3, 33L);
- Instance app4 = tester.createApplication("app4", "tenant1", 4, 44L);
+ Application app1 = tester.createApplication("app1", "tenant1", 1, 11L);
+ Application app2 = tester.createApplication("app2", "tenant1", 2, 22L);
+ Application app3 = tester.createApplication("app3", "tenant1", 3, 33L);
+ Application app4 = tester.createApplication("app4", "tenant1", 4, 44L);
tester.deployCompletely(app1, applicationPackage);
tester.deployCompletely(app2, applicationPackage);
tester.deployCompletely(app3, applicationPackage);
@@ -60,7 +62,7 @@ public class MetricsReporterTest {
// 1 app fails system-test
tester.jobCompletion(component).application(app4).nextBuildNumber().uploadArtifact(applicationPackage).submit();
- tester.deployAndNotify(app4, applicationPackage, false, systemTest);
+ tester.deployAndNotify(app4.id().defaultInstance(), Optional.of(applicationPackage), false, systemTest);
metricsReporter.maintain();
assertEquals(25.0, metrics.getMetric(MetricsReporter.DEPLOYMENT_FAIL_METRIC));
@@ -76,25 +78,25 @@ public class MetricsReporterTest {
MetricsReporter reporter = createReporter(tester.controller());
- Instance app = tester.createApplication("app1", "tenant1", 1, 11L);
+ Application app = tester.createApplication("app1", "tenant1", 1, 11L);
tester.deployCompletely(app, applicationPackage);
reporter.maintain();
- assertEquals(Duration.ZERO, getAverageDeploymentDuration(app)); // An exceptionally fast deployment :-)
+ assertEquals(Duration.ZERO, getAverageDeploymentDuration(app.id().defaultInstance())); // An exceptionally fast deployment :-)
// App spends 3 hours deploying
tester.jobCompletion(component).application(app).nextBuildNumber().uploadArtifact(applicationPackage).submit();
tester.clock().advance(Duration.ofHours(1));
- tester.deployAndNotify(app, applicationPackage, true, systemTest);
+ tester.deployAndNotify(app.id().defaultInstance(), Optional.of(applicationPackage), true, systemTest);
tester.clock().advance(Duration.ofMinutes(30));
- tester.deployAndNotify(app, applicationPackage, true, stagingTest);
+ tester.deployAndNotify(app.id().defaultInstance(), Optional.of(applicationPackage), true, stagingTest);
tester.clock().advance(Duration.ofMinutes(90));
- tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
+ tester.deployAndNotify(app.id().defaultInstance(), Optional.of(applicationPackage), true, productionUsWest1);
reporter.maintain();
// Average time is 1 hour (system-test) + 90 minutes (staging-test runs in parallel with system-test) + 90 minutes (production) / 3 jobs
- assertEquals(Duration.ofMinutes(80), getAverageDeploymentDuration(app));
+ assertEquals(Duration.ofMinutes(80), getAverageDeploymentDuration(app.id().defaultInstance()));
// Another deployment starts and stalls for 12 hours
tester.jobCompletion(component).application(app).nextBuildNumber(2).uploadArtifact(applicationPackage).submit();
@@ -105,7 +107,7 @@ public class MetricsReporterTest {
.plus(Duration.ofHours(12)) // hanging staging-test
.plus(Duration.ofMinutes(90)) // previous production job
.dividedBy(3), // Total number of orchestrated jobs
- getAverageDeploymentDuration(app));
+ getAverageDeploymentDuration(app.id().defaultInstance()));
}
@Test
@@ -117,46 +119,46 @@ public class MetricsReporterTest {
.build();
MetricsReporter reporter = createReporter(tester.controller());
- Instance app = tester.createApplication("app1", "tenant1", 1, 11L);
+ Application app = tester.createApplication("app1", "tenant1", 1, 11L);
// Initial deployment without failures
tester.deployCompletely(app, applicationPackage);
reporter.maintain();
- assertEquals(0, getDeploymentsFailingUpgrade(app));
+ assertEquals(0, getDeploymentsFailingUpgrade(app.id().defaultInstance()));
// Failing application change is not counted
tester.jobCompletion(component).application(app).nextBuildNumber().uploadArtifact(applicationPackage).submit();
- tester.deployAndNotify(app, applicationPackage, false, systemTest);
+ tester.deployAndNotify(app.id().defaultInstance(), Optional.of(applicationPackage), false, systemTest);
reporter.maintain();
- assertEquals(0, getDeploymentsFailingUpgrade(app));
+ assertEquals(0, getDeploymentsFailingUpgrade(app.id().defaultInstance()));
// Application change completes
- tester.deployAndNotify(app, applicationPackage, true, systemTest);
- tester.deployAndNotify(app, applicationPackage, true, stagingTest);
- tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
- assertFalse("Change deployed", tester.controller().applications().require(app.id()).change().hasTargets());
+ tester.deployAndNotify(app.id().defaultInstance(), Optional.of(applicationPackage), true, systemTest);
+ tester.deployAndNotify(app.id().defaultInstance(), Optional.of(applicationPackage), true, stagingTest);
+ tester.deployAndNotify(app.id().defaultInstance(), Optional.of(applicationPackage), true, productionUsWest1);
+ assertFalse("Change deployed", tester.controller().applications().requireApplication(app.id()).change().hasTargets());
// New version is released and upgrade fails in test environments
Version version = Version.fromString("7.1");
tester.upgradeSystem(version);
tester.upgrader().maintain();
- tester.deployAndNotify(app, applicationPackage, false, systemTest);
- tester.deployAndNotify(app, applicationPackage, false, stagingTest);
+ tester.deployAndNotify(app.id().defaultInstance(), Optional.of(applicationPackage), false, systemTest);
+ tester.deployAndNotify(app.id().defaultInstance(), Optional.of(applicationPackage), false, stagingTest);
reporter.maintain();
- assertEquals(2, getDeploymentsFailingUpgrade(app));
+ assertEquals(2, getDeploymentsFailingUpgrade(app.id().defaultInstance()));
// Test and staging pass and upgrade fails in production
- tester.deployAndNotify(app, applicationPackage, true, systemTest);
- tester.deployAndNotify(app, applicationPackage, true, stagingTest);
- tester.deployAndNotify(app, applicationPackage, false, productionUsWest1);
+ tester.deployAndNotify(app.id().defaultInstance(), Optional.of(applicationPackage), true, systemTest);
+ tester.deployAndNotify(app.id().defaultInstance(), Optional.of(applicationPackage), true, stagingTest);
+ tester.deployAndNotify(app.id().defaultInstance(), Optional.of(applicationPackage), false, productionUsWest1);
reporter.maintain();
- assertEquals(1, getDeploymentsFailingUpgrade(app));
+ assertEquals(1, getDeploymentsFailingUpgrade(app.id().defaultInstance()));
// Upgrade eventually succeeds
- tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
- assertFalse("Upgrade deployed", tester.controller().applications().require(app.id()).change().hasTargets());
+ tester.deployAndNotify(app.id().defaultInstance(), Optional.of(applicationPackage), true, productionUsWest1);
+ assertFalse("Upgrade deployed", tester.controller().applications().requireApplication(app.id()).change().hasTargets());
reporter.maintain();
- assertEquals(0, getDeploymentsFailingUpgrade(app));
+ assertEquals(0, getDeploymentsFailingUpgrade(app.id().defaultInstance()));
}
@Test
@@ -168,12 +170,12 @@ public class MetricsReporterTest {
.region("us-east-3")
.build();
MetricsReporter reporter = createReporter(tester.controller());
- Instance instance = tester.createApplication("app1", "tenant1", 1, 11L);
- tester.configServer().generateWarnings(new DeploymentId(instance.id(), ZoneId.from("prod", "us-west-1")), 3);
- tester.configServer().generateWarnings(new DeploymentId(instance.id(), ZoneId.from("prod", "us-east-3")), 4);
- tester.deployCompletely(instance, applicationPackage);
+ Application application = tester.createApplication("app1", "tenant1", 1, 11L);
+ tester.configServer().generateWarnings(new DeploymentId(application.id().defaultInstance(), ZoneId.from("prod", "us-west-1")), 3);
+ tester.configServer().generateWarnings(new DeploymentId(application.id().defaultInstance(), ZoneId.from("prod", "us-east-3")), 4);
+ tester.deployCompletely(application, applicationPackage);
reporter.maintain();
- assertEquals(4, getDeploymentWarnings(instance));
+ assertEquals(4, getDeploymentWarnings(application.id().defaultInstance()));
}
@Test
@@ -185,7 +187,7 @@ public class MetricsReporterTest {
MetricsReporter reporter = createReporter(tester.tester().controller());
reporter.maintain();
assertEquals(tester.clock().instant().getEpochSecond() - 1,
- getMetric(MetricsReporter.DEPLOYMENT_BUILD_AGE_SECONDS, tester.app()));
+ getMetric(MetricsReporter.DEPLOYMENT_BUILD_AGE_SECONDS, tester.instance().id()));
}
@Test
@@ -198,11 +200,11 @@ public class MetricsReporterTest {
.region("us-east-3")
.build();
MetricsReporter reporter = createReporter(tester.controller());
- Instance instance = tester.createApplication("app1", "tenant1", 1, 11L);
+ Application application = tester.createApplication("app1", "tenant1", 1, 11L);
reporter.maintain();
assertEquals("Queue is empty initially", 0, metrics.getMetric(MetricsReporter.NAME_SERVICE_REQUESTS_QUEUED).intValue());
- tester.deployCompletely(instance, applicationPackage);
+ tester.deployCompletely(application, applicationPackage);
reporter.maintain();
assertEquals("Deployment queues name services requests", 6, metrics.getMetric(MetricsReporter.NAME_SERVICE_REQUESTS_QUEUED).intValue());
@@ -211,31 +213,31 @@ public class MetricsReporterTest {
assertEquals("Queue consumed", 0, metrics.getMetric(MetricsReporter.NAME_SERVICE_REQUESTS_QUEUED).intValue());
}
- private Duration getAverageDeploymentDuration(Instance instance) {
- return Duration.ofSeconds(getMetric(MetricsReporter.DEPLOYMENT_AVERAGE_DURATION, instance).longValue());
+ private Duration getAverageDeploymentDuration(ApplicationId id) {
+ return Duration.ofSeconds(getMetric(MetricsReporter.DEPLOYMENT_AVERAGE_DURATION, id).longValue());
}
- private int getDeploymentsFailingUpgrade(Instance instance) {
- return getMetric(MetricsReporter.DEPLOYMENT_FAILING_UPGRADES, instance).intValue();
+ private int getDeploymentsFailingUpgrade(ApplicationId id) {
+ return getMetric(MetricsReporter.DEPLOYMENT_FAILING_UPGRADES, id).intValue();
}
- private int getDeploymentWarnings(Instance instance) {
- return getMetric(MetricsReporter.DEPLOYMENT_WARNINGS, instance).intValue();
+ private int getDeploymentWarnings(ApplicationId id) {
+ return getMetric(MetricsReporter.DEPLOYMENT_WARNINGS, id).intValue();
}
- private Number getMetric(String name, Instance instance) {
- return metrics.getMetric((dimensions) -> instance.id().tenant().value().equals(dimensions.get("tenant")) &&
- appDimension(instance).equals(dimensions.get("app")),
+ private Number getMetric(String name, ApplicationId id) {
+ return metrics.getMetric((dimensions) -> id.tenant().value().equals(dimensions.get("tenant")) &&
+ appDimension(id).equals(dimensions.get("app")),
name)
- .orElseThrow(() -> new RuntimeException("Expected metric to exist for " + instance.id()));
+ .orElseThrow(() -> new RuntimeException("Expected metric to exist for " + id));
}
private MetricsReporter createReporter(Controller controller) {
return new MetricsReporter(controller, metrics, new JobControl(new MockCuratorDb()));
}
- private static String appDimension(Instance instance) {
- return instance.id().application().value() + "." + instance.id().instance().value();
+ private static String appDimension(ApplicationId id) {
+ return id.application().value() + "." + id.instance().value();
}
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OutstandingChangeDeployerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OutstandingChangeDeployerTest.java
index 4bd1bfcffae..02282894544 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OutstandingChangeDeployerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OutstandingChangeDeployerTest.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.hosted.controller.maintenance;
import com.yahoo.component.Version;
import com.yahoo.config.provision.Environment;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.api.integration.BuildService;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
@@ -16,6 +17,7 @@ import org.junit.Test;
import java.time.Duration;
import java.util.List;
+import java.util.Optional;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@@ -36,49 +38,49 @@ public class OutstandingChangeDeployerTest {
.region("us-west-1")
.build();
- tester.createAndDeploy("app1", 11, applicationPackage);
- tester.createAndDeploy("app2", 22, applicationPackage);
+ Application app1 = tester.createAndDeploy("app1", 11, applicationPackage);
+ Application app2 = tester.createAndDeploy("app2", 22, applicationPackage);
Version version = new Version(6, 2);
- tester.deploymentTrigger().triggerChange(tester.application("app1").id(), Change.of(version));
+ tester.deploymentTrigger().triggerChange(app1.id(), Change.of(version));
tester.deploymentTrigger().triggerReadyJobs();
- assertEquals(Change.of(version), tester.application("app1").change());
- assertFalse(tester.application("app1").outstandingChange().hasTargets());
+ assertEquals(Change.of(version), tester.defaultInstance("app1").change());
+ assertFalse(tester.defaultInstance("app1").outstandingChange().hasTargets());
tester.jobCompletion(JobType.component)
- .application(tester.application("app1"))
+ .application(app1)
.sourceRevision(new SourceRevision("repository1","master", "cafed00d"))
.nextBuildNumber()
.uploadArtifact(applicationPackage)
.submit();
- Instance app = tester.application("app1");
- assertTrue(app.outstandingChange().hasTargets());
- assertEquals("1.0.43-cafed00d", app.outstandingChange().application().get().id());
+ Instance instance = tester.defaultInstance("app1");
+ assertTrue(instance.outstandingChange().hasTargets());
+ assertEquals("1.0.43-cafed00d", instance.outstandingChange().application().get().id());
assertEquals(2, tester.buildService().jobs().size());
deployer.maintain();
tester.deploymentTrigger().triggerReadyJobs();
assertEquals("No effect as job is in progress", 2, tester.buildService().jobs().size());
- assertEquals("1.0.43-cafed00d", app.outstandingChange().application().get().id());
+ assertEquals("1.0.43-cafed00d", instance.outstandingChange().application().get().id());
- tester.deployAndNotify(app, applicationPackage, true, JobType.systemTest);
- tester.deployAndNotify(app, applicationPackage, true, JobType.stagingTest);
- tester.deployAndNotify(app, applicationPackage, true, JobType.productionUsWest1);
- tester.deployAndNotify(app, applicationPackage, true, JobType.systemTest);
- tester.deployAndNotify(app, applicationPackage, true, JobType.stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.productionUsWest1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, JobType.stagingTest);
assertEquals("Upgrade done", 0, tester.buildService().jobs().size());
deployer.maintain();
tester.deploymentTrigger().triggerReadyJobs();
- app = tester.application("app1");
- assertEquals("1.0.43-cafed00d", app.change().application().get().id());
+ instance = tester.defaultInstance("app1");
+ assertEquals("1.0.43-cafed00d", instance.change().application().get().id());
List<BuildService.BuildJob> jobs = tester.buildService().jobs();
assertEquals(1, jobs.size());
assertEquals(JobType.productionUsWest1.jobName(), jobs.get(0).jobName());
- assertEquals(11, jobs.get(0).projectId());
- assertFalse(tester.application("app1").outstandingChange().hasTargets());
+ assertEquals(app1.id().defaultInstance(), jobs.get(0).applicationId());
+ assertFalse(tester.defaultInstance("app1").outstandingChange().hasTargets());
}
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RotationStatusUpdaterTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RotationStatusUpdaterTest.java
index 7f97f1bc4a9..b0201a21de5 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RotationStatusUpdaterTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RotationStatusUpdaterTest.java
@@ -42,7 +42,7 @@ public class RotationStatusUpdaterTest {
.build();
tester.deployCompletely(application, applicationPackage);
- Supplier<Instance> app = () -> tester.application(application.id());
+ Supplier<Instance> app = () -> tester.defaultInstance(application.id());
Supplier<Deployment> deployment1 = () -> app.get().deployments().get(zone1);
Supplier<Deployment> deployment2 = () -> app.get().deployments().get(zone2);
Supplier<Deployment> deployment3 = () -> app.get().deployments().get(zone3);
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RoutingPoliciesTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RoutingPoliciesTest.java
index 1763ff74fb0..78a9be9f1d3 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RoutingPoliciesTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RoutingPoliciesTest.java
@@ -5,6 +5,7 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.zone.ZoneId;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.LoadBalancer;
@@ -38,8 +39,10 @@ public class RoutingPoliciesTest {
private final DeploymentTester tester = new DeploymentTester();
- private final Instance app1 = tester.createApplication("app1", "tenant1", 1, 1L);
- private final Instance app2 = tester.createApplication("app2", "tenant1", 1, 1L);
+ private final Application app1 = tester.createApplication("app1", "tenant1", 1, 1L);
+ private final Application app2 = tester.createApplication("app2", "tenant1", 1, 1L);
+ private final Instance instance1 = tester.defaultInstance(app1.id());
+ private final Instance instance2 = tester.defaultInstance(app2.id());
private final ZoneId zone1 = ZoneId.from("prod", "us-west-1");
private final ZoneId zone2 = ZoneId.from("prod", "us-central-1");
@@ -62,7 +65,7 @@ public class RoutingPoliciesTest {
.endpoint("r1", "c0", "us-west-1")
.endpoint("r2", "c1")
.build();
- provisionLoadBalancers(clustersPerZone, app1.id(), zone1, zone2);
+ provisionLoadBalancers(clustersPerZone, instance1.id(), zone1, zone2);
// Creates alias records
tester.deployCompletely(app1, applicationPackage, ++buildNumber);
@@ -83,7 +86,7 @@ public class RoutingPoliciesTest {
aliasDataOf(endpoint3));
assertEquals("Routing policy count is equal to cluster count",
numberOfDeployments * clustersPerZone,
- tester.controller().applications().routingPolicies().get(app1.id()).size());
+ tester.controller().applications().routingPolicies().get(instance1.id()).size());
// Application gains a new deployment
ApplicationPackage applicationPackage2 = new ApplicationPackageBuilder()
@@ -95,7 +98,7 @@ public class RoutingPoliciesTest {
.endpoint("r2", "c1")
.build();
numberOfDeployments++;
- provisionLoadBalancers(clustersPerZone, app1.id(), zone3);
+ provisionLoadBalancers(clustersPerZone, instance1.id(), zone3);
tester.deployCompletely(app1, applicationPackage2, ++buildNumber);
// Endpoint is updated to contain cluster in new deployment
@@ -107,7 +110,7 @@ public class RoutingPoliciesTest {
// Another application is deployed with a single cluster and global endpoint
var endpoint4 = "r0.app2.tenant1.global.vespa.oath.cloud";
- provisionLoadBalancers(1, app2.id(), zone1, zone2);
+ provisionLoadBalancers(1, instance2.id(), zone1, zone2);
var applicationPackage3 = new ApplicationPackageBuilder()
.region(zone1.region())
.region(zone2.region())
@@ -129,7 +132,7 @@ public class RoutingPoliciesTest {
assertEquals("DNS records are removed", List.of(), aliasDataOf(endpoint1));
assertEquals("DNS records are removed", List.of(), aliasDataOf(endpoint2));
assertEquals("DNS records are removed", List.of(), aliasDataOf(endpoint3));
- Set<RoutingPolicy> policies = tester.controller().curator().readRoutingPolicies(app1.id());
+ Set<RoutingPolicy> policies = tester.controller().curator().readRoutingPolicies(instance1.id());
assertEquals(clustersPerZone * numberOfDeployments, policies.size());
assertTrue("Rotation membership is removed from all policies",
policies.stream().allMatch(policy -> policy.endpoints().isEmpty()));
@@ -141,7 +144,7 @@ public class RoutingPoliciesTest {
// Deploy application
int clustersPerZone = 2;
int buildNumber = 42;
- provisionLoadBalancers(clustersPerZone, app1.id(), zone1, zone2);
+ provisionLoadBalancers(clustersPerZone, instance1.id(), zone1, zone2);
tester.deployCompletely(app1, applicationPackage, ++buildNumber);
// Deployment creates records and policies for all clusters in all zones
@@ -152,15 +155,15 @@ public class RoutingPoliciesTest {
"c1.app1.tenant1.us-central-1.vespa.oath.cloud"
);
assertEquals(expectedRecords, recordNames());
- assertEquals(4, policies(app1).size());
+ assertEquals(4, policies(instance1).size());
// Next deploy does nothing
tester.deployCompletely(app1, applicationPackage, ++buildNumber);
assertEquals(expectedRecords, recordNames());
- assertEquals(4, policies(app1).size());
+ assertEquals(4, policies(instance1).size());
// Add 1 cluster in each zone and deploy
- provisionLoadBalancers(clustersPerZone + 1, app1.id(), zone1, zone2);
+ provisionLoadBalancers(clustersPerZone + 1, instance1.id(), zone1, zone2);
tester.deployCompletely(app1, applicationPackage, ++buildNumber);
expectedRecords = Set.of(
"c0.app1.tenant1.us-west-1.vespa.oath.cloud",
@@ -171,10 +174,10 @@ public class RoutingPoliciesTest {
"c2.app1.tenant1.us-central-1.vespa.oath.cloud"
);
assertEquals(expectedRecords, recordNames());
- assertEquals(6, policies(app1).size());
+ assertEquals(6, policies(instance1).size());
// Deploy another application
- provisionLoadBalancers(clustersPerZone, app2.id(), zone1, zone2);
+ provisionLoadBalancers(clustersPerZone, instance2.id(), zone1, zone2);
tester.deployCompletely(app2, applicationPackage, ++buildNumber);
expectedRecords = Set.of(
"c0.app1.tenant1.us-west-1.vespa.oath.cloud",
@@ -189,10 +192,10 @@ public class RoutingPoliciesTest {
"c1.app2.tenant1.us-west-1.vespa.oath.cloud"
);
assertEquals(expectedRecords, recordNames());
- assertEquals(4, policies(app2).size());
+ assertEquals(4, policies(instance2).size());
// Deploy removes cluster from app1
- provisionLoadBalancers(clustersPerZone, app1.id(), zone1, zone2);
+ provisionLoadBalancers(clustersPerZone, instance1.id(), zone1, zone2);
tester.deployCompletely(app1, applicationPackage, ++buildNumber);
expectedRecords = Set.of(
"c0.app1.tenant1.us-west-1.vespa.oath.cloud",
@@ -207,10 +210,10 @@ public class RoutingPoliciesTest {
assertEquals(expectedRecords, recordNames());
// Remove app2 completely
- tester.controller().applications().require(app2.id()).deployments().keySet()
+ tester.controller().applications().requireInstance(instance2.id()).deployments().keySet()
.forEach(zone -> {
- tester.configServer().removeLoadBalancers(app2.id(), zone);
- tester.controller().applications().deactivate(app2.id(), zone);
+ tester.configServer().removeLoadBalancers(instance2.id(), zone);
+ tester.controller().applications().deactivate(instance2.id(), zone);
});
tester.flushDnsRequests();
expectedRecords = Set.of(
@@ -220,22 +223,22 @@ public class RoutingPoliciesTest {
"c1.app1.tenant1.us-central-1.vespa.oath.cloud"
);
assertEquals(expectedRecords, recordNames());
- assertTrue("Removes stale routing policies " + app2, tester.controller().applications().routingPolicies().get(app2.id()).isEmpty());
- assertEquals("Keeps routing policies for " + app1, 4, tester.controller().applications().routingPolicies().get(app1.id()).size());
+ assertTrue("Removes stale routing policies " + app2, tester.controller().applications().routingPolicies().get(instance2.id()).isEmpty());
+ assertEquals("Keeps routing policies for " + app1, 4, tester.controller().applications().routingPolicies().get(instance1.id()).size());
}
@Test
public void cluster_endpoints_resolve_from_policies() {
- provisionLoadBalancers(3, app1.id(), zone1);
+ provisionLoadBalancers(3, instance1.id(), zone1);
tester.deployCompletely(app1, applicationPackage);
- tester.controllerTester().serviceRegistry().routingGeneratorMock().putEndpoints(new DeploymentId(app1.id(), zone1), Collections.emptyList());
+ tester.controllerTester().serviceRegistry().routingGeneratorMock().putEndpoints(new DeploymentId(instance1.id(), zone1), Collections.emptyList());
assertEquals(Map.of(ClusterSpec.Id.from("c0"),
URI.create("https://c0.app1.tenant1.us-west-1.vespa.oath.cloud/"),
ClusterSpec.Id.from("c1"),
URI.create("https://c1.app1.tenant1.us-west-1.vespa.oath.cloud/"),
ClusterSpec.Id.from("c2"),
URI.create("https://c2.app1.tenant1.us-west-1.vespa.oath.cloud/")),
- tester.controller().applications().clusterEndpoints(new DeploymentId(app1.id(), zone1)));
+ tester.controller().applications().clusterEndpoints(new DeploymentId(instance1.id(), zone1)));
}
private Set<RoutingPolicy> policies(Instance instance) {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
index a54d6f3ece7..e247c2af3c9 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
@@ -5,6 +5,7 @@ import com.yahoo.component.Version;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.zone.ZoneId;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
@@ -54,12 +55,19 @@ public class UpgraderTest {
assertEquals("No applications: Nothing to do", 0, tester.buildService().jobs().size());
// Setup applications
- Instance canary0 = tester.createAndDeploy("canary0", 1, "canary");
- Instance canary1 = tester.createAndDeploy("canary1", 2, "canary");
- Instance default0 = tester.createAndDeploy("default0", 3, "default");
- Instance default1 = tester.createAndDeploy("default1", 4, "default");
- Instance default2 = tester.createAndDeploy("default2", 5, "default");
- Instance conservative0 = tester.createAndDeploy("conservative0", 6, "conservative");
+ Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
+ Application canary1 = tester.createAndDeploy("canary1", 2, "canary");
+ Application default0 = tester.createAndDeploy("default0", 3, "default");
+ Application default1 = tester.createAndDeploy("default1", 4, "default");
+ Application default2 = tester.createAndDeploy("default2", 5, "default");
+ Application conservative0 = tester.createAndDeploy("conservative0", 6, "conservative");
+
+ Instance canary0default = tester.defaultInstance(canary0.id());
+ Instance canary1default = tester.defaultInstance(canary1.id());
+ Instance default0default = tester.defaultInstance(default0.id());
+ Instance default1default = tester.defaultInstance(default1.id());
+ Instance default2default = tester.defaultInstance(default2.id());
+ Instance conservative0default = tester.defaultInstance(conservative0.id());
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
@@ -126,9 +134,9 @@ public class UpgraderTest {
tester.upgradeSystem(version3);
assertEquals(version3, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.upgrader().maintain();
- tester.buildService().remove(ControllerTester.buildJob(canary0, stagingTest));
- tester.buildService().remove(ControllerTester.buildJob(canary1, systemTest));
- tester.buildService().remove(ControllerTester.buildJob(canary1, stagingTest));
+ tester.buildService().remove(ControllerTester.buildJob(canary0default.id(), stagingTest));
+ tester.buildService().remove(ControllerTester.buildJob(canary1default.id(), systemTest));
+ tester.buildService().remove(ControllerTester.buildJob(canary1default.id(), stagingTest));
tester.triggerUntilQuiescence();
assertEquals("New system version: Should upgrade Canaries", 4, tester.buildService().jobs().size());
@@ -164,10 +172,10 @@ public class UpgraderTest {
// Deploy application change
tester.jobCompletion(component).application(default0).nextBuildNumber().uploadArtifact(DeploymentTester.applicationPackage("default")).submit();
tester.jobCompletion(stagingTest).application(default0).unsuccessful().submit();
- tester.deployAndNotify(default0, "default", true, systemTest);
- tester.deployAndNotify(default0, "default", true, stagingTest);
- tester.deployAndNotify(default0, "default", true, productionUsWest1);
- tester.deployAndNotify(default0, "default", true, productionUsEast3);
+ tester.deployAndNotify(default0default.id(), DeploymentTester.applicationPackage("default"), true, systemTest);
+ tester.deployAndNotify(default0default.id(), DeploymentTester.applicationPackage("default"), true, stagingTest);
+ tester.deployAndNotify(default0default.id(), DeploymentTester.applicationPackage("default"), true, productionUsWest1);
+ tester.deployAndNotify(default0default.id(), DeploymentTester.applicationPackage("default"), true, productionUsEast3);
tester.upgradeSystem(version3);
assertEquals(VespaVersion.Confidence.high, tester.controller().versionStatus().systemVersion().get().confidence());
@@ -183,8 +191,8 @@ public class UpgraderTest {
// --- Starting upgrading to a new version which breaks, causing upgrades to commence on the previous version
Version version4 = Version.fromString("6.6");
- Instance default3 = tester.createAndDeploy("default3", 7, "default"); // need 4 to break a version
- Instance default4 = tester.createAndDeploy("default4", 8, "default");
+ Application default3 = tester.createAndDeploy("default3", 7, "default"); // need 4 to break a version
+ Application default4 = tester.createAndDeploy("default4", 8, "default");
tester.upgradeSystem(version4);
tester.upgrader().maintain(); // cause canary upgrades to new version
tester.triggerUntilQuiescence();
@@ -196,11 +204,11 @@ public class UpgraderTest {
tester.triggerUntilQuiescence();
assertEquals("Upgrade of defaults are scheduled", 10, tester.buildService().jobs().size());
- assertEquals(version4, tester.application(default0.id()).change().platform().get());
- assertEquals(version4, tester.application(default1.id()).change().platform().get());
- assertEquals(version4, tester.application(default2.id()).change().platform().get());
- assertEquals(version4, tester.application(default3.id()).change().platform().get());
- assertEquals(version4, tester.application(default4.id()).change().platform().get());
+ assertEquals(version4, tester.defaultInstance(default0.id()).change().platform().get());
+ assertEquals(version4, tester.defaultInstance(default1.id()).change().platform().get());
+ assertEquals(version4, tester.defaultInstance(default2.id()).change().platform().get());
+ assertEquals(version4, tester.defaultInstance(default3.id()).change().platform().get());
+ assertEquals(version4, tester.defaultInstance(default4.id()).change().platform().get());
tester.completeUpgrade(default0, version4, "default");
// State: Default applications started upgrading to version4 (and one completed)
@@ -216,11 +224,11 @@ public class UpgraderTest {
tester.triggerUntilQuiescence();
assertEquals("Upgrade of defaults are scheduled", 10, tester.buildService().jobs().size());
- assertEquals(version5, tester.application(default0.id()).change().platform().get());
- assertEquals(version4, tester.application(default1.id()).change().platform().get());
- assertEquals(version4, tester.application(default2.id()).change().platform().get());
- assertEquals(version4, tester.application(default3.id()).change().platform().get());
- assertEquals(version4, tester.application(default4.id()).change().platform().get());
+ assertEquals(version5, tester.defaultInstance(default0.id()).change().platform().get());
+ assertEquals(version4, tester.defaultInstance(default1.id()).change().platform().get());
+ assertEquals(version4, tester.defaultInstance(default2.id()).change().platform().get());
+ assertEquals(version4, tester.defaultInstance(default3.id()).change().platform().get());
+ assertEquals(version4, tester.defaultInstance(default4.id()).change().platform().get());
tester.completeUpgrade(default1, version4, "default");
tester.completeUpgrade(default2, version4, "default");
@@ -252,7 +260,7 @@ public class UpgraderTest {
assertEquals("Upgrade of defaults are scheduled on " + version4 + " instead, since " + version5 + " is broken: " +
"This is default3 since it failed upgrade on both " + version4 + " and " + version5,
2, tester.buildService().jobs().size());
- assertEquals(version4, tester.application(default3.id()).change().platform().get());
+ assertEquals(version4, tester.defaultInstance(default3.id()).change().platform().get());
}
@Test
@@ -271,18 +279,18 @@ public class UpgraderTest {
assertEquals("No applications: Nothing to do", 0, tester.buildService().jobs().size());
// Setup applications
- Instance canary0 = tester.createAndDeploy("canary0", 1, "canary");
- Instance canary1 = tester.createAndDeploy("canary1", 2, "canary");
- Instance default0 = tester.createAndDeploy("default0", 3, "default");
- Instance default1 = tester.createAndDeploy("default1", 4, "default");
- Instance default2 = tester.createAndDeploy("default2", 5, "default");
- Instance default3 = tester.createAndDeploy("default3", 6, "default");
- Instance default4 = tester.createAndDeploy("default4", 7, "default");
- Instance default5 = tester.createAndDeploy("default5", 8, "default");
- Instance default6 = tester.createAndDeploy("default6", 9, "default");
- Instance default7 = tester.createAndDeploy("default7", 10, "default");
- Instance default8 = tester.createAndDeploy("default8", 11, "default");
- Instance default9 = tester.createAndDeploy("default9", 12, "default");
+ Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
+ Application canary1 = tester.createAndDeploy("canary1", 2, "canary");
+ Application default0 = tester.createAndDeploy("default0", 3, "default");
+ Application default1 = tester.createAndDeploy("default1", 4, "default");
+ Application default2 = tester.createAndDeploy("default2", 5, "default");
+ Application default3 = tester.createAndDeploy("default3", 6, "default");
+ Application default4 = tester.createAndDeploy("default4", 7, "default");
+ Application default5 = tester.createAndDeploy("default5", 8, "default");
+ Application default6 = tester.createAndDeploy("default6", 9, "default");
+ Application default7 = tester.createAndDeploy("default7", 10, "default");
+ Application default8 = tester.createAndDeploy("default8", 11, "default");
+ Application default9 = tester.createAndDeploy("default9", 12, "default");
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
@@ -338,11 +346,12 @@ public class UpgraderTest {
Version version = Version.fromString("6.2");
tester.upgradeSystem(version);
- Instance app = tester.createApplication("app1", "tenant1", 1, 11L);
+ Application app = tester.createApplication("app1", "tenant1", 1, 11L);
+ Instance instance = tester.defaultInstance(app.id());
tester.jobCompletion(component).application(app).uploadArtifact(applicationPackage).submit();
- tester.deployAndNotify(app, applicationPackage, true, systemTest);
- tester.deployAndNotify(app, applicationPackage, true, stagingTest);
- tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsEast3);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
@@ -357,12 +366,12 @@ public class UpgraderTest {
tester.triggerUntilQuiescence();
// system-test completes successfully
- tester.deployAndNotify(app, applicationPackage, true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, systemTest);
// staging-test fails and failure is recorded
- tester.deployAndNotify(app, applicationPackage, false, stagingTest);
- assertTrue("Failure is recorded", tester.application(app.id()).deploymentJobs().hasFailures());
- assertTrue("Application has pending change", tester.application(app.id()).change().hasTargets());
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), false, stagingTest);
+ assertTrue("Failure is recorded", tester.defaultInstance(app.id()).deploymentJobs().hasFailures());
+ assertTrue("Application has pending change", tester.defaultInstance(app.id()).change().hasTargets());
// New version is released
version = Version.fromString("6.4");
@@ -373,7 +382,7 @@ public class UpgraderTest {
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
tester.jobCompletion(stagingTest).application(app).unsuccessful().submit();
- assertTrue("Application still has failures", tester.application(app.id()).deploymentJobs().hasFailures());
+ assertTrue("Application still has failures", tester.defaultInstance(app.id()).deploymentJobs().hasFailures());
assertEquals(2, tester.buildService().jobs().size());
// Upgrader runs again, nothing happens as test jobs are already running.
@@ -389,13 +398,13 @@ public class UpgraderTest {
tester.upgradeSystem(version);
// Setup applications
- Instance canary0 = tester.createAndDeploy("canary0", 1, "canary");
- Instance canary1 = tester.createAndDeploy("canary1", 2, "canary");
- Instance default0 = tester.createAndDeploy("default0", 3, "default");
- Instance default1 = tester.createAndDeploy("default1", 4, "default");
- Instance default2 = tester.createAndDeploy("default2", 5, "default");
- Instance default3 = tester.createAndDeploy("default3", 6, "default");
- Instance default4 = tester.createAndDeploy("default4", 7, "default");
+ Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
+ Application canary1 = tester.createAndDeploy("canary1", 2, "canary");
+ Application default0 = tester.createAndDeploy("default0", 3, "default");
+ Application default1 = tester.createAndDeploy("default1", 4, "default");
+ Application default2 = tester.createAndDeploy("default2", 5, "default");
+ Application default3 = tester.createAndDeploy("default3", 6, "default");
+ Application default4 = tester.createAndDeploy("default4", 7, "default");
// New version is released
version = Version.fromString("6.3");
@@ -426,7 +435,7 @@ public class UpgraderTest {
tester.triggerUntilQuiescence();
// apps pass system-test, but do not trigger next jobs as upgrade is cancelled
- assertFalse("No change present", tester.applications().require(default4.id()).change().hasTargets());
+ assertFalse("No change present", tester.applications().requireInstance(default4.id().defaultInstance()).change().hasTargets());
tester.jobCompletion(systemTest).application(default0).submit();
tester.jobCompletion(systemTest).application(default1).submit();
tester.jobCompletion(systemTest).application(default2).submit();
@@ -455,13 +464,13 @@ public class UpgraderTest {
tester.upgradeSystem(v0);
// Setup applications on V0
- Instance canary0 = tester.createAndDeploy("canary0", 1, "canary");
- Instance canary1 = tester.createAndDeploy("canary1", 2, "canary");
- Instance default0 = tester.createAndDeploy("default0", 3, "default");
- Instance default1 = tester.createAndDeploy("default1", 4, "default");
- Instance default2 = tester.createAndDeploy("default2", 5, "default");
- Instance default3 = tester.createAndDeploy("default3", 6, "default");
- Instance default4 = tester.createAndDeploy("default4", 7, "default");
+ Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
+ Application canary1 = tester.createAndDeploy("canary1", 2, "canary");
+ Application default0 = tester.createAndDeploy("default0", 3, "default");
+ Application default1 = tester.createAndDeploy("default1", 4, "default");
+ Application default2 = tester.createAndDeploy("default2", 5, "default");
+ Application default3 = tester.createAndDeploy("default3", 6, "default");
+ Application default4 = tester.createAndDeploy("default4", 7, "default");
// V1 is released
Version v1 = Version.fromString("6.3");
@@ -501,7 +510,7 @@ public class UpgraderTest {
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
assertEquals("Upgrade scheduled for remaining apps", 10, tester.buildService().jobs().size());
- assertEquals("default4 is still upgrading to 5.1", v1, tester.application(default4.id()).change().platform().get());
+ assertEquals("default4 is still upgrading to 5.1", v1, tester.defaultInstance(default4.id()).change().platform().get());
// 4/5 applications fail (in the last prod zone) and lowers confidence
tester.completeUpgradeWithError(default0, v2, "default", productionUsEast3);
@@ -511,8 +520,8 @@ public class UpgraderTest {
tester.upgradeSystem(v2);
assertEquals(VespaVersion.Confidence.broken, tester.controller().versionStatus().systemVersion().get().confidence());
- assertEquals(v2, tester.application("default0").deployments().get(ZoneId.from("prod.us-west-1")).version());
- assertEquals(v0, tester.application("default0").deployments().get(ZoneId.from("prod.us-east-3")).version());
+ assertEquals(v2, tester.defaultInstance("default0").deployments().get(ZoneId.from("prod.us-west-1")).version());
+ assertEquals(v0, tester.defaultInstance("default0").deployments().get(ZoneId.from("prod.us-east-3")).version());
tester.upgrader().maintain();
tester.buildService().clear();
tester.triggerUntilQuiescence();
@@ -520,16 +529,16 @@ public class UpgraderTest {
assertEquals("Upgrade to 5.1 scheduled for apps not completely on 5.1 or 5.2", 10, tester.buildService().jobs().size());
// The tester code for completing upgrades does not handle this scenario, so we trigger each step manually (for one app)
- tester.deployAndNotify(tester.application("default0"), "default", true, systemTest);
- tester.deployAndNotify(tester.application("default0"), "default", true, stagingTest);
+ tester.deployAndNotify(tester.defaultInstance("default0").id(), DeploymentTester.applicationPackage("default"), true, systemTest);
+ tester.deployAndNotify(tester.defaultInstance("default0").id(), DeploymentTester.applicationPackage("default"), true, stagingTest);
// prod zone on 5.2 (usWest1) is skipped, but we still trigger the next zone from triggerReadyJobs:
- tester.deployAndNotify(tester.application("default0"), "default", true, productionUsEast3);
+ tester.deployAndNotify(tester.defaultInstance("default0").id(), DeploymentTester.applicationPackage("default"), true, productionUsEast3);
// Resulting state:
- assertEquals(v2, tester.application("default0").deployments().get(ZoneId.from("prod.us-west-1")).version());
+ assertEquals(v2, tester.defaultInstance("default0").deployments().get(ZoneId.from("prod.us-west-1")).version());
assertEquals("Last zone is upgraded to v1",
- v1, tester.application("default0").deployments().get(ZoneId.from("prod.us-east-3")).version());
- assertFalse(tester.application("default0").change().hasTargets());
+ v1, tester.defaultInstance("default0").deployments().get(ZoneId.from("prod.us-east-3")).version());
+ assertFalse(tester.defaultInstance("default0").change().hasTargets());
}
@Test
@@ -542,13 +551,13 @@ public class UpgraderTest {
ApplicationPackage defaultPolicy = DeploymentTester.applicationPackage("default");
// Setup applications
- Instance canary0 = tester.createAndDeploy("canary0", 1, canaryPolicy);
- Instance canary1 = tester.createAndDeploy("canary1", 2, canaryPolicy);
- Instance default0 = tester.createAndDeploy("default0", 3, defaultPolicy);
- Instance default1 = tester.createAndDeploy("default1", 4, defaultPolicy);
- Instance default2 = tester.createAndDeploy("default2", 5, defaultPolicy);
- Instance default3 = tester.createAndDeploy("default3", 6, defaultPolicy);
- Instance default4 = tester.createAndDeploy("default4", 7, defaultPolicy);
+ Application canary0 = tester.createAndDeploy("canary0", 1, canaryPolicy);
+ Application canary1 = tester.createAndDeploy("canary1", 2, canaryPolicy);
+ Application default0 = tester.createAndDeploy("default0", 3, defaultPolicy);
+ Application default1 = tester.createAndDeploy("default1", 4, defaultPolicy);
+ Application default2 = tester.createAndDeploy("default2", 5, defaultPolicy);
+ Application default3 = tester.createAndDeploy("default3", 6, defaultPolicy);
+ Application default4 = tester.createAndDeploy("default4", 7, defaultPolicy);
// New version is released
version = Version.fromString("6.3");
@@ -600,7 +609,7 @@ public class UpgraderTest {
.region("us-west-1")
.build();
- Instance app = tester.createAndDeploy("app1", 1, applicationPackage);
+ Application app = tester.createAndDeploy("app1", 1, applicationPackage);
// New version is released
version = Version.fromString("6.3");
@@ -643,7 +652,8 @@ public class UpgraderTest {
.region("us-east-3")
.build();
- Instance app = tester.createAndDeploy("app1", 1, applicationPackage);
+ Application app = tester.createAndDeploy("app1", 1, applicationPackage);
+ Instance instance = tester.defaultInstance(app.id());
// New version is released
version = Version.fromString("6.3");
@@ -652,14 +662,14 @@ public class UpgraderTest {
// Application upgrade starts
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
- tester.deployAndNotify(app, applicationPackage, true, systemTest);
- tester.deployAndNotify(app, applicationPackage, true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, stagingTest);
tester.clock().advance(Duration.ofHours(1)); // Entering block window after prod job is triggered
- tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsWest1);
assertEquals(1, tester.buildService().jobs().size()); // Next job triggered because upgrade is already rolling out.
- tester.deployAndNotify(app, applicationPackage, true, productionUsCentral1);
- tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsCentral1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsEast3);
assertTrue("All jobs consumed", tester.buildService().jobs().isEmpty());
}
@@ -680,7 +690,8 @@ public class UpgraderTest {
.region("us-east-3")
.build();
- Instance app = tester.createAndDeploy("app1", 1, applicationPackage);
+ Application app = tester.createAndDeploy("app1", 1, applicationPackage);
+ Instance instance = tester.defaultInstance(app.id());
// New version is released
version = Version.fromString("6.3");
@@ -689,24 +700,25 @@ public class UpgraderTest {
// Application upgrade starts
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
- tester.deployAndNotify(app, applicationPackage, true, systemTest);
- tester.deployAndNotify(app, applicationPackage, true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, stagingTest);
tester.clock().advance(Duration.ofHours(1)); // Entering block window after prod job is triggered
- tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsWest1);
assertEquals(1, tester.buildService().jobs().size()); // Next job triggered, as upgrade is already in progress.
- tester.deployAndNotify(app, applicationPackage, false, productionUsCentral1); // us-central-1 fails, permitting a new revision.
+ // us-central-1 fails, permitting a new revision.
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), false, productionUsCentral1);
// A new revision is submitted and starts rolling out.
tester.jobCompletion(component).application(app).nextBuildNumber().uploadArtifact(applicationPackage).submit();
// us-central-1 fails again, and isn't re-triggered, because the target is now a revision instead.
- tester.deployAndNotify(app, applicationPackage, false, productionUsCentral1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), false, productionUsCentral1);
assertEquals(2, tester.buildService().jobs().size());
- tester.deployAndNotify(app, applicationPackage, true, systemTest);
- tester.deployAndNotify(app, applicationPackage, true, stagingTest);
- tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsWest1);
// us-central-1 has an older version, and needs a new staging test to begin.
- tester.deployAndNotify(app, applicationPackage, true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, stagingTest);
// A new version is also released, cancelling the upgrade, since it is failing on a now outdated version.
tester.clock().advance(Duration.ofDays(1));
@@ -716,10 +728,10 @@ public class UpgraderTest {
tester.triggerUntilQuiescence();
// us-central-1 succeeds upgrade to 5.1, with the revision, but us-east-3 wants to proceed with only the revision change.
- tester.deployAndNotify(app, applicationPackage, true, productionUsCentral1);
- tester.deployAndNotify(app, applicationPackage, true, systemTest);
- tester.deployAndNotify(app, applicationPackage, true, stagingTest);
- tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsCentral1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsEast3);
assertEquals(Collections.emptyList(), tester.buildService().jobs());
// Monday morning: We are not blocked, and the new version rolls out to all zones.
@@ -727,16 +739,16 @@ public class UpgraderTest {
tester.clock().advance(Duration.ofHours(17)); // Monday, 10:00
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
- tester.deployAndNotify(app, applicationPackage, true, systemTest);
- tester.deployAndNotify(app, applicationPackage, true, stagingTest);
- tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
- tester.deployAndNotify(app, applicationPackage, true, productionUsCentral1);
- tester.deployAndNotify(app, applicationPackage, true, stagingTest);
- tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsWest1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsCentral1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsEast3);
assertTrue("All jobs consumed", tester.buildService().jobs().isEmpty());
// App is completely upgraded to the latest version
- for (Deployment deployment : tester.applications().require(app.id()).deployments().values())
+ for (Deployment deployment : tester.applications().requireInstance(instance.id()).deployments().values())
assertEquals(version, deployment.version());
}
@@ -758,13 +770,13 @@ public class UpgraderTest {
.build();
// Setup applications
- Instance canary0 = tester.createAndDeploy("canary0", 1, canaryApplicationPackage);
- Instance canary1 = tester.createAndDeploy("canary1", 2, canaryApplicationPackage);
- Instance default0 = tester.createAndDeploy("default0", 3, defaultApplicationPackage);
- Instance default1 = tester.createAndDeploy("default1", 4, defaultApplicationPackage);
- Instance default2 = tester.createAndDeploy("default2", 5, defaultApplicationPackage);
- Instance default3 = tester.createAndDeploy("default3", 6, defaultApplicationPackage);
- Instance default4 = tester.createAndDeploy("default4", 7, defaultApplicationPackage);
+ Application canary0 = tester.createAndDeploy("canary0", 1, canaryApplicationPackage);
+ Application canary1 = tester.createAndDeploy("canary1", 2, canaryApplicationPackage);
+ Application default0 = tester.createAndDeploy("default0", 3, defaultApplicationPackage);
+ Application default1 = tester.createAndDeploy("default1", 4, defaultApplicationPackage);
+ Application default2 = tester.createAndDeploy("default2", 5, defaultApplicationPackage);
+ Application default3 = tester.createAndDeploy("default3", 6, defaultApplicationPackage);
+ Application default4 = tester.createAndDeploy("default4", 7, defaultApplicationPackage);
assertEquals(version, default0.oldestDeployedPlatform().get());
@@ -810,7 +822,7 @@ public class UpgraderTest {
tester.jobCompletion(stagingTest).application(default4).unsuccessful().submit();
// 5th app never reports back and has a dead job, but no ongoing change
- Instance deadLocked = tester.applications().require(default4.id());
+ Instance deadLocked = tester.applications().requireInstance(default4.id().defaultInstance());
tester.assertRunning(systemTest, deadLocked.id());
assertFalse("No change present", deadLocked.change().hasTargets());
@@ -838,10 +850,10 @@ public class UpgraderTest {
tester.completeUpgrade(default2, version, defaultApplicationPackageV2);
tester.completeUpgrade(default3, version, defaultApplicationPackageV2);
- assertEquals(version, tester.application(default0.id()).oldestDeployedPlatform().get());
- assertEquals(version, tester.application(default1.id()).oldestDeployedPlatform().get());
- assertEquals(version, tester.application(default2.id()).oldestDeployedPlatform().get());
- assertEquals(version, tester.application(default3.id()).oldestDeployedPlatform().get());
+ assertEquals(version, tester.defaultInstance(default0.id()).oldestDeployedPlatform().get());
+ assertEquals(version, tester.defaultInstance(default1.id()).oldestDeployedPlatform().get());
+ assertEquals(version, tester.defaultInstance(default2.id()).oldestDeployedPlatform().get());
+ assertEquals(version, tester.defaultInstance(default3.id()).oldestDeployedPlatform().get());
}
@Test
@@ -857,16 +869,16 @@ public class UpgraderTest {
upgrader.setUpgradesPerMinute(0.2);
// Setup applications
- Instance canary0 = tester.createAndDeploy("canary0", 1, "canary");
- Instance canary1 = tester.createAndDeploy("canary1", 2, "canary");
- Instance default0 = tester.createAndDeploy("default0", 3, "default");
- Instance default1 = tester.createAndDeploy("default1", 4, "default");
- Instance default2 = tester.createAndDeploy("default2", 5, "default");
- Instance default3 = tester.createAndDeploy("default3", 6, "default");
+ Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
+ Application canary1 = tester.createAndDeploy("canary1", 2, "canary");
+ Application default0 = tester.createAndDeploy("default0", 3, "default");
+ Application default1 = tester.createAndDeploy("default1", 4, "default");
+ Application default2 = tester.createAndDeploy("default2", 5, "default");
+ Application default3 = tester.createAndDeploy("default3", 6, "default");
// Dev deployment which should be ignored
- Instance dev0 = tester.createApplication("dev0", "tenant1", 7, 1L);
- tester.controllerTester().deploy(dev0, ZoneId.from(Environment.dev, RegionName.from("dev-region")));
+ Application dev0 = tester.createApplication("dev0", "tenant1", 7, 1L);
+ tester.controllerTester().deploy(dev0.id().defaultInstance(), ZoneId.from(Environment.dev, RegionName.from("dev-region")));
// New version is released and canaries upgrade
version = Version.fromString("6.3");
@@ -910,8 +922,8 @@ public class UpgraderTest {
.build();
// Setup applications
- Instance canary0 = tester.createAndDeploy("canary0", 1, "canary");
- Instance default0 = tester.createAndDeploy("default0", 2, version6ApplicationPackage);
+ Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
+ Application default0 = tester.createAndDeploy("default0", 2, version6ApplicationPackage);
// New major version is released
version = Version.fromString("7.0");
@@ -944,10 +956,10 @@ public class UpgraderTest {
.build();
// Setup applications
- Instance canary0 = tester.createAndDeploy("canary0", 1, "canary");
- Instance default0 = tester.createAndDeploy("default0", 2, default0ApplicationPackage);
- tester.applications().lockOrThrow(default0.id(), a -> tester.applications().store(a.withMajorVersion(6)));
- assertEquals(OptionalInt.of(6), tester.applications().get(default0.id()).get().majorVersion());
+ Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
+ Application default0 = tester.createAndDeploy("default0", 2, default0ApplicationPackage);
+ tester.applications().lockApplicationOrThrow(default0.id(), a -> tester.applications().store(a.withMajorVersion(6)));
+ assertEquals(OptionalInt.of(6), tester.applications().getInstance(default0.id().defaultInstance()).get().majorVersion());
// New major version is released
version = Version.fromString("7.0");
@@ -987,9 +999,9 @@ public class UpgraderTest {
.build();
// Setup applications
- Instance canary0 = tester.createAndDeploy("canary", 1, version7CanaryApplicationPackage);
- Instance default0 = tester.createAndDeploy("default0", 2, version7DefaultApplicationPackage);
- Instance default1 = tester.createAndDeploy("default1", 3, "default");
+ Application canary0 = tester.createAndDeploy("canary", 1, version7CanaryApplicationPackage);
+ Application default0 = tester.createAndDeploy("default0", 2, version7DefaultApplicationPackage);
+ Application default1 = tester.createAndDeploy("default1", 3, "default");
// New major version is released, but we don't want to upgrade to it yet
tester.upgrader().setTargetMajorVersion(Optional.of(6));
@@ -1034,7 +1046,8 @@ public class UpgraderTest {
.region("us-west-1")
.build();
- Instance app = tester.createAndDeploy("app1", 1, applicationPackage);
+ Application app = tester.createAndDeploy("app1", 1, applicationPackage);
+ Instance instance = tester.defaultInstance(app.id());
// New version is released
version = Version.fromString("6.3");
@@ -1042,9 +1055,9 @@ public class UpgraderTest {
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
- tester.deployAndNotify(app, applicationPackage, true, systemTest);
- tester.deployAndNotify(app, applicationPackage, true, stagingTest);
- tester.deployAndNotify(app, applicationPackage, false, productionUsWest1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), false, productionUsWest1);
// New application change
tester.jobCompletion(component).application(app).nextBuildNumber().uploadArtifact(applicationPackage).submit();
@@ -1057,14 +1070,14 @@ public class UpgraderTest {
app.change().application().get().id().equals(applicationVersion));
// Deployment completes
- tester.deployAndNotify(app, applicationPackage, true, systemTest);
- tester.deployAndNotify(app, applicationPackage, true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, stagingTest);
tester.jobCompletion(productionUsWest1).application(app).unsuccessful().submit();
- tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsWest1);
assertTrue("All jobs consumed", tester.buildService().jobs().isEmpty());
- app = tester.application(app.id());
- for (Deployment deployment : app.deployments().values()) {
+ instance = tester.defaultInstance(app.id());
+ for (Deployment deployment : instance.deployments().values()) {
assertEquals(version, deployment.version());
assertEquals(applicationVersion, deployment.applicationVersion().id());
}
@@ -1087,17 +1100,18 @@ public class UpgraderTest {
.region("us-east-3")
.build();
- Instance app = tester.createAndDeploy("app1", 1, applicationPackage);
+ Application app = tester.createAndDeploy("app1", 1, applicationPackage);
+ Instance instance = tester.defaultInstance(app.id());
tester.jobCompletion(component).application(app).nextBuildNumber().uploadArtifact(applicationPackage).submit();
// Application upgrade starts.
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
- tester.deployAndNotify(app, applicationPackage, true, systemTest);
- tester.deployAndNotify(app, applicationPackage, true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, stagingTest);
tester.clock().advance(Duration.ofHours(1)); // Entering block window after prod job is triggered.
- tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsWest1);
assertEquals(1, tester.buildService().jobs().size()); // Next job triggered in spite of block, because it is already rolling out.
// New version is released, but upgrades won't start since there's already a revision rolling out.
@@ -1106,18 +1120,18 @@ public class UpgraderTest {
tester.triggerUntilQuiescence();
assertEquals(1, tester.buildService().jobs().size()); // Still just the revision upgrade.
- tester.deployAndNotify(app, applicationPackage, true, productionUsCentral1);
- tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsCentral1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsEast3);
assertEquals(Collections.emptyList(), tester.buildService().jobs()); // No jobs left.
// Upgrade may start, now that revision is rolled out.
tester.upgrader().maintain();
tester.readyJobTrigger().maintain();
- tester.deployAndNotify(app, applicationPackage, true, systemTest);
- tester.deployAndNotify(app, applicationPackage, true, stagingTest);
- tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
- tester.deployAndNotify(app, applicationPackage, true, productionUsCentral1);
- tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsWest1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsCentral1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsEast3);
assertTrue("All jobs consumed", tester.buildService().jobs().isEmpty());
}
@@ -1139,17 +1153,18 @@ public class UpgraderTest {
.region("us-east-3")
.build();
- Instance app = tester.createAndDeploy("app1", 1, applicationPackage);
+ Application app = tester.createAndDeploy("app1", 1, applicationPackage);
+ Instance instance = tester.defaultInstance(app.id());
tester.jobCompletion(component).application(app).nextBuildNumber().uploadArtifact(applicationPackage).submit();
// Application revision starts rolling out.
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
- tester.deployAndNotify(app, applicationPackage, true, systemTest);
- tester.deployAndNotify(app, applicationPackage, true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, stagingTest);
tester.clock().advance(Duration.ofHours(1)); // Entering block window after prod job is triggered.
- tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsWest1);
assertEquals(1, tester.buildService().jobs().size());
// New revision is submitted, but is stored as outstanding, since the upgrade is proceeding in good fashion.
@@ -1157,23 +1172,23 @@ public class UpgraderTest {
tester.triggerUntilQuiescence();
assertEquals(3, tester.buildService().jobs().size()); // Just the running upgrade, and tests for the new revision.
- tester.deployAndNotify(app, applicationPackage, true, systemTest);
- tester.deployAndNotify(app, applicationPackage, true, stagingTest);
- tester.deployAndNotify(app, applicationPackage, true, productionUsCentral1);
- tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsCentral1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsEast3);
assertEquals(Collections.emptyList(), tester.buildService().jobs()); // No jobs left.
tester.outstandingChangeDeployer().run();
- assertFalse(tester.application(app.id()).change().hasTargets());
+ assertFalse(tester.defaultInstance(app.id()).change().hasTargets());
tester.clock().advance(Duration.ofHours(2));
tester.outstandingChangeDeployer().run();
- assertTrue(tester.application(app.id()).change().hasTargets());
+ assertTrue(tester.defaultInstance(app.id()).change().hasTargets());
tester.readyJobTrigger().run();
- tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
- tester.deployAndNotify(app, applicationPackage, true, productionUsCentral1);
- tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
- assertFalse(tester.application(app.id()).change().hasTargets());
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsWest1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsCentral1);
+ tester.deployAndNotify(instance.id(), Optional.of(applicationPackage), true, productionUsEast3);
+ assertFalse(tester.defaultInstance(app.id()).change().hasTargets());
}
@Test
@@ -1183,61 +1198,62 @@ public class UpgraderTest {
tester.upgradeSystem(version0);
// Create an application with pinned platform version.
- Instance instance = tester.createApplication("application", "tenant", 2, 3);
+ Application application = tester.createApplication("application", "tenant", 2, 3);
+ Instance instance = tester.defaultInstance(application.id());
ApplicationPackage applicationPackage = new ApplicationPackageBuilder().environment(Environment.prod)
.region("us-east-3")
.region("us-west-1")
.build();
- tester.deploymentTrigger().forceChange(instance.id(), Change.empty().withPin());
+ tester.deploymentTrigger().forceChange(application.id(), Change.empty().withPin());
- tester.deployCompletely(instance, applicationPackage);
- assertFalse(tester.application(instance.id()).change().hasTargets());
- assertTrue(tester.application(instance.id()).change().isPinned());
- assertEquals(2, tester.application(instance.id()).deployments().size());
+ tester.deployCompletely(application, applicationPackage);
+ assertFalse(tester.defaultInstance(application.id()).change().hasTargets());
+ assertTrue(tester.defaultInstance(application.id()).change().isPinned());
+ assertEquals(2, tester.defaultInstance(application.id()).deployments().size());
// Application does not upgrade.
Version version1 = Version.fromString("6.3");
tester.upgradeSystem(version1);
tester.upgrader().maintain();
- assertFalse(tester.application(instance.id()).change().hasTargets());
- assertTrue(tester.application(instance.id()).change().isPinned());
+ assertFalse(tester.defaultInstance(application.id()).change().hasTargets());
+ assertTrue(tester.defaultInstance(application.id()).change().isPinned());
// New application package is deployed.
- tester.deployCompletely(instance, applicationPackage, BuildJob.defaultBuildNumber + 1);
- assertFalse(tester.application(instance.id()).change().hasTargets());
- assertTrue(tester.application(instance.id()).change().isPinned());
+ tester.deployCompletely(application, applicationPackage, BuildJob.defaultBuildNumber + 1);
+ assertFalse(tester.defaultInstance(application.id()).change().hasTargets());
+ assertTrue(tester.defaultInstance(application.id()).change().isPinned());
// Application upgrades to new version when pin is removed.
- tester.deploymentTrigger().cancelChange(instance.id(), PIN);
+ tester.deploymentTrigger().cancelChange(application.id(), PIN);
tester.upgrader().maintain();
- assertTrue(tester.application(instance.id()).change().hasTargets());
- assertFalse(tester.application(instance.id()).change().isPinned());
+ assertTrue(tester.defaultInstance(application.id()).change().hasTargets());
+ assertFalse(tester.defaultInstance(application.id()).change().isPinned());
// Application is pinned to new version, and upgrade is therefore not cancelled, even though confidence is broken.
- tester.deploymentTrigger().forceChange(instance.id(), Change.empty().withPin());
+ tester.deploymentTrigger().forceChange(application.id(), Change.empty().withPin());
tester.upgrader().maintain();
tester.readyJobTrigger().maintain();
- assertEquals(version1, tester.application(instance.id()).change().platform().get());
+ assertEquals(version1, tester.defaultInstance(application.id()).change().platform().get());
// Application fails upgrade after one zone is complete, and is pinned again to the old version.
- tester.deployAndNotify(instance, true, systemTest);
- tester.deployAndNotify(instance, true, stagingTest);
- tester.deployAndNotify(instance, true, productionUsEast3);
- tester.deploy(productionUsWest1, instance, Optional.empty(), false);
- tester.deployAndNotify(instance, false, productionUsWest1);
- tester.deploymentTrigger().cancelChange(instance.id(), ALL);
- tester.deploymentTrigger().forceChange(instance.id(), Change.of(version0).withPin());
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, productionUsEast3);
+ tester.deploy(productionUsWest1, instance.id(), Optional.empty(), false);
+ tester.deployAndNotify(instance.id(), Optional.empty(), false, productionUsWest1);
+ tester.deploymentTrigger().cancelChange(application.id(), ALL);
+ tester.deploymentTrigger().forceChange(application.id(), Change.of(version0).withPin());
tester.buildService().clear();
- assertEquals(version0, tester.application(instance.id()).change().platform().get());
+ assertEquals(version0, tester.defaultInstance(application.id()).change().platform().get());
// Application downgrades to pinned version.
tester.readyJobTrigger().maintain();
- tester.deployAndNotify(instance, true, systemTest);
- tester.deployAndNotify(instance, true, stagingTest);
- tester.deployAndNotify(instance, true, productionUsEast3);
- assertTrue(tester.application(instance.id()).change().hasTargets());
- tester.deployAndNotify(instance, true, productionUsWest1);
- assertFalse(tester.application(instance.id()).change().hasTargets());
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, productionUsEast3);
+ assertTrue(tester.defaultInstance(application.id()).change().hasTargets());
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, productionUsWest1);
+ assertFalse(tester.defaultInstance(application.id()).change().hasTargets());
}
@Test
@@ -1250,11 +1266,12 @@ public class UpgraderTest {
tester.upgrader().setTargetMajorVersion(Optional.of(6));
// All applications deploy on current version
- Instance app1 = tester.createAndDeploy("app1", 1, "default");
- Instance app2 = tester.createAndDeploy("app2", 1, "default");
+ Application app1 = tester.createAndDeploy("app1", 1, "default");
+ Application app2 = tester.createAndDeploy("app2", 1, "default");
// Keep app 1 on current version
- tester.controller().applications().lockIfPresent(app1.id(), app -> tester.controller().applications().store(app.withChange(app.get().change().withPin())));
+ tester.controller().applications().lockApplicationIfPresent(app1.id(), app ->
+ tester.controller().applications().store(app.withChange(app.get().change().withPin())));
// New version is released
Version version1 = Version.fromString("6.2");
@@ -1269,22 +1286,24 @@ public class UpgraderTest {
tester.upgradeSystem(version2);
// App 2 is allowed on new major and upgrades
- tester.controller().applications().lockIfPresent(app2.id(), app -> tester.applications().store(app.withMajorVersion(7)));
+ tester.controller().applications().lockApplicationIfPresent(app2.id(), app -> tester.applications().store(app.withMajorVersion(7)));
tester.upgrader().maintain();
- assertEquals(version2, tester.controller().applications().require(app2.id()).change().platform().get());
+ assertEquals(version2, tester.controller().applications().requireInstance(app2.id().defaultInstance()).change().platform().get());
// App 1 is unpinned and upgrades to latest 6
- tester.controller().applications().lockIfPresent(app1.id(), app -> tester.controller().applications().store(app.withChange(app.get().change().withoutPin())));
+ tester.controller().applications().lockApplicationIfPresent(app1.id(), app ->
+ tester.controller().applications().store(app.withChange(app.get().change().withoutPin())));
tester.upgrader().maintain();
assertEquals("Application upgrades to latest allowed major", version1,
- tester.controller().applications().require(app1.id()).change().platform().get());
+ tester.controller().applications().requireInstance(app1.id().defaultInstance()).change().platform().get());
}
@Test
public void testsEachUpgradeCombinationWithFailingDeployments() {
DeploymentTester tester = new DeploymentTester();
- Instance instance = tester.createApplication("app1", "tenant1", 1, 1L);
- Supplier<Instance> app = () -> tester.application(instance.id());
+ Application application = tester.createApplication("app1", "tenant1", 1, 1L);
+ Instance instance = tester.defaultInstance(application.id());
+ Supplier<Instance> app = () -> tester.defaultInstance(application.id());
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-central-1")
@@ -1294,22 +1313,22 @@ public class UpgraderTest {
// Application deploys on system version
Version v1 = Version.fromString("6.1");
- tester.deployCompletely(instance, applicationPackage);
+ tester.deployCompletely(application, applicationPackage);
// Next version is released and 2/3 deployments upgrade
Version v2 = Version.fromString("6.2");
tester.upgradeSystem(v2);
tester.upgrader().maintain();
assertEquals(Change.of(v2), app.get().change());
- tester.deployAndNotify(instance, true, systemTest);
- tester.deployAndNotify(instance, true, stagingTest);
- tester.deployAndNotify(instance, true, productionUsCentral1);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, productionUsCentral1);
// While second deployment completes upgrade, version confidence becomes broken and upgrade is cancelled
tester.upgrader().overrideConfidence(v2, VespaVersion.Confidence.broken);
tester.computeVersionStatus();
tester.upgrader().maintain();
- tester.deployAndNotify(instance, true, productionUsWest1);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, productionUsWest1);
assertTrue(app.get().change().isEmpty());
// Next version is released
@@ -1317,21 +1336,21 @@ public class UpgraderTest {
tester.upgradeSystem(v3);
tester.upgrader().maintain();
assertEquals(Change.of(v3), app.get().change());
- tester.deployAndNotify(instance, true, systemTest);
- tester.deployAndNotify(instance, true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, systemTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, stagingTest);
// First deployment starts upgrading
- tester.deploy(productionUsCentral1, instance, applicationPackage);
+ tester.deploy(productionUsCentral1, instance.id(), applicationPackage);
// Before deployment completes, v1->v3 combination is tested as us-east-3 is still on v1
tester.readyJobTrigger().maintain();
- tester.deployAndNotify(instance, true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, stagingTest);
assertEquals(v1, app.get().deploymentJobs().jobStatus().get(stagingTest).lastSuccess().get().sourcePlatform().get());
assertEquals(v3, app.get().deploymentJobs().jobStatus().get(stagingTest).lastSuccess().get().platform());
// First deployment fails and then successfully upgrades to v3
- tester.jobCompletion(productionUsCentral1).application(instance).unsuccessful().submit();
- tester.jobCompletion(productionUsCentral1).application(instance).submit();
+ tester.jobCompletion(productionUsCentral1).application(application).unsuccessful().submit();
+ tester.jobCompletion(productionUsCentral1).application(application).submit();
// Deployments are now on 3 versions
assertEquals(v3, app.get().deployments().get(productionUsCentral1.zone(main)).version());
@@ -1342,19 +1361,19 @@ public class UpgraderTest {
tester.readyJobTrigger().maintain();
assertEquals(v2, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().sourcePlatform().get());
assertEquals(v3, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().platform());
- tester.deployAndNotify(instance, true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, stagingTest);
// Second deployment upgrades
- tester.deployAndNotify(instance, true, productionUsWest1);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, productionUsWest1);
// ... now we have to test v1->v3 again :(
tester.readyJobTrigger().maintain();
assertEquals(v1, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().sourcePlatform().get());
assertEquals(v3, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().platform());
- tester.deployAndNotify(instance, true, stagingTest);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, stagingTest);
// Upgrade completes
- tester.deployAndNotify(instance, true, productionUsEast3);
+ tester.deployAndNotify(instance.id(), Optional.empty(), true, productionUsEast3);
assertTrue("Upgrade complete", app.get().change().isEmpty());
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java
new file mode 100644
index 00000000000..5889b71bdd9
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java
@@ -0,0 +1,250 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.persistence;
+
+import com.yahoo.component.Version;
+import com.yahoo.config.application.api.DeploymentSpec;
+import com.yahoo.config.application.api.ValidationOverrides;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.zone.ZoneId;
+import com.yahoo.vespa.config.SlimeUtils;
+import com.yahoo.vespa.hosted.controller.Application;
+import com.yahoo.vespa.hosted.controller.Instance;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.SourceRevision;
+import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueId;
+import com.yahoo.vespa.hosted.controller.api.integration.organization.User;
+import com.yahoo.vespa.hosted.controller.application.AssignedRotation;
+import com.yahoo.vespa.hosted.controller.application.Change;
+import com.yahoo.vespa.hosted.controller.application.ClusterInfo;
+import com.yahoo.vespa.hosted.controller.application.ClusterUtilization;
+import com.yahoo.vespa.hosted.controller.application.Deployment;
+import com.yahoo.vespa.hosted.controller.application.DeploymentActivity;
+import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
+import com.yahoo.vespa.hosted.controller.application.DeploymentJobs.JobError;
+import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics;
+import com.yahoo.vespa.hosted.controller.application.JobStatus;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
+import com.yahoo.vespa.hosted.controller.metric.ApplicationMetrics;
+import com.yahoo.vespa.hosted.controller.rotation.RotationId;
+import com.yahoo.vespa.hosted.controller.rotation.RotationState;
+import com.yahoo.vespa.hosted.controller.rotation.RotationStatus;
+import org.junit.Test;
+
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.time.Instant;
+import java.time.temporal.ChronoUnit;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.OptionalDouble;
+import java.util.OptionalInt;
+import java.util.OptionalLong;
+import java.util.Set;
+
+import static com.yahoo.config.provision.SystemName.main;
+import static java.util.Optional.empty;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * @author bratseth
+ */
+
+public class ApplicationSerializerTest {
+
+ private static final ApplicationSerializer APPLICATION_SERIALIZER = new ApplicationSerializer();
+ private static final Path testData = Paths.get("src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/");
+ private static final ZoneId zone1 = ZoneId.from("prod", "us-west-1");
+ private static final ZoneId zone2 = ZoneId.from("prod", "us-east-3");
+
+ @Test
+ public void testSerialization() {
+ DeploymentSpec deploymentSpec = DeploymentSpec.fromXml("<deployment version='1.0'>" +
+ " <staging/>" +
+ "</deployment>");
+ ValidationOverrides validationOverrides = ValidationOverrides.fromXml("<validation-overrides version='1.0'>" +
+ " <allow until='2017-06-15'>deployment-removal</allow>" +
+ "</validation-overrides>");
+
+ OptionalLong projectId = OptionalLong.of(123L);
+
+ List<Deployment> deployments = new ArrayList<>();
+ ApplicationVersion applicationVersion1 = ApplicationVersion.from(new SourceRevision("repo1", "branch1", "commit1"), 31);
+ ApplicationVersion applicationVersion2 = ApplicationVersion
+ .from(new SourceRevision("repo1", "branch1", "commit1"), 32, "a@b",
+ Version.fromString("6.3.1"), Instant.ofEpochMilli(496));
+ Instant activityAt = Instant.parse("2018-06-01T10:15:30.00Z");
+ deployments.add(new Deployment(zone1, applicationVersion1, Version.fromString("1.2.3"), Instant.ofEpochMilli(3))); // One deployment without cluster info and utils
+ deployments.add(new Deployment(zone2, applicationVersion2, Version.fromString("1.2.3"), Instant.ofEpochMilli(5),
+ createClusterUtils(3, 0.2), createClusterInfo(3, 4),
+ new DeploymentMetrics(2, 3, 4, 5, 6,
+ Optional.of(Instant.now().truncatedTo(ChronoUnit.MILLIS)),
+ Map.of(DeploymentMetrics.Warning.all, 3)),
+ DeploymentActivity.create(Optional.of(activityAt), Optional.of(activityAt),
+ OptionalDouble.of(200), OptionalDouble.of(10))));
+
+ List<JobStatus> statusList = new ArrayList<>();
+
+ statusList.add(JobStatus.initial(JobType.systemTest)
+ .withTriggering(Version.fromString("5.6.7"), ApplicationVersion.unknown, empty(), "Test", Instant.ofEpochMilli(7))
+ .withCompletion(30, empty(), Instant.ofEpochMilli(8))
+ .withPause(OptionalLong.of(1L << 32)));
+ statusList.add(JobStatus.initial(JobType.stagingTest)
+ .withTriggering(Version.fromString("5.6.6"), ApplicationVersion.unknown, empty(), "Test 2", Instant.ofEpochMilli(5))
+ .withCompletion(11, Optional.of(JobError.unknown), Instant.ofEpochMilli(6)));
+ statusList.add(JobStatus.initial(JobType.from(main, zone1).get())
+ .withTriggering(Version.fromString("5.6.6"), ApplicationVersion.unknown, deployments.stream().findFirst(), "Test 3", Instant.ofEpochMilli(6))
+ .withCompletion(11, empty(), Instant.ofEpochMilli(7)));
+
+ DeploymentJobs deploymentJobs = new DeploymentJobs(OptionalLong.empty(), statusList, empty(), true);
+
+ var rotationStatus = RotationStatus.from(Map.of(new RotationId("my-rotation"),
+ Map.of(ZoneId.from("prod", "us-west-1"), RotationState.in,
+ ZoneId.from("prod", "us-east-3"), RotationState.out)));
+
+ ApplicationId id1 = ApplicationId.from("t1", "a1", "i1");
+ ApplicationId id3 = ApplicationId.from("t1", "a1", "i3");
+ List<Instance> instances = List.of(new Instance(id1,
+ deployments,
+ deploymentJobs,
+ List.of(AssignedRotation.fromStrings("foo", "default", "my-rotation", Set.of())),
+ rotationStatus),
+ new Instance(id3,
+ List.of(),
+ new DeploymentJobs(OptionalLong.empty(), List.of(), empty(), true),
+ List.of(),
+ RotationStatus.EMPTY));
+
+ Application original = new Application(TenantAndApplicationId.from(id1),
+ Instant.now().truncatedTo(ChronoUnit.MILLIS),
+ deploymentSpec,
+ validationOverrides,
+ Change.of(Version.fromString("6.7")).withPin(),
+ Change.of(ApplicationVersion.from(new SourceRevision("repo", "master", "deadcafe"), 42)),
+ Optional.of(IssueId.from("4321")),
+ Optional.of(IssueId.from("1234")),
+ Optional.of(User.from("by-username")),
+ OptionalInt.of(7),
+ new ApplicationMetrics(0.5, 0.9),
+ Optional.of("-----BEGIN PUBLIC KEY-----\n∠( ᐛ 」∠)_\n-----END PUBLIC KEY-----"),
+ projectId,
+ true,
+ instances);
+
+ Application serialized = APPLICATION_SERIALIZER.fromSlime(APPLICATION_SERIALIZER.toSlime(original));
+
+ assertEquals(original.id(), serialized.id());
+ assertEquals(original.createdAt(), serialized.createdAt());
+
+ assertEquals(original.deploymentSpec().xmlForm(), serialized.deploymentSpec().xmlForm());
+ assertEquals(original.validationOverrides().xmlForm(), serialized.validationOverrides().xmlForm());
+
+ assertEquals(original.projectId(), serialized.projectId());
+ assertEquals(original.internal(), serialized.internal());
+ assertEquals(original.deploymentIssueId(), serialized.deploymentIssueId());
+
+ assertEquals(0, serialized.require(id3.instance()).deployments().size());
+ assertEquals(0, serialized.require(id3.instance()).deploymentJobs().jobStatus().size());
+ assertEquals(0, serialized.require(id3.instance()).rotations().size());
+ assertEquals(RotationStatus.EMPTY, serialized.require(id3.instance()).rotationStatus());
+
+ assertEquals(2, serialized.require(id1.instance()).deployments().size());
+ assertEquals(original.require(id1.instance()).deployments().get(zone1).applicationVersion(), serialized.require(id1.instance()).deployments().get(zone1).applicationVersion());
+ assertEquals(original.require(id1.instance()).deployments().get(zone2).applicationVersion(), serialized.require(id1.instance()).deployments().get(zone2).applicationVersion());
+ assertEquals(original.require(id1.instance()).deployments().get(zone1).version(), serialized.require(id1.instance()).deployments().get(zone1).version());
+ assertEquals(original.require(id1.instance()).deployments().get(zone2).version(), serialized.require(id1.instance()).deployments().get(zone2).version());
+ assertEquals(original.require(id1.instance()).deployments().get(zone1).at(), serialized.require(id1.instance()).deployments().get(zone1).at());
+ assertEquals(original.require(id1.instance()).deployments().get(zone2).at(), serialized.require(id1.instance()).deployments().get(zone2).at());
+ assertEquals(original.require(id1.instance()).deployments().get(zone2).activity().lastQueried().get(), serialized.require(id1.instance()).deployments().get(zone2).activity().lastQueried().get());
+ assertEquals(original.require(id1.instance()).deployments().get(zone2).activity().lastWritten().get(), serialized.require(id1.instance()).deployments().get(zone2).activity().lastWritten().get());
+
+ assertEquals(original.require(id1.instance()).deploymentJobs().projectId(), serialized.require(id1.instance()).deploymentJobs().projectId());
+ assertEquals(original.require(id1.instance()).deploymentJobs().jobStatus().size(), serialized.require(id1.instance()).deploymentJobs().jobStatus().size());
+ assertEquals( original.require(id1.instance()).deploymentJobs().jobStatus().get(JobType.systemTest),
+ serialized.require(id1.instance()).deploymentJobs().jobStatus().get(JobType.systemTest));
+ assertEquals( original.require(id1.instance()).deploymentJobs().jobStatus().get(JobType.stagingTest),
+ serialized.require(id1.instance()).deploymentJobs().jobStatus().get(JobType.stagingTest));
+
+ assertEquals(original.outstandingChange(), serialized.outstandingChange());
+
+ assertEquals(original.ownershipIssueId(), serialized.ownershipIssueId());
+ assertEquals(original.owner(), serialized.owner());
+ assertEquals(original.majorVersion(), serialized.majorVersion());
+ assertEquals(original.change(), serialized.change());
+ assertEquals(original.pemDeployKey(), serialized.pemDeployKey());
+
+ assertEquals(original.require(id1.instance()).rotations(), serialized.require(id1.instance()).rotations());
+ assertEquals(original.require(id1.instance()).rotationStatus(), serialized.require(id1.instance()).rotationStatus());
+
+ // Test cluster utilization
+ assertEquals(0, serialized.require(id1.instance()).deployments().get(zone1).clusterUtils().size());
+ assertEquals(3, serialized.require(id1.instance()).deployments().get(zone2).clusterUtils().size());
+ assertEquals(0.4, serialized.require(id1.instance()).deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id2")).getCpu(), 0.01);
+ assertEquals(0.2, serialized.require(id1.instance()).deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id1")).getCpu(), 0.01);
+ assertEquals(0.2, serialized.require(id1.instance()).deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id1")).getMemory(), 0.01);
+
+ // Test cluster info
+ assertEquals(3, serialized.require(id1.instance()).deployments().get(zone2).clusterInfo().size());
+ assertEquals(10, serialized.require(id1.instance()).deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorCost());
+ assertEquals(ClusterSpec.Type.content, serialized.require(id1.instance()).deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getClusterType());
+ assertEquals("flavor2", serialized.require(id1.instance()).deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavor());
+ assertEquals(4, serialized.require(id1.instance()).deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getHostnames().size());
+ assertEquals(2, serialized.require(id1.instance()).deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorCPU(), Double.MIN_VALUE);
+ assertEquals(4, serialized.require(id1.instance()).deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorMem(), Double.MIN_VALUE);
+ assertEquals(50, serialized.require(id1.instance()).deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorDisk(), Double.MIN_VALUE);
+
+ // Test metrics
+ assertEquals(original.metrics().queryServiceQuality(), serialized.metrics().queryServiceQuality(), Double.MIN_VALUE);
+ assertEquals(original.metrics().writeServiceQuality(), serialized.metrics().writeServiceQuality(), Double.MIN_VALUE);
+ assertEquals(original.require(id1.instance()).deployments().get(zone2).metrics().queriesPerSecond(), serialized.require(id1.instance()).deployments().get(zone2).metrics().queriesPerSecond(), Double.MIN_VALUE);
+ assertEquals(original.require(id1.instance()).deployments().get(zone2).metrics().writesPerSecond(), serialized.require(id1.instance()).deployments().get(zone2).metrics().writesPerSecond(), Double.MIN_VALUE);
+ assertEquals(original.require(id1.instance()).deployments().get(zone2).metrics().documentCount(), serialized.require(id1.instance()).deployments().get(zone2).metrics().documentCount(), Double.MIN_VALUE);
+ assertEquals(original.require(id1.instance()).deployments().get(zone2).metrics().queryLatencyMillis(), serialized.require(id1.instance()).deployments().get(zone2).metrics().queryLatencyMillis(), Double.MIN_VALUE);
+ assertEquals(original.require(id1.instance()).deployments().get(zone2).metrics().writeLatencyMillis(), serialized.require(id1.instance()).deployments().get(zone2).metrics().writeLatencyMillis(), Double.MIN_VALUE);
+ assertEquals(original.require(id1.instance()).deployments().get(zone2).metrics().instant(), serialized.require(id1.instance()).deployments().get(zone2).metrics().instant());
+ assertEquals(original.require(id1.instance()).deployments().get(zone2).metrics().warnings(), serialized.require(id1.instance()).deployments().get(zone2).metrics().warnings());
+ }
+
+ private Map<ClusterSpec.Id, ClusterInfo> createClusterInfo(int clusters, int hosts) {
+ Map<ClusterSpec.Id, ClusterInfo> result = new HashMap<>();
+
+ for (int cluster = 0; cluster < clusters; cluster++) {
+ List<String> hostnames = new ArrayList<>();
+ for (int host = 0; host < hosts; host++) {
+ hostnames.add("hostname" + cluster*host + host);
+ }
+
+ result.put(ClusterSpec.Id.from("id" + cluster), new ClusterInfo("flavor" + cluster, 10,
+ 2, 4, 50, ClusterSpec.Type.content, hostnames));
+ }
+ return result;
+ }
+
+ private Map<ClusterSpec.Id, ClusterUtilization> createClusterUtils(int clusters, double inc) {
+ Map<ClusterSpec.Id, ClusterUtilization> result = new HashMap<>();
+
+ ClusterUtilization util = new ClusterUtilization(0,0,0,0);
+ for (int cluster = 0; cluster < clusters; cluster++) {
+ double agg = cluster*inc;
+ result.put(ClusterSpec.Id.from("id" + cluster), new ClusterUtilization(
+ util.getMemory()+ agg,
+ util.getCpu()+ agg,
+ util.getDisk() + agg,
+ util.getDiskBusy() + agg));
+ }
+ return result;
+ }
+
+ @Test
+ public void testCompleteApplicationDeserialization() throws Exception {
+ byte[] applicationJson = Files.readAllBytes(testData.resolve("complete-application.json"));
+ APPLICATION_SERIALIZER.fromSlime(SlimeUtils.jsonToSlime(applicationJson));
+ // ok if no error
+ }
+
+}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/InstanceSerializerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/InstanceSerializerTest.java
index 8a6a6efbd5a..3bb5e3d0c2b 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/InstanceSerializerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/InstanceSerializerTest.java
@@ -46,7 +46,6 @@ import java.util.OptionalLong;
import java.util.Set;
import static com.yahoo.config.provision.SystemName.main;
-import static com.yahoo.vespa.hosted.controller.ControllerTester.writable;
import static java.util.Optional.empty;
import static org.junit.Assert.assertEquals;
@@ -182,30 +181,6 @@ public class InstanceSerializerTest {
assertEquals(original.deployments().get(zone2).metrics().writeLatencyMillis(), serialized.deployments().get(zone2).metrics().writeLatencyMillis(), Double.MIN_VALUE);
assertEquals(original.deployments().get(zone2).metrics().instant(), serialized.deployments().get(zone2).metrics().instant());
assertEquals(original.deployments().get(zone2).metrics().warnings(), serialized.deployments().get(zone2).metrics().warnings());
- { // test more deployment serialization cases
- Instance original2 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("repo1", "branch1", "commit1"), 42))).get();
- Instance serialized2 = INSTANCE_SERIALIZER.fromSlime(INSTANCE_SERIALIZER.toSlime(original2));
- assertEquals(original2.change(), serialized2.change());
- assertEquals(serialized2.change().application().get().source(),
- original2.change().application().get().source());
-
- Instance original3 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get();
- Instance serialized3 = INSTANCE_SERIALIZER.fromSlime(INSTANCE_SERIALIZER.toSlime(original3));
- assertEquals(original3.change(), serialized3.change());
- assertEquals(serialized3.change().application().get().source(),
- original3.change().application().get().source());
- Instance original4 = writable(original).withChange(Change.empty()).get();
- Instance serialized4 = INSTANCE_SERIALIZER.fromSlime(INSTANCE_SERIALIZER.toSlime(original4));
- assertEquals(original4.change(), serialized4.change());
-
- Instance original5 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get();
- Instance serialized5 = INSTANCE_SERIALIZER.fromSlime(INSTANCE_SERIALIZER.toSlime(original5));
- assertEquals(original5.change(), serialized5.change());
-
- Instance original6 = writable(original).withOutstandingChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get();
- Instance serialized6 = INSTANCE_SERIALIZER.fromSlime(INSTANCE_SERIALIZER.toSlime(original6));
- assertEquals(original6.outstandingChange(), serialized6.outstandingChange());
- }
}
private Map<ClusterSpec.Id, ClusterInfo> createClusterInfo(int clusters, int hosts) {
@@ -240,7 +215,7 @@ public class InstanceSerializerTest {
@Test
public void testCompleteApplicationDeserialization() throws Exception {
- byte[] applicationJson = Files.readAllBytes(testData.resolve("complete-application.json"));
+ byte[] applicationJson = Files.readAllBytes(testData.resolve("complete-instance.json"));
INSTANCE_SERIALIZER.fromSlime(SlimeUtils.jsonToSlime(applicationJson));
// ok if no error
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/OldCuratorDb.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/OldCuratorDb.java
index ef85da1100c..8aea72a9ac4 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/OldCuratorDb.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/OldCuratorDb.java
@@ -337,8 +337,7 @@ public class OldCuratorDb {
}
public Optional<Instance> readInstance(ApplicationId application) {
- return readSlime(instancePath(application)).or(() -> readSlime(applicationPath(application)))
- .map(instanceSerializer::fromSlime);
+ return readSlime(applicationPath(application)).map(instanceSerializer::fromSlime);
}
public List<Instance> readInstances() {
@@ -350,11 +349,10 @@ public class OldCuratorDb {
}
private Stream<ApplicationId> readInstanceIds() {
- return Stream.concat(curator.getChildren(applicationRoot).stream()
- .filter(id -> id.split(":").length == 3),
- curator.getChildren(instanceRoot).stream())
- .distinct()
- .map(ApplicationId::fromSerializedForm);
+ return curator.getChildren(applicationRoot).stream()
+ .filter(id -> id.split(":").length == 3)
+ .distinct()
+ .map(ApplicationId::fromSerializedForm);
}
private List<Instance> readInstances(Predicate<ApplicationId> instanceFilter) {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/complete-application.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/complete-application.json
index 28f505e88ec..ec976c1b922 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/complete-application.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/complete-application.json
@@ -1,299 +1,36 @@
{
- "id": "tenant1:app1:default",
+ "id": "tenant1:app1",
+ "internal": true,
+ "deploymentIssueId": "321",
"deploymentSpecField": "<deployment version='1.0'>\n <test />\n <!--<staging />-->\n <prod global-service-id=\"foo\">\n <region active=\"true\">us-east-3</region>\n <region active=\"true\">us-west-1</region>\n </prod>\n</deployment>\n",
"validationOverrides": "<validation-overrides>\n <allow until=\"2016-04-28\" comment=\"Renaming content cluster\">content-cluster-removal</allow>\n <allow until=\"2016-08-22\" comment=\"Migrating us-east-3 to C-2E\">cluster-size-reduction</allow>\n <allow until=\"2017-06-30\" comment=\"Test Vespa upgrade tests\">force-automatic-tenant-upgrade-test</allow>\n</validation-overrides>\n",
- "deployments": [
- {
- "zone": {
- "environment": "prod",
- "region": "us-west-1"
- },
- "version": "6.173.62",
- "deployTime": 1510837817704,
- "applicationPackageRevision": {
- "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
- "sourceRevision": {
- "repositoryField": "git@git.host:user/repo.git",
- "branchField": "origin/master",
- "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
- }
- },
- "clusterInfo": {
- "cluster1": {
- "flavor": "d-3-16-100",
- "cost": 9,
- "flavorCpu": 0,
- "flavorMem": 0,
- "flavorDisk": 0,
- "clusterType": "container",
- "hostnames": [
- "node1",
- "node2"
- ]
- },
- "cluster2": {
- "flavor": "d-12-64-400",
- "cost": 38,
- "flavorCpu": 0,
- "flavorMem": 0,
- "flavorDisk": 0,
- "clusterType": "content",
- "hostnames": [
- "node3",
- "node4",
- "node5"
- ]
- },
- "cluster3": {
- "flavor": "d-12-64-400",
- "cost": 38,
- "flavorCpu": 0,
- "flavorMem": 0,
- "flavorDisk": 0,
- "clusterType": "content",
- "hostnames": [
- "node6",
- "node7",
- "node8",
- "node9"
- ]
- }
- },
- "clusterUtils": {
- "cluster1": {
- "cpu": 0.1720353499228221,
- "mem": 0.4986146831512451,
- "disk": 0.0617671330041831,
- "diskbusy": 0
- },
- "cluster2": {
- "cpu": 0.07505730001866318,
- "mem": 0.7936344432830811,
- "disk": 0.2260549694485994,
- "diskbusy": 0
- },
- "cluster3": {
- "cpu": 0.01712671480989384,
- "mem": 0.0225852754983035,
- "disk": 0.006084436856721915,
- "diskbusy": 0
- }
- },
- "metrics": {
- "queriesPerSecond": 1.25,
- "writesPerSecond": 43.83199977874756,
- "documentCount": 525880277.9999999,
- "queryLatencyMillis": 5.607503938674927,
- "writeLatencyMillis": 20.57866265104621
- }
- },
- {
- "zone": {
- "environment": "test",
- "region": "us-east-1"
- },
- "version": "6.173.62",
- "deployTime": 1511256872316,
- "applicationPackageRevision": {
- "applicationPackageHash": "ec548fa61cbfab7a270a51d46b1263ec1be5d9a8",
- "sourceRevision": {
- "repositoryField": "git@git.host:user/repo.git",
- "branchField": "origin/master",
- "commitField": "234f3e4e77049d0b9538c9e1b356d17eb1dedb6a"
- }
- },
- "clusterInfo": {},
- "clusterUtils": {},
- "metrics": {
- "queriesPerSecond": 0,
- "writesPerSecond": 0,
- "documentCount": 0,
- "queryLatencyMillis": 0,
- "writeLatencyMillis": 0
- }
- },
- {
- "zone": {
- "environment": "dev",
- "region": "us-east-1"
- },
- "version": "6.173.62",
- "deployTime": 1510597489464,
- "applicationPackageRevision": {
- "applicationPackageHash": "59b883f263c2a3c23dfab249730097d7e0e1ed32"
- },
- "clusterInfo": {
- "cluster1": {
- "flavor": "d-2-8-50",
- "cost": 5,
- "flavorCpu": 0,
- "flavorMem": 0,
- "flavorDisk": 0,
- "clusterType": "container",
- "hostnames": [
- "node1"
- ]
- },
- "cluster2": {
- "flavor": "d-2-8-50",
- "cost": 5,
- "flavorCpu": 0,
- "flavorMem": 0,
- "flavorDisk": 0,
- "clusterType": "content",
- "hostnames": [
- "node2"
- ]
- },
- "cluster3": {
- "flavor": "d-2-8-50",
- "cost": 5,
- "flavorCpu": 0,
- "flavorMem": 0,
- "flavorDisk": 0,
- "clusterType": "content",
- "hostnames": [
- "node3"
- ]
- }
- },
- "clusterUtils": {
- "cluster1": {
- "cpu": 0.191833330678661,
- "mem": 0.4625738318415235,
- "disk": 0.05582004563850269,
- "diskbusy": 0
- },
- "cluster2": {
- "cpu": 0.2227037978608054,
- "mem": 0.2051752598416401,
- "disk": 0.05471533698695047,
- "diskbusy": 0
- },
- "cluster3": {
- "cpu": 0.1869410834020498,
- "mem": 0.1691722576000564,
- "disk": 0.04977374774258153,
- "diskbusy": 0
- }
- },
- "metrics": {
- "queriesPerSecond": 0,
- "writesPerSecond": 0,
- "documentCount": 30916,
- "queryLatencyMillis": 0,
- "writeLatencyMillis": 0
- }
- },
- {
- "zone": {
- "environment": "prod",
- "region": "us-east-3"
- },
- "version": "6.173.62",
- "deployTime": 1510817190016,
- "applicationPackageRevision": {
- "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
- "sourceRevision": {
- "repositoryField": "git@git.host:user/repo.git",
- "branchField": "origin/master",
- "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
- }
- },
- "clusterInfo": {
- "cluster1": {
- "flavor": "d-3-16-100",
- "cost": 9,
- "flavorCpu": 0,
- "flavorMem": 0,
- "flavorDisk": 0,
- "clusterType": "container",
- "hostnames": [
- "node1",
- "node2"
- ]
- },
- "cluster2": {
- "flavor": "d-12-64-400",
- "cost": 38,
- "flavorCpu": 0,
- "flavorMem": 0,
- "flavorDisk": 0,
- "clusterType": "content",
- "hostnames": [
- "node1",
- "node2",
- "node3"
- ]
- },
- "cluster3": {
- "flavor": "d-12-64-400",
- "cost": 38,
- "flavorCpu": 0,
- "flavorMem": 0,
- "flavorDisk": 0,
- "clusterType": "content",
- "hostnames": [
- "node1",
- "node2",
- "node3",
- "node4"
- ]
- }
- },
- "clusterUtils": {
- "cluster1": {
- "cpu": 0.2295038983007097,
- "mem": 0.4627357390237263,
- "disk": 0.05559941525894966,
- "diskbusy": 0
- },
- "cluster2": {
- "cpu": 0.05340429087579549,
- "mem": 0.8107630891552372,
- "disk": 0.226444914138854,
- "diskbusy": 0
- },
- "cluster3": {
- "cpu": 0.02148227413975218,
- "mem": 0.02162174219104161,
- "disk": 0.006057760545243265,
- "diskbusy": 0
- }
- },
- "metrics": {
- "queriesPerSecond": 1.734000012278557,
- "writesPerSecond": 44.59999895095825,
- "documentCount": 525868193.9999999,
- "queryLatencyMillis": 5.65284947195106,
- "writeLatencyMillis": 17.34593812832452
- }
+ "projectId": 102889,
+ "deployingField": {
+ "buildNumber": 42,
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "234f3e4e77049d0b9538c9e1b356d17eb1dedb6a"
}
+ },
+ "outstandingChangeField": false,
+ "queryQuality": 100,
+ "writeQuality": 99.99894341115082,
+ "pemDeployKeys": [
+ "-----BEGIN PUBLIC KEY-----\n∠( ᐛ 」∠)_\n-----END PUBLIC KEY-----"
],
- "deploymentJobs": {
- "projectId": 102889,
- "jobStatus": [
- {
- "jobType": "staging-test",
- "lastTriggered": {
- "id": -1,
- "version": "6.173.62",
- "revision": {
- "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
- "sourceRevision": {
- "repositoryField": "git@git.host:user/repo.git",
- "branchField": "origin/master",
- "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
- }
+ "instances": [
+ {
+ "instanceName": "default",
+ "deployments": [
+ {
+ "zone": {
+ "environment": "prod",
+ "region": "us-west-1"
},
- "upgrade": true,
- "reason": "system-test completed",
- "at": 1510830134259
- },
- "lastCompleted": {
- "id": 1184,
"version": "6.173.62",
- "revision": {
+ "deployTime": 1510837817704,
+ "applicationPackageRevision": {
"applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
"sourceRevision": {
"repositoryField": "git@git.host:user/repo.git",
@@ -301,112 +38,181 @@
"commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
}
},
- "upgrade": true,
- "reason": "system-test completed",
- "at": 1510830684960
- },
- "lastSuccess": {
- "id": 1184,
- "version": "6.173.62",
- "revision": {
- "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
- "sourceRevision": {
- "repositoryField": "git@git.host:user/repo.git",
- "branchField": "origin/master",
- "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
+ "clusterInfo": {
+ "cluster1": {
+ "flavor": "d-3-16-100",
+ "cost": 9,
+ "flavorCpu": 0,
+ "flavorMem": 0,
+ "flavorDisk": 0,
+ "clusterType": "container",
+ "hostnames": [
+ "node1",
+ "node2"
+ ]
+ },
+ "cluster2": {
+ "flavor": "d-12-64-400",
+ "cost": 38,
+ "flavorCpu": 0,
+ "flavorMem": 0,
+ "flavorDisk": 0,
+ "clusterType": "content",
+ "hostnames": [
+ "node3",
+ "node4",
+ "node5"
+ ]
+ },
+ "cluster3": {
+ "flavor": "d-12-64-400",
+ "cost": 38,
+ "flavorCpu": 0,
+ "flavorMem": 0,
+ "flavorDisk": 0,
+ "clusterType": "content",
+ "hostnames": [
+ "node6",
+ "node7",
+ "node8",
+ "node9"
+ ]
}
},
- "upgrade": true,
- "reason": "system-test completed",
- "at": 1510830684960
- }
- },
- {
- "jobType": "component",
- "lastCompleted": {
- "id": 849,
- "version": "6.174.156",
- "upgrade": false,
- "reason": "Application commit",
- "at": 1511217733555
- },
- "lastSuccess": {
- "id": 849,
- "version": "6.174.156",
- "upgrade": false,
- "reason": "Application commit",
- "at": 1511217733555
- }
- },
- {
- "jobType": "production-us-east-3",
- "lastTriggered": {
- "id": -1,
- "version": "6.173.62",
- "revision": {
- "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
- "sourceRevision": {
- "repositoryField": "git@git.host:user/repo.git",
- "branchField": "origin/master",
- "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
+ "clusterUtils": {
+ "cluster1": {
+ "cpu": 0.1720353499228221,
+ "mem": 0.4986146831512451,
+ "disk": 0.0617671330041831,
+ "diskbusy": 0
+ },
+ "cluster2": {
+ "cpu": 0.07505730001866318,
+ "mem": 0.7936344432830811,
+ "disk": 0.2260549694485994,
+ "diskbusy": 0
+ },
+ "cluster3": {
+ "cpu": 0.01712671480989384,
+ "mem": 0.0225852754983035,
+ "disk": 0.006084436856721915,
+ "diskbusy": 0
}
},
- "upgrade": true,
- "reason": "staging-test completed",
- "at": 1510830685127
+ "metrics": {
+ "queriesPerSecond": 1.25,
+ "writesPerSecond": 43.83199977874756,
+ "documentCount": 525880277.9999999,
+ "queryLatencyMillis": 5.607503938674927,
+ "writeLatencyMillis": 20.57866265104621
+ }
},
- "lastCompleted": {
- "id": 923,
+ {
+ "zone": {
+ "environment": "test",
+ "region": "us-east-1"
+ },
"version": "6.173.62",
- "revision": {
- "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
+ "deployTime": 1511256872316,
+ "applicationPackageRevision": {
+ "applicationPackageHash": "ec548fa61cbfab7a270a51d46b1263ec1be5d9a8",
"sourceRevision": {
"repositoryField": "git@git.host:user/repo.git",
"branchField": "origin/master",
- "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
+ "commitField": "234f3e4e77049d0b9538c9e1b356d17eb1dedb6a"
}
},
- "upgrade": true,
- "reason": "staging-test completed",
- "at": 1510837650046
+ "clusterInfo": {},
+ "clusterUtils": {},
+ "metrics": {
+ "queriesPerSecond": 0,
+ "writesPerSecond": 0,
+ "documentCount": 0,
+ "queryLatencyMillis": 0,
+ "writeLatencyMillis": 0
+ }
},
- "lastSuccess": {
- "id": 923,
+ {
+ "zone": {
+ "environment": "dev",
+ "region": "us-east-1"
+ },
"version": "6.173.62",
- "revision": {
- "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
- "sourceRevision": {
- "repositoryField": "git@git.host:user/repo.git",
- "branchField": "origin/master",
- "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
+ "deployTime": 1510597489464,
+ "applicationPackageRevision": {
+ "applicationPackageHash": "59b883f263c2a3c23dfab249730097d7e0e1ed32"
+ },
+ "clusterInfo": {
+ "cluster1": {
+ "flavor": "d-2-8-50",
+ "cost": 5,
+ "flavorCpu": 0,
+ "flavorMem": 0,
+ "flavorDisk": 0,
+ "clusterType": "container",
+ "hostnames": [
+ "node1"
+ ]
+ },
+ "cluster2": {
+ "flavor": "d-2-8-50",
+ "cost": 5,
+ "flavorCpu": 0,
+ "flavorMem": 0,
+ "flavorDisk": 0,
+ "clusterType": "content",
+ "hostnames": [
+ "node2"
+ ]
+ },
+ "cluster3": {
+ "flavor": "d-2-8-50",
+ "cost": 5,
+ "flavorCpu": 0,
+ "flavorMem": 0,
+ "flavorDisk": 0,
+ "clusterType": "content",
+ "hostnames": [
+ "node3"
+ ]
}
},
- "upgrade": true,
- "reason": "staging-test completed",
- "at": 1510837650046
- }
- },
- {
- "jobType": "production-us-west-1",
- "lastTriggered": {
- "id": -1,
- "version": "6.173.62",
- "revision": {
- "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
- "sourceRevision": {
- "repositoryField": "git@git.host:user/repo.git",
- "branchField": "origin/master",
- "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
+ "clusterUtils": {
+ "cluster1": {
+ "cpu": 0.191833330678661,
+ "mem": 0.4625738318415235,
+ "disk": 0.05582004563850269,
+ "diskbusy": 0
+ },
+ "cluster2": {
+ "cpu": 0.2227037978608054,
+ "mem": 0.2051752598416401,
+ "disk": 0.05471533698695047,
+ "diskbusy": 0
+ },
+ "cluster3": {
+ "cpu": 0.1869410834020498,
+ "mem": 0.1691722576000564,
+ "disk": 0.04977374774258153,
+ "diskbusy": 0
}
},
- "upgrade": true,
- "reason": "production-us-east-3 completed",
- "at": 1510837650139
+ "metrics": {
+ "queriesPerSecond": 0,
+ "writesPerSecond": 0,
+ "documentCount": 30916,
+ "queryLatencyMillis": 0,
+ "writeLatencyMillis": 0
+ }
},
- "lastCompleted": {
- "id": 646,
+ {
+ "zone": {
+ "environment": "prod",
+ "region": "us-east-3"
+ },
"version": "6.173.62",
- "revision": {
+ "deployTime": 1510817190016,
+ "applicationPackageRevision": {
"applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
"sourceRevision": {
"repositoryField": "git@git.host:user/repo.git",
@@ -414,121 +220,333 @@
"commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
}
},
- "upgrade": true,
- "reason": "production-us-east-3 completed",
- "at": 1510843559162
- },
- "lastSuccess": {
- "id": 646,
- "version": "6.173.62",
- "revision": {
- "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
- "sourceRevision": {
- "repositoryField": "git@git.host:user/repo.git",
- "branchField": "origin/master",
- "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
+ "clusterInfo": {
+ "cluster1": {
+ "flavor": "d-3-16-100",
+ "cost": 9,
+ "flavorCpu": 0,
+ "flavorMem": 0,
+ "flavorDisk": 0,
+ "clusterType": "container",
+ "hostnames": [
+ "node1",
+ "node2"
+ ]
+ },
+ "cluster2": {
+ "flavor": "d-12-64-400",
+ "cost": 38,
+ "flavorCpu": 0,
+ "flavorMem": 0,
+ "flavorDisk": 0,
+ "clusterType": "content",
+ "hostnames": [
+ "node1",
+ "node2",
+ "node3"
+ ]
+ },
+ "cluster3": {
+ "flavor": "d-12-64-400",
+ "cost": 38,
+ "flavorCpu": 0,
+ "flavorMem": 0,
+ "flavorDisk": 0,
+ "clusterType": "content",
+ "hostnames": [
+ "node1",
+ "node2",
+ "node3",
+ "node4"
+ ]
}
},
- "upgrade": true,
- "reason": "production-us-east-3 completed",
- "at": 1510843559162
+ "clusterUtils": {
+ "cluster1": {
+ "cpu": 0.2295038983007097,
+ "mem": 0.4627357390237263,
+ "disk": 0.05559941525894966,
+ "diskbusy": 0
+ },
+ "cluster2": {
+ "cpu": 0.05340429087579549,
+ "mem": 0.8107630891552372,
+ "disk": 0.226444914138854,
+ "diskbusy": 0
+ },
+ "cluster3": {
+ "cpu": 0.02148227413975218,
+ "mem": 0.02162174219104161,
+ "disk": 0.006057760545243265,
+ "diskbusy": 0
+ }
+ },
+ "metrics": {
+ "queriesPerSecond": 1.734000012278557,
+ "writesPerSecond": 44.59999895095825,
+ "documentCount": 525868193.9999999,
+ "queryLatencyMillis": 5.65284947195106,
+ "writeLatencyMillis": 17.34593812832452
+ }
}
- },
- {
- "jobType": "system-test",
- "jobError": "unknown",
- "lastTriggered": {
- "id": -1,
- "version": "6.173.62",
- "revision": {
- "applicationPackageHash": "ec548fa61cbfab7a270a51d46b1263ec1be5d9a8",
- "sourceRevision": {
- "repositoryField": "git@git.host:user/repo.git",
- "branchField": "origin/master",
- "commitField": "234f3e4e77049d0b9538c9e1b356d17eb1dedb6a"
+ ],
+ "deploymentJobs": {
+ "jobStatus": [
+ {
+ "jobType": "staging-test",
+ "lastTriggered": {
+ "id": -1,
+ "version": "6.173.62",
+ "revision": {
+ "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
+ }
+ },
+ "upgrade": true,
+ "reason": "system-test completed",
+ "at": 1510830134259
+ },
+ "lastCompleted": {
+ "id": 1184,
+ "version": "6.173.62",
+ "revision": {
+ "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
+ }
+ },
+ "upgrade": true,
+ "reason": "system-test completed",
+ "at": 1510830684960
+ },
+ "lastSuccess": {
+ "id": 1184,
+ "version": "6.173.62",
+ "revision": {
+ "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
+ }
+ },
+ "upgrade": true,
+ "reason": "system-test completed",
+ "at": 1510830684960
}
},
- "upgrade": false,
- "reason": "Available change in component",
- "at": 1511256608649
- },
- "lastCompleted": {
- "id": 1686,
- "version": "6.173.62",
- "revision": {
- "applicationPackageHash": "ec548fa61cbfab7a270a51d46b1263ec1be5d9a8",
- "sourceRevision": {
- "repositoryField": "git@git.host:user/repo.git",
- "branchField": "origin/master",
- "commitField": "234f3e4e77049d0b9538c9e1b356d17eb1dedb6a"
+ {
+ "jobType": "component",
+ "lastCompleted": {
+ "id": 849,
+ "version": "6.174.156",
+ "upgrade": false,
+ "reason": "Application commit",
+ "at": 1511217733555
+ },
+ "lastSuccess": {
+ "id": 849,
+ "version": "6.174.156",
+ "upgrade": false,
+ "reason": "Application commit",
+ "at": 1511217733555
}
},
- "upgrade": false,
- "reason": "Available change in component",
- "at": 1511256603353
- },
- "firstFailing": {
- "id": 1659,
- "version": "6.173.62",
- "revision": {
- "applicationPackageHash": "ec548fa61cbfab7a270a51d46b1263ec1be5d9a8",
- "sourceRevision": {
- "repositoryField": "git@git.host:user/repo.git",
- "branchField": "origin/master",
- "commitField": "234f3e4e77049d0b9538c9e1b356d17eb1dedb6a"
+ {
+ "jobType": "production-us-east-3",
+ "lastTriggered": {
+ "id": -1,
+ "version": "6.173.62",
+ "revision": {
+ "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
+ }
+ },
+ "upgrade": true,
+ "reason": "staging-test completed",
+ "at": 1510830685127
+ },
+ "lastCompleted": {
+ "id": 923,
+ "version": "6.173.62",
+ "revision": {
+ "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
+ }
+ },
+ "upgrade": true,
+ "reason": "staging-test completed",
+ "at": 1510837650046
+ },
+ "lastSuccess": {
+ "id": 923,
+ "version": "6.173.62",
+ "revision": {
+ "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
+ }
+ },
+ "upgrade": true,
+ "reason": "staging-test completed",
+ "at": 1510837650046
}
},
- "upgrade": false,
- "reason": "component completed",
- "at": 1511219070725
- },
- "lastSuccess": {
- "id": 1658,
- "version": "6.173.62",
- "revision": {
- "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
- "sourceRevision": {
- "repositoryField": "git@git.host:user/repo.git",
- "branchField": "origin/master",
- "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
+ {
+ "jobType": "production-us-west-1",
+ "lastTriggered": {
+ "id": -1,
+ "version": "6.173.62",
+ "revision": {
+ "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
+ }
+ },
+ "upgrade": true,
+ "reason": "production-us-east-3 completed",
+ "at": 1510837650139
+ },
+ "lastCompleted": {
+ "id": 646,
+ "version": "6.173.62",
+ "revision": {
+ "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
+ }
+ },
+ "upgrade": true,
+ "reason": "production-us-east-3 completed",
+ "at": 1510843559162
+ },
+ "lastSuccess": {
+ "id": 646,
+ "version": "6.173.62",
+ "revision": {
+ "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
+ }
+ },
+ "upgrade": true,
+ "reason": "production-us-east-3 completed",
+ "at": 1510843559162
}
},
- "upgrade": true,
- "reason": "Upgrading to 6.173.62",
- "at": 1511175754163
+ {
+ "jobType": "system-test",
+ "jobError": "unknown",
+ "lastTriggered": {
+ "id": -1,
+ "version": "6.173.62",
+ "revision": {
+ "applicationPackageHash": "ec548fa61cbfab7a270a51d46b1263ec1be5d9a8",
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "234f3e4e77049d0b9538c9e1b356d17eb1dedb6a"
+ }
+ },
+ "upgrade": false,
+ "reason": "Available change in component",
+ "at": 1511256608649
+ },
+ "lastCompleted": {
+ "id": 1686,
+ "version": "6.173.62",
+ "revision": {
+ "applicationPackageHash": "ec548fa61cbfab7a270a51d46b1263ec1be5d9a8",
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "234f3e4e77049d0b9538c9e1b356d17eb1dedb6a"
+ }
+ },
+ "upgrade": false,
+ "reason": "Available change in component",
+ "at": 1511256603353
+ },
+ "firstFailing": {
+ "id": 1659,
+ "version": "6.173.62",
+ "revision": {
+ "applicationPackageHash": "ec548fa61cbfab7a270a51d46b1263ec1be5d9a8",
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "234f3e4e77049d0b9538c9e1b356d17eb1dedb6a"
+ }
+ },
+ "upgrade": false,
+ "reason": "component completed",
+ "at": 1511219070725
+ },
+ "lastSuccess": {
+ "id": 1658,
+ "version": "6.173.62",
+ "revision": {
+ "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
+ }
+ },
+ "upgrade": true,
+ "reason": "Upgrading to 6.173.62",
+ "at": 1511175754163
+ }
+ }
+ ]
+ },
+ "assignedRotations": [
+ {
+ "rotationId": "rotation-foo",
+ "clusterId": "qrs",
+ "endpointId": "default"
}
- }
- ]
- },
- "deployingField": {
- "buildNumber": 42,
- "sourceRevision": {
- "repositoryField": "git@git.host:user/repo.git",
- "branchField": "origin/master",
- "commitField": "234f3e4e77049d0b9538c9e1b356d17eb1dedb6a"
- }
- },
- "outstandingChangeField": false,
- "queryQuality": 100,
- "writeQuality": 99.99894341115082,
- "pemDeployKey": "-----BEGIN PUBLIC KEY-----\n∠( ᐛ 」∠)_\n-----END PUBLIC KEY-----",
- "assignedRotations": [
- {
- "rotationId": "rotation-foo",
- "clusterId": "qrs",
- "endpointId": "default"
- }
- ],
- "rotationStatus2": [
- {
- "rotationId": "rotation-foo",
- "status": [
+ ],
+ "rotationStatus2": [
{
- "environment": "prod",
- "region": "us-east-3",
- "state": "in"
+ "rotationId": "rotation-foo",
+ "status": [
+ {
+ "environment": "prod",
+ "region": "us-east-3",
+ "state": "in"
+ }
+ ]
}
]
+ },
+ {
+ "instanceName": "empty-instance",
+ "deployments": [],
+ "deploymentJobs": {
+ "jobStatus": []
+ },
+ "assignedRotations": [],
+ "rotationStatus2": []
}
]
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/complete-instance.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/complete-instance.json
new file mode 100644
index 00000000000..28f505e88ec
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/complete-instance.json
@@ -0,0 +1,534 @@
+{
+ "id": "tenant1:app1:default",
+ "deploymentSpecField": "<deployment version='1.0'>\n <test />\n <!--<staging />-->\n <prod global-service-id=\"foo\">\n <region active=\"true\">us-east-3</region>\n <region active=\"true\">us-west-1</region>\n </prod>\n</deployment>\n",
+ "validationOverrides": "<validation-overrides>\n <allow until=\"2016-04-28\" comment=\"Renaming content cluster\">content-cluster-removal</allow>\n <allow until=\"2016-08-22\" comment=\"Migrating us-east-3 to C-2E\">cluster-size-reduction</allow>\n <allow until=\"2017-06-30\" comment=\"Test Vespa upgrade tests\">force-automatic-tenant-upgrade-test</allow>\n</validation-overrides>\n",
+ "deployments": [
+ {
+ "zone": {
+ "environment": "prod",
+ "region": "us-west-1"
+ },
+ "version": "6.173.62",
+ "deployTime": 1510837817704,
+ "applicationPackageRevision": {
+ "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
+ }
+ },
+ "clusterInfo": {
+ "cluster1": {
+ "flavor": "d-3-16-100",
+ "cost": 9,
+ "flavorCpu": 0,
+ "flavorMem": 0,
+ "flavorDisk": 0,
+ "clusterType": "container",
+ "hostnames": [
+ "node1",
+ "node2"
+ ]
+ },
+ "cluster2": {
+ "flavor": "d-12-64-400",
+ "cost": 38,
+ "flavorCpu": 0,
+ "flavorMem": 0,
+ "flavorDisk": 0,
+ "clusterType": "content",
+ "hostnames": [
+ "node3",
+ "node4",
+ "node5"
+ ]
+ },
+ "cluster3": {
+ "flavor": "d-12-64-400",
+ "cost": 38,
+ "flavorCpu": 0,
+ "flavorMem": 0,
+ "flavorDisk": 0,
+ "clusterType": "content",
+ "hostnames": [
+ "node6",
+ "node7",
+ "node8",
+ "node9"
+ ]
+ }
+ },
+ "clusterUtils": {
+ "cluster1": {
+ "cpu": 0.1720353499228221,
+ "mem": 0.4986146831512451,
+ "disk": 0.0617671330041831,
+ "diskbusy": 0
+ },
+ "cluster2": {
+ "cpu": 0.07505730001866318,
+ "mem": 0.7936344432830811,
+ "disk": 0.2260549694485994,
+ "diskbusy": 0
+ },
+ "cluster3": {
+ "cpu": 0.01712671480989384,
+ "mem": 0.0225852754983035,
+ "disk": 0.006084436856721915,
+ "diskbusy": 0
+ }
+ },
+ "metrics": {
+ "queriesPerSecond": 1.25,
+ "writesPerSecond": 43.83199977874756,
+ "documentCount": 525880277.9999999,
+ "queryLatencyMillis": 5.607503938674927,
+ "writeLatencyMillis": 20.57866265104621
+ }
+ },
+ {
+ "zone": {
+ "environment": "test",
+ "region": "us-east-1"
+ },
+ "version": "6.173.62",
+ "deployTime": 1511256872316,
+ "applicationPackageRevision": {
+ "applicationPackageHash": "ec548fa61cbfab7a270a51d46b1263ec1be5d9a8",
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "234f3e4e77049d0b9538c9e1b356d17eb1dedb6a"
+ }
+ },
+ "clusterInfo": {},
+ "clusterUtils": {},
+ "metrics": {
+ "queriesPerSecond": 0,
+ "writesPerSecond": 0,
+ "documentCount": 0,
+ "queryLatencyMillis": 0,
+ "writeLatencyMillis": 0
+ }
+ },
+ {
+ "zone": {
+ "environment": "dev",
+ "region": "us-east-1"
+ },
+ "version": "6.173.62",
+ "deployTime": 1510597489464,
+ "applicationPackageRevision": {
+ "applicationPackageHash": "59b883f263c2a3c23dfab249730097d7e0e1ed32"
+ },
+ "clusterInfo": {
+ "cluster1": {
+ "flavor": "d-2-8-50",
+ "cost": 5,
+ "flavorCpu": 0,
+ "flavorMem": 0,
+ "flavorDisk": 0,
+ "clusterType": "container",
+ "hostnames": [
+ "node1"
+ ]
+ },
+ "cluster2": {
+ "flavor": "d-2-8-50",
+ "cost": 5,
+ "flavorCpu": 0,
+ "flavorMem": 0,
+ "flavorDisk": 0,
+ "clusterType": "content",
+ "hostnames": [
+ "node2"
+ ]
+ },
+ "cluster3": {
+ "flavor": "d-2-8-50",
+ "cost": 5,
+ "flavorCpu": 0,
+ "flavorMem": 0,
+ "flavorDisk": 0,
+ "clusterType": "content",
+ "hostnames": [
+ "node3"
+ ]
+ }
+ },
+ "clusterUtils": {
+ "cluster1": {
+ "cpu": 0.191833330678661,
+ "mem": 0.4625738318415235,
+ "disk": 0.05582004563850269,
+ "diskbusy": 0
+ },
+ "cluster2": {
+ "cpu": 0.2227037978608054,
+ "mem": 0.2051752598416401,
+ "disk": 0.05471533698695047,
+ "diskbusy": 0
+ },
+ "cluster3": {
+ "cpu": 0.1869410834020498,
+ "mem": 0.1691722576000564,
+ "disk": 0.04977374774258153,
+ "diskbusy": 0
+ }
+ },
+ "metrics": {
+ "queriesPerSecond": 0,
+ "writesPerSecond": 0,
+ "documentCount": 30916,
+ "queryLatencyMillis": 0,
+ "writeLatencyMillis": 0
+ }
+ },
+ {
+ "zone": {
+ "environment": "prod",
+ "region": "us-east-3"
+ },
+ "version": "6.173.62",
+ "deployTime": 1510817190016,
+ "applicationPackageRevision": {
+ "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
+ }
+ },
+ "clusterInfo": {
+ "cluster1": {
+ "flavor": "d-3-16-100",
+ "cost": 9,
+ "flavorCpu": 0,
+ "flavorMem": 0,
+ "flavorDisk": 0,
+ "clusterType": "container",
+ "hostnames": [
+ "node1",
+ "node2"
+ ]
+ },
+ "cluster2": {
+ "flavor": "d-12-64-400",
+ "cost": 38,
+ "flavorCpu": 0,
+ "flavorMem": 0,
+ "flavorDisk": 0,
+ "clusterType": "content",
+ "hostnames": [
+ "node1",
+ "node2",
+ "node3"
+ ]
+ },
+ "cluster3": {
+ "flavor": "d-12-64-400",
+ "cost": 38,
+ "flavorCpu": 0,
+ "flavorMem": 0,
+ "flavorDisk": 0,
+ "clusterType": "content",
+ "hostnames": [
+ "node1",
+ "node2",
+ "node3",
+ "node4"
+ ]
+ }
+ },
+ "clusterUtils": {
+ "cluster1": {
+ "cpu": 0.2295038983007097,
+ "mem": 0.4627357390237263,
+ "disk": 0.05559941525894966,
+ "diskbusy": 0
+ },
+ "cluster2": {
+ "cpu": 0.05340429087579549,
+ "mem": 0.8107630891552372,
+ "disk": 0.226444914138854,
+ "diskbusy": 0
+ },
+ "cluster3": {
+ "cpu": 0.02148227413975218,
+ "mem": 0.02162174219104161,
+ "disk": 0.006057760545243265,
+ "diskbusy": 0
+ }
+ },
+ "metrics": {
+ "queriesPerSecond": 1.734000012278557,
+ "writesPerSecond": 44.59999895095825,
+ "documentCount": 525868193.9999999,
+ "queryLatencyMillis": 5.65284947195106,
+ "writeLatencyMillis": 17.34593812832452
+ }
+ }
+ ],
+ "deploymentJobs": {
+ "projectId": 102889,
+ "jobStatus": [
+ {
+ "jobType": "staging-test",
+ "lastTriggered": {
+ "id": -1,
+ "version": "6.173.62",
+ "revision": {
+ "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
+ }
+ },
+ "upgrade": true,
+ "reason": "system-test completed",
+ "at": 1510830134259
+ },
+ "lastCompleted": {
+ "id": 1184,
+ "version": "6.173.62",
+ "revision": {
+ "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
+ }
+ },
+ "upgrade": true,
+ "reason": "system-test completed",
+ "at": 1510830684960
+ },
+ "lastSuccess": {
+ "id": 1184,
+ "version": "6.173.62",
+ "revision": {
+ "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
+ }
+ },
+ "upgrade": true,
+ "reason": "system-test completed",
+ "at": 1510830684960
+ }
+ },
+ {
+ "jobType": "component",
+ "lastCompleted": {
+ "id": 849,
+ "version": "6.174.156",
+ "upgrade": false,
+ "reason": "Application commit",
+ "at": 1511217733555
+ },
+ "lastSuccess": {
+ "id": 849,
+ "version": "6.174.156",
+ "upgrade": false,
+ "reason": "Application commit",
+ "at": 1511217733555
+ }
+ },
+ {
+ "jobType": "production-us-east-3",
+ "lastTriggered": {
+ "id": -1,
+ "version": "6.173.62",
+ "revision": {
+ "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
+ }
+ },
+ "upgrade": true,
+ "reason": "staging-test completed",
+ "at": 1510830685127
+ },
+ "lastCompleted": {
+ "id": 923,
+ "version": "6.173.62",
+ "revision": {
+ "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
+ }
+ },
+ "upgrade": true,
+ "reason": "staging-test completed",
+ "at": 1510837650046
+ },
+ "lastSuccess": {
+ "id": 923,
+ "version": "6.173.62",
+ "revision": {
+ "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
+ }
+ },
+ "upgrade": true,
+ "reason": "staging-test completed",
+ "at": 1510837650046
+ }
+ },
+ {
+ "jobType": "production-us-west-1",
+ "lastTriggered": {
+ "id": -1,
+ "version": "6.173.62",
+ "revision": {
+ "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
+ }
+ },
+ "upgrade": true,
+ "reason": "production-us-east-3 completed",
+ "at": 1510837650139
+ },
+ "lastCompleted": {
+ "id": 646,
+ "version": "6.173.62",
+ "revision": {
+ "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
+ }
+ },
+ "upgrade": true,
+ "reason": "production-us-east-3 completed",
+ "at": 1510843559162
+ },
+ "lastSuccess": {
+ "id": 646,
+ "version": "6.173.62",
+ "revision": {
+ "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
+ }
+ },
+ "upgrade": true,
+ "reason": "production-us-east-3 completed",
+ "at": 1510843559162
+ }
+ },
+ {
+ "jobType": "system-test",
+ "jobError": "unknown",
+ "lastTriggered": {
+ "id": -1,
+ "version": "6.173.62",
+ "revision": {
+ "applicationPackageHash": "ec548fa61cbfab7a270a51d46b1263ec1be5d9a8",
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "234f3e4e77049d0b9538c9e1b356d17eb1dedb6a"
+ }
+ },
+ "upgrade": false,
+ "reason": "Available change in component",
+ "at": 1511256608649
+ },
+ "lastCompleted": {
+ "id": 1686,
+ "version": "6.173.62",
+ "revision": {
+ "applicationPackageHash": "ec548fa61cbfab7a270a51d46b1263ec1be5d9a8",
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "234f3e4e77049d0b9538c9e1b356d17eb1dedb6a"
+ }
+ },
+ "upgrade": false,
+ "reason": "Available change in component",
+ "at": 1511256603353
+ },
+ "firstFailing": {
+ "id": 1659,
+ "version": "6.173.62",
+ "revision": {
+ "applicationPackageHash": "ec548fa61cbfab7a270a51d46b1263ec1be5d9a8",
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "234f3e4e77049d0b9538c9e1b356d17eb1dedb6a"
+ }
+ },
+ "upgrade": false,
+ "reason": "component completed",
+ "at": 1511219070725
+ },
+ "lastSuccess": {
+ "id": 1658,
+ "version": "6.173.62",
+ "revision": {
+ "applicationPackageHash": "9db423e1021d7b452d37ec6372bc757d9c1bda87",
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "49cd7bbb1ed9f4b922083cb042590b0885ffe22b"
+ }
+ },
+ "upgrade": true,
+ "reason": "Upgrading to 6.173.62",
+ "at": 1511175754163
+ }
+ }
+ ]
+ },
+ "deployingField": {
+ "buildNumber": 42,
+ "sourceRevision": {
+ "repositoryField": "git@git.host:user/repo.git",
+ "branchField": "origin/master",
+ "commitField": "234f3e4e77049d0b9538c9e1b356d17eb1dedb6a"
+ }
+ },
+ "outstandingChangeField": false,
+ "queryQuality": 100,
+ "writeQuality": 99.99894341115082,
+ "pemDeployKey": "-----BEGIN PUBLIC KEY-----\n∠( ᐛ 」∠)_\n-----END PUBLIC KEY-----",
+ "assignedRotations": [
+ {
+ "rotationId": "rotation-foo",
+ "clusterId": "qrs",
+ "endpointId": "default"
+ }
+ ],
+ "rotationStatus2": [
+ {
+ "rotationId": "rotation-foo",
+ "status": [
+ {
+ "environment": "prod",
+ "region": "us-east-3",
+ "state": "in"
+ }
+ ]
+ }
+ ]
+}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerControllerTester.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerControllerTester.java
index b8a0610bedd..2d8c937097a 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerControllerTester.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerControllerTester.java
@@ -11,7 +11,7 @@ import com.yahoo.vespa.athenz.api.AthenzDomain;
import com.yahoo.vespa.athenz.api.AthenzPrincipal;
import com.yahoo.vespa.athenz.api.AthenzUser;
import com.yahoo.vespa.athenz.api.OktaAccessToken;
-import com.yahoo.vespa.hosted.controller.Instance;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.api.application.v4.model.DeployOptions;
import com.yahoo.vespa.hosted.controller.api.identifiers.Property;
@@ -22,6 +22,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockBuildService;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
import com.yahoo.vespa.hosted.controller.api.integration.athenz.ApplicationAction;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.athenz.HostedAthenzIdentities;
import com.yahoo.vespa.hosted.controller.api.integration.athenz.AthenzClientFactoryMock;
import com.yahoo.vespa.hosted.controller.api.integration.athenz.AthenzDbMock;
@@ -64,11 +65,11 @@ public class ContainerControllerTester {
/** Returns the wrapped generic container tester */
public ContainerTester containerTester() { return containerTester; }
- public Instance createApplication() {
+ public Application createApplication() {
return createApplication("domain1","tenant1", "application1", "default");
}
- public Instance createApplication(String athensDomain, String tenant, String application, String instance) {
+ public Application createApplication(String athensDomain, String tenant, String application, String instance) {
AthenzDomain domain1 = addTenantAthenzDomain(athensDomain, "user");
AthenzPrincipal user = new AthenzPrincipal(new AthenzUser("user"));
AthenzCredentials credentials = new AthenzCredentials(user, domain1, new OktaAccessToken("okta-token"));
@@ -82,31 +83,31 @@ public class ContainerControllerTester {
return controller().applications().createApplication(app, Optional.of(credentials));
}
- public Instance deploy(Instance instance, ApplicationPackage applicationPackage, ZoneId zone) {
- controller().applications().deploy(instance.id(), zone, Optional.of(applicationPackage),
+ public void deploy(ApplicationId id, ApplicationPackage applicationPackage, ZoneId zone) {
+ controller().applications().deploy(id, zone, Optional.of(applicationPackage),
new DeployOptions(false, Optional.empty(), false, false));
- return instance;
}
- public void deployCompletely(Instance instance, ApplicationPackage applicationPackage, long projectId,
+ public void deployCompletely(Application application, ApplicationPackage applicationPackage, long projectId,
boolean failStaging) {
- jobCompletion(JobType.component).application(instance)
+ jobCompletion(JobType.component).application(application)
.projectId(projectId)
.uploadArtifact(applicationPackage)
.submit();
DeploymentSteps steps = controller().applications().deploymentTrigger().steps(applicationPackage.deploymentSpec());
+ // TODO jonmv: Connect instances from deployment spec to deployments below.
boolean succeeding = true;
for (var job : steps.jobs()) {
if (!succeeding) return;
var zone = job.zone(controller().system());
- deploy(instance, applicationPackage, zone);
+ deploy(application.id().defaultInstance(), applicationPackage, zone);
if (failStaging && zone.environment() == Environment.staging) {
succeeding = false;
}
if (zone.environment().isTest()) {
- controller().applications().deactivate(instance.id(), zone);
+ controller().applications().deactivate(application.id().defaultInstance(), zone);
}
- jobCompletion(job).application(instance).success(succeeding).projectId(projectId).submit();
+ jobCompletion(job).application(application).success(succeeding).projectId(projectId).submit();
}
}
@@ -128,13 +129,13 @@ public class ContainerControllerTester {
/*
* Authorize action on tenantDomain/application for a given screwdriverId
*/
- public void authorize(AthenzDomain tenantDomain, ScrewdriverId screwdriverId, ApplicationAction action, Instance instance) {
+ public void authorize(AthenzDomain tenantDomain, ScrewdriverId screwdriverId, ApplicationAction action, TenantAndApplicationId id) {
AthenzClientFactoryMock mock = (AthenzClientFactoryMock) containerTester.container().components()
.getComponent(AthenzClientFactoryMock.class.getName());
mock.getSetup()
.domains.get(tenantDomain)
- .applications.get(new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(instance.id().application().value()))
+ .applications.get(new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()))
.addRoleMember(action, HostedAthenzIdentities.from(screwdriverId));
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
index b2494be65b1..836e3c07763 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
@@ -21,6 +21,7 @@ import com.yahoo.vespa.athenz.api.AthenzIdentity;
import com.yahoo.vespa.athenz.api.AthenzUser;
import com.yahoo.vespa.athenz.api.OktaAccessToken;
import com.yahoo.vespa.config.SlimeUtils;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.LockedTenant;
import com.yahoo.vespa.hosted.controller.api.application.v4.EnvironmentResource;
@@ -49,6 +50,7 @@ import com.yahoo.vespa.hosted.controller.application.EndpointId;
import com.yahoo.vespa.hosted.controller.application.JobStatus;
import com.yahoo.vespa.hosted.controller.application.RoutingPolicy;
import com.yahoo.vespa.hosted.controller.api.integration.athenz.ApplicationAction;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.athenz.HostedAthenzIdentities;
import com.yahoo.vespa.hosted.controller.api.integration.athenz.AthenzClientFactoryMock;
import com.yahoo.vespa.hosted.controller.api.integration.athenz.AthenzDbMock;
@@ -318,7 +320,7 @@ public class ApplicationApiTest extends ControllerContainerTest {
new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(app2.application().value()));
// Trigger upgrade and then application change
- controllerTester.controller().applications().deploymentTrigger().triggerChange(app2, Change.of(Version.fromString("7.0")));
+ controllerTester.controller().applications().deploymentTrigger().triggerChange(TenantAndApplicationId.from(app2), Change.of(Version.fromString("7.0")));
controllerTester.jobCompletion(JobType.component)
.application(app2)
@@ -344,7 +346,7 @@ public class ApplicationApiTest extends ControllerContainerTest {
"{\"message\":\"Set major version to 7\"}");
// PATCH in a pem deploy key
- tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", PATCH)
+ tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", PATCH)
.userIdentity(USER_ID)
.data("{\"pemDeployKey\":\"-----BEGIN PUBLIC KEY-----\n∠( ᐛ 」∠)_\n-----END PUBLIC KEY-----\"}"),
"{\"message\":\"Set pem deploy key to -----BEGIN PUBLIC KEY-----\\n∠( ᐛ 」∠)_\\n-----END PUBLIC KEY-----\"}");
@@ -366,6 +368,12 @@ public class ApplicationApiTest extends ControllerContainerTest {
.data("{\"pemDeployKey\":null}"),
"{\"message\":\"Set pem deploy key to empty\"}");
+ // PATCH in removal of the pem deploy key on deprecated path
+ tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/instance/default", PATCH)
+ .userIdentity(USER_ID)
+ .data("{\"pemDeployKey\":null}"),
+ "{\"message\":\"Set pem deploy key to empty\"}");
+
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.userIdentity(USER_ID),
new File("application2.json"));
@@ -391,7 +399,7 @@ public class ApplicationApiTest extends ControllerContainerTest {
.userIdentity(USER_ID),
new File("deployment.json"));
- addIssues(controllerTester, ApplicationId.from("tenant1", "application1", "instance1"));
+ addIssues(controllerTester, TenantAndApplicationId.from("tenant1", "application1"));
// GET at root, with "&recursive=deployment", returns info about all tenants, their applications and their deployments
tester.assertResponse(request("/application/v4/", GET)
.userIdentity(USER_ID)
@@ -426,19 +434,19 @@ public class ApplicationApiTest extends ControllerContainerTest {
// DELETE (cancel) ongoing change
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(HOSTED_VESPA_OPERATOR),
- "{\"message\":\"Changed deployment from 'application change to 1.0.42-commit1' to 'no change' for application 'tenant1.application1.instance1'\"}");
+ "{\"message\":\"Changed deployment from 'application change to 1.0.42-commit1' to 'no change' for application 'tenant1.application1'\"}");
// DELETE (cancel) again is a no-op
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", DELETE)
.userIdentity(USER_ID)
.data("{\"cancel\":\"all\"}"),
- "{\"message\":\"No deployment in progress for application 'tenant1.application1.instance1' at this time\"}");
+ "{\"message\":\"No deployment in progress for application 'tenant1.application1' at this time\"}");
// POST pinning to a given version to an application
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1.0"),
- "{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
+ "{\"message\":\"Triggered pin to 6.1 for tenant1.application1\"}");
assertTrue("Action is logged to audit log",
tester.controller().auditLogger().readLog().entries().stream()
.anyMatch(entry -> entry.resource().equals("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin")));
@@ -450,7 +458,7 @@ public class ApplicationApiTest extends ControllerContainerTest {
// DELETE only the pin to a given version
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
- "{\"message\":\"Changed deployment from 'pin to 6.1' to 'upgrade to 6.1' for application 'tenant1.application1.instance1'\"}");
+ "{\"message\":\"Changed deployment from 'pin to 6.1' to 'upgrade to 6.1' for application 'tenant1.application1'\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":false}");
@@ -458,21 +466,21 @@ public class ApplicationApiTest extends ControllerContainerTest {
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", POST)
.userIdentity(USER_ID)
.data("6.1"),
- "{\"message\":\"Triggered pin to 6.1 for tenant1.application1.instance1\"}");
+ "{\"message\":\"Triggered pin to 6.1 for tenant1.application1\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"platform\":\"6.1\",\"pinned\":true}");
// DELETE only the version, but leave the pin
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/platform", DELETE)
.userIdentity(USER_ID),
- "{\"message\":\"Changed deployment from 'pin to 6.1' to 'pin to current platform' for application 'tenant1.application1.instance1'\"}");
+ "{\"message\":\"Changed deployment from 'pin to 6.1' to 'pin to current platform' for application 'tenant1.application1'\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{\"pinned\":true}");
// DELETE also the pin to a given version
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying/pin", DELETE)
.userIdentity(USER_ID),
- "{\"message\":\"Changed deployment from 'pin to current platform' to 'no change' for application 'tenant1.application1.instance1'\"}");
+ "{\"message\":\"Changed deployment from 'pin to current platform' to 'no change' for application 'tenant1.application1'\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/deploying", GET)
.userIdentity(USER_ID), "{}");
@@ -679,8 +687,8 @@ public class ApplicationApiTest extends ControllerContainerTest {
new File("tenant-without-applications.json"));
}
- private void addIssues(ContainerControllerTester tester, ApplicationId id) {
- tester.controller().applications().lockOrThrow(id, application ->
+ private void addIssues(ContainerControllerTester tester, TenantAndApplicationId id) {
+ tester.controller().applications().lockApplicationOrThrow(id, application ->
tester.controller().applications().store(application.withDeploymentIssueId(IssueId.from("123"))
.withOwnershipIssueId(IssueId.from("321"))
.withOwner(User.from("owner-username"))));
@@ -1238,12 +1246,12 @@ public class ApplicationApiTest extends ControllerContainerTest {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
configureAthenzIdentity(new com.yahoo.vespa.athenz.api.AthenzService(new AthenzDomain("another.domain"), "service"), true);
- Instance instance = controllerTester.createApplication(ATHENZ_TENANT_DOMAIN.getName(), "tenant1", "application1", "default");
+ Application application = controllerTester.createApplication(ATHENZ_TENANT_DOMAIN.getName(), "tenant1", "application1", "default");
ScrewdriverId screwdriverId = new ScrewdriverId(Long.toString(screwdriverProjectId));
- controllerTester.authorize(ATHENZ_TENANT_DOMAIN, screwdriverId, ApplicationAction.deploy, instance);
+ controllerTester.authorize(ATHENZ_TENANT_DOMAIN, screwdriverId, ApplicationAction.deploy, application.id());
controllerTester.jobCompletion(JobType.component)
- .application(instance.id())
+ .application(application)
.projectId(screwdriverProjectId)
.uploadArtifact(applicationPackage)
.submit();
@@ -1270,12 +1278,12 @@ public class ApplicationApiTest extends ControllerContainerTest {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
configureAthenzIdentity(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"), true);
- Instance instance = controllerTester.createApplication(ATHENZ_TENANT_DOMAIN.getName(), "tenant1", "application1", "default");
- controllerTester.authorize(ATHENZ_TENANT_DOMAIN, screwdriverId, ApplicationAction.deploy, instance);
+ Application application = controllerTester.createApplication(ATHENZ_TENANT_DOMAIN.getName(), "tenant1", "application1", "default");
+ controllerTester.authorize(ATHENZ_TENANT_DOMAIN, screwdriverId, ApplicationAction.deploy, application.id());
- // Allow systemtest to succeed by notifying completion of system test
+ // Allow systemtest to succeed by notifying completion of component
controllerTester.jobCompletion(JobType.component)
- .application(instance.id())
+ .application(application)
.projectId(screwdriverProjectId)
.uploadArtifact(applicationPackage)
.submit();
@@ -1367,12 +1375,12 @@ public class ApplicationApiTest extends ControllerContainerTest {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
configureAthenzIdentity(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"), false);
- Instance instance = controllerTester.createApplication(ATHENZ_TENANT_DOMAIN.getName(), "tenant1", "application1", "default");
- controllerTester.authorize(ATHENZ_TENANT_DOMAIN, screwdriverId, ApplicationAction.deploy, instance);
+ Application application = controllerTester.createApplication(ATHENZ_TENANT_DOMAIN.getName(), "tenant1", "application1", "default");
+ controllerTester.authorize(ATHENZ_TENANT_DOMAIN, screwdriverId, ApplicationAction.deploy, application.id());
// Allow systemtest to succeed by notifying completion of system test
controllerTester.jobCompletion(JobType.component)
- .application(instance.id())
+ .application(application)
.projectId(screwdriverProjectId)
.uploadArtifact(applicationPackage)
.submit();
@@ -1404,12 +1412,12 @@ public class ApplicationApiTest extends ControllerContainerTest {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
configureAthenzIdentity(new com.yahoo.vespa.athenz.api.AthenzService(ATHENZ_TENANT_DOMAIN, "service"), true);
- Instance instance = controllerTester.createApplication(ATHENZ_TENANT_DOMAIN.getName(), "tenant1", "application1", "default");
- controllerTester.authorize(ATHENZ_TENANT_DOMAIN, screwdriverId, ApplicationAction.deploy, instance);
+ Application application = controllerTester.createApplication(ATHENZ_TENANT_DOMAIN.getName(), "tenant1", "application1", "default");
+ controllerTester.authorize(ATHENZ_TENANT_DOMAIN, screwdriverId, ApplicationAction.deploy, application.id());
- // Allow systemtest to succeed by notifying completion of system test
+ // Allow systemtest to succeed by notifying completion of component
controllerTester.jobCompletion(JobType.component)
- .application(instance.id())
+ .application(application)
.projectId(screwdriverProjectId)
.uploadArtifact(applicationPackage)
.submit();
@@ -1430,7 +1438,7 @@ public class ApplicationApiTest extends ControllerContainerTest {
addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
tester.computeVersionStatus();
long projectId = 1;
- Instance app = controllerTester.createApplication();
+ Application app = controllerTester.createApplication();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-central-1")
@@ -1442,7 +1450,7 @@ public class ApplicationApiTest extends ControllerContainerTest {
.application(app)
.projectId(projectId);
job.type(JobType.component).uploadArtifact(applicationPackage).submit();
- controllerTester.deploy(app, applicationPackage, TEST_ZONE);
+ controllerTester.deploy(app.id().defaultInstance(), applicationPackage, TEST_ZONE);
job.type(JobType.systemTest).submit();
// Notifying about job started not by the controller fails
@@ -1452,7 +1460,7 @@ public class ApplicationApiTest extends ControllerContainerTest {
.get();
tester.assertResponse(request, "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Notified of completion " +
"of system-test for tenant1.application1, but that has not been triggered; last was " +
- controllerTester.controller().applications().require(app.id()).deploymentJobs().jobStatus().get(JobType.systemTest).lastTriggered().get().at() + "\"}", 400);
+ controllerTester.controller().applications().requireInstance(app.id().defaultInstance()).deploymentJobs().jobStatus().get(JobType.systemTest).lastTriggered().get().at() + "\"}", 400);
// Notifying about unknown job fails
request = request("/application/v4/tenant/tenant1/application/application1/jobreport", POST)
@@ -1465,14 +1473,14 @@ public class ApplicationApiTest extends ControllerContainerTest {
// ... and assert it was recorded
JobStatus recordedStatus =
- tester.controller().applications().get(app.id()).get().deploymentJobs().jobStatus().get(JobType.component);
+ tester.controller().applications().getInstance(app.id().defaultInstance()).get().deploymentJobs().jobStatus().get(JobType.component);
assertNotNull("Status was recorded", recordedStatus);
assertTrue(recordedStatus.isSuccess());
assertEquals(vespaVersion, recordedStatus.lastCompleted().get().platform());
recordedStatus =
- tester.controller().applications().get(app.id()).get().deploymentJobs().jobStatus().get(JobType.productionApNortheast2);
+ tester.controller().applications().getInstance(app.id().defaultInstance()).get().deploymentJobs().jobStatus().get(JobType.productionApNortheast2);
assertNull("Status of never-triggered jobs is empty", recordedStatus);
assertTrue("All jobs have been run", tester.controller().applications().deploymentTrigger().jobsToRun().isEmpty());
}
@@ -1482,7 +1490,7 @@ public class ApplicationApiTest extends ControllerContainerTest {
controllerTester.containerTester().computeVersionStatus();
long projectId = 1;
- Instance app = controllerTester.createApplication();
+ Application app = controllerTester.createApplication();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-central-1")
@@ -1494,35 +1502,34 @@ public class ApplicationApiTest extends ControllerContainerTest {
.projectId(projectId);
job.type(JobType.component).uploadArtifact(applicationPackage).submit();
- controllerTester.deploy(app, applicationPackage, TEST_ZONE);
+ controllerTester.deploy(app.id().defaultInstance(), applicationPackage, TEST_ZONE);
job.type(JobType.systemTest).submit();
- controllerTester.deploy(app, applicationPackage, STAGING_ZONE);
+ controllerTester.deploy(app.id().defaultInstance(), applicationPackage, STAGING_ZONE);
job.type(JobType.stagingTest).error(DeploymentJobs.JobError.outOfCapacity).submit();
// Appropriate error is recorded
- JobStatus jobStatus = tester.controller().applications().get(app.id())
- .get()
- .deploymentJobs()
- .jobStatus()
- .get(JobType.stagingTest);
+ JobStatus jobStatus = tester.controller().applications().getInstance(app.id().defaultInstance()).get()
+ .deploymentJobs()
+ .jobStatus()
+ .get(JobType.stagingTest);
assertFalse(jobStatus.isSuccess());
assertEquals(DeploymentJobs.JobError.outOfCapacity, jobStatus.jobError().get());
}
@Test
public void applicationWithRoutingPolicy() {
- Instance app = controllerTester.createApplication();
+ Application app = controllerTester.createApplication();
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.environment(Environment.prod)
.region("us-west-1")
.build();
controllerTester.deployCompletely(app, applicationPackage, 1, false);
- RoutingPolicy policy = new RoutingPolicy(app.id(),
+ RoutingPolicy policy = new RoutingPolicy(app.id().defaultInstance(),
ClusterSpec.Id.from("default"),
ZoneId.from(Environment.prod, RegionName.from("us-west-1")),
HostName.from("lb-0-canonical-name"),
Optional.of("dns-zone-1"), Set.of(EndpointId.of("c0")));
- tester.controller().curator().writeRoutingPolicies(app.id(), Set.of(policy));
+ tester.controller().curator().writeRoutingPolicies(app.id().defaultInstance(), Set.of(policy));
// GET application
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", GET)
@@ -1715,30 +1722,32 @@ public class ApplicationApiTest extends ControllerContainerTest {
* This sets these values as if the maintainers has been ran.
*/
private void setDeploymentMaintainedInfo(ContainerControllerTester controllerTester) {
- for (Instance instance : controllerTester.controller().applications().asList()) {
- controllerTester.controller().applications().lockOrThrow(instance.id(), lockedApplication -> {
+ for (Application application : controllerTester.controller().applications().asList()) {
+ controllerTester.controller().applications().lockApplicationOrThrow(application.id(), lockedApplication -> {
lockedApplication = lockedApplication.with(new ApplicationMetrics(0.5, 0.7));
- for (Deployment deployment : instance.deployments().values()) {
- Map<ClusterSpec.Id, ClusterInfo> clusterInfo = new HashMap<>();
- List<String> hostnames = new ArrayList<>();
- hostnames.add("host1");
- hostnames.add("host2");
- clusterInfo.put(ClusterSpec.Id.from("cluster1"),
- new ClusterInfo("flavor1", 37, 2, 4, 50,
- ClusterSpec.Type.content, hostnames));
- Map<ClusterSpec.Id, ClusterUtilization> clusterUtils = new HashMap<>();
- clusterUtils.put(ClusterSpec.Id.from("cluster1"), new ClusterUtilization(0.3, 0.6, 0.4, 0.3));
- DeploymentMetrics metrics = new DeploymentMetrics(1, 2, 3, 4, 5,
- Optional.of(Instant.ofEpochMilli(123123)), Map.of());
-
- lockedApplication = lockedApplication
- .withClusterInfo(deployment.zone(), clusterInfo)
- .withClusterUtilization(deployment.zone(), clusterUtils)
- .with(deployment.zone(), metrics)
- .recordActivityAt(Instant.parse("2018-06-01T10:15:30.00Z"), deployment.zone());
+ for (Instance instance : application.instances().values()) {
+ for (Deployment deployment : instance.deployments().values()) {
+ Map<ClusterSpec.Id, ClusterInfo> clusterInfo = new HashMap<>();
+ List<String> hostnames = new ArrayList<>();
+ hostnames.add("host1");
+ hostnames.add("host2");
+ clusterInfo.put(ClusterSpec.Id.from("cluster1"),
+ new ClusterInfo("flavor1", 37, 2, 4, 50,
+ ClusterSpec.Type.content, hostnames));
+ Map<ClusterSpec.Id, ClusterUtilization> clusterUtils = new HashMap<>();
+ clusterUtils.put(ClusterSpec.Id.from("cluster1"), new ClusterUtilization(0.3, 0.6, 0.4, 0.3));
+ DeploymentMetrics metrics = new DeploymentMetrics(1, 2, 3, 4, 5,
+ Optional.of(Instant.ofEpochMilli(123123)), Map.of());
+
+ lockedApplication = lockedApplication.with(instance.name(),
+ lockedInstance -> lockedInstance.withClusterInfo(deployment.zone(), clusterInfo)
+ .withClusterUtilization(deployment.zone(), clusterUtils)
+ .with(deployment.zone(), metrics)
+ .recordActivityAt(Instant.parse("2018-06-01T10:15:30.00Z"), deployment.zone()));
+ }
+ controllerTester.controller().applications().store(lockedApplication);
}
- controllerTester.controller().applications().store(lockedApplication);
});
}
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelperTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelperTest.java
index 043ce712636..fb5f28e1963 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelperTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelperTest.java
@@ -33,7 +33,7 @@ import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobTy
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.stagingTest;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.systemTest;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterCloud.Status.FAILURE;
-import static com.yahoo.vespa.hosted.controller.deployment.InternalDeploymentTester.appId;
+import static com.yahoo.vespa.hosted.controller.deployment.InternalDeploymentTester.instanceId;
import static com.yahoo.vespa.hosted.controller.deployment.InternalDeploymentTester.applicationPackage;
import static com.yahoo.vespa.hosted.controller.deployment.InternalDeploymentTester.testerId;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.deploymentFailed;
@@ -55,7 +55,7 @@ public class JobControllerApiHandlerHelperTest {
// Revision 1 gets deployed everywhere.
ApplicationVersion revision1 = tester.deployNewSubmission();
- assertEquals(2, tester.app().deploymentJobs().projectId().getAsLong());
+ assertEquals(2, tester.instance().deploymentJobs().projectId().getAsLong());
tester.clock().advance(Duration.ofMillis(1000));
// Revision 2 gets deployed everywhere except in us-east-3.
@@ -69,20 +69,20 @@ public class JobControllerApiHandlerHelperTest {
// us-east-3 eats the deployment failure and fails before deployment, while us-west-1 fails after.
tester.configServer().throwOnNextPrepare(new ConfigServerException(URI.create("url"), "ERROR!", INVALID_APPLICATION_PACKAGE, null));
tester.runner().run();
- assertEquals(deploymentFailed, tester.jobs().last(appId, productionUsEast3).get().status());
+ assertEquals(deploymentFailed, tester.jobs().last(instanceId, productionUsEast3).get().status());
ZoneId usWest1 = productionUsWest1.zone(tester.tester().controller().system());
- tester.configServer().convergeServices(appId, usWest1);
+ tester.configServer().convergeServices(instanceId, usWest1);
tester.configServer().convergeServices(testerId.id(), usWest1);
- tester.setEndpoints(appId, usWest1);
+ tester.setEndpoints(instanceId, usWest1);
tester.setEndpoints(testerId.id(), usWest1);
tester.runner().run();
tester.cloud().set(FAILURE);
tester.runner().run();
- assertEquals(testFailure, tester.jobs().last(appId, productionUsWest1).get().status());
- assertEquals(revision2, tester.app().deployments().get(productionUsCentral1.zone(tester.tester().controller().system())).applicationVersion());
- assertEquals(revision1, tester.app().deployments().get(productionUsEast3.zone(tester.tester().controller().system())).applicationVersion());
- assertEquals(revision2, tester.app().deployments().get(productionUsWest1.zone(tester.tester().controller().system())).applicationVersion());
+ assertEquals(testFailure, tester.jobs().last(instanceId, productionUsWest1).get().status());
+ assertEquals(revision2, tester.instance().deployments().get(productionUsCentral1.zone(tester.tester().controller().system())).applicationVersion());
+ assertEquals(revision1, tester.instance().deployments().get(productionUsEast3.zone(tester.tester().controller().system())).applicationVersion());
+ assertEquals(revision2, tester.instance().deployments().get(productionUsWest1.zone(tester.tester().controller().system())).applicationVersion());
tester.clock().advance(Duration.ofMillis(1000));
@@ -93,17 +93,17 @@ public class JobControllerApiHandlerHelperTest {
tester.tester().readyJobTrigger().maintain(); // Starts a run for us-central-1.
tester.tester().readyJobTrigger().maintain(); // Starts a new staging test run.
tester.runner().run();
- assertEquals(running, tester.jobs().last(appId, productionUsCentral1).get().status());
- assertEquals(running, tester.jobs().last(appId, stagingTest).get().status());
+ assertEquals(running, tester.jobs().last(instanceId, productionUsCentral1).get().status());
+ assertEquals(running, tester.jobs().last(instanceId, stagingTest).get().status());
// Staging is expired, and the job fails and won't be retried immediately.
- tester.tester().controller().applications().deactivate(appId, stagingTest.zone(tester.tester().controller().system()));
+ tester.tester().controller().applications().deactivate(instanceId, stagingTest.zone(tester.tester().controller().system()));
tester.runner().run();
- assertEquals(installationFailed, tester.jobs().last(appId, stagingTest).get().status());
+ assertEquals(installationFailed, tester.jobs().last(instanceId, stagingTest).get().status());
tester.clock().advance(Duration.ofMillis(100_000)); // More than the minute within which there are immediate retries.
tester.tester().readyJobTrigger().maintain();
- assertEquals(installationFailed, tester.jobs().last(appId, stagingTest).get().status());
+ assertEquals(installationFailed, tester.jobs().last(instanceId, stagingTest).get().status());
// System upgrades to a new version, which won't yet start.
Version platform = new Version("7.1");
@@ -114,13 +114,13 @@ public class JobControllerApiHandlerHelperTest {
// Only us-east-3 is verified, on revision1.
// staging-test has 4 runs: one success without sources on revision1, one success from revision1 to revision2,
// one success from revision2 to revision3 and one failure from revision1 to revision3.
- assertResponse(JobControllerApiHandlerHelper.runResponse(tester.jobs().runs(appId, stagingTest), URI.create("https://some.url:43/root")), "staging-runs.json");
- assertResponse(JobControllerApiHandlerHelper.runDetailsResponse(tester.jobs(), tester.jobs().last(appId, productionUsEast3).get().id(), "0"), "us-east-3-log-without-first.json");
- assertResponse(JobControllerApiHandlerHelper.jobTypeResponse(tester.tester().controller(), appId, URI.create("https://some.url:43/root/")), "overview.json");
+ assertResponse(JobControllerApiHandlerHelper.runResponse(tester.jobs().runs(instanceId, stagingTest), URI.create("https://some.url:43/root")), "staging-runs.json");
+ assertResponse(JobControllerApiHandlerHelper.runDetailsResponse(tester.jobs(), tester.jobs().last(instanceId, productionUsEast3).get().id(), "0"), "us-east-3-log-without-first.json");
+ assertResponse(JobControllerApiHandlerHelper.jobTypeResponse(tester.tester().controller(), instanceId, URI.create("https://some.url:43/root/")), "overview.json");
- tester.jobs().deploy(appId, JobType.devAwsUsEast2a, Optional.empty(), applicationPackage);
+ tester.jobs().deploy(instanceId, JobType.devAwsUsEast2a, Optional.empty(), applicationPackage);
tester.runJob(JobType.devAwsUsEast2a);
- assertResponse(JobControllerApiHandlerHelper.runResponse(tester.jobs().runs(appId, devAwsUsEast2a), URI.create("https://some.url:43/root")), "dev-aws-us-east-2a-runs.json");
+ assertResponse(JobControllerApiHandlerHelper.runResponse(tester.jobs().runs(instanceId, devAwsUsEast2a), URI.create("https://some.url:43/root")), "dev-aws-us-east-2a-runs.json");
}
@Test
@@ -129,17 +129,17 @@ public class JobControllerApiHandlerHelperTest {
tester.clock().setInstant(Instant.EPOCH);
ZoneId zone = JobType.devUsEast1.zone(tester.tester().controller().system());
- tester.jobs().deploy(appId, JobType.devUsEast1, Optional.empty(), applicationPackage);
+ tester.jobs().deploy(instanceId, JobType.devUsEast1, Optional.empty(), applicationPackage);
tester.configServer().setLogStream("1554970337.935104\t17491290-v6-1.ostk.bm2.prod.ne1.yahoo.com\t5480\tcontainer\tstdout\tinfo\tERROR: Bundle canary-application [71] Unable to get module class path. (java.lang.NullPointerException)\n");
- assertResponse(JobControllerApiHandlerHelper.runDetailsResponse(tester.jobs(), tester.jobs().last(appId, devUsEast1).get().id(), null), "dev-us-east-1-log-first-part.json");
+ assertResponse(JobControllerApiHandlerHelper.runDetailsResponse(tester.jobs(), tester.jobs().last(instanceId, devUsEast1).get().id(), null), "dev-us-east-1-log-first-part.json");
tester.configServer().setLogStream("Nope, this won't be logged");
- tester.configServer().convergeServices(appId, zone);
- tester.setEndpoints(appId, zone);
+ tester.configServer().convergeServices(instanceId, zone);
+ tester.setEndpoints(instanceId, zone);
tester.runner().run();
- assertResponse(JobControllerApiHandlerHelper.jobTypeResponse(tester.tester().controller(), appId, URI.create("https://some.url:43/root")), "dev-overview.json");
- assertResponse(JobControllerApiHandlerHelper.runDetailsResponse(tester.jobs(), tester.jobs().last(appId, devUsEast1).get().id(), "9"), "dev-us-east-1-log-second-part.json");
+ assertResponse(JobControllerApiHandlerHelper.jobTypeResponse(tester.tester().controller(), instanceId, URI.create("https://some.url:43/root")), "dev-overview.json");
+ assertResponse(JobControllerApiHandlerHelper.runDetailsResponse(tester.jobs(), tester.jobs().last(instanceId, devUsEast1).get().id(), "9"), "dev-us-east-1-log-second-part.json");
}
@Test
@@ -149,11 +149,11 @@ public class JobControllerApiHandlerHelperTest {
var region = "us-west-1";
var applicationPackage = new ApplicationPackageBuilder().region(region).build();
// Deploy directly to production zone, like integration tests.
- tester.tester().controller().applications().deploy(tester.app().id(), ZoneId.from("prod", region),
+ tester.tester().controller().applications().deploy(tester.instance().id(), ZoneId.from("prod", region),
Optional.of(applicationPackage),
new DeployOptions(true, Optional.empty(),
false, false));
- assertResponse(JobControllerApiHandlerHelper.jobTypeResponse(tester.tester().controller(), appId, URI.create("https://some.url:43/root/")),
+ assertResponse(JobControllerApiHandlerHelper.jobTypeResponse(tester.tester().controller(), instanceId, URI.create("https://some.url:43/root/")),
"jobs-direct-deployment.json");
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/delete-with-active-deployments.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/delete-with-active-deployments.json
index ad7e4f00027..c9ed2ad3391 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/delete-with-active-deployments.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/delete-with-active-deployments.json
@@ -1,4 +1,4 @@
{
"error-code": "BAD_REQUEST",
- "message": "Could not delete 'application 'tenant1.application1.instance1'': It has active deployments in: dev.us-west-1, prod.us-central-1"
+ "message": "Could not delete 'application 'tenant1.application1'': It has active deployments in: dev.us-west-1, prod.us-central-1"
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/BadgeApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/BadgeApiTest.java
index 09555dd9f2e..8db759d855d 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/BadgeApiTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/BadgeApiTest.java
@@ -2,7 +2,7 @@ package com.yahoo.vespa.hosted.controller.restapi.deployment;
import com.yahoo.config.provision.AthenzService;
import com.yahoo.config.provision.Environment;
-import com.yahoo.vespa.hosted.controller.Instance;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.SourceRevision;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
@@ -20,13 +20,13 @@ public class BadgeApiTest extends ControllerContainerTest {
@Test
public void testBadgeApi() {
ContainerControllerTester tester = new ContainerControllerTester(container, responseFiles);
- Instance instance = tester.createApplication("domain", "tenant", "application", "default");
+ Application application = tester.createApplication("domain", "tenant", "application", "default");
ApplicationPackage packageWithService = new ApplicationPackageBuilder()
.environment(Environment.prod)
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
.region("us-west-1")
.build();
- tester.controller().jobController().submit(instance.id(),
+ tester.controller().jobController().submit(application.id().defaultInstance(),
new SourceRevision("repository", "branch", "commit"),
"foo@bar",
123,
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/DeploymentApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/DeploymentApiTest.java
index adf48ea37cb..084b235943e 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/DeploymentApiTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/DeploymentApiTest.java
@@ -6,6 +6,7 @@ import com.yahoo.component.Version;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.zone.ZoneId;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
@@ -39,15 +40,15 @@ public class DeploymentApiTest extends ControllerContainerTest {
.build();
// 3 applications deploy on current system version
- Instance failingInstance = tester.createApplication("domain1", "tenant1", "application1", "default");
- Instance productionInstance = tester.createApplication("domain2", "tenant2", "application2", "default");
- Instance instanceWithoutDeployment = tester.createApplication("domain3", "tenant3", "application3", "default");
+ Application failingInstance = tester.createApplication("domain1", "tenant1", "application1", "default");
+ Application productionInstance = tester.createApplication("domain2", "tenant2", "application2", "default");
+ Application instanceWithoutDeployment = tester.createApplication("domain3", "tenant3", "application3", "default");
tester.deployCompletely(failingInstance, applicationPackage, 1L, false);
tester.deployCompletely(productionInstance, applicationPackage, 2L, false);
// Deploy once so that job information is stored, then remove the deployment
tester.deployCompletely(instanceWithoutDeployment, applicationPackage, 3L, false);
- tester.controller().applications().deactivate(instanceWithoutDeployment.id(), ZoneId.from("prod", "us-west-1"));
+ tester.controller().applications().deactivate(instanceWithoutDeployment.id().defaultInstance(), ZoneId.from("prod", "us-west-1"));
// New version released
version = Version.fromString("5.1");
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/filter/SignatureFilterTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/filter/SignatureFilterTest.java
index 820aa61617f..3b7d55f8cef 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/filter/SignatureFilterTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/filter/SignatureFilterTest.java
@@ -10,6 +10,7 @@ import com.yahoo.vespa.hosted.controller.ApplicationController;
import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.vespa.hosted.controller.api.role.Role;
import com.yahoo.vespa.hosted.controller.api.role.SecurityContext;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.restapi.ApplicationRequestToDiscFilterRequestWrapper;
import org.junit.Before;
import org.junit.Test;
@@ -42,7 +43,8 @@ public class SignatureFilterTest {
"PbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" +
"-----END EC PRIVATE KEY-----\n";
- private static final ApplicationId id = ApplicationId.from("my-tenant", "my-app", "default");
+ private static final TenantAndApplicationId appId = TenantAndApplicationId.from("my-tenant", "my-app");
+ private static final ApplicationId id = appId.defaultInstance();
private ControllerTester tester;
private ApplicationController applications;
@@ -77,12 +79,12 @@ public class SignatureFilterTest {
assertNull(signed.getAttribute(SecurityContext.ATTRIBUTE_NAME));
// Signed request gets no role when a non-matching key is stored for the application.
- applications.lockOrThrow(id, application -> applications.store(application.withPemDeployKey(otherPublicKey)));
+ applications.lockApplicationOrThrow(appId, application -> applications.store(application.withPemDeployKey(otherPublicKey)));
filter.filter(signed);
assertNull(signed.getAttribute(SecurityContext.ATTRIBUTE_NAME));
// Signed request gets a build service role when a matching key is stored for the application.
- applications.lockOrThrow(id, application -> applications.store(application.withPemDeployKey(publicKey)));
+ applications.lockApplicationOrThrow(appId, application -> applications.store(application.withPemDeployKey(publicKey)));
assertTrue(filter.filter(signed).isEmpty());
SecurityContext securityContext = (SecurityContext) signed.getAttribute(SecurityContext.ATTRIBUTE_NAME);
assertEquals("buildService@my-tenant.my-app", securityContext.principal().getName());
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepositoryTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepositoryTest.java
index 78f36fdf4a3..25ae7c88e84 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepositoryTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepositoryTest.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.hosted.controller.rotation;
import com.yahoo.config.provision.SystemName;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
@@ -50,27 +51,29 @@ public class RotationRepositoryTest {
private DeploymentTester tester;
private RotationRepository repository;
+ private Application application;
private Instance instance;
@Before
public void before() {
tester = new DeploymentTester(new ControllerTester(rotationsConfig));
repository = tester.controller().applications().rotationRepository();
- instance = tester.createApplication("app1", "tenant1", 11L, 1L);
+ application = tester.createApplication("app1", "tenant1", 11L, 1L);
+ instance = tester.defaultInstance(application.id());
}
@Test
public void assigns_and_reuses_rotation() {
// Deploying assigns a rotation
- tester.deployCompletely(instance, applicationPackage);
+ tester.deployCompletely(application, applicationPackage);
Rotation expected = new Rotation(new RotationId("foo-1"), "foo-1.com");
- instance = tester.applications().require(instance.id());
+ instance = tester.applications().requireInstance(instance.id());
assertEquals(List.of(expected.id()), rotationIds(instance.rotations()));
assertEquals(URI.create("https://app1--tenant1.global.vespa.oath.cloud:4443/"),
instance.endpointsIn(SystemName.main).main().get().url());
try (RotationLock lock = repository.lock()) {
- Rotation rotation = repository.getOrAssignRotation(tester.applications().require(instance.id()), lock);
+ Rotation rotation = repository.getOrAssignRotation(application.deploymentSpec(), tester.applications().requireInstance(instance.id()), lock);
assertEquals(expected, rotation);
}
@@ -81,21 +84,21 @@ public class RotationRepositoryTest {
.region("us-west-1")
.searchDefinition("search foo { }") // Update application package so there is something to deploy
.build();
- tester.deployCompletely(instance, applicationPackage, 43);
- assertEquals(List.of(expected.id()), rotationIds(tester.applications().require(instance.id()).rotations()));
+ tester.deployCompletely(application, applicationPackage, 43);
+ assertEquals(List.of(expected.id()), rotationIds(tester.applications().requireInstance(instance.id()).rotations()));
}
@Test
public void strips_whitespace_in_rotation_fqdn() {
DeploymentTester tester = new DeploymentTester(new ControllerTester(rotationsConfigWhitespaces));
RotationRepository repository = tester.controller().applications().rotationRepository();
- Instance instance = tester.createApplication("app2", "tenant2", 22L,
- 2L);
- tester.deployCompletely(instance, applicationPackage);
- instance = tester.applications().require(instance.id());
+ Application application2 = tester.createApplication("app2", "tenant2", 22L, 2L);
+
+ tester.deployCompletely(application2, applicationPackage);
+ Instance instance2 = tester.defaultInstance(application2.id());
try (RotationLock lock = repository.lock()) {
- Rotation rotation = repository.getOrAssignRotation(instance, lock);
+ Rotation rotation = repository.getOrAssignRotation(application2.deploymentSpec(), instance2, lock);
Rotation assignedRotation = new Rotation(new RotationId("foo-1"), "foo-1.com");
assertEquals(assignedRotation, rotation);
}
@@ -104,19 +107,17 @@ public class RotationRepositoryTest {
@Test
public void out_of_rotations() {
// Assigns 1 rotation
- tester.deployCompletely(instance, applicationPackage);
+ tester.deployCompletely(application, applicationPackage);
// Assigns 1 more
- Instance instance2 = tester.createApplication("app2", "tenant2", 22L,
- 2L);
- tester.deployCompletely(instance2, applicationPackage);
+ Application application2 = tester.createApplication("app2", "tenant2", 22L, 2L);
+ tester.deployCompletely(application2, applicationPackage);
// We're now out of rotations
thrown.expect(IllegalStateException.class);
thrown.expectMessage("no rotations available");
- Instance instance3 = tester.createApplication("app3", "tenant3", 33L,
- 3L);
- tester.deployCompletely(instance3, applicationPackage);
+ Application application3 = tester.createApplication("app3", "tenant3", 33L, 3L);
+ tester.deployCompletely(application3, applicationPackage);
}
@Test
@@ -125,11 +126,10 @@ public class RotationRepositoryTest {
.globalServiceId("foo")
.region("us-east-3")
.build();
- Instance instance = tester.createApplication("app2", "tenant2", 22L,
- 2L);
+ Application application2 = tester.createApplication("app2", "tenant2", 22L, 2L);
thrown.expect(RuntimeException.class);
thrown.expectMessage("less than 2 prod zones are defined");
- tester.deployCompletely(instance, applicationPackage);
+ tester.deployCompletely(application2, applicationPackage);
}
@Test
@@ -138,9 +138,8 @@ public class RotationRepositoryTest {
.region("us-east-3")
.region("us-west-1")
.build();
- tester.deployCompletely(instance, applicationPackage);
- Instance app = tester.applications().require(instance.id());
- assertTrue(app.rotations().isEmpty());
+ tester.deployCompletely(application, applicationPackage);
+ assertTrue(tester.defaultInstance(application.id()).rotations().isEmpty());
}
@Test
@@ -150,11 +149,10 @@ public class RotationRepositoryTest {
.region("us-east-3")
.region("us-west-1")
.build();
- Instance instance = tester.createApplication("app2", "tenant2", 22L,
- 2L);
- tester.deployCompletely(instance, applicationPackage);
- assertEquals(List.of(new RotationId("foo-1")), rotationIds(tester.applications().require(instance.id()).rotations()));
- assertEquals("https://cd--app2--tenant2.global.vespa.oath.cloud:4443/", tester.applications().require(instance.id())
+ Application application2 = tester.createApplication("app2", "tenant2", 22L, 2L);
+ tester.deployCompletely(application2, applicationPackage);
+ assertEquals(List.of(new RotationId("foo-1")), rotationIds(tester.defaultInstance(application2.id()).rotations()));
+ assertEquals("https://cd--app2--tenant2.global.vespa.oath.cloud:4443/", tester.defaultInstance(application2.id())
.endpointsIn(SystemName.cd).main().get().url().toString());
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java
index 3d12fe382ce..ba8309de286 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java
@@ -7,6 +7,7 @@ import com.yahoo.component.Vtag;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.zone.ZoneApi;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.ControllerTester;
@@ -135,9 +136,9 @@ public class VersionStatusTest {
tester.upgradeSystem(version1);
// Setup applications
- Instance app1 = tester.createAndDeploy("app1", 11, applicationPackage);
- Instance app2 = tester.createAndDeploy("app2", 22, applicationPackage);
- Instance app3 = tester.createAndDeploy("app3", 33, applicationPackage);
+ Application app1 = tester.createAndDeploy("app1", 11, applicationPackage);
+ Application app2 = tester.createAndDeploy("app2", 22, applicationPackage);
+ Application app3 = tester.createAndDeploy("app3", 33, applicationPackage);
// version2 is released
tester.upgradeSystem(version2);
@@ -157,13 +158,13 @@ public class VersionStatusTest {
VespaVersion v1 = versions.get(0);
assertEquals(version1, v1.versionNumber());
assertEquals("No applications are failing on version1.", ImmutableSet.of(), v1.statistics().failing());
- assertEquals("All applications have at least one active production deployment on version 1.", ImmutableSet.of(app1.id(), app2.id(), app3.id()), v1.statistics().production());
+ assertEquals("All applications have at least one active production deployment on version 1.", ImmutableSet.of(app1.id().defaultInstance(), app2.id().defaultInstance(), app3.id().defaultInstance()), v1.statistics().production());
assertEquals("No applications have active deployment jobs on version1.", ImmutableSet.of(), v1.statistics().deploying());
VespaVersion v2 = versions.get(1);
assertEquals(version2, v2.versionNumber());
- assertEquals("All applications have failed on version2 in at least one zone.", ImmutableSet.of(app1.id(), app2.id(), app3.id()), v2.statistics().failing());
- assertEquals("Only app2 has successfully deployed to production on version2.", ImmutableSet.of(app2.id()), v2.statistics().production());
+ assertEquals("All applications have failed on version2 in at least one zone.", ImmutableSet.of(app1.id().defaultInstance(), app2.id().defaultInstance(), app3.id().defaultInstance()), v2.statistics().failing());
+ assertEquals("Only app2 has successfully deployed to production on version2.", ImmutableSet.of(app2.id().defaultInstance()), v2.statistics().production());
// Should test the below, but can't easily be done with current test framework. This test passes in DeploymentApiTest.
// assertEquals("All applications are being retried on version2.", ImmutableSet.of(app1.id(), app2.id(), app3.id()), v2.statistics().deploying());
}
@@ -175,25 +176,25 @@ public class VersionStatusTest {
tester.upgradeSystem(version0);
// Setup applications - all running on version0
- Instance canary0 = tester.createAndDeploy("canary0", 1, "canary");
- Instance canary1 = tester.createAndDeploy("canary1", 2, "canary");
- Instance canary2 = tester.createAndDeploy("canary2", 3, "canary");
- Instance default0 = tester.createAndDeploy("default0", 4, "default");
- Instance default1 = tester.createAndDeploy("default1", 5, "default");
- Instance default2 = tester.createAndDeploy("default2", 6, "default");
- Instance default3 = tester.createAndDeploy("default3", 7, "default");
- Instance default4 = tester.createAndDeploy("default4", 8, "default");
- Instance default5 = tester.createAndDeploy("default5", 9, "default");
- Instance default6 = tester.createAndDeploy("default6", 10, "default");
- Instance default7 = tester.createAndDeploy("default7", 11, "default");
- Instance default8 = tester.createAndDeploy("default8", 12, "default");
- Instance default9 = tester.createAndDeploy("default9", 13, "default");
- Instance conservative0 = tester.createAndDeploy("conservative1", 14, "conservative");
+ Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
+ Application canary1 = tester.createAndDeploy("canary1", 2, "canary");
+ Application canary2 = tester.createAndDeploy("canary2", 3, "canary");
+ Application default0 = tester.createAndDeploy("default0", 4, "default");
+ Application default1 = tester.createAndDeploy("default1", 5, "default");
+ Application default2 = tester.createAndDeploy("default2", 6, "default");
+ Application default3 = tester.createAndDeploy("default3", 7, "default");
+ Application default4 = tester.createAndDeploy("default4", 8, "default");
+ Application default5 = tester.createAndDeploy("default5", 9, "default");
+ Application default6 = tester.createAndDeploy("default6", 10, "default");
+ Application default7 = tester.createAndDeploy("default7", 11, "default");
+ Application default8 = tester.createAndDeploy("default8", 12, "default");
+ Application default9 = tester.createAndDeploy("default9", 13, "default");
+ Application conservative0 = tester.createAndDeploy("conservative1", 14, "conservative");
// Applications that do not affect confidence calculation:
// Application without deployment
- Instance ignored0 = tester.createApplication("ignored0", "tenant1", 1000, 1000L);
+ Application ignored0 = tester.createApplication("ignored0", "tenant1", 1000, 1000L);
assertEquals("All applications running on this version: High",
Confidence.high, confidence(tester.controller(), version0));
@@ -310,7 +311,7 @@ public class VersionStatusTest {
tester.upgradeSystem(version0);
// Create and deploy application on current version
- Instance app = tester.createAndDeploy("app", 1, "canary");
+ Application app = tester.createAndDeploy("app", 1, "canary");
tester.computeVersionStatus();
assertEquals(Confidence.high, confidence(tester.controller(), version0));
@@ -368,9 +369,9 @@ public class VersionStatusTest {
assertEquals(5, tester.hourOfDayAfter(Duration.ZERO));
Version version0 = Version.fromString("7.1");
tester.upgradeSystem(version0);
- Instance canary0 = tester.createAndDeploy("canary0", 1, "canary");
- Instance canary1 = tester.createAndDeploy("canary1", 1, "canary");
- Instance default0 = tester.createAndDeploy("default0", 1, "default");
+ Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
+ Application canary1 = tester.createAndDeploy("canary1", 1, "canary");
+ Application default0 = tester.createAndDeploy("default0", 1, "default");
tester.computeVersionStatus();
assertSame(Confidence.high, tester.controller().versionStatus().version(version0).confidence());
diff --git a/dist/vespa.spec b/dist/vespa.spec
index 2d6e8cbfd13..b55d7e4125a 100644
--- a/dist/vespa.spec
+++ b/dist/vespa.spec
@@ -57,13 +57,13 @@ BuildRequires: gtest-devel
BuildRequires: gmock-devel
%endif
%if 0%{?fc31}
-BuildRequires: llvm-devel >= 8.0.0
+BuildRequires: llvm-devel >= 9.0.0
BuildRequires: boost-devel >= 1.69
BuildRequires: gtest-devel
BuildRequires: gmock-devel
%endif
%if 0%{?fc32}
-BuildRequires: llvm-devel >= 8.0.0
+BuildRequires: llvm-devel >= 9.0.0
BuildRequires: boost-devel >= 1.69
BuildRequires: gtest-devel
BuildRequires: gmock-devel
@@ -133,12 +133,12 @@ Requires: llvm-libs >= 8.0.0
%define _vespa_llvm_version 8
%endif
%if 0%{?fc31}
-Requires: llvm-libs >= 8.0.0
-%define _vespa_llvm_version 8
+Requires: llvm-libs >= 9.0.0
+%define _vespa_llvm_version 9
%endif
%if 0%{?fc32}
-Requires: llvm-libs >= 8.0.0
-%define _vespa_llvm_version 8
+Requires: llvm-libs >= 9.0.0
+%define _vespa_llvm_version 9
%endif
%define _extra_link_directory %{_vespa_deps_prefix}/lib64
%define _extra_include_directory %{_vespa_deps_prefix}/include
diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/CreateContainerCommandImpl.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/CreateContainerCommandImpl.java
index 2e5cfab36cc..ed623c82259 100644
--- a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/CreateContainerCommandImpl.java
+++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/CreateContainerCommandImpl.java
@@ -170,6 +170,9 @@ class CreateContainerCommandImpl implements Docker.CreateContainerCommand {
.withSecurityOpts(new ArrayList<>(securityOpts))
.withBinds(volumeBinds)
.withUlimits(ulimits)
+ // Docker version 1.13.1 patch 94 changed default pids.max for the Docker container's cgroup
+ // from max to 4096. -1L reinstates "max". File: /sys/fs/cgroup/pids/docker/CONTAINERID/pids.max.
+ .withPidsLimit(-1L)
.withCapAdd(addCapabilities.toArray(new Capability[0]))
.withCapDrop(dropCapabilities.toArray(new Capability[0]))
.withPrivileged(privileged);
@@ -240,6 +243,7 @@ class CreateContainerCommandImpl implements Docker.CreateContainerCommand {
toOptionalOption("--memory", containerResources.map(ContainerResources::memoryBytes)),
toRepeatedOption("--label", labelList),
toRepeatedOption("--ulimit", ulimitList),
+ "--pids-limit -1",
toRepeatedOption("--env", environmentAssignments),
toRepeatedOption("--volume", volumeBindSpecs),
toRepeatedOption("--cap-add", addCapabilitiesList),
diff --git a/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/CreateContainerCommandImplTest.java b/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/CreateContainerCommandImplTest.java
index 7d41d873be2..3b8b0a84e64 100644
--- a/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/CreateContainerCommandImplTest.java
+++ b/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/CreateContainerCommandImplTest.java
@@ -49,6 +49,7 @@ public class CreateContainerCommandImplTest {
"--label my-label=test-label " +
"--ulimit nofile=1:2 " +
"--ulimit nproc=10:20 " +
+ "--pids-limit -1 " +
"--env env1=val1 " +
"--env env2=val2 " +
"--volume vol1:/host/vol1:Z " +
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
index bb3865f2043..54531aeb0d7 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
@@ -80,6 +80,12 @@ public class Flags {
"Takes effect on next node agent tick. Change is orchestrated, but does NOT require container restart",
HOSTNAME, APPLICATION_ID);
+ public static final UnboundBooleanFlag INCLUDE_SIS_IN_TRUSTSTORE = defineFeatureFlag(
+ "include-sis-in-truststore", false,
+ "Whether to use the trust store backed by Athenz and Service Identity certificates.",
+ "Takes effect on next tick, but may get throttled due to orchestration.",
+ HOSTNAME);
+
public static final UnboundStringFlag TLS_INSECURE_MIXED_MODE = defineStringFlag(
"tls-insecure-mixed-mode", "tls_client_mixed_server",
"TLS insecure mixed mode. Allowed values: ['plaintext_client_mixed_server', 'tls_client_mixed_server', 'tls_client_tls_server']",
@@ -153,6 +159,12 @@ public class Flags {
"Takes effect on next tick or on host-admin restart (may vary where used).",
HOSTNAME);
+ public static final UnboundBooleanFlag USE_INTERNAL_ZTS = defineFeatureFlag(
+ "use-internal-zts", false,
+ "Decides if certificate in public should be requested from 'zts' configserver or mapped in",
+ "Takes effect on next tick or on host-admin restart.",
+ APPLICATION_ID);
+
public static final UnboundBooleanFlag DYNAMIC_UPSTREAM_CONNECTION_CACHE = defineFeatureFlag(
"dynamic-upstream-connection-cache", false,
"Scale upstream connection cache with number of upstream servers",
diff --git a/jdisc_http_service/src/main/java/com/yahoo/jdisc/http/server/jetty/HealthCheckProxyHandler.java b/jdisc_http_service/src/main/java/com/yahoo/jdisc/http/server/jetty/HealthCheckProxyHandler.java
index 4dfdbd55fab..e206cb3215d 100644
--- a/jdisc_http_service/src/main/java/com/yahoo/jdisc/http/server/jetty/HealthCheckProxyHandler.java
+++ b/jdisc_http_service/src/main/java/com/yahoo/jdisc/http/server/jetty/HealthCheckProxyHandler.java
@@ -136,6 +136,7 @@ class HealthCheckProxyHandler extends HandlerWrapper {
.setSslcontext(sslContextFactory.getSslContext())
.setSSLHostnameVerifier(NoopHostnameVerifier.INSTANCE)
.setUserTokenHandler(context -> null) // https://stackoverflow.com/a/42112034/1615280
+ .setUserAgent("health-check-proxy-client")
.build();
}
}
diff --git a/maven-plugins/pom.xml b/maven-plugins/pom.xml
index d2287506912..c2a011e0408 100644
--- a/maven-plugins/pom.xml
+++ b/maven-plugins/pom.xml
@@ -9,8 +9,7 @@
<relativePath>../parent/pom.xml</relativePath>
</parent>
<groupId>com.yahoo.vespa</groupId>
- <artifactId>bundle-plugins</artifactId>
- <name>bundle-plugins</name>
+ <artifactId>maven-plugins</artifactId>
<packaging>pom</packaging>
<version>7-SNAPSHOT</version>
<description>Parent artifact for Vespa maven plugins.</description>
diff --git a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/operations/Argument.java b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/operations/Argument.java
index 9f62a27a3b9..dad4508bc61 100644
--- a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/operations/Argument.java
+++ b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/operations/Argument.java
@@ -67,7 +67,7 @@ public class Argument extends IntermediateOperation {
@Override
public String toFullString() {
- return "\t" + lazyGetType() + ":\tArgument(" + standardNamingType + ")";
+ return "\t" + type + ":\tArgument(" + standardNamingType + ")";
}
}
diff --git a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/operations/Const.java b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/operations/Const.java
index 3ad5cb1d19f..fc895b07d53 100644
--- a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/operations/Const.java
+++ b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/operations/Const.java
@@ -102,7 +102,7 @@ public class Const extends IntermediateOperation {
@Override
public String toFullString() {
- return "\t" + lazyGetType() + ":\tConst(" + type + ")";
+ return "\t" + type + ":\tConst(" + getConstantValue().get() + ")";
}
}
diff --git a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/operations/Constant.java b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/operations/Constant.java
index 1eaaf705220..ad56eefe5f2 100644
--- a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/operations/Constant.java
+++ b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/operations/Constant.java
@@ -74,7 +74,7 @@ public class Constant extends IntermediateOperation {
@Override
public String toFullString() {
- return "\t" + lazyGetType() + ":\tConstant(" + type + ")";
+ return "\t" + type + ":\tConstant(" + type + ")";
}
}
diff --git a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/operations/IntermediateOperation.java b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/operations/IntermediateOperation.java
index 9c9fed89585..26b376cce1c 100644
--- a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/operations/IntermediateOperation.java
+++ b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/operations/IntermediateOperation.java
@@ -226,7 +226,7 @@ public abstract class IntermediateOperation {
}
public String toFullString() {
- return "\t" + lazyGetType() + ":\t" + operationName() + "(" +
+ return "\t" + type + ":\t" + operationName() + "(" +
inputs().stream().map(input -> input.toFullString()).collect(Collectors.joining(", ")) +
")";
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileSnapshot.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileSnapshot.java
new file mode 100644
index 00000000000..4f227ccb6d4
--- /dev/null
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileSnapshot.java
@@ -0,0 +1,83 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.node.admin.task.util.file;
+
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.time.Instant;
+import java.util.Optional;
+
+/**
+ * A snapshot of the attributes of the file for a given path, and file content if it is a regular file.
+ *
+ * @author hakonhall
+ */
+public class FileSnapshot {
+ private final Path path;
+ private final Optional<FileAttributes> attributes;
+ private final Optional<byte[]> content;
+
+ public static FileSnapshot forPath(Path path) { return forNonExistingFile(path).snapshot(); }
+
+ /** Guaranteed to not throw any exceptions. */
+ public static FileSnapshot forNonExistingFile(Path path) {
+ return new FileSnapshot(path, Optional.empty(), Optional.empty());
+ }
+
+ private static FileSnapshot forRegularFile(Path path, FileAttributes attributes, byte[] content) {
+ if (!attributes.isRegularFile()) throw new IllegalArgumentException(path + " is not a regular file");
+ return new FileSnapshot(path, Optional.of(attributes), Optional.of(content));
+ }
+
+ private static FileSnapshot forOtherFile(Path path, FileAttributes attributes) {
+ if (attributes.isRegularFile()) throw new IllegalArgumentException(path + " is a regular file");
+ return new FileSnapshot(path, Optional.of(attributes), Optional.empty());
+ }
+
+ private FileSnapshot(Path path, Optional<FileAttributes> attributes, Optional<byte[]> content) {
+ this.path = path;
+ this.attributes = attributes;
+ this.content = content;
+ }
+
+ public Path path() { return path; }
+
+ /** Whether there was a file (or directory) at path. */
+ public boolean exists() { return attributes.isPresent(); }
+
+ /** Returns the file attributes if the file exists. */
+ public Optional<FileAttributes> attributes() { return attributes; }
+
+ /** Returns the file content if the file exists and is a regular file. */
+ public Optional<byte[]> content() { return content; }
+
+ /** Returns the file UTF-8 content if it exists and is a regular file. */
+ public Optional<String> utf8Content() { return content.map(c -> new String(c, StandardCharsets.UTF_8)); }
+
+ /** Returns an up-to-date snapshot of the path, possibly {@code this} if last modified time has not changed. */
+ public FileSnapshot snapshot() {
+ Optional<FileAttributes> currentAttributes = new UnixPath(path).getAttributesIfExists();
+ if (currentAttributes.isPresent()) {
+
+ // 'this' may still be valid, depending on last modified times.
+ if (attributes.isPresent()) {
+ Instant previousModifiedTime = attributes.get().lastModifiedTime();
+ Instant currentModifiedTime = currentAttributes.get().lastModifiedTime();
+ if (currentModifiedTime.compareTo(previousModifiedTime) <= 0) {
+ return this;
+ }
+ }
+
+ if (currentAttributes.get().isRegularFile()) {
+ Optional<byte[]> content = IOExceptionUtil.ifExists(() -> Files.readAllBytes(path));
+ return content.map(bytes -> FileSnapshot.forRegularFile(path, currentAttributes.get(), bytes))
+ // File was removed after getting attributes and before getting content.
+ .orElseGet(() -> FileSnapshot.forNonExistingFile(path));
+ } else {
+ return FileSnapshot.forOtherFile(path, currentAttributes.get());
+ }
+ } else {
+ return attributes.isPresent() ? FileSnapshot.forNonExistingFile(path) : this /* avoid allocation */;
+ }
+ }
+}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileWriter.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileWriter.java
index f7eba68e455..afc0e7b5c22 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileWriter.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileWriter.java
@@ -3,9 +3,11 @@ package com.yahoo.vespa.hosted.node.admin.task.util.file;
import com.yahoo.vespa.hosted.node.admin.component.TaskContext;
+import java.io.File;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
+import java.util.Optional;
import java.util.function.Supplier;
/**
@@ -17,20 +19,30 @@ public class FileWriter {
private final Path path;
private final FileSync fileSync;
private final PartialFileData.Builder fileDataBuilder = PartialFileData.builder();
- private final Supplier<byte[]> contentProducer;
+ private final Optional<ByteArraySupplier> contentProducer;
private boolean overwriteExistingFile = true;
+ public FileWriter(Path path) {
+ this(path, Optional.empty());
+ }
+
public FileWriter(Path path, Supplier<String> contentProducer) {
this(path, () -> contentProducer.get().getBytes(StandardCharsets.UTF_8));
}
public FileWriter(Path path, ByteArraySupplier contentProducer) {
+ this(path, Optional.of(contentProducer));
+ }
+
+ private FileWriter(Path path, Optional<ByteArraySupplier> contentProducer) {
this.path = path;
this.fileSync = new FileSync(path);
this.contentProducer = contentProducer;
}
+ public Path path() { return path; }
+
public FileWriter withOwner(String owner) {
fileDataBuilder.withOwner(owner);
return this;
@@ -52,11 +64,19 @@ public class FileWriter {
}
public boolean converge(TaskContext context) {
+ return converge(context, contentProducer.orElseThrow().get());
+ }
+
+ public boolean converge(TaskContext context, String utf8Content) {
+ return converge(context, utf8Content.getBytes(StandardCharsets.UTF_8));
+ }
+
+ public boolean converge(TaskContext context, byte[] content) {
if (!overwriteExistingFile && Files.isRegularFile(path)) {
return false;
}
- fileDataBuilder.withContent(contentProducer.get());
+ fileDataBuilder.withContent(content);
PartialFileData fileData = fileDataBuilder.create();
return fileSync.convergeTo(context, fileData);
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixPath.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixPath.java
index cf6c6c432f4..2cc74742463 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixPath.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixPath.java
@@ -231,6 +231,8 @@ public class UnixPath {
return new UnixPath(link);
}
+ public FileSnapshot getFileSnapshot() { return FileSnapshot.forPath(path); }
+
@Override
public String toString() {
return path.toString();
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileSnapshotTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileSnapshotTest.java
new file mode 100644
index 00000000000..8c73d522f1d
--- /dev/null
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileSnapshotTest.java
@@ -0,0 +1,64 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.node.admin.task.util.file;
+
+import com.yahoo.vespa.test.file.TestFileSystem;
+import org.junit.Test;
+
+import java.nio.file.FileSystem;
+import java.nio.file.Path;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * @author hakonhall
+ */
+public class FileSnapshotTest {
+ private final FileSystem fileSystem = TestFileSystem.create();
+ private final UnixPath path = new UnixPath(fileSystem.getPath("/var/lib/file.txt"));
+
+ private FileSnapshot fileSnapshot = FileSnapshot.forPath(path.toPath());
+
+ @Test
+ public void fileDoesNotExist() {
+ assertFalse(fileSnapshot.exists());
+ assertFalse(fileSnapshot.attributes().isPresent());
+ assertFalse(fileSnapshot.content().isPresent());
+ assertEquals(path.toPath(), fileSnapshot.path());
+ }
+
+ @Test
+ public void directory() {
+ path.createParents().createDirectory();
+ fileSnapshot = fileSnapshot.snapshot();
+ assertTrue(fileSnapshot.exists());
+ assertTrue(fileSnapshot.attributes().isPresent());
+ assertTrue(fileSnapshot.attributes().get().isDirectory());
+ }
+
+ @Test
+ public void regularFile() {
+ path.createParents().writeUtf8File("file content");
+ fileSnapshot = fileSnapshot.snapshot();
+ assertTrue(fileSnapshot.exists());
+ assertTrue(fileSnapshot.attributes().isPresent());
+ assertTrue(fileSnapshot.attributes().get().isRegularFile());
+ assertTrue(fileSnapshot.utf8Content().isPresent());
+ assertEquals("file content", fileSnapshot.utf8Content().get());
+
+ FileSnapshot newFileSnapshot = fileSnapshot.snapshot();
+ assertSame(fileSnapshot, newFileSnapshot);
+ }
+
+ @Test
+ public void fileRemoval() {
+ path.createParents().writeUtf8File("file content");
+ fileSnapshot = fileSnapshot.snapshot();
+ assertTrue(fileSnapshot.exists());
+ path.deleteIfExists();
+ fileSnapshot = fileSnapshot.snapshot();
+ assertFalse(fileSnapshot.exists());
+ }
+} \ No newline at end of file
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java
index bdcb96c1861..fa439e50fbd 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java
@@ -1,6 +1,7 @@
// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision;
+import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterMembership;
import com.yahoo.config.provision.Flavor;
@@ -287,6 +288,24 @@ public final class Node {
return this.with(newStatus).with(newHistory);
}
+ /** Returns a copy of this node with the current OS version set to the given version at the given instant */
+ public Node withCurrentOsVersion(Version version, Instant instant) {
+ var newStatus = status.withOsVersion(version);
+ var newHistory = history();
+ // Only update history if version has changed
+ if (status.osVersion().isEmpty() || !status.osVersion().get().equals(version)) {
+ newHistory = history.with(new History.Event(History.Event.Type.osUpgraded, Agent.system, instant));
+ }
+ return this.with(newStatus).with(newHistory);
+ }
+
+ /** Returns a copy of this node with firmware verified at the given instant */
+ public Node withFirmwareVerifiedAt(Instant instant) {
+ var newStatus = status.withFirmwareVerifiedAt(instant);
+ var newHistory = history.with(new History.Event(History.Event.Type.firmwareVerified, Agent.system, instant));
+ return this.with(newStatus).with(newHistory);
+ }
+
/** Returns a copy of this node with the given history. */
public Node with(History history) {
return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state, allocation, history, type, reports, modelName);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
index dfd190b14f5..68123092dfa 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
@@ -27,12 +27,12 @@ import com.yahoo.vespa.hosted.provision.node.NodeAcl;
import com.yahoo.vespa.hosted.provision.node.filter.NodeFilter;
import com.yahoo.vespa.hosted.provision.node.filter.NodeListFilter;
import com.yahoo.vespa.hosted.provision.node.filter.StateFilter;
+import com.yahoo.vespa.hosted.provision.os.OsVersions;
import com.yahoo.vespa.hosted.provision.persistence.CuratorDatabaseClient;
import com.yahoo.vespa.hosted.provision.persistence.DnsNameResolver;
import com.yahoo.vespa.hosted.provision.persistence.NameResolver;
import com.yahoo.vespa.hosted.provision.provisioning.DockerImages;
import com.yahoo.vespa.hosted.provision.provisioning.FirmwareChecks;
-import com.yahoo.vespa.hosted.provision.os.OsVersions;
import com.yahoo.vespa.hosted.provision.restapi.v2.NotFoundException;
import java.time.Clock;
@@ -608,7 +608,7 @@ public class NodeRepository extends AbstractComponent {
* Non-Docker-container node: iff in state provisioned|failed|parked
* Docker-container-node:
* If only removing the container node: node in state ready
- * If also removing the parent node: child is in state provisioned|failed|parked|ready
+ * If also removing the parent node: child is in state provisioned|failed|parked|dirty|ready
*/
private boolean canRemove(Node node, boolean deletingAsChild) {
if (node.type() == NodeType.tenant && node.allocation().isPresent()) {
@@ -623,7 +623,7 @@ public class NodeRepository extends AbstractComponent {
} else if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER) {
Set<Node.State> legalStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked,
- Node.State.ready);
+ Node.State.dirty, Node.State.ready);
if (! legalStates.contains(node.state())) {
throw new IllegalArgumentException(String.format("Child node %s can only be removed from following states: %s",
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/NodePatcher.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/NodePatcher.java
index 8dcf3c260f5..5f4c300b496 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/NodePatcher.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/NodePatcher.java
@@ -120,9 +120,9 @@ public class NodePatcher {
case "currentVespaVersion" :
return node.with(node.status().withVespaVersion(Version.fromString(asString(value))));
case "currentOsVersion" :
- return node.with(node.status().withOsVersion(Version.fromString(asString(value))));
+ return node.withCurrentOsVersion(Version.fromString(asString(value)), clock.instant());
case "currentFirmwareCheck":
- return node.with(node.status().withFirmwareVerifiedAt(Instant.ofEpochMilli(asLong(value))));
+ return node.withFirmwareVerifiedAt(Instant.ofEpochMilli(asLong(value)));
case "failCount" :
return node.with(node.status().setFailCount(asLong(value).intValue()));
case "flavor" :
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java
index b958f7cd09c..c9671aeafbe 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java
@@ -10,7 +10,6 @@ import org.junit.Test;
import java.time.Instant;
import java.util.Arrays;
import java.util.HashSet;
-import java.util.Optional;
import java.util.Set;
import java.util.function.Predicate;
import java.util.stream.Collectors;
@@ -119,11 +118,11 @@ public class NodeRepositoryTest {
tester.addNode("node20", "node20", "host2", "docker", NodeType.tenant);
assertEquals(6, tester.nodeRepository().getNodes().size());
- tester.setNodeState("node11", Node.State.dirty);
+ tester.setNodeState("node11", Node.State.active);
try {
tester.nodeRepository().removeRecursively("host1");
- fail("Should not be able to delete host node, one of the children is in state dirty");
+ fail("Should not be able to delete host node, one of the children is in state active");
} catch (IllegalArgumentException ignored) {
// Expected
}
@@ -133,9 +132,10 @@ public class NodeRepositoryTest {
tester.nodeRepository().removeRecursively("host2");
assertEquals(4, tester.nodeRepository().getNodes().size());
- // Now node10 and node12 are in provisioned, set node11 to ready, and it should be OK to delete host1
- tester.nodeRepository().setReady("node11", Agent.system, getClass().getSimpleName());
- tester.nodeRepository().removeRecursively("node11"); // Remove one of the children first instead
+ // Now node10 is in provisioned, set node11 to failed and node12 to ready, and it should be OK to delete host1
+ tester.nodeRepository().fail("node11", Agent.system, getClass().getSimpleName());
+ tester.nodeRepository().setReady("node12", Agent.system, getClass().getSimpleName());
+ tester.nodeRepository().removeRecursively("node12"); // Remove one of the children first instead
assertEquals(3, tester.nodeRepository().getNodes().size());
tester.nodeRepository().removeRecursively("host1");
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/SerializationTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/SerializationTest.java
index 23699879ceb..0cfdf80a8a1 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/SerializationTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/SerializationTest.java
@@ -325,10 +325,17 @@ public class SerializationTest {
assertFalse(serialized.status().osVersion().isPresent());
// Update OS version
- serialized = serialized.with(serialized.status()
- .withOsVersion(Version.fromString("7.1")));
+ serialized = serialized.withCurrentOsVersion(Version.fromString("7.1"), Instant.ofEpochMilli(123))
+ // Another update for same version:
+ .withCurrentOsVersion(Version.fromString("7.1"), Instant.ofEpochMilli(456));
serialized = nodeSerializer.fromJson(State.provisioned, nodeSerializer.toJson(serialized));
assertEquals(Version.fromString("7.1"), serialized.status().osVersion().get());
+ var osUpgradedEvents = serialized.history().events().stream()
+ .filter(event -> event.type() == History.Event.Type.osUpgraded)
+ .collect(Collectors.toList());
+ assertEquals("OS upgraded event is added", 1, osUpgradedEvents.size());
+ assertEquals("Duplicate updates of same version uses earliest instant", Instant.ofEpochMilli(123),
+ osUpgradedEvents.get(0).at());
}
@Test
@@ -336,9 +343,10 @@ public class SerializationTest {
Node node = nodeSerializer.fromJson(State.active, nodeSerializer.toJson(createNode()));
assertFalse(node.status().firmwareVerifiedAt().isPresent());
- node = node.with(node.status().withFirmwareVerifiedAt(Instant.ofEpochMilli(100)));
+ node = node.withFirmwareVerifiedAt(Instant.ofEpochMilli(100));
node = nodeSerializer.fromJson(State.active, nodeSerializer.toJson(node));
assertEquals(100, node.status().firmwareVerifiedAt().get().toEpochMilli());
+ assertEquals(Instant.ofEpochMilli(100), node.history().event(History.Event.Type.firmwareVerified).get().at());
}
@Test
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node1-os-upgrade-complete.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node1-os-upgrade-complete.json
index f5068924084..31bdfe7d6e3 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node1-os-upgrade-complete.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node1-os-upgrade-complete.json
@@ -57,6 +57,11 @@
"event": "activated",
"at": 123,
"agent": "application"
+ },
+ {
+ "event": "osUpgraded",
+ "at": 123,
+ "agent": "system"
}
],
"ipAddresses": [
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/dockerhost1-with-firmware-data.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/dockerhost1-with-firmware-data.json
index 53982c78042..24e64248b1c 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/dockerhost1-with-firmware-data.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/dockerhost1-with-firmware-data.json
@@ -57,6 +57,11 @@
"event": "activated",
"at": 123,
"agent": "application"
+ },
+ {
+ "event": "firmwareVerified",
+ "at": 100,
+ "agent": "system"
}
],
"ipAddresses": [
diff --git a/parent/pom.xml b/parent/pom.xml
index 7efe36e73f7..504ae2ade4d 100644
--- a/parent/pom.xml
+++ b/parent/pom.xml
@@ -123,7 +123,7 @@
<artifactId>maven-dependency-plugin</artifactId>
<version>3.1.1</version>
<dependencies>
- <!-- TODO: Remove when upgrading to 3.1.2 -->
+ <!-- TODO: remove when upgrading to 3.1.2 -->
<dependency>
<groupId>org.apache.maven.shared</groupId>
<artifactId>maven-dependency-analyzer</artifactId>
@@ -405,10 +405,13 @@
</profiles>
<dependencyManagement>
<dependencies>
+
+ <!-- Please keep this list lexically sorted by groupId, then artifactId.-->
+
<dependency>
- <groupId>org.apache.maven.wagon</groupId>
- <artifactId>wagon-ssh-external</artifactId>
- <version>2.7</version>
+ <groupId>com.amazonaws</groupId>
+ <artifactId>aws-java-sdk-core</artifactId>
+ <version>${aws.sdk.version}</version>
</dependency>
<dependency>
<groupId>com.github.cverges.expect4j</groupId>
@@ -416,54 +419,50 @@
<version>1.6</version>
</dependency>
<dependency>
- <groupId>org.apache.commons</groupId>
- <artifactId>commons-compress</artifactId>
- <version>1.18</version>
- </dependency>
- <dependency>
- <groupId>org.apache.commons</groupId>
- <artifactId>commons-exec</artifactId>
- <version>1.3</version>
+ <groupId>com.github.tomakehurst</groupId>
+ <artifactId>wiremock-standalone</artifactId>
+ <version>2.6.0</version>
</dependency>
<dependency>
- <groupId>org.apache.velocity</groupId>
- <artifactId>velocity</artifactId>
- <version>1.7</version>
+ <groupId>com.google.jimfs</groupId>
+ <artifactId>jimfs</artifactId>
+ <version>1.1</version>
+ <scope>test</scope> <!-- TODO: remove scope from parent pom -->
</dependency>
<dependency>
- <groupId>io.airlift</groupId>
- <artifactId>airline</artifactId>
- <version>0.7</version>
+ <groupId>com.google.protobuf</groupId>
+ <artifactId>protobuf-java</artifactId>
+ <version>${protobuf.version}</version>
</dependency>
<dependency>
- <groupId>io.prometheus</groupId>
- <artifactId>simpleclient</artifactId>
- <version>${prometheus.client.version}</version>
+ <groupId>com.ibm.icu</groupId>
+ <artifactId>icu4j</artifactId>
+ <version>57.1</version>
</dependency>
<dependency>
- <groupId>io.prometheus</groupId>
- <artifactId>simpleclient_common</artifactId>
- <version>${prometheus.client.version}</version>
+ <groupId>com.infradna.tool</groupId>
+ <artifactId>bridge-method-annotation</artifactId>
+ <version>1.4</version>
</dependency>
<dependency>
- <groupId>org.ow2.asm</groupId>
- <artifactId>asm</artifactId>
- <version>${asm.version}</version>
+ <groupId>com.optimaize.languagedetector</groupId>
+ <artifactId>language-detector</artifactId>
+ <version>0.6</version>
</dependency>
<dependency>
- <groupId>org.eclipse.collections</groupId>
- <artifactId>eclipse-collections</artifactId>
- <version>9.2.0</version>
+ <groupId>com.yahoo.athenz</groupId>
+ <artifactId>athenz-zms-java-client</artifactId>
+ <version>${athenz.version}</version>
</dependency>
<dependency>
- <groupId>org.eclipse.collections</groupId>
- <artifactId>eclipse-collections-api</artifactId>
- <version>9.2.0</version>
+ <groupId>com.yahoo.athenz</groupId>
+ <artifactId>athenz-zpe-java-client</artifactId>
+ <version>${athenz.version}</version>
</dependency>
<dependency>
- <groupId>com.infradna.tool</groupId>
- <artifactId>bridge-method-annotation</artifactId>
- <version>1.4</version>
+ <groupId>com.yahoo.athenz</groupId>
+ <artifactId>athenz-zts-java-client</artifactId>
+ <version>${athenz.version}</version>
</dependency>
<dependency>
<groupId>commons-cli</groupId>
@@ -505,39 +504,35 @@
<artifactId>commons-pool</artifactId>
<version>1.5.6</version>
</dependency>
- <!-- Explicitly included to get Zookeeper version 3.4.14,
- can be excluded if you want the Zookeeper version
- used by curator by default
- -->
<dependency>
- <groupId>org.apache.zookeeper</groupId>
- <artifactId>zookeeper</artifactId>
- <version>3.4.14</version>
+ <groupId>io.airlift</groupId>
+ <artifactId>airline</artifactId>
+ <version>0.7</version>
</dependency>
<dependency>
- <groupId>org.apache.curator</groupId>
- <artifactId>curator-recipes</artifactId>
- <version>${curator.version}</version>
+ <groupId>io.prometheus</groupId>
+ <artifactId>simpleclient</artifactId>
+ <version>${prometheus.client.version}</version>
</dependency>
<dependency>
- <groupId>org.apache.curator</groupId>
- <artifactId>curator-test</artifactId>
- <version>${curator.version}</version>
+ <groupId>io.prometheus</groupId>
+ <artifactId>simpleclient_common</artifactId>
+ <version>${prometheus.client.version}</version>
</dependency>
<dependency>
- <groupId>org.junit.jupiter</groupId>
- <artifactId>junit-jupiter-engine</artifactId>
- <version>${junit.version}</version>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <version>4.12</version>
</dependency>
<dependency>
- <groupId>org.junit.vintage</groupId>
- <artifactId>junit-vintage-engine</artifactId>
- <version>${junit.version}</version>
+ <groupId>net.java.dev.jna</groupId>
+ <artifactId>jna</artifactId>
+ <version>${jna.version}</version>
</dependency>
<dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- <version>4.12</version>
+ <groupId>net.spy</groupId>
+ <artifactId>spymemcached</artifactId>
+ <version>2.10.1</version>
</dependency>
<dependency>
<groupId>org.antlr</groupId>
@@ -551,10 +546,30 @@
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
+ <artifactId>commons-compress</artifactId>
+ <version>1.18</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.commons</groupId>
+ <artifactId>commons-exec</artifactId>
+ <version>1.3</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
<version>3.1</version>
</dependency>
<dependency>
+ <groupId>org.apache.curator</groupId>
+ <artifactId>curator-recipes</artifactId>
+ <version>${curator.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.curator</groupId>
+ <artifactId>curator-test</artifactId>
+ <version>${curator.version}</version>
+ </dependency>
+ <dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>fluent-hc</artifactId>
<version>4.3.6</version>
@@ -590,11 +605,6 @@
<version>3.5.0</version>
</dependency>
<dependency>
- <groupId>org.apache.maven.plugin-tools</groupId>
- <artifactId>maven-plugin-annotations</artifactId>
- <version>${maven-plugin-tools.version}</version>
- </dependency>
- <dependency>
<groupId>org.apache.maven</groupId>
<artifactId>maven-plugin-api</artifactId>
<version>${maven-plugin-tools.version}</version>
@@ -605,6 +615,11 @@
<version>2.2.1</version>
</dependency>
<dependency>
+ <groupId>org.apache.maven.plugin-tools</groupId>
+ <artifactId>maven-plugin-annotations</artifactId>
+ <version>${maven-plugin-tools.version}</version>
+ </dependency>
+ <dependency>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
<version>3.1.0</version>
@@ -621,6 +636,34 @@
<type>pom</type>
</dependency>
<dependency>
+ <groupId>org.apache.maven.wagon</groupId>
+ <artifactId>wagon-ssh-external</artifactId>
+ <version>2.7</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.opennlp</groupId>
+ <artifactId>opennlp-tools</artifactId>
+ <version>1.8.4</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.velocity</groupId>
+ <artifactId>velocity</artifactId>
+ <version>1.7</version>
+ </dependency>
+ <dependency>
+ <!-- Explicitly included to get Zookeeper version 3.4.14,
+ can be excluded if you want the Zookeeper version
+ used by curator by default -->
+ <groupId>org.apache.zookeeper</groupId>
+ <artifactId>zookeeper</artifactId>
+ <version>3.4.14</version>
+ </dependency>
+ <dependency>
+ <groupId>org.assertj</groupId>
+ <artifactId>assertj-core</artifactId>
+ <version>3.11.1</version>
+ </dependency>
+ <dependency>
<groupId>org.codehaus.jettison</groupId>
<artifactId>jettison</artifactId>
<version>1.3.1</version>
@@ -629,31 +672,35 @@
<groupId>org.cthul</groupId>
<artifactId>cthul-matchers</artifactId>
<version>1.0</version>
- <scope>test</scope>
+ <scope>test</scope> <!-- TODO: remove scope from parent pom -->
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.collections</groupId>
+ <artifactId>eclipse-collections</artifactId>
+ <version>9.2.0</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.collections</groupId>
+ <artifactId>eclipse-collections-api</artifactId>
+ <version>9.2.0</version>
</dependency>
<dependency>
<groupId>org.hamcrest</groupId>
<artifactId>hamcrest-all</artifactId>
<version>1.3</version>
- <scope>test</scope>
+ <scope>test</scope> <!-- TODO: remove scope from parent pom -->
</dependency>
<dependency>
<groupId>org.hamcrest</groupId>
<artifactId>hamcrest-core</artifactId>
<version>1.3</version>
- <scope>test</scope>
+ <scope>test</scope> <!-- TODO: remove scope from parent pom -->
</dependency>
<dependency>
<groupId>org.hamcrest</groupId>
<artifactId>hamcrest-library</artifactId>
<version>1.3</version>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>uk.co.datumedge</groupId>
- <artifactId>hamcrest-json</artifactId>
- <version>0.2</version>
- <scope>test</scope>
+ <scope>test</scope> <!-- TODO: remove scope from parent pom -->
</dependency>
<dependency>
<groupId>org.hdrhistogram</groupId>
@@ -661,16 +708,25 @@
<version>2.1.8</version>
</dependency>
<dependency>
+ <groupId>org.junit.jupiter</groupId>
+ <artifactId>junit-jupiter-engine</artifactId>
+ <version>${junit.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.junit.vintage</groupId>
+ <artifactId>junit-vintage-engine</artifactId>
+ <version>${junit.version}</version>
+ </dependency>
+ <dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
<version>1.10.19</version>
- <scope>test</scope>
+ <scope>test</scope> <!-- TODO: remove scope from parent pom -->
</dependency>
<dependency>
- <groupId>com.google.jimfs</groupId>
- <artifactId>jimfs</artifactId>
- <version>1.1</version>
- <scope>test</scope>
+ <groupId>org.ow2.asm</groupId>
+ <artifactId>asm</artifactId>
+ <version>${asm.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
@@ -678,66 +734,6 @@
<version>4.0.6.RELEASE</version>
</dependency>
<dependency>
- <groupId>org.twdata.maven</groupId>
- <artifactId>mojo-executor</artifactId>
- <version>2.3.0</version>
- </dependency>
- <dependency>
- <groupId>net.spy</groupId>
- <artifactId>spymemcached</artifactId>
- <version>2.10.1</version>
- </dependency>
- <dependency>
- <groupId>xerces</groupId>
- <artifactId>xercesImpl</artifactId>
- <version>2.11.0</version>
- </dependency>
- <dependency>
- <groupId>com.ibm.icu</groupId>
- <artifactId>icu4j</artifactId>
- <version>57.1</version>
- </dependency>
- <dependency>
- <groupId>com.yahoo.athenz</groupId>
- <artifactId>athenz-zms-java-client</artifactId>
- <version>${athenz.version}</version>
- </dependency>
- <dependency>
- <groupId>com.yahoo.athenz</groupId>
- <artifactId>athenz-zts-java-client</artifactId>
- <version>${athenz.version}</version>
- </dependency>
- <dependency>
- <groupId>com.yahoo.athenz</groupId>
- <artifactId>athenz-zpe-java-client</artifactId>
- <version>${athenz.version}</version>
- </dependency>
- <dependency>
- <groupId>com.github.tomakehurst</groupId>
- <artifactId>wiremock-standalone</artifactId>
- <version>2.6.0</version>
- </dependency>
- <dependency>
- <groupId>org.apache.opennlp</groupId>
- <artifactId>opennlp-tools</artifactId>
- <version>1.8.4</version>
- </dependency>
- <dependency>
- <groupId>com.optimaize.languagedetector</groupId>
- <artifactId>language-detector</artifactId>
- <version>0.6</version>
- </dependency>
- <dependency>
- <groupId>net.java.dev.jna</groupId>
- <artifactId>jna</artifactId>
- <version>${jna.version}</version>
- </dependency>
- <dependency>
- <groupId>com.google.protobuf</groupId>
- <artifactId>protobuf-java</artifactId>
- <version>${protobuf.version}</version>
- </dependency>
- <dependency>
<groupId>org.tensorflow</groupId>
<artifactId>proto</artifactId>
<version>${tensorflow.version}</version>
@@ -748,14 +744,20 @@
<version>${tensorflow.version}</version>
</dependency>
<dependency>
- <groupId>org.assertj</groupId>
- <artifactId>assertj-core</artifactId>
- <version>3.11.1</version>
+ <groupId>org.twdata.maven</groupId>
+ <artifactId>mojo-executor</artifactId>
+ <version>2.3.0</version>
</dependency>
<dependency>
- <groupId>com.amazonaws</groupId>
- <artifactId>aws-java-sdk-core</artifactId>
- <version>${aws.sdk.version}</version>
+ <groupId>uk.co.datumedge</groupId>
+ <artifactId>hamcrest-json</artifactId>
+ <version>0.2</version>
+ <scope>test</scope> <!-- TODO: remove scope from parent pom -->
+ </dependency>
+ <dependency>
+ <groupId>xerces</groupId>
+ <artifactId>xercesImpl</artifactId>
+ <version>2.11.0</version>
</dependency>
</dependencies>
</dependencyManagement>
@@ -766,11 +768,9 @@
<apache.httpclient.version>4.4.1</apache.httpclient.version>
<apache.httpcore.version>4.4.1</apache.httpcore.version>
<asm.version>7.0</asm.version>
- <aws.sdk.version>1.11.542</aws.sdk.version>
- <jna.version>4.5.2</jna.version>
- <tensorflow.version>1.12.0</tensorflow.version>
- <!-- Athenz dependencies. Make sure these dependencies matches those in Vespa's internal repositories -->
+ <!-- Athenz dependencies. Make sure these dependencies match those in Vespa's internal repositories -->
<athenz.version>1.8.29</athenz.version>
+ <aws.sdk.version>1.11.542</aws.sdk.version>
<commons-lang.version>2.6</commons-lang.version>
<!-- WARNING: If you change curator version, you also need to update
zkfacade/src/main/java/org/apache/curator/**/package-info.java
@@ -779,16 +779,20 @@
xargs perl -pi -e 's/major = [0-9]+, minor = [0-9]+, micro = [0-9]+/major = 2, minor = 9, micro = 1/g'
-->
<curator.version>2.9.1</curator.version>
- <maven-shade-plugin.version>3.2.1</maven-shade-plugin.version>
- <maven-plugin-tools.version>3.6.0</maven-plugin-tools.version>
- <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
- <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
- <test.hide>true</test.hide>
- <doclint>all</doclint>
- <surefire.version>2.22.0</surefire.version>
+ <jna.version>4.5.2</jna.version>
<junit.version>5.4.2</junit.version>
+ <maven-plugin-tools.version>3.6.0</maven-plugin-tools.version>
+ <maven-shade-plugin.version>3.2.1</maven-shade-plugin.version>
<prometheus.client.version>0.6.0</prometheus.client.version>
<protobuf.version>3.7.0</protobuf.version>
+ <surefire.version>2.22.0</surefire.version>
+ <tensorflow.version>1.12.0</tensorflow.version>
+
+ <doclint>all</doclint>
+ <test.hide>true</test.hide>
+
+ <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+ <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
</properties>
</project>
diff --git a/searchcore/CMakeLists.txt b/searchcore/CMakeLists.txt
index 45113e95df4..ee262a5fa5c 100644
--- a/searchcore/CMakeLists.txt
+++ b/searchcore/CMakeLists.txt
@@ -21,9 +21,6 @@ vespa_define_module(
LIBS
src/vespa/searchcore/config
- src/vespa/searchcore/fdispatch/common
- src/vespa/searchcore/fdispatch/program
- src/vespa/searchcore/fdispatch/search
src/vespa/searchcore/grouping
src/vespa/searchcore/proton/attribute
src/vespa/searchcore/proton/bucketdb
@@ -46,7 +43,6 @@ vespa_define_module(
src/vespa/searchcore/util
APPS
- src/apps/fdispatch
src/apps/proton
src/apps/tests
src/apps/verify_ranksetup
@@ -56,8 +52,6 @@ vespa_define_module(
src/apps/vespa-transactionlog-inspect
TESTS
- src/tests/fdispatch/randomrow
- src/tests/fdispatch/fnet_search
src/tests/grouping
src/tests/proton/attribute
src/tests/proton/attribute/attribute_aspect_delayer
diff --git a/searchcore/src/apps/fdispatch/.gitignore b/searchcore/src/apps/fdispatch/.gitignore
deleted file mode 100644
index 36a9a584d01..00000000000
--- a/searchcore/src/apps/fdispatch/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-vespa-dispatch-bin
diff --git a/searchcore/src/apps/fdispatch/CMakeLists.txt b/searchcore/src/apps/fdispatch/CMakeLists.txt
deleted file mode 100644
index 4399968a761..00000000000
--- a/searchcore/src/apps/fdispatch/CMakeLists.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-vespa_add_executable(searchcore_fdispatch_app
- SOURCES
- fdispatch.cpp
- OUTPUT_NAME vespa-dispatch-bin
- INSTALL sbin
- DEPENDS
- searchcore_fdispatch_program
- searchcore_fdispatch_search
- searchcore_grouping
- searchcore_fdcommon
- searchcore_util
- searchcore_fconfig
- searchlib_searchlib_uca
-)
diff --git a/searchcore/src/apps/fdispatch/fdispatch.cpp b/searchcore/src/apps/fdispatch/fdispatch.cpp
deleted file mode 100644
index 0aa16260737..00000000000
--- a/searchcore/src/apps/fdispatch/fdispatch.cpp
+++ /dev/null
@@ -1,208 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/searchcore/fdispatch/program/fdispatch.h>
-#include <vespa/vespalib/net/state_server.h>
-#include <vespa/vespalib/net/simple_health_producer.h>
-#include <vespa/vespalib/net/simple_metrics_producer.h>
-#include <vespa/searchlib/expression/forcelink.hpp>
-#include <vespa/searchlib/aggregation/forcelink.hpp>
-#include <vespa/vespalib/util/signalhandler.h>
-#include <vespa/fastos/app.h>
-#include <thread>
-#include <getopt.h>
-
-#include <vespa/log/log.h>
-LOG_SETUP("fdispatch");
-
-
-using fdispatch::Fdispatch;
-using vespa::config::search::core::FdispatchrcConfig;
-using namespace std::literals;
-
-extern char FastS_VersionTag[];
-
-class FastS_FDispatchApp : public FastOS_Application
-{
-private:
- FastS_FDispatchApp(const FastS_FDispatchApp &);
- FastS_FDispatchApp& operator=(const FastS_FDispatchApp &);
-
-protected:
- vespalib::string _configId;
-
- bool CheckShutdownFlags () const {
- return (vespalib::SignalHandler::INT.check() || vespalib::SignalHandler::TERM.check());
- }
-
- void Usage();
- bool GetOptions(int *exitCode);
-
-public:
- int Main() override;
- FastS_FDispatchApp();
- ~FastS_FDispatchApp();
-};
-
-
-FastS_FDispatchApp::FastS_FDispatchApp()
-{
-}
-
-
-/**
- * Main program for a dispatcher node.
- */
-int
-FastS_FDispatchApp::Main()
-{
- int exitCode;
-
- forcelink_searchlib_expression();
- forcelink_searchlib_aggregation();
-
- if (!GetOptions(&exitCode)) {
- EV_STOPPING("fdispatch", (exitCode == 0) ? "clean shutdown" : "error");
- return exitCode;
- }
-
- exitCode = 0;
-
- EV_STARTED("fdispatch");
-
- vespalib::SignalHandler::INT.hook();
- vespalib::SignalHandler::TERM.hook();
- vespalib::SignalHandler::PIPE.ignore();
-
- std::unique_ptr<Fdispatch> myfdispatch;
- try {
- myfdispatch.reset(new Fdispatch(_configId));
- } catch (std::exception &ex) {
- LOG(error, "getting config: %s", (const char *) ex.what());
- exitCode = 1;
- EV_STOPPING("fdispatch", "error getting config");
- return exitCode;
- }
-
- try {
-
- if (!myfdispatch->Init()) {
- throw std::runtime_error("myfdispatch->Init(" + _configId + ") failed");
- }
- if (myfdispatch->Failed()) {
- throw std::runtime_error("myfdispatch->Failed()");
- }
- { // main loop scope
- vespalib::SimpleHealthProducer health;
- vespalib::SimpleMetricsProducer metrics;
- vespalib::StateServer stateServer(myfdispatch->getHealthPort(), health, metrics, myfdispatch->getComponentConfig());
- while (!CheckShutdownFlags()) {
- if (myfdispatch->Failed()) {
- throw std::runtime_error("myfdispatch->Failed()");
- }
- std::this_thread::sleep_for(100ms);
- if (!myfdispatch->CheckTempFail())
- break;
- }
- } // main loop scope
- if (myfdispatch->Failed()) {
- throw std::runtime_error("myfdispatch->Failed()");
- }
- } catch (std::runtime_error &e) {
- LOG(warning, "got std::runtime_error during init: %s", e.what());
- exitCode = 1;
- } catch (std::exception &e) {
- LOG(error, "got exception during init: %s", e.what());
- exitCode = 1;
- }
-
- LOG(debug, "Deleting fdispatch");
- myfdispatch.reset();
- LOG(debug, "COMPLETION: Exiting");
- EV_STOPPING("fdispatch", (exitCode == 0) ? "clean shutdown" : "error");
- return exitCode;
-}
-
-
-bool
-FastS_FDispatchApp::GetOptions(int *exitCode)
-{
- int errflg = 0;
- int c;
- const char *optArgument;
- int longopt_index; /* Shows which long option was used */
- static struct option longopts[] = {
- { "config-id", 1, NULL, 0 },
- { NULL, 0, NULL, 0 }
- };
- enum longopts_enum {
- LONGOPT_CONFIGID
- };
- int optIndex = 1; // Start with argument 1
- while ((c = GetOptLong("c:", optArgument, optIndex, longopts, &longopt_index)) != -1) {
- switch (c) {
- case 0:
- switch (longopt_index) {
- case LONGOPT_CONFIGID:
- break;
- default:
- if (optArgument != NULL) {
- LOG(info, "longopt %s with arg %s", longopts[longopt_index].name, optArgument);
- } else {
- LOG(info, "longopt %s", longopts[longopt_index].name);
- }
- break;
- }
- break;
- case 'c':
- _configId = optArgument;
- break;
- case '?':
- default:
- errflg++;
- }
- }
- if (errflg) {
- Usage();
- *exitCode = 1;
- return false;
- }
- return true;
-}
-
-void
-FastS_FDispatchApp::Usage()
-{
- printf("FAST Search - fdispatch %s\n", FastS_VersionTag);
- printf("\n"
- "USAGE:\n"
- "\n"
- "fdispatch [-C fsroot] [-c rcFile] [-P preHttPort] [-V] [-w FFF]\n"
- "\n"
- " -C fsroot Fast Search's root directory\n"
- " (default /usr/fastsearch/fastserver4)\n"
- " -c rcFile fdispatchrc file (default FASTSEARCHROOT/etc/fdispatchrc)\n"
- " -P preHttPort pre-allocated socket number for http service\n"
- " -V show version and exit\n"
- " -w FFF hex value (max 32 bit) for the Verbose mask\n"
- "\n");
-}
-
-
-FastS_FDispatchApp::~FastS_FDispatchApp()
-{
-}
-
-
-int
-main(int argc, char **argv)
-{
- FastS_FDispatchApp app;
- int retval;
-
- // Maybe this should be handeled by FastOS
- setlocale(LC_ALL, "C");
-
- retval = app.Entry(argc, argv);
-
- return retval;
-}
diff --git a/searchcore/src/apps/vespa-proton-cmd/vespa-proton-cmd.cpp b/searchcore/src/apps/vespa-proton-cmd/vespa-proton-cmd.cpp
index 2e097e7141f..3b3b5f412d2 100644
--- a/searchcore/src/apps/vespa-proton-cmd/vespa-proton-cmd.cpp
+++ b/searchcore/src/apps/vespa-proton-cmd/vespa-proton-cmd.cpp
@@ -216,9 +216,12 @@ public:
fprintf(stderr, "Config system is not up. Verify that vespa is started.");
return 3;
}
-
- initRPC();
-
+ try {
+ initRPC();
+ } catch (vespalib::Exception &e) {
+ fprintf(stderr, "Exception in network initialization: %s", e.what());
+ return 2;
+ }
int port = 0;
std::string spec = _argv[1];
diff --git a/searchcore/src/tests/fdispatch/fnet_search/.gitignore b/searchcore/src/tests/fdispatch/fnet_search/.gitignore
deleted file mode 100644
index b525d6fcd38..00000000000
--- a/searchcore/src/tests/fdispatch/fnet_search/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-searchcore_search_path_test_app
-searchcore_search_coverage_test_app
diff --git a/searchcore/src/tests/fdispatch/fnet_search/CMakeLists.txt b/searchcore/src/tests/fdispatch/fnet_search/CMakeLists.txt
deleted file mode 100644
index c4e1608d6da..00000000000
--- a/searchcore/src/tests/fdispatch/fnet_search/CMakeLists.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-vespa_add_executable(searchcore_search_path_test_app TEST
- SOURCES
- search_path_test.cpp
- DEPENDS
- searchcore_fdispatch_search
-)
-vespa_add_test(NAME searchcore_search_path_test_app COMMAND searchcore_search_path_test_app)
-
-vespa_add_executable(searchcore_search_coverage_test_app TEST
- SOURCES
- search_coverage_test.cpp
- DEPENDS
- searchcore_fdispatch_search
- searchcore_fdcommon
- searchcore_grouping
-)
-vespa_add_test(NAME searchcore_search_coverage_test_app COMMAND searchcore_search_coverage_test_app)
diff --git a/searchcore/src/tests/fdispatch/fnet_search/search_coverage_test.cpp b/searchcore/src/tests/fdispatch/fnet_search/search_coverage_test.cpp
deleted file mode 100644
index d598583437d..00000000000
--- a/searchcore/src/tests/fdispatch/fnet_search/search_coverage_test.cpp
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vespalib/testkit/testapp.h>
-
-#include <vespa/searchcore/fdispatch/search/fnet_search.h>
-#include <vespa/searchlib/engine/searchreply.h>
-
-#include <vespa/log/log.h>
-LOG_SETUP("search_coverage_test");
-
-using namespace fdispatch;
-using search::engine::SearchReply;
-
-std::vector<FastS_FNET_SearchNode>
-createNodes(uint32_t count) {
- std::vector<FastS_FNET_SearchNode> nodes;
- nodes.reserve(count);
- for (uint32_t partid(0); partid < count; partid++) {
- nodes.emplace_back(nullptr, partid);
- }
- return nodes;
-}
-
-void
-query(FastS_FNET_SearchNode & node) {
- node.DirtySetChannelOnlyForTesting((FNET_Channel *) 1);
-}
-
-void
-respond(FastS_FNET_SearchNode & node, size_t covered, size_t active, size_t soonActive, uint32_t degradeReason) {
- node._qresult = new FS4Packet_QUERYRESULTX();
- node._qresult->_coverageDocs = covered;
- node._qresult->_activeDocs = active;
- node._qresult->_soonActiveDocs = soonActive;
- node._qresult->_coverageDegradeReason = degradeReason;
-}
-
-void
-respond(FastS_FNET_SearchNode & node, size_t covered, size_t active, size_t soonActive) {
- respond(node, covered, active, soonActive, 0);
-}
-
-void disconnectNodes(std::vector<FastS_FNET_SearchNode> & nodes) {
- for (auto & node : nodes) {
- node.DirtySetChannelOnlyForTesting(nullptr);
- }
-}
-TEST("testCoverageWhenAllNodesAreUp") {
- std::vector<FastS_FNET_SearchNode> nodes = createNodes(4);
- for (auto & node : nodes) {
- query(node);
- respond(node, 25, 30, 50);
- }
- FastS_SearchInfo si = FastS_FNET_Search::computeCoverage(nodes, 1, false);
- EXPECT_EQUAL(4u, si._nodesQueried);
- EXPECT_EQUAL(4u, si._nodesReplied);
- EXPECT_EQUAL(100u, si._coverageDocs);
- EXPECT_EQUAL(120u, si._activeDocs);
- EXPECT_EQUAL(200u, si._soonActiveDocs);
- EXPECT_EQUAL(0u, si._degradeReason);
- disconnectNodes(nodes);
-}
-
-TEST("testCoverageWhenNoNodesAreUp") {
- std::vector<FastS_FNET_SearchNode> nodes = createNodes(4);
- for (auto & node : nodes) {
- query(node);
- }
- FastS_SearchInfo si = FastS_FNET_Search::computeCoverage(nodes, 1, false);
- EXPECT_EQUAL(4u, si._nodesQueried);
- EXPECT_EQUAL(0u, si._nodesReplied);
- EXPECT_EQUAL(0u, si._coverageDocs);
- EXPECT_EQUAL(0u, si._activeDocs);
- EXPECT_EQUAL(0u, si._soonActiveDocs);
- EXPECT_EQUAL(SearchReply::Coverage::TIMEOUT, si._degradeReason);
- disconnectNodes(nodes);
-}
-
-TEST("testCoverageWhenNoNodesAreUpWithAdaptiveTimeout") {
- std::vector<FastS_FNET_SearchNode> nodes = createNodes(4);
- for (auto & node : nodes) {
- query(node);
- }
- FastS_SearchInfo si = FastS_FNET_Search::computeCoverage(nodes, 1, true);
- EXPECT_EQUAL(4u, si._nodesQueried);
- EXPECT_EQUAL(0u, si._nodesReplied);
- EXPECT_EQUAL(0u, si._coverageDocs);
- EXPECT_EQUAL(0u, si._activeDocs);
- EXPECT_EQUAL(0u, si._soonActiveDocs);
- EXPECT_EQUAL(SearchReply::Coverage::ADAPTIVE_TIMEOUT, si._degradeReason);
- disconnectNodes(nodes);
-}
-
-TEST("testCoverageWhen1NodesIsDown") {
- std::vector<FastS_FNET_SearchNode> nodes = createNodes(4);
- for (auto & node : nodes) {
- query(node);
- }
- respond(nodes[0], 25, 30, 50);
- respond(nodes[2], 25, 30, 50);
- respond(nodes[3], 25, 30, 50);
-
- FastS_SearchInfo si = FastS_FNET_Search::computeCoverage(nodes, 1, false);
- EXPECT_EQUAL(4u, si._nodesQueried);
- EXPECT_EQUAL(3u, si._nodesReplied);
- EXPECT_EQUAL(75u, si._coverageDocs);
- EXPECT_EQUAL(120u, si._activeDocs);
- EXPECT_EQUAL(200u, si._soonActiveDocs);
- EXPECT_EQUAL(SearchReply::Coverage::TIMEOUT, si._degradeReason);
-
- // Do not trigger dirty magic when you still have enough coverage in theory
- si = FastS_FNET_Search::computeCoverage(nodes, 2, false);
- EXPECT_EQUAL(4u, si._nodesQueried);
- EXPECT_EQUAL(3u, si._nodesReplied);
- EXPECT_EQUAL(75u, si._coverageDocs);
- EXPECT_EQUAL(90u, si._activeDocs);
- EXPECT_EQUAL(150u, si._soonActiveDocs);
- EXPECT_EQUAL(0u, si._degradeReason);
- disconnectNodes(nodes);
-}
-
-TEST("testCoverageWhen1NodeDoesnotReplyWithAdaptiveTimeout") {
- std::vector<FastS_FNET_SearchNode> nodes = createNodes(4);
- for (auto & node : nodes) {
- query(node);
- }
- respond(nodes[0], 25, 30, 50);
- respond(nodes[2], 25, 30, 50);
- respond(nodes[3], 25, 30, 50);
-
- FastS_SearchInfo si = FastS_FNET_Search::computeCoverage(nodes, 1, true);
- EXPECT_EQUAL(4u, si._nodesQueried);
- EXPECT_EQUAL(3u, si._nodesReplied);
- EXPECT_EQUAL(75u, si._coverageDocs);
- EXPECT_EQUAL(120u, si._activeDocs);
- EXPECT_EQUAL(200u, si._soonActiveDocs);
- EXPECT_EQUAL(SearchReply::Coverage::ADAPTIVE_TIMEOUT, si._degradeReason);
- disconnectNodes(nodes);
-}
-
-
-TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/fdispatch/fnet_search/search_path_test.cpp b/searchcore/src/tests/fdispatch/fnet_search/search_path_test.cpp
deleted file mode 100644
index b62fb8d14f2..00000000000
--- a/searchcore/src/tests/fdispatch/fnet_search/search_path_test.cpp
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/log/log.h>
-LOG_SETUP("search_path_test");
-#include <vespa/vespalib/testkit/testapp.h>
-
-#include <vespa/searchcore/fdispatch/search/search_path.h>
-#include <vespa/searchcore/fdispatch/search/fnet_search.h>
-#include <iostream>
-
-using namespace fdispatch;
-
-template <typename T>
-vespalib::string
-toString(const T &val)
-{
- std::ostringstream oss;
- oss << "[";
- bool first = true;
- for (auto v : val) {
- if (!first) oss << ",";
- oss << v;
- first = false;
- }
- oss << "]";
- return oss.str();
-}
-
-void
-assertParts(const std::vector<size_t> &exp, const SearchPath::NodeList &act)
-{
- std::string expStr = toString(exp);
- std::string actStr = toString(act);
- std::cout << "assertParts(" << expStr << "," << actStr << ")" << std::endl;
- EXPECT_EQUAL(expStr, actStr);
-}
-
-void
-assertElement(const std::vector<size_t> &parts, size_t row, const SearchPath::Element &elem)
-{
- assertParts(parts, elem.nodes());
- EXPECT_TRUE(elem.hasRow());
- EXPECT_EQUAL(row, elem.row());
-}
-
-void
-assertElement(const std::vector<size_t> &parts, const SearchPath::Element &elem)
-{
- assertParts(parts, elem.nodes());
- EXPECT_FALSE(elem.hasRow());
-}
-
-void
-assertSinglePath(const std::vector<size_t> &parts, const vespalib::string &spec, size_t numNodes=0)
-{
- SearchPath p(spec, numNodes);
- EXPECT_EQUAL(1u, p.elements().size());
- assertElement(parts, p.elements().front());
-}
-
-void
-assertSinglePath(const std::vector<size_t> &parts, size_t row, const vespalib::string &spec, size_t numNodes=0)
-{
- SearchPath p(spec, numNodes);
- EXPECT_EQUAL(1u, p.elements().size());
- assertElement(parts, row, p.elements().front());
-}
-
-TEST("requireThatSinglePartCanBeSpecified")
-{
- assertSinglePath({0}, "0/");
-}
-
-TEST("requireThatMultiplePartsCanBeSpecified")
-{
- assertSinglePath({1,3,5}, "1,3,5/");
-}
-
-TEST("requireThatRangePartsCanBeSpecified")
-{
- assertSinglePath({1,2,3}, "[1,4>/", 6);
-}
-
-TEST("requireThatAllPartsCanBeSpecified")
-{
- assertSinglePath({0,1,2,3}, "*/", 4);
-}
-
-TEST("requireThatRowCanBeSpecified")
-{
- assertSinglePath({1}, 2, "1/2");
-}
-
-TEST("requireThatMultipleSimpleElementsCanBeSpecified")
-{
- SearchPath p("0/1;2/3", 3);
- EXPECT_EQUAL(2u, p.elements().size());
- assertElement({0}, 1, p.elements()[0]);
- assertElement({2}, 3, p.elements()[1]);
-}
-
-TEST("requireThatMultipleComplexElementsCanBeSpecified")
-{
- SearchPath p("0,2,4/1;1,3,5/3", 6);
- EXPECT_EQUAL(2u, p.elements().size());
- assertElement({0,2,4}, 1, p.elements()[0]);
- assertElement({1,3,5}, 3, p.elements()[1]);
-}
-
-TEST("requireThatMultipleElementsWithoutRowsCanBeSpecified")
-{
- SearchPath p("0/;1/", 2);
- EXPECT_EQUAL(2u, p.elements().size());
- assertElement({0}, p.elements()[0]);
- assertElement({1}, p.elements()[1]);
-}
-
-TEST("require that sizeof FastS_FNET_SearchNode is reasonable")
-{
- EXPECT_EQUAL(232u, sizeof(FastS_FNET_SearchNode));
- EXPECT_EQUAL(40u, sizeof(search::common::SortDataIterator));
-}
-
-TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/fdispatch/randomrow/.gitignore b/searchcore/src/tests/fdispatch/randomrow/.gitignore
deleted file mode 100644
index bfe075b287a..00000000000
--- a/searchcore/src/tests/fdispatch/randomrow/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-searchcore_randomrow_test_app
diff --git a/searchcore/src/tests/fdispatch/randomrow/CMakeLists.txt b/searchcore/src/tests/fdispatch/randomrow/CMakeLists.txt
deleted file mode 100644
index 2b05af6a2f6..00000000000
--- a/searchcore/src/tests/fdispatch/randomrow/CMakeLists.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-vespa_add_executable(searchcore_randomrow_test_app TEST
- SOURCES
- randomrow_test.cpp
- DEPENDS
- searchcore_fdispatch_search
- searchcore_util
- searchcore_fdcommon
-)
-vespa_add_test(NAME searchcore_randomrow_test_app COMMAND searchcore_randomrow_test_app)
diff --git a/searchcore/src/tests/fdispatch/randomrow/randomrow_test.cpp b/searchcore/src/tests/fdispatch/randomrow/randomrow_test.cpp
deleted file mode 100644
index c6791436bff..00000000000
--- a/searchcore/src/tests/fdispatch/randomrow/randomrow_test.cpp
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/log/log.h>
-LOG_SETUP("randomrow_test");
-#include <vespa/vespalib/testkit/testapp.h>
-
-#include <vespa/searchcore/fdispatch/search/plain_dataset.h>
-
-using fdispatch::StateOfRows;
-
-TEST("requireThatEmpyStateReturnsRowZero")
-{
- StateOfRows s(1, 1.0, 1000);
- EXPECT_EQUAL(0u, s.getRandomWeightedRow());
- EXPECT_EQUAL(1.0, s.getRowState(0).getAverageSearchTime());
-}
-
-TEST("requireThatDecayWorks")
-{
- constexpr double SMALL = 0.00001;
- StateOfRows s(1, 1.0, 1000);
- s.updateSearchTime(1.0, 0);
- EXPECT_EQUAL(1.0, s.getRowState(0).getAverageSearchTime());
- s.updateSearchTime(2.0, 0);
- EXPECT_APPROX(1.02326, s.getRowState(0).getAverageSearchTime(), SMALL);
- s.updateSearchTime(2.0, 0);
- EXPECT_APPROX(1.04545, s.getRowState(0).getAverageSearchTime(), SMALL);
- s.updateSearchTime(0.1, 0);
- s.updateSearchTime(0.1, 0);
- s.updateSearchTime(0.1, 0);
- s.updateSearchTime(0.1, 0);
- EXPECT_APPROX(0.966667, s.getRowState(0).getAverageSearchTime(), SMALL);
- for (size_t i(0); i < 10000; i++) {
- s.updateSearchTime(1.0, 0);
- }
- EXPECT_APPROX(1.0, s.getRowState(0).getAverageSearchTime(), SMALL);
- s.updateSearchTime(0.1, 0);
- EXPECT_APPROX(0.9991, s.getRowState(0).getAverageSearchTime(), SMALL);
- for (size_t i(0); i < 10000; i++) {
- s.updateSearchTime(0.0, 0);
- }
- EXPECT_APPROX(0.001045, s.getRowState(0).getAverageSearchTime(), SMALL);
-}
-
-TEST("requireWeightedSelectionWorks")
-{
- StateOfRows s(5, 1.0, 1000);
- EXPECT_EQUAL(0u, s.getWeightedNode(-0.1));
- EXPECT_EQUAL(0u, s.getWeightedNode(0.0));
- EXPECT_EQUAL(0u, s.getWeightedNode(0.1));
- EXPECT_EQUAL(1u, s.getWeightedNode(0.2));
- EXPECT_EQUAL(1u, s.getWeightedNode(0.39));
- EXPECT_EQUAL(2u, s.getWeightedNode(0.4));
- EXPECT_EQUAL(3u, s.getWeightedNode(0.6));
- EXPECT_EQUAL(4u, s.getWeightedNode(0.8));
- EXPECT_EQUAL(4u, s.getWeightedNode(2.0));
-}
-
-TEST("requireWeightedSelectionWorksFineWithDifferentWeights")
-{
- StateOfRows s(5, 1.0, 1000);
- s.getRowState(0).setAverageSearchTime(0.1);
- s.getRowState(1).setAverageSearchTime(0.2);
- s.getRowState(2).setAverageSearchTime(0.3);
- s.getRowState(3).setAverageSearchTime(0.4);
- s.getRowState(4).setAverageSearchTime(0.5);
- EXPECT_EQUAL(0.1, s.getRowState(0).getAverageSearchTime());
- EXPECT_EQUAL(0.2, s.getRowState(1).getAverageSearchTime());
- EXPECT_EQUAL(0.3, s.getRowState(2).getAverageSearchTime());
- EXPECT_EQUAL(0.4, s.getRowState(3).getAverageSearchTime());
- EXPECT_EQUAL(0.5, s.getRowState(4).getAverageSearchTime());
- EXPECT_EQUAL(0u, s.getWeightedNode(-0.1));
- EXPECT_EQUAL(0u, s.getWeightedNode(0.0));
- EXPECT_EQUAL(0u, s.getWeightedNode(0.4379));
- EXPECT_EQUAL(1u, s.getWeightedNode(0.4380));
- EXPECT_EQUAL(1u, s.getWeightedNode(0.6569));
- EXPECT_EQUAL(2u, s.getWeightedNode(0.6570));
- EXPECT_EQUAL(2u, s.getWeightedNode(0.8029));
- EXPECT_EQUAL(3u, s.getWeightedNode(0.8030));
- EXPECT_EQUAL(3u, s.getWeightedNode(0.9124));
- EXPECT_EQUAL(4u, s.getWeightedNode(0.9125));
- EXPECT_EQUAL(4u, s.getWeightedNode(2.0));
-}
-
-TEST("require randomness")
-{
- StateOfRows s(3, 1.0, 1000);
- s.getRowState(0).setAverageSearchTime(1.0);
- s.getRowState(1).setAverageSearchTime(1.0);
- s.getRowState(2).setAverageSearchTime(1.0);
- size_t counts[3] = {0,0,0};
- for (size_t i(0); i < 1000; i++) {
- counts[s.getRandomWeightedRow()]++;
- }
- EXPECT_EQUAL(322ul, counts[0]);
- EXPECT_EQUAL(345ul, counts[1]);
- EXPECT_EQUAL(333ul, counts[2]);
-}
-
-TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/vespa/searchcore/fdispatch/.gitignore b/searchcore/src/vespa/searchcore/fdispatch/.gitignore
deleted file mode 100644
index fe8e24952a5..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-ID
diff --git a/searchcore/src/vespa/searchcore/fdispatch/OWNERS b/searchcore/src/vespa/searchcore/fdispatch/OWNERS
deleted file mode 100644
index f4d47806ed9..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/OWNERS
+++ /dev/null
@@ -1,2 +0,0 @@
-baldersheim
-toregge
diff --git a/searchcore/src/vespa/searchcore/fdispatch/common/CMakeLists.txt b/searchcore/src/vespa/searchcore/fdispatch/common/CMakeLists.txt
deleted file mode 100644
index 45261162e93..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/common/CMakeLists.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-vespa_add_library(searchcore_fdcommon STATIC
- SOURCES
- appcontext.cpp
- rpc.cpp
- search.cpp
- timestat.cpp
- DEPENDS
-)
diff --git a/searchcore/src/vespa/searchcore/fdispatch/common/appcontext.cpp b/searchcore/src/vespa/searchcore/fdispatch/common/appcontext.cpp
deleted file mode 100644
index a859c53b4e3..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/common/appcontext.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "appcontext.h"
-#include <chrono>
-
-double FastS_TimeKeeper::GetTime() const {
- using clock = std::chrono::steady_clock;
- using seconds = std::chrono::duration<double, std::ratio<1,1>>;
- return std::chrono::duration_cast<seconds>(clock::now().time_since_epoch()).count();
-}
-
-//---------------------------------------------------------------------
-
-FastS_AppContext::FastS_AppContext()
- : _timeKeeper(),
- _createTime(_timeKeeper.GetTime())
-{
-}
-
-FastS_AppContext::~FastS_AppContext() = default;
-
-FNET_Transport *
-FastS_AppContext::GetFNETTransport()
-{
- return nullptr;
-}
-
-FNET_Scheduler *
-FastS_AppContext::GetFNETScheduler()
-{
- return nullptr;
-}
-
-FastS_NodeManager *
-FastS_AppContext::GetNodeManager()
-{
- return nullptr;
-}
-
-FastS_DataSetCollection *
-FastS_AppContext::GetDataSetCollection()
-{
- return nullptr;
-}
-
-FastOS_ThreadPool *
-FastS_AppContext::GetThreadPool()
-{
- return nullptr;
-}
-
-uint32_t
-FastS_AppContext::getDispatchLevel()
-{
- return 0u;
-}
diff --git a/searchcore/src/vespa/searchcore/fdispatch/common/appcontext.h b/searchcore/src/vespa/searchcore/fdispatch/common/appcontext.h
deleted file mode 100644
index 5235722d3b4..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/common/appcontext.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <cstdint>
-
-class FastS_NodeManager;
-class FNET_Transport;
-class FNET_Scheduler;
-class FastS_DataSetCollection;
-class FastOS_ThreadPool;
-
-class FastS_TimeKeeper
-{
-public:
- double GetTime() const;
-};
-
-
-class FastS_AppContext
-{
-private:
- FastS_TimeKeeper _timeKeeper;
- double _createTime;
-
-public:
- FastS_AppContext();
- virtual ~FastS_AppContext();
-
- FastS_TimeKeeper *GetTimeKeeper() { return &_timeKeeper; }
-
- virtual FastS_NodeManager *GetNodeManager();
- virtual FNET_Transport *GetFNETTransport();
- virtual FNET_Scheduler *GetFNETScheduler();
- virtual FastS_DataSetCollection *GetDataSetCollection();
- virtual FastOS_ThreadPool *GetThreadPool();
- virtual uint32_t getDispatchLevel();
-private:
-};
diff --git a/searchcore/src/vespa/searchcore/fdispatch/common/properties.h b/searchcore/src/vespa/searchcore/fdispatch/common/properties.h
deleted file mode 100644
index 783022ac96e..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/common/properties.h
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-class FastS_IProperties
-{
-public:
- /**
- * Destructor. No cleanup needed for base class.
- */
- virtual ~FastS_IProperties() { }
-
- virtual bool IsSet (const char *key) = 0;
- virtual bool BoolVal (const char *key) = 0;
- virtual const char *StrVal (const char *key, const char *def = NULL) = 0;
- virtual int IntVal (const char *key, int def = -1) = 0;
- virtual double DoubleVal(const char *key, double def = 0.0) = 0;
-};
-
diff --git a/searchcore/src/vespa/searchcore/fdispatch/common/rpc.cpp b/searchcore/src/vespa/searchcore/fdispatch/common/rpc.cpp
deleted file mode 100644
index 437482dddd2..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/common/rpc.cpp
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-
-#include "rpc.h"
-#include "appcontext.h"
-#include <vespa/fnet/info.h>
-
-extern char FastS_VersionTag[];
-
-FastS_RPC::FastS_RPC(FastS_AppContext *appCtx) :
- _appCtx(appCtx),
- _transport(),
- _supervisor(&_transport),
- _sbregister(_supervisor, slobrok::ConfiguratorFactory("admin/slobrok.0"))
-{
-}
-
-bool FastS_RPC::Start() {
- return _transport.Start(_appCtx->GetThreadPool());
-}
-
-void FastS_RPC::ShutDown() {
- _transport.ShutDown(true);
-}
-
-bool
-FastS_RPC::Init(int port, const vespalib::string &myHeartbeatId)
-{
- bool rc = true;
-
- char spec[4096];
- snprintf(spec, 4096, "tcp/%d", port);
- rc = rc && _supervisor.Listen(spec);
- if (rc) {
- FRT_ReflectionBuilder rb(&_supervisor);
- RegisterMethods(&rb);
- _sbregister.registerName(myHeartbeatId);
- }
- return rc;
-}
-
-
-void
-FastS_RPC::RegisterMethods(FRT_ReflectionBuilder *rb)
-{
- rb->DefineMethod("fs.admin.getNodeType", "", "s",
- FRT_METHOD(FastS_RPC::RPC_GetNodeType), this);
- rb->MethodDesc("Get string indicating the node type");
- rb->ReturnDesc("type", "node type");
- //---------------------------------------------------------------//
- rb->DefineMethod("fs.admin.getCompileInfo", "", "*",
- FRT_METHOD(FastS_RPC::RPC_GetCompileInfo), this);
- rb->MethodDesc("Obtain compile info for this node");
- rb->ReturnDesc("info", "any number of descriptive strings");
- //---------------------------------------------------------------//
-}
-
-
-void
-FastS_RPC::RPC_GetCompileInfo(FRT_RPCRequest *req)
-{
- FRT_Values &ret = *req->GetReturn();
- ret.AddString("using juniper (api version 2)");
-
-#ifdef NO_MONITOR_LATENCY_CHECK
- ret.AddString("monitor latency check disabled");
-#endif
-#ifdef CUSTOM_TEST_SHUTDOWN
- ret.AddString("Win32: debug shutdown for memory leak detection enabled");
-#endif
- ret.AddString("default transport is 'fnet'");
-
- const char *prefix = "version tag: ";
- uint32_t prefix_len = strlen(prefix);
- uint32_t len = prefix_len + strlen(FastS_VersionTag);
- if (len == prefix_len) {
- ret.AddString("version tag not available");
- } else {
- char *str = ret.AddString(len + 1);
- sprintf(str, "%s%s", prefix, FastS_VersionTag);
- }
-
- ret.AddString("fastos X current");
- ret.AddString(FNET_Info::GetFNETVersion());
-}
-
-
-void
-FastS_RPC::RPC_GetNodeType_Proxy(FRT_RPCRequest *req)
-{
- RPC_GetNodeType(req);
-}
diff --git a/searchcore/src/vespa/searchcore/fdispatch/common/rpc.h b/searchcore/src/vespa/searchcore/fdispatch/common/rpc.h
deleted file mode 100644
index b5519aab22d..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/common/rpc.h
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/fnet/frt/invokable.h>
-#include <vespa/fnet/frt/supervisor.h>
-#include <vespa/fnet/transport.h>
-#include <vespa/vespalib/stllike/string.h>
-#include <vespa/slobrok/sbregister.h>
-
-class FastS_AppContext;
-
-class FastS_RPC : public FRT_Invokable
-{
-private:
- FastS_RPC(const FastS_RPC &);
- FastS_RPC& operator=(const FastS_RPC &);
-
- FastS_AppContext *_appCtx;
- FNET_Transport _transport;
- FRT_Supervisor _supervisor;
- slobrok::api::RegisterAPI _sbregister;
-
-public:
- FastS_RPC(FastS_AppContext *appCtx);
- ~FastS_RPC() {}
-
- FastS_AppContext *GetAppCtx() { return _appCtx; }
- FRT_Supervisor *GetSupervisor() { return &_supervisor; }
- bool Init(int port, const vespalib::string& myHeartbeatId);
- bool Start();
- void ShutDown();
-
- // Register RPC Methods
-
- virtual void RegisterMethods(FRT_ReflectionBuilder *rb);
-
- // RPC methods implemented here
-
- void RPC_GetCompileInfo(FRT_RPCRequest *req);
- void RPC_GetResultConfig(FRT_RPCRequest *req);
-
- // RPC Proxy Methods
-
- void RPC_GetNodeType_Proxy(FRT_RPCRequest *req);
-
- // RPC methods to be implemented by subclasses
-
- virtual void RPC_GetNodeType(FRT_RPCRequest *req) = 0;
-};
-
diff --git a/searchcore/src/vespa/searchcore/fdispatch/common/search.cpp b/searchcore/src/vespa/searchcore/fdispatch/common/search.cpp
deleted file mode 100644
index 3694fad7882..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/common/search.cpp
+++ /dev/null
@@ -1,221 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "search.h"
-
-//---------------------------------------------------------------------
-
-FastS_SearchAdapter::FastS_SearchAdapter(FastS_ISearch *search)
- : _search(search)
-{
-}
-
-
-FastS_SearchAdapter::~FastS_SearchAdapter()
-{
-}
-
-
-bool
-FastS_SearchAdapter::IsAsync()
-{
- return _search->IsAsync();
-}
-
-
-uint32_t
-FastS_SearchAdapter::GetDataSetID()
-{
- return _search->GetDataSetID();
-}
-
-
-FastS_SearchInfo *
-FastS_SearchAdapter::GetSearchInfo()
-{
- return _search->GetSearchInfo();
-}
-
-
-FastS_ISearch::RetCode
-FastS_SearchAdapter::SetAsyncArgs(FastS_ISearchOwner *owner)
-{
- return _search->SetAsyncArgs(owner);
-}
-
-
-FastS_ISearch::RetCode
-FastS_SearchAdapter::setSearchRequest(const search::engine::SearchRequest * request)
-{
- return _search->setSearchRequest(request);
-}
-
-
-FastS_ISearch::RetCode
-FastS_SearchAdapter::SetGetDocsumArgs(search::docsummary::GetDocsumArgs *docsumArgs)
-{
- return _search->SetGetDocsumArgs(docsumArgs);
-}
-
-
-FastS_ISearch::RetCode
-FastS_SearchAdapter::Search(uint32_t searchOffset,
- uint32_t maxhits, uint32_t minhits)
-{
- return _search->Search(searchOffset, maxhits, minhits);
-}
-
-
-FastS_ISearch::RetCode
-FastS_SearchAdapter::ProcessQueryDone()
-{
- return _search->ProcessQueryDone();
-}
-
-
-FastS_QueryResult *
-FastS_SearchAdapter::GetQueryResult()
-{
- return _search->GetQueryResult();
-}
-
-
-FastS_ISearch::RetCode
-FastS_SearchAdapter::GetDocsums(const FastS_hitresult *hits, uint32_t hitcnt)
-{
- return _search->GetDocsums(hits, hitcnt);
-}
-
-
-FastS_ISearch::RetCode
-FastS_SearchAdapter::ProcessDocsumsDone()
-{
- return _search->ProcessDocsumsDone();
-}
-
-
-FastS_DocsumsResult *
-FastS_SearchAdapter::GetDocsumsResult()
-{
- return _search->GetDocsumsResult();
-}
-
-
-search::engine::ErrorCode
-FastS_SearchAdapter::GetErrorCode()
-{
- return _search->GetErrorCode();
-}
-
-
-const char *
-FastS_SearchAdapter::GetErrorMessage()
-{
- return _search->GetErrorMessage();
-}
-
-
-void
-FastS_SearchAdapter::Interrupt()
-{
- _search->Interrupt();
-}
-
-
-void
-FastS_SearchAdapter::Free()
-{
- _search->Free();
- delete this;
-}
-
-//---------------------------------------------------------------------
-
-FastS_SyncSearchAdapter::FastS_SyncSearchAdapter(FastS_ISearch *search)
- : FastS_SearchAdapter(search),
- _cond(),
- _waitQuery(false),
- _queryDone(false),
- _waitDocsums(false),
- _docsumsDone(false)
-{}
-
-FastS_SyncSearchAdapter::~FastS_SyncSearchAdapter() = default;
-
-void
-FastS_SyncSearchAdapter::DoneQuery(FastS_ISearch *)
-{
- std::lock_guard<std::mutex> guard(_lock);
- _queryDone = true;
- if (_waitQuery) {
- _cond.notify_one();
- }
-}
-
-
-void
-FastS_SyncSearchAdapter::DoneDocsums(FastS_ISearch *)
-{
- std::lock_guard<std::mutex> guard(_lock);
- _docsumsDone = true;
- if (_waitDocsums) {
- _cond.notify_one();
- }
-}
-
-
-void
-FastS_SyncSearchAdapter::WaitQueryDone()
-{
- std::unique_lock<std::mutex> guard(_lock);
- _waitQuery = true;
- while (!_queryDone) {
- _cond.wait(guard);
- }
-}
-
-
-void
-FastS_SyncSearchAdapter::WaitDocsumsDone()
-{
- std::unique_lock<std::mutex> guard(_lock);
- _waitDocsums = true;
- while (!_docsumsDone) {
- _cond.wait(guard);
- }
-}
-
-
-bool
-FastS_SyncSearchAdapter::IsAsync()
-{
- return false;
-}
-
-
-FastS_ISearch::RetCode
-FastS_SyncSearchAdapter::SetAsyncArgs(FastS_ISearchOwner *)
-{
- return RET_ERROR;
-}
-
-
-FastS_ISearch::RetCode
-FastS_SyncSearchAdapter::Search(uint32_t searchOffset, uint32_t maxhits, uint32_t minhits)
-{
- RetCode res = _search->Search(searchOffset, maxhits, minhits);
- if (res == RET_INPROGRESS) {
- WaitQueryDone();
- }
- return (res == RET_ERROR) ? RET_ERROR : RET_OK;
-}
-
-
-FastS_ISearch::RetCode
-FastS_SyncSearchAdapter::GetDocsums(const FastS_hitresult *hits, uint32_t hitcnt)
-{
- RetCode res = _search->GetDocsums(hits, hitcnt);
- if (res == RET_INPROGRESS) {
- WaitDocsumsDone();
- }
- return (res == RET_ERROR) ? RET_ERROR : RET_OK;
-}
diff --git a/searchcore/src/vespa/searchcore/fdispatch/common/search.h b/searchcore/src/vespa/searchcore/fdispatch/common/search.h
deleted file mode 100644
index d3383abdee7..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/common/search.h
+++ /dev/null
@@ -1,393 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/searchsummary/docsummary/getdocsumargs.h>
-#include <vespa/searchlib/common/fslimits.h>
-#include <vespa/searchlib/engine/errorcodes.h>
-#include <vespa/searchlib/engine/searchrequest.h>
-#include <vespa/searchlib/common/packets.h>
-#include <vespa/document/base/globalid.h>
-#include <limits>
-#include <mutex>
-#include <condition_variable>
-
-class FastS_ISearch;
-
-
-//----------------------------------------------------------------
-
-class FastS_ISearchOwner
-{
-public:
- /**
- * Destructor. No cleanup needed for base class.
- */
- virtual ~FastS_ISearchOwner() { }
-
- virtual void DoneQuery(FastS_ISearch *search) = 0;
- virtual void DoneDocsums(FastS_ISearch *search) = 0;
-};
-
-//----------------------------------------------------------------
-
-class FastS_hitresult
-{
-public:
- const document::GlobalId & HT_GetGlobalID() const { return _gid; }
- search::HitRank HT_GetMetric() const { return _metric; }
- uint32_t HT_GetPartID() const { return _partition; }
- uint32_t getDistributionKey() const { return _distributionKey; }
-
- void HT_SetGlobalID(const document::GlobalId & val) { _gid = val; }
- void HT_SetMetric(search::HitRank val) { _metric = val; }
- void HT_SetPartID(uint32_t val) { _partition = val; }
- void setDistributionKey(uint32_t val) { _distributionKey = val; }
- document::GlobalId _gid;
- search::HitRank _metric;
- uint32_t _partition;
-private:
- uint32_t _distributionKey;
-};
-
-//----------------------------------------------------------------
-
-struct FastS_fullresult {
- uint32_t _partition;
- uint32_t _docid;
- document::GlobalId _gid;
- search::HitRank _metric;
- search::fs4transport::FS4Packet_DOCSUM::Buf _buf;
-};
-
-//----------------------------------------------------------------
-
-class FastS_SearchInfo
-{
-public:
- uint32_t _searchOffset;
- uint32_t _maxHits;
- uint64_t _coverageDocs;
- uint64_t _activeDocs;
- uint64_t _soonActiveDocs;
- uint32_t _degradeReason;
- uint16_t _nodesQueried;
- uint16_t _nodesReplied;
-
- FastS_SearchInfo()
- : _searchOffset(0),
- _maxHits(0),
- _coverageDocs(0),
- _activeDocs(0),
- _soonActiveDocs(0),
- _degradeReason(0),
- _nodesQueried(0),
- _nodesReplied(0)
- { }
-};
-
-//----------------------------------------------------------------
-
-class FastS_QueryResult
-{
-public:
- FastS_hitresult *_hitbuf;
- uint32_t _hitCount;
- uint64_t _totalHitCount;
- search::HitRank _maxRank;
- double _queryResultTime;
-
- uint32_t _groupResultLen;
- const char *_groupResult;
-
- uint32_t *_sortIndex;
- const char *_sortData;
-
- FastS_QueryResult()
- : _hitbuf(NULL),
- _hitCount(0),
- _totalHitCount(0),
- _maxRank(std::numeric_limits<search::HitRank>::is_integer ?
- std::numeric_limits<search::HitRank>::min() :
- - std::numeric_limits<search::HitRank>::max()),
- _queryResultTime(0.0),
- _groupResultLen(0),
- _groupResult(NULL),
- _sortIndex(NULL),
- _sortData(NULL)
- {}
-};
-
-//----------------------------------------------------------------
-
-class FastS_DocsumsResult
-{
-private:
- FastS_DocsumsResult(const FastS_DocsumsResult &);
- FastS_DocsumsResult& operator=(const FastS_DocsumsResult &);
-
-public:
- FastS_fullresult *_fullresult;
- uint32_t _fullResultCount;
- double _queryDocSumTime;
-
- FastS_DocsumsResult()
- : _fullresult(NULL),
- _fullResultCount(0),
- _queryDocSumTime(0.0)
- {}
-};
-
-//----------------------------------------------------------------
-
-class FastS_ISearch
-{
-public:
- /**
- * Destructor. No cleanup needed for base class.
- */
- virtual ~FastS_ISearch() { }
-
-
- enum RetCode {
- RET_OK = 0, // sync operation performed
- RET_INPROGRESS = 1, // async operation started
- RET_ERROR = 2 // illegal method invocation
- };
-
- // OBTAIN META-DATA
-
- virtual bool IsAsync() = 0;
- virtual uint32_t GetDataSetID() = 0;
- virtual FastS_SearchInfo *GetSearchInfo() = 0;
-
- // SET PARAMETERS
-
- virtual RetCode SetAsyncArgs(FastS_ISearchOwner *owner) = 0;
- virtual RetCode setSearchRequest(const search::engine::SearchRequest * request) = 0;
- virtual RetCode SetGetDocsumArgs(search::docsummary::GetDocsumArgs *docsumArgs) = 0;
-
- // SEARCH API
-
- virtual RetCode Search(uint32_t searchOffset,
- uint32_t maxhits, uint32_t minhits = 0) = 0;
- virtual RetCode ProcessQueryDone() = 0;
- virtual FastS_QueryResult *GetQueryResult() = 0;
-
- // DOCSUM API
-
- virtual RetCode GetDocsums(const FastS_hitresult *hits, uint32_t hitcnt) = 0;
- virtual RetCode ProcessDocsumsDone() = 0;
- virtual FastS_DocsumsResult *GetDocsumsResult() = 0;
-
- // ERROR HANDLING
-
- virtual search::engine::ErrorCode GetErrorCode() = 0;
- virtual const char *GetErrorMessage() = 0;
-
- // INTERRUPT OPERATION
-
- virtual void Interrupt() = 0;
-
- // GET RID OF OBJECT
-
- virtual void Free() = 0;
-};
-
-//----------------------------------------------------------------
-
-class FastS_SearchBase : public FastS_ISearch
-{
-protected:
- uint32_t _dataSetID;
- search::engine::ErrorCode _errorCode;
- char *_errorMessage;
- const search::engine::SearchRequest *_queryArgs;
- search::docsummary::GetDocsumArgs *_docsumArgs;
- FastS_SearchInfo _searchInfo;
- FastS_QueryResult _queryResult;
- FastS_DocsumsResult _docsumsResult;
-
-public:
- FastS_SearchBase(const FastS_SearchBase &) = delete;
- FastS_SearchBase& operator=(const FastS_SearchBase &) = delete;
- FastS_SearchBase(uint32_t dataSetID)
- : _dataSetID(dataSetID),
- _errorCode(search::engine::ECODE_NO_ERROR),
- _errorMessage(NULL),
- _queryArgs(NULL),
- _docsumArgs(NULL),
- _searchInfo(),
- _queryResult(),
- _docsumsResult()
- {
- }
-
- ~FastS_SearchBase() override {
- free(_errorMessage);
- }
-
- void SetError(search::engine::ErrorCode errorCode, const char *errorMessage)
- {
- _errorCode = errorCode;
- if (errorMessage != NULL)
- _errorMessage = strdup(errorMessage);
- else
- _errorMessage = NULL;
- }
-
-
- uint32_t GetDataSetID() override { return _dataSetID; }
- FastS_SearchInfo *GetSearchInfo() override { return &_searchInfo; }
-
- RetCode setSearchRequest(const search::engine::SearchRequest * request) override {
- _queryArgs = request;
- return RET_OK;
- }
-
- RetCode SetGetDocsumArgs(search::docsummary::GetDocsumArgs *docsumArgs) override {
- _docsumArgs = docsumArgs;
- return RET_OK;
- }
-
- RetCode Search(uint32_t searchOffset, uint32_t maxhits, uint32_t minhits = 0) override {
- (void) minhits;
- _searchInfo._searchOffset = searchOffset;
- _searchInfo._maxHits = maxhits;
- return RET_OK;
- }
-
- RetCode ProcessQueryDone() override { return RET_OK; }
- FastS_QueryResult *GetQueryResult() override { return &_queryResult; }
-
- RetCode GetDocsums(const FastS_hitresult *hits, uint32_t hitcnt) override {
- (void) hits;
- (void) hitcnt;
- return RET_OK;
- }
-
- RetCode ProcessDocsumsDone() override { return RET_OK; }
- FastS_DocsumsResult *GetDocsumsResult() override { return &_docsumsResult; }
- search::engine::ErrorCode GetErrorCode() override { return _errorCode; }
-
- const char *GetErrorMessage() override {
- if (_errorMessage != NULL)
- return _errorMessage;
- return search::engine::getStringFromErrorCode(_errorCode);
- }
-
- void Interrupt() override {}
- void Free() override { delete this; }
-};
-
-//----------------------------------------------------------------
-
-class FastS_FailedSearch : public FastS_SearchBase
-{
-private:
- bool _async;
-
-public:
- FastS_FailedSearch(uint32_t dataSetID, bool async, search::engine::ErrorCode errorCode, const char *errorMessage)
- : FastS_SearchBase(dataSetID),
- _async(async)
- {
- SetError(errorCode, errorMessage);
- }
-
- bool IsAsync() override { return _async; }
-
- RetCode SetAsyncArgs(FastS_ISearchOwner *owner) override {
- (void) owner;
- return (_async) ? RET_OK : RET_ERROR;
- }
-};
-
-//----------------------------------------------------------------
-
-class FastS_AsyncSearch : public FastS_SearchBase
-{
-protected:
- FastS_ISearchOwner *_searchOwner;
-
-public:
- FastS_AsyncSearch(const FastS_AsyncSearch &) = delete;
- FastS_AsyncSearch& operator=(const FastS_AsyncSearch &) = delete;
- FastS_AsyncSearch(uint32_t dataSetID)
- : FastS_SearchBase(dataSetID),
- _searchOwner(NULL)
- {}
-
- bool IsAsync() override { return true; }
-
- RetCode SetAsyncArgs(FastS_ISearchOwner *owner) override {
- _searchOwner = owner;
- return RET_OK;
- }
-};
-
-//----------------------------------------------------------------
-
-class FastS_SearchAdapter : public FastS_ISearch
-{
-protected:
- FastS_ISearch *_search;
-
-public:
- explicit FastS_SearchAdapter(FastS_ISearch *search);
- FastS_SearchAdapter(const FastS_SearchAdapter &) = delete;
- FastS_SearchAdapter& operator=(const FastS_SearchAdapter &) = delete;
- ~FastS_SearchAdapter() override;
-
- bool IsAsync() override;
- uint32_t GetDataSetID() override;
- FastS_SearchInfo *GetSearchInfo() override;
- RetCode SetAsyncArgs(FastS_ISearchOwner *owner) override;
- RetCode setSearchRequest(const search::engine::SearchRequest * request) override;
- RetCode SetGetDocsumArgs(search::docsummary::GetDocsumArgs *docsumArgs) override;
- RetCode Search(uint32_t searchOffset, uint32_t maxhits, uint32_t minhits = 0) override;
- RetCode ProcessQueryDone() override;
- FastS_QueryResult *GetQueryResult() override;
- RetCode GetDocsums(const FastS_hitresult *hits, uint32_t hitcnt) override;
- RetCode ProcessDocsumsDone() override;
- FastS_DocsumsResult *GetDocsumsResult() override;
- search::engine::ErrorCode GetErrorCode() override;
- const char *GetErrorMessage() override;
- void Interrupt() override;
- void Free() override;
-};
-
-//----------------------------------------------------------------
-
-class FastS_SyncSearchAdapter : public FastS_SearchAdapter,
- public FastS_ISearchOwner
-{
-private:
- std::mutex _lock;
- std::condition_variable _cond;
- bool _waitQuery;
- bool _queryDone;
- bool _waitDocsums;
- bool _docsumsDone;
-
-protected:
- explicit FastS_SyncSearchAdapter(FastS_ISearch *search);
-
-public:
- ~FastS_SyncSearchAdapter() override;
-
-
- void DoneQuery(FastS_ISearch *) override;
- void DoneDocsums(FastS_ISearch *) override;
-
- void WaitQueryDone();
- void WaitDocsumsDone();
-
- bool IsAsync() override;
- RetCode SetAsyncArgs(FastS_ISearchOwner *owner) override;
- RetCode Search(uint32_t searchOffset, uint32_t maxhits, uint32_t minhits = 0) override;
- RetCode GetDocsums(const FastS_hitresult *hits, uint32_t hitcnt) override;
-};
-
-//----------------------------------------------------------------
-
diff --git a/searchcore/src/vespa/searchcore/fdispatch/common/stdincl.h b/searchcore/src/vespa/searchcore/fdispatch/common/stdincl.h
deleted file mode 100644
index b268d2831e2..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/common/stdincl.h
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <cstdint>
-
-/**
- * This method defines the illegal/undefined value for unsigned 32-bit
- * integer ids.
- **/
-inline uint32_t FastS_NoID32() { return static_cast<uint32_t>(-1); }
-
diff --git a/searchcore/src/vespa/searchcore/fdispatch/common/timestat.cpp b/searchcore/src/vespa/searchcore/fdispatch/common/timestat.cpp
deleted file mode 100644
index 5dec5ba4bc3..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/common/timestat.cpp
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "timestat.h"
-
-void
-FastS_TimeStatHistory::Reset()
-{
- _sampleAccTime = 0.0;
- _totalAccTime = 0.0;
- _sampleIdx = 0;
- _sampleCount = 0;
- _totalCount = 0;
- for (uint32_t i = 0; i < _timestatssize; i++)
- _sampleTimes[i] = Sample();
-}
-
-
-double
-FastS_TimeStatHistory::GetMaxTime() const
-{
- double max = 0.0;
- uint32_t idx = _sampleIdx;
- for (uint32_t residue = _sampleCount;
- residue > 0; residue--)
- {
- if (idx > 0)
- idx--;
- else
- idx = _timestatssize - 1;
- if (_sampleTimes[idx]._time > max)
- max = _sampleTimes[idx]._time;
- }
- return max;
-}
-
-
-void
-FastS_TimeStatHistory::Update(double tnow, double t, bool timedout)
-{
- uint32_t timeIdx = getTimeIdx(tnow);
- if (_slotCount == 0u) {
- _timeSlots[_slotIdx].init(timeIdx);
- ++_slotCount;
- } else {
- TimeSlot &ts = _timeSlots[_slotIdx];
- if (ts._timeIdx > timeIdx)
- timeIdx = ts._timeIdx;
- if (ts._timeIdx < timeIdx) {
- if (_slotCount < NUM_TIMESLOTS)
- ++_slotCount;
- _slotIdx = nextTimeSlot(_slotIdx);
- _timeSlots[_slotIdx].init(timeIdx);
- }
- }
- _timeSlots[_slotIdx].update(t, timedout);
-
- _totalAccTime += t;
- ++_totalCount;
- if (timedout)
- ++_totalTimeouts;
- if (_sampleCount >= _timestatssize) {
- const Sample &s = _sampleTimes[_sampleIdx];
- _sampleAccTime -= s._time;
- if (s._timedout)
- --_sampleTimeouts;
- --_sampleCount;
- }
- _sampleTimes[_sampleIdx] = Sample(t, timedout);
- _sampleAccTime += t;
- if (timedout)
- ++_sampleTimeouts;
- _sampleIdx++;
- if (_sampleIdx >= _timestatssize)
- _sampleIdx = 0;
- ++_sampleCount;
-}
-
-
-void
-FastS_TimeStatHistory::getRecentStats(double tsince,
- FastS_TimeStatTotals &totals)
-{
- uint32_t timeIdx = getTimeIdx(tsince);
- uint32_t slotCount = _slotCount;
- uint32_t slotIdx = _slotIdx;
- for (; slotCount > 0u && _timeSlots[slotIdx]._timeIdx >= timeIdx;
- --slotCount, slotIdx = prevTimeSlot(slotIdx)) {
- TimeSlot &ts = _timeSlots[slotIdx];
- totals._totalCount += ts._count;
- totals._totalTimeouts += ts._timeouts;
- totals._totalAccTime += ts._accTime;
- }
-}
-
-
-double
-FastS_TimeStatHistory::getLoadTime(double tsince, double tnow)
-{
- const uint32_t holeSize = 2; // 2 missing slots => hole
- const uint32_t minSlotLoad = 4; // Mininum load for not being "missing"
- uint32_t sinceTimeIdx = getTimeIdx(tsince);
- uint32_t timeIdx = getTimeIdx(tnow);
- uint32_t slotCount = _slotCount;
- uint32_t slotIdx = _slotIdx;
- uint32_t doneTimeIdx = timeIdx;
- for (; slotCount > 0u; --slotCount, slotIdx = prevTimeSlot(slotIdx)) {
- TimeSlot &ts = _timeSlots[slotIdx];
- if (ts._timeIdx + holeSize < doneTimeIdx)
- break; // Found hole, i.e. holeSize missing slots
- if (ts._timeIdx + holeSize < sinceTimeIdx)
- break; // No point in looking further back
- if (ts._count >= minSlotLoad)
- doneTimeIdx = ts._timeIdx;
- }
- return tnow - getTimeFromTimeIdx(doneTimeIdx);
-}
diff --git a/searchcore/src/vespa/searchcore/fdispatch/common/timestat.h b/searchcore/src/vespa/searchcore/fdispatch/common/timestat.h
deleted file mode 100644
index e05bfe5ef29..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/common/timestat.h
+++ /dev/null
@@ -1,215 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-
-#pragma once
-
-#include <cstdint>
-
-class FastS_TimeStatTotals
-{
-public:
- int _totalCount;
- uint32_t _totalTimeouts;
- double _totalAccTime;
-
- FastS_TimeStatTotals()
- : _totalCount(0),
- _totalTimeouts(0u),
- _totalAccTime(0)
- {
- }
-
- FastS_TimeStatTotals &
- operator+=(const FastS_TimeStatTotals &rhs)
- {
- _totalCount += rhs._totalCount;
- _totalTimeouts += rhs._totalTimeouts;
- _totalAccTime += rhs._totalAccTime;
- return *this;
- }
-
- FastS_TimeStatTotals &
- operator-=(const FastS_TimeStatTotals &rhs)
- {
- _totalCount -= rhs._totalCount;
- _totalTimeouts -= rhs._totalTimeouts;
- _totalAccTime -= rhs._totalAccTime;
- return *this;
- }
-
- double
- getAvgTime() const
- {
- if (_totalCount == 0)
- return 0.0;
- return (_totalAccTime / _totalCount);
- }
-
- double
- getTimeoutRate() const
- {
- if (_totalCount == 0)
- return 0.0;
- return (static_cast<double>(_totalTimeouts) / _totalCount);
- }
-};
-
-
-class FastS_TimeStatHistory
-{
- enum {
- _timestatssize = 100
- };
- enum {
- SLOT_SIZE = 5,
- NUM_TIMESLOTS = 128
- };
- double _sampleAccTime;
- double _totalAccTime;
- uint32_t _sampleIdx;
- uint32_t _sampleCount;
- uint32_t _sampleTimeouts;
- uint32_t _totalCount;
- uint32_t _totalTimeouts;
- struct Sample {
- double _time;
- bool _timedout;
-
- Sample()
- : _time(0.0),
- _timedout(false)
- {
- }
-
- Sample(double time, bool timedout)
- : _time(time),
- _timedout(timedout)
- {
- }
- };
- Sample _sampleTimes[_timestatssize];
-
- struct TimeSlot
- {
- double _accTime;
- uint32_t _count;
- uint32_t _timeouts;
- uint32_t _timeIdx;
-
- TimeSlot()
- : _accTime(0.0),
- _count(0u),
- _timeouts(0u),
- _timeIdx(0u)
- {
- }
-
- void
- init(uint32_t timeIdx)
- {
- _accTime = 0.0;
- _count = 0u;
- _timeouts = 0u;
- _timeIdx = timeIdx;
- }
-
- void
- update(double t, bool timedout)
- {
- _accTime += t;
- ++_count;
- if (timedout)
- ++_timeouts;
- }
- };
-
- static uint32_t
- nextTimeSlot(uint32_t timeSlot)
- {
- return (timeSlot >= (NUM_TIMESLOTS - 1)) ? 0u : timeSlot + 1;
- }
-
- static uint32_t
- prevTimeSlot(uint32_t timeSlot)
- {
- return (timeSlot == 0u) ? (NUM_TIMESLOTS - 1) : timeSlot - 1;
- }
-
-
- TimeSlot _timeSlots[NUM_TIMESLOTS];
- uint32_t _slotCount;
- uint32_t _slotIdx;
-
- static uint32_t
- getTimeIdx(double t)
- {
- // Each SLOT_SIZE second period has it's own slot of statistics
- return static_cast<uint32_t>(t / SLOT_SIZE);
- }
-
- static double
- getTimeFromTimeIdx(uint32_t timeIdx)
- {
- return static_cast<double>(timeIdx) * SLOT_SIZE;
- }
-public:
- FastS_TimeStatHistory()
- : _sampleAccTime(0.0),
- _totalAccTime(0.0),
- _sampleIdx(0),
- _sampleCount(0),
- _sampleTimeouts(0),
- _totalCount(0),
- _totalTimeouts(0u),
- _sampleTimes(),
- _timeSlots(),
- _slotCount(0u),
- _slotIdx(0u)
- {
- Reset();
- }
-
- void Reset();
-
- double GetSampleAccTime() const { return _sampleAccTime; }
- uint32_t GetSampleCount() const { return _sampleCount; }
-
- uint32_t
- getSampleTimeouts() const
- {
- return _sampleTimeouts;
- }
-
- double GetAvgTime() const
- {
- if (_sampleCount == 0)
- return 0.0;
- return (_sampleAccTime / _sampleCount);
- }
- double GetMaxTime() const;
-
- void Update(double tnow, double t, bool timedout);
-
- uint32_t GetTotalCount() const { return _totalCount; }
- double GetTotalAccTime() const { return _totalAccTime; }
-
- uint32_t
- getTotalTimeouts() const
- {
- return _totalTimeouts;
- }
-
- void AddTotal(FastS_TimeStatTotals *totals) {
- totals->_totalCount += GetTotalCount();
- totals->_totalTimeouts += getTotalTimeouts();
- totals->_totalAccTime += GetTotalAccTime();
- }
-
- void
- getRecentStats(double tsince, FastS_TimeStatTotals &totals);
-
- double
- getLoadTime(double tsince, double tnow);
-};
-
-
diff --git a/searchcore/src/vespa/searchcore/fdispatch/program/.gitignore b/searchcore/src/vespa/searchcore/fdispatch/program/.gitignore
deleted file mode 100644
index 27eb860d05b..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/program/.gitignore
+++ /dev/null
@@ -1,13 +0,0 @@
-*.exp
-*.ilk
-*.lib
-*.pdb
-.depend
-ID
-Makefile
-access_log
-debugmalloc
-fdispatch
-fdispatch.exe
-sfdispatch
-sfdispatch.exe
diff --git a/searchcore/src/vespa/searchcore/fdispatch/program/CMakeLists.txt b/searchcore/src/vespa/searchcore/fdispatch/program/CMakeLists.txt
deleted file mode 100644
index c2c00320d82..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/program/CMakeLists.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-vespa_add_library(searchcore_fdispatch_program STATIC
- SOURCES
- fdispatch.cpp
- rpc.cpp
- engineadapter.cpp
- searchadapter.cpp
- docsumadapter.cpp
- DEPENDS
- searchcore_fconfig
-)
diff --git a/searchcore/src/vespa/searchcore/fdispatch/program/description.html b/searchcore/src/vespa/searchcore/fdispatch/program/description.html
deleted file mode 100644
index a73a6c4b4d1..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/program/description.html
+++ /dev/null
@@ -1,3 +0,0 @@
-<!-- Short description for make kdoc. -->
-<!-- Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
-FDispatch program.
diff --git a/searchcore/src/vespa/searchcore/fdispatch/program/docsumadapter.cpp b/searchcore/src/vespa/searchcore/fdispatch/program/docsumadapter.cpp
deleted file mode 100644
index d104aecfacc..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/program/docsumadapter.cpp
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "docsumadapter.h"
-#include <vespa/searchcore/fdispatch/search/datasetcollection.h>
-
-#include <vespa/log/log.h>
-LOG_SETUP(".fdispatch.docsumadapter");
-
-namespace fdispatch {
-
-void
-DocsumAdapter::setupRequest()
-{
- const DocsumRequest &req = *_request.get();
- _args.initFromDocsumRequest(req);
- _hitcnt = req.hits.size();
- LOG(debug, "DocsumAdapter::setupRequest : hitcnt=%d", _hitcnt);
- if (_hitcnt > 0) {
- _hitbuf = (FastS_hitresult *) malloc(req.hits.size() * sizeof(FastS_hitresult));
- }
- for (uint32_t i = 0; i < _hitcnt; i++) {
- _hitbuf[i]._gid = req.hits[i].gid;
- _hitbuf[i]._partition = req.hits[i].path;
- LOG(debug, "DocsumAdapter::setupRequest : hit[%d] (gid=%s,part=%d)",
- i, _hitbuf[i]._gid.toString().c_str(), _hitbuf[i]._partition);
- }
-}
-
-void
-DocsumAdapter::handleRequest()
-{
- _dsc = _appCtx->GetDataSetCollection();
- assert(_dsc != NULL);
- _search = _dsc->CreateSearch(FastS_NoID32(), _appCtx->GetTimeKeeper());
- assert(_search != NULL);
- _docsumsResult = _search->GetDocsumsResult();
- _search->SetGetDocsumArgs(&_args);
- _search->GetDocsums(_hitbuf, _hitcnt);
- _search->ProcessDocsumsDone();
-}
-
-void
-DocsumAdapter::createReply()
-{
- DocsumReply::UP reply(new DocsumReply());
- DocsumReply &r = *reply;
-
- FastS_fullresult *hitbuf = _docsumsResult->_fullresult;
- uint32_t hitcnt = _docsumsResult->_fullResultCount;
-
- LOG(debug, "DocsumAdapter::createReply : hitcnt=%d", hitcnt);
- r.docsums.reserve(hitcnt);
- for (uint32_t i = 0; i < hitcnt; i++) {
- if ( ! hitbuf[i]._buf.empty() ) {
- r.docsums.push_back(DocsumReply::Docsum());
- DocsumReply::Docsum & d = r.docsums.back();
- d.docid = hitbuf[i]._docid;
- d.gid = hitbuf[i]._gid;
- d.data.swap(hitbuf[i]._buf);
- } else {
- LOG(debug, "DocsumAdapter::createReply : No buf for hit=%d", i);
- }
- }
- r.request = _request.release();
- _client.getDocsumsDone(std::move(reply));
-}
-
-void
-DocsumAdapter::writeLog()
-{
- // no access log for docsums
-}
-
-void
-DocsumAdapter::cleanup()
-{
- if (_search != NULL) {
- _search->Free();
- }
- if (_dsc != NULL) {
- _dsc->subRef();
- }
- free(_hitbuf);
- _hitcnt = 0;
- _hitbuf = 0;
-}
-
-void
-DocsumAdapter::Run(FastOS_ThreadInterface *, void *)
-{
- setupRequest();
- handleRequest();
- createReply();
- writeLog();
- cleanup();
- delete this;
-}
-
-DocsumAdapter::DocsumAdapter(FastS_AppContext *appCtx,
- DocsumRequest::Source request,
- DocsumClient &client)
- : _appCtx(appCtx),
- _request(std::move(request)),
- _client(client),
- _args(),
- _hitcnt(0),
- _hitbuf(0),
- _dsc(0),
- _search(0),
- _docsumsResult(0)
-{
-}
-
-} // namespace fdispatch
diff --git a/searchcore/src/vespa/searchcore/fdispatch/program/docsumadapter.h b/searchcore/src/vespa/searchcore/fdispatch/program/docsumadapter.h
deleted file mode 100644
index 18ade945102..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/program/docsumadapter.h
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/searchlib/engine/docsumapi.h>
-#include <vespa/searchcore/fdispatch/common/appcontext.h>
-#include <vespa/searchcore/fdispatch/common/search.h>
-#include <vespa/searchsummary/docsummary/getdocsumargs.h>
-#include <vespa/fastos/thread.h>
-
-namespace fdispatch {
-
-/**
- * Implementation of the common search api for the fdispatch server
- * application.
- **/
-class DocsumAdapter : public FastOS_Runnable
-{
-public:
- typedef search::engine::DocsumRequest DocsumRequest;
- typedef search::engine::DocsumReply DocsumReply;
- typedef search::engine::DocsumClient DocsumClient;
-
-private:
- FastS_AppContext *_appCtx;
- DocsumRequest::Source _request;
- DocsumClient &_client;
-
- // internal docsum related state
- search::docsummary::GetDocsumArgs _args;
- uint32_t _hitcnt;
- FastS_hitresult *_hitbuf;
- FastS_DataSetCollection *_dsc;
- FastS_ISearch *_search;
- FastS_DocsumsResult *_docsumsResult;
-
- void setupRequest();
- void handleRequest();
- void createReply();
- void writeLog();
- void cleanup();
-
- virtual void Run(FastOS_ThreadInterface *, void *) override;
-
-public:
- DocsumAdapter(FastS_AppContext *appCtx,
- DocsumRequest::Source request,
- DocsumClient &client);
-};
-
-} // namespace fdispatch
-
diff --git a/searchcore/src/vespa/searchcore/fdispatch/program/engineadapter.cpp b/searchcore/src/vespa/searchcore/fdispatch/program/engineadapter.cpp
deleted file mode 100644
index 6519b55851e..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/program/engineadapter.cpp
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "engineadapter.h"
-#include "searchadapter.h"
-#include "docsumadapter.h"
-#include <vespa/searchcore/fdispatch/search/child_info.h>
-#include <vespa/searchcore/fdispatch/search/nodemanager.h>
-#include <vespa/searchcore/fdispatch/search/dataset_base.h>
-
-#include <vespa/log/log.h>
-LOG_SETUP(".fdispatch.engineadapter");
-
-namespace fdispatch {
-
-EngineAdapter::
-EngineAdapter(FastS_AppContext *appCtx, FastOS_ThreadPool *threadPool)
- : _appCtx(appCtx),
- _mypool(threadPool)
-{
-}
-
-EngineAdapter::SearchReply::UP
-EngineAdapter::search(SearchRequest::Source request, SearchClient &client)
-{
- SearchAdapter *sa = new SearchAdapter(_appCtx, std::move(request), client);
- if ((_mypool == 0) || (_mypool->NewThread(sa) == 0)) {
- delete sa;
- LOG(error, "could not allocate thread for incoming search request");
- SearchReply::UP reply(new SearchReply());
- reply->useWideHits = true; // mld
- reply->errorCode = search::engine::ECODE_OVERLOADED;
- reply->errorMessage = "could not allocate thread for query";
- return reply;
- }
- return SearchReply::UP();
-}
-
-EngineAdapter::DocsumReply::UP
-EngineAdapter::getDocsums(DocsumRequest::Source request, DocsumClient &client)
-{
- DocsumAdapter *da = new DocsumAdapter(_appCtx, std::move(request), client);
- if ((_mypool == 0) || (_mypool->NewThread(da) == 0)) {
- delete da;
- LOG(error, "could not allocate thread for incoming docsum request");
- return DocsumReply::UP(new DocsumReply());
- }
- return DocsumReply::UP();
-}
-
-EngineAdapter::MonitorReply::UP
-EngineAdapter::ping(MonitorRequest::UP request, MonitorClient &)
-{
- MonitorReply::UP reply(new MonitorReply());
- MonitorReply &mr = *reply;
-
- uint32_t timeStamp = 0;
- FastS_NodeManager *nm = _appCtx->GetNodeManager();
-
- ChildInfo ci = nm->getChildInfo();
- timeStamp = nm->GetMldDocstamp();
- // TODO: Report softoffline upwards when fdispatch has been requested
- // to go down in a controlled manner (along with zero docstamp).
- mr.partid = nm->GetMldPartition();
- mr.timestamp = timeStamp;
- mr.mld = true;
- mr.totalNodes = ci.maxNodes;
- mr.activeNodes = ci.activeNodes;
- mr.totalParts = ci.maxParts;
- mr.activeParts = ci.activeParts;
- if (ci.activeDocs.valid) {
- mr.activeDocs = ci.activeDocs.count;
- mr.activeDocsRequested = request->reportActiveDocs;
- }
- return reply;
-}
-
-} // namespace fdispatch
diff --git a/searchcore/src/vespa/searchcore/fdispatch/program/engineadapter.h b/searchcore/src/vespa/searchcore/fdispatch/program/engineadapter.h
deleted file mode 100644
index add5f045d51..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/program/engineadapter.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/searchlib/engine/searchapi.h>
-#include <vespa/searchlib/engine/docsumapi.h>
-#include <vespa/searchlib/engine/monitorapi.h>
-
-#include <vespa/searchcore/fdispatch/common/appcontext.h>
-
-namespace fdispatch {
-
-/**
- * Implementation of the common search api for the fdispatch server
- * application.
- **/
-class EngineAdapter : public search::engine::SearchServer,
- public search::engine::DocsumServer,
- public search::engine::MonitorServer
-{
-private:
- FastS_AppContext *_appCtx;
- FastOS_ThreadPool *_mypool;
-
-public:
- typedef search::engine::SearchRequest SearchRequest;
- typedef search::engine::DocsumRequest DocsumRequest;
- typedef search::engine::MonitorRequest MonitorRequest;
-
- typedef search::engine::SearchReply SearchReply;
- typedef search::engine::DocsumReply DocsumReply;
- typedef search::engine::MonitorReply MonitorReply;
-
- typedef search::engine::SearchClient SearchClient;
- typedef search::engine::DocsumClient DocsumClient;
- typedef search::engine::MonitorClient MonitorClient;
-
- EngineAdapter(FastS_AppContext *appCtx, FastOS_ThreadPool *threadPool);
-
- SearchReply::UP search(SearchRequest::Source request, SearchClient &client) override;
- DocsumReply::UP getDocsums(DocsumRequest::Source request, DocsumClient &client) override;
- MonitorReply::UP ping(MonitorRequest::UP request, MonitorClient &client) override;
-};
-
-} // namespace fdispatch
-
diff --git a/searchcore/src/vespa/searchcore/fdispatch/program/fdispatch.cpp b/searchcore/src/vespa/searchcore/fdispatch/program/fdispatch.cpp
deleted file mode 100644
index 3047834be85..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/program/fdispatch.cpp
+++ /dev/null
@@ -1,397 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "fdispatch.h"
-#include "engineadapter.h"
-#include "rpc.h"
-#include <vespa/searchcore/fdispatch/search/querycacheutil.h>
-#include <vespa/searchcore/fdispatch/search/nodemanager.h>
-#include <vespa/searchcore/util/eventloop.h>
-#include <vespa/vespalib/util/exceptions.h>
-#include <vespa/config/helper/configgetter.hpp>
-#include <vespa/vespalib/net/crypto_engine.h>
-
-#include <vespa/log/log.h>
-LOG_SETUP(".fdispatch");
-
-#ifndef V_TAG
-#define V_TAG "NOTAG"
-#endif
-
-using search::fs4transport::FS4PersistentPacketStreamer;
-using vespa::config::search::core::FdispatchrcConfig;
-using vespa::config::search::core::internal::InternalFdispatchrcType;
-using vespalib::compression::CompressionConfig;
-
-char FastS_VersionTag[] = V_TAG;
-
-namespace fdispatch
-{
-
-FastS_FNETAdapter::FastS_FNETAdapter(FastS_AppContext *appCtx)
- : _appCtx(appCtx),
- _nodeManager(),
- _timeKeeper(NULL),
- _transport(NULL),
- _last_now(0.0),
- _live_counter(0),
- _task()
-{ }
-
-FastS_FNETAdapter::~FastS_FNETAdapter()
-{
- fini();
-}
-
-void
-FastS_FNETAdapter::init()
-{
- _nodeManager = _appCtx->GetNodeManager();
- _timeKeeper = _appCtx->GetTimeKeeper();
- _transport = _appCtx->GetFNETTransport();
- _last_now = _timeKeeper->GetTime();
- _task.reset(new MyTask(_transport->GetScheduler(), *this));
- _task->ScheduleNow();
-}
-
-void
-FastS_FNETAdapter::perform()
-{
- double now = _timeKeeper->GetTime();
- double delta = now - _last_now;
- if (delta >= 3.0) {
- LOG(warning, "FNET loop high latency: %.3f", delta);
- }
- _last_now = now;
- ++_live_counter;
- _nodeManager->CheckEvents(_timeKeeper);
-}
-
-void
-FastS_FNETAdapter::fini()
-{
- if (_task) {
- _task->Kill();
- _task.reset();
- }
-}
-
-Fdispatch::~Fdispatch()
-{
- if (_transportServer) {
- _transportServer->shutDown(); // sync shutdown
- }
- _FNET_adapter.fini();
- if (_nodeManager) {
- _nodeManager->ShutdownConfig();
- }
- if (_transport && _transportStarted) {
- _transport->ShutDown(true); // sync shutdown
- }
- if (_rpc) {
- _rpc->ShutDown(); // sync shutdown
- }
-
- LOG(debug, "Will close threadpool");
- _mypool->Close();
- _executor.shutdown().sync();
- LOG(debug, "Has closed threadpool");
- _transportServer.reset();
- _engineAdapter.reset();
- _nodeManager.reset();
- _transport.reset();
- _rpc.reset();
- _mypool.reset();
-}
-
-FNET_Transport *
-Fdispatch::GetFNETTransport()
-{
- return _transport.get();
-}
-
-FNET_Scheduler *
-Fdispatch::GetFNETScheduler()
-{
- return (_transport) ? _transport->GetScheduler() : nullptr;
-}
-
-FastS_NodeManager *
-Fdispatch::GetNodeManager()
-{
- return _nodeManager.get();
-}
-
-FastS_DataSetCollection *
-Fdispatch::GetDataSetCollection()
-{
- return ( _nodeManager) ? _nodeManager->GetDataSetCollection() : nullptr;
-}
-
-FastOS_ThreadPool *
-Fdispatch::GetThreadPool()
-{
- return _mypool.get();
-}
-
-bool
-Fdispatch::Failed()
-{
- return ( (_transportServer && _transportServer->isFailed())) || _needRestart;
-}
-
-bool
-Fdispatch::CheckTempFail()
-{
- bool ret;
- bool failflag = _nodeManager->GetTempFail();
- unsigned int FNETLiveCounter;
-
- ret = true;
-
- FNETLiveCounter = _FNET_adapter.GetLiveCounter();
- if (FNETLiveCounter == _lastFNETLiveCounter) {
- if (_FNETLiveCounterFailed) {
- failflag = true; // Still failure
- } else if (!_FNETLiveCounterDanger) {
- _FNETLiveCounterDanger = true;
- _FNETLiveCounterDangerStart.SetNow();
- } else if (_FNETLiveCounterDangerStart.MilliSecsToNow() >= 6000) {
- LOG(error, "fdispatch::Fdispatch::CheckTempFail: FNET inactive for 6 seconds, deadlock ?");
- _FNETLiveCounterFailed = true; // Note that we failed
- failflag = true; // Force temporary failure
- } else if (_FNETLiveCounterDangerStart.MilliSecsToNow() >= 3000 &&
- !_FNETLiveCounterWarned) {
- _FNETLiveCounterWarned = true;
- LOG(warning, "fdispatch::Fdispatch::CheckTempFail: FNET inactive for 3 seconds");
- }
- } else {
- if (_FNETLiveCounterFailed || _FNETLiveCounterWarned) {
- LOG(warning, "fdispatch::Fdispatch::CheckTempFail: FNET active again");
- }
- _FNETLiveCounterFailed = false;
- _FNETLiveCounterWarned = false;
- _FNETLiveCounterDanger = false;
- _lastFNETLiveCounter = FNETLiveCounter;
- }
-
- if (failflag == _tempFail)
- return ret;
-
- if (_transportServer) {
- if (failflag) {
- _transportServer->setListen(false);
- LOG(error, "Disabling fnet server interface");
- } else {
- _transportServer->setListen(true);
- LOG(info, "Reenabling fnet server interface");
- }
- }
- _tempFail = failflag;
- return ret;
-}
-
-
-/**
- * Make the httpd and Monitor, and let a Thread execute each.
- * Set up stuff as specified in the fdispatch-rc-file.
- */
-Fdispatch::Fdispatch(const config::ConfigUri &configUri)
- : _executor(1, 128 * 1024),
- _mypool(),
- _engineAdapter(),
- _transportServer(),
- _componentConfig(),
- _nodeManager(),
- _transport(),
- _FNET_adapter(this),
- _rpc(),
- _config(),
- _configUri(configUri),
- _fdispatchrcFetcher(configUri.getContext()),
- _rndGen(),
- _partition(0),
- _tempFail(false),
- _FNETLiveCounterDanger(false),
- _FNETLiveCounterWarned(false),
- _FNETLiveCounterFailed(false),
- _transportStarted(false),
- _lastFNETLiveCounter(false),
- _FNETLiveCounterDangerStart(),
- _timeouts(0u),
- _checkLimit(0u),
- _healthPort(0),
- _needRestart(false)
-{
- int64_t cfgGen = -1;
- _config = config::ConfigGetter<FdispatchrcConfig>::
- getConfig(cfgGen, _configUri.getConfigId(), _configUri.getContext());
- LOG(config, "fdispatch version %s (RPC-port: %d, transport at %d)",
- FastS_VersionTag, _config->frtport, _config->ptport);
-
- _componentConfig.addConfig(vespalib::ComponentConfigProducer::Config("fdispatch", cfgGen,
- "config only obtained at startup"));
- _fdispatchrcFetcher.subscribe<FdispatchrcConfig>(configUri.getConfigId(), this);
- _fdispatchrcFetcher.start();
-}
-
-namespace {
-
-bool needRestart(const FdispatchrcConfig & curr, const FdispatchrcConfig & next)
-{
- if (curr.frtport != next.frtport) {
- LOG(warning, "FRT port has changed from %d to %d.", curr.frtport, next.frtport);
- return true;
- }
- if (curr.ptport != next.ptport) {
- LOG(warning, "PT port has changed from %d to %d.", curr.ptport, next.ptport);
- return true;
- }
- if (curr.healthport != next.healthport) {
- LOG(warning, "Health port has changed from %d to %d.", curr.healthport, next.healthport);
- return true;
- }
- return false;
-}
-
-}
-
-void Fdispatch::configure(std::unique_ptr<FdispatchrcConfig> cfg)
-{
- if (cfg && _config) {
- if ( needRestart(*_config, *cfg) ) {
- LOG(warning, "Will restart by abort now.");
- _needRestart.store(true);
- }
- }
-}
-
-namespace {
-
-CompressionConfig::Type
-convert(InternalFdispatchrcType::Packetcompresstype type)
-{
- switch (type) {
- case InternalFdispatchrcType::Packetcompresstype::LZ4: return CompressionConfig::LZ4;
- default: return CompressionConfig::LZ4;
- }
-}
-
-}
-
-bool
-Fdispatch::Init()
-{
- int maxthreads;
-
- _tempFail = false;
- _FNETLiveCounterDanger = false;
- _FNETLiveCounterWarned = false;
- _FNETLiveCounterFailed = false;
- _lastFNETLiveCounter = 0;
- _timeouts = 0;
- _checkLimit = 60;
-
- FS4PersistentPacketStreamer::Instance.SetCompressionLimit(_config->packetcompresslimit);
- FS4PersistentPacketStreamer::Instance.SetCompressionLevel(_config->packetcompresslevel);
- FS4PersistentPacketStreamer::Instance.SetCompressionType(convert(_config->packetcompresstype));
-
-
- LOG(debug, "Creating FNET transport");
- _transport = std::make_unique<FNET_Transport>(std::make_shared<vespalib::NullCryptoEngine>(), _config->transportthreads); // disable encryption
-
- // grab node slowness limit defaults
-
- FastS_DataSetDesc::SetDefaultSlowQueryLimitFactor(_config->defaultslowquerylimitfactor);
- FastS_DataSetDesc::SetDefaultSlowQueryLimitBias(_config->defaultslowquerylimitbias);
- FastS_DataSetDesc::SetDefaultSlowDocsumLimitFactor(_config->defaultslowdocsumlimitfactor);
- FastS_DataSetDesc::SetDefaultSlowDocsumLimitBias(_config->defaultslowdocsumlimitbias);
-
- maxthreads = _config->maxthreads;
- _mypool = std::make_unique<FastOS_ThreadPool>(256 * 1024, maxthreads);
-
- // Max interval betw read from socket.
- FastS_TimeOut::_val[FastS_TimeOut::maxSockSilent] = _config->maxsocksilent;
-
- if (_transport) {
- _transport->SetIOCTimeOut((uint32_t) (FastS_TimeOut::_val[FastS_TimeOut::maxSockSilent] * 1000.0));
- }
-
- char timestr[40];
- FastS_TimeOut::WriteTime(timestr, sizeof(timestr), FastS_TimeOut::_val[FastS_TimeOut::maxSockSilent]);
- LOG(debug, "VERBOSE: Max time between successful read from a socket: %s", timestr);
-
- FastS_QueryCacheUtil::_systemMaxHits = std::numeric_limits<int>::max();
- LOG(debug, "VERBOSE: maxhits: %d", FastS_QueryCacheUtil::_systemMaxHits);
-
- FastS_QueryCacheUtil::_maxOffset = std::numeric_limits<int>::max();
- const uint32_t linesize = 1;
- if (FastS_QueryCacheUtil::_systemMaxHits < linesize
- && FastS_QueryCacheUtil::_maxOffset < linesize - FastS_QueryCacheUtil::_systemMaxHits) {
- LOG(warning, "maxoffset must be >= %d! (overriding config value)", linesize - FastS_QueryCacheUtil::_systemMaxHits);
- FastS_QueryCacheUtil::_maxOffset = linesize - FastS_QueryCacheUtil::_systemMaxHits;
- }
- LOG(debug, "VERBOSE: maxoffset: %d", FastS_QueryCacheUtil::_maxOffset);
-
- _partition = _config->partition;
-
- int ptportnum = _config->ptport;
-
- LOG(debug, "Using port number %d", ptportnum);
-
- _nodeManager = std::make_unique<FastS_NodeManager>(_componentConfig, this, _partition);
-
- GetFNETTransport()->SetTCPNoDelay(_config->transportnodelay);
-
- if (ptportnum == 0) {
- throw vespalib::IllegalArgumentException("fdispatchrc.ptportnum must be non-zero, most likely an issue with config delivery.");
- }
-
- _engineAdapter = std::make_unique<fdispatch::EngineAdapter>(this, _mypool.get());
- _transportServer = std::make_unique<TransportServer>(*_engineAdapter, *_engineAdapter, *_engineAdapter, ptportnum, search::engine::TransportServer::DEBUG_ALL);
- _transportServer->setTCPNoDelay(_config->transportnodelay);
-
- if (!_transportServer->start()) {
- _transportServer.reset();
- _engineAdapter.reset();
- LOG(error, "CRITICAL: Failed to init upwards FNET transport on port %d", ptportnum);
- return false;
- }
-
- _nodeManager->SubscribePartMap(_configUri);
-
- if (_config->frtport != 0) {
- _rpc = std::make_unique<FastS_fdispatch_RPC>(this);
- if (!_rpc->Init(_config->frtport, _configUri.getConfigId())) {
- LOG(error, "RPC init failed");
- _rpc.reset();
- }
- } else {
- _rpc.reset();
- }
-
- // Kick off fdispatch administrative threads.
- if (_transport) {
- _FNET_adapter.init();
- bool rc = _transport->Start(_mypool.get());
- if (rc) {
- LOG(debug, "Started FNET transport");
- _transportStarted = true;
- } else {
- LOG(error, "Failed to start FNET transport");
- }
- }
- FastOS_Thread::Sleep(1000);
- if (_rpc) {
- _rpc->Start();
- }
- _healthPort = _config->healthport;
- return true;
-}
-
-uint32_t
-Fdispatch::getDispatchLevel()
-{
- return _config->dispatchlevel;
-}
-
-}
diff --git a/searchcore/src/vespa/searchcore/fdispatch/program/fdispatch.h b/searchcore/src/vespa/searchcore/fdispatch/program/fdispatch.h
deleted file mode 100644
index 093308d68d2..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/program/fdispatch.h
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/fnet/fnet.h>
-#include <vespa/searchcore/fdispatch/common/appcontext.h>
-#include <vespa/searchlib/engine/transportserver.h>
-#include <vespa/searchcore/config/config-fdispatchrc.h>
-#include <vespa/config/subscription/configuri.h>
-#include <vespa/config/helper/ifetchercallback.h>
-#include <vespa/config/helper/configfetcher.h>
-#include <vespa/vespalib/net/simple_component_config_producer.h>
-#include <vespa/vespalib/util/random.h>
-#include <vespa/vespalib/util/threadstackexecutor.h>
-
-class FastS_NodeManager;
-class FastS_fdispatch_RPC;
-
-namespace fdispatch {
-
-class EngineAdapter;
-
-class FastS_FNETAdapter
-{
-private:
- FastS_AppContext *_appCtx;
- FastS_NodeManager *_nodeManager;
- FastS_TimeKeeper *_timeKeeper;
- FNET_Transport *_transport;
- double _last_now; // latency check
- uint32_t _live_counter; // latency check
-
- struct MyTask : FNET_Task {
- FastS_FNETAdapter &self;
- MyTask(FNET_Scheduler *scheduler, FastS_FNETAdapter &self_in)
- : FNET_Task(scheduler), self(self_in) {}
- virtual void PerformTask() override {
- self.perform();
- ScheduleNow();
- }
- };
- std::unique_ptr<MyTask> _task;
-
-public:
- FastS_FNETAdapter(FastS_AppContext *appCtx);
- ~FastS_FNETAdapter();
- void init();
- void perform();
- uint32_t GetLiveCounter() const { return _live_counter; }
- void fini();
-};
-
-
-/**
- * Note: There is only one instance of this.
- */
-class Fdispatch : public FastS_AppContext,
- public config::IFetcherCallback<vespa::config::search::core::FdispatchrcConfig>
-{
-private:
- typedef search::engine::TransportServer TransportServer;
- typedef vespa::config::search::core::FdispatchrcConfig FdispatchrcConfig;
- Fdispatch(const Fdispatch &);
- Fdispatch& operator=(const Fdispatch &);
-
- vespalib::ThreadStackExecutor _executor;
- std::unique_ptr<FastOS_ThreadPool> _mypool;
- std::unique_ptr<EngineAdapter> _engineAdapter;
- std::unique_ptr<TransportServer> _transportServer;
- vespalib::SimpleComponentConfigProducer _componentConfig;
- std::unique_ptr<FastS_NodeManager> _nodeManager;
- std::unique_ptr<FNET_Transport> _transport;
- FastS_FNETAdapter _FNET_adapter;
- std::unique_ptr<FastS_fdispatch_RPC> _rpc;
- std::unique_ptr<FdispatchrcConfig> _config;
- config::ConfigUri _configUri;
- config::ConfigFetcher _fdispatchrcFetcher;
- vespalib::RandomGen _rndGen;
- unsigned int _partition;
- bool _tempFail;
- bool _FNETLiveCounterDanger;
- bool _FNETLiveCounterWarned;
- bool _FNETLiveCounterFailed;
- bool _transportStarted;
- unsigned int _lastFNETLiveCounter;
- FastOS_Time _FNETLiveCounterDangerStart;
- unsigned int _timeouts;
- unsigned int _checkLimit;
- int _healthPort;
- std::atomic<bool> _needRestart;
- void configure(std::unique_ptr<FdispatchrcConfig> cfg) override;
-public:
- // Implements FastS_AppContext
- virtual FNET_Transport *GetFNETTransport() override;
- virtual FNET_Scheduler *GetFNETScheduler() override;
- virtual FastS_NodeManager *GetNodeManager() override;
- virtual FastS_DataSetCollection *GetDataSetCollection() override;
- virtual FastOS_ThreadPool *GetThreadPool() override;
- virtual uint32_t getDispatchLevel() override;
- bool CheckTempFail();
- bool Failed();
- bool Init();
- int getHealthPort() const { return _healthPort; }
- vespalib::SimpleComponentConfigProducer &getComponentConfig() { return _componentConfig; }
-
- Fdispatch(const config::ConfigUri &configUri);
- ~Fdispatch();
-};
-
-}
-
diff --git a/searchcore/src/vespa/searchcore/fdispatch/program/rpc.cpp b/searchcore/src/vespa/searchcore/fdispatch/program/rpc.cpp
deleted file mode 100644
index df8711adb88..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/program/rpc.cpp
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "rpc.h"
-#include <vespa/searchcore/fdispatch/search/engine_base.h>
-#include <vespa/searchcore/fdispatch/search/datasetcollection.h>
-
-void
-FastS_fdispatch_RPC::RegisterMethods(FRT_ReflectionBuilder *rb)
-{
- FastS_RPC::RegisterMethods(rb);
- //------------------------------------------------------------------
- rb->DefineMethod("fs.admin.enableEngine", "s", "i",
- FRT_METHOD(FastS_fdispatch_RPC::RPC_EnableEngine), this);
- rb->MethodDesc("Enable the given engine (clear badness).");
- rb->ParamDesc("name", "engine name");
- rb->ReturnDesc("count", "number of engines affected");
- //------------------------------------------------------------------
- rb->DefineMethod("fs.admin.disableEngine", "s", "i",
- FRT_METHOD(FastS_fdispatch_RPC::RPC_DisableEngine), this);
- rb->MethodDesc("Disable the given engine (mark as admin bad).");
- rb->ParamDesc("name", "engine name");
- rb->ReturnDesc("count", "number of engines affected");
-}
-
-
-void
-FastS_fdispatch_RPC::RPC_GetNodeType(FRT_RPCRequest *req)
-{
- req->GetReturn()->AddString("dispatch");
-}
-
-namespace {
-
-template<class FUN>
-struct ExecuteWhenEqualName_t {
- FUN _successFun;
- const char* _targetName;
- uint32_t _cnt;
-
- ExecuteWhenEqualName_t(const char* targetName, FUN successFun)
- : _successFun(successFun),
- _targetName(targetName),
- _cnt(0)
- {}
-
- void operator()(FastS_EngineBase* engine) {
- if (strcmp(engine->GetName(), _targetName) == 0 ) {
- _cnt++;
- _successFun(engine);
- }
- }
-};
-
-template <class FUN>
-ExecuteWhenEqualName_t<FUN>
-ExecuteWhenEqualName(const char* targetName, FUN successFun) {
- return ExecuteWhenEqualName_t<FUN>(targetName, successFun);
-}
-
-
-} //anonymous namespace
-
-void
-FastS_fdispatch_RPC::RPC_EnableEngine(FRT_RPCRequest *req)
-{
- const char *name = req->GetParams()->GetValue(0)._string._str;
- FastS_DataSetCollection *dsc = GetAppCtx()->GetDataSetCollection();
- uint32_t cnt = 0;
-
- for (uint32_t i = 0; i < dsc->GetMaxNumDataSets(); i++) {
- FastS_DataSetBase *ds;
- FastS_PlainDataSet *ds_plain;
- if ((ds = dsc->PeekDataSet(i)) == NULL ||
- (ds_plain = ds->GetPlainDataSet()) == NULL)
- continue;
-
- cnt += ds_plain->ForEachEngine(
- ExecuteWhenEqualName(name,
- std::mem_fn( &FastS_EngineBase::ClearBad )))
- ._cnt;
- }
-
- dsc->subRef();
- req->GetReturn()->AddInt32(cnt);
-}
-
-
-void
-FastS_fdispatch_RPC::RPC_DisableEngine(FRT_RPCRequest *req)
-{
- const char *name = req->GetParams()->GetValue(0)._string._str;
- FastS_DataSetCollection *dsc = GetAppCtx()->GetDataSetCollection();
- uint32_t cnt = 0;
-
- for (uint32_t i = 0; i < dsc->GetMaxNumDataSets(); i++) {
- FastS_DataSetBase *ds;
- FastS_PlainDataSet *ds_plain;
- if ((ds = dsc->PeekDataSet(i)) == NULL ||
- (ds_plain = ds->GetPlainDataSet()) == NULL)
- continue;
-
- uint32_t badness = FastS_EngineBase::BAD_ADMIN;
- cnt += ds_plain->ForEachEngine(
- ExecuteWhenEqualName(name,
- std::bind(
- std::mem_fn( &FastS_EngineBase::MarkBad ),
- std::placeholders::_1,
- badness)))
- ._cnt;
- }
- dsc->subRef();
- req->GetReturn()->AddInt32(cnt);
-}
diff --git a/searchcore/src/vespa/searchcore/fdispatch/program/rpc.h b/searchcore/src/vespa/searchcore/fdispatch/program/rpc.h
deleted file mode 100644
index 6ca4e020d77..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/program/rpc.h
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/searchcore/fdispatch/common/rpc.h>
-
-
-class FastS_fdispatch_RPC : public FastS_RPC
-{
-public:
- FastS_fdispatch_RPC(FastS_AppContext *appCtx)
- : FastS_RPC(appCtx) {}
- virtual ~FastS_fdispatch_RPC() {}
-
- // Register RPC Methods
-
- virtual void RegisterMethods(FRT_ReflectionBuilder *rb) override;
-
- // methods registered by superclass
-
- virtual void RPC_GetNodeType(FRT_RPCRequest *req) override;
-
- // methods registered by us
-
- void RPC_EnableEngine(FRT_RPCRequest *req);
- void RPC_DisableEngine(FRT_RPCRequest *req);
-};
-
diff --git a/searchcore/src/vespa/searchcore/fdispatch/program/searchadapter.cpp b/searchcore/src/vespa/searchcore/fdispatch/program/searchadapter.cpp
deleted file mode 100644
index 824688a75f6..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/program/searchadapter.cpp
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "searchadapter.h"
-#include <vespa/searchcore/fdispatch/search/datasetcollection.h>
-#include <vespa/searchcore/fdispatch/search/dataset_base.h>
-#include <vespa/searchcore/fdispatch/search/nodemanager.h>
-
-namespace fdispatch {
-
-void
-SearchAdapter::handleRequest()
-{
- _dsc = _appCtx->GetDataSetCollection();
- FastS_assert(_dsc != NULL);
-
- uint32_t dataset = _dsc->SuggestDataSet();
-
- _search = _dsc->CreateSearch(dataset, _appCtx->GetTimeKeeper());
- FastS_assert(_search != NULL);
-
- _searchInfo = _search->GetSearchInfo();
- _queryResult = _search->GetQueryResult();
- _search->setSearchRequest(_request.get());
- _search->Search(_request->offset, _request->maxhits, /* minhits */ 0);
- _search->ProcessQueryDone();
-}
-
-SearchAdapter::SearchReply::UP
-SearchAdapter::createReply()
-{
- SearchReply::UP reply(new SearchReply());
- SearchReply &r = *reply;
- r.useWideHits = true; // mld
- if (_search->GetErrorCode() != search::engine::ECODE_NO_ERROR) {
- r.errorCode = _search->GetErrorCode();
- r.errorMessage = _search->GetErrorMessage();
- return reply;
- }
-
- uint32_t hitcnt = _queryResult->_hitCount;
- r.offset = _searchInfo->_searchOffset;
- r.totalHitCount = _queryResult->_totalHitCount;
- r.maxRank = _queryResult->_maxRank;
- r.setDistributionKey(_appCtx->GetNodeManager()->GetMldDocstamp());
-
- if (_queryResult->_sortIndex != NULL && hitcnt > 0) {
- r.sortIndex.assign(_queryResult->_sortIndex, _queryResult->_sortIndex + hitcnt + 1);
- r.sortData.assign(_queryResult->_sortData, _queryResult->_sortData + _queryResult->_sortIndex[hitcnt]);
- }
-
- if (_queryResult->_groupResultLen > 0) {
- r.groupResult.assign(_queryResult->_groupResult,
- _queryResult->_groupResult + _queryResult->_groupResultLen);
- }
-
- r.coverage = SearchReply::Coverage(_searchInfo->_activeDocs, _searchInfo->_coverageDocs);
- r.coverage.setSoonActive(_searchInfo->_soonActiveDocs);
- r.coverage.setDegradeReason(_searchInfo->_degradeReason);
- r.coverage.setNodesQueried(_searchInfo->_nodesQueried);
- r.coverage.setNodesReplied(_searchInfo->_nodesReplied);
-
- FastS_hitresult *hitbuf = _queryResult->_hitbuf;
- r.hits.resize(hitcnt);
-
- for (uint32_t cur = 0; cur < hitcnt; cur++) {
- r.hits[cur].gid = hitbuf[cur]._gid;
- r.hits[cur].metric = hitbuf[cur]._metric;
- r.hits[cur].path = hitbuf[cur]._partition;
- r.hits[cur].setDistributionKey(hitbuf[cur].getDistributionKey());
- }
- r.request = _request.release();
- return reply;
-}
-
-void
-SearchAdapter::cleanup()
-{
- if (_search != NULL) {
- _search->Free();
- }
- if (_dsc != NULL) {
- _dsc->subRef();
- }
-}
-
-void
-SearchAdapter::Run(FastOS_ThreadInterface *, void *)
-{
- handleRequest();
- SearchReply::UP reply = createReply();
- cleanup();
- _client.searchDone(std::move(reply));
- delete this;
-}
-
-SearchAdapter::SearchAdapter(FastS_AppContext *appCtx,
- SearchRequest::Source request,
- SearchClient &client)
- : _appCtx(appCtx),
- _request(std::move(request)),
- _client(client),
- _dsc(0),
- _search(0),
- _searchInfo(0),
- _queryResult(0)
-{
-}
-
-} // namespace fdispatch
diff --git a/searchcore/src/vespa/searchcore/fdispatch/program/searchadapter.h b/searchcore/src/vespa/searchcore/fdispatch/program/searchadapter.h
deleted file mode 100644
index 32cadfd0648..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/program/searchadapter.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/searchlib/engine/searchapi.h>
-#include <vespa/searchcore/fdispatch/common/appcontext.h>
-#include <vespa/searchcore/fdispatch/common/search.h>
-#include <vespa/fastos/thread.h>
-
-namespace fdispatch {
-
-/**
- * Implementation of the common search api for the fdispatch server
- * application.
- **/
-class SearchAdapter : public FastOS_Runnable
-{
-public:
- typedef search::engine::SearchRequest SearchRequest;
- typedef search::engine::SearchReply SearchReply;
- typedef search::engine::SearchClient SearchClient;
-
-private:
- FastS_AppContext *_appCtx;
- SearchRequest::Source _request;
- SearchClient &_client;
-
- // internal search related state
- FastS_DataSetCollection *_dsc;
- FastS_ISearch *_search;
- FastS_SearchInfo *_searchInfo;
- FastS_QueryResult *_queryResult;
-
- void handleRequest();
- SearchReply::UP createReply();
- void writeLog();
- void cleanup();
-
- void Run(FastOS_ThreadInterface *, void *) override;
-
-public:
- SearchAdapter(FastS_AppContext *appCtx,
- SearchRequest::Source request,
- SearchClient &client);
-};
-
-} // namespace fdispatch
-
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/.gitignore b/searchcore/src/vespa/searchcore/fdispatch/search/.gitignore
deleted file mode 100644
index ca1a057edea..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-*.lib
-.depend
-Makefile
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/CMakeLists.txt b/searchcore/src/vespa/searchcore/fdispatch/search/CMakeLists.txt
deleted file mode 100644
index ec8b7d18143..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/CMakeLists.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-vespa_add_library(searchcore_fdispatch_search STATIC
- SOURCES
- configdesc.cpp
- dataset_base.cpp
- datasetcollection.cpp
- engine_base.cpp
- fnet_dataset.cpp
- fnet_engine.cpp
- fnet_search.cpp
- mergehits.cpp
- nodemanager.cpp
- plain_dataset.cpp
- query.cpp
- querycacheutil.cpp
- rowstate.cpp
- search_path.cpp
- DEPENDS
- searchcore_fconfig
- searchcore_util
-)
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/child_info.h b/searchcore/src/vespa/searchcore/fdispatch/search/child_info.h
deleted file mode 100644
index 3568aa384dd..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/child_info.h
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-
-#include "poss_count.h"
-
-struct ChildInfo {
- uint32_t maxNodes;
- uint32_t activeNodes;
- uint32_t maxParts;
- uint32_t activeParts;
- PossCount activeDocs;
-
- ChildInfo()
- : maxNodes(0),
- activeNodes(0),
- maxParts(0),
- activeParts(0),
- activeDocs()
- {}
-};
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/configdesc.cpp b/searchcore/src/vespa/searchcore/fdispatch/search/configdesc.cpp
deleted file mode 100644
index 045f5b20ee0..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/configdesc.cpp
+++ /dev/null
@@ -1,344 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "configdesc.h"
-#include <vespa/searchcore/util/log.h>
-
-#include <vespa/log/log.h>
-LOG_SETUP(".search.configdesc");
-
-//----------------------------------------------------------------------
-
-double FastS_DataSetDesc::_defaultSlowQueryLimitFactor = 0.0;
-double FastS_DataSetDesc::_defaultSlowQueryLimitBias = 100.0;
-double FastS_DataSetDesc::_defaultSlowDocsumLimitFactor = 0.0;
-double FastS_DataSetDesc::_defaultSlowDocsumLimitBias = 100.0;
-
-
-FastS_DataSetDesc::FastS_DataSetDesc(uint32_t datasetid)
- : _id(datasetid),
- _queryDistributionMode(QueryDistributionMode::AUTOMATIC, 100.0, 10000),
- _searchableCopies(1),
- _unitRefCost(0),
- _partBits(6),
- _rowBits(0),
- _numParts(0),
- _firstPart(0),
- _minChildParts(0),
- _maxNodesDownPerFixedRow(0),
- _useRoundRobinForFixedRow(true),
- _maxHitsPerNode(static_cast<uint32_t>(-1)),
- _estimateParts(1),
- _estPartCutoff(1),
- _estimatePartsSet(false),
- _estPartCutoffSet(false),
- _minOurActive(500),
- _maxOurActive(500),
- _cutoffOurActive(1000),
- _minEstActive(500),
- _maxEstActive(1000),
- _cutoffEstActive(1000),
- _queueDrainRate(400.0),
- _queueMaxDrain(40.0),
- _slowQueryLimitFactor(_defaultSlowQueryLimitFactor),
- _slowQueryLimitBias(_defaultSlowQueryLimitBias),
- _slowDocsumLimitFactor(_defaultSlowDocsumLimitFactor),
- _slowDocsumLimitBias(_defaultSlowDocsumLimitBias),
- _monitorInterval(1.0),
- _higherCoverageMaxSearchWait(1.0),
- _higherCoverageMinSearchWait(0.0),
- _higherCoverageBaseSearchWait(0.1),
- _minimalSearchCoverage(100.0),
- _higherCoverageMaxDocSumWait(0.3),
- _higherCoverageMinDocSumWait(0.1),
- _higherCoverageBaseDocSumWait(0.1),
- _minimalDocSumCoverage(100.0),
- _engineCnt(0),
- _enginesHead(NULL),
- _enginesTail(NULL),
- _mpp(1)
-{
-}
-
-
-FastS_DataSetDesc::~FastS_DataSetDesc()
-{
- while (_enginesHead != NULL) {
- FastS_EngineDesc *engine = _enginesHead;
- _enginesHead = engine->GetNext();
- delete engine;
- }
-}
-
-
-FastS_EngineDesc *
-FastS_DataSetDesc::AddEngine(const char *name)
-{
- FastS_EngineDesc *engine = new FastS_EngineDesc(name);
- FastS_assert(engine != NULL);
-
- engine->SetNext(NULL);
- if (_enginesHead == NULL)
- _enginesHead = engine;
- else
- _enginesTail->SetNext(engine);
- _enginesTail = engine;
- _engineCnt++;
- return engine;
-}
-
-
-void
-FastS_DataSetDesc::FinalizeConfig()
-{
- /* assume 1 partition if number of partitions was not specified */
- if (GetNumParts() == 0) {
- LOG(warning,
- "Setting partitions to 1 in dataset %u",
- (unsigned int) GetID());
- SetNumParts(1);
- }
-
- if (!_estPartCutoffSet ||
- _estPartCutoff > _numParts ||
- _estPartCutoff == 0)
- _estPartCutoff = _numParts;
-}
-
-//----------------------------------------------------------------------
-
-bool
-FastS_DataSetCollDesc::CheckIntegrity()
-{
- bool rc = true;
-
- for (uint32_t i = 0; i < _datasets_size; i++) {
- FastS_DataSetDesc *d = _datasets[i];
- if (d != NULL) {
- if (d->GetEngineCnt() == 0) {
- LOG(warning, "plain dataset %d has no engines", d->GetID());
- }
-
- if (d->GetNumParts() == 0) {
- LOG(warning, "plain dataset %d has no partitions", d->GetID());
- }
-
- // check engine configuration
- {
- uint32_t partBits = d->GetPartBits();
- uint32_t rowBits = d->GetRowBits();
- uint32_t minPart = d->GetFirstPart();
- uint32_t maxPart = minPart + (1 << partBits) - 2;
- uint32_t maxRow = (rowBits > 0)? (1 << rowBits) - 1 : 0;
- uint32_t enginePartCnt = 0;
- FastS_assert(partBits > 0);
- bool *partidUsed = new bool[maxPart];
- for (uint32_t j = 0; j < maxPart; j++)
- partidUsed[j] = false;
-
- for (FastS_EngineDesc *engine = d->GetEngineList();
- engine != NULL; engine = engine->GetNext()) {
-
- bool bad = false;
- uint32_t partid = engine->GetConfPartID();
- uint32_t rowid = engine->GetConfRowID();
-
- if (partid != FastS_NoID32() &&
- (partid < minPart || partid > maxPart))
- {
- LOG(error, "engine '%s' in dataset %d has partid %d, legal range is [%d,%d] (partbits = %d)",
- engine->GetName(), d->GetID(), partid,
- minPart, maxPart, partBits);
- bad = true;
- }
-
- if (rowid && rowid != FastS_NoID32()) {
- if (rowBits == 0) {
- LOG(warning, "rowid (%d) on engine '%s' in dataset %d "
- "will be ignored because rowbits is 0",
- rowid, engine->GetName(), d->GetID());
- } else if (rowid > maxRow) {
- LOG(error, "engine '%s' in dataset %d has rowid %d, legal range is [%d,%d] (rowbits = %d)",
- engine->GetName(), d->GetID(), rowid,
- 0, maxRow, rowBits);
- bad = true;
- }
- }
- if (bad) {
- LOG(error, "marking engine '%s' in dataset %d as BAD due to illegal configuration",
- engine->GetName(), d->GetID());
- engine->MarkBad();
- }
-
- if (partid != FastS_NoID32() &&
- (partid >= minPart || partid <= maxPart)) {
- if (!partidUsed[partid]) {
- enginePartCnt++;
- partidUsed[partid] = true;
- }
- } else {
- enginePartCnt++;
- }
- }
- delete [] partidUsed;
- if (d->GetNumParts() < enginePartCnt) {
- LOG(warning,
- "plain dataset %d has "
- "%d engines with different partids, "
- "but only %d partitions",
- d->GetID(),
- enginePartCnt,
- d->GetNumParts());
- }
- }
- }
- }
-
- return rc;
-}
-
-
-
-FastS_DataSetCollDesc::FastS_DataSetCollDesc()
- : _datasets(NULL),
- _datasets_size(0),
- _frozen(false),
- _error(false)
-{
-}
-
-
-FastS_DataSetCollDesc::~FastS_DataSetCollDesc()
-{
- if (_datasets != NULL) {
- for (uint32_t i = 0; i < _datasets_size; i++) {
- if (_datasets[i] != NULL) {
- delete _datasets[i];
- }
- }
- delete [] _datasets;
- }
-}
-
-
-FastS_DataSetDesc *
-FastS_DataSetCollDesc::LookupCreateDataSet(uint32_t datasetid)
-{
- FastS_assert(!_frozen);
-
- if (datasetid >= _datasets_size) {
- uint32_t newSize = datasetid + 1;
-
- FastS_DataSetDesc **newArray = new FastS_DataSetDesc*[newSize];
- FastS_assert(newArray != NULL);
-
- uint32_t i;
- for (i = 0; i < _datasets_size; i++)
- newArray[i] = _datasets[i];
-
- for (; i < newSize; i++)
- newArray[i] = NULL;
-
- delete [] _datasets;
- _datasets = newArray;
- _datasets_size = newSize;
- }
-
- if (_datasets[datasetid] == NULL) {
- _datasets[datasetid] = new FastS_DataSetDesc(datasetid);
- }
-
- return _datasets[datasetid];
-}
-
-
-bool
-FastS_DataSetCollDesc::Freeze()
-{
- if (!_frozen) {
- _frozen = true;
-
- for (uint32_t i = 0; i < _datasets_size; i++)
- if (_datasets[i] != NULL)
- _datasets[i]->FinalizeConfig();
-
- _error = !CheckIntegrity();
- }
- return !_error;
-}
-
-//----------------------------------------------------------------------
-bool
-FastS_DataSetCollDesc::ReadConfig(const PartitionsConfig& partmap)
-{
- FastS_assert(!_frozen);
-
- int datasetcnt = partmap.dataset.size();
-
- if (datasetcnt < 1) {
- LOG(error, "no datasets in partitions config");
- return false;
- }
- for (int i=0; i < datasetcnt; i++) {
- typedef PartitionsConfig::Dataset Dsconfig;
- const Dsconfig dsconfig = partmap.dataset[i];
-
- FastS_DataSetDesc *dataset = LookupCreateDataSet(dsconfig.id);
-
- dataset->setSearchableCopies(dsconfig.searchablecopies);
- dataset->SetUnitRefCost(dsconfig.refcost);
- dataset->SetPartBits(dsconfig.partbits);
- dataset->SetRowBits(dsconfig.rowbits);
- dataset->SetNumParts(dsconfig.numparts);
- dataset->SetMinChildParts(dsconfig.minpartitions);
- dataset->setMaxNodesDownPerFixedRow(dsconfig.maxnodesdownperfixedrow);
- dataset->useRoundRobinForFixedRow(dsconfig.useroundrobinforfixedrow);
- dataset->SetMaxHitsPerNode(dsconfig.maxhitspernode);
- dataset->SetFirstPart(dsconfig.firstpart);
- dataset->SetMinOurActive(dsconfig.minactive);
- dataset->SetMaxOurActive(dsconfig.maxactive);
- dataset->SetCutoffOurActive(dsconfig.cutoffactive);
- dataset->SetMinEstActive(dsconfig.minestactive);
- dataset->SetMaxEstActive(dsconfig.maxestactive);
- dataset->SetCutoffEstActive(dsconfig.cutoffestactive);
- dataset->SetQueueDrainRate(dsconfig.queuedrainrate);
- dataset->SetQueueMaxDrain(dsconfig.queuedrainmax);
- dataset->SetSlowQueryLimitFactor(dsconfig.slowquerylimitfactor);
- dataset->SetSlowQueryLimitBias(dsconfig.slowquerylimitbias);
- dataset->SetSlowDocsumLimitFactor(dsconfig.slowdocsumlimitfactor);
- dataset->SetSlowDocsumLimitBias(dsconfig.slowdocsumlimitbias);
- dataset->setMonitorInterval(dsconfig.monitorinterval);
- dataset->setHigherCoverageMaxSearchWait(dsconfig.higherCoverageMaxsearchwait);
- dataset->setHigherCoverageMinSearchWait(dsconfig.higherCoverageMinsearchwait);
- dataset->setHigherCoverageBaseSearchWait(dsconfig.higherCoverageBasesearchwait);
- dataset->setMinimalSearchCoverage(dsconfig.minimalSearchcoverage);
- dataset->setHigherCoverageMaxDocSumWait(dsconfig.higherCoverageMaxdocsumwait);
- dataset->setHigherCoverageMinDocSumWait(dsconfig.higherCoverageMindocsumwait);
- dataset->setHigherCoverageBaseDocSumWait(dsconfig.higherCoverageBasedocsumwait);
- dataset->setMinimalDocSumCoverage(dsconfig.minimalDocsumcoverage);
- FastS_DataSetDesc::QueryDistributionMode distMode(dsconfig.querydistribution,
- dsconfig.minGroupCoverage,
- dsconfig.latencyDecayRate);
- distMode.setMinActivedocsCoverage(dsconfig.minActivedocsCoverage);
- dataset->SetQueryDistributionMode(distMode);
- dataset->setMPP(dsconfig.mpp);
- if (dsconfig.estparts > 0)
- dataset->SetEstimateParts(dsconfig.estparts);
- if (dsconfig.estpartcutoff > 0)
- dataset->SetEstPartCutoff(dsconfig.estpartcutoff);
-
- int enginecnt = dsconfig.engine.size();
-
- for (int j=0; j < enginecnt; j++) {
- const Dsconfig::Engine& engconfig = dsconfig.engine[j];
-
- FastS_EngineDesc *engine = dataset->AddEngine(engconfig.nameAndPort.c_str());
-
- engine->SetUnitRefCost(engconfig.refcost);
- engine->SetConfRowID(engconfig.rowid);
- engine->SetConfPartID(engconfig.partid);
- if (engconfig.overridepartids)
- engine->SetConfPartIDOverrides();
- }
- }
- return true;
-}
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/configdesc.h b/searchcore/src/vespa/searchcore/fdispatch/search/configdesc.h
deleted file mode 100644
index e0b0f0d7403..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/configdesc.h
+++ /dev/null
@@ -1,374 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/searchlib/common/fslimits.h>
-#include <vespa/searchcore/fdispatch/common/stdincl.h>
-#include <vespa/searchcore/config/config-partitions.h>
-#include <cassert>
-
-using vespa::config::search::core::PartitionsConfig;
-
-//-----------------------------------------------------------------------
-
-class FastS_EngineDesc
-{
-private:
- FastS_EngineDesc(const FastS_EngineDesc &);
- FastS_EngineDesc& operator=(const FastS_EngineDesc &);
-
- FastS_EngineDesc *_next;
- std::string _name;
- uint32_t _confPartID;
- uint32_t _confRowID;
- uint32_t _unitrefcost;
- bool _isBad;
- bool _confPartIDOverrides;
-
-public:
- explicit FastS_EngineDesc(const char *name)
- : _next(NULL),
- _name(name),
- _confPartID(FastS_NoID32()),
- _confRowID(FastS_NoID32()),
- _unitrefcost(1),
- _isBad(false),
- _confPartIDOverrides(false)
- { }
-
- void SetNext(FastS_EngineDesc *next) { _next = next; }
- void SetConfPartID(int32_t value) { assert(value >= 0); _confPartID = value; }
- void SetConfPartIDOverrides() { _confPartIDOverrides = true; }
- void SetConfRowID(int32_t value) { assert(value >= 0); _confRowID = value; }
- void SetUnitRefCost(uint32_t value) { _unitrefcost = value; }
- void MarkBad() { _isBad = true; }
- FastS_EngineDesc * GetNext() const { return _next; }
- const char * GetName() const { return _name.c_str(); }
- uint32_t GetConfPartID() const { return _confPartID; }
- bool GetConfPartIDOverrides() const { return _confPartIDOverrides; }
- uint32_t GetConfRowID() const { return _confRowID; }
- uint32_t GetUnitRefCost() const { return _unitrefcost; }
- bool IsBad() const { return _isBad; }
-};
-
-//-----------------------------------------------------------------------
-
-class FastS_DataSetDesc
-{
-private:
- FastS_DataSetDesc(const FastS_DataSetDesc &);
- FastS_DataSetDesc& operator=(const FastS_DataSetDesc &);
-
- static double _defaultSlowQueryLimitFactor;
- static double _defaultSlowQueryLimitBias;
- static double _defaultSlowDocsumLimitFactor;
- static double _defaultSlowDocsumLimitBias;
-
-public:
-
- class QueryDistributionMode {
- public:
- enum Mode {
- RANDOM = static_cast<int>(PartitionsConfig::Dataset::Querydistribution::RANDOM),
- AUTOMATIC = static_cast<int>(PartitionsConfig::Dataset::Querydistribution::AUTOMATIC),
- FIXEDROW = static_cast<int>(PartitionsConfig::Dataset::Querydistribution::FIXEDROW)
- };
-
- QueryDistributionMode(Mode mode, double minGroupCoverage, double latencyDecayRate) :
- _mode(mode),
- _minGroupCoverage(minGroupCoverage),
- _latencyDecayRate(latencyDecayRate),
- _minActivedocsCoverage(0.0)
- { }
-
- QueryDistributionMode(PartitionsConfig::Dataset::Querydistribution mode, double minGroupCoverage, double latencyDecayRate) :
- QueryDistributionMode(static_cast<Mode>(mode), minGroupCoverage, latencyDecayRate)
- {
- }
-
- bool operator==(const QueryDistributionMode & rhs) const {
- return _mode == rhs._mode;
- }
- bool operator == (Mode rhs) const {
- return _mode == rhs;
- }
- double getMinGroupCoverage() const { return _minGroupCoverage; }
- double getLatencyDecayRate() const { return _latencyDecayRate; }
- double getMinActivedocsCoverage() const { return _minActivedocsCoverage; }
-
- void setMinActivedocsCoverage(double val) { _minActivedocsCoverage = val; }
- private:
- Mode _mode;
- double _minGroupCoverage;
- double _latencyDecayRate;
- double _minActivedocsCoverage;
- };
-
- static void SetDefaultSlowQueryLimitFactor(double value)
- { _defaultSlowQueryLimitFactor = value; }
-
- static void SetDefaultSlowQueryLimitBias(double value)
- { _defaultSlowQueryLimitBias = value; }
-
- static void SetDefaultSlowDocsumLimitFactor(double value)
- { _defaultSlowDocsumLimitFactor = value; }
-
- static void SetDefaultSlowDocsumLimitBias(double value)
- { _defaultSlowDocsumLimitBias = value; }
-
-private:
- uint32_t _id;
- QueryDistributionMode _queryDistributionMode;
-
- uint32_t _searchableCopies;
- uint32_t _unitRefCost; // Cost to reference us
- uint32_t _partBits; // # bits used to encode part id
- uint32_t _rowBits; // # bits used to encode row id
- uint32_t _numParts; // Number of partitions
- uint32_t _firstPart; // First partition
- uint32_t _minChildParts; // Minimum partitions live to avoid tempfail
- uint32_t _maxNodesDownPerFixedRow; // max number of nodes down in a row before considering another row.
- bool _useRoundRobinForFixedRow; // Either plain roundrobin or random.
- uint32_t _maxHitsPerNode; // max hits requested from single node
- uint32_t _estimateParts; // number of partitions used for estimate
- uint32_t _estPartCutoff; // First partition not used for estimate
- bool _estimatePartsSet; // has _estimateParts been set ?
- bool _estPartCutoffSet; // has _estimatePartsCutoff been set ?
- uint32_t _minOurActive; // below ==> activate, skip estimates
- uint32_t _maxOurActive; // above ==> queue
- uint32_t _cutoffOurActive; // Above ==> cutoff
- uint32_t _minEstActive; // est below ==> activate
- uint32_t _maxEstActive; // est below ==> queue, est above cutoff > 0%
- uint32_t _cutoffEstActive; // est above ==> cutoff 100%
- double _queueDrainRate; // max queue drain per second
- double _queueMaxDrain; // max queue drain at once
- double _slowQueryLimitFactor;
- double _slowQueryLimitBias;
- double _slowDocsumLimitFactor;
- double _slowDocsumLimitBias;
- double _monitorInterval;
- double _higherCoverageMaxSearchWait;
- double _higherCoverageMinSearchWait;
- double _higherCoverageBaseSearchWait;
- double _minimalSearchCoverage;
- double _higherCoverageMaxDocSumWait;
- double _higherCoverageMinDocSumWait;
- double _higherCoverageBaseDocSumWait;
- double _minimalDocSumCoverage;
-
- uint32_t _engineCnt; // number of search engines in dataset
- FastS_EngineDesc *_enginesHead; // first engine in dataset
- FastS_EngineDesc *_enginesTail; // last engine in dataset
-
- uint32_t _mpp; // Minimum number of engines per partition
-public:
- explicit FastS_DataSetDesc(uint32_t datasetid);
- ~FastS_DataSetDesc();
-
- uint32_t GetID() const { return _id; }
- void SetUnitRefCost(uint32_t value) { _unitRefCost = value; }
- void setSearchableCopies(uint32_t value) { _searchableCopies = value; }
-
- void SetPartBits(uint32_t value) {
- if (value >= MIN_PARTBITS && value <= MAX_PARTBITS)
- _partBits = value;
- }
-
- void SetRowBits(uint32_t value) {
- if (value <= MAX_ROWBITS)
- _rowBits = value;
- }
-
- void SetNumParts(uint32_t value) { _numParts = value; }
- void SetFirstPart(uint32_t value) { _firstPart = value; }
- void SetMinChildParts(uint32_t value) { _minChildParts = value; }
- void setMaxNodesDownPerFixedRow(uint32_t value) { _maxNodesDownPerFixedRow = value; }
- void useRoundRobinForFixedRow(bool value) { _useRoundRobinForFixedRow = value; }
- void SetMaxHitsPerNode(uint32_t value) { _maxHitsPerNode = value; }
- void SetEstimateParts(uint32_t value) {
- _estimateParts = value;
- _estimatePartsSet = true;
- }
-
- void SetEstPartCutoff(uint32_t value) {
- _estPartCutoff = value;
- _estPartCutoffSet = true;
- }
-
- void SetMinOurActive(uint32_t value) { _minOurActive = value; }
- void SetMaxOurActive(uint32_t value) { _maxOurActive = value; }
- void SetCutoffOurActive(uint32_t value) { _cutoffOurActive = value; }
- void SetMinEstActive(uint32_t value) { _minEstActive = value; }
- void SetMaxEstActive(uint32_t value) { _maxEstActive = value; }
- void SetCutoffEstActive(uint32_t value) { _cutoffEstActive = value; }
- void SetQueueDrainRate(double value) { _queueDrainRate = value; }
- void SetQueueMaxDrain(double value) { _queueMaxDrain = value; }
- void SetSlowQueryLimitFactor(double value) { _slowQueryLimitFactor = value; }
- void SetSlowQueryLimitBias(double value) { _slowQueryLimitBias = value; }
- void SetSlowDocsumLimitFactor(double value) { _slowDocsumLimitFactor = value; }
- void SetSlowDocsumLimitBias(double value) { _slowDocsumLimitBias = value; }
-
- void SetQueryDistributionMode(QueryDistributionMode queryDistributionMode) {
- _queryDistributionMode = queryDistributionMode;
- }
-
- QueryDistributionMode GetQueryDistributionMode() { return _queryDistributionMode; }
-
- FastS_EngineDesc * AddEngine(const char *name);
- uint32_t GetUnitRefCost() const { return _unitRefCost; }
- uint32_t GetPartBits() const { return _partBits; }
-
- uint32_t GetRowBits() const { return _rowBits; }
- uint32_t GetNumParts() const { return _numParts; }
- uint32_t GetFirstPart() const { return _firstPart; }
- uint32_t GetMinChildParts() const { return _minChildParts; }
- uint32_t getMaxNodesDownPerFixedRow() const { return _maxNodesDownPerFixedRow; }
- bool useRoundRobinForFixedRow() const { return _useRoundRobinForFixedRow; }
- uint32_t GetMaxHitsPerNode() const { return _maxHitsPerNode; }
- uint32_t GetEstimateParts() const { return _estimateParts; }
- uint32_t GetEstPartCutoff() const { return _estPartCutoff; }
- bool IsEstimatePartsSet() const { return _estimatePartsSet; }
- bool IsEstPartCutoffSet() const { return _estPartCutoffSet; }
- uint32_t getSearchableCopies() const { return _searchableCopies; }
- uint32_t GetMinOurActive() const { return _minOurActive; }
- uint32_t GetMaxOurActive() const { return _maxOurActive; }
- uint32_t GetCutoffOurActive() const { return _cutoffOurActive; }
- uint32_t GetMinEstActive() const { return _minEstActive; }
- uint32_t GetMaxEstActive() const { return _maxEstActive; }
- uint32_t GetCutoffEstActive() const { return _cutoffEstActive; }
- double GetQueueDrainRate() const { return _queueDrainRate; }
- double GetQueueMaxDrain() const { return _queueMaxDrain; }
- double GetSlowQueryLimitFactor() const { return _slowQueryLimitFactor; }
- double GetSlowQueryLimitBias() const { return _slowQueryLimitBias; }
- double GetSlowDocsumLimitFactor() const { return _slowDocsumLimitFactor; }
- double GetSlowDocsumLimitBias() const { return _slowDocsumLimitBias; }
- uint32_t GetEngineCnt() const { return _engineCnt; }
- FastS_EngineDesc * GetEngineList() const { return _enginesHead; }
- void setMPP(uint32_t mpp) { _mpp = mpp; }
- uint32_t getMPP() const { return _mpp; }
-
- void
- setMonitorInterval(double monitorInterval) { _monitorInterval = monitorInterval; }
- double getMonitorInterval() const { return _monitorInterval; }
-
- void
- setHigherCoverageMaxSearchWait(double higherCoverageMaxSearchWait) {
- _higherCoverageMaxSearchWait = higherCoverageMaxSearchWait;
- }
-
- double
- getHigherCoverageMaxSearchWait() const {
- return _higherCoverageMaxSearchWait;
- }
-
- void
- setHigherCoverageMinSearchWait(double higherCoverageMinSearchWait) {
- _higherCoverageMinSearchWait = higherCoverageMinSearchWait;
- }
-
- double
- getHigherCoverageMinSearchWait() const {
- return _higherCoverageMinSearchWait;
- }
-
- void
- setHigherCoverageBaseSearchWait(double higherCoverageBaseSearchWait) {
- _higherCoverageBaseSearchWait = higherCoverageBaseSearchWait;
- }
-
- double
- getHigherCoverageBaseSearchWait() const {
- return _higherCoverageBaseSearchWait;
- }
-
- void
- setMinimalSearchCoverage(double minimalSearchCoverage) {
- _minimalSearchCoverage = minimalSearchCoverage;
- }
-
- double
- getMinimalSearchCoverage() const {
- return _minimalSearchCoverage;
- }
-
- void
- setHigherCoverageMaxDocSumWait(double higherCoverageMaxDocSumWait) {
- _higherCoverageMaxDocSumWait = higherCoverageMaxDocSumWait;
- }
-
- double
- getHigherCoverageMaxDocSumWait() const {
- return _higherCoverageMaxDocSumWait;
- }
-
- void
- setHigherCoverageMinDocSumWait(double higherCoverageMinDocSumWait) {
- _higherCoverageMinDocSumWait = higherCoverageMinDocSumWait;
- }
-
- double
- getHigherCoverageMinDocSumWait() const {
- return _higherCoverageMinDocSumWait;
- }
-
- void
- setHigherCoverageBaseDocSumWait(double higherCoverageBaseDocSumWait) {
- _higherCoverageBaseDocSumWait = higherCoverageBaseDocSumWait;
- }
-
- double
- getHigherCoverageBaseDocSumWait() const {
- return _higherCoverageBaseDocSumWait;
- }
-
- void
- setMinimalDocSumCoverage(double minimalDocSumCoverage) {
- _minimalDocSumCoverage = minimalDocSumCoverage;
- }
-
- double
- getMinimalDocSumCoverage() const {
- return _minimalDocSumCoverage;
- }
-
- void FinalizeConfig();
-};
-
-//-----------------------------------------------------------------------
-
-class FastS_DataSetCollDesc
-{
-private:
- FastS_DataSetCollDesc(const FastS_DataSetCollDesc &);
- FastS_DataSetCollDesc& operator=(const FastS_DataSetCollDesc &);
-
- FastS_DataSetDesc **_datasets;
- uint32_t _datasets_size;
-
- bool _frozen;
- bool _error;
-
- void HandleDeprecatedFPEstPartsOption();
- bool CheckIntegrity();
-
-public:
- FastS_DataSetCollDesc();
- ~FastS_DataSetCollDesc();
-
- FastS_DataSetDesc *LookupCreateDataSet(uint32_t datasetid);
-
- bool Freeze();
-
- uint32_t GetMaxNumDataSets() const { return _datasets_size; }
-
- FastS_DataSetDesc *GetDataSet(uint32_t datasetid) const {
- return (datasetid < _datasets_size)
- ? _datasets[datasetid]
- : NULL;
- }
-
- bool ReadConfig(const PartitionsConfig& partmap);
-};
-
-//-----------------------------------------------------------------------
-
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/dataset_base.cpp b/searchcore/src/vespa/searchcore/fdispatch/search/dataset_base.cpp
deleted file mode 100644
index 519960bfad0..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/dataset_base.cpp
+++ /dev/null
@@ -1,294 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "dataset_base.h"
-#include "configdesc.h"
-#include "datasetcollection.h"
-#include "engine_base.h"
-#include "nodemanager.h"
-
-//--------------------------------------------------------------------------
-
-FastS_DataSetBase::total_t::total_t()
- : _estimates(0),
- _nTimedOut(0),
- _nOverload(0),
- _normalTimeStat()
-{
- for (uint32_t i = 0; i < _timestatslots; i++)
- _timestats[i] = 0;
-}
-
-//--------------------------------------------------------------------------
-
-FastS_DataSetBase::overload_t::overload_t(FastS_DataSetDesc *desc)
- : _drainRate(desc->GetQueueDrainRate()),
- _drainMax(desc->GetQueueMaxDrain()),
- _minouractive(desc->GetMinOurActive()),
- _maxouractive(desc->GetMaxOurActive()),
- _cutoffouractive(desc->GetCutoffOurActive()),
- _minestactive(desc->GetMinEstActive()),
- _maxestactive(desc->GetMaxEstActive()),
- _cutoffestactive(desc->GetCutoffEstActive())
-{
-}
-
-//--------------------------------------------------------------------------
-
-FastS_DataSetBase::queryQueue_t::queryQueue_t(FastS_DataSetDesc *desc)
- : _head(nullptr),
- _tail(nullptr),
- _queueLen(0),
- _active(0),
- _drainAllowed(0.0),
- _drainStamp(0.0),
- _overload(desc)
-{
-}
-
-
-FastS_DataSetBase::queryQueue_t::~queryQueue_t()
-{
- FastS_assert(_active == 0);
-}
-
-
-void
-FastS_DataSetBase::queryQueue_t::QueueTail(queryQueued_t *newqueued)
-{
- FastS_assert(newqueued->_next == nullptr &&
- _head != newqueued &&
- _tail != newqueued);
- if (_tail != nullptr)
- _tail->_next = newqueued;
- else
- _head = newqueued;
- _tail = newqueued;
- _queueLen++;
-}
-
-
-void
-FastS_DataSetBase::queryQueue_t::DeQueueHead()
-{
- queryQueued_t *queued = _head;
- FastS_assert(_queueLen > 0);
- FastS_assert(queued->_next != nullptr || _tail == queued);
- _head = queued->_next;
- if (queued->_next == nullptr)
- _tail = nullptr;
- queued->_next = nullptr;
- _queueLen--;
-}
-
-//--------------------------------------------------------------------------
-
-FastS_DataSetBase::FastS_DataSetBase(FastS_AppContext *appCtx,
- FastS_DataSetDesc *desc)
- : _appCtx(appCtx),
- _lock(),
- _createtime(),
- _queryQueue(desc),
- _total(),
- _id(desc->GetID()),
- _unitrefcost(desc->GetUnitRefCost()),
- _totalrefcost(0),
- _mldDocStamp(0u),
- _searchableCopies(desc->getSearchableCopies())
-{
- _createtime.SetNow();
-}
-
-
-FastS_DataSetBase::~FastS_DataSetBase()
-{
- FastS_assert(_totalrefcost == 0);
-}
-
-void
-FastS_DataSetBase::ScheduleCheckTempFail()
-{
- _appCtx->GetNodeManager()->ScheduleCheckTempFail(_id);
-}
-
-
-void
-FastS_DataSetBase::DeQueueHeadWakeup_HasLock()
-{
- queryQueued_t *queued;
- queued = _queryQueue.GetFirst();
- FastS_assert(queued->IsQueued());
- auto queuedGuard(queued->getQueuedGuard());
- //SetNowFromMonitor();
- _queryQueue.DeQueueHead();
- queued->UnmarkQueued();
- FNET_Task *dequeuedTask = queued->getDequeuedTask();
- if (dequeuedTask != nullptr) {
- dequeuedTask->ScheduleNow();
- } else {
- queued->SignalCond();
- }
-}
-
-
-void
-FastS_DataSetBase::SetActiveQuery_HasLock()
-{
- _queryQueue.SetActiveQuery();
-}
-
-
-void
-FastS_DataSetBase::SetActiveQuery()
-{
- auto dsGuard(getDsGuard());
- SetActiveQuery_HasLock();
-}
-
-
-void
-FastS_DataSetBase::ClearActiveQuery_HasLock(FastS_TimeKeeper *timeKeeper)
-{
- FastS_assert(_queryQueue._active > 0);
- _queryQueue.ClearActiveQuery();
-
- CheckQueryQueue_HasLock(timeKeeper);
-}
-
-
-void
-FastS_DataSetBase::ClearActiveQuery(FastS_TimeKeeper *timeKeeper)
-{
- auto dsGuard(getDsGuard());
- ClearActiveQuery_HasLock(timeKeeper);
-}
-
-
-void
-FastS_DataSetBase::CheckQueryQueue_HasLock(FastS_TimeKeeper *timeKeeper)
-{
- queryQueued_t *queued;
- unsigned int active;
- unsigned int estactive;
- uint32_t dispatchnodes;
- double delay;
- double fnow;
-
- active = _queryQueue.GetActiveQueries(); // active from us
- estactive = CalculateQueueLens_HasLock(dispatchnodes);// active from us and others
-
- if (dispatchnodes == 0)
- dispatchnodes = 1;
-
- fnow = timeKeeper->GetTime();
- delay = fnow - _queryQueue._drainStamp;
- if (delay >= 0.0) {
- if (delay > 2.0) {
- delay = 2.0;
- if (_queryQueue._drainStamp == 0.0)
- _queryQueue._drainStamp = fnow;
- else
- _queryQueue._drainStamp += 2.0;
- } else
- _queryQueue._drainStamp = fnow;
- } else
- delay = 0.0;
-
- _queryQueue._drainAllowed += delay * _queryQueue._overload._drainRate;
- if (_queryQueue._drainAllowed >=
- _queryQueue._overload._drainMax + dispatchnodes - 1)
- _queryQueue._drainAllowed =
- _queryQueue._overload._drainMax + dispatchnodes - 1;
-
- while (_queryQueue._drainAllowed >= (double) dispatchnodes ||
- active < _queryQueue._overload._minouractive) {
- queued = _queryQueue.GetFirst();
- if (queued == nullptr) {
- return;
- }
-
- if (active >= _queryQueue._overload._maxouractive)
- return; // hard limit for how much we queue
-
- if (active >= _queryQueue._overload._minouractive &&
- estactive >= _queryQueue._overload._minestactive)
- return;
-
- // Dequeue query, count it active and wakeup thread handling query
- SetActiveQuery_HasLock();
- DeQueueHeadWakeup_HasLock();
-
- active++; // one more active from us
- estactive += dispatchnodes; // Assume other nodes do likewise
- if (_queryQueue._drainAllowed >= (double) dispatchnodes)
- _queryQueue._drainAllowed -= dispatchnodes; // Rate limitation
- else
- _queryQueue._drainAllowed = 0.0;
- }
-}
-
-
-void
-FastS_DataSetBase::AbortQueryQueue_HasLock()
-{
- queryQueued_t *queued;
-
- /*
- * Don't allow new queries to be queued.
- * Abort currently queued queries.
- */
- _queryQueue._overload._minouractive = 0;
- _queryQueue._overload._cutoffouractive = 0;
- for (;;) {
- queued = _queryQueue.GetFirst();
- if (queued == nullptr)
- break;
- // Doesn't lock query, but other thread is waiting on queue
- queued->MarkAbort();
- DeQueueHeadWakeup_HasLock();
- }
-}
-
-void
-FastS_DataSetBase::AddCost()
-{
- _totalrefcost += _unitrefcost;
-}
-
-void
-FastS_DataSetBase::SubCost()
-{
- FastS_assert(_totalrefcost >= _unitrefcost);
- _totalrefcost -= _unitrefcost;
-}
-
-void
-FastS_DataSetBase::UpdateSearchTime(double tnow, double elapsed, bool timedout)
-{
- int slot;
- auto dsGuard(getDsGuard());
- slot = (int) (elapsed * 10);
- if (slot >= _total._timestatslots)
- slot = _total._timestatslots - 1;
- else if (slot < 0)
- slot = 0;
- _total._timestats[slot]++;
- _total._normalTimeStat.Update(tnow, elapsed, timedout);
-}
-
-void
-FastS_DataSetBase::UpdateEstimateCount()
-{
- ++_total._estimates;
-}
-
-void
-FastS_DataSetBase::CountTimeout()
-{
- ++_total._nTimedOut;
-}
-
-ChildInfo
-FastS_DataSetBase::getChildInfo() const
-{
- return ChildInfo();
-}
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/dataset_base.h b/searchcore/src/vespa/searchcore/fdispatch/search/dataset_base.h
deleted file mode 100644
index f4f69285e89..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/dataset_base.h
+++ /dev/null
@@ -1,227 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include "child_info.h"
-#include <vespa/searchcore/fdispatch/common/timestat.h>
-#include <vespa/searchcore/util/log.h>
-#include <atomic>
-#include <vespa/fastos/time.h>
-#include <mutex>
-#include <condition_variable>
-
-class FastS_TimeKeeper;
-
-class FastS_DataSetDesc;
-class FastS_EngineDesc;
-class FastS_DataSetCollection;
-class FastS_ISearch;
-class FastS_QueryResult;
-class FastS_PlainDataSet;
-class FastS_FNET_DataSet;
-class FastS_AppContext;
-class FNET_Task;
-
-//---------------------------------------------------------------------------
-
-class FastS_DataSetBase
-{
- friend class FastS_DataSetCollection;
-public:
-
- //----------------------------------------------------------------
- // total query stats
- //----------------------------------------------------------------
-
- class total_t
- {
- public:
- enum {
- _timestatslots = 100
- };
- std::atomic<uint32_t> _estimates;
- std::atomic<uint32_t> _nTimedOut;
- uint32_t _nOverload;
- uint32_t _timestats[_timestatslots];
- FastS_TimeStatHistory _normalTimeStat;
- total_t();
- };
-
- //----------------------------------------------------------------
- // parameters used by query queue
- //----------------------------------------------------------------
-
- class overload_t
- {
- public:
- double _drainRate; // Queue drain rate
- double _drainMax; // Max queue drain at once
- uint32_t _minouractive; // minimum active requests from us
- uint32_t _maxouractive; // maximum active requests from us (queue)
- uint32_t _cutoffouractive; // cutoff active requests
- uint32_t _minestactive; // minimum estimated requests before queueing
- uint32_t _maxestactive; // maximum estimated requests (start earlydrop)
- uint32_t _cutoffestactive; // cutoff estimated requests (end earlydrop)
-
- overload_t(FastS_DataSetDesc *desc);
- };
-
- //----------------------------------------------------------------
- // class used to wait for a query queue
- //----------------------------------------------------------------
-
- class queryQueue_t;
- class queryQueued_t
- {
- friend class queryQueue_t;
- private:
- queryQueued_t(const queryQueued_t &);
- queryQueued_t& operator=(const queryQueued_t &);
-
- std::mutex _queuedLock;
- std::condition_variable _queuedCond;
- queryQueued_t *_next;
- bool _isAborted;
- bool _isQueued;
- FNET_Task *const _deQueuedTask;
- public:
- queryQueued_t(FNET_Task *const deQueuedTask)
- : _queuedLock(),
- _queuedCond(),
- _next(NULL),
- _isAborted(false),
- _isQueued(false),
- _deQueuedTask(deQueuedTask)
- {
- }
-
- ~queryQueued_t()
- {
- FastS_assert(!_isQueued);
- }
- void Wait() {
- std::unique_lock<std::mutex> queuedGuard(_queuedLock);
- while (_isQueued) {
- _queuedCond.wait(queuedGuard);
- }
- }
- bool IsAborted() const { return _isAborted; }
- void MarkAbort() { _isAborted = true; }
- void MarkQueued() { _isQueued = true; }
- void UnmarkQueued() { _isQueued = false; }
- bool IsQueued() const { return _isQueued; }
- std::unique_lock<std::mutex> getQueuedGuard() { return std::unique_lock<std::mutex>(_queuedLock); }
- void SignalCond() { _queuedCond.notify_one(); }
-
- FNET_Task *
- getDequeuedTask() const
- {
- return _deQueuedTask;
- }
- };
-
- //----------------------------------------------------------------
- // per dataset query queue
- //----------------------------------------------------------------
-
- class queryQueue_t
- {
- friend class FastS_DataSetBase;
-
- private:
- queryQueue_t(const queryQueue_t &);
- queryQueue_t& operator=(const queryQueue_t &);
-
- queryQueued_t *_head;
- queryQueued_t *_tail;
- unsigned int _queueLen;
- unsigned int _active;
-
- public:
- double _drainAllowed; // number of drainable request
- double _drainStamp; // stamp of last drain check
- overload_t _overload; // queue parameters
-
- public:
- queryQueue_t(FastS_DataSetDesc *desc);
- ~queryQueue_t();
- void QueueTail(queryQueued_t *newquery);
- void DeQueueHead();
- unsigned int GetQueueLen() const { return _queueLen; }
- unsigned int GetActiveQueries() const { return _active; }
- void SetActiveQuery() { _active++; }
- void ClearActiveQuery() { _active--; }
- queryQueued_t *GetFirst() const { return _head; }
- };
-
- //----------------------------------------------------------------
-
-protected:
- FastS_AppContext *_appCtx;
- std::mutex _lock;
- FastOS_Time _createtime;
- queryQueue_t _queryQueue;
- total_t _total;
- uint32_t _id;
- uint32_t _unitrefcost;
-
- // Total cost as seen by referencing objects
- std::atomic<uint32_t> _totalrefcost;
- uint32_t _mldDocStamp;
-private:
- uint32_t _searchableCopies;
-
-public:
- FastS_DataSetBase(const FastS_DataSetBase &) = delete;
- FastS_DataSetBase& operator=(const FastS_DataSetBase &) = delete;
- FastS_DataSetBase(FastS_AppContext *appCtx, FastS_DataSetDesc *desc);
- virtual ~FastS_DataSetBase();
-
- // locking stuff
- //--------------
- std::unique_lock<std::mutex> getDsGuard() { return std::unique_lock<std::mutex>(_lock); }
-
- // query queue related methods
- //----------------------------
- void SetActiveQuery_HasLock();
- void SetActiveQuery();
- void ClearActiveQuery_HasLock(FastS_TimeKeeper *timeKeeper);
- void ClearActiveQuery(FastS_TimeKeeper *timeKeeper);
- void CheckQueryQueue_HasLock(FastS_TimeKeeper *timeKeeper);
- void AbortQueryQueue_HasLock();
-
- // common dataset methods
- //-----------------------
- uint32_t GetID() { return _id; }
- double Uptime() { return _createtime.MilliSecsToNow() / 1000.0; }
- FastS_AppContext *GetAppContext() const { return _appCtx; }
- void AddCost();
- void SubCost();
- void UpdateSearchTime(double tnow, double elapsed, bool timedout);
- void UpdateEstimateCount();
- void CountTimeout();
- uint32_t getSearchableCopies() const { return _searchableCopies; }
-
- void ScheduleCheckTempFail();
- virtual void DeQueueHeadWakeup_HasLock();
- virtual ChildInfo getChildInfo() const;
- uint32_t GetMldDocStamp() const { return _mldDocStamp; }
- void SetMldDocStamp(uint32_t mldDocStamp) { _mldDocStamp = mldDocStamp; }
-
- // common dataset API
- //-------------------
- virtual uint32_t CalculateQueueLens_HasLock(uint32_t &dispatchnodes) = 0;
- virtual bool AddEngine(FastS_EngineDesc *desc) = 0;
- virtual void ConfigDone(FastS_DataSetCollection *) {}
- virtual void ScheduleCheckBad() {}
- virtual bool AreEnginesReady() = 0;
- virtual FastS_ISearch *CreateSearch(FastS_DataSetCollection *dsc,
- FastS_TimeKeeper *timeKeeper,
- bool async) = 0;
- virtual void Free() = 0;
-
- // typesafe down-cast
- //-------------------
- virtual FastS_PlainDataSet *GetPlainDataSet() { return nullptr; }
- virtual FastS_FNET_DataSet *GetFNETDataSet() { return nullptr; }
-};
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/datasetcollection.cpp b/searchcore/src/vespa/searchcore/fdispatch/search/datasetcollection.cpp
deleted file mode 100644
index d99b32ac138..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/datasetcollection.cpp
+++ /dev/null
@@ -1,266 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "datasetcollection.h"
-#include "fnet_dataset.h"
-#include <vespa/searchcore/fdispatch/common/search.h>
-#include <vespa/fnet/fnet.h>
-
-#include <vespa/log/log.h>
-LOG_SETUP(".search.datasetcollection");
-
-FastS_DataSetBase *
-FastS_DataSetCollection::CreateDataSet(FastS_DataSetDesc *desc)
-{
- FastS_DataSetBase *ret = nullptr;
-
- FNET_Transport *transport = _appCtx->GetFNETTransport();
- FNET_Scheduler *scheduler = _appCtx->GetFNETScheduler();
- if (transport != nullptr && scheduler != nullptr) {
- ret = new FastS_FNET_DataSet(transport, scheduler, _appCtx, desc);
- } else {
- LOG(error, "Non-available dataset transport: FNET");
- }
- return ret;
-}
-
-
-bool
-FastS_DataSetCollection::AddDataSet(FastS_DataSetDesc *desc)
-{
- uint32_t datasetid = desc->GetID();
-
- if (datasetid >= _datasets_size) {
- uint32_t newSize = datasetid + 1;
-
- FastS_DataSetBase **newArray = new FastS_DataSetBase*[newSize];
- FastS_assert(newArray != nullptr);
-
- uint32_t i;
- for (i = 0; i < _datasets_size; i++)
- newArray[i] = _datasets[i];
-
- for (; i < newSize; i++)
- newArray[i] = nullptr;
-
- delete [] _datasets;
- _datasets = newArray;
- _datasets_size = newSize;
- }
- FastS_assert(_datasets[datasetid] == nullptr);
- FastS_DataSetBase *dataset = CreateDataSet(desc);
- if (dataset == nullptr)
- return false;
- _datasets[datasetid] = dataset;
-
- for (FastS_EngineDesc *engineDesc = desc->GetEngineList();
- engineDesc != nullptr; engineDesc = engineDesc->GetNext()) {
-
- dataset->AddEngine(engineDesc);
- }
- dataset->ConfigDone(this);
- return true;
-}
-
-
-
-FastS_DataSetCollection::FastS_DataSetCollection(FastS_AppContext *appCtx)
- : _nextOld(nullptr),
- _configDesc(nullptr),
- _appCtx(appCtx),
- _datasets(nullptr),
- _datasets_size(0),
- _gencnt(0),
- _frozen(false),
- _error(false)
-{
-}
-
-
-FastS_DataSetCollection::~FastS_DataSetCollection()
-{
- if (_datasets != nullptr) {
- for (uint32_t i = 0; i < _datasets_size; i++) {
- if (_datasets[i] != nullptr) {
- _datasets[i]->Free();
- _datasets[i] = nullptr;
- }
- }
- }
-
- delete [] _datasets;
- delete _configDesc;
-}
-
-
-bool
-FastS_DataSetCollection::Configure(FastS_DataSetCollDesc *cfgDesc,
- uint32_t gencnt)
-{
- bool rc = false;
-
- if (_frozen) {
- delete cfgDesc;
- } else {
- FastS_assert(_configDesc == nullptr);
- if (cfgDesc == nullptr) {
- _configDesc = new FastS_DataSetCollDesc();
- } else {
- _configDesc = cfgDesc;
- }
- _gencnt = gencnt;
- _frozen = true;
- _error = !_configDesc->Freeze();
- rc = !_error;
-
- for (uint32_t i = 0; rc && i < _configDesc->GetMaxNumDataSets(); i++) {
- FastS_DataSetDesc *datasetDesc = _configDesc->GetDataSet(i);
- if (datasetDesc != nullptr) {
- FastS_assert(datasetDesc->GetID() == i);
- rc = AddDataSet(datasetDesc);
- }
- }
-
- _error = !rc;
- }
- return rc;
-}
-
-
-uint32_t
-FastS_DataSetCollection::SuggestDataSet()
-{
- FastS_assert(_frozen);
-
- FastS_DataSetBase *dataset = nullptr;
-
- for (uint32_t i = 0; i < _datasets_size; i++) {
- FastS_DataSetBase *tmp = _datasets[i];
- if (tmp == nullptr || tmp->_unitrefcost == 0)
- continue;
-
- // NB: cost race condition
-
- if (dataset == nullptr ||
- dataset->_totalrefcost + dataset->_unitrefcost >
- tmp->_totalrefcost + tmp->_unitrefcost)
- dataset = tmp;
- }
-
- return (dataset == nullptr)
- ? FastS_NoID32()
- : dataset->GetID();
-}
-
-
-FastS_DataSetBase *
-FastS_DataSetCollection::GetDataSet(uint32_t datasetid)
-{
- FastS_assert(_frozen);
-
- FastS_DataSetBase *dataset =
- (datasetid < _datasets_size) ?
- _datasets[datasetid] : nullptr;
-
- if (dataset != nullptr)
- dataset->AddCost();
-
- return dataset;
-}
-
-
-FastS_DataSetBase *
-FastS_DataSetCollection::GetDataSet()
-{
- FastS_assert(_frozen);
-
- FastS_DataSetBase *dataset = nullptr;
-
- for (uint32_t i = 0; i < _datasets_size; i++) {
- FastS_DataSetBase *tmp = _datasets[i];
- if (tmp == nullptr || tmp->_unitrefcost == 0)
- continue;
-
- // NB: cost race condition
-
- if (dataset == nullptr ||
- dataset->_totalrefcost + dataset->_unitrefcost >
- tmp->_totalrefcost + tmp->_unitrefcost)
- dataset = tmp;
- }
-
- if (dataset != nullptr)
- dataset->AddCost();
-
- return dataset;
-}
-
-
-bool
-FastS_DataSetCollection::AreEnginesReady()
-{
- for (uint32_t datasetidx = 0; datasetidx < GetMaxNumDataSets(); datasetidx++) {
- FastS_DataSetBase *dataset = PeekDataSet(datasetidx);
- if ((dataset != nullptr) && !dataset->AreEnginesReady()) {
- return false;
- }
- }
- return true;
-}
-
-
-FastS_ISearch *
-FastS_DataSetCollection::CreateSearch(uint32_t dataSetID,
- FastS_TimeKeeper *timeKeeper)
-{
- FastS_ISearch *ret = nullptr;
- FastS_DataSetBase *dataset;
-
- if (dataSetID == FastS_NoID32()) {
- dataset = GetDataSet();
- if (dataset != nullptr)
- dataSetID = dataset->GetID();
- } else {
- dataset = GetDataSet(dataSetID);
- }
- if (dataset == nullptr) {
- ret = new FastS_FailedSearch(dataSetID, false,
- search::engine::ECODE_ILLEGAL_DATASET, nullptr);
- } else {
- {
- auto dsGuard(dataset->getDsGuard());
- dataset->SetActiveQuery_HasLock();
- }
- /* XXX: Semantic change: precounted as active in dataset */
- ret = dataset->CreateSearch(this, timeKeeper, /* async = */ false);
- }
- FastS_assert(ret != nullptr);
- return ret;
-}
-
-
-void
-FastS_DataSetCollection::CheckQueryQueues(FastS_TimeKeeper *timeKeeper)
-{
- for (uint32_t datasetidx(0); datasetidx < GetMaxNumDataSets(); datasetidx++) {
- FastS_DataSetBase *dataset = PeekDataSet(datasetidx);
-
- if (dataset != nullptr) {
- auto dsGuard(dataset->getDsGuard());
- dataset->CheckQueryQueue_HasLock(timeKeeper);
- }
- }
-}
-
-
-void
-FastS_DataSetCollection::AbortQueryQueues()
-{
- for (uint32_t datasetidx(0); datasetidx < GetMaxNumDataSets(); datasetidx++) {
- FastS_DataSetBase *dataset = PeekDataSet(datasetidx);
-
- if (dataset != nullptr) {
- auto dsGuard(dataset->getDsGuard());
- dataset->AbortQueryQueue_HasLock();
- }
- }
-}
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/datasetcollection.h b/searchcore/src/vespa/searchcore/fdispatch/search/datasetcollection.h
deleted file mode 100644
index aed33803c02..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/datasetcollection.h
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/vespalib/util/referencecounter.h>
-#include <vespa/searchcore/fdispatch/common/appcontext.h>
-#include <vespa/searchcore/fdispatch/search/configdesc.h>
-
-class FastS_DataSetBase;
-class FastS_ISearch;
-
-class FastS_DataSetCollection : public vespalib::ReferenceCounter
-{
-private:
- FastS_DataSetCollection(const FastS_DataSetCollection &);
- FastS_DataSetCollection& operator=(const FastS_DataSetCollection &);
-
-public:
- // used by Monitor to service old query queues.
- FastS_DataSetCollection *_nextOld;
-
-private:
- FastS_DataSetCollDesc *_configDesc;
- FastS_AppContext *_appCtx;
-
- FastS_DataSetBase **_datasets;
- uint32_t _datasets_size;
-
- uint32_t _gencnt;
- bool _frozen;
- bool _error;
-
- FastS_DataSetBase *CreateDataSet(FastS_DataSetDesc *desc);
- bool AddDataSet(FastS_DataSetDesc *desc);
-
-public:
- explicit FastS_DataSetCollection(FastS_AppContext *appCtx);
- virtual ~FastS_DataSetCollection();
-
- /**
- * Configure this dataset collection. Note that the given config
- * description is handed over to this object when this method is
- * called. Also note that this method replaces the old methods used
- * to add datasets and engines as well as the Freeze method. In
- * other words; this method uses the given config description to
- * create a new node setup and then freezing it. Using a NULL
- * pointer for the config description is legal; it denotes the empty
- * configuration.
- *
- * @return true(ok)/false(fail)
- * @param cfgDesc configuration description
- * @param gencnt the generation of this node setup
- **/
- bool Configure(FastS_DataSetCollDesc *cfgDesc, uint32_t gencnt);
-
- /**
- * This method may be used to verify that this dataset collection
- * has been successfully configured. See @ref Configure.
- *
- * @return true if successfully configured
- **/
- bool IsValid() { return (_frozen && !_error); }
-
- FastS_DataSetCollDesc *GetConfigDesc() { return _configDesc; }
-
- FastS_AppContext *GetAppContext() { return _appCtx; }
-
- uint32_t GetMaxNumDataSets() { return _datasets_size; }
-
- FastS_DataSetBase *PeekDataSet(uint32_t datasetid)
- { return (datasetid < _datasets_size) ? _datasets[datasetid] : NULL; }
-
- uint32_t SuggestDataSet();
- FastS_DataSetBase *GetDataSet(uint32_t datasetid);
- FastS_DataSetBase *GetDataSet();
-
- bool AreEnginesReady();
-
- // create search
- FastS_ISearch *CreateSearch(uint32_t dataSetID, FastS_TimeKeeper *timeKeeper);
-
- // handle old query queues
- bool IsLastRef() { return (refCount() == 1); }
- void CheckQueryQueues(FastS_TimeKeeper *timeKeeper);
- void AbortQueryQueues();
-};
-
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/engine_base.cpp b/searchcore/src/vespa/searchcore/fdispatch/search/engine_base.cpp
deleted file mode 100644
index 24668db6024..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/engine_base.cpp
+++ /dev/null
@@ -1,417 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "engine_base.h"
-#include "configdesc.h"
-#include "plain_dataset.h"
-
-#include <vespa/log/log.h>
-LOG_SETUP(".search.engine_base");
-
-//---------------------------------------------------------------------------
-
-FastS_EngineBase::stats_t::stats_t()
- : _fliptime(),
- _floptime(),
- _slowQueryCnt(0),
- _slowDocsumCnt(0),
- _slowQuerySecs(0.0),
- _slowDocsumSecs(0.0),
- _queueLenSampleAcc(0),
- _queueLenSampleCnt(0),
- _activecntSampleAcc(0),
- _activecntSampleCnt(0),
- _queueLenAcc(0.0),
- _activecntAcc(0.0),
- _queueLenIdx(0),
- _queueLenValid(0)
-{
- uint32_t i;
-
- _fliptime.SetNow();
- _floptime.SetNow();
- for (i = 0; i < _queuestatsize; i++) {
- _queueLens[i]._queueLen = 0.0;
- _queueLens[i]._activecnt = 0.0;
- }
-}
-
-//---------------------------------------------------------------------------
-
-FastS_EngineBase::reported_t::reported_t()
- : _queueLen(0),
- _dispatchers(0),
- _mld(false),
- _reportedPartID(FastS_NoID32()),
- _actNodes(0),
- _maxNodes(0),
- _actParts(0),
- _maxParts(0),
- _activeDocs(),
- _docstamp(FastS_EngineBase::NoDocStamp())
-{
- _activeDocs.valid = true;
-}
-
-
-FastS_EngineBase::reported_t::~reported_t()
-{
-}
-
-//---------------------------------------------------------------------------
-
-FastS_EngineBase::config_t::config_t(FastS_EngineDesc *desc)
- : _name(NULL),
- _unitrefcost(desc->GetUnitRefCost()),
- _confPartID(desc->GetConfPartID()),
- _confRowID(desc->GetConfRowID()),
- _confPartIDOverrides(desc->GetConfPartIDOverrides())
-{
- _name = strdup(desc->GetName());
- FastS_assert(_name != NULL);
-}
-
-
-FastS_EngineBase::config_t::~config_t()
-{
- free(_name);
-}
-
-//---------------------------------------------------------------------------
-
-FastS_EngineBase::FastS_EngineBase(FastS_EngineDesc *desc,
- FastS_PlainDataSet *dataset)
- : _stats(),
- _reported(),
- _config(desc),
- _isUp(false),
- _badness(BAD_NOT),
- _partid(FastS_NoID32()),
- _totalrefcost(0),
- _activecnt(0),
- _dataset(dataset),
- _nextds(NULL),
- _prevpart(NULL),
- _nextpart(NULL),
- _lock()
-{
- FastS_assert(_dataset != NULL);
-}
-
-
-FastS_EngineBase::~FastS_EngineBase()
-{
- FastS_assert(_nextds == NULL);
- FastS_assert(_prevpart == NULL);
- FastS_assert(_nextpart == NULL);
- FastS_assert(_totalrefcost == 0);
- FastS_assert(_activecnt == 0);
-}
-
-
-void
-FastS_EngineBase::SlowQuery(double limit, double secs, bool silent)
-{
- {
- std::lock_guard<std::mutex> engineGuard(_lock);
- _stats._slowQueryCnt++;
- _stats._slowQuerySecs += secs;
- }
- if (!silent)
- LOG(warning,
- "engine %s query slow by %.3fs + %.3fs",
- _config._name, limit, secs);
-}
-
-
-void
-FastS_EngineBase::SlowDocsum(double limit, double secs)
-{
- {
- std::lock_guard<std::mutex> engineGuard(_lock);
- _stats._slowDocsumCnt++;
- _stats._slowDocsumSecs += secs;
- }
- LOG(warning,
- "engine %s docsum slow by %.3fs + %.3fs",
- _config._name, limit, secs);
-}
-
-
-void
-FastS_EngineBase::AddCost()
-{
- _totalrefcost += _config._unitrefcost;
- ++_activecnt;
-}
-
-
-void
-FastS_EngineBase::SubCost()
-{
- FastS_assert(_totalrefcost >= _config._unitrefcost);
- _totalrefcost -= _config._unitrefcost;
- FastS_assert(_activecnt >= 1);
- --_activecnt;
-}
-
-
-void
-FastS_EngineBase::SaveQueueLen_NoLock(uint32_t queueLen, uint32_t dispatchers)
-{
- _reported._queueLen = queueLen;
- _reported._dispatchers = dispatchers;
- _stats._queueLenSampleAcc += queueLen;
- _stats._queueLenSampleCnt++;
- _stats._activecntSampleAcc += _activecnt;
- _stats._activecntSampleCnt++;
-}
-
-
-void
-FastS_EngineBase::SampleQueueLens()
-{
- double queueLen;
- double activecnt;
-
- std::lock_guard<std::mutex> engineGuard(_lock);
- if (_stats._queueLenSampleCnt > 0)
- queueLen = (double) _stats._queueLenSampleAcc / (double) _stats._queueLenSampleCnt;
- else
- queueLen = 0;
- if (_stats._activecntSampleCnt > 0)
- activecnt = (double) _stats._activecntSampleAcc / (double) _stats._activecntSampleCnt;
- else
- activecnt = 0;
-
- _stats._queueLenSampleAcc = 0;
- _stats._queueLenSampleCnt = 0;
- _stats._activecntSampleAcc = 0;
- _stats._activecntSampleCnt = 0;
-
- _stats._queueLenAcc -= _stats._queueLens[_stats._queueLenIdx]._queueLen;
- _stats._queueLens[_stats._queueLenIdx]._queueLen = queueLen;
- _stats._queueLenAcc += queueLen;
-
- _stats._activecntAcc -= _stats._queueLens[_stats._queueLenIdx]._activecnt;
- _stats._queueLens[_stats._queueLenIdx]._activecnt = activecnt;
- _stats._activecntAcc += activecnt;
-
- _stats._queueLenIdx++;
- if (_stats._queueLenIdx >= _stats._queuestatsize)
- _stats._queueLenIdx = 0;
- if (_stats._queueLenValid < _stats._queuestatsize)
- _stats._queueLenValid++;
-}
-
-void
-FastS_EngineBase::UpdateSearchTime(double tnow, double elapsed, bool timedout)
-{
- (void) tnow;
- (void) elapsed;
- (void) timedout;
-}
-
-void
-FastS_EngineBase::MarkBad(uint32_t badness)
-{
- bool worse = false;
-
- {
- std::lock_guard<std::mutex> engineGuard(_lock);
- if (badness > _badness) {
- _badness = badness;
- worse = true;
- }
- }
-
- if (worse) {
- if (badness <= BAD_NOT) {
- } else {
- _dataset->ScheduleCheckBad();
- }
- }
-}
-
-
-void
-FastS_EngineBase::ClearBad()
-{
- {
- std::unique_lock<std::mutex> engineGuard(_lock);
- if (_badness >= BAD_CONFIG) {
- engineGuard.unlock();
- LOG(warning,
- "engine %s still bad due to illegal config",
- _config._name);
- return;
- }
- _badness = BAD_NOT;
- }
- HandleClearedBad();
-}
-
-
-void
-FastS_EngineBase::HandlePingResponse(uint32_t partid,
- time_t docstamp,
- bool mld,
- uint32_t maxnodes,
- uint32_t nodes,
- uint32_t maxparts,
- uint32_t parts,
- PossCount activeDocs)
-{
- // ignore really bad nodes
- if (IsRealBad())
- return;
-
- _reported._reportedPartID = partid;
-
- // override reported partid ?
-
- if (_config._confPartIDOverrides && _config._confPartID != FastS_NoID32()) {
- LOG(debug, "Partid(%d) overridden by config(%d)", partid, _config._confPartID);
- partid = _config._confPartID;
- }
-
- // bad partid ?
-
- if ((partid != _config._confPartID && _config._confPartID != FastS_NoID32()) ||
- (partid < _dataset->GetFirstPart()) ||
- (partid >= _dataset->GetLastPart()) ||
- (partid >= _dataset->GetFirstPart() + (1 << _dataset->GetPartBits())))
- {
- LOG(warning, "Partid(%d) overridden to %d since it was bad: _confPartID(%d) dataset.first(%d), last(%d), (1 << bits)(%d)", partid, FastS_NoID32(), _config._confPartID, _dataset->GetFirstPart(), _dataset->GetLastPart(), (1 << _dataset->GetPartBits()));
- partid = FastS_NoID32();
- }
-
- // what happened ?
-
- bool onlined = !IsUp();
- bool bigchange = (!onlined &&
- (partid != _partid ||
- docstamp != _reported._docstamp));
- bool changed = (!onlined &&
- (bigchange ||
- mld != _reported._mld ||
- maxnodes != _reported._maxNodes ||
- nodes != _reported._actNodes ||
- maxparts != _reported._maxParts ||
- activeDocs != _reported._activeDocs ||
- parts != _reported._actParts));
-
- // nothing happened ?
-
-#if 0
- LOG(info,
- "HandlePingResponse: "
- "engine %s (partid %d) docstamp %d, "
- "onlined %s, changed %s",
- _config._name,
- static_cast<int>(partid),
- static_cast<int>(docstamp),
- onlined ? "true" : "false",
- changed ? "true" : "false");
-#endif
- if (!onlined && !changed)
- return;
-
- // report stuff
-
- if (onlined) {
- LOG(debug,
- "Search node %s up, partition %d, docstamp %d",
- _config._name, partid, (uint32_t) docstamp);
- } else if (bigchange) {
- if (partid != _partid) {
- LOG(debug,
- "Search node %s changed partid %u -> %u",
- _config._name, _partid, partid);
- }
- if (docstamp != _reported._docstamp) {
- LOG(debug,
- "Search node %s changed docstamp %u -> %u",
- _config._name,
- (uint32_t)_reported._docstamp,
- (uint32_t)docstamp);
- if (docstamp == 0) {
- LOG(warning, "Search node %s (partid %d) went bad (docstamp 0)",
- _config._name, partid);
- }
- }
- }
-
- {
- auto dsGuard(_dataset->getDsGuard());
- if (changed)
- _dataset->LinkOutPart_HasLock(this);
-
- _partid = partid;
- if (docstamp != _reported._docstamp) {
- _reported._docstamp = docstamp;
- }
- _reported._mld = mld;
- _reported._maxNodes = maxnodes;
- _reported._actNodes = nodes;
- _reported._maxParts = maxparts;
- _reported._actParts = parts;
- if (_reported._activeDocs != activeDocs) {
- _dataset->updateActiveDocs_HasLock(GetConfRowID(), activeDocs, _reported._activeDocs);
- _reported._activeDocs = activeDocs;
- }
- _isUp = true;
-
- _dataset->LinkInPart_HasLock(this);
-
- }
- _dataset->ScheduleCheckTempFail();
-
- if (onlined) {
- HandleUp();
- }
-
- // detect flipflop badness
-
- // NB: fliphistory race with clearbad...
-
- if (onlined || bigchange) {
- _stats._fliptime.SetNow();
- }
-}
-
-
-void
-FastS_EngineBase::HandleLostConnection()
-{
- if (IsUp()) {
- _isUp = false;
- _stats._floptime.SetNow();
- LOG(warning, "Search node %s down", _config._name);
-
- {
- auto dsGuard(_dataset->getDsGuard());
- _dataset->LinkOutPart_HasLock(this);
- PossCount noDocs;
- noDocs.valid = true;
- _dataset->updateActiveDocs_HasLock(GetConfRowID(), noDocs, _reported._activeDocs);
- _reported._activeDocs = noDocs;
- }
- _dataset->ScheduleCheckTempFail();
- HandleDown(); // classic: NotifyVirtualConnsDown
- }
-}
-
-
-void
-FastS_EngineBase::HandleNotOnline(int seconds)
-{
- LOG(warning, "Search node %s still not up after %d seconds",
- _config._name, seconds);
-}
-
-
-void
-FastS_EngineBase::Ping()
-{
- SampleQueueLens();
-}
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/engine_base.h b/searchcore/src/vespa/searchcore/fdispatch/search/engine_base.h
deleted file mode 100644
index 7c109cb99c0..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/engine_base.h
+++ /dev/null
@@ -1,195 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/searchcore/fdispatch/common/timestat.h>
-#include "plain_dataset.h"
-#include "poss_count.h"
-#include <atomic>
-
-class FastS_FNET_DataSet;
-class FastS_DataSetInfo;
-
-class FastS_FNET_Engine;
-class FastS_RPC_Engine;
-
-class FastS_EngineBase
-{
- friend class FastS_FNET_Engine;
- friend class FastS_RPC_Engine;
- friend class FastS_PlainDataSet;
- friend class FastS_FNET_DataSet;
- friend class FastS_PartitionMap;
- friend class FastS_DataSetInfo;
-
-private:
- FastS_EngineBase(const FastS_EngineBase &);
- FastS_EngineBase& operator=(const FastS_EngineBase &);
-
-public:
-
- //----------------------------------------------------------------
- // class holding various statistics for a search node
- //----------------------------------------------------------------
- class stats_t
- {
- public:
- enum {
- _queuestatsize = 100
- };
-
- // the node goes up and down...
- FastOS_Time _fliptime; // When state changed last to UP or big chg
- FastOS_Time _floptime; // When state changed last from UP
-
- // search/docsum slowness
- uint32_t _slowQueryCnt;
- uint32_t _slowDocsumCnt;
- double _slowQuerySecs;
- double _slowDocsumSecs;
-
- // active cnt + queue len sampling
- uint32_t _queueLenSampleAcc; // sum of reported queue lengths
- uint32_t _queueLenSampleCnt; // number of reported queue lengths
- uint32_t _activecntSampleAcc; // sum of our "load"
- uint32_t _activecntSampleCnt; // number of our "load" samples
-
- // sampled active cnt + queue len
- struct {
- double _queueLen;
- double _activecnt;
- } _queueLens[_queuestatsize];
- double _queueLenAcc;
- double _activecntAcc;
- uint32_t _queueLenIdx;
- uint32_t _queueLenValid;
-
- stats_t();
-
- };
-
- //----------------------------------------------------------------
- // class holding values reported from the node below
- //----------------------------------------------------------------
- class reported_t
- {
- private:
- reported_t(const reported_t &);
- reported_t& operator=(const reported_t &);
-
- public:
- uint32_t _queueLen; // queue len on search node
- uint32_t _dispatchers; // # dispatchers using search node
-
- bool _mld;
- uint32_t _reportedPartID; // Partid reported from node below
- uint32_t _actNodes; // From _MLD_MON. # active nodes, or 1
- uint32_t _maxNodes; // From _MLD_MON. total # nodes, or 1
- uint32_t _actParts; // From _MLD_MON. # active parts, or 1
- uint32_t _maxParts; // From _MLD_MON. total # parts, or 1
- PossCount _activeDocs;
- time_t _docstamp;
-
- reported_t();
- ~reported_t();
- };
-
- //----------------------------------------------------------------
- // class holding config values
- //----------------------------------------------------------------
- class config_t
- {
- private:
- config_t(const config_t &);
- config_t& operator=(const config_t &);
-
- public:
- char *_name;
- uint32_t _unitrefcost; // Cost to reference us
- uint32_t _confPartID; // Partid configured in partitions file
- uint32_t _confRowID; // What row this engine belongs to
- bool _confPartIDOverrides; // Ignore lower partid and use our conf value
- config_t(FastS_EngineDesc *desc);
- ~config_t();
- };
-
- // engine badness enum
- enum {
- BAD_NOT,
- BAD_ADMIN,
- BAD_CONFIG
- };
-
-protected:
- stats_t _stats;
- reported_t _reported;
- config_t _config;
-
- bool _isUp; // is this engine up ?
- uint32_t _badness; // engine badness indicator
- uint32_t _partid; // Partid we actually use
-
- // Total cost as seen by referencing objects
- std::atomic<uint32_t> _totalrefcost;
- std::atomic<uint32_t> _activecnt; // Our "load" on search node
-
- FastS_PlainDataSet *_dataset; // dataset for this engine
-
- FastS_EngineBase *_nextds; // list of engines in dataset
- FastS_EngineBase *_prevpart; // list of engines in partition
- FastS_EngineBase *_nextpart; // list of engines in partition
- std::mutex _lock;
-
-public:
- FastS_EngineBase(FastS_EngineDesc *desc, FastS_PlainDataSet *dataset);
- virtual ~FastS_EngineBase();
-
- // common engine methods
- //----------------------
- static time_t NoDocStamp() { return static_cast<time_t>(-1); }
- const char *GetName() const { return _config._name; }
- FastS_EngineBase *GetNextDS() const { return _nextds; }
- uint32_t GetQueueLen() const { return _reported._queueLen; }
- uint32_t GetDispatchers() const { return _reported._dispatchers; }
- FastS_PlainDataSet *GetDataSet() const { return _dataset; }
- uint32_t GetConfRowID() const { return _config._confRowID; }
- uint32_t GetPartID() const { return _partid; }
-
- time_t GetTimeStamp() const { return _reported._docstamp; }
- bool IsMLD() const { return _reported._mld; }
-
- bool IsUp() const { return _isUp; }
- bool IsRealBad() const { return (_badness > BAD_NOT); }
- bool isAdminBad() const { return _badness == BAD_ADMIN; }
-
- bool IsReady() const { return (IsUp() || IsRealBad()); }
- void SlowQuery(double limit, double secs, bool silent);
- void SlowDocsum(double limit, double secs);
- void AddCost();
- void SubCost();
- void SaveQueueLen_NoLock(uint32_t queueLen, uint32_t dispatchers);
- void SampleQueueLens();
- void UpdateSearchTime(double tnow, double elapsed, bool timedout);
- void NotifyFailure();
- void MarkBad(uint32_t badness);
- void ClearBad();
- void HandlePingResponse(uint32_t partid, time_t docstamp, bool mld,
- uint32_t maxnodes, uint32_t nodes,
- uint32_t maxparts, uint32_t parts,
- PossCount activeDocs);
- void HandleLostConnection();
- void HandleNotOnline(int seconds);
-
- // common engine API
- //------------------
- virtual void Ping();
- virtual void HandleClearedBad() {}
- virtual void HandleUp() {}
- virtual void HandleDown() {}
-
- // typesafe "down"-cast
- //---------------------
- virtual FastS_FNET_Engine *GetFNETEngine() { return NULL; }
- virtual FastS_RPC_Engine *GetRPCEngine() { return NULL; }
-};
-
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/fnet_dataset.cpp b/searchcore/src/vespa/searchcore/fdispatch/search/fnet_dataset.cpp
deleted file mode 100644
index 081d5e7da34..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/fnet_dataset.cpp
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "fnet_dataset.h"
-#include "fnet_engine.h"
-#include "fnet_search.h"
-#include "datasetcollection.h"
-
-#include <vespa/log/log.h>
-LOG_SETUP(".search.fnet_dataset");
-
-//--------------------------------------------------------------------------
-
-void
-FastS_FNET_DataSet::PingTask::PerformTask()
-{
- _dataset->Ping();
- Schedule(_delay);
-}
-
-//--------------------------------------------------------------------------
-
-FastS_FNET_DataSet::FastS_FNET_DataSet(FNET_Transport *transport,
- FNET_Scheduler *scheduler,
- FastS_AppContext *appCtx,
- FastS_DataSetDesc *desc)
- : FastS_PlainDataSet(appCtx, desc),
- _transport(transport),
- _pingTask(scheduler, this, getMonitorInterval()),
- _failedRowsBitmask(0)
-{
-}
-
-
-FastS_FNET_DataSet::~FastS_FNET_DataSet() = default;
-
-bool
-FastS_FNET_DataSet::AddEngine(FastS_EngineDesc *desc)
-{
- FastS_FNET_Engine *engine = new FastS_FNET_Engine(desc, this);
-
- InsertEngine(engine);
-
- if (desc->IsBad()) {
- engine->MarkBad(FastS_EngineBase::BAD_CONFIG);
- }
- return true;
-}
-
-
-namespace {
-struct ConnectFNETEngine {
- void operator()(FastS_EngineBase* engine) {
- FastS_FNET_Engine* fnet_engine = engine->GetFNETEngine();
- FastS_assert(fnet_engine != nullptr);
- fnet_engine->ScheduleConnect(0.0);
- fnet_engine->StartWarnTimer();
- }
-};
-}
-
-void
-FastS_FNET_DataSet::ConfigDone(FastS_DataSetCollection *)
-{
- ForEachEngine( ConnectFNETEngine() );
- _pingTask.ScheduleNow();
-}
-
-
-void
-FastS_FNET_DataSet::ScheduleCheckBad()
-{
- _pingTask.ScheduleNow();
-}
-
-
-FastS_ISearch *
-FastS_FNET_DataSet::CreateSearch(FastS_DataSetCollection *dsc,
- FastS_TimeKeeper *timeKeeper,
- bool async)
-{
- return (async)
- ? (FastS_ISearch *) new FastS_FNET_Search(dsc, this, timeKeeper)
- : (FastS_ISearch *) new FastS_Sync_FNET_Search(dsc, this, timeKeeper);
-}
-
-
-void
-FastS_FNET_DataSet::Free()
-{
- _pingTask.Kill();
-
- for (FastS_EngineBase *engine = ExtractEngine();
- engine != nullptr; engine = ExtractEngine())
- {
- FastS_assert(engine->GetFNETEngine() != nullptr);
- delete engine;
- }
-
- delete this;
-}
-
-bool
-FastS_FNET_DataSet::isGoodRow(uint32_t rowId)
-{
- auto dsGuard(getDsGuard());
- uint64_t rowBit = 1ul << rowId;
- bool wasBad = ((_failedRowsBitmask & rowBit) != 0);
- bool isBad = false;
- uint64_t candDocs = _stateOfRows.getRowState(rowId).activeDocs();
- // demand: (candidate row active docs >= p% of average active docs)
- // where p = min activedocs coverage
- double p = _queryDistributionMode.getMinActivedocsCoverage() / 100.0;
- p = std::min(p, 0.999); // max demand: 99.9 %
- uint64_t restDocs = _stateOfRows.sumActiveDocs() - candDocs;
- uint64_t restRows = _stateOfRows.numRowStates() - 1;
- double restAvg = (restRows > 0) ? (restDocs / (double)restRows) : 0;
- if (_stateOfRows.activeDocsValid() && (candDocs < (p * restAvg))) {
- isBad = true;
- if (!wasBad) {
- _failedRowsBitmask |= rowBit;
- LOG(warning, "Not enough active docs in group %d (only %" PRIu64 " docs, average is %g)",
- rowId, candDocs, restAvg);
- }
- }
- size_t nodesUp = countNodesUpInRow_HasLock(rowId);
- size_t configuredParts = getNumPartitions(rowId);
- size_t nodesAllowedDown =
- getMaxNodesDownPerFixedRow() +
- (configuredParts*(100.0 - getMinGroupCoverage()))/100.0;
- if (nodesUp + nodesAllowedDown < configuredParts) {
- isBad = true;
- if (!wasBad) {
- _failedRowsBitmask |= rowBit;
- LOG(warning, "Coverage of group %d is only %ld/%ld (requires %ld)",
- rowId, nodesUp, configuredParts, configuredParts-nodesAllowedDown);
- }
- }
- if (wasBad && !isBad) {
- _failedRowsBitmask &= ~rowBit;
- LOG(info, "Group %d is now good again (%" PRIu64 "/%g active docs, coverage %ld/%ld)",
- rowId, candDocs, restAvg, nodesUp, configuredParts);
- }
- return !isBad;
-}
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/fnet_dataset.h b/searchcore/src/vespa/searchcore/fdispatch/search/fnet_dataset.h
deleted file mode 100644
index e51166a8456..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/fnet_dataset.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include "plain_dataset.h"
-
-class FNET_Transport;
-
-class FastS_FNET_DataSet : public FastS_PlainDataSet
-{
-public:
-
- //----------------------------------------------------------------
- // class used to schedule periodic dataset pinging
- //----------------------------------------------------------------
-
- class PingTask : public FNET_Task
- {
- private:
- FastS_FNET_DataSet *_dataset;
- double _delay;
-
- public:
- PingTask(const PingTask &) = delete;
- PingTask& operator=(const PingTask &) = delete;
- PingTask(FNET_Scheduler *scheduler,
- FastS_FNET_DataSet *dataset,
- double delay)
- : FNET_Task(scheduler),
- _dataset(dataset),
- _delay(delay)
- {}
- void PerformTask() override;
- };
-
-
-private:
- FNET_Transport *_transport;
- PingTask _pingTask;
- uint64_t _failedRowsBitmask;
-
-public:
- FastS_FNET_DataSet(const FastS_FNET_DataSet &) = delete;
- FastS_FNET_DataSet& operator=(const FastS_FNET_DataSet &) = delete;
- FastS_FNET_DataSet(FNET_Transport *transport,
- FNET_Scheduler *scheduler,
- FastS_AppContext *appCtx,
- FastS_DataSetDesc *desc);
- ~FastS_FNET_DataSet() override;
-
- FNET_Transport *GetTransport() { return _transport; }
-
- // typesafe down-cast
- FastS_FNET_DataSet *GetFNETDataSet() override { return this; }
-
- // common dataset API
- bool AddEngine(FastS_EngineDesc *desc) override;
- void ConfigDone(FastS_DataSetCollection *) override;
- void ScheduleCheckBad() override;
- FastS_ISearch *CreateSearch(FastS_DataSetCollection *dsc,
- FastS_TimeKeeper *timeKeeper,
- bool async) override;
- void Free() override;
-
- bool isGoodRow(uint32_t rowId);
-};
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/fnet_engine.cpp b/searchcore/src/vespa/searchcore/fdispatch/search/fnet_engine.cpp
deleted file mode 100644
index afa0379c06f..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/fnet_engine.cpp
+++ /dev/null
@@ -1,231 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "fnet_dataset.h"
-#include "datasetcollection.h"
-#include "fnet_engine.h"
-#include <vespa/searchcore/fdispatch/common/search.h>
-#include <vespa/fnet/transport.h>
-#include <vespa/fnet/connection.h>
-
-using namespace search::fs4transport;
-
-//----------------------------------------------------------------------
-
-void
-FastS_StaticMonitorQuery::Free()
-{
- if (_refcnt-- == 1) {
- delete this;
- }
-}
-
-
-FastS_StaticMonitorQuery::FastS_StaticMonitorQuery()
- : FS4Packet_MONITORQUERYX(),
- _refcnt(1)
-{ }
-
-
-FastS_StaticMonitorQuery::~FastS_StaticMonitorQuery()
-{
- FastS_assert(_refcnt == 0);
-}
-
-//----------------------------------------------------------------------
-
-void
-FastS_FNET_Engine::WarnTask::PerformTask()
-{
- _engine->HandleNotOnline(DELAY);
-}
-
-//----------------------------------------------------------------------
-
-void
-FastS_FNET_Engine::ConnectTask::PerformTask()
-{
- _engine->Connect();
-}
-
-//----------------------------------------------------------------------
-
-void
-FastS_FNET_Engine::Connect()
-{
- if (_conn == nullptr ||
- _conn->GetState() >= FNET_Connection::FNET_CLOSING)
- {
- FNET_Connection *newConn =
- _transport->Connect(_spec.c_str(),
- &FS4PersistentPacketStreamer::Instance,
- this);
- FNET_Connection *oldConn;
- {
- auto dsGuard(getDsGuard());
- oldConn = _conn;
- _conn = newConn;
- }
- if (oldConn != nullptr)
- oldConn->SubRef();
- if (newConn == nullptr && !IsRealBad())
- ScheduleConnect(2.9);
- }
-}
-
-
-void
-FastS_FNET_Engine::Disconnect()
-{
- if (_conn != nullptr) {
- _conn->CloseAdminChannel();
- FNET_Connection *conn;
- {
- auto dsGuard(getDsGuard());
- conn = _conn;
- _conn = nullptr;
- }
- _transport->Close(conn, /* needref = */ false);
- }
-}
-
-
-FastS_FNET_Engine::FastS_FNET_Engine(FastS_EngineDesc *desc,
- FastS_FNET_DataSet *dataset)
- : FastS_EngineBase(desc, dataset),
- _spec(),
- _transport(dataset->GetTransport()),
- _conn(nullptr),
- _warnTask(dataset->GetAppContext()->GetFNETScheduler(), this),
- _connectTask(dataset->GetAppContext()->GetFNETScheduler(), this),
- _monitorQuery(nullptr)
-{
- if (strncmp(_config._name, "tcp/", 4) == 0) {
- _spec = _config._name;
- } else {
- _spec = "tcp/";
- _spec += _config._name;
- }
-}
-
-
-FastS_FNET_Engine::~FastS_FNET_Engine()
-{
- _warnTask.Kill();
- _connectTask.Kill();
- Disconnect();
- if (IsUp()) {
- auto dsGuard(getDsGuard());
- _dataset->LinkOutPart_HasLock(this);
- }
- if (_monitorQuery != nullptr) {
- _monitorQuery->Free();
- _monitorQuery = nullptr;
- }
-}
-
-
-void
-FastS_FNET_Engine::StartWarnTimer()
-{
- _warnTask.Schedule(_warnTask.DELAY);
-}
-
-
-void
-FastS_FNET_Engine::ScheduleConnect(double delay)
-{
- if (delay == 0.0) {
- _connectTask.ScheduleNow();
- } else {
- _connectTask.Schedule(delay);
- }
-}
-
-
-FNET_Channel *
-FastS_FNET_Engine::OpenChannel_HasDSLock(FNET_IPacketHandler *handler)
-{
- return (_conn != nullptr) ? _conn->OpenChannel(handler, FNET_Context()) : nullptr;
-}
-
-
-FNET_IPacketHandler::HP_RetCode
-FastS_FNET_Engine::HandlePacket(FNET_Packet *packet, FNET_Context)
-{
- HP_RetCode ret = FNET_KEEP_CHANNEL;
- uint32_t pcode = packet->GetPCODE();
-
- if (packet->IsChannelLostCMD()) {
-
- HandleLostConnection();
- ret = FNET_FREE_CHANNEL;
- if (!IsRealBad()) {
- ScheduleConnect(2.9);
- }
-
- } else if (pcode == search::fs4transport::PCODE_MONITORRESULTX) {
-
- FS4Packet_MONITORRESULTX *mr = (FS4Packet_MONITORRESULTX *) packet;
-
- PossCount activeDocs;
- activeDocs.valid = ((mr->_features & search::fs4transport::MRF_ACTIVEDOCS) != 0);
- activeDocs.count = mr->_activeDocs;
- if ((mr->_features & search::fs4transport::MRF_MLD) != 0) {
- HandlePingResponse(mr->_partid, mr->_timestamp, true,
- mr->_totalNodes, mr->_activeNodes,
- mr->_totalParts, mr->_activeParts,
- activeDocs);
- } else {
- HandlePingResponse(mr->_partid, mr->_timestamp, false, 1, 1, 1, 1, activeDocs);
- }
- }
-
- packet->Free();
- return ret;
-}
-
-
-void
-FastS_FNET_Engine::Ping()
-{
- FastS_EngineBase::Ping();
-
- // handle badness
- if (IsRealBad()) {
- if (_conn != nullptr) {
- Disconnect();
- HandleLostConnection();
- }
- return;
- }
-
- // handle ping
- if ((_conn != nullptr) && (_conn->GetState() < FNET_Connection::FNET_CLOSING)) {
- if (_monitorQuery == nullptr) {
- _monitorQuery = new FastS_StaticMonitorQuery();
- }
- if (_monitorQuery->getBusy()) {
- return;
- }
- _monitorQuery->markBusy();
- uint32_t features = search::fs4transport::MQF_QFLAGS;
- uint32_t qflags = search::fs4transport::MQFLAG_REPORT_ACTIVEDOCS;
- _monitorQuery->_features |= features;
- _monitorQuery->_qflags = qflags;
- _conn->PostPacket(_monitorQuery, FastS_NoID32());
- }
-}
-
-
-void
-FastS_FNET_Engine::HandleClearedBad()
-{
- ScheduleConnect(0.0);
-}
-
-
-void
-FastS_FNET_Engine::HandleUp()
-{
- _warnTask.Unschedule();
-}
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/fnet_engine.h b/searchcore/src/vespa/searchcore/fdispatch/search/fnet_engine.h
deleted file mode 100644
index 4374deb2642..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/fnet_engine.h
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-
-#pragma once
-
-#include "engine_base.h"
-#include <vespa/searchlib/common/packets.h>
-#include <vespa/fnet/ipackethandler.h>
-#include <atomic>
-
-//----------------------------------------------------------------------
-
-using search::fs4transport::FS4Packet_MONITORQUERYX;
-
-class FastS_StaticMonitorQuery : public FS4Packet_MONITORQUERYX
-{
- std::atomic<int> _refcnt;
-public:
- virtual void Free() override;
- bool getBusy() const { return _refcnt > 1; }
- void markBusy() { _refcnt++; }
- FastS_StaticMonitorQuery();
- ~FastS_StaticMonitorQuery();
-};
-
-//----------------------------------------------------------------------
-
-class FastS_FNET_Engine : public FNET_IPacketHandler,
- public FastS_EngineBase
-{
-private:
- FastS_FNET_Engine(const FastS_FNET_Engine &);
- FastS_FNET_Engine& operator=(const FastS_FNET_Engine &);
-
-public:
- class WarnTask : public FNET_Task
- {
- private:
- WarnTask(const WarnTask &);
- WarnTask& operator=(const WarnTask &);
-
- FastS_FNET_Engine *_engine;
-
- public:
- enum { DELAY = 30 };
- WarnTask(FNET_Scheduler *scheduler,
- FastS_FNET_Engine *engine)
- : FNET_Task(scheduler), _engine(engine) {}
- virtual void PerformTask() override;
- };
- friend class FastS_FNET_Engine::WarnTask;
-
- class ConnectTask : public FNET_Task
- {
- private:
- ConnectTask(const ConnectTask &);
- ConnectTask& operator=(const ConnectTask &);
-
- FastS_FNET_Engine *_engine;
-
- public:
- ConnectTask(FNET_Scheduler *scheduler,
- FastS_FNET_Engine *engine)
- : FNET_Task(scheduler), _engine(engine) {}
- virtual void PerformTask() override;
- };
- friend class FastS_FNET_Engine::ConnectTask;
-
-private:
- std::string _hostName;
- int _portNumber;
- std::string _spec;
- FNET_Transport *_transport;
- FNET_Connection *_conn;
- WarnTask _warnTask;
- ConnectTask _connectTask;
- FastS_StaticMonitorQuery *_monitorQuery;
-
- void Connect();
- void Disconnect();
-
-public:
- FastS_FNET_Engine(FastS_EngineDesc *desc,
- FastS_FNET_DataSet *dataset);
- virtual ~FastS_FNET_Engine();
-
- std::unique_lock<std::mutex> getDsGuard() { return _dataset->getDsGuard(); }
-
- void StartWarnTimer();
- void ScheduleConnect(double delay);
- FNET_Channel *OpenChannel_HasDSLock(FNET_IPacketHandler *handler);
-
- // handle FNET admin packets
- //--------------------------
- virtual HP_RetCode HandlePacket(FNET_Packet *packet, FNET_Context) override;
-
- // common engine API
- //------------------
- virtual void Ping() override;
- virtual void HandleClearedBad() override;
- virtual void HandleUp() override;
-
- // typesafe "down"-cast
- //---------------------
- virtual FastS_FNET_Engine *GetFNETEngine() override { return this; }
-
- const char *getHostName() const { return _hostName.c_str(); }
- int getPortNumber() const { return _portNumber; }
-};
-
-//----------------------------------------------------------------------
-
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/fnet_search.cpp b/searchcore/src/vespa/searchcore/fdispatch/search/fnet_search.cpp
deleted file mode 100644
index f665e25f819..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/fnet_search.cpp
+++ /dev/null
@@ -1,1572 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "datasetcollection.h"
-#include "fnet_dataset.h"
-#include "fnet_engine.h"
-#include "fnet_search.h"
-#include "mergehits.h"
-#include <vespa/searchlib/engine/packetconverter.h>
-#include <vespa/searchlib/engine/searchreply.h>
-#include <vespa/vespalib/util/stringfmt.h>
-#include <xxhash.h>
-
-#include <vespa/log/log.h>
-LOG_SETUP(".fnet_search");
-
-#define IS_MLD_PART(part) ((part) > mldpartidmask)
-#define MLD_PART_TO_PARTID(part) ((part) & mldpartidmask)
-#define ENCODE_MLD_PART(part) (((part) + 1) << partbits)
-#define DECODE_MLD_PART(part) (((part) >> partbits) - 1)
-
-using fdispatch::SearchPath;
-using vespalib::nbostream;
-using vespalib::stringref;
-using namespace search::fs4transport;
-using search::engine::PacketConverter;
-
-//---------------------------------------------------------------------
-//
-
-FastS_FNET_SearchNode::FastS_FNET_SearchNode(FastS_FNET_Search *search, uint32_t partid)
- : _search(search),
- _engine(nullptr),
- _channel(nullptr),
- _partid(partid),
- _rowid(0),
- _stamp(0),
- _qresult(nullptr),
- _queryTime(0.0),
- _flags(),
- _docidCnt(0),
- _pendingDocsums(0),
- _docsumRow(0),
- _docsum_offsets_idx(0),
- _docsumTime(0.0),
- _gdx(nullptr),
- _docsum_offsets(),
- _extraDocsumNodes(),
- _nextExtraDocsumNode(this),
- _prevExtraDocsumNode(this),
- _hit_beg(nullptr),
- _hit_cur(nullptr),
- _hit_end(nullptr),
- _sortDataIterator()
-{
-}
-
-
-FastS_FNET_SearchNode::~FastS_FNET_SearchNode()
-{
- Disconnect();
- if (_qresult != nullptr) {
- _qresult->Free();
- }
- if (_gdx != nullptr) {
- _gdx->Free();
- }
-}
-
-FastS_FNET_SearchNode::FastS_FNET_SearchNode(FastS_FNET_SearchNode &&)
-{
- // These objects are referenced everywhere and must never be either copied nor moved,
- // but as std::vector requires this to exist so we do this little trick.
- LOG_ABORT("should not reach here");
-}
-
-bool
-FastS_FNET_SearchNode::NT_InitMerge(uint32_t *numDocs,
- uint64_t *totalHits,
- search::HitRank *maxRank,
- uint32_t *sortDataDocs)
-{
- uint32_t myNumDocs = 0;
- if (_qresult != nullptr) {
- myNumDocs = _qresult->_numDocs;
- *numDocs += myNumDocs;
- *totalHits += _qresult->_totNumDocs;
- search::HitRank mr = _qresult->_maxRank;
- if (mr > *maxRank)
- *maxRank = mr;
- }
- if (myNumDocs > 0) {
- _hit_beg = _qresult->_hits;
- _hit_cur = _hit_beg;
- _hit_end = _hit_beg + myNumDocs;
- if ((_qresult->_features & search::fs4transport::QRF_SORTDATA) != 0) {
- _sortDataIterator.Init(myNumDocs, _qresult->_sortIndex, _qresult->_sortData);
- *sortDataDocs += myNumDocs;
- }
- return true;
- }
- return false;
-}
-
-
-FastS_EngineBase *
-FastS_FNET_SearchNode::getPartition(const std::unique_lock<std::mutex> &dsGuard, bool userow, FastS_FNET_DataSet *dataset)
-{
- return ((userow)
- ? dataset->getPartitionMLD(dsGuard, getPartID(), _flags._docsumMld, _docsumRow)
- : dataset->getPartitionMLD(dsGuard, getPartID(), _flags._docsumMld));
-}
-
-
-void
-FastS_FNET_SearchNode::
-allocGDX(search::docsummary::GetDocsumArgs *args, const search::engine::PropertiesMap &props)
-{
- FS4Packet_GETDOCSUMSX *gdx = new FS4Packet_GETDOCSUMSX();
-
- gdx->AllocateDocIDs(_docidCnt);
- _gdx = gdx;
- _docsum_offsets.resize(_gdx->_docid.size());
- _docsum_offsets_idx = 0;
- if (args == nullptr)
- return;
-
- if (args->getRankProfile().size() != 0 || args->GetQueryFlags() != 0) {
- gdx->_features |= search::fs4transport::GDF_RANKP_QFLAGS;
- gdx->setRanking(args->getRankProfile());
- gdx->_qflags = args->GetQueryFlags();
- }
- gdx->setTimeout(args->getTimeout());
-
- if (args->getResultClassName().size() > 0) {
- gdx->_features |= search::fs4transport::GDF_RESCLASSNAME;
- gdx->setResultClassName(args->getResultClassName());
- }
-
- if (props.size() > 0) {
- PacketConverter::fillPacketProperties(props, gdx->_propsVector);
- gdx->_features |= search::fs4transport::GDF_PROPERTIES;
- }
-
- if (args->getStackDump().size() > 0) {
- gdx->_features |= search::fs4transport::GDF_QUERYSTACK;
- gdx->_stackItems = args->GetStackItems();
- gdx->setStackDump(args->getStackDump());
- }
-
- if (args->GetLocationLen() > 0) {
- gdx->_features |= search::fs4transport::GDF_LOCATION;
- gdx->setLocation(args->getLocation());
- }
-
- if (args->getFlags() != 0) {
- gdx->_features |= search::fs4transport::GDF_FLAGS;
- gdx->_flags = args->getFlags();
- }
-}
-
-
-void
-FastS_FNET_SearchNode::postGDX(uint32_t *pendingDocsums, uint32_t *docsumNodes)
-{
- FS4Packet_GETDOCSUMSX *gdx = _gdx;
- FastS_assert(gdx->_docid.size() == _docsum_offsets_idx);
- if (_flags._docsumMld) {
- gdx->_features |= search::fs4transport::GDF_MLD;
- }
- if (PostPacket(gdx)) {
- _pendingDocsums = _docsum_offsets_idx;
- *pendingDocsums += _pendingDocsums;
- (*docsumNodes)++;
- }
- _gdx = nullptr; // packet hand-over
- _docsum_offsets_idx = 0;
-}
-
-
-FNET_IPacketHandler::HP_RetCode
-FastS_FNET_SearchNode::HandlePacket(FNET_Packet *packet, FNET_Context context)
-{
- uint32_t pcode = packet->GetPCODE();
- if (LOG_WOULD_LOG(spam)) {
- LOG(spam, "handling packet %p\npacket=%s", packet, packet->Print().c_str());
- context.Print();
- }
- if (packet->IsChannelLostCMD()) {
- _search->LostSearchNode(this);
- } else if (pcode == search::fs4transport::PCODE_QUERYRESULTX) {
- _search->GotQueryResult(this, (FS4Packet_QUERYRESULTX *) packet);
- } else if (pcode == search::fs4transport::PCODE_DOCSUM) {
- _search->GotDocsum(this, (FS4Packet_DOCSUM *) packet);
- } else if (pcode == search::fs4transport::PCODE_ERROR) {
- _search->GotError(this, static_cast<FS4Packet_ERROR *>(packet));
- } else {
- if (pcode == search::fs4transport::PCODE_EOL) {
- _search->GotEOL(this);
- }
- packet->Free();
- }
- return FNET_KEEP_CHANNEL;
-}
-
-
-FastS_FNET_SearchNode *
-FastS_FNET_SearchNode::
-allocExtraDocsumNode(bool mld, uint32_t rowid, uint32_t rowbits)
-{
- if (_extraDocsumNodes.empty()) {
- size_t sz = (1 << (rowbits + 1));
- _extraDocsumNodes.resize(sz);
- }
-
- uint32_t idx = (rowid << 1) + (mld ? 1 : 0);
-
- if (_extraDocsumNodes[idx].get() == nullptr) {
- UP eNode(new FastS_FNET_SearchNode(_search, getPartID()));
- eNode->_docsumRow = rowid;
- eNode->_flags._docsumMld = mld;
-
- eNode->_nextExtraDocsumNode = this;
- eNode->_prevExtraDocsumNode = _prevExtraDocsumNode;
- _prevExtraDocsumNode->_nextExtraDocsumNode = eNode.get();
- _prevExtraDocsumNode = eNode.get();
- _extraDocsumNodes[idx] = std::move(eNode);
- }
- return _extraDocsumNodes[idx].get();
-}
-
-
-//---------------------------------------------------------------------
-
-void
-FastS_FNET_Search::Timeout::PerformTask()
-{
- _search->HandleTimeout();
-}
-
-//---------------------------------------------------------------------
-
-void
-FastS_FNET_Search::reallocNodes(size_t numParts)
-{
- _nodes.clear();
-
- _nodes.reserve(numParts);
-
- for (uint32_t i = 0; i < numParts; i++) {
- _nodes.emplace_back(this, i);
- }
-}
-
-namespace {
-volatile std::atomic<uint64_t> _G_prevFixedRow(0);
-} //anonymous namespace
-
-uint32_t
-FastS_FNET_Search::getFixedRowCandidate()
-{
- uint32_t rowId(_dataset->useRoundRobinForFixedRow()
- ? (_G_prevFixedRow++)
- : _dataset->getRandomWeightedRow());
- return rowId % _dataset->getNumRows();
-}
-
-uint32_t
-FastS_FNET_Search::getNextFixedRow()
-{
- size_t numTries(0);
- uint32_t fixedRow(0);
- size_t maxTries(_dataset->getNumRows());
- if ( ! _dataset->useRoundRobinForFixedRow()) {
- maxTries *= 10;
- }
- for(;numTries < maxTries; numTries++) {
- fixedRow = getFixedRowCandidate();
- if (_dataset->isGoodRow(fixedRow)) {
- break;
- }
- }
- if (numTries == maxTries) {
- fixedRow = getFixedRowCandidate(); // Will roundrobin/random if all rows are incomplete.
- }
- LOG(debug, "FixedRow: selected=%d, numRows=%d, numTries=%ld, _G_prevFixedRow=%" PRIu64, fixedRow, _dataset->getNumRows(), numTries, _G_prevFixedRow.load());
- return fixedRow;
-}
-
-void
-FastS_FNET_Search::connectNodes(const EngineNodeMap & engines)
-{
- for (const auto & pair : engines) {
- if ( ! pair.second->IsConnected() ) {
- // Here we are connecting without having the DataSet lock.
- // This might give a race when nodes go up or down, or there is a config change.
- // However none has ever been detected for as long as the race has existed.
- // The correct fix would be to make the DataSet be constant and be replaced upon changes.
- // And using shared_ptr to them. That would avoid the big global lock all together.
- pair.second->Connect_HasDSLock(pair.first->GetFNETEngine());
- } else {
- pair.first->SubCost();
- }
- }
- _nodesConnected = true;
-}
-
-uint32_t
-FastS_FNET_Search::getHashedRow() const {
- uint32_t hash = XXH32(&_queryArgs->sessionId[0], _queryArgs->sessionId.size(), 0);
- std::vector<uint32_t> rowIds;
- rowIds.reserve(_dataset->getNumRows());
- for (uint32_t rowId(0); rowId < _dataset->getNumRows(); rowId++) {
- rowIds.push_back(rowId);
- }
- while (!rowIds.empty()) {
- uint32_t index = hash % rowIds.size();
- uint32_t fixedRow = rowIds[index];
- if (_dataset->isGoodRow(fixedRow)) {
- return fixedRow;
- }
- rowIds.erase(rowIds.begin() + index);
- }
- return 0;
-}
-void
-FastS_FNET_Search::ConnectQueryNodes()
-{
- FastS_assert( ! _nodes.empty() );
- FastS_assert(!_nodesConnected);
-
- uint32_t fixedRow(0);
- if (_dataset->useFixedRowDistribution()) {
- fixedRow = (_queryArgs->sessionId.empty()) ? getNextFixedRow() : getHashedRow();
- _fixedRow = fixedRow;
- size_t numParts = _dataset->getNumPartitions(fixedRow);
- if (_nodes.size() > numParts) {
- reallocNodes(numParts);
- }
- }
- EngineNodeMap engines;
- engines.reserve(_nodes.size());
- {
- auto dsGuard(_dataset->getDsGuard());
- for (uint32_t i = 0; i < _nodes.size(); i++) {
- FastS_EngineBase *engine = nullptr;
- if (_dataset->useFixedRowDistribution()) {
- engine = _dataset->getPartition(dsGuard, i, fixedRow);
- LOG(debug, "FixedRow: getPartition(part=%u, row=%u) -> engine(%s)", i, fixedRow, (engine != nullptr ? engine->GetName() : "null"));
- } else {
- engine = _dataset->getPartition(dsGuard, i);
- }
- if (engine != nullptr) {
- LOG(debug, "Wanted part=%d, engine={name=%s, row=%d, partid=%d}", i, engine->GetName(), engine->GetConfRowID(), engine->GetPartID());
- if (engine != nullptr) {
- engines.emplace_back(engine, getNode(i));
- }
- } else {
- LOG(debug, "No engine for part %d", i);
- }
- }
- }
- connectNodes(engines);
-}
-
-
-void
-FastS_FNET_Search::ConnectEstimateNodes()
-{
- FastS_assert( ! _nodes.empty() );
- FastS_assert(!_nodesConnected);
-
- uint32_t partid = _util.GetQuery().StackDumpHashKey() % _estPartCutoff;
- uint32_t trycnt = 0;
- uint32_t partcnt = 0;
-
- EngineNodeMap engines;
- {
- auto dsGuard(_dataset->getDsGuard());
- while (partcnt < _dataset->GetEstimateParts() && trycnt < _estPartCutoff) {
- FastS_EngineBase *engine = _dataset->getPartition(dsGuard, partid);
- if (engine != nullptr) {
- engines.emplace_back(engine, getNode(partid));
- partcnt++;
- }
- trycnt++;
- partid = (partid + 1) % _estPartCutoff;
- }
- _estParts = partcnt;
- }
- connectNodes(engines);
-}
-
-
-void FastS_FNET_SearchNode::Connect(FastS_FNET_Engine *engine)
-{
- FastS_assert(_engine == nullptr);
- FastS_assert(_channel == nullptr);
-
- _engine = engine;
- _flags._needSubCost = true;
- auto dsGuard(_engine->getDsGuard());
- _channel = _engine->OpenChannel_HasDSLock(this);
- _rowid = _engine->GetConfRowID();
- _stamp = _engine->GetTimeStamp();
-}
-
-void FastS_FNET_SearchNode::Connect_HasDSLock(FastS_FNET_Engine *engine)
-{
- _engine = engine;
- _flags._needSubCost = true;
- _channel = _engine->OpenChannel_HasDSLock(this);
- _rowid = _engine->GetConfRowID();
- _stamp = _engine->GetTimeStamp();
-}
-
-
-void FastS_FNET_Search::connectSearchPath(const vespalib::string &spec)
-{
- FastS_assert( ! _nodes.empty());
- FastS_assert(!_nodesConnected);
-
- SearchPath searchPath(spec, _nodes.size());
- uint32_t dispatchLevel = _dsc->GetAppContext()->getDispatchLevel();
- LOG(debug, "Looking up searchpath element for dispatch level %u in searchpath '%s' (size=%zu)",
- dispatchLevel, spec.c_str(), searchPath.elements().size());
- if (dispatchLevel < searchPath.elements().size()) {
- connectSearchPath(searchPath.elements()[dispatchLevel], spec, dispatchLevel);
- } else {
- LOG(warning, "Did not find searchpath element for dispatch level "
- "%u in searchpath '%s' (size=%zu). No search nodes will be queried.",
- dispatchLevel, spec.c_str(), searchPath.elements().size());
- }
-}
-
-void FastS_FNET_Search::connectSearchPath(const SearchPath::Element &elem,
- const vespalib::string &spec,
- uint32_t dispatchLevel)
-{
- EngineNodeMap engines;
- {
- auto dsGuard(_dataset->getDsGuard());
- if (!elem.hasRow()) {
- for (size_t partId : elem.nodes()) {
- if (partId < _nodes.size()) {
- FastS_EngineBase *engine = _dataset->getPartition(dsGuard, partId);
- LOG(debug, "searchpath='%s', partId=%ld, dispatchLevel=%u", spec.c_str(), partId, dispatchLevel);
- if (engine != nullptr) {
- engines.emplace_back(engine, getNode(partId));
- }
- }
- }
- } else {
- for (size_t partId : elem.nodes()) {
- if (partId < _nodes.size()) {
- FastS_EngineBase *engine = _dataset->getPartition(dsGuard, partId, elem.row());
- LOG(debug, "searchpath='%s', partId=%ld, row=%ld, dispatchLevel=%u", spec.c_str(), partId, elem.row(), dispatchLevel);
- if (engine != nullptr) {
- engines.emplace_back(engine, getNode(partId));
- }
- }
- }
- }
- }
- connectNodes(engines);
-}
-
-void
-FastS_FNET_Search::ConnectDocsumNodes(bool ignoreRow)
-{
- FastS_assert( ! _nodes.empty());
- if (_nodesConnected)
- return;
-
- bool userow = (_dataset->GetRowBits() > 0) && !ignoreRow;
-
- EngineNodeMap engines;
- {
- auto dsGuard(_dataset->getDsGuard());
- for (auto & node : _nodes) {
- if (node._gdx != nullptr) {
- FastS_EngineBase *engine = node.getPartition(dsGuard, userow, _dataset);
- if (engine != nullptr) {
- engines.emplace_back(engine, &node);
- }
- }
- for (FastS_FNET_SearchNode::ExtraDocsumNodesIter iter(&node); iter.valid(); ++iter) {
- FastS_FNET_SearchNode *eNode = *iter;
- if (eNode->_gdx != nullptr) {
- FastS_EngineBase *engine = eNode->getPartition(dsGuard, userow, _dataset);
- if (engine != nullptr) {
- engines.emplace_back(engine, eNode);
- }
- }
- }
- }
- }
- connectNodes(engines);
-}
-
-void
-FastS_FNET_Search::EncodePartIDs(uint32_t partid, uint32_t rowid, bool mld,
- FS4Packet_QUERYRESULTX::FS4_hit *pt,
- FS4Packet_QUERYRESULTX::FS4_hit *end)
-{
- uint32_t rowbits = _dataset->GetRowBits();
- uint32_t partbits = _dataset->GetPartBits();
-
- if (rowbits > 0) {
- if (mld) {
- for (; pt < end; pt++) {
- pt->_partid = ((ENCODE_MLD_PART(pt->_partid) + partid) << rowbits) + rowid;
- }
- } else {
- for (; pt < end; pt++) {
- pt->_partid = (partid << rowbits) + rowid;
- }
- }
-
- } else { // rowbits == 0
-
- if (mld) {
- for (; pt < end; pt++) {
- pt->_partid = ENCODE_MLD_PART(pt->_partid) + partid;
- }
- } else {
- for (; pt < end; pt++) {
- pt->_partid = partid;
- }
- }
- }
-}
-
-
-FastS_FNET_Search::FastS_FNET_Search(FastS_DataSetCollection *dsc,
- FastS_FNET_DataSet *dataset,
- FastS_TimeKeeper *timeKeeper)
- : FastS_AsyncSearch(dataset->GetID()),
- _lock(),
- _timeKeeper(timeKeeper),
- _startTime(timeKeeper->GetTime()),
- _timeout(dataset->GetAppContext()->GetFNETScheduler(), this),
- _util(),
- _dsc(dsc),
- _dataset(dataset),
- _datasetActiveCostRef(true),
- _nodes(),
- _nodesConnected(false),
- _estParts(0),
- _estPartCutoff(dataset->GetEstimatePartCutoff()),
- _FNET_mode(FNET_NONE),
- _pendingQueries(0),
- _pendingDocsums(0),
- _pendingDocsumNodes(0),
- _requestedDocsums(0),
- _queryNodes(0),
- _queryNodesTimedOut(0),
- _docsumNodes(0),
- _docsumNodesTimedOut(0),
- _docsumsTimedOut(0),
- _queryTimeout(false),
- _docsumTimeout(false),
- _queryStartTime(0.0),
- _queryMinWait(0.0),
- _queryMaxWait(0.0),
- _queryWaitCalculated(false),
- _adjustedQueryTimeOut(0.0),
- _docSumStartTime(0.0),
- _adjustedDocSumTimeOut(0.0),
- _fixedRow(0),
- _resbuf()
-{
- _util.GetQuery().SetDataSet(dataset->GetID());
- _util.SetStartTime(GetTimeKeeper()->GetTime());
- reallocNodes(_dataset->GetPartitions());
-}
-
-
-FastS_FNET_Search::~FastS_FNET_Search()
-{
- _timeout.Kill();
- _nodes.clear();
- _util.DropResult();
- dropDatasetActiveCostRef();
-}
-
-
-void
-FastS_FNET_Search::dropDatasetActiveCostRef()
-{
- if (_datasetActiveCostRef) {
- _dataset->SubCost();
- _dataset->ClearActiveQuery(GetTimeKeeper());
- _datasetActiveCostRef = false;
- }
-}
-
-
-void
-FastS_FNET_Search::GotQueryResult(FastS_FNET_SearchNode *node,
- FS4Packet_QUERYRESULTX *qrx)
-{
- auto searchGuard(BeginFNETWork());
- if (!searchGuard) {
- qrx->Free();
- return;
- }
-
- if (_FNET_mode == FNET_QUERY &&
- node->_flags._pendingQuery) {
- FastS_assert(node->_qresult == nullptr);
- node->_qresult = qrx;
- EncodePartIDs(node->getPartID(), node->GetRowID(),
- (qrx->_features & search::fs4transport::QRF_MLD) != 0,
- qrx->_hits, qrx->_hits + qrx->_numDocs);
- LOG(spam, "Got result from row(%d), part(%d) = hits(%d), numDocs(%" PRIu64 ")", node->GetRowID(), node->getPartID(), qrx->_numDocs, qrx->_totNumDocs);
- node->_flags._pendingQuery = false;
- _pendingQueries--;
- double tnow = GetTimeKeeper()->GetTime();
- node->_queryTime = tnow - _startTime;
- node->GetEngine()->UpdateSearchTime(tnow, node->_queryTime, false);
- adjustQueryTimeout();
- node->dropCost();
- } else {
- qrx->Free();
- }
- EndFNETWork(std::move(searchGuard));
-}
-
-void
-FastS_FNET_Search::GotDocsum(FastS_FNET_SearchNode *node,
- FS4Packet_DOCSUM *docsum)
-{
- auto searchGuard(BeginFNETWork());
- if (!searchGuard) {
- docsum->Free();
- return;
- }
-
- if (_FNET_mode == FNET_DOCSUMS &&
- node->_pendingDocsums > 0) {
- LOG(spam, "Got docsum from row(%d), part(%d) = docsumidx(%d)", node->GetRowID(), node->getPartID(), node->_docsum_offsets_idx);
- uint32_t offset = node->_docsum_offsets[node->_docsum_offsets_idx++];
- docsum->swapBuf(_resbuf[offset]._buf);
- node->_pendingDocsums--;
- _pendingDocsums--;
- if (node->_pendingDocsums == 0) {
- node->_docsumTime = (GetTimeKeeper()->GetTime() - _startTime - node->_queryTime);
- _pendingDocsumNodes--;
- }
- adjustDocsumTimeout();
- }
- docsum->Free();
- EndFNETWork(std::move(searchGuard));
-}
-
-void
-FastS_FNET_Search::LostSearchNode(FastS_FNET_SearchNode *node)
-{
- auto searchGuard(BeginFNETWork());
- if (!searchGuard) {
- return;
- }
-
- if (_FNET_mode == FNET_QUERY && node->_flags._pendingQuery) {
- FastS_assert(_pendingQueries > 0);
- _pendingQueries--;
- node->_flags._pendingQuery = false;
- adjustQueryTimeout();
- node->dropCost();
- } else if (_FNET_mode == FNET_DOCSUMS && node->_pendingDocsums > 0) {
- uint32_t nodePendingDocsums = node->_pendingDocsums;
- FastS_assert(_pendingDocsums >= nodePendingDocsums);
- _pendingDocsums -= nodePendingDocsums;
- node->_pendingDocsums = 0;
- _pendingDocsumNodes--;
- adjustDocsumTimeout();
- }
- EndFNETWork(std::move(searchGuard));
-}
-
-
-void
-FastS_FNET_Search::GotEOL(FastS_FNET_SearchNode *node)
-{
- auto searchGuard(BeginFNETWork());
- if (!searchGuard) {
- return;
- }
-
- LOG(spam, "Got EOL from row(%d), part(%d) = pendingQ(%d) pendingDocsum(%d)", node->GetRowID(), node->getPartID(), node->_flags._pendingQuery, node->_pendingDocsums);
- if (_FNET_mode == FNET_QUERY && node->_flags._pendingQuery) {
- FastS_assert(_pendingQueries > 0);
- _pendingQueries--;
- node->_flags._pendingQuery = false;
- adjustQueryTimeout();
- node->dropCost();
- } else if (_FNET_mode == FNET_DOCSUMS && node->_pendingDocsums > 0) {
- uint32_t nodePendingDocsums = node->_pendingDocsums;
- FastS_assert(_pendingDocsums >= nodePendingDocsums);
- _pendingDocsums -= nodePendingDocsums;
- node->_pendingDocsums = 0;
- _pendingDocsumNodes--;
- adjustDocsumTimeout();
- }
- EndFNETWork(std::move(searchGuard));
-}
-
-
-void
-FastS_FNET_Search::GotError(FastS_FNET_SearchNode *node,
- FS4Packet_ERROR *error)
-{
- auto searchGuard(BeginFNETWork());
- if (!searchGuard) {
- error->Free();
- return;
- }
-
- LOG(spam,
- "Got Error from row(%d), part(%d) = pendingQ(%d) pendingDocsum(%d)",
- node->GetRowID(),
- node->getPartID(),
- node->_flags._pendingQuery,
- node->_pendingDocsums);
-
- if (_FNET_mode == FNET_QUERY && node->_flags._pendingQuery) {
- FastS_assert(_pendingQueries > 0);
- _pendingQueries--;
- node->_flags._pendingQuery = false;
- if (error->_errorCode == search::engine::ECODE_TIMEOUT) {
- node->_flags._queryTimeout = true;
- _queryNodesTimedOut++;
- }
- adjustQueryTimeout();
- } else if (_FNET_mode == FNET_DOCSUMS && node->_pendingDocsums > 0) {
- uint32_t nodePendingDocsums = node->_pendingDocsums;
- FastS_assert(_pendingDocsums >= nodePendingDocsums);
- _pendingDocsums -= nodePendingDocsums;
- node->_pendingDocsums = 0;
- _pendingDocsumNodes--;
- if (error->_errorCode == search::engine::ECODE_TIMEOUT) {
- node->_flags._docsumTimeout = true;
- _docsumNodesTimedOut++;
- _docsumsTimedOut += nodePendingDocsums;
- }
- adjustDocsumTimeout();
- }
- error->Free();
- EndFNETWork(std::move(searchGuard));
-}
-
-
-void
-FastS_FNET_Search::HandleTimeout()
-{
- auto searchGuard(BeginFNETWork());
- if (!searchGuard) {
- return;
- }
-
- if (_FNET_mode == FNET_QUERY) {
- for (FastS_FNET_SearchNode & node : _nodes) {
- if (node._flags._pendingQuery) {
- FastS_assert(_pendingQueries > 0);
- _pendingQueries--;
- node._flags._pendingQuery = false;
- node._flags._queryTimeout = true;
- _queryNodesTimedOut++;
- double tnow = GetTimeKeeper()->GetTime();
- node._queryTime = tnow - _startTime;
- node.GetEngine()->UpdateSearchTime(tnow, node._queryTime, true);
- }
- }
- _queryTimeout = true;
- } else if (_FNET_mode == FNET_DOCSUMS) {
- for (FastS_FNET_SearchNode & node : _nodes) {
- if (node._pendingDocsums > 0) {
- uint32_t nodePendingDocsums = node._pendingDocsums;
- FastS_assert(_pendingDocsums >= nodePendingDocsums);
- _pendingDocsums -= nodePendingDocsums;
- _docsumsTimedOut += nodePendingDocsums;
- node._pendingDocsums = 0;
- node._flags._docsumTimeout = true;
- _docsumNodesTimedOut++;
- _pendingDocsumNodes--;
- }
- for (FastS_FNET_SearchNode::ExtraDocsumNodesIter iter(&node); iter.valid(); ++iter) {
- FastS_FNET_SearchNode *eNode = *iter;
- if (eNode->_pendingDocsums > 0) {
- uint32_t nodePendingDocsums = eNode->_pendingDocsums;
- FastS_assert(_pendingDocsums >= nodePendingDocsums);
- _pendingDocsums -= nodePendingDocsums;
- _docsumsTimedOut += nodePendingDocsums;
- eNode->_pendingDocsums = 0;
- eNode->_flags._docsumTimeout = true;
- _docsumNodesTimedOut++;
- _pendingDocsumNodes--;
- }
- }
- }
- _docsumTimeout = true;
- }
- EndFNETWork(std::move(searchGuard));
-}
-
-std::unique_lock<std::mutex>
-FastS_FNET_Search::BeginFNETWork()
-{
- std::unique_lock<std::mutex> searchGuard(_lock);
- if (_FNET_mode == FNET_NONE) {
- searchGuard.unlock();
- }
- return searchGuard;
-}
-
-void
-FastS_FNET_Search::EndFNETWork(std::unique_lock<std::mutex> searchGuard)
-{
- if (_FNET_mode == FNET_QUERY && _pendingQueries == 0) {
- _FNET_mode = FNET_NONE;
- searchGuard.unlock();
- _searchOwner->DoneQuery(this);
- } else if (_FNET_mode == FNET_DOCSUMS && _pendingDocsums == 0) {
- _FNET_mode = FNET_NONE;
- searchGuard.unlock();
- _searchOwner->DoneDocsums(this);
- }
-}
-
-bool
-FastS_FNET_Search::ShouldLimitHitsPerNode() const
-{
- return (_util.GetAlignedMaxHits() > _dataset->GetMaxHitsPerNode());
-}
-
-
-void
-FastS_FNET_Search::MergeHits()
-{
- FastS_HitMerger<FastS_FNETMerge> merger(this);
- merger.MergeHits();
-
- if (_util.IsEstimate())
- return;
-
- if (ShouldLimitHitsPerNode())
- _dataset->UpdateMaxHitsPerNodeLog(merger.WasIncomplete(), merger.WasFuzzy());
-
- if (!_queryArgs->groupSpec.empty()) {
- _groupMerger.reset(new search::grouping::MergingManager(_dataset->GetPartBits(), _dataset->GetRowBits()));
- for (const FastS_FNET_SearchNode & node : _nodes) {
- if (node._qresult != nullptr) {
- _groupMerger->addResult(node.getPartID(), node.GetRowID(),
- ((node._qresult->_features & search::fs4transport::QRF_MLD) != 0),
- node._qresult->_groupData, node._qresult->_groupDataLen);
- }
- }
- _groupMerger->merge();
- _util.SetGroupResultLen(_groupMerger->getGroupResultLen());
- _util.SetGroupResult(_groupMerger->getGroupResult());
- }
-}
-
-FastS_SearchInfo
-FastS_FNET_Search::computeCoverage(const std::vector<FastS_FNET_SearchNode> & nodes,
- uint32_t numSearchableCopies, bool adaptiveTimeout)
-{
- FastS_SearchInfo si;
- size_t cntNone(0);
- size_t askedButNotAnswered(0);
-
- for (const FastS_FNET_SearchNode & node : nodes) {
- if (node._qresult != nullptr) {
- si._coverageDocs += node._qresult->_coverageDocs;
- si._activeDocs += node._qresult->_activeDocs;
- si._soonActiveDocs += node._qresult->_soonActiveDocs;
- si._degradeReason |= node._qresult->_coverageDegradeReason;
- si._nodesQueried += node._qresult->getNodesQueried();
- si._nodesReplied += node._qresult->getNodesReplied();
- } else {
- si._nodesQueried++;
- cntNone++;
- if (node.IsConnected()) {
- askedButNotAnswered++;
- }
- }
- }
- bool missingReplies = (askedButNotAnswered != 0) || (si._nodesQueried != si._nodesReplied);
- const ssize_t missingParts = cntNone - (numSearchableCopies - 1);
-
- if (missingReplies && adaptiveTimeout) {
- // TODO This will not be correct when using multilevel dispatch and has timeout on anything, but leaf level.
- // We can live with that as leaf level failures are the likely ones.
- if (si._nodesReplied ) {
- si._activeDocs += askedButNotAnswered * si._activeDocs/si._nodesReplied;
- si._soonActiveDocs += askedButNotAnswered * si._soonActiveDocs/si._nodesReplied;
- }
- si._degradeReason |= search::engine::SearchReply::Coverage::ADAPTIVE_TIMEOUT;
- } else if (missingParts > 0) {
- // TODO This is a dirty way of anticipating missing coverage.
- // It should be done differently
- if ((cntNone != nodes.size())) {
- si._activeDocs += missingParts * si._activeDocs/(nodes.size() - cntNone);
- si._soonActiveDocs += missingParts * si._soonActiveDocs/(nodes.size() - cntNone);
- }
- si._degradeReason |= search::engine::SearchReply::Coverage::TIMEOUT;
-
- }
- return si;
-}
-
-void
-FastS_FNET_Search::CheckCoverage()
-{
- FastS_SearchInfo si = computeCoverage(_nodes, _dataset->getSearchableCopies(), useAdaptiveTimeout());
- _util.SetCoverage(si._coverageDocs, si._activeDocs, si._soonActiveDocs,
- si._degradeReason, si._nodesQueried, si._nodesReplied);
-}
-
-
-void
-FastS_FNET_Search::CheckQueryTimes()
-{
- double factor = _dataset->GetSlowQueryLimitFactor();
- double bias = _dataset->GetSlowQueryLimitBias();
- double queryTime = 0.0;
- int queryCnt = 0;
-
- for (const FastS_FNET_SearchNode & node : _nodes) {
- if (node.IsConnected() && node._queryTime > 0.0) {
- queryTime += node._queryTime;
- queryCnt++;
- }
- }
-
- if (queryCnt == 0)
- return;
-
- queryTime = queryTime / (double)queryCnt;
- double maxQueryTime = queryTime * factor + bias;
-
- for (const FastS_FNET_SearchNode & node : _nodes) {
- if (node.IsConnected() && node._queryTime > maxQueryTime) {
- node.GetEngine()->SlowQuery(maxQueryTime, node._queryTime - maxQueryTime, false);
- }
- }
-}
-
-
-void
-FastS_FNET_Search::CheckDocsumTimes()
-{
- double factor = _dataset->GetSlowDocsumLimitFactor();
- double bias = _dataset->GetSlowDocsumLimitBias();
- double docsumTime = 0.0;
- int docsumCnt = 0;
-
- for (const FastS_FNET_SearchNode & node : _nodes) {
- if (node.IsConnected() && node._docsumTime > 0.0) {
- docsumTime += node._docsumTime;
- docsumCnt++;
- }
- }
- if (docsumCnt == 0)
- return;
- docsumTime = docsumTime / (double)docsumCnt;
- double maxDocsumTime = docsumTime * factor + bias;
-
- for (const FastS_FNET_SearchNode & node : _nodes) {
- if (node.IsConnected() && node._docsumTime > maxDocsumTime) {
- node.GetEngine()->SlowDocsum(maxDocsumTime, node._docsumTime - maxDocsumTime);
- }
- for (FastS_FNET_SearchNode::ExtraDocsumNodesIter iter(&node); iter.valid(); ++iter) {
- FastS_FNET_SearchNode *eNode = *iter;
- if (eNode->IsConnected() && eNode->_docsumTime > maxDocsumTime) {
- eNode->GetEngine()->SlowDocsum(maxDocsumTime, eNode->_docsumTime - maxDocsumTime);
- }
- }
- }
-}
-
-
-void
-FastS_FNET_Search::CheckQueryTimeout()
-{
- if (_queryNodes != 0 && _queryNodesTimedOut >= _queryNodes)
- SetError(search::engine::ECODE_TIMEOUT, nullptr);
- if (!_queryTimeout)
- return;
-
- vespalib::string nodeList;
- uint32_t nodeCnt = 0;
- uint32_t printNodes = 10;
- for (const FastS_FNET_SearchNode & node : _nodes) {
- if (node._flags._queryTimeout) {
- if (nodeCnt < printNodes) {
- if (nodeCnt > 0) {
- nodeList.append(", ");
- }
- nodeList.append(node.GetEngine()->GetName());
- }
- ++nodeCnt;
- }
- }
- if (nodeCnt > printNodes) {
- nodeList.append(", ...");
- }
- vespalib::string query = _util.GetQuery().getPrintableQuery();
- LOG(warning, "%u nodes(%s) timed out during query execution (%s)",
- nodeCnt, nodeList.c_str(), query.c_str());
-}
-
-
-void
-FastS_FNET_Search::CheckDocsumTimeout()
-{
- if (_docsumNodes != 0 && _docsumNodesTimedOut >= _docsumNodes)
- SetError(search::engine::ECODE_TIMEOUT, nullptr);
- if (!_docsumTimeout)
- return;
-
- vespalib::string nodeList;
- uint32_t nodeCnt = 0;
- uint32_t printNodes = 10;
- for (const FastS_FNET_SearchNode & node : _nodes) {
- if (node._flags._docsumTimeout) {
- if (nodeCnt < printNodes) {
- if (nodeCnt > 0) {
- nodeList.append(", ");
- }
- nodeList.append(node.GetEngine()->GetName());
- }
- ++nodeCnt;
- }
- for (FastS_FNET_SearchNode::ExtraDocsumNodesIter iter(&node); iter.valid(); ++iter) {
- FastS_FNET_SearchNode *eNode = *iter;
- if (eNode->_flags._docsumTimeout) {
- if (nodeCnt < printNodes) {
- if (nodeCnt > 0) {
- nodeList.append(", ");
- }
- nodeList.append(eNode->GetEngine()->GetName());
- }
- ++nodeCnt;
- }
- }
- }
- if (nodeCnt > printNodes) {
- nodeList.append(", ...");
- }
- double elapsed = GetTimeKeeper()->GetTime() - _docSumStartTime;
- LOG(warning, "%u nodes given %1.6f seconds timeout timed out during docsum fetching after %1.6f seconds (%s)",
- nodeCnt, _adjustedDocSumTimeOut, elapsed, nodeList.c_str());
-}
-
-
-FastS_ISearch::RetCode
-FastS_FNET_Search::Search(uint32_t searchOffset,
- uint32_t maxhits, uint32_t minhits)
-{
- // minhits is never sent down from dispatch...
- (void) minhits; // ignore
-
- _util.setSearchRequest(_queryArgs);
- _util.SetupQuery(maxhits, searchOffset);
- if (_util.IsEstimate())
- _util.InitEstimateMode();
- _util.AdjustSearchParameters(_nodes.size());
- _util.AdjustSearchParametersFinal(_nodes.size());
-
- vespalib::string searchPath;
- const search::fef::Properties & model = _queryArgs->propertiesMap.modelOverrides();
- search::fef::Property searchPathProperty = model.lookup("searchpath");
- if (searchPathProperty.found()) {
- searchPath = searchPathProperty.get();
- }
- _adjustedQueryTimeOut = static_cast<double>(_queryArgs->getTimeLeft().ms()) / 1000.0;
- if ( ! searchPath.empty()) {
- connectSearchPath(searchPath);
- } else if (_util.IsEstimate()) {
- ConnectEstimateNodes();
- } else {
- ConnectQueryNodes();
- }
-
- // we support error packets
- uint32_t qflags = _util.GetQuery().GetQueryFlags();
-
- // propagate drop-sortdata flag only if we have single sub-node
- if (_nodes.size() != 1)
- qflags &= ~search::fs4transport::QFLAG_DROP_SORTDATA;
-
- uint32_t hitsPerNode = ShouldLimitHitsPerNode()
- ? _dataset->GetMaxHitsPerNode()
- : _util.GetAlignedMaxHits();
-
- // set up expected _queryNodes, _pendingQueries and node->_flags._pendingQuery state
- for (FastS_FNET_SearchNode & node : _nodes) {
- if (node.IsConnected()) {
- node._flags._pendingQuery = true;
- _pendingQueries++;
- _queryNodes++;
- }
- }
- size_t num_send_ok = 0; // number of partitions where packet send succeeded
- std::vector<uint32_t> send_failed; // partitions where packet send failed
-
- // allow FNET responses while requests are being sent
- {
- std::lock_guard<std::mutex> searchGuard(_lock);
- ++_pendingQueries; // add Elephant query node to avoid early query done
- ++_queryNodes; // add Elephant query node to avoid early query done
- _FNET_mode = FNET_QUERY;
- _queryStartTime = GetTimeKeeper()->GetTime();
- _timeout.Schedule(_adjustedQueryTimeOut);
- }
- FNET_Packet::SP shared(new FS4Packet_PreSerialized(*setupQueryPacket(hitsPerNode, qflags, _queryArgs->propertiesMap)));
- for (uint32_t i = 0; i < _nodes.size(); i++) {
- FastS_FNET_SearchNode & node = _nodes[i];
- if (node.IsConnected()) {
- FNET_Packet::UP qx(new FS4Packet_Shared(shared));
- LOG(spam, "posting packet to node %d='%s'\npacket=%s", i, node.toString().c_str(), qx->Print(0).c_str());
- if (node.PostPacket(qx.release())) {
- ++num_send_ok;
- } else {
- send_failed.push_back(i);
- LOG(debug, "FAILED posting packet to node %d='%s'\npacket=%s", i, node.toString().c_str(), qx->Print(0).c_str());
- }
- }
- }
-
- // finalize setup and check if query is still in progress
- bool done;
- {
- std::lock_guard<std::mutex> searchGuard(_lock);
- assert(_queryNodes >= _pendingQueries);
- for (uint32_t i: send_failed) {
- // conditional revert of state for failed nodes
- if (_nodes[i]._flags._pendingQuery) {
- _nodes[i]._flags._pendingQuery = false;
- assert(_pendingQueries > 0);
- --_pendingQueries;
- --_queryNodes;
- }
- }
- // revert Elephant query node to allow search to complete
- assert(_pendingQueries > 0);
- --_pendingQueries;
- --_queryNodes;
- done = (_pendingQueries == 0);
- bool all_down = (num_send_ok == 0);
- if (done) {
- _FNET_mode = FNET_NONE;
- if (all_down) {
- SetError(search::engine::ECODE_ALL_PARTITIONS_DOWN, nullptr);
- }
- }
- }
-
- return (done) ? RET_OK : RET_INPROGRESS;
-}
-
-vespalib::string
-FastS_FNET_SearchNode::toString() const
-{
- vespalib::string s;
- s += vespalib::make_string("{ channel=%p={%d, c=%p='%s'}, partId = %d, rowid=%d }",
- _channel, _channel->GetID(),
- _channel->GetConnection(), _channel->GetConnection()->GetSpec(),
- _partid, _rowid);
- return s;
-}
-
-
-FNET_Packet::UP
-FastS_FNET_Search::setupQueryPacket(uint32_t hitsPerNode, uint32_t qflags,
- const search::engine::PropertiesMap &properties)
-{
- FNET_Packet::UP ret(new FS4Packet_QUERYX());
- FS4Packet_QUERYX & qx = static_cast<FS4Packet_QUERYX &>(*ret);
- qx._features = search::fs4transport::QF_PARSEDQUERY | search::fs4transport::QF_RANKP;
- qx._offset = _util.GetAlignedSearchOffset();
- qx._maxhits = hitsPerNode; // capped maxhits
- qx.setQueryFlags(qflags);
- qx.setTimeout(_queryArgs->getTimeLeft());
-
- qx.setRanking(_queryArgs->ranking);
-
- if (!_queryArgs->sortSpec.empty()) {
- qx._features |= search::fs4transport::QF_SORTSPEC;
- qx.setSortSpec(_queryArgs->sortSpec);
- }
-
- if (!_queryArgs->groupSpec.empty()) {
- qx._features |= search::fs4transport::QF_GROUPSPEC;
- qx.setGroupSpec(vespalib::stringref(&_queryArgs->groupSpec[0], _queryArgs->groupSpec.size()));
- }
-
- if (!_queryArgs->sessionId.empty()) {
- qx._features |= search::fs4transport::QF_SESSIONID;
- qx.setSessionId(vespalib::stringref(&_queryArgs->sessionId[0], _queryArgs->sessionId.size()));
- }
-
- if (!_queryArgs->location.empty()) {
- qx._features |= search::fs4transport::QF_LOCATION;
- qx.setLocation(_queryArgs->location);
- }
-
- if (properties.size() > 0) {
- PacketConverter::fillPacketProperties(properties, qx._propsVector);
- qx._features |= search::fs4transport::QF_PROPERTIES;
- }
-
- qx._numStackItems = _queryArgs->stackItems;
- qx.setStackDump(_queryArgs->getStackRef());
- return ret;
-}
-
-
-FastS_ISearch::RetCode
-FastS_FNET_Search::ProcessQueryDone()
-{
- CheckCoverage();
-
- if (_errorCode == search::engine::ECODE_NO_ERROR) {
- MergeHits();
- }
- _queryResult = *_util.GetQueryResult();
- double tnow = GetTimeKeeper()->GetTime();
- _queryResult._queryResultTime = tnow - _startTime;
- if (_errorCode == search::engine::ECODE_NO_ERROR) {
- if (_util.IsEstimate()) {
- _dataset->UpdateEstimateCount();
- } else {
- _dataset->UpdateSearchTime(tnow, _queryResult._queryResultTime, _queryNodesTimedOut != 0);
- }
- if ( _dataset->useFixedRowDistribution() ) {
- _dataset->updateSearchTime(_queryResult._queryResultTime, _fixedRow);
- }
- }
- CheckQueryTimes();
- CheckQueryTimeout();
- dropDatasetActiveCostRef();
- return RET_OK;
-}
-
-
-FastS_ISearch::RetCode
-FastS_FNET_Search::GetDocsums(const FastS_hitresult *hits, uint32_t hitcnt)
-{
- if (hitcnt > 0) {
- _resbuf.resize(hitcnt);
- }
-
- // copy values from query result
-
- uint32_t i;
- for (i = 0; i < hitcnt; i++) {
- _resbuf[i]._docid = 0;
- _resbuf[i]._gid = hits[i]._gid;
- _resbuf[i]._metric = hits[i]._metric;
- _resbuf[i]._partition = hits[i]._partition;
- }
-
- // determine docsum distribution among nodes
-
- const FastS_hitresult *p = hits;
- uint32_t rowbits = _dataset->GetRowBits();
- uint32_t partbits = _dataset->GetPartBits();
- uint32_t mldpartidmask = (1 << partbits) - 1;
- bool ignoreRow = (_docsumArgs->getFlags() &
- search::fs4transport::GDFLAG_IGNORE_ROW) != 0;
- if (rowbits > 0) {
- uint32_t rowmask = (1 << rowbits) - 1;
- for (i = 0; i < hitcnt; i++, p++) {
- FastS_FNET_SearchNode *node;
- uint32_t partid0 = p->_partition >> rowbits;
- uint32_t row = ignoreRow ? 0u : p->_partition & rowmask;
- if (IS_MLD_PART(partid0)) {
- uint32_t partid = MLD_PART_TO_PARTID(partid0);
- if (partid < _nodes.size()) {
- node = getNode(partid);
- if (node->_docidCnt == 0) {
- node->_flags._docsumMld = true;// Only accept MLD from now on
- node->_docsumRow = row;
- } else if (!node->_flags._docsumMld || row != node->_docsumRow) {
- if (_nodesConnected)
- continue; // Drop (inconsistent)
- node = node->allocExtraDocsumNode(true, row, rowbits);
- }
- node->_docidCnt++;
- }
- } else { // !MLD
- if (partid0 < _nodes.size()) {
- node = getNode(partid0);
- if (node->_docidCnt == 0) {
- node->_docsumRow = row;
- } else if (node->_flags._docsumMld || row != node->_docsumRow) {
- if (_nodesConnected)
- continue; // Drop (inconsistent)
- node = node->allocExtraDocsumNode(false, row, rowbits);
- }
- node->_docidCnt++;
- }
- }
- }
- } else { // rowbits == 0
- for (i = 0; i < hitcnt; i++, p++) {
- FastS_FNET_SearchNode *node;
- if (IS_MLD_PART(p->_partition)) {
- uint32_t partid = MLD_PART_TO_PARTID(p->_partition);
- if (partid < _nodes.size()) {
- node = getNode(partid);
- if (node->_docidCnt == 0) {
- node->_flags._docsumMld = true;// Only accept MLD from now on
- } else if (!node->_flags._docsumMld) {
- if (_nodesConnected)
- continue; // Drop (inconsistent)
- node = node->allocExtraDocsumNode(true, 0, 0);
- }
- node->_docidCnt++;
- }
- } else { // !MLD
- if (p->_partition < _nodes.size()) {
- node = getNode(p->_partition);
- if (node->_docidCnt == 0) {
- } else if (node->_flags._docsumMld) {
- if (_nodesConnected)
- continue; // Drop (inconsistent)
- node = node->allocExtraDocsumNode(false, 0, 0);
- }
- node->_docidCnt++;
- }
- }
- }
- }
- FastS_assert(p == hits + hitcnt);
-
- // allocate docsum requests and insert features
-
- search::docsummary::GetDocsumArgs *args = _docsumArgs;
- for (FastS_FNET_SearchNode & node : _nodes) {
- if (node._docidCnt != 0) {
- node.allocGDX(args, args->propertiesMap());
- }
- for (FastS_FNET_SearchNode::ExtraDocsumNodesIter iter(&node); iter.valid(); ++iter) {
- FastS_FNET_SearchNode *eNode = *iter;
- if (eNode->_docidCnt != 0)
- eNode->allocGDX(args, args->propertiesMap());
- }
- }
-
- // fill docid(/partid/stamp) data into docsum requests
-
- p = hits;
- if (rowbits > 0) {
- uint32_t rowmask = (1 << rowbits) - 1;
- for (i = 0; i < hitcnt; i++, p++) {
- FastS_FNET_SearchNode *node;
- uint32_t partid0 = p->_partition >> rowbits;
- uint32_t row = ignoreRow ? 0u : p->_partition & rowmask;
- if (IS_MLD_PART(partid0)) {
- uint32_t partid = MLD_PART_TO_PARTID(partid0);
- if (partid < _nodes.size()) {
- node = getNode(partid);
- if (!node->_flags._docsumMld || row != node->_docsumRow) {
- if (_nodesConnected)
- continue; // Drop (inconsistent)
- node = node->allocExtraDocsumNode(true, row, rowbits);
- }
-
- FS4Packet_GETDOCSUMSX::FS4_docid &q = node->_gdx->_docid[node->_docsum_offsets_idx];
- q._gid = p->_gid;
- q._partid = DECODE_MLD_PART(partid0);
- node->_docsum_offsets[node->_docsum_offsets_idx++] = i;
- }
- } else { // !MLD
- if (partid0 < _nodes.size()) {
- node = getNode(partid0);
- if (node->_flags._docsumMld || row != node->_docsumRow) {
- if (_nodesConnected)
- continue; // Drop (inconsistent)
- node = node->allocExtraDocsumNode(false, row, rowbits);
- }
-
- FS4Packet_GETDOCSUMSX::FS4_docid &q = node->_gdx->_docid[node->_docsum_offsets_idx];
- q._gid = p->_gid;
- node->_docsum_offsets[node->_docsum_offsets_idx++] = i;
- }
- }
- }
- } else { // rowbits == 0
- for (i = 0; i < hitcnt; i++, p++) {
- FastS_FNET_SearchNode *node;
- if (IS_MLD_PART(p->_partition)) {
- uint32_t partid = MLD_PART_TO_PARTID(p->_partition);
- if (partid < _nodes.size()) {
- node = getNode(partid);
- if (!node->_flags._docsumMld) {
- if (_nodesConnected)
- continue; // Drop (inconsistent)
- node = node->allocExtraDocsumNode(true, 0, 0);
- }
-
- FS4Packet_GETDOCSUMSX::FS4_docid &q = node->_gdx->_docid[node->_docsum_offsets_idx];
- q._gid = p->_gid;
- q._partid = DECODE_MLD_PART(p->_partition);
- node->_docsum_offsets[node->_docsum_offsets_idx++] = i;
- }
- } else { // !MLD
- if (p->_partition < _nodes.size()) {
- node = getNode(p->_partition);
- if (node->_flags._docsumMld) {
- if (_nodesConnected)
- continue; // Drop (inconsistent)
- node = node->allocExtraDocsumNode(false, 0, 0);
- }
-
- FS4Packet_GETDOCSUMSX::FS4_docid &q = node->_gdx->_docid[node->_docsum_offsets_idx];
- q._gid = p->_gid;
- node->_docsum_offsets[node->_docsum_offsets_idx++] = i;
- }
- }
- }
- }
- FastS_assert(p == hits + hitcnt);
-
- ConnectDocsumNodes(ignoreRow);
- bool done;
- {
- std::lock_guard<std::mutex> searchGuard(_lock);
-
- // patch in engine dependent features and send docsum requests
-
- for (FastS_FNET_SearchNode & node : _nodes) {
- if (node._gdx != nullptr)
- node.postGDX(&_pendingDocsums, &_docsumNodes);
- for (FastS_FNET_SearchNode::ExtraDocsumNodesIter iter(&node); iter.valid(); ++iter) {
- FastS_FNET_SearchNode *eNode = *iter;
- if (eNode->_gdx != nullptr)
- eNode->postGDX(&_pendingDocsums, &_docsumNodes);
- }
- }
- _pendingDocsumNodes = _docsumNodes;
- _requestedDocsums = _pendingDocsums;
-
- done = (_pendingDocsums == 0);
- if (!done) {
- _FNET_mode = FNET_DOCSUMS; // FNET; do your thing
-
- _adjustedDocSumTimeOut = args->getTimeout().sec();
- _docSumStartTime = GetTimeKeeper()->GetTime();
- _timeout.Schedule(_adjustedDocSumTimeOut);
- }
- }
-
- return (done) ? RET_OK : RET_INPROGRESS;
-}
-
-
-FastS_ISearch::RetCode
-FastS_FNET_Search::ProcessDocsumsDone()
-{
- _docsumsResult._fullresult = &_resbuf[0];
- _docsumsResult._fullResultCount = _resbuf.size();
- _docsumsResult._queryDocSumTime = GetTimeKeeper()->GetTime() - _startTime;
- CheckDocsumTimes();
- CheckDocsumTimeout();
- dropDatasetActiveCostRef();
- return RET_OK;
-}
-
-bool
-FastS_FNET_Search::useAdaptiveTimeout() const {
- return _dataset->getMinimalSearchCoverage() < 100.0;
-}
-
-void
-FastS_FNET_Search::adjustQueryTimeout()
-{
- uint32_t pendingQueries = getPendingQueries();
-
- if ((pendingQueries == 0) ||
- _util.IsQueryFlagSet(search::fs4transport::QFLAG_DUMP_FEATURES) ||
- ! useAdaptiveTimeout())
- {
- return;
- }
-
- double mincoverage = _dataset->getMinimalSearchCoverage();
- uint32_t wantedAnswers = std::ceil(getRequestedQueries() * mincoverage / 100.0);
- LOG(spam, "Adjusting wanted answers from %u to %u", getRequestedQueries(), wantedAnswers);
- if (getDoneQueries() < wantedAnswers) {
- return;
- }
- if (!_queryWaitCalculated) {
- double timeLeft = _queryArgs->getTimeLeft().sec();
- _queryMinWait = timeLeft * _dataset->getHigherCoverageMinSearchWait();
- _queryMaxWait = timeLeft * _dataset->getHigherCoverageMaxSearchWait();
- _queryWaitCalculated = true;
- }
-
- double basewait = 0.0;
- double minwait = _queryMinWait;
- double maxwait = _queryMaxWait;
-
- double elapsed = GetTimeKeeper()->GetTime() - _queryStartTime;
-
- double missWidth = ((100.0 - mincoverage) * getRequestedQueries()) / 100.0 - 1.0;
-
- double slopedwait = minwait;
-
- if (pendingQueries > 1 && missWidth > 0.0)
- slopedwait += ((maxwait - minwait) * (pendingQueries - 1)) / missWidth;
-
- double newTimeOut = std::max(elapsed, basewait) + slopedwait;
-
-
- if (newTimeOut >= _adjustedQueryTimeOut)
- return;
-
- _adjustedQueryTimeOut = newTimeOut;
- if (newTimeOut > elapsed)
- _timeout.Schedule(newTimeOut - elapsed);
- else
- _timeout.ScheduleNow();
-}
-
-
-void
-FastS_FNET_Search::adjustDocsumTimeout()
-{
- uint32_t pendingDocsums = getPendingDocsums();
-
- if (pendingDocsums == 0 || _util.IsQueryFlagSet(search::fs4transport::QFLAG_DUMP_FEATURES)) {
- return;
- }
-
- double coverage = static_cast<double>(getDoneDocsums() * 100) / getRequestedDocsums();
-
- double mincoverage = _dataset->getMinimalDocSumCoverage();
-
- if (coverage < mincoverage)
- return;
-
- double basewait = _dataset->getHigherCoverageBaseDocSumWait();
- double minwait = _dataset->getHigherCoverageMinDocSumWait();
- double maxwait = _dataset->getHigherCoverageMaxDocSumWait();
-
- double elapsed = GetTimeKeeper()->GetTime() - _docSumStartTime;
-
- double missWidth = ((100.0 - mincoverage) * getRequestedDocsums()) / 100.0 - 1.0;
-
- double slopedwait = minwait;
-
- if (pendingDocsums > 1 && missWidth > 0.0)
- slopedwait += ((maxwait - minwait) * (pendingDocsums - 1)) / missWidth;
-
- double newTimeOut = std::max(elapsed, basewait) + slopedwait;
-
- if (newTimeOut >= _adjustedDocSumTimeOut)
- return;
-
- _adjustedDocSumTimeOut = newTimeOut;
- if (newTimeOut > elapsed)
- _timeout.Schedule(newTimeOut - elapsed);
- else
- _timeout.ScheduleNow();
-}
-
-
-FastS_Sync_FNET_Search::~FastS_Sync_FNET_Search() {}
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/fnet_search.h b/searchcore/src/vespa/searchcore/fdispatch/search/fnet_search.h
deleted file mode 100644
index a77b984ca28..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/fnet_search.h
+++ /dev/null
@@ -1,375 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/fnet/fnet.h>
-#include <vespa/vespalib/objects/nbostream.h>
-#include <vespa/searchcore/fdispatch/common/search.h>
-#include <vespa/searchlib/common/sortdata.h>
-#include <vespa/searchcore/grouping/mergingmanager.h>
-#include <vespa/searchcore/fdispatch/search/search_path.h>
-#include <vespa/searchcore/fdispatch/search/querycacheutil.h>
-#include <vespa/searchcore/fdispatch/search/fnet_engine.h>
-
-class FastS_FNET_Engine;
-class FastS_FNET_Search;
-
-using search::fs4transport::FS4Packet_QUERYRESULTX;
-using search::fs4transport::FS4Packet_GETDOCSUMSX;
-using search::fs4transport::FS4Packet_DOCSUM;
-using search::fs4transport::FS4Packet_TRACEREPLY;
-
-//-----------------------------------------------------------------
-
-class FastS_FNET_SearchNode : public FNET_IPacketHandler
-{
-public:
- class ExtraDocsumNodesIter;
- typedef std::unique_ptr<FastS_FNET_SearchNode> UP;
-private:
- friend class ExtraDocsumNodesIter;
-
- FastS_FNET_Search *_search; // we are part of this search
- FastS_FNET_Engine *_engine; // we use this search engine
- FNET_Channel *_channel; // connection with search engine
- uint32_t _partid; // engine partition id
- uint32_t _rowid; // engine row id
- uint32_t _stamp; // engine timestamp
-
-public:
-
- FS4Packet_QUERYRESULTX *_qresult; // query result packet
- double _queryTime;
- struct Flags {
- Flags() :
- _pendingQuery(false),
- _docsumMld(false),
- _queryTimeout(false),
- _docsumTimeout(false),
- _needSubCost(false)
- { }
- bool _pendingQuery; // is query pending ?
- bool _docsumMld;
- bool _queryTimeout;
- bool _docsumTimeout;
- bool _needSubCost;
- };
-
- Flags _flags;
-
-// Docsum related stuff.
- uint32_t _docidCnt;
- uint32_t _pendingDocsums; // how many docsums pending ?
- uint32_t _docsumRow;
- uint32_t _docsum_offsets_idx;
- double _docsumTime;
-
- FS4Packet_GETDOCSUMSX *_gdx;
- std::vector<uint32_t> _docsum_offsets;
-private:
- std::vector<FastS_FNET_SearchNode::UP> _extraDocsumNodes;
- FastS_FNET_SearchNode *_nextExtraDocsumNode;
- FastS_FNET_SearchNode *_prevExtraDocsumNode;
-public:
-
-// Query processing stuff.
- FS4Packet_QUERYRESULTX::FS4_hit *_hit_beg; // hit array start
- FS4Packet_QUERYRESULTX::FS4_hit *_hit_cur; // current hit
- FS4Packet_QUERYRESULTX::FS4_hit *_hit_end; // end boundary
-
- search::common::SortDataIterator _sortDataIterator;
-
-public:
- FastS_FNET_SearchNode(FastS_FNET_Search *search, uint32_t partid);
- // These objects are referenced everywhere and must never be either copied nor moved,
- // but std::vector requires this to exist. If called it will assert.
- FastS_FNET_SearchNode(FastS_FNET_SearchNode && rhs);
- FastS_FNET_SearchNode(const FastS_FNET_SearchNode &) = delete;
- FastS_FNET_SearchNode& operator=(const FastS_FNET_SearchNode &) = delete;
-
- ~FastS_FNET_SearchNode() override;
-
- // Methods needed by mergehits
- bool NT_InitMerge(uint32_t *numDocs, uint64_t *totalHits, search::HitRank *maxRank, uint32_t *sortDataDocs);
- search::common::SortDataIterator *NT_GetSortDataIterator() { return &_sortDataIterator; }
- FS4Packet_QUERYRESULTX::FS4_hit *NT_GetHit() const { return _hit_cur; }
- uint32_t NT_GetNumHitsUsed() const { return (_hit_cur - _hit_beg); }
- uint32_t NT_GetNumHitsLeft() const { return (_hit_end - _hit_cur); }
- uint64_t NT_GetTotalHits() const { return (_qresult != nullptr) ? _qresult->_totNumDocs : 0; }
- uint32_t NT_GetNumHits() const { return (_hit_end - _hit_beg); }
- void NT_NextHit() { _hit_cur++; }
-
- uint32_t getPartID() const { return _partid; }
- uint32_t GetRowID() const { return _rowid; }
-
- FastS_FNET_SearchNode * allocExtraDocsumNode(bool mld, uint32_t rowid, uint32_t rowbits);
-
- FastS_FNET_Engine *GetEngine() const { return _engine; }
-
- bool IsConnected() const { return _channel != nullptr; }
- void Connect(FastS_FNET_Engine *engine);
- void Connect_HasDSLock(FastS_FNET_Engine *engine);
- FastS_EngineBase * getPartition(const std::unique_lock<std::mutex> &dsGuard, bool userow, FastS_FNET_DataSet *dataset);
- void allocGDX(search::docsummary::GetDocsumArgs *args, const search::engine::PropertiesMap &properties);
- void postGDX(uint32_t *pendingDocsums, uint32_t *pendingDocsumNodes);
- vespalib::string toString() const;
-
- void dropCost() {
- if (_engine != nullptr && _flags._needSubCost) {
- _engine->SubCost();
- _flags._needSubCost = false;
- }
- }
-
- void DirtySetChannelOnlyForTesting(FNET_Channel * channel) {
- _channel = channel;
- }
-
-
- void Disconnect()
- {
- if (_channel != nullptr) {
- _channel->CloseAndFree();
- _channel = nullptr;
- }
- if (_engine != nullptr) {
- if (_flags._needSubCost) {
- _engine->SubCost();
- _flags._needSubCost = false;
- }
- _engine = nullptr;
- }
- }
-
-
- bool PostPacket(FNET_Packet *packet) {
- return (_channel == nullptr) ? packet->Free(), false : _channel->Send(packet);
- }
-
- HP_RetCode HandlePacket(FNET_Packet *packet, FNET_Context context) override;
-};
-
-
-class FastS_FNET_SearchNode::ExtraDocsumNodesIter
-{
-private:
- ExtraDocsumNodesIter(const ExtraDocsumNodesIter &other);
- ExtraDocsumNodesIter& operator=(const ExtraDocsumNodesIter &other);
-
- FastS_FNET_SearchNode *_cur;
- const FastS_FNET_SearchNode *_head;
-
-public:
- ExtraDocsumNodesIter(const FastS_FNET_SearchNode *head)
- : _cur(head->_nextExtraDocsumNode),
- _head(head)
- {
- }
-
- ExtraDocsumNodesIter & operator++() {
- _cur = _cur->_nextExtraDocsumNode;
- return *this;
- }
-
- bool valid() const { return _cur != _head; }
- FastS_FNET_SearchNode *operator*() const { return _cur; }
-};
-
-
-//-----------------------------------------------------------------
-
-class FastS_FNET_Search : public FastS_AsyncSearch
-{
-private:
- FastS_FNET_Search(const FastS_FNET_Search &);
- FastS_FNET_Search& operator=(const FastS_FNET_Search &);
-
-public:
-
- class Timeout : public FNET_Task
- {
- private:
- Timeout(const Timeout &);
- Timeout& operator=(const Timeout &);
-
- FastS_FNET_Search *_search;
-
- public:
- Timeout(FNET_Scheduler *scheduler, FastS_FNET_Search *search)
- : FNET_Task(scheduler),
- _search(search) {}
- void PerformTask() override;
- };
-
- enum FNETMode {
- FNET_NONE = 0x00,
- FNET_QUERY = 0x01,
- FNET_DOCSUMS = 0x02
- };
-
-private:
- std::mutex _lock;
- FastS_TimeKeeper *_timeKeeper;
- double _startTime;
- Timeout _timeout;
- FastS_QueryCacheUtil _util;
- std::unique_ptr<search::grouping::MergingManager> _groupMerger;
- FastS_DataSetCollection *_dsc; // owner keeps this alive
- FastS_FNET_DataSet *_dataset;
- bool _datasetActiveCostRef;
- std::vector<FastS_FNET_SearchNode> _nodes;
- bool _nodesConnected;
-
- uint32_t _estParts;
- uint32_t _estPartCutoff;
-
- FNETMode _FNET_mode;
-
- uint32_t _pendingQueries; // # nodes with query left
- uint32_t _pendingDocsums; // # docsums left
- uint32_t _pendingDocsumNodes; // # nodes with docsums left
- uint32_t _requestedDocsums; // # docsums requested
- uint32_t _queryNodes; // #nodes with query
- uint32_t _queryNodesTimedOut; // #nodes with query timeout
- uint32_t _docsumNodes; // #nodes with docsums
- uint32_t _docsumNodesTimedOut; // #nodes with docsum timeout
- uint32_t _docsumsTimedOut;
- bool _queryTimeout;
- bool _docsumTimeout;
-
- double _queryStartTime;
- double _queryMinWait;
- double _queryMaxWait;
- bool _queryWaitCalculated;
- double _adjustedQueryTimeOut;
- double _docSumStartTime;
- double _adjustedDocSumTimeOut;
- uint32_t _fixedRow;
-
- std::vector<FastS_fullresult> _resbuf;
-
- void dropDatasetActiveCostRef();
-
- typedef std::vector<std::pair<FastS_EngineBase *, FastS_FNET_SearchNode *>> EngineNodeMap;
- void connectNodes(const EngineNodeMap & engines);
- void reallocNodes(size_t numParts);
- void ConnectQueryNodes();
- void ConnectEstimateNodes();
- void connectSearchPath(const vespalib::string &spec);
- void connectSearchPath(const fdispatch::SearchPath::Element &elem,
- const vespalib::string &spec, uint32_t dispatchLevel);
- void ConnectDocsumNodes(bool ignoreRow);
- uint32_t getNextFixedRow();
- uint32_t getFixedRowCandidate();
- uint32_t getHashedRow() const;
-
- std::unique_lock<std::mutex> BeginFNETWork();
- void EndFNETWork(std::unique_lock<std::mutex> searchGuard);
-
- void EncodePartIDs(uint32_t partid, uint32_t rowid, bool mld,
- FS4Packet_QUERYRESULTX::FS4_hit *pt,
- FS4Packet_QUERYRESULTX::FS4_hit *end);
-
- FastS_TimeKeeper *GetTimeKeeper() const { return _timeKeeper; }
-
- FastS_FNET_SearchNode * getNode(size_t i) { return &_nodes[i]; }
-public:
- FastS_FNET_Search(FastS_DataSetCollection *dsc, FastS_FNET_DataSet *dataset, FastS_TimeKeeper *timeKeeper);
- virtual ~FastS_FNET_Search();
-
- void GotQueryResult(FastS_FNET_SearchNode *node, FS4Packet_QUERYRESULTX *qrx);
- void GotDocsum(FastS_FNET_SearchNode *node, FS4Packet_DOCSUM *docsum);
- void LostSearchNode(FastS_FNET_SearchNode *node);
- void GotEOL(FastS_FNET_SearchNode *node);
- void GotError(FastS_FNET_SearchNode *node, search::fs4transport::FS4Packet_ERROR *error);
-
- void HandleTimeout();
-
- bool ShouldLimitHitsPerNode() const;
- void MergeHits();
- void CheckCoverage();
- static FastS_SearchInfo computeCoverage(const std::vector<FastS_FNET_SearchNode> & nodes,
- uint32_t numSearchableCopies, bool adaptiveTimeout);
- void CheckQueryTimes();
- void CheckDocsumTimes();
- void CheckQueryTimeout();
- void CheckDocsumTimeout();
-
- // *** API methods -- BEGIN ***
-
- FastS_SearchInfo *GetSearchInfo() override { return _util.GetSearchInfo(); }
-
- RetCode Search(uint32_t searchOffset, uint32_t maxhits, uint32_t minhits = 0) override;
- RetCode ProcessQueryDone() override;
- RetCode GetDocsums(const FastS_hitresult *hits, uint32_t hitcnt) override;
- RetCode ProcessDocsumsDone() override;
-
- // *** API methods -- END ***
-
- // Hit merging methods
-
- FastS_FNET_SearchNode *ST_GetNode(size_t i) { return getNode(i); }
- uint32_t ST_GetNumNodes() const { return _nodes.size(); }
- bool ST_IsEstimate() const { return _util.IsEstimate(); }
- uint32_t ST_GetEstParts() const { return _estParts; }
- uint32_t ST_GetEstPartCutoff() const { return _estPartCutoff; }
- bool ST_ShouldDropSortData() const { return _util.ShouldDropSortData(); }
- bool ST_ShouldLimitHitsPerNode() const { return ShouldLimitHitsPerNode(); }
- void ST_SetNumHits(uint32_t numHits) {
- _util.SetAlignedHitCount(numHits);
- _util.CalcHitCount();
- _util.AllocAlignedHitBuf();
- }
- void ST_AdjustNumHits(uint32_t numHits) {
- _util.SetAlignedHitCount(numHits);
- _util.CalcHitCount();
- }
- uint32_t ST_GetAlignedSearchOffset() const { return _util.GetAlignedSearchOffset(); }
- uint32_t ST_GetAlignedMaxHits() const { return _util.GetAlignedMaxHits(); }
- uint32_t ST_GetAlignedHitCount() const { return _util.GetAlignedHitCount(); }
- FastS_hitresult *ST_GetAlignedHitBuf() { return _util.GetAlignedHitBuf(); }
- FastS_hitresult *ST_GetAlignedHitBufEnd() { return _util.GetAlignedHitBufEnd(); }
- void ST_AllocSortData(uint32_t len) { _util.AllocSortData(len); }
- uint32_t *ST_GetSortIndex() { return _util.GetSortIndex(); }
- char *ST_GetSortData() { return _util.GetSortData(); }
- FastS_QueryResult *ST_GetQueryResult() { return _util.GetQueryResult(); }
-
- bool useAdaptiveTimeout() const;
- void adjustQueryTimeout();
- void adjustDocsumTimeout();
- uint32_t getRequestedQueries() const { return _queryNodes; }
- uint32_t getPendingQueries() const { return _pendingQueries; }
- uint32_t getDoneQueries() const {
- return getRequestedQueries() - getPendingQueries();
- }
- uint32_t getRequestedDocsums() const { return _requestedDocsums; }
- uint32_t getPendingDocsums() const { return _pendingDocsums; }
- uint32_t getDoneDocsums() const {
- return getRequestedDocsums() - getPendingDocsums();
- }
-
- FNET_Packet::UP
- setupQueryPacket(uint32_t hitsPerNode, uint32_t qflags,
- const search::engine::PropertiesMap &properties);
-};
-
-//-----------------------------------------------------------------------------
-
-class FastS_Sync_FNET_Search : public FastS_SyncSearchAdapter
-{
-private:
- FastS_FNET_Search _search;
-
-public:
- FastS_Sync_FNET_Search(FastS_DataSetCollection *dsc, FastS_FNET_DataSet *dataset, FastS_TimeKeeper *timeKeeper) :
- FastS_SyncSearchAdapter(&_search),
- _search(dsc, dataset, timeKeeper)
- {
- _search.SetAsyncArgs(this);
- }
- ~FastS_Sync_FNET_Search() override;
- void Free() override { delete this; }
-};
-
-//-----------------------------------------------------------------
-
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/mergehits.cpp b/searchcore/src/vespa/searchcore/fdispatch/search/mergehits.cpp
deleted file mode 100644
index 8c4a08a3bbb..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/mergehits.cpp
+++ /dev/null
@@ -1,283 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "mergehits.h"
-#include "querycacheutil.h"
-#include "fnet_dataset.h"
-#include "fnet_search.h"
-#include <vespa/searchcore/util/stlishheap.h>
-#include <vespa/vespalib/stllike/hash_set.h>
-#include <vespa/vespalib/stllike/hash_set.hpp>
-
-#include <vespa/log/log.h>
-LOG_SETUP(".fdispatch.mergehits");
-
-using search::common::SortData;
-using search::common::SortDataIterator;
-
-//-----------------------------------------------------------------------------
-
-template <bool SORTDATA, bool DROP>
-struct FastS_MergeFeatures
-{
- static bool UseSortData() { return SORTDATA; }
- static bool DropSortData() { return DROP; }
-};
-
-
-template <typename T, typename F>
-bool
-FastS_MergeCompare(typename T::NodeType *a,
- typename T::NodeType *b)
-{
- bool prefer_b = (b->NT_GetNumHitsUsed() < a->NT_GetNumHitsUsed());
- if (F::UseSortData()) {
- return b->NT_GetSortDataIterator()->Before(a->NT_GetSortDataIterator(), prefer_b);
- } else {
- search::HitRank rank_a = a->NT_GetHit()->HT_GetMetric();
- search::HitRank rank_b = b->NT_GetHit()->HT_GetMetric();
- return ((rank_b > rank_a) || ((rank_b == rank_a) && prefer_b));
- }
-}
-
-
-template <typename T>
-inline void
-FastS_MergeCopySortData(typename T::NodeType *node,
- SortData::Ref *dst,
- uint32_t &sortDataLen)
-{
- if (dst == NULL)
- return;
-
- SortDataIterator *src = node->NT_GetSortDataIterator();
- dst->_buf = src->GetBuf();
- dst->_len = src->GetLen();
- sortDataLen += dst->_len;
-}
-
-
-template <typename T>
-inline void
-FastS_MergeCopyHit(typename T::HitType *src,
- FastS_hitresult *dst)
-{
- dst->HT_SetGlobalID(src->HT_GetGlobalID());
- dst->HT_SetMetric(src->HT_GetMetric());
- dst->HT_SetPartID(src->HT_GetPartID());
- dst->setDistributionKey(src->getDistributionKey());
-}
-
-struct GlobalIdHasher {
- vespalib::hash_set<document::GlobalId, document::GlobalId::hash> seenSet;
- bool insert(const document::GlobalId & g_id) {
- return seenSet.insert(g_id).second;
- }
- GlobalIdHasher(size_t expected_size) : seenSet(expected_size * 3) {}
-};
-
-
-template <typename T, typename F>
-size_t
-FastS_InternalMergeHits(FastS_HitMerger<T> *merger)
-{
- typename T::SearchType *search = merger->GetSearch();
- typename T::NodeType **heap = merger->GetHeap();
- uint32_t heapSize = merger->GetHeapSize();
- typename T::NodeType *node = NULL;
- FastS_hitresult *beg = search->ST_GetAlignedHitBuf();
- FastS_hitresult *end = search->ST_GetAlignedHitBufEnd();
- FastS_hitresult *pt = beg;
-
- // multi-level sorting related variables
- SortData::Ref *sortRef = NULL;
- SortData::Ref *sortItr = NULL;
- uint32_t sortDataLen = 0;
-
- if (F::UseSortData() && !F::DropSortData()) {
- sortRef = merger->AllocSortRef(end - beg);
- sortItr = sortRef;
- }
-
- GlobalIdHasher seenGids(end - beg);
-
- FastS_make_heap(heap, heapSize, FastS_MergeCompare<T, F>);
-
- while ((pt < end) && (heapSize > 0)) {
- node = *heap;
- bool useHit = seenGids.insert(node->NT_GetHit()->HT_GetGlobalID());
- if (F::UseSortData()) {
- if (!F::DropSortData() && useHit) {
- FastS_MergeCopySortData<T>(node, sortItr++, sortDataLen);
- }
- node->NT_GetSortDataIterator()->Next();
- }
- if (useHit) {
- FastS_MergeCopyHit<T>(node->NT_GetHit(), pt++);
- }
- node->NT_NextHit();
- if (node->NT_GetNumHitsLeft() > 0) {
- FastS_pop_push_heap(heap, heapSize, node, FastS_MergeCompare<T, F>);
- } else {
- FastS_pop_heap(heap, heapSize--, FastS_MergeCompare<T, F>);
- }
- }
- if (pt != end) {
- LOG(warning, "Duplicate removal lead to %zd missing hits (wanted %zd, got %zd)",
- end - pt, end - beg, pt - beg);
- }
- merger->SetLastNode(node); // source of last hit
- if (F::UseSortData()) {
- FastS_assert(F::DropSortData() || sortItr == sortRef + (pt - beg));
- }
-
- // generate merged sort data
- if (F::UseSortData() && sortDataLen > 0) {
-
- FastS_assert(!F::DropSortData());
- search->ST_AllocSortData(sortDataLen);
-
- uint32_t offset = 0;
- uint32_t *sortIdx = search->ST_GetSortIndex();
- char *sortData = search->ST_GetSortData();
-
- sortItr = sortRef;
- for (uint32_t residue = (pt - beg); residue > 0; residue--) {
- *sortIdx++ = offset;
- memcpy(sortData + offset, sortItr->_buf, sortItr->_len);
- offset += sortItr->_len;
- sortItr++;
- }
- *sortIdx = offset;
- FastS_assert(sortItr == sortRef + (pt - beg));
- FastS_assert(offset == sortDataLen);
- }
- return (pt - beg);
-}
-
-//-----------------------------------------------------------------------------
-
-template <typename T>
-typename FastS_HitMerger<T>::NODE **
-FastS_HitMerger<T>::AllocHeap(uint32_t maxNodes)
-{
- FastS_assert(_heap == NULL);
- _heap = new NODE*[maxNodes];
- _heapSize = 0;
- _heapMax = maxNodes;
- return _heap;
-}
-
-
-template <typename T>
-SortData::Ref *
-FastS_HitMerger<T>::AllocSortRef(uint32_t size)
-{
- FastS_assert(_sortRef == NULL);
- _sortRef = new SortData::Ref[size];
- return _sortRef;
-}
-
-
-template <typename T>
-void
-FastS_HitMerger<T>::MergeHits()
-{
- uint32_t numNodes = _search->ST_GetNumNodes();
- bool dropSortData = _search->ST_ShouldDropSortData();
- bool useSortData = false;
- uint32_t numDocs = 0;
- uint64_t totalHits = 0;
- search::HitRank maxRank =
- std::numeric_limits<search::HitRank>::is_integer ?
- std::numeric_limits<search::HitRank>::min() :
- - std::numeric_limits<search::HitRank>::max();
- uint32_t sortDataDocs = 0;
-
- FastS_QueryResult *result = _search->ST_GetQueryResult();
-
- // just set totalHitCount for estimates
- if (_search->ST_IsEstimate()) {
- for (uint32_t i = 0; i < numNodes; i++) {
- _search->ST_GetNode(i)->NT_InitMerge(&numDocs, &totalHits,
- &maxRank, &sortDataDocs);
- }
- result->_totalHitCount = (_search->ST_GetEstParts() == 0) ? 0
- : (uint64_t) (((double)totalHits
- * (double)_search->ST_GetEstPartCutoff())
- / (double)_search->ST_GetEstParts());
- return;
- }
-
- // prepare nodes for merging
- NODE **heap = AllocHeap(numNodes);
- for (uint32_t i = 0; i < numNodes; i++) {
- if (_search->ST_GetNode(i)->NT_InitMerge(&numDocs, &totalHits,
- &maxRank, &sortDataDocs))
- {
- heap[_heapSize++] = _search->ST_GetNode(i);
- }
- }
-
- // check if we should use sort data for sorting
- if (sortDataDocs > 0) {
- if (sortDataDocs == numDocs) {
- useSortData = true;
- } else {
- LOG(warning, "Some results are missing sort data, sorting by rank instead");
- }
- }
-
- // set some result variables
- result->_totalHitCount = totalHits;
- result->_maxRank = maxRank;
-
- // allocate some needed structures
- _search->ST_SetNumHits(numDocs); // NB: allocs result buffer
-
- // do actual merging by invoking templated function
- if (useSortData) {
- if (dropSortData) {
- numDocs = FastS_InternalMergeHits
- <T, FastS_MergeFeatures<true, true> >(this);
- } else {
- numDocs = FastS_InternalMergeHits
- <T, FastS_MergeFeatures<true, false> >(this);
- }
- } else {
- numDocs = FastS_InternalMergeHits
- <T, FastS_MergeFeatures<false, false> >(this);
- }
- _search->ST_AdjustNumHits(numDocs);
-
- // detect incomplete/fuzzy results
- if (_search->ST_ShouldLimitHitsPerNode()) {
- if (_search->ST_GetAlignedHitCount() < _search->ST_GetAlignedMaxHits() &&
- result->_totalHitCount > (_search->ST_GetAlignedSearchOffset() +
- _search->ST_GetAlignedHitCount()))
- {
- _incomplete = true;
- }
-
- NODE *lastNode = GetLastNode();
- for (size_t i(0), m(_search->ST_GetNumNodes()); i < m; i++) {
- NODE *node(_search->ST_GetNode(i));
- if (node == lastNode ||
- node->NT_GetTotalHits() == 0)
- continue;
- if (node->NT_GetNumHitsLeft() == 0 &&
- node->NT_GetTotalHits() > (_search->ST_GetAlignedSearchOffset() +
- node->NT_GetNumHits()))
- {
- _fuzzy = true;
- break;
- }
- }
- }
-}
-
-//-----------------------------------------------------------------------------
-
-template class FastS_HitMerger<FastS_MergeHits_DummyMerge>; // for API check
-template class FastS_HitMerger<FastS_FNETMerge>;
-
-//-----------------------------------------------------------------------------
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/mergehits.h b/searchcore/src/vespa/searchcore/fdispatch/search/mergehits.h
deleted file mode 100644
index 306229b4730..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/mergehits.h
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/searchlib/common/sortdata.h>
-#include <vespa/searchlib/common/packets.h>
-#include <vespa/document/base/globalid.h>
-
-//-----------------------------------------------------------------------------
-
-class FastS_hitresult;
-class FastS_QueryResult;
-class FastS_FNET_Search;
-class FastS_FNET_SearchNode;
-
-// T::HitType API
-
-struct FastS_MergeHits_DummyHit
-{
- document::GlobalId _emptyGid;
- uint32_t HT_GetDocID() { return 0; }
- const document::GlobalId & HT_GetGlobalID() { return _emptyGid; }
- search::HitRank HT_GetMetric() { return 0; }
- uint32_t HT_GetPartID() { return 0; }
- uint32_t getDistributionKey() { return 0; }
- void HT_SetDocID(uint32_t val) { (void) val; }
- void HT_SetGlobalID(const document::GlobalId & val) { (void) val; }
- void HT_SetMetric(search::HitRank val) { (void) val; }
- void HT_SetPartID(uint32_t val) { (void) val; }
- void setDistributionKey(uint32_t val) { (void) val; }
-};
-
-// T::NodeType API
-
-struct FastS_MergeHits_DummyNode
-{
- bool NT_InitMerge(uint32_t *numDocs, uint64_t *totalHits,
- search::HitRank *maxRank, uint32_t *sortDataDocs)
- {
- (void) numDocs;
- (void) totalHits;
- (void) maxRank;
- (void) sortDataDocs;
- return false;
- }
- search::common::SortDataIterator *NT_GetSortDataIterator() { return NULL; }
- FastS_MergeHits_DummyHit *NT_GetHit() { return NULL; }
- uint32_t NT_GetNumHitsUsed() { return 0; }
- uint32_t NT_GetNumHitsLeft() { return 0; }
- uint64_t NT_GetTotalHits() { return 0; }
- uint32_t NT_GetNumHits() { return 0; }
- void NT_NextHit() { }
-};
-
-// T::SearchType API
-
-struct FastS_MergeHits_DummySearch
-{
- FastS_MergeHits_DummyNode *ST_GetNode(size_t i) { (void) i; return NULL; }
- uint32_t ST_GetNumNodes() { return 0; }
- bool ST_IsEstimate() { return false; }
- uint32_t ST_GetEstParts() { return 0; }
- uint32_t ST_GetEstPartCutoff() { return 0; }
- bool ST_ShouldDropSortData() { return false; }
- bool ST_ShouldLimitHitsPerNode() { return false; }
- void ST_SetNumHits(uint32_t numHits) { (void) numHits; }
- void ST_AdjustNumHits(uint32_t nH) { (void) nH; }
- uint32_t ST_GetAlignedSearchOffset() { return 0; }
- uint32_t ST_GetAlignedMaxHits() { return 0; }
- uint32_t ST_GetAlignedHitCount() { return 0; }
- FastS_hitresult *ST_GetAlignedHitBuf() { return NULL; }
- FastS_hitresult *ST_GetAlignedHitBufEnd() { return NULL; }
- void ST_AllocSortData(uint32_t len) { (void) len; }
- uint32_t *ST_GetSortIndex() { return NULL; }
- char *ST_GetSortData() { return NULL; }
- FastS_QueryResult *ST_GetQueryResult() { return NULL; }
-};
-
-// T (Merge Type) API
-
-struct FastS_MergeHits_DummyMerge
-{
- typedef FastS_MergeHits_DummyHit HitType;
- typedef FastS_MergeHits_DummyNode NodeType;
- typedef FastS_MergeHits_DummySearch SearchType;
-};
-
-//-----------------------------------------------------------------------------
-
-struct FastS_FNETMerge
-{
- typedef search::fs4transport::FS4Packet_QUERYRESULTX::FS4_hit HitType;
- typedef FastS_FNET_SearchNode NodeType;
- typedef FastS_FNET_Search SearchType;
-};
-
-//-----------------------------------------------------------------------------
-
-template <typename T>
-class FastS_HitMerger
-{
-private:
- FastS_HitMerger(const FastS_HitMerger &);
- FastS_HitMerger& operator=(const FastS_HitMerger &);
-
-
- typedef typename T::NodeType NODE;
- typedef typename T::SearchType SEARCH;
-
- // owning search object
- SEARCH *_search;
-
- // sorting heap
- NODE **_heap;
- uint32_t _heapSize;
- uint32_t _heapMax;
-
- // temporary array for merging sortdata
- search::common::SortData::Ref *_sortRef;
-
- // limit hits per node effect variables
- NODE *_lastNode;
- bool _incomplete;
- bool _fuzzy;
-
-public:
- FastS_HitMerger(SEARCH *search) : _search(search),
- _heap(NULL),
- _heapSize(0),
- _heapMax(0),
- _sortRef(NULL),
- _lastNode(NULL),
- _incomplete(false),
- _fuzzy(false)
- {}
-
- ~FastS_HitMerger()
- {
- delete [] _heap;
- delete [] _sortRef;
- }
-
- NODE **AllocHeap(uint32_t maxNodes);
- search::common::SortData::Ref *AllocSortRef(uint32_t size);
- void SetLastNode(NODE *lastNode) { _lastNode = lastNode; }
-
- SEARCH *GetSearch() const { return _search; }
- NODE **GetHeap() const { return _heap; }
- uint32_t GetHeapSize() const { return _heapSize; }
- uint32_t GetHeapMax() const { return _heapMax; }
- NODE *GetLastNode() const { return _lastNode; }
- bool WasIncomplete() const { return _incomplete; }
- bool WasFuzzy() const { return _fuzzy; }
-
- search::common::SortData::Ref *GetSortRef() const { return _sortRef; }
-
- void MergeHits();
-};
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/nodemanager.cpp b/searchcore/src/vespa/searchcore/fdispatch/search/nodemanager.cpp
deleted file mode 100644
index abe37011850..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/nodemanager.cpp
+++ /dev/null
@@ -1,438 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-
-#include "nodemanager.h"
-#include "datasetcollection.h"
-#include "plain_dataset.h"
-#include "engine_base.h"
-#include <vespa/config/common/exceptions.h>
-#include <vespa/fastos/thread.h>
-#include <set>
-
-#include <vespa/log/log.h>
-LOG_SETUP(".search.nodemanager");
-
-void
-FastS_NodeManager::configure(std::unique_ptr<PartitionsConfig> cfg)
-{
- LOG(config, "configuring datasetcollection from '%s'",
- _configUri.getConfigId().c_str());
- SetPartMap(*cfg, _waitUpMS);
- _componentConfig.addConfig(
- vespalib::ComponentConfigProducer::Config("fdispatch.nodemanager",
- _fetcher->getGeneration(),
- "will not update generation unless config has changed"));
-}
-
-
-class AdminBadEngines
-{
- std::set<vespalib::string> _bad;
-public:
- void addAdminBad(const vespalib::string &name) {
- _bad.insert(name);
- }
-
- bool isAdminBad(const vespalib::string &name) const {
- return _bad.find(name) != _bad.end();
- }
-};
-
-class CollectAdminBadEngines
-{
- AdminBadEngines &_adminBadEngines;
-
-public:
-
- CollectAdminBadEngines(AdminBadEngines &adminBadEngines)
- : _adminBadEngines(adminBadEngines)
- {
- }
-
- void operator()(FastS_EngineBase* engine)
- {
- if (engine->isAdminBad()) {
- _adminBadEngines.addAdminBad(engine->GetName());
- }
- }
-};
-
-
-class PropagateAdminBadEngines
-{
- const AdminBadEngines &_adminBadEngines;
-
-public:
-
- PropagateAdminBadEngines(const AdminBadEngines &adminBadEngines)
- : _adminBadEngines(adminBadEngines)
- {
- }
-
- void operator()(FastS_EngineBase* engine)
- {
- if (_adminBadEngines.isAdminBad(engine->GetName())) {
- engine->MarkBad(FastS_EngineBase::BAD_ADMIN);
- }
- }
-};
-
-
-FastS_NodeManager::FastS_NodeManager(vespalib::SimpleComponentConfigProducer &componentConfig,
- FastS_AppContext *appCtx,
- uint32_t partition)
- : _componentConfig(componentConfig),
- _managerLock(),
- _configLock(),
- _appCtx(appCtx),
- _mldPartit(partition),
- _mldDocStamp(0),
- _mldDocStampMin(0),
- _gencnt(0),
- _fetcher(),
- _configUri(config::ConfigUri::createEmpty()),
- _lastPartMap(NULL),
- _datasetCollection(NULL),
- _oldDSCList(NULL),
- _tempFail(false),
- _failed(false),
- _hasDsc(false),
- _checkTempFailScheduled(false),
- _shutdown(false),
- _waitUpMS(20000)
-{
- _datasetCollection = new FastS_DataSetCollection(_appCtx);
- FastS_assert(_datasetCollection != NULL);
- _datasetCollection->Configure(NULL, 0);
- FastOS_Time now;
- now.SetNow();
- _mldDocStamp = now.GetSeconds();
- _mldDocStampMin = _mldDocStamp;
-}
-
-
-FastS_NodeManager::~FastS_NodeManager()
-{
- free(_lastPartMap);
- FastS_assert(_datasetCollection != NULL);
- _datasetCollection->subRef();
-}
-
-void
-FastS_NodeManager::CheckTempFail()
-{
- bool tempfail;
-
- _checkTempFailScheduled = false;
- tempfail = false;
- {
- std::lock_guard<std::mutex> mangerGuard(_managerLock);
- FastS_DataSetCollection *dsc = PeekDataSetCollection();
- for (unsigned int i = 0; i < dsc->GetMaxNumDataSets(); i++) {
- FastS_DataSetBase *ds;
- FastS_PlainDataSet *ds_plain;
- if ((ds = dsc->PeekDataSet(i)) != NULL &&
- (ds_plain = ds->GetPlainDataSet()) != NULL &&
- ds_plain->GetTempFail()) {
- tempfail = true;
- break;
- }
- }
- }
- _tempFail = tempfail;
-}
-
-void
-FastS_NodeManager::SubscribePartMap(const config::ConfigUri & configUri)
-{
- vespalib::string configId(configUri.getConfigId());
- LOG(debug, "loading new datasetcollection from %s", configId.c_str());
- try {
- _configUri = configUri;
- uint32_t oldWaitTime = _waitUpMS;
- _waitUpMS = 100;
- _fetcher.reset(new config::ConfigFetcher(_configUri.getContext()));
- _fetcher->subscribe<PartitionsConfig>(configId, this);
- _fetcher->start();
- _waitUpMS = oldWaitTime;
- if (_gencnt == 0) {
- throw new config::InvalidConfigException("failure during initial configuration: bad partition map");
- }
- } catch (std::exception &ex) {
- LOG(error, "Runtime exception: %s", (const char *) ex.what());
- EV_STOPPING("", "bad partitions config");
- exit(1);
- }
-}
-
-
-uint32_t
-FastS_NodeManager::SetPartMap(const PartitionsConfig& partmap,
- unsigned int waitms)
-{
- std::lock_guard<std::mutex> configGuard(_configLock);
- FastS_DataSetCollDesc *configDesc = new FastS_DataSetCollDesc();
- if (!configDesc->ReadConfig(partmap)) {
- LOG(error, "NodeManager::SetPartMap: Failed to load configuration");
- delete configDesc;
- return 0;
- }
- int retval = SetCollDesc(configDesc, waitms);
- return retval;
-}
-
-
-uint32_t
-FastS_NodeManager::SetCollDesc(FastS_DataSetCollDesc *configDesc,
- unsigned int waitms)
-{
- FastS_DataSetCollection *newCollection;
- uint32_t gencnt;
-
- if (_shutdown) return 0;
-
- AdminBadEngines adminBad;
-
- {
- CollectAdminBadEngines adminBadCollect(adminBad);
- FastS_DataSetCollection *dsc = GetDataSetCollection();
- for (uint32_t i = 0; i < dsc->GetMaxNumDataSets(); i++) {
- FastS_DataSetBase *ds;
- FastS_PlainDataSet *ds_plain;
- if ((ds = dsc->PeekDataSet(i)) == NULL ||
- (ds_plain = ds->GetPlainDataSet()) == NULL)
- continue;
-
- ds_plain->ForEachEngine(adminBadCollect);
- }
- dsc->subRef();
- }
-
-
- newCollection = new FastS_DataSetCollection(_appCtx);
- if (!newCollection->Configure(configDesc, _gencnt + 1)) {
- LOG(error, "NodeManager::SetPartMap: Inconsistent configuration");
- newCollection->subRef();
- return 0;
- }
-
- {
- PropagateAdminBadEngines adminBadPropagate(adminBad);
- for (uint32_t i = 0; i < newCollection->GetMaxNumDataSets(); i++) {
- FastS_DataSetBase *ds;
- FastS_PlainDataSet *ds_plain;
- if ((ds = newCollection->PeekDataSet(i)) == NULL ||
- (ds_plain = ds->GetPlainDataSet()) == NULL)
- continue;
-
- ds_plain->ForEachEngine(adminBadPropagate);
- }
- }
-
- if (waitms > 0) {
- FastOS_Time last;
- unsigned int rwait;
- bool allup;
- last.SetNow();
- while (1) {
- allup = newCollection->AreEnginesReady();
- rwait = (unsigned int) last.MilliSecsToNow();
- if (rwait >= waitms || allup)
- break;
- FastOS_Thread::Sleep(10);
- };
- if (allup) {
- LOG(debug, "All new engines up after %d ms", rwait);
- } else {
- LOG(debug, "Some new engines still down after %d ms", rwait);
- }
- }
-
- gencnt = SetDataSetCollection(newCollection);
-
- ScheduleCheckTempFail(FastS_NoID32());
- return gencnt;
-}
-
-
-
-/**
- * When calling this method, a single reference on the 'dsc' parameter
- * is passed to the monitor object.
- *
- * @return generation count, or 0 on fail.
- * @param dsc new dataset collection. A single reference is passed
- * to the monitor when this method is invoked.
- **/
-uint32_t
-FastS_NodeManager::SetDataSetCollection(FastS_DataSetCollection *dsc)
-{
- if (dsc == NULL)
- return 0;
-
- uint32_t gencnt = 0;
- FastS_DataSetCollection *old_dsc = NULL;
-
- if (!dsc->IsValid()) {
- LOG(error, "NodeManager::SetDataSetCollection: Inconsistent configuration");
- dsc->subRef();
-
- } else {
- {
- std::lock_guard<std::mutex> managerGuard(_managerLock);
- _gencnt++;
- gencnt = _gencnt;
-
- old_dsc = _datasetCollection;
- _datasetCollection = dsc;
-
- // put old config on service list
- FastS_assert(old_dsc != NULL);
- if (!old_dsc->IsLastRef()) {
- old_dsc->_nextOld = _oldDSCList;
- _oldDSCList = old_dsc;
- old_dsc = NULL;
- }
- _hasDsc = true;
- }
-
- if (old_dsc != NULL)
- old_dsc->subRef();
- }
- return gencnt;
-}
-
-
-FastS_DataSetCollection *
-FastS_NodeManager::GetDataSetCollection()
-{
- FastS_DataSetCollection *ret;
-
- std::lock_guard<std::mutex> managerGuard(_managerLock);
- ret = _datasetCollection;
- FastS_assert(ret != NULL);
- ret->addRef();
-
- return ret;
-}
-
-
-void
-FastS_NodeManager::ShutdownConfig()
-{
- FastS_DataSetCollection *dsc;
- FastS_DataSetCollection *old_dsc;
-
- {
- std::lock_guard<std::mutex> configGuard(_configLock);
- std::lock_guard<std::mutex> managerGuard(_managerLock);
- _shutdown = true; // disallow SetPartMap
- dsc = _datasetCollection;
- _datasetCollection = new FastS_DataSetCollection(_appCtx);
- _datasetCollection->Configure(NULL, 0);
- old_dsc = _oldDSCList;
- _oldDSCList = NULL;
- }
- dsc->AbortQueryQueues();
- dsc->subRef();
- while (old_dsc != NULL) {
- dsc = old_dsc;
- old_dsc = old_dsc->_nextOld;
- dsc->_nextOld = NULL;
- dsc->AbortQueryQueues();
- dsc->subRef();
- }
-}
-
-ChildInfo
-FastS_NodeManager::getChildInfo()
-{
- ChildInfo r;
- r.activeDocs.valid = true;
- FastS_DataSetCollection *dsc = GetDataSetCollection();
-
- for (unsigned int i = 0; i < dsc->GetMaxNumDataSets(); i++) {
- FastS_DataSetBase *ds;
- FastS_PlainDataSet *ds_plain;
- if ((ds = dsc->PeekDataSet(i)) == NULL ||
- (ds_plain = ds->GetPlainDataSet()) == NULL)
- continue;
- r.maxNodes += ds_plain->_partMap._childmaxnodesSinceReload;
- r.activeNodes += ds_plain->_partMap._childnodes;
- r.maxParts += ds_plain->_partMap._childmaxpartsSinceReload;
- r.activeParts += ds_plain->_partMap._childparts;
- PossCount rowActive = ds_plain->getActiveDocs();
- if (rowActive.valid) {
- r.activeDocs.count += rowActive.count;
- } else {
- r.activeDocs.valid = false;
- }
- }
-
- dsc->subRef();
- return r;
-}
-
-void
-FastS_NodeManager::CheckEvents(FastS_TimeKeeper *timeKeeper)
-{
- // CHECK SCHEDULED OPERATIONS
-
- if (_checkTempFailScheduled)
- CheckTempFail();
-
- // CHECK QUERY QUEUES
-
- FastS_DataSetCollection *dsc = GetDataSetCollection();
-
- dsc->CheckQueryQueues(timeKeeper);
- dsc->subRef();
-
- // check old query queues and discard old configs
-
- FastS_DataSetCollection *old_dsc;
- FastS_DataSetCollection *prev = NULL;
- FastS_DataSetCollection *tmp;
-
- {
- std::lock_guard<std::mutex> managerGuard(_managerLock);
- old_dsc = _oldDSCList;
- }
-
- while (old_dsc != NULL) {
- if (old_dsc->IsLastRef()) {
- if (prev == NULL) {
- std::unique_lock<std::mutex> managerGuard(_managerLock);
- if (_oldDSCList == old_dsc) {
- _oldDSCList = old_dsc->_nextOld;
- } else {
- prev = _oldDSCList;
- managerGuard.unlock();
- while (prev->_nextOld != old_dsc)
- prev = prev->_nextOld;
-
- prev->_nextOld = old_dsc->_nextOld;
- }
- } else {
- prev->_nextOld = old_dsc->_nextOld;
- }
- tmp = old_dsc;
- old_dsc = old_dsc->_nextOld;
- tmp->subRef();
-
- } else {
-
- old_dsc->CheckQueryQueues(timeKeeper);
- prev = old_dsc;
- old_dsc = old_dsc->_nextOld;
- }
- }
-}
-
-uint32_t
-FastS_NodeManager::GetMldDocstamp()
-{
- if (!_hasDsc)
- return 0;
- return _mldDocStamp;
-}
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/nodemanager.h b/searchcore/src/vespa/searchcore/fdispatch/search/nodemanager.h
deleted file mode 100644
index 21180014995..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/nodemanager.h
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include "child_info.h"
-#include "configdesc.h"
-#include <vespa/config/helper/configfetcher.h>
-#include <vespa/vespalib/net/simple_component_config_producer.h>
-#include <vespa/config/subscription/configuri.h>
-#include <vespa/vespalib/util/executor.h>
-#include <mutex>
-
-using vespa::config::search::core::PartitionsConfig;
-
-class FastS_DataSetBase;
-class FastS_AppContext;
-class FastS_DataSetCollection;
-class FastS_TimeKeeper;
-
-class FastS_NodeManager : public config::IFetcherCallback<PartitionsConfig>
-{
-private:
- FastS_NodeManager(const FastS_NodeManager &);
- FastS_NodeManager& operator=(const FastS_NodeManager &);
-
- vespalib::SimpleComponentConfigProducer &_componentConfig;
-
- std::mutex _managerLock;
- std::mutex _configLock;
- FastS_AppContext *_appCtx;
- uint32_t _mldPartit;
- uint32_t _mldDocStamp; // Bumped for all cache flushes
- uint32_t _mldDocStampMin; // Bumped for global cache flush
- uint32_t _gencnt;
-
-
-
- std::unique_ptr<config::ConfigFetcher> _fetcher;
- config::ConfigUri _configUri;
-
- char *_lastPartMap;
- FastS_DataSetCollection *_datasetCollection; // current node config
- FastS_DataSetCollection *_oldDSCList; // list of old node configs
-
- bool _tempFail;
- bool _failed;
- bool _hasDsc;
-
- volatile bool _checkTempFailScheduled;
- volatile bool _shutdown;
- volatile uint32_t _waitUpMS;
-
-protected:
-
- void configure(std::unique_ptr<PartitionsConfig> cfg) override;
-
-public:
- FastS_NodeManager(vespalib::SimpleComponentConfigProducer &componentConfig,
- FastS_AppContext *appCtx,
- uint32_t partition);
- ~FastS_NodeManager();
-
- void SubscribePartMap(const config::ConfigUri & configUri);
-
- uint32_t GetMldPartition() const { return _mldPartit; }
- uint32_t GetMldDocstamp();
-
- bool Failed() const { return _failed; }
- bool GetTempFail() const { return _tempFail; }
-
- void ScheduleCheckTempFail(uint32_t datasetid) {
- (void) datasetid;
- _checkTempFailScheduled = true;
- }
-
- FastS_DataSetCollection *PeekDataSetCollection()
- { return _datasetCollection; }
-
- void CheckTempFail();
- uint32_t SetPartMap(const PartitionsConfig& partmap, unsigned int waitms);
- uint32_t SetCollDesc(FastS_DataSetCollDesc *configDesc, unsigned int waitms);
- uint32_t SetDataSetCollection(FastS_DataSetCollection *dsc);
- FastS_DataSetCollection *GetDataSetCollection();
- ChildInfo getChildInfo();
- void ShutdownConfig();
-
- void CheckEvents(FastS_TimeKeeper *timeKeeper); // invoked by FNET thread
-};
-
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/plain_dataset.cpp b/searchcore/src/vespa/searchcore/fdispatch/search/plain_dataset.cpp
deleted file mode 100644
index cd68b208ff4..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/plain_dataset.cpp
+++ /dev/null
@@ -1,560 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "plain_dataset.h"
-#include "datasetcollection.h"
-#include "engine_base.h"
-#include "nodemanager.h"
-#include <vespa/searchcore/fdispatch/common/search.h>
-#include <vespa/vespalib/util/host_name.h>
-#include <iomanip>
-
-#include <vespa/log/log.h>
-LOG_SETUP(".search.plain_dataset");
-
-//--------------------------------------------------------------------------
-
-static inline int imax(int a, int b) { return (a > b) ? a : b; }
-
-//--------------------------------------------------------------------------
-
-FastS_PartitionMap::Partition::Partition()
- : _engines(nullptr),
- _maxnodesNow(0),
- _maxnodesSinceReload(0),
- _nodes(0),
- _maxpartsNow(0),
- _maxpartsSinceReload(0),
- _parts(0)
-{
-}
-
-
-FastS_PartitionMap::Partition::~Partition()
-{
- FastS_assert(_engines == nullptr);
- FastS_assert(_nodes == 0);
- FastS_assert(_parts == 0);
-}
-
-//--------------------------------------------------------------------------
-
-FastS_PartitionMap::FastS_PartitionMap(FastS_DataSetDesc *desc)
- : _partitions(nullptr),
- _partBits(desc->GetPartBits()),
- _rowBits(desc->GetRowBits()),
- _num_partitions(desc->GetNumParts()),
- _first_partition(desc->GetFirstPart()),
- _minchildparts(desc->GetMinChildParts()),
- _maxNodesDownPerFixedRow(desc->getMaxNodesDownPerFixedRow()),
- _useRoundRobinForFixedRow(desc->useRoundRobinForFixedRow()),
- _childnodes(0),
- _childmaxnodesNow(0),
- _childmaxnodesSinceReload(0),
- _childparts(0),
- _childmaxpartsNow(0),
- _childmaxpartsSinceReload(0),
- _mpp(desc->getMPP()),
- _maxRows(0)
-{
- // finalize config settings
- if (_num_partitions > (1U << _partBits)) {
- LOG(error, "Too many partitions %d constrained by partbits %d", _num_partitions, _partBits);
- _num_partitions = (1U << _partBits);
- }
-
- if (_num_partitions > 0) {
- _partitions = new Partition[_num_partitions];
- FastS_assert(_partitions != nullptr);
- }
- for (FastS_EngineDesc *curr = desc->GetEngineList(); curr != nullptr; curr = curr->GetNext()) {
- _maxRows = std::max(_maxRows, curr->GetConfRowID());
- }
- _numPartitions = std::vector<uint32_t>(getNumRows(), 0);
- for (FastS_EngineDesc *curr = desc->GetEngineList(); curr != nullptr; curr = curr->GetNext()) {
- size_t rowId(curr->GetConfRowID());
- _numPartitions[rowId] = std::max(_numPartitions[rowId], curr->GetConfPartID()+1);
- }
-}
-
-
-FastS_PartitionMap::~FastS_PartitionMap()
-{
- delete [] _partitions;
-}
-
-
-void
-FastS_PartitionMap::RecalcPartCnt(uint32_t partid)
-{
- uint32_t maxparts = 0;
- uint32_t parts = 0;
- for (FastS_EngineBase * engine = _partitions[partid]._engines;
- engine != nullptr; engine = engine->_nextpart) {
- maxparts = imax(engine->_reported._maxParts, maxparts);
- parts = imax(engine->_reported._actParts, parts);
- }
- if (_partitions[partid]._maxpartsNow != maxparts) {
- _childmaxpartsNow += maxparts - _partitions[partid]._maxpartsNow;
- _partitions[partid]._maxpartsNow = maxparts;
- if (_childmaxpartsNow > _childmaxpartsSinceReload)
- _childmaxpartsSinceReload = _childmaxpartsNow;
- }
- if (_partitions[partid]._parts != parts) {
- _childparts += parts - _partitions[partid]._parts;
- _partitions[partid]._parts = parts;
- }
-}
-
-
-void
-FastS_PartitionMap::LinkIn(FastS_EngineBase *engine)
-{
- uint32_t partid = engine->_partid - _first_partition;
-
- FastS_assert(partid < GetSize());
- FastS_assert(engine->_nextpart == nullptr);
- FastS_assert(engine->_prevpart == nullptr);
- FastS_PartitionMap::Partition & part = _partitions[partid];
- engine->_nextpart = part._engines;
- if (engine->_nextpart != nullptr)
- engine->_nextpart->_prevpart = engine;
- part._engines = engine;
- part._maxnodesNow += engine->_reported._maxNodes;
- part._maxnodesSinceReload = std::max(part._maxnodesSinceReload, part._maxnodesNow);
- part._nodes += engine->_reported._actNodes;
- _childmaxnodesNow += engine->_reported._maxNodes;
- _childmaxnodesSinceReload = std::max(_childmaxnodesSinceReload, _childmaxnodesNow);
- _childnodes += engine->_reported._actNodes;
- if (part._maxpartsNow <= engine->_reported._maxParts) {
- _childmaxpartsNow += engine->_reported._maxParts - part._maxpartsNow;
- _childmaxpartsSinceReload += std::max(_childmaxpartsSinceReload, _childmaxpartsNow);
- part._maxpartsNow = engine->_reported._maxParts;
- }
- if (part._parts < engine->_reported._actParts) {
- _childparts += engine->_reported._actParts - part._parts;
- part._parts = engine->_reported._actParts;
- }
-}
-
-
-void
-FastS_PartitionMap::LinkOut(FastS_EngineBase *engine)
-{
- uint32_t partid = engine->_partid - _first_partition;
-
- FastS_assert(partid < GetSize());
- if (engine->_nextpart != nullptr)
- engine->_nextpart->_prevpart = engine->_prevpart;
- if (engine->_prevpart != nullptr)
- engine->_prevpart->_nextpart = engine->_nextpart;
- if (_partitions[partid]._engines == engine)
- _partitions[partid]._engines = engine->_nextpart;
-
- _partitions[partid]._maxnodesNow -= engine->_reported._maxNodes;
- _partitions[partid]._nodes -= engine->_reported._actNodes;
- _childmaxnodesNow -= engine->_reported._maxNodes;
- _childnodes -= engine->_reported._actNodes;
- if (_partitions[partid]._maxpartsNow <= engine->_reported._maxParts ||
- _partitions[partid]._parts <= engine->_reported._actParts)
- RecalcPartCnt(partid);
-
- engine->_nextpart = nullptr;
- engine->_prevpart = nullptr;
-}
-
-//--------------------------------------------------------------------------
-
-FastS_PlainDataSet::MHPN_log_t::MHPN_log_t()
- : _cnt(0),
- _incompleteCnt(0),
- _fuzzyCnt(0)
-{
-}
-
-//--------------------------------------------------------------------------
-
-void
-FastS_PlainDataSet::InsertEngine(FastS_EngineBase *engine)
-{
- _enginesArray.push_back(engine);
-}
-
-FastS_EngineBase *
-FastS_PlainDataSet::ExtractEngine()
-{
- if (_enginesArray.size() > 0) {
- FastS_EngineBase *ret = _enginesArray.back();
- _enginesArray.pop_back();
- return ret;
- } else {
- return nullptr;
- }
-}
-
-FastS_PlainDataSet::FastS_PlainDataSet(FastS_AppContext *appCtx,
- FastS_DataSetDesc *desc)
- : FastS_DataSetBase(appCtx, desc),
- _partMap(desc),
- _stateOfRows(_partMap.getNumRows(), 0.001, desc->GetQueryDistributionMode().getLatencyDecayRate()),
- _MHPN_log(),
- _slowQueryLimitFactor(desc->GetSlowQueryLimitFactor()),
- _slowQueryLimitBias(desc->GetSlowQueryLimitBias()),
- _slowDocsumLimitFactor(desc->GetSlowDocsumLimitFactor()),
- _slowDocsumLimitBias(desc->GetSlowDocsumLimitBias()),
- _monitorInterval(desc->getMonitorInterval()),
- _higherCoverageMaxSearchWait(desc->getHigherCoverageMaxSearchWait()),
- _higherCoverageMinSearchWait(desc->getHigherCoverageMinSearchWait()),
- _higherCoverageBaseSearchWait(desc->getHigherCoverageBaseSearchWait()),
- _minimalSearchCoverage(desc->getMinimalSearchCoverage()),
- _higherCoverageMaxDocSumWait(desc->getHigherCoverageMaxDocSumWait()),
- _higherCoverageMinDocSumWait(desc->getHigherCoverageMinDocSumWait()),
- _higherCoverageBaseDocSumWait(desc->getHigherCoverageBaseDocSumWait()),
- _minimalDocSumCoverage(desc->getMinimalDocSumCoverage()),
- _maxHitsPerNode(desc->GetMaxHitsPerNode()),
- _estimateParts(desc->GetEstimateParts()),
- _estimatePartCutoff(desc->GetEstPartCutoff()),
- _queryDistributionMode(desc->GetQueryDistributionMode()),
- _randState()
-{
- uint32_t seed = 0;
- const char *hostname = vespalib::HostName::get().c_str();
- unsigned const char *p = reinterpret_cast<unsigned const char *>(hostname);
-
- if (p != nullptr) {
- while (*p != '\0') {
- seed = (seed << 7) + *p + (seed >> 25);
- p++;
- }
- }
- seed ^= _createtime.GetSeconds();
- seed ^= _createtime.GetMicroSeconds();
- _randState.srand48(seed);
-}
-
-
-FastS_PlainDataSet::~FastS_PlainDataSet() = default;
-
-void
-FastS_PlainDataSet::UpdateMaxHitsPerNodeLog(bool incomplete, bool fuzzy)
-{
- auto dsGuard(getDsGuard());
- _MHPN_log._cnt++;
- if (incomplete)
- _MHPN_log._incompleteCnt++;
- if (fuzzy)
- _MHPN_log._fuzzyCnt++;
-}
-
-
-bool
-FastS_PlainDataSet::RefCostUseNewEngine(FastS_EngineBase *oldEngine,
- FastS_EngineBase *newEngine,
- unsigned int *oldCount)
-{
- if (oldEngine->_totalrefcost + oldEngine->_config._unitrefcost >
- newEngine->_totalrefcost + newEngine->_config._unitrefcost) {
- *oldCount = 1;
- return true;
- }
- if (oldEngine->_totalrefcost + oldEngine->_config._unitrefcost <
- newEngine->_totalrefcost + newEngine->_config._unitrefcost)
- return false;
- /* Use random generator for tie breaker */
- (*oldCount)++;
- return ((_randState.lrand48() % *oldCount) == 0);
-}
-
-void
-FastS_PlainDataSet::updateSearchTime(double searchTime, uint32_t rowId)
-{
- auto dsGuard(getDsGuard());
- _stateOfRows.updateSearchTime(searchTime, rowId);
-}
-
-uint32_t
-FastS_PlainDataSet::getRandomWeightedRow() const
-{
- return _stateOfRows.getRandomWeightedRow();
-}
-
-
-bool
-FastS_PlainDataSet::UseNewEngine(FastS_EngineBase *oldEngine,
- FastS_EngineBase *newEngine,
- unsigned int *oldCount)
-{
- /*
- * If old engine has used _indexSwitchMinSearchGrace seconds
- * of grace period then select new engine if it has used
- * less grace period.
- */
- if (!EngineDocStampOK(oldEngine->_reported._docstamp) &&
- (EngineDocStampOK(newEngine->_reported._docstamp)))
- {
- *oldCount = 1;
- return true;
- }
-
- /*
- * If new engine has used _indexSwitchMinSearchGrace seconds
- * of grace period then select old engine if it has used
- * less grace period.
- */
- if (!EngineDocStampOK(newEngine->_reported._docstamp) &&
- (EngineDocStampOK(oldEngine->_reported._docstamp)))
- {
- return false;
- }
-
- return RefCostUseNewEngine(oldEngine, newEngine, oldCount);
-}
-
-FastS_EngineBase *
-FastS_PlainDataSet::getPartition(const std::unique_lock<std::mutex> &dsGuard, uint32_t partindex, uint32_t rowid)
-{
- (void) dsGuard;
- FastS_EngineBase* ret = nullptr;
-
- if (IsValidPartIndex_HasLock(partindex)) {
- for (FastS_EngineBase* iter = _partMap._partitions[partindex]._engines;
- iter != nullptr && ret == nullptr;
- iter = iter->_nextpart) {
-
- // NB: cost race condition
-
- if (!iter->IsRealBad() &&
- EngineDocStampOK(iter->_reported._docstamp) &&
- iter->_config._confRowID == rowid) {
- ret = iter;
- }
- }
- }
-
- if (ret != nullptr) {
- ret->AddCost();
- }
- return ret;
-}
-
-size_t
-FastS_PlainDataSet::countNodesUpInRow_HasLock(uint32_t rowid)
-{
- size_t count = 0;
- const size_t numParts = _partMap.GetSize();
- for (size_t partindex = 0; partindex < numParts; ++partindex) {
- for (FastS_EngineBase* iter = _partMap._partitions[partindex]._engines;
- iter != nullptr;
- iter = iter->_nextpart)
- {
- if (!iter->IsRealBad() &&
- EngineDocStampOK(iter->_reported._docstamp) &&
- iter->_config._confRowID == rowid)
- {
- ++count;
- break;
- }
- }
- }
- return count;
-}
-
-FastS_EngineBase *
-FastS_PlainDataSet::getPartition(const std::unique_lock<std::mutex> &dsGuard, uint32_t partindex)
-{
- (void) dsGuard;
- FastS_EngineBase* ret = nullptr;
- unsigned int oldCount = 1;
- unsigned int engineCount = 0;
-
- if (IsValidPartIndex_HasLock(partindex)) {
- for (FastS_EngineBase* iter = _partMap._partitions[partindex]._engines;
- iter != nullptr;
- iter = iter->_nextpart) {
-
- // NB: cost race condition
-
- if (!iter->IsRealBad() &&
- (iter->_config._unitrefcost > 0) &&
- EngineDocStampOK(iter->_reported._docstamp))
- {
- engineCount++;
- if (ret == nullptr || UseNewEngine(ret, iter, &oldCount))
- ret = iter;
- }
- }
- }
-
- if (engineCount < getMPP()) {
- ret = nullptr;
- }
- if (ret != nullptr) {
- ret->AddCost();
- }
- return ret;
-}
-
-FastS_EngineBase *
-FastS_PlainDataSet::getPartitionMLD(const std::unique_lock<std::mutex> &dsGuard, uint32_t partindex, bool mld)
-{
- (void) dsGuard;
- FastS_EngineBase* ret = nullptr;
- unsigned int oldCount = 1;
- if (partindex < _partMap._num_partitions) {
- FastS_EngineBase* iter;
- for (iter = _partMap._partitions[partindex]._engines; iter != nullptr; iter = iter->_nextpart) {
- // NB: cost race condition
-
- if (!iter->IsRealBad() &&
- iter->_reported._mld == mld &&
- (iter->_config._unitrefcost > 0) &&
- EngineDocStampOK(iter->_reported._docstamp) &&
- (ret == nullptr || UseNewEngine(ret, iter, &oldCount)))
- {
- ret = iter;
- }
- }
- } else {
- LOG(error, "Couldn't fetch partition data: Partition ID too big, partindex=%x _partMap._num_partitions=%x", partindex, _partMap._num_partitions);
- }
- if (ret != nullptr) {
- ret->AddCost();
- }
- return ret;
-}
-
-FastS_EngineBase *
-FastS_PlainDataSet::getPartitionMLD(const std::unique_lock<std::mutex> &dsGuard, uint32_t partindex, bool mld, uint32_t rowid)
-{
- (void) dsGuard;
- FastS_EngineBase* ret = nullptr;
- unsigned int oldCount = 1;
-
- if (partindex < _partMap._num_partitions) {
- FastS_EngineBase* iter;
- for (iter = _partMap._partitions[partindex]._engines; iter != nullptr; iter = iter->_nextpart) {
- // NB: cost race condition
- if (!iter->IsRealBad() &&
- (iter->_reported._mld == mld) &&
- (iter->_config._confRowID == rowid) &&
- EngineDocStampOK(iter->_reported._docstamp) &&
- (ret == nullptr || UseNewEngine(ret, iter, &oldCount)))
- {
- ret = iter;
- }
- }
- } else {
- LOG(error, "Couldn't fetch partition data: Partition ID too big, partindex=%x _partMap._num_partitions=%x", partindex, _partMap._num_partitions);
- }
- if (ret != nullptr) {
- ret->AddCost();
- }
- return ret;
-}
-
-void
-FastS_PlainDataSet::LinkInPart_HasLock(FastS_EngineBase *engine)
-{
- if (engine->GetPartID() == FastS_NoID32())
- return;
-
- _partMap.LinkIn(engine);
-}
-
-
-void
-FastS_PlainDataSet::LinkOutPart_HasLock(FastS_EngineBase *engine)
-{
- if (engine->GetPartID() == FastS_NoID32())
- return;
-
- _partMap.LinkOut(engine);
-}
-
-
-uint32_t
-FastS_PlainDataSet::CalculateQueueLens_HasLock(uint32_t &dispatchnodes)
-{
- uint32_t partindex;
- uint32_t equeueLen;
- uint32_t pqueueLen;
- FastS_EngineBase *eng;
- uint32_t pdispatchnodes;
- uint32_t dupnodes;
-
- uint32_t queueLen = 0;
- dispatchnodes = 1;
- for (partindex = 0; partindex < _partMap._num_partitions ; partindex++) {
- eng = _partMap._partitions[partindex]._engines;
- if (eng != nullptr) {
- pqueueLen = eng->GetQueueLen();
- pdispatchnodes = eng->GetDispatchers();
- dupnodes = 1;
- eng = eng->_nextpart;
- while (eng != nullptr) {
- equeueLen = eng->GetQueueLen();
- if (equeueLen < pqueueLen)
- pqueueLen = equeueLen;
- pdispatchnodes += eng->GetDispatchers();
- dupnodes++;
- eng = eng->_nextpart;
- }
- if (pqueueLen > queueLen)
- queueLen = pqueueLen;
- if (dispatchnodes * dupnodes < pdispatchnodes)
- dispatchnodes = pdispatchnodes / dupnodes;
- }
- }
- return queueLen;
-}
-
-namespace {
-struct CheckReady {
- bool allReady;
- CheckReady() : allReady(true) {}
-
- inline void operator()(FastS_EngineBase* engine) {
- allReady &= engine->IsReady();
- }
-};
-
-} //anonymous namespace
-
-bool
-FastS_PlainDataSet::AreEnginesReady()
-{
-
- // We don't need to lock things here, since the engine list
- // is non-mutable during datasetcollection lifetime.
- return ForEachEngine( CheckReady() ).allReady;
-}
-
-void
-FastS_PlainDataSet::Ping()
-{
- for (FastS_EngineBase* engine : _enginesArray) {
- engine->Ping();
- }
-}
-
-
-ChildInfo
-FastS_PlainDataSet::getChildInfo() const
-{
- ChildInfo r;
- r.maxNodes = _partMap._childmaxnodesSinceReload;
- r.activeNodes = _partMap._childnodes;
- r.maxParts = _partMap._childmaxpartsSinceReload;
- r.activeParts = _partMap._childparts;
- r.activeDocs = getActiveDocs();
- return r;
-}
-
-bool
-FastS_PlainDataSet::IsValidPartIndex_HasLock(uint32_t partindex) {
- if (partindex < _partMap._num_partitions) {
- return true;
- } else {
- LOG(error, "Couldn't fetch partition data: Partition ID too big, partindex=%x _partMap._num_partitions=%x", partindex, _partMap._num_partitions);
- return false;
- }
-}
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/plain_dataset.h b/searchcore/src/vespa/searchcore/fdispatch/search/plain_dataset.h
deleted file mode 100644
index 8ce6b3adb75..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/plain_dataset.h
+++ /dev/null
@@ -1,216 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <list>
-
-#include "child_info.h"
-#include <vespa/searchcore/fdispatch/search/dataset_base.h>
-#include <vespa/searchlib/util/rand48.h>
-#include <vespa/searchcore/fdispatch/search/configdesc.h>
-#include <vespa/searchcore/fdispatch/search/rowstate.h>
-#include <vespa/fnet/task.h>
-
-class FastS_EngineBase;
-
-//----------------------------------------------------------------
-// class holding information about a set of partitions
-//----------------------------------------------------------------
-class FastS_PartitionMap
-{
-public:
-
- //----------------------------------------------------------------
- // class holding information about a single partition
- //----------------------------------------------------------------
- class Partition
- {
-
- public:
- FastS_EngineBase *_engines;
- uint32_t _maxnodesNow;
- uint32_t _maxnodesSinceReload;
- uint32_t _nodes;
- uint32_t _maxpartsNow;
- uint32_t _maxpartsSinceReload;
- uint32_t _parts;
-
- public:
- Partition();
- ~Partition();
- private:
- Partition(const Partition &);
- Partition& operator=(const Partition &);
- };
-
-
-public:
- Partition *_partitions;
- uint32_t _partBits;
- uint32_t _rowBits;
- uint32_t _num_partitions; // Number of partitions (active)
- uint32_t _first_partition; // From partitions-file 'firstpart' (active)
- uint32_t _minchildparts; // Minimum partitions live to avoid tempfail
- uint32_t _maxNodesDownPerFixedRow;
- bool _useRoundRobinForFixedRow;
- uint32_t _childnodes;
- uint32_t _childmaxnodesNow;
- uint32_t _childmaxnodesSinceReload;
- uint32_t _childparts;
- uint32_t _childmaxpartsNow;
- uint32_t _childmaxpartsSinceReload;
- uint32_t _mpp; // Number of engines needed per partition
-
- std::vector<uint32_t> _numPartitions;
-
-public:
- FastS_PartitionMap(FastS_DataSetDesc *desc);
- ~FastS_PartitionMap();
-
- void RecalcPartCnt(uint32_t partid);
- void LinkIn(FastS_EngineBase *engine);
- void LinkOut(FastS_EngineBase *engine);
-
- uint32_t GetSize() { return _num_partitions; }
-
- uint32_t getNumRows() const { return _maxRows + 1; }
- uint32_t getNumPartitions(size_t rowId) { return _numPartitions[rowId]; }
-private:
- FastS_PartitionMap(const FastS_PartitionMap &);
- FastS_PartitionMap& operator=(const FastS_PartitionMap &);
- uint32_t _maxRows;
-
-};
-
-//---------------------------------------------------------------------------
-
-class FastS_PlainDataSet : public FastS_DataSetBase
-{
- friend class FastS_NodeManager;
-
-public:
-
- //----------------------------------------------------------------
- // Max Hits Per Node Stats
- //----------------------------------------------------------------
-
- class MHPN_log_t
- {
- public:
- uint32_t _cnt; // # times maxHitsPerNode affected # hits requested
- uint32_t _incompleteCnt; // # times maxHitsPerNode caused too few hits
- uint32_t _fuzzyCnt; // # times maxHitsPerNode may have caused wrong hits
-
- MHPN_log_t();
- };
-
-protected:
- FastS_PartitionMap _partMap;
- fdispatch::StateOfRows _stateOfRows;
- MHPN_log_t _MHPN_log;
- double _slowQueryLimitFactor;
- double _slowQueryLimitBias;
- double _slowDocsumLimitFactor;
- double _slowDocsumLimitBias;
- double _monitorInterval;
- double _higherCoverageMaxSearchWait;
- double _higherCoverageMinSearchWait;
- double _higherCoverageBaseSearchWait;
- double _minimalSearchCoverage;
- double _higherCoverageMaxDocSumWait;
- double _higherCoverageMinDocSumWait;
- double _higherCoverageBaseDocSumWait;
- double _minimalDocSumCoverage;
- uint32_t _maxHitsPerNode; // Max hits requested from single node
- uint32_t _estimateParts; // number of partitions used for estimate
- uint32_t _estimatePartCutoff; // First partition not used for estimate
-
- FastS_DataSetDesc::QueryDistributionMode _queryDistributionMode;
- //all engines in this dataset
- std::vector<FastS_EngineBase *> _enginesArray;
- search::Rand48 _randState;
-
- void InsertEngine(FastS_EngineBase *engine);
- FastS_EngineBase *ExtractEngine();
- bool RefCostUseNewEngine(FastS_EngineBase *oldEngine, FastS_EngineBase *newEngine, unsigned int *oldCount);
- bool UseNewEngine(FastS_EngineBase *oldEngine, FastS_EngineBase *newEngine, unsigned int *oldCount);
-
- bool IsValidPartIndex_HasLock(uint32_t partindex);
-public:
- FastS_PlainDataSet(FastS_AppContext *appCtx, FastS_DataSetDesc *desc);
- ~FastS_PlainDataSet() override;
-
- bool useFixedRowDistribution() const {
- return _queryDistributionMode == FastS_DataSetDesc::QueryDistributionMode::FIXEDROW;
- }
- uint32_t getNumRows() const { return _partMap.getNumRows(); }
- uint32_t getNumPartitions(size_t rowId) { return _partMap.getNumPartitions(rowId); }
- uint32_t GetRowBits() const { return _partMap._rowBits; }
- uint32_t GetPartBits() const { return _partMap._partBits; }
- uint32_t GetFirstPart() const { return _partMap._first_partition; }
- uint32_t GetLastPart() const {
- return _partMap._first_partition + _partMap._num_partitions;
- }
- uint32_t GetPartitions() const { return _partMap._num_partitions; }
- uint32_t GetEstimateParts() const { return _estimateParts; }
- uint32_t GetEstimatePartCutoff() const { return _estimatePartCutoff; }
- uint32_t GetMaxHitsPerNode() const { return _maxHitsPerNode; }
- double GetSlowQueryLimitFactor() const { return _slowQueryLimitFactor; }
- double GetSlowQueryLimitBias() const { return _slowQueryLimitBias; }
- double GetSlowDocsumLimitFactor() const { return _slowDocsumLimitFactor; }
- double GetSlowDocsumLimitBias() const { return _slowDocsumLimitBias; }
- bool GetTempFail() const { return _partMap._childparts < _partMap._minchildparts; }
- void UpdateMaxHitsPerNodeLog(bool incomplete, bool fuzzy);
- uint32_t getMaxNodesDownPerFixedRow() const { return _partMap._maxNodesDownPerFixedRow; }
- uint32_t useRoundRobinForFixedRow() const { return _partMap._useRoundRobinForFixedRow; }
- double getMinGroupCoverage() const { return _queryDistributionMode.getMinGroupCoverage(); }
- void updateSearchTime(double searchTime, uint32_t rowId);
- void updateActiveDocs_HasLock(uint32_t rowId, PossCount newVal, PossCount oldVal) {
- _stateOfRows.updateActiveDocs(rowId, newVal, oldVal);
- }
- PossCount getActiveDocs() const { return _stateOfRows.getActiveDocs(); }
- uint32_t getRandomWeightedRow() const;
-
- FastS_EngineBase * getPartition(const std::unique_lock<std::mutex> &dsGuard, uint32_t partid);
- FastS_EngineBase * getPartition(const std::unique_lock<std::mutex> &dsGuard, uint32_t partid, uint32_t rowid);
-
- size_t countNodesUpInRow_HasLock(uint32_t rowid);
-
- FastS_EngineBase * getPartitionMLD(const std::unique_lock<std::mutex> &dsGuard, uint32_t partid, bool mld);
- FastS_EngineBase * getPartitionMLD(const std::unique_lock<std::mutex> &dsGuard, uint32_t partid, bool mld, uint32_t rowid);
-
- void LinkInPart_HasLock(FastS_EngineBase *engine);
- void LinkOutPart_HasLock(FastS_EngineBase *engine);
-
- ChildInfo getChildInfo() const override;
-
- uint32_t getMPP() const { return _partMap._mpp; }
- double getMonitorInterval() const { return _monitorInterval; }
- double getHigherCoverageMaxSearchWait() const { return _higherCoverageMaxSearchWait; }
- double getHigherCoverageMinSearchWait() const { return _higherCoverageMinSearchWait; }
- double getMinimalSearchCoverage() const { return _minimalSearchCoverage; }
- double getHigherCoverageMaxDocSumWait() const { return _higherCoverageMaxDocSumWait; }
- double getHigherCoverageMinDocSumWait() const { return _higherCoverageMinDocSumWait; }
- double getHigherCoverageBaseDocSumWait() const { return _higherCoverageBaseDocSumWait; }
- double getMinimalDocSumCoverage() const { return _minimalDocSumCoverage; }
-
- // API
- //----
- uint32_t CalculateQueueLens_HasLock(uint32_t &dispatchnodes) override;
- bool AreEnginesReady() override;
- virtual void Ping();
-
- // Downcast
- //---------
- FastS_PlainDataSet * GetPlainDataSet() override { return this; }
-
- template <class FUN>
- FUN ForEachEngine(FUN fun) {
- for (FastS_EngineBase *ptr : _enginesArray) {
- fun(ptr);
- }
- return fun;
- }
-
- static bool EngineDocStampOK(time_t haveDocStamp) { return (haveDocStamp != 0); }
-};
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/poss_count.h b/searchcore/src/vespa/searchcore/fdispatch/search/poss_count.h
deleted file mode 100644
index b5075116da2..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/poss_count.h
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <cstdint>
-
-struct PossCount {
- bool valid;
- uint64_t count;
-
- PossCount() : valid(false), count(0) {}
-
- bool operator != (const PossCount& other) {
- return (valid != other.valid) || (count != other.count);
- }
-};
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/query.cpp b/searchcore/src/vespa/searchcore/fdispatch/search/query.cpp
deleted file mode 100644
index d50a2aed46a..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/query.cpp
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "query.h"
-#include <vespa/searchlib/parsequery/simplequerystack.h>
-
-/** Marks as empty
- */
-FastS_query::FastS_query()
- : _dataset(0),
- _flags(0),
- _stackDump(),
- _sortSpec(),
- _groupSpec(),
- _location(),
- _rankProperties(),
- _featureOverrides()
-{
-};
-
-FastS_query::FastS_query(const search::docsummary::GetDocsumArgs &docsumArgs)
- : _dataset(0), // not known
- _flags(docsumArgs.GetQueryFlags()),
- _stackDump(docsumArgs.getStackDump()),
- _sortSpec(), // not known
- _groupSpec(), // not known
- _location(),
- _rankProperties(docsumArgs.rankProperties()),
- _featureOverrides(docsumArgs.featureOverrides())
-{
- // _query = search::SimpleQueryStack::StackbufToString(docsumArgs.getStackDump());
- if (docsumArgs.getLocation().size() > 0) {
- _location = strdup(docsumArgs.getLocation().c_str());
- }
-}
-
-
-void
-FastS_query::SetStackDump(vespalib::stringref stackRef)
-{
- _stackDump = stackRef;
-}
-
-const char *
-FastS_query::getPrintableQuery()
-{
- if (_printableQuery.empty()) {
- _printableQuery = search::SimpleQueryStack::StackbufToString(_stackDump);
- }
- return _printableQuery.c_str();
-}
-
-FastS_query::~FastS_query()
-{
-}
-
-
-void
-FastS_query::SetDataSet(uint32_t dataset)
-{
- _dataset = dataset;
-}
-
-unsigned int
-FastS_query::StackDumpHashKey() const
-{
- unsigned int res = 0;
- const unsigned char *p;
- const unsigned char *e;
- p = (const unsigned char *) _stackDump.begin();
- e = (const unsigned char *) _stackDump.end();
- while (p != e) {
- res = (res << 7) + (res >> 25) + *p;
- p++;
- }
- return res;
-}
-
-namespace
-{
-
-// This is ugly, somebody please find a better way.
-
-class SizeCollector : public search::fef::IPropertiesVisitor
-{
- static const size_t _stringFuzz = 15; // Compensate for malloc() waste
- static const size_t _vectorFuzz = 15;
- static const size_t _mapFuzz = 15;
- size_t _size;
-public:
- SizeCollector()
- : _size(0)
- {
- }
-
- virtual void
- visitProperty(const search::fef::Property::Value &key,
- const search::fef::Property &values) override
- {
- // Account for std::map element size
- _size += _mapFuzz;
- // Account for key string size
- _size += key.size() + _stringFuzz;
- size_t numValues = values.size();
- // Account for value vector size
- if (numValues > 0) {
- _size += numValues * sizeof(search::fef::Property::Value) + _vectorFuzz;
- for (size_t i = 0; i < numValues; ++i) {
- // Account for string sizes in value vector
- const search::fef::Property::Value &str = values.getAt(i);
- _size += str.size() + _stringFuzz;
- }
- }
- }
-
- size_t
- getSize() const
- {
- return _size;
- }
-};
-
-}
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/query.h b/searchcore/src/vespa/searchcore/fdispatch/search/query.h
deleted file mode 100644
index b6949e70d8f..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/query.h
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/searchlib/fef/properties.h>
-#include <vespa/searchsummary/docsummary/getdocsumargs.h>
-
-class FastS_query
-{
-public:
- uint32_t _dataset;
- uint32_t _flags;
- vespalib::string _stackDump;
- vespalib::string _printableQuery;
- vespalib::string _sortSpec;
- std::vector<char> _groupSpec; // this is binary
- vespalib::string _location;
- search::fef::Properties _rankProperties;
- search::fef::Properties _featureOverrides;
-
- FastS_query(const FastS_query &other);
- FastS_query &operator=(const FastS_query &other);
-public:
- FastS_query();
- FastS_query(const search::docsummary::GetDocsumArgs &docsumArgs);
- ~FastS_query();
-
- void SetStackDump(vespalib::stringref stackDump);
- void SetSortSpec(const char *spec) { _sortSpec = spec; }
- void SetLocation(const char *loc) { _location = loc; }
- void SetRankProperties(const search::fef::Properties &rp) { _rankProperties = rp; }
- void SetFeatureOverrides(const search::fef::Properties &fo) { _featureOverrides = fo; }
- void SetDataSet(uint32_t dataset);
- void SetQueryFlags(uint32_t flags) { _flags = flags; }
- void SetFlag(uint32_t flag) { _flags |= flag; }
- void ClearFlag(uint32_t flag) { _flags &= ~flag; }
- const vespalib::string &getStackDump() const { return _stackDump; }
- const char *GetSortSpec() const { return _sortSpec.c_str(); }
- const char *GetLocation() const { return _location.c_str(); }
- const search::fef::Properties &GetRankProperties() const { return _rankProperties; }
- const search::fef::Properties &GetFeatureOverrides() const { return _featureOverrides; }
-
- uint32_t GetQueryFlags() const { return _flags; }
- const char *getPrintableQuery();
- bool IsFlagSet(uint32_t flag) const { return (_flags & flag) != 0; }
-
- unsigned int StackDumpHashKey() const;
-
-
-private:
- static unsigned int hash_str_check(const unsigned char *pt)
- {
- if (pt == NULL)
- return 0;
-
- unsigned int res = 0;
- for (; *pt != 0; pt++)
- res = (res << 7) + (res >> 25) + *pt;
- return res;
- }
- static bool cmp_str_check(const char *a, const char *b)
- {
- if (a == NULL && b == NULL)
- return true;
- if (a == NULL || b == NULL)
- return false;
- return (strcmp(a, b) == 0);
- }
- static bool cmp_str_ref(vespalib::stringref a,
- vespalib::stringref b)
- {
- return (a.size() == b.size() &&
- memcmp(a.data(), b.data(), a.size()) == 0);
- }
-};
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/querycacheutil.cpp b/searchcore/src/vespa/searchcore/fdispatch/search/querycacheutil.cpp
deleted file mode 100644
index 5a2fd5a95c3..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/querycacheutil.cpp
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "querycacheutil.h"
-#include <vespa/searchlib/parsequery/simplequerystack.h>
-#include <vespa/searchlib/common/sortdata.h>
-
-#include <vespa/log/log.h>
-LOG_SETUP(".search.querycacheutil");
-
-using search::common::SortData;
-
-uint32_t FastS_QueryCacheUtil::_systemMaxHits;
-uint32_t FastS_QueryCacheUtil::_maxOffset = 4000;
-
-
-FastS_QueryCacheUtil::FastS_QueryCacheUtil()
- : _startTime(),
- _userMaxHits(0),
- _alignedMaxHits(0),
- _alignedSearchOffset(0),
- _ranking(),
- _dateTime(0),
- _query(),
- _queryResult(),
- _docsumsResult(),
- _searchInfo(),
- _alignedHitBuf(NULL),
- _hitbuf_needfree(false),
- _alignedHitCount(0),
- _sortIndex(NULL),
- _sortData(NULL),
- _sortdata_needfree(false)
-{
- _searchInfo._maxHits = 10;
-}
-
-FastS_QueryCacheUtil::~FastS_QueryCacheUtil()
-{
-}
-
-void
-FastS_QueryCacheUtil::setSearchRequest(const search::engine::SearchRequest * request)
-{
- _ranking = request->ranking;
-
- _query.SetQueryFlags(request->queryFlags);
-
- _query.SetStackDump(request->getStackRef());
- _query.SetSortSpec(request->sortSpec.c_str());
- _query._groupSpec = request->groupSpec;
- _query.SetLocation(request->location.c_str());
- _query.SetRankProperties(request->propertiesMap.rankProperties());
- _query.SetFeatureOverrides(request->propertiesMap.featureOverrides());
-}
-
-
-void
-FastS_QueryCacheUtil::SetupQuery(uint32_t maxhits,
- uint32_t offset)
-{
- FastS_assert(_queryResult._hitbuf == NULL);
- FastS_assert(_alignedHitBuf == NULL);
- FastS_assert(!_hitbuf_needfree);
- FastS_assert(_queryResult._hitCount == 0);
- FastS_assert(_docsumsResult._fullResultCount == 0);
- FastS_assert(_alignedHitCount == 0);
- FastS_assert(_queryResult._totalHitCount == 0);
- FastS_assert(_alignedMaxHits == 0);
- FastS_assert(_alignedSearchOffset == 0);
- FastS_assert(_docsumsResult._fullresult == NULL);
- _searchInfo._searchOffset = offset;
- _searchInfo._maxHits = maxhits;
-}
-
-
-void
-FastS_QueryCacheUtil::AdjustSearchParameters(uint32_t partitions)
-{
- bool strict = (partitions > 1);
-
- if (_searchInfo._maxHits == 0) {
- _searchInfo._searchOffset = 0;
- }
-
- _searchInfo._maxHits = std::min(_searchInfo._maxHits, _maxOffset + _systemMaxHits);
- if (strict) {
- _searchInfo._searchOffset = std::min(_searchInfo._searchOffset, _maxOffset);
- _searchInfo._maxHits = std::min(_searchInfo._maxHits, _maxOffset + _systemMaxHits - _searchInfo._searchOffset);
- }
-}
-
-
-void
-FastS_QueryCacheUtil::AdjustSearchParametersFinal(uint32_t partitions)
-{
- if (IsEstimate()) {
-
- FastS_assert(_searchInfo._searchOffset == 0);
- FastS_assert(_searchInfo._maxHits == 0);
-
- _alignedSearchOffset = 0;
- _alignedMaxHits = 0;
- } else {
- _alignedSearchOffset = (partitions > 1) ? 0 : _searchInfo._searchOffset;
- _alignedMaxHits = _searchInfo._maxHits + _searchInfo._searchOffset - _alignedSearchOffset;
- FastS_assert(_alignedMaxHits <= _maxOffset + _systemMaxHits);
- }
-}
-
-void
-FastS_QueryCacheUtil::DropResult()
-{
- _queryResult._groupResultLen = 0;
- _queryResult._groupResult = NULL;
-
- if (_hitbuf_needfree) {
- FastS_assert(_alignedHitBuf != NULL);
- free(_alignedHitBuf);
- }
- if (_sortdata_needfree) {
- FastS_assert(_sortIndex != NULL);
- free(_sortIndex);
- }
- _sortIndex = NULL;
- _sortData = NULL;
- _sortdata_needfree = false;
- _alignedHitBuf = NULL;
- _queryResult._hitbuf = NULL;
- _hitbuf_needfree = false;
- free(_docsumsResult._fullresult);
- _docsumsResult._fullresult = NULL;
- _queryResult._hitCount = 0;
- _docsumsResult._fullResultCount = 0;
- _queryResult._totalHitCount = 0;
- _queryResult._maxRank = std::numeric_limits<search::HitRank>::is_integer ?
- std::numeric_limits<search::HitRank>::min() :
- - std::numeric_limits<search::HitRank>::max();
-
- _alignedHitCount = 0;
-}
-
-
-bool
-FastS_QueryCacheUtil::IsEstimate() const
-{
- return _query.IsFlagSet(search::fs4transport::QFLAG_ESTIMATE);
-}
-
-void
-FastS_QueryCacheUtil::InitEstimateMode()
-{
- _searchInfo._searchOffset = 0;
- _searchInfo._maxHits = 0;
- _ranking.clear();
- _dateTime = 0;
-}
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/querycacheutil.h b/searchcore/src/vespa/searchcore/fdispatch/search/querycacheutil.h
deleted file mode 100644
index 442fa3f0710..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/querycacheutil.h
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/searchcore/fdispatch/search/query.h>
-#include <vespa/searchlib/common/transport.h>
-
-#include <vespa/searchcore/fdispatch/common/search.h>
-#include <vespa/searchcore/util/log.h>
-
-class FastS_DataSetCollection;
-
-class FastS_QueryCacheUtil
-{
-private:
- FastS_QueryCacheUtil(const FastS_QueryCacheUtil &);
- FastS_QueryCacheUtil& operator=(const FastS_QueryCacheUtil &);
-
- double _startTime; // For the query
-
- uint32_t _userMaxHits; // Max hits spec.d by user; NB: see _systemMaxHits
- uint32_t _alignedMaxHits; // Max hits (forwarded to engine)
- uint32_t _alignedSearchOffset; // Search offset (forwarded to engine)
- vespalib::string _ranking; // ranking profile to be used
- uint32_t _randomSeed; // seed for random rank values
- uint32_t _dateTime; // datetime used for freshness boost
-
- FastS_query _query; // NB: Here it is!
-
- FastS_QueryResult _queryResult;
- FastS_DocsumsResult _docsumsResult;
- FastS_SearchInfo _searchInfo;
-
- FastS_hitresult *_alignedHitBuf; // Hits from engine
- bool _hitbuf_needfree; // Destructor should free _hitbuf.
- uint32_t _alignedHitCount; // # Hits from engine
-
- uint32_t *_sortIndex;
- char *_sortData; // NB: same malloc as _sortIndex
- bool _sortdata_needfree;
-
-public:
- static uint32_t _systemMaxHits;
- static uint32_t _maxOffset;
-public:
- FastS_QueryCacheUtil();
- ~FastS_QueryCacheUtil();
- bool AgeDropCheck();
- void DropResult();
- bool GotNoResultsYet() const { return _queryResult._hitbuf == NULL; }
- uint32_t GetSearchOffset() const { return _searchInfo._searchOffset; }
- uint32_t GetMaxHits() const { return _searchInfo._maxHits; }
- uint32_t GetAlignedMaxHits() const { return _alignedMaxHits; }
- uint32_t GetAlignedSearchOffset() const { return _alignedSearchOffset; }
- const vespalib::string & GetRanking() const { return _ranking; }
- uint32_t GetRandomSeed() const { return _randomSeed; }
- uint32_t GetDateTime() const { return _dateTime; }
- FastS_query &GetQuery() { return _query; }
- const char *GetSortSpec() const { return _query.GetSortSpec(); }
- const char *GetLocation() const { return _query.GetLocation(); }
- bool ShouldDropSortData() const {
- return _query.IsFlagSet(search::fs4transport::QFLAG_DROP_SORTDATA);
- }
- bool IsQueryFlagSet(uint32_t flag) const { return _query.IsFlagSet(flag); }
- FastS_QueryResult *GetQueryResult() {
- return &_queryResult;
- }
- FastS_DocsumsResult *GetDocsumsResult() { return &_docsumsResult; }
- FastS_SearchInfo *GetSearchInfo() { return &_searchInfo; }
- void SetStartTime(double timeref) { _startTime = timeref; }
- void AdjustSearchParameters(uint32_t partitions);
- void AdjustSearchParametersFinal(uint32_t partitions);
- void SetupQuery(uint32_t maxhits, uint32_t offset);
- bool IsEstimate() const;
- void ForceStrictLimits();
- void InitEstimateMode();
- double ElapsedSecs(double now) const {
- double ret = now - _startTime;
- if (ret < 0.0)
- ret = 0.0;
- return ret;
- }
- void SetCoverage(uint64_t coverageDocs, uint64_t activeDocs, uint64_t soonActiveDocs,
- uint32_t degradeReason, uint16_t nodesQueried, uint16_t nodesReplied)
- {
- _searchInfo._coverageDocs = coverageDocs;
- _searchInfo._activeDocs = activeDocs;
- _searchInfo._soonActiveDocs = soonActiveDocs;
- _searchInfo._degradeReason = degradeReason;
- _searchInfo._nodesQueried = nodesQueried;
- _searchInfo._nodesReplied = nodesReplied;
- }
- void SetAlignedHitCount(uint32_t alignedHitCount) {
- if (alignedHitCount > _alignedMaxHits) {
- alignedHitCount = _alignedMaxHits;
- }
- _alignedHitCount = alignedHitCount;
- }
- void CalcHitCount() {
- if (_alignedHitCount + _alignedSearchOffset > _searchInfo._searchOffset) {
- _queryResult._hitCount = _alignedHitCount + _alignedSearchOffset - _searchInfo._searchOffset;
- } else {
- _queryResult._hitCount = 0;
- }
- if (_queryResult._hitCount > _searchInfo._maxHits) {
- _queryResult._hitCount = _searchInfo._maxHits;
- }
- }
- void AllocAlignedHitBuf() {
- FastS_assert(_alignedHitBuf == NULL);
- if (_alignedHitCount != 0) {
- _alignedHitBuf = (FastS_hitresult*)malloc(sizeof(FastS_hitresult) * _alignedHitCount);
- _hitbuf_needfree = true;
- _queryResult._hitbuf = _alignedHitBuf + _searchInfo._searchOffset - _alignedSearchOffset;
- }
- }
- void AllocSortData(uint32_t sortDataLen)
- {
- FastS_assert(_sortIndex == NULL && _sortData == NULL);
- uint32_t hitcnt = _alignedHitCount;
- if (hitcnt == 0) {
- FastS_assert(sortDataLen == 0);
- return;
- }
- void *pt = malloc((hitcnt + 1) * sizeof(uint32_t) + sortDataLen);
- FastS_assert(pt != NULL);
- _sortIndex = (uint32_t *) pt;
- _sortData = (char *)(void *)(_sortIndex + hitcnt + 1);
- _sortdata_needfree = true;
- if (hitcnt > _searchInfo._searchOffset) {
- _queryResult._sortIndex =
- _sortIndex + _searchInfo._searchOffset - _alignedSearchOffset;
- _queryResult._sortData = _sortData;
- }
- }
- uint32_t *GetSortIndex() const { return _sortIndex; }
- char *GetSortData() const { return _sortData; }
- FastS_hitresult *GetAlignedHitBuf() const { return _alignedHitBuf; }
- FastS_hitresult *GetAlignedHitBufEnd() const {
- return _alignedHitBuf + _alignedHitCount;
- }
- uint32_t GetAlignedHitCount() const { return _alignedHitCount; }
- void SetGroupResult(const char *groupResult) {
- _queryResult._groupResult = groupResult;
- }
- void SetGroupResultLen(uint32_t groupResultLen) {
- _queryResult._groupResultLen = groupResultLen;
- }
- void setSearchRequest(const search::engine::SearchRequest * request);
-};
-
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/rowstate.cpp b/searchcore/src/vespa/searchcore/fdispatch/search/rowstate.cpp
deleted file mode 100644
index b11d9aeac53..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/rowstate.cpp
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/searchcore/fdispatch/search/rowstate.h>
-
-namespace fdispatch {
-
-constexpr uint64_t MIN_DECAY_RATE = 42;
-constexpr double MIN_QUERY_TIME = 0.001;
-
-RowState::RowState(double initialValue, uint64_t decayRate) :
- _decayRate(std::max(decayRate, MIN_DECAY_RATE)),
- _avgSearchTime(std::max(initialValue, MIN_QUERY_TIME)),
- _sumActiveDocs(0),
- _numQueries(0)
-{ }
-
-void RowState::updateSearchTime(double searchTime)
-{
- searchTime = std::max(searchTime, MIN_QUERY_TIME);
- double decayRate = std::min(_numQueries + MIN_DECAY_RATE, _decayRate);
- _avgSearchTime = (searchTime + (decayRate-1)*_avgSearchTime)/decayRate;
- ++_numQueries;
-}
-
-StateOfRows::StateOfRows(size_t numRows, double initialValue, uint64_t decayRate) :
- _rows(numRows, RowState(initialValue, decayRate)),
- _sumActiveDocs(0), _invalidActiveDocsCounter(0)
-{
- srand48(1);
-}
-
-void
-StateOfRows::updateSearchTime(double searchTime, uint32_t rowId)
-{
- _rows[rowId].updateSearchTime(searchTime);
-}
-
-uint32_t
-StateOfRows::getRandomWeightedRow() const
-{
- return getWeightedNode(drand48());
-}
-
-uint32_t
-StateOfRows::getWeightedNode(double cand) const
-{
- double sum = 0;
- for (const RowState & rs : _rows) {
- sum += rs.getAverageSearchTimeInverse();
- }
- double accum(0.0);
- for (size_t rowId(0); (rowId + 1) < _rows.size(); rowId++) {
- accum += _rows[rowId].getAverageSearchTimeInverse();
- if (cand < accum/sum) {
- return rowId;
- }
- }
- return _rows.size() - 1;
-}
-
-void
-StateOfRows::updateActiveDocs(uint32_t rowId, PossCount newVal, PossCount oldVal)
-{
- uint64_t tmp = _sumActiveDocs + newVal.count - oldVal.count;
- _sumActiveDocs = tmp;
- _rows[rowId].updateActiveDocs(newVal.count, oldVal.count);
- if (newVal.valid != oldVal.valid) {
- if (oldVal.valid) {
- ++_invalidActiveDocsCounter;
- } else {
- --_invalidActiveDocsCounter;
- }
- }
-}
-
-PossCount
-StateOfRows::getActiveDocs() const
-{
- PossCount r;
- if (activeDocsValid()) {
- r.valid = true;
- r.count = 0;
- for (const RowState &row : _rows) {
- r.count = std::max(r.count, row.activeDocs());
- }
- }
- return r;
-}
-
-}
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/rowstate.h b/searchcore/src/vespa/searchcore/fdispatch/search/rowstate.h
deleted file mode 100644
index 92268a291bb..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/rowstate.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vector>
-#include <stdint.h>
-#include <stdlib.h>
-#include "poss_count.h"
-
-namespace fdispatch {
-
-/**
- * RowState keeps track of state per row or rather group.
- * Currently it just keeps the average searchtime as exponential decay.
- **/
-class RowState {
-public:
- RowState(double initialValue, uint64_t decayRate);
- double getAverageSearchTime() const { return _avgSearchTime; }
- double getAverageSearchTimeInverse() const { return 1.0/_avgSearchTime; }
- void updateSearchTime(double searchTime);
- void setAverageSearchTime(double avgSearchTime) { _avgSearchTime = avgSearchTime; }
- uint64_t activeDocs() const { return _sumActiveDocs; }
- void updateActiveDocs(uint64_t newVal, uint64_t oldVal) {
- uint64_t tmp = _sumActiveDocs + newVal - oldVal;
- _sumActiveDocs = tmp;
- }
-private:
- const uint64_t _decayRate;
- double _avgSearchTime;
- uint64_t _sumActiveDocs;
- uint64_t _numQueries;
-};
-
-/**
- * StateOfRows keeps track of the state of all rows/groups.
- * Currently used for tracking latency in groups. This latency
- * can be used for selecting a random node with weighted probability
- * with the intention to favor load on fast groups.
- **/
-class StateOfRows {
-public:
- StateOfRows(size_t numRows, double initial, uint64_t decayRate);
- void updateSearchTime(double searchTime, uint32_t rowId);
- const RowState & getRowState(uint32_t rowId) const { return _rows[rowId]; }
- RowState & getRowState(uint32_t rowId) { return _rows[rowId]; }
- uint32_t getRandomWeightedRow() const;
- uint32_t getWeightedNode(double rnd) const;
- void updateActiveDocs(uint32_t rowId, PossCount newVal, PossCount oldVal);
- uint32_t numRowStates() const { return _rows.size(); }
- uint64_t sumActiveDocs() const { return _sumActiveDocs; }
- PossCount getActiveDocs() const;
- bool activeDocsValid() const { return _invalidActiveDocsCounter == 0; }
-private:
- std::vector<RowState> _rows;
- uint64_t _sumActiveDocs;
- size_t _invalidActiveDocsCounter;
-};
-
-}
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/search_path.cpp b/searchcore/src/vespa/searchcore/fdispatch/search/search_path.cpp
deleted file mode 100644
index daaf2a7c1db..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/search_path.cpp
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "search_path.h"
-#include <vespa/vespalib/stllike/asciistream.h>
-
-#include <vespa/log/log.h>
-LOG_SETUP(".fdispatch.search_path");
-
-namespace fdispatch {
-
-SearchPath::Element::Element()
- : _nodes(),
- _row(std::numeric_limits<size_t>::max())
-{
-}
-
-vespalib::stringref
-SearchPath::parseElement(vespalib::stringref spec, size_t numNodes)
-{
- _elements.push_back(Element());
- vespalib::string::size_type specSepPos(spec.find('/'));
- parsePartList(spec.substr(0, specSepPos), numNodes);
-
- vespalib::stringref remaining = spec.substr(specSepPos + 1);
- vespalib::string::size_type elementSepPos = remaining.find(';');
- parseRow(remaining.substr(0, elementSepPos));
-
- if (elementSepPos != vespalib::string::npos) {
- return remaining.substr(elementSepPos + 1);
- }
- return vespalib::stringref();
-}
-
-void
-SearchPath::parsePartList(vespalib::stringref partSpec, size_t numNodes)
-{
- try {
- if (!partSpec.empty() && (partSpec[0] != '*')) {
- vespalib::asciistream is(partSpec);
- is.eatWhite();
- parsePartList(is, numNodes);
- } else {
- for (size_t i(0); i < numNodes; i++) {
- _elements.back().addPart(i);
- }
- }
- } catch (const std::exception & e) {
- LOG(warning, "Failed parsing part of searchpath='%s' with error '%s'. Result might be mumbo jumbo.",
- vespalib::string(partSpec).c_str(), e.what());
- }
-}
-
-void
-SearchPath::parsePartList(vespalib::asciistream &spec, size_t numNodes)
-{
- spec.eatWhite();
- if ( !spec.empty() ) {
- char c(spec.c_str()[0]);
- if (c == '[') {
- parsePartRange(spec, numNodes);
- } else {
- size_t num(0);
- spec >> num;
- _elements.back().addPart(num);
- }
- if ( ! spec.eof() ) {
- spec >> c;
- if (c == ',') {
- parsePartList(spec, numNodes);
- }
- }
- } else {
- throw std::runtime_error("Expected either '[' or a number, got EOF");
- }
-}
-
-void
-SearchPath::parsePartRange(vespalib::asciistream &spec, size_t numNodes)
-{
- size_t from(0);
- size_t to(numNodes);
- char s(0), c(0), e(0);
- spec >> s >> from >> c >> to >> e;
- if (c != ',') {
- throw std::runtime_error("Expected ','");
- }
- if (e != '>') {
- throw std::runtime_error("Expected '>'");
- }
- to = std::min(numNodes, to);
- for (size_t i(from); i < to; i++) {
- _elements.back().addPart(i);
- }
-}
-
-void
-SearchPath::parseRow(vespalib::stringref rowSpec)
-{
- if (!rowSpec.empty()) {
- // FIXME C++17 range-safe from_chars() instead of strtoul()
- _elements.back().setRow(strtoul(rowSpec.data(), nullptr, 0));
- }
-}
-
-SearchPath::SearchPath(const vespalib::string &spec, size_t numNodes)
- : _elements()
-{
- vespalib::stringref specBuf = spec;
- while (!specBuf.empty()) {
- specBuf = parseElement(specBuf, numNodes);
- }
-}
-
-} // namespace fdispatch
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/search_path.h b/searchcore/src/vespa/searchcore/fdispatch/search/search_path.h
deleted file mode 100644
index bbf1002742e..00000000000
--- a/searchcore/src/vespa/searchcore/fdispatch/search/search_path.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/vespalib/stllike/string.h>
-#include <set>
-#include <vector>
-#include <limits>
-
-namespace vespalib {
- class asciistream;
-}
-namespace fdispatch {
-
-class SearchPath
-{
-public:
- typedef std::set<size_t> NodeList;
-
- class Element
- {
- private:
- NodeList _nodes;
- size_t _row;
-
- public:
- Element();
- Element &addPart(size_t part) {
- _nodes.insert(part);
- return *this;
- }
- Element &setRow(size_t row_) {
- _row = row_;
- return *this;
- }
- bool hasRow() const { return _row != std::numeric_limits<size_t>::max(); }
- size_t row() const { return _row; }
- const NodeList &nodes() const { return _nodes; }
- };
-
- typedef std::vector<Element> ElementVector;
-
-private:
- ElementVector _elements;
-
- vespalib::stringref parseElement(vespalib::stringref spec, size_t numNodes);
- void parsePartList(vespalib::stringref partSpec, size_t numNodes);
- void parsePartList(vespalib::asciistream &spec, size_t numNodes);
- void parsePartRange(vespalib::asciistream &spec, size_t numNodes);
- void parseRow(vespalib::stringref rowSpec);
-
-public:
- SearchPath(const vespalib::string &spec, size_t numNodes);
- const ElementVector &elements() const { return _elements; }
-};
-
-} // namespace fdispatch
-
diff --git a/searchcore/src/vespa/searchcore/proton/docsummary/docsumcontext.cpp b/searchcore/src/vespa/searchcore/proton/docsummary/docsumcontext.cpp
index edb91787214..c65257e7f6a 100644
--- a/searchcore/src/vespa/searchcore/proton/docsummary/docsumcontext.cpp
+++ b/searchcore/src/vespa/searchcore/proton/docsummary/docsumcontext.cpp
@@ -4,6 +4,7 @@
#include <vespa/searchlib/queryeval/begin_and_end_id.h>
#include <vespa/searchlib/attribute/iattributemanager.h>
#include <vespa/searchlib/common/location.h>
+#include <vespa/searchlib/common/matching_elements.h>
#include <vespa/searchlib/common/transport.h>
#include <vespa/vespalib/data/slime/slime.h>
#include <vespa/vespalib/util/stringfmt.h>
@@ -213,4 +214,10 @@ DocsumContext::ParseLocation(search::docsummary::GetDocsumsState *state)
state->_parsedLocation.reset(getLocation(_request.location, _attrMgr));
}
+std::unique_ptr<MatchingElements>
+DocsumContext::fill_matching_elements()
+{
+ return std::make_unique<MatchingElements>();
+}
+
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/docsummary/docsumcontext.h b/searchcore/src/vespa/searchcore/proton/docsummary/docsumcontext.h
index 45f3ca8e44f..30b5ef16cb1 100644
--- a/searchcore/src/vespa/searchcore/proton/docsummary/docsumcontext.h
+++ b/searchcore/src/vespa/searchcore/proton/docsummary/docsumcontext.h
@@ -48,6 +48,7 @@ public:
void FillSummaryFeatures(search::docsummary::GetDocsumsState * state, search::docsummary::IDocsumEnvironment * env) override;
void FillRankFeatures(search::docsummary::GetDocsumsState * state, search::docsummary::IDocsumEnvironment * env) override;
void ParseLocation(search::docsummary::GetDocsumsState * state) override;
+ std::unique_ptr<search::MatchingElements> fill_matching_elements() override;
};
} // namespace proton
diff --git a/searchlib/src/main/java/com/yahoo/searchlib/aggregation/AggregationResult.java b/searchlib/src/main/java/com/yahoo/searchlib/aggregation/AggregationResult.java
index 264a9d4d4e9..6858ebb8f82 100644
--- a/searchlib/src/main/java/com/yahoo/searchlib/aggregation/AggregationResult.java
+++ b/searchlib/src/main/java/com/yahoo/searchlib/aggregation/AggregationResult.java
@@ -8,7 +8,7 @@ import com.yahoo.vespa.objects.ObjectVisitor;
import com.yahoo.vespa.objects.Serializer;
/**
- * <p>This is the aggregation super-class from which all types of aggregation inherits.</p>
+ * The result of some aggregation
*
* @author baldersheim
* @author Simon Thoresen Hult
@@ -20,7 +20,7 @@ public abstract class AggregationResult extends ExpressionNode {
private int tag = -1;
/**
- * <p>Returns the tag of this aggregation result. This is useful for uniquely identifying a result.</p>
+ * Returns the tag of this aggregation result. This is useful for uniquely identifying a result.
*
* @return The numerical tag.
*/
@@ -29,10 +29,10 @@ public abstract class AggregationResult extends ExpressionNode {
}
/**
- * <p>Assigns a tag to this group.</p>
+ * Assigns a tag to this group.
*
- * @param tag The numerical tag to set.
- * @return This, to allow chaining.
+ * @param tag the numerical tag to set.
+ * @return this, to allow chaining.
*/
public AggregationResult setTag(int tag) {
this.tag = tag;
@@ -40,55 +40,47 @@ public abstract class AggregationResult extends ExpressionNode {
}
/**
- * <p>This method is called when merging aggregation results. This method is simply a proxy for the abstract {@link
- * #onMerge(AggregationResult)} method.</p>
+ * Called when merging aggregation results. This method is simply a proxy for the abstract {@link
+ * #onMerge(AggregationResult)} method.
*
- * @param result The result to merge with.
+ * @param result the result to merge with.
*/
public void merge(AggregationResult result) {
onMerge(result);
}
/**
- * <p>This method is called when all aggregation results have been merged. This method can be overloaded by
- * subclasses that need special behaviour to occur after merge.</p>
+ * Hook called when all aggregation results have been merged. This method can be overloaded by
+ * subclasses that need special behaviour to occur after merge.
*/
public void postMerge() {
// empty
}
- /**
- * <p>This method returns a value that can be used for ranking.</p>
- *
- * @return The rankable result.
- */
+ /** Returns a value that can be used for ranking. */
public abstract ResultNode getRank();
/**
- * <p>Sets the expression to aggregate on.</p>
+ * Sets the expression to aggregate on.
*
- * @param exp The expression.
- * @return This, to allow chaining.
+ * @param exp the expression
+ * @return this, to allow chaining
*/
public AggregationResult setExpression(ExpressionNode exp) {
expression = exp;
return this;
}
- /**
- * <p>Returns the expression to aggregate on.</p>
- *
- * @return The expression.
- */
+ /** Returns the expression to aggregate on. */
public ExpressionNode getExpression() {
return expression;
}
/**
- * <p>This method must be implemented by subclasses to support merge. It is called as the {@link
- * #merge(AggregationResult)} method is invoked.</p>
+ * Mmust be implemented by subclasses to support merge. It is called as the {@link
+ * #merge(AggregationResult)} method is invoked.
*
- * @param result The result to merge with.
+ * @param result the result to merge with
*/
protected abstract void onMerge(AggregationResult result);
@@ -158,4 +150,5 @@ public abstract class AggregationResult extends ExpressionNode {
visitor.visit("expression", expression);
visitor.visit("tag", tag);
}
+
}
diff --git a/searchlib/src/main/java/com/yahoo/searchlib/aggregation/FS4Hit.java b/searchlib/src/main/java/com/yahoo/searchlib/aggregation/FS4Hit.java
index 399ffd3128f..07de8bbdc55 100644
--- a/searchlib/src/main/java/com/yahoo/searchlib/aggregation/FS4Hit.java
+++ b/searchlib/src/main/java/com/yahoo/searchlib/aggregation/FS4Hit.java
@@ -7,7 +7,7 @@ import com.yahoo.vespa.objects.ObjectVisitor;
import com.yahoo.vespa.objects.Serializer;
/**
- * This class represents a single hit from the fastserver4 backend
+ * A single hit from a Vespa content cluster
*
* @author havardpe
*/
@@ -103,7 +103,7 @@ public class FS4Hit extends Hit {
return super.hashCode() + path + globalId.hashCode() + distributionKey;
}
- @SuppressWarnings({ "EqualsWhichDoesntCheckParameterClass", "RedundantIfStatement" })
+ @SuppressWarnings("RedundantIfStatement")
@Override
public boolean equals(Object obj) {
if (!super.equals(obj)) {
@@ -129,4 +129,5 @@ public class FS4Hit extends Hit {
visitor.visit("globalId", globalId.toString());
visitor.visit("distributionKey", distributionKey);
}
+
}
diff --git a/searchlib/src/main/java/com/yahoo/searchlib/aggregation/Group.java b/searchlib/src/main/java/com/yahoo/searchlib/aggregation/Group.java
index 73171f4dd00..c508296d739 100644
--- a/searchlib/src/main/java/com/yahoo/searchlib/aggregation/Group.java
+++ b/searchlib/src/main/java/com/yahoo/searchlib/aggregation/Group.java
@@ -12,28 +12,28 @@ public class Group extends Identifiable {
public static final int classId = registerClass(0x4000 + 90, Group.class);
private static final ObjectPredicate REF_LOCATOR = new RefLocator();
- private List<Integer> orderByIdx = new ArrayList<Integer>();
- private List<ExpressionNode> orderByExp = new ArrayList<ExpressionNode>();
- private List<AggregationResult> aggregationResults = new ArrayList<AggregationResult>();
- private List<Group> children = new ArrayList<Group>();
+ private List<Integer> orderByIdx = new ArrayList<>();
+ private List<ExpressionNode> orderByExp = new ArrayList<>();
+ private List<AggregationResult> aggregationResults = new ArrayList<>();
+ private List<Group> children = new ArrayList<>();
private ResultNode id = null;
private double rank;
private int tag = -1;
private SortType sortType = SortType.UNSORTED;
/**
- * <p>This tells you if the children are ranked by the pure relevance or by a more complex expression. That
- * indicates if the rank score from the child can be used for ordering.</p>
+ * This tells you if the children are ranked by the pure relevance or by a more complex expression.
+ * That indicates if the rank score from the child can be used for ordering.
*
- * @return True if it ranked by pure relevance.
+ * @return true if it is ranked by pure relevance.
*/
public boolean isRankedByRelevance() {
return orderByIdx.isEmpty();
}
/**
- * <p>Merges the content of the given group <b>into</b> this. When this function returns, make sure to call {@link
- * #postMerge(java.util.List, int, int)}.</p>
+ * Merges the content of the given group <b>into</b> this. When this function returns, make sure to call
+ * {@link #postMerge(java.util.List, int, int)}.
*
* @param firstLevel The first level to merge.
* @param currentLevel The current level.
@@ -49,7 +49,7 @@ public class Group extends Identifiable {
}
}
- ArrayList<Group> merged = new ArrayList<Group>();
+ ArrayList<Group> merged = new ArrayList<>();
Iterator<Group> lhsChild = children.iterator(), rhsChild = rhs.children.iterator();
if (lhsChild.hasNext() && rhsChild.hasNext()) {
Group lhsGroup = lhsChild.next();
@@ -93,8 +93,8 @@ public class Group extends Identifiable {
}
/**
- * <p>After merging, this method will prune all levels so that they do not exceed the configured maximum number of
- * groups per level.</p>
+ * After merging, this method will prune all levels so that they do not exceed the configured maximum number of
+ * groups per level.
*
* @param levels The specs of all grouping levels.
* @param firstLevel The first level to merge.
@@ -127,9 +127,7 @@ public class Group extends Identifiable {
}
- /**
- * <p>Will sort the children by their id, if they are not sorted already.</p>
- */
+ /** Sorts the children by their id, if they are not sorted already. */
public void sortChildrenById() {
if (sortType == SortType.BYID) {
return;
@@ -142,9 +140,7 @@ public class Group extends Identifiable {
sortType = SortType.BYID;
}
- /**
- * <p>Will sort the children by their rank, if they are not sorted already.</p>
- */
+ /** Sorts the children by their rank, if they are not sorted already. */
public void sortChildrenByRank() {
if (sortType == SortType.BYRANK) {
return;
@@ -158,18 +154,16 @@ public class Group extends Identifiable {
}
/**
- * <p>Returns the label to use for this group. See comment on {@link #setId(com.yahoo.searchlib.expression.ResultNode)}
- * on the rationale of this being a {@link ResultNode}.</p>
- *
- * @return The label.
+ * Returns the label to use for this group. See comment on {@link #setId(com.yahoo.searchlib.expression.ResultNode)}
+ * on the rationale of this being a {@link ResultNode}.
*/
public ResultNode getId() {
return id;
}
/**
- * <p>Sets the label to use for this group. This is a {@link ResultNode} so that a group can be labeled with
- * whatever value the classifier expression returns.</p>
+ * Sets the label to use for this group. This is a {@link ResultNode} so that a group can be labeled with
+ * whatever value the classifier expression returns.
*
* @param id The label to set.
* @return This, to allow chaining.
@@ -180,7 +174,7 @@ public class Group extends Identifiable {
}
/**
- * <p>Sets the relevancy to use for this group.</p>
+ * Sets the relevancy to use for this group.
*
* @param rank The rank to set.
* @return This, to allow chaining.
@@ -190,17 +184,13 @@ public class Group extends Identifiable {
return this;
}
- /**
- * <p>Return the relevancy of this group.</p>
- *
- * @return Relevance.
- */
+ /** Returns the rank score of this group. */
public double getRank() {
return rank;
}
/**
- * <p>Adds a child group to this.</p>
+ * Adds a child group to this.
*
* @param child The group to add.
* @return This, to allow chaining.
@@ -308,13 +298,7 @@ public class Group extends Identifiable {
if (diff > 0) {
return 1;
}
- if (rank > rhs.rank) {
- return -1;
- }
- if (rank < rhs.rank) {
- return 1;
- }
- return 0;
+ return -Double.compare(rank, rhs.rank);
}
@Override
diff --git a/searchlib/src/main/java/com/yahoo/searchlib/aggregation/Hit.java b/searchlib/src/main/java/com/yahoo/searchlib/aggregation/Hit.java
index 6b2ce5c3b72..663bc18dc14 100644
--- a/searchlib/src/main/java/com/yahoo/searchlib/aggregation/Hit.java
+++ b/searchlib/src/main/java/com/yahoo/searchlib/aggregation/Hit.java
@@ -79,14 +79,14 @@ public abstract class Hit extends Identifiable {
return super.hashCode() + (int)rank;
}
- @SuppressWarnings({ "RedundantIfStatement", "EqualsWhichDoesntCheckParameterClass" })
+ @SuppressWarnings({ "RedundantIfStatement" })
@Override
public boolean equals(Object obj) {
if (!super.equals(obj)) {
return false;
}
Hit rhs = (Hit)obj;
- if (rank != rhs.rank) {
+ if (Double.compare(rank, rhs.rank) != 0) {
return false;
}
if (!equals(context, rhs.context)) {
@@ -101,4 +101,5 @@ public abstract class Hit extends Identifiable {
visitor.visit("rank", rank);
visitor.visit("context", context);
}
+
}
diff --git a/searchlib/src/main/java/com/yahoo/searchlib/aggregation/HitsAggregationResult.java b/searchlib/src/main/java/com/yahoo/searchlib/aggregation/HitsAggregationResult.java
index 275f38f7350..63b2b881e01 100644
--- a/searchlib/src/main/java/com/yahoo/searchlib/aggregation/HitsAggregationResult.java
+++ b/searchlib/src/main/java/com/yahoo/searchlib/aggregation/HitsAggregationResult.java
@@ -73,8 +73,8 @@ public class HitsAggregationResult extends AggregationResult {
/**
* Sets the summary class of hits to collect.
*
- * @param summaryClass The summary class to collect.
- * @return This, to allow chaining.
+ * @param summaryClass the summary class to collect.
+ * @return this, to allow chaining.
*/
public HitsAggregationResult setSummaryClass(String summaryClass) {
this.summaryClass = summaryClass;
@@ -84,8 +84,8 @@ public class HitsAggregationResult extends AggregationResult {
/**
* Sets the maximum number of hits to collect.
*
- * @param maxHits The number of hits to collect.
- * @return This, to allow chaining.
+ * @param maxHits the number of hits to collect.
+ * @return this, to allow chaining.
*/
public HitsAggregationResult setMaxHits(int maxHits) {
this.maxHits = maxHits;
@@ -102,7 +102,7 @@ public class HitsAggregationResult extends AggregationResult {
}
/**
- * Add a hit to this aggregation result
+ * Adds a hit to this aggregation result
*
* @param h the hit
* @return this object
@@ -159,11 +159,7 @@ public class HitsAggregationResult extends AggregationResult {
@Override
public void postMerge() {
- Collections.sort(hits, new Comparator<Hit>() {
- public int compare(Hit lhs, Hit rhs) {
- return (lhs.getRank() > rhs.getRank()) ? -1 : (lhs.getRank() < rhs.getRank()) ? 1 : 0;
- }
- });
+ hits.sort((lhs, rhs) -> -Double.compare(lhs.getRank(), rhs.getRank()));
if ((maxHits >= 0) && (hits.size() > maxHits)) {
hits = hits.subList(0, maxHits);
}
@@ -172,15 +168,9 @@ public class HitsAggregationResult extends AggregationResult {
@Override
protected boolean equalsAggregation(AggregationResult obj) {
HitsAggregationResult rhs = (HitsAggregationResult)obj;
- if (!summaryClass.equals(rhs.summaryClass)) {
- return false;
- }
- if (maxHits != rhs.maxHits) {
- return false;
- }
- if (!hits.equals(rhs.hits)) {
- return false;
- }
+ if ( ! summaryClass.equals(rhs.summaryClass)) return false;
+ if (maxHits != rhs.maxHits) return false;
+ if ( ! hits.equals(rhs.hits)) return false;
return true;
}
@@ -215,4 +205,5 @@ public class HitsAggregationResult extends AggregationResult {
hit.select(predicate, operation);
}
}
+
}
diff --git a/searchlib/src/test/java/com/yahoo/searchlib/aggregation/GroupingTestCase.java b/searchlib/src/test/java/com/yahoo/searchlib/aggregation/GroupingTestCase.java
index fe5405ecb6a..ec379e5f8af 100644
--- a/searchlib/src/test/java/com/yahoo/searchlib/aggregation/GroupingTestCase.java
+++ b/searchlib/src/test/java/com/yahoo/searchlib/aggregation/GroupingTestCase.java
@@ -223,5 +223,6 @@ public class GroupingTestCase {
Grouping other = (Grouping)Grouping.create(buf);
assertEquals(grouping, other);
}
+
}
diff --git a/searchlib/src/tests/attribute/attribute_test.cpp b/searchlib/src/tests/attribute/attribute_test.cpp
index 98caf39dace..2f3df28cc6f 100644
--- a/searchlib/src/tests/attribute/attribute_test.cpp
+++ b/searchlib/src/tests/attribute/attribute_test.cpp
@@ -15,6 +15,7 @@
#include <vespa/searchlib/attribute/multistringattribute.h>
#include <vespa/searchlib/attribute/predicate_attribute.h>
#include <vespa/searchlib/attribute/singlenumericattribute.h>
+#include <vespa/searchlib/attribute/singlenumericpostattribute.h>
#include <vespa/searchlib/attribute/singlestringattribute.h>
#include <vespa/searchlib/index/dummyfileheadercontext.h>
#include <vespa/searchlib/util/randomgenerator.h>
@@ -241,6 +242,8 @@ private:
void testCompactLidSpace();
+ void test_default_value_ref_count_is_updated_after_shrink_lid_space();
+
template <typename AttributeType>
void requireThatAddressSpaceUsageIsReported(const Config &config, bool fastSearch);
template <typename AttributeType>
@@ -2019,6 +2022,43 @@ AttributeTest::testCompactLidSpace()
TEST_DO(testCompactLidSpace(Config(BasicType::PREDICATE, CollectionType::SINGLE)));
}
+namespace {
+
+uint32_t
+get_default_value_ref_count(AttributeVector &attr)
+{
+ auto *enum_store_base = attr.getEnumStoreBase();
+ auto &enum_store = dynamic_cast<EnumStoreT<int32_t> &>(*enum_store_base);
+ IAttributeVector::EnumHandle default_value_handle(0);
+ if (enum_store.find_enum(attr.getDefaultValue(), default_value_handle)) {
+ datastore::EntryRef default_value_ref(default_value_handle);
+ assert(default_value_ref.valid());
+ return enum_store.get_ref_count(default_value_ref);
+ } else {
+ return 0u;
+ }
+}
+
+}
+
+
+void
+AttributeTest::test_default_value_ref_count_is_updated_after_shrink_lid_space()
+{
+ Config cfg(BasicType::INT32, CollectionType::SINGLE);
+ cfg.setFastSearch(true);
+ vespalib::string name = "shrink";
+ AttributePtr attr = AttributeFactory::createAttribute(name, cfg);
+ attr->addReservedDoc();
+ attr->addDocs(10);
+ EXPECT_EQUAL(11u, get_default_value_ref_count(*attr));
+ attr->compactLidSpace(6);
+ EXPECT_EQUAL(11u, get_default_value_ref_count(*attr));
+ attr->shrinkLidSpace();
+ EXPECT_EQUAL(6u, attr->getNumDocs());
+ EXPECT_EQUAL(6u, get_default_value_ref_count(*attr));
+}
+
template <typename AttributeType>
void
AttributeTest::requireThatAddressSpaceUsageIsReported(const Config &config, bool fastSearch)
@@ -2223,6 +2263,7 @@ int AttributeTest::Main()
testCreateSerialNum();
testPredicateHeaderTags();
TEST_DO(testCompactLidSpace());
+ TEST_DO(test_default_value_ref_count_is_updated_after_shrink_lid_space());
TEST_DO(requireThatAddressSpaceUsageIsReported());
testReaderDuringLastUpdate();
TEST_DO(testPendingCompaction());
diff --git a/searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp b/searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp
index d636de46426..63c1b320fb8 100644
--- a/searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp
+++ b/searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp
@@ -131,6 +131,7 @@ checkStats(IDataStore &store,
}
+#ifdef __linux__
TEST("test that DirectIOPadding works accordng to spec") {
constexpr ssize_t FILE_SIZE = 4096*3;
FastOS_File file("directio.test");
@@ -199,6 +200,7 @@ TEST("test that DirectIOPadding works accordng to spec") {
EXPECT_TRUE(file.Close());
FastOS_File::Delete(file.GetFileName());
}
+#endif
TEST("testGrowing") {
FastOS_File::EmptyAndRemoveDirectory("growing");
@@ -287,6 +289,7 @@ TEST("testTruncatedIdxFile"){
}
const char * magic = "mumbo jumbo";
{
+ truncate("bug-7257706-truncated/1422358701368384000.idx", 3830);
LogDataStore datastore(executor, "bug-7257706-truncated", config, GrowStrategy(),
TuneFileSummary(), fileHeaderContext, tlSyncer, nullptr);
EXPECT_EQUAL(331ul, datastore.lastSyncToken());
@@ -843,7 +846,7 @@ struct Fixture {
Fixture(const vespalib::string &dirName = "tmp",
bool dirCleanup = true,
size_t maxFileSize = 4096 * 2)
- : executor(1, 0x10000),
+ : executor(1, 0x20000),
dir(dirName),
serialNum(0),
fileHeaderCtx(),
diff --git a/searchlib/src/tests/docstore/logdatastore/logdatastore_test.sh b/searchlib/src/tests/docstore/logdatastore/logdatastore_test.sh
index 45b3c804014..4aeea02efe2 100755
--- a/searchlib/src/tests/docstore/logdatastore/logdatastore_test.sh
+++ b/searchlib/src/tests/docstore/logdatastore/logdatastore_test.sh
@@ -23,7 +23,6 @@ cp -a $SOURCE_DIRECTORY/bug-7257706/1422358701368384000.idx incompletecompact-te
cp -a $SOURCE_DIRECTORY/bug-7257706/1422358701368384000.dat incompletecompact-test/2422358701368384000.dat
cp -a $SOURCE_DIRECTORY/bug-7257706/1422358701368384000.idx incompletecompact-test/2422358701368384000.idx
-truncate --size 3830 bug-7257706-truncated/1422358701368384000.idx
fail=0
VESPA_LOG_TARGET=file:vlog2.txt $VALGRIND ./searchlib_logdatastore_test_app || fail=1
rm -rf bug-7257706-truncated dangling-test incompletecompact-test
diff --git a/searchlib/src/tests/queryeval/same_element/same_element_test.cpp b/searchlib/src/tests/queryeval/same_element/same_element_test.cpp
index c24a11833e7..622c8077c14 100644
--- a/searchlib/src/tests/queryeval/same_element/same_element_test.cpp
+++ b/searchlib/src/tests/queryeval/same_element/same_element_test.cpp
@@ -9,6 +9,7 @@
#include <vespa/searchlib/queryeval/emptysearch.h>
#include <vespa/searchcommon/attribute/i_search_context.h>
#include <vespa/searchlib/attribute/elementiterator.h>
+#include <vespa/vespalib/test/insertion_operators.h>
using namespace search::fef;
using namespace search::queryeval;
diff --git a/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp b/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp
index 2a544814710..cda2bea9fb1 100644
--- a/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp
@@ -78,6 +78,19 @@ EnumStoreDictionary<DictionaryT>::free_unused_values(const IndexSet& to_remove,
}
template <typename DictionaryT>
+void
+EnumStoreDictionary<DictionaryT>::remove(const EntryComparator &comp, EntryRef ref)
+{
+ assert(ref.valid());
+ auto itr = this->_dict.lowerBound(ref, comp);
+ assert(itr.valid() && itr.getKey() == ref);
+ if constexpr (std::is_same_v<DictionaryT, EnumPostingTree>) {
+ assert(EntryRef(itr.getData()) == EntryRef());
+ }
+ this->_dict.remove(itr);
+}
+
+template <typename DictionaryT>
bool
EnumStoreDictionary<DictionaryT>::find_index(const datastore::EntryComparator& cmp,
Index& idx) const
@@ -184,9 +197,13 @@ EnumStoreFoldedDictionary::remove(const EntryComparator& comp, EntryRef ref)
EntryRef posting_list_ref(it.getData());
_dict.remove(it);
// Maybe copy posting list reference to next entry
- if (posting_list_ref.valid() && it.valid() && !EntryRef(it.getData()).valid() && !(*_folded_compare)(ref, it.getKey())) {
- this->_dict.thaw(it);
- it.writeData(posting_list_ref.ref());
+ if (posting_list_ref.valid()) {
+ if (it.valid() && !EntryRef(it.getData()).valid() && !(*_folded_compare)(ref, it.getKey())) {
+ this->_dict.thaw(it);
+ it.writeData(posting_list_ref.ref());
+ } else {
+ LOG_ABORT("Posting list not cleared for removed unique value");
+ }
}
}
diff --git a/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.h b/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.h
index 5e7a63e68b4..c2c4c96c2d9 100644
--- a/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.h
+++ b/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.h
@@ -41,6 +41,7 @@ public:
void free_unused_values(const IndexSet& to_remove,
const datastore::EntryComparator& cmp) override;
+ void remove(const datastore::EntryComparator& comp, datastore::EntryRef ref) override;
bool find_index(const datastore::EntryComparator& cmp, Index& idx) const override;
bool find_frozen_index(const datastore::EntryComparator& cmp, Index& idx) const override;
std::vector<attribute::IAttributeVector::EnumHandle>
diff --git a/searchlib/src/vespa/searchlib/attribute/enumattribute.hpp b/searchlib/src/vespa/searchlib/attribute/enumattribute.hpp
index 897d8a10ec6..b527f89b224 100644
--- a/searchlib/src/vespa/searchlib/attribute/enumattribute.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/enumattribute.hpp
@@ -41,6 +41,7 @@ void EnumAttribute<B>::load_enum_store(LoadedVector& loaded)
prev = value.getValue();
prevRefCount = 1;
} else {
+ assert(prevRefCount < std::numeric_limits<uint32_t>::max());
prevRefCount++;
}
value.setEidx(index);
diff --git a/searchlib/src/vespa/searchlib/attribute/loadedenumvalue.h b/searchlib/src/vespa/searchlib/attribute/loadedenumvalue.h
index a8d6fc3ba68..275fadd1e7f 100644
--- a/searchlib/src/vespa/searchlib/attribute/loadedenumvalue.h
+++ b/searchlib/src/vespa/searchlib/attribute/loadedenumvalue.h
@@ -138,6 +138,7 @@ public:
(void) docId;
(void) weight;
assert(e < _hist.size());
+ assert(_hist[e] < std::numeric_limits<uint32_t>::max());
++_hist[e];
}
};
diff --git a/searchlib/src/vespa/searchlib/attribute/postingchange.cpp b/searchlib/src/vespa/searchlib/attribute/postingchange.cpp
index a98c030bcee..066b391f514 100644
--- a/searchlib/src/vespa/searchlib/attribute/postingchange.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/postingchange.cpp
@@ -130,36 +130,6 @@ PostingChange<P>::PostingChange() = default;
template <typename P>
PostingChange<P>::~PostingChange() = default;
-template <typename P>
-void
-PostingChange<P>::apply(GrowableBitVector &bv)
-{
- P *a = &_additions[0];
- P *ae = &_additions[0] + _additions.size();
- uint32_t *r = &_removals[0];
- uint32_t *re = &_removals[0] + _removals.size();
-
- while (a != ae || r != re) {
- if (r != re && (a == ae || *r < a->_key)) {
- // remove
- assert(*r < bv.size());
- bv.slowClearBit(*r);
- ++r;
- } else {
- if (r != re && !(a->_key < *r)) {
- // update or add
- assert(a->_key < bv.size());
- bv.slowSetBit(a->_key);
- ++r;
- } else {
- assert(a->_key < bv.size());
- bv.slowSetBit(a->_key);
- }
- ++a;
- }
- }
-}
-
template <typename WeightedIndex>
class ActualChangeComputer {
public:
diff --git a/searchlib/src/vespa/searchlib/attribute/postingchange.h b/searchlib/src/vespa/searchlib/attribute/postingchange.h
index 569fbfd6517..26b004270bf 100644
--- a/searchlib/src/vespa/searchlib/attribute/postingchange.h
+++ b/searchlib/src/vespa/searchlib/attribute/postingchange.h
@@ -43,7 +43,6 @@ public:
* posting list tree doesn't support duplicate entries.
*/
void removeDups();
- void apply(GrowableBitVector &bv);
};
class EnumIndexMapper
diff --git a/searchlib/src/vespa/searchlib/attribute/postinglistattribute.cpp b/searchlib/src/vespa/searchlib/attribute/postinglistattribute.cpp
index 21bbec729df..19ef92c9356 100644
--- a/searchlib/src/vespa/searchlib/attribute/postinglistattribute.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/postinglistattribute.cpp
@@ -93,6 +93,7 @@ PostingListAttributeBase<P>::handle_load_posting_lists_and_update_enum_store(enu
postings.clear();
}
}
+ assert(refCount < std::numeric_limits<uint32_t>::max());
++refCount;
assert(elem.getDocId() < docIdLimit);
(void) docIdLimit;
diff --git a/searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp b/searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp
index 265116f3bd1..5ad1629e673 100644
--- a/searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp
@@ -284,6 +284,17 @@ SingleValueEnumAttribute<B>::onShrinkLidSpace()
if (pab != nullptr) {
pab->clearPostings(e, committedDocIdLimit, _enumIndices.size());
}
+ uint32_t shrink_docs = _enumIndices.size() - committedDocIdLimit;
+ if (shrink_docs > 0u) {
+ datastore::EntryRef default_value_ref(e);
+ assert(default_value_ref.valid());
+ uint32_t default_value_ref_count = this->_enumStore.get_ref_count(default_value_ref);
+ assert(default_value_ref_count >= shrink_docs);
+ this->_enumStore.set_ref_count(default_value_ref, default_value_ref_count - shrink_docs);
+ IEnumStore::IndexSet possibly_unused;
+ possibly_unused.insert(default_value_ref);
+ this->_enumStore.free_unused_values(possibly_unused);
+ }
_enumIndices.shrink(committedDocIdLimit);
this->setNumDocs(committedDocIdLimit);
}
diff --git a/searchsummary/src/tests/docsummary/attribute_combiner/CMakeLists.txt b/searchsummary/src/tests/docsummary/attribute_combiner/CMakeLists.txt
index df323b9c982..cffdef25e5b 100644
--- a/searchsummary/src/tests/docsummary/attribute_combiner/CMakeLists.txt
+++ b/searchsummary/src/tests/docsummary/attribute_combiner/CMakeLists.txt
@@ -1,8 +1,10 @@
# Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+find_package(GTest REQUIRED)
vespa_add_executable(searchsummary_attribute_combiner_test_app TEST
SOURCES
attribute_combiner_test.cpp
DEPENDS
searchsummary
+ GTest::GTest
)
vespa_add_test(NAME searchsummary_attribute_combiner_test_app COMMAND searchsummary_attribute_combiner_test_app)
diff --git a/searchsummary/src/tests/docsummary/attribute_combiner/attribute_combiner_test.cpp b/searchsummary/src/tests/docsummary/attribute_combiner/attribute_combiner_test.cpp
index 60b1574a8d5..961acadeddf 100644
--- a/searchsummary/src/tests/docsummary/attribute_combiner/attribute_combiner_test.cpp
+++ b/searchsummary/src/tests/docsummary/attribute_combiner/attribute_combiner_test.cpp
@@ -8,12 +8,13 @@
#include <vespa/searchlib/attribute/floatbase.h>
#include <vespa/searchlib/attribute/integerbase.h>
#include <vespa/searchlib/attribute/stringbase.h>
+#include <vespa/searchlib/common/matching_elements.h>
#include <vespa/searchlib/util/slime_output_raw_buf_adapter.h>
#include <vespa/searchsummary/docsummary/docsumstate.h>
#include <vespa/searchsummary/docsummary/docsum_field_writer_state.h>
#include <vespa/searchsummary/docsummary/attribute_combiner_dfw.h>
#include <vespa/vespalib/data/slime/slime.h>
-#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/gtest/gtest.h>
#include <vespa/log/log.h>
LOG_SETUP("attribute_combiner_test");
@@ -23,6 +24,7 @@ using search::AttributeManager;
using search::AttributeVector;
using search::IntegerAttribute;
using search::FloatingPointAttribute;
+using search::MatchingElements;
using search::StringAttribute;
using search::attribute::BasicType;
using search::attribute::CollectionType;
@@ -122,7 +124,7 @@ AttributeManagerFixture::buildAttribute(const vespalib::string &name,
for (const auto &docValues : values) {
uint32_t docId = 0;
EXPECT_TRUE(attr->addDoc(docId));
- EXPECT_NOT_EQUAL(0u, docId);
+ EXPECT_NE(0u, docId);
for (const auto &value : docValues) {
attr->append(docId, value, 1);
}
@@ -157,40 +159,65 @@ AttributeManagerFixture::buildIntegerAttribute(const vespalib::string &name,
class DummyStateCallback : public GetDocsumsStateCallback
{
public:
+ MatchingElements _matching_elements;
+
+ DummyStateCallback();
void FillSummaryFeatures(GetDocsumsState *, IDocsumEnvironment *) override { }
void FillRankFeatures(GetDocsumsState *, IDocsumEnvironment *) override { }
void ParseLocation(GetDocsumsState *) override { }
+ std::unique_ptr<MatchingElements> fill_matching_elements() override { return std::make_unique<MatchingElements>(_matching_elements); }
~DummyStateCallback() override { }
};
+DummyStateCallback::DummyStateCallback()
+ : GetDocsumsStateCallback(),
+ _matching_elements()
+{
+ _matching_elements.add_matching_elements(1, "array", {1});
+ _matching_elements.add_matching_elements(3, "array", {0});
+ _matching_elements.add_matching_elements(4, "array", {1});
+ _matching_elements.add_matching_elements(1, "smap", {1});
+ _matching_elements.add_matching_elements(3, "smap", {0});
+ _matching_elements.add_matching_elements(4, "smap", {1});
+ _matching_elements.add_matching_elements(1, "map", {1});
+ _matching_elements.add_matching_elements(3, "map", {0});
+ _matching_elements.add_matching_elements(4, "map", {1});
+}
-struct Fixture
+struct AttributeCombinerTest : public ::testing::Test
{
AttributeManagerFixture attrs;
std::unique_ptr<IDocsumFieldWriter> writer;
DummyStateCallback stateCallback;
GetDocsumsState state;
- Fixture(const vespalib::string &fieldName);
- ~Fixture();
+ AttributeCombinerTest();
+ ~AttributeCombinerTest();
+ void set_field(const vespalib::string &field_name, bool filter_elements);
void assertWritten(const vespalib::string &exp, uint32_t docId);
};
-Fixture::Fixture(const vespalib::string &fieldName)
+AttributeCombinerTest::AttributeCombinerTest()
: attrs(),
- writer(AttributeCombinerDFW::create(fieldName, attrs.mgr)),
+ writer(),
stateCallback(),
state(stateCallback)
{
- EXPECT_TRUE(writer->setFieldWriterStateIndex(0));
state._attrCtx = attrs.mgr.createContext();
- state._fieldWriterStates.resize(1);
}
-Fixture::~Fixture() = default;
+AttributeCombinerTest::~AttributeCombinerTest() = default;
+
+void
+AttributeCombinerTest::set_field(const vespalib::string &field_name, bool filter_elements)
+{
+ writer = AttributeCombinerDFW::create(field_name, attrs.mgr, filter_elements);
+ EXPECT_TRUE(writer->setFieldWriterStateIndex(0));
+ state._fieldWriterStates.resize(1);
+}
void
-Fixture::assertWritten(const vespalib::string &expectedJson, uint32_t docId)
+AttributeCombinerTest::assertWritten(const vespalib::string &expectedJson, uint32_t docId)
{
vespalib::Slime target;
vespalib::slime::SlimeInserter inserter(target);
@@ -200,41 +227,76 @@ Fixture::assertWritten(const vespalib::string &expectedJson, uint32_t docId)
search::SlimeOutputRawBufAdapter adapter(binary);
vespalib::slime::BinaryFormat::encode(target, adapter);
FieldBlock block(expectedJson);
- if (!EXPECT_EQUAL(block.dataLen(), binary.GetUsedLen()) ||
- !EXPECT_EQUAL(0, memcmp(block.data(), binary.GetDrainPos(), block.dataLen()))) {
+ EXPECT_EQ(block.dataLen(), binary.GetUsedLen());
+ EXPECT_EQ(0, memcmp(block.data(), binary.GetDrainPos(), block.dataLen()));
+ if (block.dataLen() != binary.GetUsedLen() ||
+ memcmp(block.data(), binary.GetDrainPos(), block.dataLen()) != 0) {
LOG(error, "Expected '%s'", expectedJson.c_str());
LOG(error, "Expected normalized '%s'", block.json.c_str());
LOG(error, "Got '%s'", json.c_str());
}
}
-TEST_F("require that attribute combiner dfw generates correct slime output for array of struct", Fixture("array"))
+TEST_F(AttributeCombinerTest, require_that_attribute_combiner_dfw_generates_correct_slime_output_for_array_of_struct)
+{
+ set_field("array", false);
+ assertWritten("[ { fval: 110.0, name: \"n1.1\", val: 10}, { name: \"n1.2\", val: 11}]", 1);
+ assertWritten("[ { fval: 120.0, name: \"n2\", val: 20}, { fval: 121.0, val: 21 }]", 2);
+ assertWritten("[ { fval: 130.0, name: \"n3.1\", val: 30}, { fval: 131.0, name: \"n3.2\"} ]", 3);
+ assertWritten("[ { }, { fval: 141.0, name: \"n4.2\", val: 41} ]", 4);
+ assertWritten("null", 5);
+}
+
+TEST_F(AttributeCombinerTest, require_that_attribute_combiner_dfw_generates_correct_slime_output_for_map_of_struct)
+{
+ set_field("smap", false);
+ assertWritten("[ { key: \"k1.1\", value: { fval: 110.0, name: \"n1.1\", val: 10} }, { key: \"k1.2\", value: { name: \"n1.2\", val: 11} }]", 1);
+ assertWritten("[ { key: \"k2\", value: { fval: 120.0, name: \"n2\", val: 20} }, { value: { fval: 121.0, val: 21 } }]", 2);
+ assertWritten("[ { key: \"k3.1\", value: { fval: 130.0, name: \"n3.1\", val: 30} }, { key: \"k3.2\", value: { fval: 131.0, name: \"n3.2\"} } ]", 3);
+ assertWritten("[ { value: { } }, { key: \"k4.2\", value: { fval: 141.0, name: \"n4.2\", val: 41} } ]", 4);
+ assertWritten("null", 5);
+}
+
+TEST_F(AttributeCombinerTest, require_that_attribute_combiner_dfw_generates_correct_slime_output_for_map_of_string)
+{
+ set_field("map", false);
+ assertWritten("[ { key: \"k1.1\", value: \"n1.1\" }, { key: \"k1.2\", value: \"n1.2\"}]", 1);
+ assertWritten("[ { key: \"k2\"}]", 2);
+ assertWritten("[ { key: \"k3.1\", value: \"n3.1\" }, { value: \"n3.2\"} ]", 3);
+ assertWritten("[ { }, { key: \"k4.2\", value: \"n4.2\" } ]", 4);
+ assertWritten("null", 5);
+}
+
+TEST_F(AttributeCombinerTest, require_that_attribute_combiner_dfw_generates_correct_slime_output_for_filtered_array_of_struct)
{
- TEST_DO(f.assertWritten("[ { fval: 110.0, name: \"n1.1\", val: 10}, { name: \"n1.2\", val: 11}]", 1));
- TEST_DO(f.assertWritten("[ { fval: 120.0, name: \"n2\", val: 20}, { fval: 121.0, val: 21 }]", 2));
- TEST_DO(f.assertWritten("[ { fval: 130.0, name: \"n3.1\", val: 30}, { fval: 131.0, name: \"n3.2\"} ]", 3));
- TEST_DO(f.assertWritten("[ { }, { fval: 141.0, name: \"n4.2\", val: 41} ]", 4));
- TEST_DO(f.assertWritten("null", 5));
+ set_field("array", true);
+ assertWritten("[ { name: \"n1.2\", val: 11}]", 1);
+ assertWritten("[ ]", 2);
+ assertWritten("[ { fval: 130.0, name: \"n3.1\", val: 30} ]", 3);
+ assertWritten("[ { fval: 141.0, name: \"n4.2\", val: 41} ]", 4);
+ assertWritten("null", 5);
}
-TEST_F("require that attribute combiner dfw generates correct slime output for map of struct", Fixture("smap"))
+TEST_F(AttributeCombinerTest, require_that_attribute_combiner_dfw_generates_correct_slime_output_for_filtered_map_of_struct)
{
- TEST_DO(f.assertWritten("[ { key: \"k1.1\", value: { fval: 110.0, name: \"n1.1\", val: 10} }, { key: \"k1.2\", value: { name: \"n1.2\", val: 11} }]", 1));
- TEST_DO(f.assertWritten("[ { key: \"k2\", value: { fval: 120.0, name: \"n2\", val: 20} }, { value: { fval: 121.0, val: 21 } }]", 2));
- TEST_DO(f.assertWritten("[ { key: \"k3.1\", value: { fval: 130.0, name: \"n3.1\", val: 30} }, { key: \"k3.2\", value: { fval: 131.0, name: \"n3.2\"} } ]", 3));
- TEST_DO(f.assertWritten("[ { value: { } }, { key: \"k4.2\", value: { fval: 141.0, name: \"n4.2\", val: 41} } ]", 4));
- TEST_DO(f.assertWritten("null", 5));
+ set_field("smap", true);
+ assertWritten("[ { key: \"k1.2\", value: { name: \"n1.2\", val: 11} }]", 1);
+ assertWritten("[ ]", 2);
+ assertWritten("[ { key: \"k3.1\", value: { fval: 130.0, name: \"n3.1\", val: 30} } ]", 3);
+ assertWritten("[ { key: \"k4.2\", value: { fval: 141.0, name: \"n4.2\", val: 41} } ]", 4);
+ assertWritten("null", 5);
}
-TEST_F("require that attribute combiner dfw generates correct slime output for map of string", Fixture("map"))
+TEST_F(AttributeCombinerTest, require_that_attribute_combiner_dfw_generates_correct_slime_output_for_filtered_map_of_string)
{
- TEST_DO(f.assertWritten("[ { key: \"k1.1\", value: \"n1.1\" }, { key: \"k1.2\", value: \"n1.2\"}]", 1));
- TEST_DO(f.assertWritten("[ { key: \"k2\"}]", 2));
- TEST_DO(f.assertWritten("[ { key: \"k3.1\", value: \"n3.1\" }, { value: \"n3.2\"} ]", 3));
- TEST_DO(f.assertWritten("[ { }, { key: \"k4.2\", value: \"n4.2\" } ]", 4));
- TEST_DO(f.assertWritten("null", 5));
+ set_field("map", true);
+ assertWritten("[ { key: \"k1.2\", value: \"n1.2\"}]", 1);
+ assertWritten("[ ]", 2);
+ assertWritten("[ { key: \"k3.1\", value: \"n3.1\" } ]", 3);
+ assertWritten("[ { key: \"k4.2\", value: \"n4.2\" } ]", 4);
+ assertWritten("null", 5);
}
}
-TEST_MAIN() { TEST_RUN_ALL(); }
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/searchsummary/src/tests/docsummary/positionsdfw_test.cpp b/searchsummary/src/tests/docsummary/positionsdfw_test.cpp
index 764ff4723cb..476891ca40f 100644
--- a/searchsummary/src/tests/docsummary/positionsdfw_test.cpp
+++ b/searchsummary/src/tests/docsummary/positionsdfw_test.cpp
@@ -3,6 +3,7 @@
#include <vespa/searchlib/attribute/extendableattributes.h>
#include <vespa/searchlib/attribute/iattributemanager.h>
+#include <vespa/searchlib/common/matching_elements.h>
#include <vespa/searchsummary/docsummary/docsumfieldwriter.h>
#include <vespa/searchsummary/docsummary/positionsdfw.h>
#include <vespa/searchsummary/docsummary/idocsumenvironment.h>
@@ -17,6 +18,7 @@ LOG_SETUP("positionsdfw_test");
using search::RawBuf;
using search::IAttributeManager;
+using search::MatchingElements;
using search::SingleInt64ExtAttribute;
using search::attribute::IAttributeContext;
using search::attribute::IAttributeVector;
@@ -104,6 +106,7 @@ struct MyGetDocsumsStateCallback : GetDocsumsStateCallback {
virtual void FillSummaryFeatures(GetDocsumsState *, IDocsumEnvironment *) override {}
virtual void FillRankFeatures(GetDocsumsState *, IDocsumEnvironment *) override {}
virtual void ParseLocation(GetDocsumsState *) override {}
+ std::unique_ptr<MatchingElements> fill_matching_elements() override { abort(); }
};
template <typename AttrType>
diff --git a/searchsummary/src/tests/docsummary/slime_summary/slime_summary_test.cpp b/searchsummary/src/tests/docsummary/slime_summary/slime_summary_test.cpp
index 5d6565ceadb..efeb066135f 100644
--- a/searchsummary/src/tests/docsummary/slime_summary/slime_summary_test.cpp
+++ b/searchsummary/src/tests/docsummary/slime_summary/slime_summary_test.cpp
@@ -1,5 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/searchlib/common/matching_elements.h>
#include <vespa/searchsummary/docsummary/docsumwriter.h>
#include <vespa/searchsummary/docsummary/resultpacker.h>
#include <vespa/searchsummary/docsummary/docsumstate.h>
@@ -9,6 +10,7 @@
using namespace vespalib::slime::convenience;
using namespace search::docsummary;
+using search::MatchingElements;
namespace {
@@ -77,6 +79,7 @@ struct DocsumFixture : IDocsumStore, GetDocsumsStateCallback {
void FillSummaryFeatures(GetDocsumsState *, IDocsumEnvironment *) override { }
void FillRankFeatures(GetDocsumsState *, IDocsumEnvironment *) override { }
void ParseLocation(GetDocsumsState *) override { }
+ std::unique_ptr<MatchingElements> fill_matching_elements() override { abort(); }
};
diff --git a/searchsummary/src/vespa/searchsummary/docsummary/array_attribute_combiner_dfw.cpp b/searchsummary/src/vespa/searchsummary/docsummary/array_attribute_combiner_dfw.cpp
index 6a643a74458..9d4d3829047 100644
--- a/searchsummary/src/vespa/searchsummary/docsummary/array_attribute_combiner_dfw.cpp
+++ b/searchsummary/src/vespa/searchsummary/docsummary/array_attribute_combiner_dfw.cpp
@@ -5,7 +5,9 @@
#include "attribute_field_writer.h"
#include <vespa/searchcommon/attribute/iattributecontext.h>
#include <vespa/searchcommon/attribute/iattributevector.h>
+#include <vespa/searchlib/common/matching_elements.h>
#include <vespa/vespalib/data/slime/cursor.h>
+#include <cassert>
using search::attribute::IAttributeContext;
using search::attribute::IAttributeVector;
@@ -18,20 +20,29 @@ namespace {
class ArrayAttributeFieldWriterState : public DocsumFieldWriterState
{
std::vector<std::unique_ptr<AttributeFieldWriter>> _writers;
+ const vespalib::string& _field_name;
+ const MatchingElements* const _matching_elements;
public:
ArrayAttributeFieldWriterState(const std::vector<vespalib::string> &fieldNames,
const std::vector<vespalib::string> &attributeNames,
- IAttributeContext &context);
+ IAttributeContext &context,
+ const vespalib::string &field_name,
+ const MatchingElements* matching_elements);
~ArrayAttributeFieldWriterState() override;
+ void insert_element(uint32_t element_index, Cursor &array);
void insertField(uint32_t docId, vespalib::slime::Inserter &target) override;
};
ArrayAttributeFieldWriterState::ArrayAttributeFieldWriterState(const std::vector<vespalib::string> &fieldNames,
const std::vector<vespalib::string> &attributeNames,
- IAttributeContext &context)
+ IAttributeContext &context,
+ const vespalib::string &field_name,
+ const MatchingElements *matching_elements)
: DocsumFieldWriterState(),
- _writers()
+ _writers(),
+ _field_name(field_name),
+ _matching_elements(matching_elements)
{
size_t fields = fieldNames.size();
_writers.reserve(fields);
@@ -46,6 +57,15 @@ ArrayAttributeFieldWriterState::ArrayAttributeFieldWriterState(const std::vector
ArrayAttributeFieldWriterState::~ArrayAttributeFieldWriterState() = default;
void
+ArrayAttributeFieldWriterState::insert_element(uint32_t element_index, Cursor &array)
+{
+ Cursor &obj = array.addObject();
+ for (auto &writer : _writers) {
+ writer->print(element_index, obj);
+ }
+}
+
+void
ArrayAttributeFieldWriterState::insertField(uint32_t docId, vespalib::slime::Inserter &target)
{
uint32_t elems = 0;
@@ -59,10 +79,19 @@ ArrayAttributeFieldWriterState::insertField(uint32_t docId, vespalib::slime::Ins
return;
}
Cursor &arr = target.insertArray();
- for (uint32_t idx = 0; idx < elems; ++idx) {
- Cursor &obj = arr.addObject();
- for (auto &writer : _writers) {
- writer->print(idx, obj);
+ if (_matching_elements != nullptr) {
+ auto &elements = _matching_elements->get_matching_elements(docId, _field_name);
+ auto elements_iterator = elements.cbegin();
+ for (uint32_t idx = 0; idx < elems && elements_iterator != elements.cend(); ++idx) {
+ assert(*elements_iterator >= idx);
+ if (*elements_iterator == idx) {
+ insert_element(idx, arr);
+ ++elements_iterator;
+ }
+ }
+ } else {
+ for (uint32_t idx = 0; idx < elems; ++idx) {
+ insert_element(idx, arr);
}
}
}
@@ -70,8 +99,9 @@ ArrayAttributeFieldWriterState::insertField(uint32_t docId, vespalib::slime::Ins
}
ArrayAttributeCombinerDFW::ArrayAttributeCombinerDFW(const vespalib::string &fieldName,
- const std::vector<vespalib::string> &fields)
- : AttributeCombinerDFW(fieldName),
+ const std::vector<vespalib::string> &fields,
+ bool filter_elements)
+ : AttributeCombinerDFW(fieldName, filter_elements),
_fields(fields),
_attributeNames()
{
@@ -85,9 +115,9 @@ ArrayAttributeCombinerDFW::ArrayAttributeCombinerDFW(const vespalib::string &fie
ArrayAttributeCombinerDFW::~ArrayAttributeCombinerDFW() = default;
std::unique_ptr<DocsumFieldWriterState>
-ArrayAttributeCombinerDFW::allocFieldWriterState(IAttributeContext &context)
+ArrayAttributeCombinerDFW::allocFieldWriterState(IAttributeContext &context, const MatchingElements* matching_elements)
{
- return std::make_unique<ArrayAttributeFieldWriterState>(_fields, _attributeNames, context);
+ return std::make_unique<ArrayAttributeFieldWriterState>(_fields, _attributeNames, context, _fieldName, matching_elements);
}
}
diff --git a/searchsummary/src/vespa/searchsummary/docsummary/array_attribute_combiner_dfw.h b/searchsummary/src/vespa/searchsummary/docsummary/array_attribute_combiner_dfw.h
index c02d2bd5da6..15c3030c782 100644
--- a/searchsummary/src/vespa/searchsummary/docsummary/array_attribute_combiner_dfw.h
+++ b/searchsummary/src/vespa/searchsummary/docsummary/array_attribute_combiner_dfw.h
@@ -19,10 +19,11 @@ class ArrayAttributeCombinerDFW : public AttributeCombinerDFW
std::vector<vespalib::string> _fields;
std::vector<vespalib::string> _attributeNames;
- std::unique_ptr<DocsumFieldWriterState> allocFieldWriterState(search::attribute::IAttributeContext &context) override;
+ std::unique_ptr<DocsumFieldWriterState> allocFieldWriterState(search::attribute::IAttributeContext &context, const MatchingElements* matching_elements) override;
public:
ArrayAttributeCombinerDFW(const vespalib::string &fieldName,
- const std::vector<vespalib::string> &fields);
+ const std::vector<vespalib::string> &fields,
+ bool filter_elements);
~ArrayAttributeCombinerDFW() override;
};
diff --git a/searchsummary/src/vespa/searchsummary/docsummary/attribute_combiner_dfw.cpp b/searchsummary/src/vespa/searchsummary/docsummary/attribute_combiner_dfw.cpp
index ef621dc5b4a..ed053ce0115 100644
--- a/searchsummary/src/vespa/searchsummary/docsummary/attribute_combiner_dfw.cpp
+++ b/searchsummary/src/vespa/searchsummary/docsummary/attribute_combiner_dfw.cpp
@@ -89,9 +89,10 @@ StructFields::~StructFields() = default;
}
-AttributeCombinerDFW::AttributeCombinerDFW(const vespalib::string &fieldName)
+AttributeCombinerDFW::AttributeCombinerDFW(const vespalib::string &fieldName, bool filter_elements)
: ISimpleDFW(),
_stateIndex(0),
+ _filter_elements(filter_elements),
_fieldName(fieldName)
{
}
@@ -112,15 +113,15 @@ AttributeCombinerDFW::setFieldWriterStateIndex(uint32_t fieldWriterStateIndex)
}
std::unique_ptr<IDocsumFieldWriter>
-AttributeCombinerDFW::create(const vespalib::string &fieldName, IAttributeManager &attrMgr)
+AttributeCombinerDFW::create(const vespalib::string &fieldName, IAttributeManager &attrMgr, bool filter_elements)
{
StructFields structFields(fieldName, attrMgr);
if (structFields.getError()) {
return std::unique_ptr<IDocsumFieldWriter>();
} else if (!structFields.getMapFields().empty()) {
- return std::make_unique<StructMapAttributeCombinerDFW>(fieldName, structFields.getMapFields());
+ return std::make_unique<StructMapAttributeCombinerDFW>(fieldName, structFields.getMapFields(), filter_elements);
}
- return std::make_unique<ArrayAttributeCombinerDFW>(fieldName, structFields.getArrayFields());
+ return std::make_unique<ArrayAttributeCombinerDFW>(fieldName, structFields.getArrayFields(), filter_elements);
}
void
@@ -128,7 +129,11 @@ AttributeCombinerDFW::insertField(uint32_t docid, GetDocsumsState *state, ResTyp
{
auto &fieldWriterState = state->_fieldWriterStates[_stateIndex];
if (!fieldWriterState) {
- fieldWriterState = allocFieldWriterState(*state->_attrCtx);
+ const MatchingElements *matching_elements = nullptr;
+ if (_filter_elements) {
+ matching_elements = &state->get_matching_elements();
+ }
+ fieldWriterState = allocFieldWriterState(*state->_attrCtx, matching_elements);
}
fieldWriterState->insertField(docid, target);
}
diff --git a/searchsummary/src/vespa/searchsummary/docsummary/attribute_combiner_dfw.h b/searchsummary/src/vespa/searchsummary/docsummary/attribute_combiner_dfw.h
index 6c0991fdf92..ec806ba0ac6 100644
--- a/searchsummary/src/vespa/searchsummary/docsummary/attribute_combiner_dfw.h
+++ b/searchsummary/src/vespa/searchsummary/docsummary/attribute_combiner_dfw.h
@@ -4,6 +4,7 @@
#include "docsumfieldwriter.h"
+namespace search { class MatchingElements; }
namespace search::attribute { class IAttributeContext; }
namespace search::docsummary {
@@ -19,15 +20,16 @@ class AttributeCombinerDFW : public ISimpleDFW
{
protected:
uint32_t _stateIndex;
+ const bool _filter_elements;
vespalib::string _fieldName;
- AttributeCombinerDFW(const vespalib::string &fieldName);
+ AttributeCombinerDFW(const vespalib::string &fieldName, bool filter_elements);
protected:
- virtual std::unique_ptr<DocsumFieldWriterState> allocFieldWriterState(search::attribute::IAttributeContext &context) = 0;
+ virtual std::unique_ptr<DocsumFieldWriterState> allocFieldWriterState(search::attribute::IAttributeContext &context, const MatchingElements* matching_elements) = 0;
public:
~AttributeCombinerDFW() override;
bool IsGenerated() const override;
bool setFieldWriterStateIndex(uint32_t fieldWriterStateIndex) override;
- static std::unique_ptr<IDocsumFieldWriter> create(const vespalib::string &fieldName, IAttributeManager &attrMgr);
+ static std::unique_ptr<IDocsumFieldWriter> create(const vespalib::string &fieldName, IAttributeManager &attrMgr, bool filter_elements);
void insertField(uint32_t docid, GetDocsumsState *state, ResType type, vespalib::slime::Inserter &target) override;
};
diff --git a/searchsummary/src/vespa/searchsummary/docsummary/docsumconfig.cpp b/searchsummary/src/vespa/searchsummary/docsummary/docsumconfig.cpp
index b95591546b5..3a06346c7b4 100644
--- a/searchsummary/src/vespa/searchsummary/docsummary/docsumconfig.cpp
+++ b/searchsummary/src/vespa/searchsummary/docsummary/docsumconfig.cpp
@@ -94,7 +94,13 @@ DynamicDocsumConfig::createFieldWriter(const string & fieldName, const string &
}
} else if (overrideName == "attributecombiner") {
if (getEnvironment() && getEnvironment()->getAttributeManager()) {
- fieldWriter = AttributeCombinerDFW::create(fieldName, *getEnvironment()->getAttributeManager());
+ fieldWriter = AttributeCombinerDFW::create(fieldName, *getEnvironment()->getAttributeManager(), false);
+ rc = static_cast<bool>(fieldWriter);
+ }
+ } else if (overrideName == "matchedattributeelementsfilter") {
+ string source_field = argument.empty() ? fieldName : argument;
+ if (getEnvironment() && getEnvironment()->getAttributeManager()) {
+ fieldWriter = AttributeCombinerDFW::create(source_field, *getEnvironment()->getAttributeManager(), true);
rc = static_cast<bool>(fieldWriter);
}
} else {
diff --git a/searchsummary/src/vespa/searchsummary/docsummary/docsumstate.cpp b/searchsummary/src/vespa/searchsummary/docsummary/docsumstate.cpp
index b0431b6e6ac..7a609f2c971 100644
--- a/searchsummary/src/vespa/searchsummary/docsummary/docsumstate.cpp
+++ b/searchsummary/src/vespa/searchsummary/docsummary/docsumstate.cpp
@@ -4,10 +4,10 @@
#include <vespa/juniper/rpinterface.h>
#include <vespa/searchcommon/attribute/iattributecontext.h>
#include <vespa/searchlib/common/location.h>
+#include <vespa/searchlib/common/matching_elements.h>
#include "docsum_field_writer_state.h"
-namespace search {
-namespace docsummary {
+namespace search::docsummary {
GetDocsumsState::GetDocsumsState(GetDocsumsStateCallback &callback)
: _args(),
@@ -25,7 +25,8 @@ GetDocsumsState::GetDocsumsState(GetDocsumsStateCallback &callback)
_parsedLocation(),
_summaryFeatures(NULL),
_summaryFeaturesCached(false),
- _rankFeatures(NULL)
+ _rankFeatures(NULL),
+ _matching_elements()
{
_dynteaser._docid = static_cast<uint32_t>(-1);
_dynteaser._input = static_cast<uint32_t>(-1);
@@ -48,5 +49,13 @@ GetDocsumsState::~GetDocsumsState()
}
}
+const MatchingElements &
+GetDocsumsState::get_matching_elements()
+{
+ if (!_matching_elements) {
+ _matching_elements = _callback.fill_matching_elements();
+ }
+ return *_matching_elements;
}
+
}
diff --git a/searchsummary/src/vespa/searchsummary/docsummary/docsumstate.h b/searchsummary/src/vespa/searchsummary/docsummary/docsumstate.h
index fa47d5244eb..9eec51e3459 100644
--- a/searchsummary/src/vespa/searchsummary/docsummary/docsumstate.h
+++ b/searchsummary/src/vespa/searchsummary/docsummary/docsumstate.h
@@ -13,6 +13,7 @@ namespace juniper {
class Result;
}
+namespace search { class MatchingElements; }
namespace search::common { class Location; }
namespace search::attribute {
class IAttributeContext;
@@ -31,6 +32,7 @@ public:
virtual void FillSummaryFeatures(GetDocsumsState * state, IDocsumEnvironment * env) = 0;
virtual void FillRankFeatures(GetDocsumsState * state, IDocsumEnvironment * env) = 0;
virtual void ParseLocation(GetDocsumsState * state) = 0;
+ virtual std::unique_ptr<MatchingElements> fill_matching_elements() = 0;
virtual ~GetDocsumsStateCallback(void) { }
GetDocsumsStateCallback(const GetDocsumsStateCallback &) = delete;
GetDocsumsStateCallback & operator = (const GetDocsumsStateCallback &) = delete;
@@ -84,10 +86,14 @@ public:
// used by RankFeaturesDFW
FeatureSet::SP _rankFeatures;
+ // Used by AttributeCombinerDFW when filtering is enabled
+ std::unique_ptr<search::MatchingElements> _matching_elements;
+
GetDocsumsState(const GetDocsumsState &) = delete;
GetDocsumsState& operator=(const GetDocsumsState &) = delete;
GetDocsumsState(GetDocsumsStateCallback &callback);
~GetDocsumsState();
+ const MatchingElements &get_matching_elements();
};
}
diff --git a/searchsummary/src/vespa/searchsummary/docsummary/struct_map_attribute_combiner_dfw.cpp b/searchsummary/src/vespa/searchsummary/docsummary/struct_map_attribute_combiner_dfw.cpp
index e4e30afbc4d..e34012de7de 100644
--- a/searchsummary/src/vespa/searchsummary/docsummary/struct_map_attribute_combiner_dfw.cpp
+++ b/searchsummary/src/vespa/searchsummary/docsummary/struct_map_attribute_combiner_dfw.cpp
@@ -5,7 +5,9 @@
#include "attribute_field_writer.h"
#include <vespa/searchcommon/attribute/iattributecontext.h>
#include <vespa/searchcommon/attribute/iattributevector.h>
+#include <vespa/searchlib/common/matching_elements.h>
#include <vespa/vespalib/data/slime/cursor.h>
+#include <cassert>
using search::attribute::IAttributeContext;
using search::attribute::IAttributeVector;
@@ -22,23 +24,32 @@ class StructMapAttributeFieldWriterState : public DocsumFieldWriterState
{
std::unique_ptr<AttributeFieldWriter> _keyWriter;
std::vector<std::unique_ptr<AttributeFieldWriter>> _valueWriters;
+ const vespalib::string& _field_name;
+ const MatchingElements* const _matching_elements;
public:
StructMapAttributeFieldWriterState(const vespalib::string &keyAttributeName,
const std::vector<vespalib::string> &valueFieldNames,
const std::vector<vespalib::string> &valueAttributeNames,
- IAttributeContext &context);
+ IAttributeContext &context,
+ const vespalib::string &field_name,
+ const MatchingElements* matching_elements);
~StructMapAttributeFieldWriterState() override;
+ void insert_element(uint32_t element_index, Cursor &array);
void insertField(uint32_t docId, vespalib::slime::Inserter &target) override;
};
StructMapAttributeFieldWriterState::StructMapAttributeFieldWriterState(const vespalib::string &keyAttributeName,
const std::vector<vespalib::string> &valueFieldNames,
const std::vector<vespalib::string> &valueAttributeNames,
- IAttributeContext &context)
+ IAttributeContext &context,
+ const vespalib::string& field_name,
+ const MatchingElements *matching_elements)
: DocsumFieldWriterState(),
_keyWriter(),
- _valueWriters()
+ _valueWriters(),
+ _field_name(field_name),
+ _matching_elements(matching_elements)
{
const IAttributeVector *attr = context.getAttribute(keyAttributeName);
if (attr != nullptr) {
@@ -57,6 +68,19 @@ StructMapAttributeFieldWriterState::StructMapAttributeFieldWriterState(const ves
StructMapAttributeFieldWriterState::~StructMapAttributeFieldWriterState() = default;
void
+StructMapAttributeFieldWriterState::insert_element(uint32_t element_index, Cursor &array)
+{
+ Cursor &keyValueObj = array.addObject();
+ if (_keyWriter) {
+ _keyWriter->print(element_index, keyValueObj);
+ }
+ Cursor &obj = keyValueObj.setObject(valueName);
+ for (auto &valueWriter : _valueWriters) {
+ valueWriter->print(element_index, obj);
+ }
+}
+
+void
StructMapAttributeFieldWriterState::insertField(uint32_t docId, vespalib::slime::Inserter &target)
{
uint32_t elems = 0;
@@ -76,14 +100,19 @@ StructMapAttributeFieldWriterState::insertField(uint32_t docId, vespalib::slime:
return;
}
Cursor &arr = target.insertArray();
- for (uint32_t idx = 0; idx < elems; ++idx) {
- Cursor &keyValueObj = arr.addObject();
- if (_keyWriter) {
- _keyWriter->print(idx, keyValueObj);
+ if (_matching_elements != nullptr) {
+ auto &elements = _matching_elements->get_matching_elements(docId, _field_name);
+ auto elements_iterator = elements.cbegin();
+ for (uint32_t idx = 0; idx < elems && elements_iterator != elements.cend(); ++idx) {
+ assert(*elements_iterator >= idx);
+ if (*elements_iterator == idx) {
+ insert_element(idx, arr);
+ ++elements_iterator;
+ }
}
- Cursor &obj = keyValueObj.setObject(valueName);
- for (auto &valueWriter : _valueWriters) {
- valueWriter->print(idx, obj);
+ } else {
+ for (uint32_t idx = 0; idx < elems; ++idx) {
+ insert_element(idx, arr);
}
}
}
@@ -91,8 +120,9 @@ StructMapAttributeFieldWriterState::insertField(uint32_t docId, vespalib::slime:
}
StructMapAttributeCombinerDFW::StructMapAttributeCombinerDFW(const vespalib::string &fieldName,
- const std::vector<vespalib::string> &valueFields)
- : AttributeCombinerDFW(fieldName),
+ const std::vector<vespalib::string> &valueFields,
+ bool filter_elements)
+ : AttributeCombinerDFW(fieldName, filter_elements),
_keyAttributeName(),
_valueFields(valueFields),
_valueAttributeNames()
@@ -108,9 +138,9 @@ StructMapAttributeCombinerDFW::StructMapAttributeCombinerDFW(const vespalib::str
StructMapAttributeCombinerDFW::~StructMapAttributeCombinerDFW() = default;
std::unique_ptr<DocsumFieldWriterState>
-StructMapAttributeCombinerDFW::allocFieldWriterState(IAttributeContext &context)
+StructMapAttributeCombinerDFW::allocFieldWriterState(IAttributeContext &context, const MatchingElements* matching_elements)
{
- return std::make_unique<StructMapAttributeFieldWriterState>(_keyAttributeName, _valueFields, _valueAttributeNames, context);
+ return std::make_unique<StructMapAttributeFieldWriterState>(_keyAttributeName, _valueFields, _valueAttributeNames, context, _fieldName, matching_elements);
}
}
diff --git a/searchsummary/src/vespa/searchsummary/docsummary/struct_map_attribute_combiner_dfw.h b/searchsummary/src/vespa/searchsummary/docsummary/struct_map_attribute_combiner_dfw.h
index 99ad007559b..375d55df678 100644
--- a/searchsummary/src/vespa/searchsummary/docsummary/struct_map_attribute_combiner_dfw.h
+++ b/searchsummary/src/vespa/searchsummary/docsummary/struct_map_attribute_combiner_dfw.h
@@ -20,10 +20,11 @@ class StructMapAttributeCombinerDFW : public AttributeCombinerDFW
std::vector<vespalib::string> _valueFields;
std::vector<vespalib::string> _valueAttributeNames;
- std::unique_ptr<DocsumFieldWriterState> allocFieldWriterState(search::attribute::IAttributeContext &context) override;
+ std::unique_ptr<DocsumFieldWriterState> allocFieldWriterState(search::attribute::IAttributeContext &context, const MatchingElements* matching_elements) override;
public:
StructMapAttributeCombinerDFW(const vespalib::string &fieldName,
- const std::vector<vespalib::string> &valueFields);
+ const std::vector<vespalib::string> &valueFields,
+ bool filter_elements);
~StructMapAttributeCombinerDFW() override;
};
diff --git a/storage/src/tests/bucketdb/bucketmanagertest.cpp b/storage/src/tests/bucketdb/bucketmanagertest.cpp
index 181bac4c6dd..123dc401b5a 100644
--- a/storage/src/tests/bucketdb/bucketmanagertest.cpp
+++ b/storage/src/tests/bucketdb/bucketmanagertest.cpp
@@ -311,7 +311,7 @@ TEST_F(BucketManagerTest, Min_Used_Bits_From_Component_Is_Honored) {
}
// FIXME: non-deterministic test
-TEST_F(BucketManagerTest, IGNORED_request_bucket_info_with_state) {
+TEST_F(BucketManagerTest, DISABLED_request_bucket_info_with_state) {
// Test prior to building bucket cache
setupTestEnvironment();
addBucketsToDB(30);
diff --git a/storage/src/tests/distributor/btree_bucket_database_test.cpp b/storage/src/tests/distributor/btree_bucket_database_test.cpp
index c253a758f98..43d74ca2fb5 100644
--- a/storage/src/tests/distributor/btree_bucket_database_test.cpp
+++ b/storage/src/tests/distributor/btree_bucket_database_test.cpp
@@ -2,11 +2,56 @@
#include <vespa/storage/bucketdb/btree_bucket_database.h>
#include <tests/distributor/bucketdatabasetest.h>
+#include <gtest/gtest.h>
+#include <gmock/gmock.h>
+
+using namespace ::testing;
namespace storage::distributor {
INSTANTIATE_TEST_CASE_P(BTreeDatabase, BucketDatabaseTest,
::testing::Values(std::make_shared<BTreeBucketDatabase>()));
+using document::BucketId;
+
+namespace {
+
+BucketCopy BC(uint32_t node_idx, uint32_t state) {
+ api::BucketInfo info(0x123, state, state);
+ return BucketCopy(0, node_idx, info);
+}
+
+
+BucketInfo BI(uint32_t node_idx, uint32_t state) {
+ BucketInfo bi;
+ bi.addNode(BC(node_idx, state), toVector<uint16_t>(0));
+ return bi;
+}
+
+}
+
+struct BTreeReadGuardTest : Test {
+ BTreeBucketDatabase _db;
+};
+
+TEST_F(BTreeReadGuardTest, guard_does_not_observe_new_entries) {
+ auto guard = _db.acquire_read_guard();
+ _db.update(BucketDatabase::Entry(BucketId(16, 16), BI(1, 1234)));
+ std::vector<BucketDatabase::Entry> entries;
+ guard->find_parents_and_self(BucketId(16, 16), entries);
+ EXPECT_EQ(entries.size(), 0U);
+}
+
+TEST_F(BTreeReadGuardTest, guard_observes_entries_alive_at_acquire_time) {
+ BucketId bucket(16, 16);
+ _db.update(BucketDatabase::Entry(bucket, BI(1, 1234)));
+ auto guard = _db.acquire_read_guard();
+ _db.remove(bucket);
+ std::vector<BucketDatabase::Entry> entries;
+ guard->find_parents_and_self(bucket, entries);
+ ASSERT_EQ(entries.size(), 1U);
+ EXPECT_EQ(entries[0].getBucketInfo(), BI(1, 1234));
+}
+
}
diff --git a/storage/src/tests/distributor/distributortestutil.cpp b/storage/src/tests/distributor/distributortestutil.cpp
index 91af37e0f30..4a9ef147741 100644
--- a/storage/src/tests/distributor/distributortestutil.cpp
+++ b/storage/src/tests/distributor/distributortestutil.cpp
@@ -29,6 +29,7 @@ DistributorTestUtil::createLinks()
*_threadPool,
*this,
true,
+ false, // TODO swap default
_hostInfo,
&_messageSender));
_component.reset(new storage::DistributorComponent(_node->getComponentRegister(), "distrtestutil"));
diff --git a/storage/src/tests/distributor/simplemaintenancescannertest.cpp b/storage/src/tests/distributor/simplemaintenancescannertest.cpp
index b21a10c319e..abf061dd990 100644
--- a/storage/src/tests/distributor/simplemaintenancescannertest.cpp
+++ b/storage/src/tests/distributor/simplemaintenancescannertest.cpp
@@ -36,7 +36,7 @@ void
SimpleMaintenanceScannerTest::SetUp()
{
_priorityGenerator = std::make_unique<MockMaintenancePriorityGenerator>();
- _bucketSpaceRepo = std::make_unique<DistributorBucketSpaceRepo>();
+ _bucketSpaceRepo = std::make_unique<DistributorBucketSpaceRepo>(false);
_priorityDb = std::make_unique<SimpleBucketPriorityDatabase>();
_scanner = std::make_unique<SimpleMaintenanceScanner>(*_priorityDb, *_priorityGenerator, *_bucketSpaceRepo);
}
@@ -79,7 +79,7 @@ TEST_F(SimpleMaintenanceScannerTest, prioritize_single_bucket) {
TEST_F(SimpleMaintenanceScannerTest, prioritize_single_bucket_alt_bucket_space) {
document::BucketSpace bucketSpace(4);
- _bucketSpaceRepo->add(bucketSpace, std::make_unique<DistributorBucketSpace>());
+ _bucketSpaceRepo->add(bucketSpace, std::make_unique<DistributorBucketSpace>(false));
_scanner->reset();
addBucketToDb(bucketSpace, 1);
std::string expected("PrioritizedBucket(Bucket(BucketSpace(0x0000000000000004), BucketId(0x4000000000000001)), pri VERY_HIGH)\n");
diff --git a/storage/src/vespa/storage/bucketdb/btree_bucket_database.cpp b/storage/src/vespa/storage/bucketdb/btree_bucket_database.cpp
index a901bbdd96b..c3ade3c2877 100644
--- a/storage/src/vespa/storage/bucketdb/btree_bucket_database.cpp
+++ b/storage/src/vespa/storage/bucketdb/btree_bucket_database.cpp
@@ -537,4 +537,8 @@ void BTreeBucketDatabase::ReadGuardImpl::find_parents_and_self(const document::B
_db->find_parents_and_self_internal(_frozen_view, bucket, entries);
}
+uint64_t BTreeBucketDatabase::ReadGuardImpl::generation() const noexcept {
+ return _guard.getGeneration();
+}
+
}
diff --git a/storage/src/vespa/storage/bucketdb/btree_bucket_database.h b/storage/src/vespa/storage/bucketdb/btree_bucket_database.h
index 5c69b57956b..1f2b25814a8 100644
--- a/storage/src/vespa/storage/bucketdb/btree_bucket_database.h
+++ b/storage/src/vespa/storage/bucketdb/btree_bucket_database.h
@@ -81,16 +81,16 @@ private:
void find_parents_and_self(const document::BucketId& bucket,
std::vector<Entry>& entries) const override;
+ uint64_t generation() const noexcept override;
};
friend class ReadGuardImpl;
-
+ friend struct BTreeBuilderMerger;
+ friend struct BTreeTrailingInserter;
+public:
std::unique_ptr<ReadGuard> acquire_read_guard() const override {
return std::make_unique<ReadGuardImpl>(*this);
}
-
- friend struct BTreeBuilderMerger;
- friend struct BTreeTrailingInserter;
};
}
diff --git a/storage/src/vespa/storage/bucketdb/bucketdatabase.h b/storage/src/vespa/storage/bucketdb/bucketdatabase.h
index 0659d02cc15..46aaaa997d9 100644
--- a/storage/src/vespa/storage/bucketdb/bucketdatabase.h
+++ b/storage/src/vespa/storage/bucketdb/bucketdatabase.h
@@ -244,6 +244,7 @@ public:
virtual void find_parents_and_self(const document::BucketId& bucket,
std::vector<Entry>& entries) const = 0;
+ virtual uint64_t generation() const noexcept = 0;
};
virtual std::unique_ptr<ReadGuard> acquire_read_guard() const {
diff --git a/storage/src/vespa/storage/bucketdb/mapbucketdatabase.h b/storage/src/vespa/storage/bucketdb/mapbucketdatabase.h
index b295be588a6..9fe5e2d7740 100644
--- a/storage/src/vespa/storage/bucketdb/mapbucketdatabase.h
+++ b/storage/src/vespa/storage/bucketdb/mapbucketdatabase.h
@@ -73,6 +73,7 @@ private:
void find_parents_and_self(const document::BucketId& bucket,
std::vector<Entry>& entries) const override;
+ uint64_t generation() const noexcept override { return 0; }
};
uint32_t allocate();
diff --git a/storage/src/vespa/storage/config/stor-distributormanager.def b/storage/src/vespa/storage/config/stor-distributormanager.def
index 182aa2008c5..e789f03bb14 100644
--- a/storage/src/vespa/storage/config/stor-distributormanager.def
+++ b/storage/src/vespa/storage/config/stor-distributormanager.def
@@ -199,3 +199,9 @@ allow_stale_reads_during_cluster_state_transitions bool default=false
## Setting any of these values only makes sense for testing!
simulated_db_pruning_latency_msec int default=0
simulated_db_merging_latency_msec int default=0
+
+## Whether to use a B-tree data structure for the distributor bucket database instead
+## of the legacy database. Setting this option may trigger alternate code paths for
+## read only operations, as the B-tree database is thread safe for concurrent reads.
+use_btree_database bool default=false restart
+
diff --git a/storage/src/vespa/storage/distributor/distributor.cpp b/storage/src/vespa/storage/distributor/distributor.cpp
index 7cb7d687446..69b64ac8dc1 100644
--- a/storage/src/vespa/storage/distributor/distributor.cpp
+++ b/storage/src/vespa/storage/distributor/distributor.cpp
@@ -58,6 +58,7 @@ Distributor::Distributor(DistributorComponentRegister& compReg,
framework::TickingThreadPool& threadPool,
DoneInitializeHandler& doneInitHandler,
bool manageActiveBucketCopies,
+ bool use_btree_database,
HostInfo& hostInfoReporterRegistrar,
ChainedMessageSender* messageSender)
: StorageLink("distributor"),
@@ -66,8 +67,8 @@ Distributor::Distributor(DistributorComponentRegister& compReg,
_clusterStateBundle(lib::ClusterState()),
_compReg(compReg),
_component(compReg, "distributor"),
- _bucketSpaceRepo(std::make_unique<DistributorBucketSpaceRepo>()),
- _readOnlyBucketSpaceRepo(std::make_unique<DistributorBucketSpaceRepo>()),
+ _bucketSpaceRepo(std::make_unique<DistributorBucketSpaceRepo>(use_btree_database)),
+ _readOnlyBucketSpaceRepo(std::make_unique<DistributorBucketSpaceRepo>(use_btree_database)),
_metrics(new DistributorMetricSet(_component.getLoadTypes()->getMetricLoadTypes())),
_operationOwner(*this, _component.getClock()),
_maintenanceOperationOwner(*this, _component.getClock()),
@@ -103,6 +104,10 @@ Distributor::Distributor(DistributorComponentRegister& compReg,
std::chrono::seconds(0))), // Set by config later
_must_send_updated_host_info(false)
{
+ if (use_btree_database) {
+ LOG(info, "Using new B-tree bucket database implementation instead of legacy implementation"); // TODO remove this once default is swapped
+ }
+
_component.registerMetric(*_metrics);
_component.registerMetricUpdateHook(_metricUpdateHook,
framework::SecondTime(0));
diff --git a/storage/src/vespa/storage/distributor/distributor.h b/storage/src/vespa/storage/distributor/distributor.h
index 424ac0e7a78..638704adf24 100644
--- a/storage/src/vespa/storage/distributor/distributor.h
+++ b/storage/src/vespa/storage/distributor/distributor.h
@@ -51,6 +51,7 @@ public:
framework::TickingThreadPool&,
DoneInitializeHandler&,
bool manageActiveBucketCopies,
+ bool use_btree_database,
HostInfo& hostInfoReporterRegistrar,
ChainedMessageSender* = nullptr);
diff --git a/storage/src/vespa/storage/distributor/distributor_bucket_space.cpp b/storage/src/vespa/storage/distributor/distributor_bucket_space.cpp
index f013ce43048..3f7dbda62d9 100644
--- a/storage/src/vespa/storage/distributor/distributor_bucket_space.cpp
+++ b/storage/src/vespa/storage/distributor/distributor_bucket_space.cpp
@@ -10,15 +10,17 @@ namespace storage::distributor {
namespace {
-std::unique_ptr<BucketDatabase> make_default_db_impl() {
- //return std::make_unique<BTreeBucketDatabase>();
+std::unique_ptr<BucketDatabase> make_default_db_impl(bool use_btree_db) {
+ if (use_btree_db) {
+ return std::make_unique<BTreeBucketDatabase>();
+ }
return std::make_unique<MapBucketDatabase>();
}
}
-DistributorBucketSpace::DistributorBucketSpace()
- : _bucketDatabase(make_default_db_impl()),
+DistributorBucketSpace::DistributorBucketSpace(bool use_btree_db)
+ : _bucketDatabase(make_default_db_impl(use_btree_db)),
_clusterState(),
_distribution()
{
diff --git a/storage/src/vespa/storage/distributor/distributor_bucket_space.h b/storage/src/vespa/storage/distributor/distributor_bucket_space.h
index effb0dc3e17..26a0ee9098c 100644
--- a/storage/src/vespa/storage/distributor/distributor_bucket_space.h
+++ b/storage/src/vespa/storage/distributor/distributor_bucket_space.h
@@ -30,7 +30,7 @@ class DistributorBucketSpace {
std::shared_ptr<const lib::ClusterState> _clusterState;
std::shared_ptr<const lib::Distribution> _distribution;
public:
- DistributorBucketSpace();
+ explicit DistributorBucketSpace(bool use_btree_db);
~DistributorBucketSpace();
DistributorBucketSpace(const DistributorBucketSpace&) = delete;
diff --git a/storage/src/vespa/storage/distributor/distributor_bucket_space_repo.cpp b/storage/src/vespa/storage/distributor/distributor_bucket_space_repo.cpp
index 744c54676ae..54287d32666 100644
--- a/storage/src/vespa/storage/distributor/distributor_bucket_space_repo.cpp
+++ b/storage/src/vespa/storage/distributor/distributor_bucket_space_repo.cpp
@@ -13,11 +13,11 @@ using document::BucketSpace;
namespace storage::distributor {
-DistributorBucketSpaceRepo::DistributorBucketSpaceRepo()
+DistributorBucketSpaceRepo::DistributorBucketSpaceRepo(bool use_btree_db)
: _map()
{
- add(document::FixedBucketSpaces::default_space(), std::make_unique<DistributorBucketSpace>());
- add(document::FixedBucketSpaces::global_space(), std::make_unique<DistributorBucketSpace>());
+ add(document::FixedBucketSpaces::default_space(), std::make_unique<DistributorBucketSpace>(use_btree_db));
+ add(document::FixedBucketSpaces::global_space(), std::make_unique<DistributorBucketSpace>(use_btree_db));
}
DistributorBucketSpaceRepo::~DistributorBucketSpaceRepo() = default;
diff --git a/storage/src/vespa/storage/distributor/distributor_bucket_space_repo.h b/storage/src/vespa/storage/distributor/distributor_bucket_space_repo.h
index ee36842969a..bc42ae8bb3a 100644
--- a/storage/src/vespa/storage/distributor/distributor_bucket_space_repo.h
+++ b/storage/src/vespa/storage/distributor/distributor_bucket_space_repo.h
@@ -19,7 +19,7 @@ private:
BucketSpaceMap _map;
public:
- DistributorBucketSpaceRepo();
+ explicit DistributorBucketSpaceRepo(bool use_btree_db); // TODO remove param once B-tree is default
~DistributorBucketSpaceRepo();
DistributorBucketSpaceRepo(const DistributorBucketSpaceRepo&&) = delete;
diff --git a/storage/src/vespa/storage/storageserver/distributornode.cpp b/storage/src/vespa/storage/storageserver/distributornode.cpp
index 1cd1477e769..20b5dd641f7 100644
--- a/storage/src/vespa/storage/storageserver/distributornode.cpp
+++ b/storage/src/vespa/storage/storageserver/distributornode.cpp
@@ -19,6 +19,7 @@ DistributorNode::DistributorNode(
DistributorNodeContext& context,
ApplicationGenerationFetcher& generationFetcher,
NeedActiveState activeState,
+ bool use_btree_database,
StorageLink::UP communicationManager)
: StorageNode(configUri, context, generationFetcher,
std::unique_ptr<HostInfo>(new HostInfo()),
@@ -29,6 +30,7 @@ DistributorNode::DistributorNode(
_lastUniqueTimestampRequested(0),
_uniqueTimestampCounter(0),
_manageActiveBucketCopies(activeState == NEED_ACTIVE_BUCKET_STATES_SET),
+ _use_btree_database(use_btree_database),
_retrievedCommunicationManager(std::move(communicationManager))
{
try{
@@ -108,6 +110,7 @@ DistributorNode::createChain()
new storage::distributor::Distributor(
dcr, *_threadPool, getDoneInitializeHandler(),
_manageActiveBucketCopies,
+ _use_btree_database,
stateManager->getHostInfo())));
chain->push_back(StorageLink::UP(stateManager.release()));
diff --git a/storage/src/vespa/storage/storageserver/distributornode.h b/storage/src/vespa/storage/storageserver/distributornode.h
index 34f2dbb42a7..3a81e31fc56 100644
--- a/storage/src/vespa/storage/storageserver/distributornode.h
+++ b/storage/src/vespa/storage/storageserver/distributornode.h
@@ -24,6 +24,7 @@ class DistributorNode
uint64_t _lastUniqueTimestampRequested;
uint32_t _uniqueTimestampCounter;
bool _manageActiveBucketCopies;
+ bool _use_btree_database;
std::unique_ptr<StorageLink> _retrievedCommunicationManager;
public:
@@ -38,8 +39,9 @@ public:
DistributorNodeContext&,
ApplicationGenerationFetcher& generationFetcher,
NeedActiveState,
+ bool use_btree_database,
std::unique_ptr<StorageLink> communicationManager);
- ~DistributorNode();
+ ~DistributorNode() override;
const lib::NodeType& getNodeType() const override { return lib::NodeType::DISTRIBUTOR; }
ResumeGuard pause() override;
diff --git a/storageserver/src/vespa/storageserver/app/distributorprocess.cpp b/storageserver/src/vespa/storageserver/app/distributorprocess.cpp
index 57fdcdeb248..ff4b2e98cca 100644
--- a/storageserver/src/vespa/storageserver/app/distributorprocess.cpp
+++ b/storageserver/src/vespa/storageserver/app/distributorprocess.cpp
@@ -11,7 +11,8 @@ namespace storage {
DistributorProcess::DistributorProcess(const config::ConfigUri & configUri)
: Process(configUri),
- _activeFlag(DistributorNode::NO_NEED_FOR_ACTIVE_STATES)
+ _activeFlag(DistributorNode::NO_NEED_FOR_ACTIVE_STATES),
+ _use_btree_database(false)
{
}
@@ -29,19 +30,22 @@ DistributorProcess::shutdown()
void
DistributorProcess::setupConfig(uint64_t subscribeTimeout)
{
- std::unique_ptr<vespa::config::content::core::StorServerConfig> config =
- config::ConfigGetter<vespa::config::content::core::StorServerConfig>::getConfig(_configUri.getConfigId(), _configUri.getContext(), subscribeTimeout);
- if (config->persistenceProvider.type
- != vespa::config::content::core::StorServerConfig::PersistenceProvider::Type::STORAGE)
- {
+ using vespa::config::content::core::StorServerConfig;
+ using vespa::config::content::core::StorDistributormanagerConfig;
+ using vespa::config::content::core::StorVisitordispatcherConfig;
+
+ auto stor_config = config::ConfigGetter<StorServerConfig>::getConfig(
+ _configUri.getConfigId(), _configUri.getContext(), subscribeTimeout);
+ if (stor_config->persistenceProvider.type != StorServerConfig::PersistenceProvider::Type::STORAGE) {
_activeFlag = DistributorNode::NEED_ACTIVE_BUCKET_STATES_SET;
}
+ auto dist_config = config::ConfigGetter<StorDistributormanagerConfig>::getConfig(
+ _configUri.getConfigId(), _configUri.getContext(), subscribeTimeout);
+ _use_btree_database = dist_config->useBtreeDatabase;
_distributorConfigHandler
- = _configSubscriber.subscribe<vespa::config::content::core::StorDistributormanagerConfig>(
- _configUri.getConfigId(), subscribeTimeout);
+ = _configSubscriber.subscribe<StorDistributormanagerConfig>(_configUri.getConfigId(), subscribeTimeout);
_visitDispatcherConfigHandler
- = _configSubscriber.subscribe<vespa::config::content::core::StorVisitordispatcherConfig>(
- _configUri.getConfigId(), subscribeTimeout);
+ = _configSubscriber.subscribe<StorVisitordispatcherConfig>(_configUri.getConfigId(), subscribeTimeout);
Process::setupConfig(subscribeTimeout);
}
@@ -75,7 +79,7 @@ DistributorProcess::configUpdated()
void
DistributorProcess::createNode()
{
- _node.reset(new DistributorNode(_configUri, _context, *this, _activeFlag, StorageLink::UP()));
+ _node.reset(new DistributorNode(_configUri, _context, *this, _activeFlag, _use_btree_database, StorageLink::UP()));
_node->handleConfigChange(*_distributorConfigHandler->getConfig());
_node->handleConfigChange(*_visitDispatcherConfigHandler->getConfig());
}
diff --git a/storageserver/src/vespa/storageserver/app/distributorprocess.h b/storageserver/src/vespa/storageserver/app/distributorprocess.h
index 0e8d0e2d599..57193f77e42 100644
--- a/storageserver/src/vespa/storageserver/app/distributorprocess.h
+++ b/storageserver/src/vespa/storageserver/app/distributorprocess.h
@@ -15,6 +15,7 @@ namespace storage {
class DistributorProcess : public Process {
DistributorNodeContext _context;
DistributorNode::NeedActiveState _activeFlag;
+ bool _use_btree_database;
DistributorNode::UP _node;
config::ConfigHandle<vespa::config::content::core::StorDistributormanagerConfig>::UP
_distributorConfigHandler;
@@ -23,7 +24,7 @@ class DistributorProcess : public Process {
public:
DistributorProcess(const config::ConfigUri & configUri);
- ~DistributorProcess();
+ ~DistributorProcess() override;
void shutdown() override;
void setupConfig(uint64_t subscribeTimeout) override;
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/utils/SiaUtils.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/utils/SiaUtils.java
index 40f12b9c6db..4d7f4b1c397 100644
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/utils/SiaUtils.java
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/utils/SiaUtils.java
@@ -52,6 +52,13 @@ public class SiaUtils {
.resolve(String.format("%s.%s.cert.pem", service.getDomainName(), service.getName()));
}
+ public static Path getCaCertificatesFile() {
+ // The contents of this file are the same as /opt/yahoo/share/ssl/certs/athenz_certificate_bundle.pem installed
+ // by the yahoo_certificates_bundle RPM package, except the latter also contains a textual description
+ // (decoded) of the certificates.
+ return DEFAULT_SIA_DIRECTORY.resolve("certs").resolve("ca.cert.pem");
+ }
+
public static Optional<PrivateKey> readPrivateKeyFile(AthenzIdentity service) {
return readPrivateKeyFile(DEFAULT_SIA_DIRECTORY, service);
}
diff --git a/vespajlib/src/main/java/com/yahoo/vespa/objects/Identifiable.java b/vespajlib/src/main/java/com/yahoo/vespa/objects/Identifiable.java
index 3e392448ce2..78fad5f5867 100644
--- a/vespajlib/src/main/java/com/yahoo/vespa/objects/Identifiable.java
+++ b/vespajlib/src/main/java/com/yahoo/vespa/objects/Identifiable.java
@@ -9,7 +9,7 @@ import java.lang.reflect.InvocationTargetException;
import java.util.HashMap;
/**
- * This is the base class to do cross-language serialization and deserialization of complete object structures without
+ * The base class for cross-language serialization and deserialization of complete object structures without
* the need for a separate protocol. Each subclass needs to register itself using the {@link #registerClass(int, Class)}
* method, and override {@link #onGetClassId()} to return the same classId as the one registered. Creating an instance
* of an identifiable object is done through the {@link #create(Deserializer)} or {@link #createFromId(int)} factory
diff --git a/vespajlib/src/main/java/com/yahoo/vespa/objects/Selectable.java b/vespajlib/src/main/java/com/yahoo/vespa/objects/Selectable.java
index f1b7d6dc841..e60bb664d7d 100644
--- a/vespajlib/src/main/java/com/yahoo/vespa/objects/Selectable.java
+++ b/vespajlib/src/main/java/com/yahoo/vespa/objects/Selectable.java
@@ -2,17 +2,17 @@
package com.yahoo.vespa.objects;
/**
- * @author baldersheim
- *
- * This class acts as an interface for traversing a tree, or a graph.
+ * A node in a traversable tree.
* Every non leaf Object implements {@link #selectMembers(ObjectPredicate, ObjectOperation)} implementing
* the actual traversal. You can then implement an {@link ObjectPredicate} to select which nodes you want to look at with
* your {@link ObjectOperation}
+ *
+ * @author baldersheim
*/
public class Selectable {
/**
- * Apply the predicate to this object. If the predicate returns true, pass this object to the operation, otherwise
+ * Applies the predicate to this object. If the predicate returns true, pass this object to the operation, otherwise
* invoke the {@link #selectMembers(ObjectPredicate, ObjectOperation)} method to locate sub-elements that might
* trigger the predicate.
*
@@ -28,7 +28,7 @@ public class Selectable {
}
/**
- * Invoke {@link #select(ObjectPredicate, ObjectOperation)} on any member objects this object wants to expose
+ * Invokes {@link #select(ObjectPredicate, ObjectOperation)} on any member objects this object wants to expose
* through the selection mechanism. Overriding this method is optional, and which objects to expose is determined by
* the application logic of the object itself.
*
@@ -44,4 +44,5 @@ public class Selectable {
selectable.select(predicate, operation);
}
}
+
}
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_builder.h b/vespalib/src/vespa/vespalib/datastore/unique_store_builder.h
index 7f5162d97ff..c0a44855063 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_builder.h
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_builder.h
@@ -35,6 +35,7 @@ public:
}
EntryRef mapEnumValueToEntryRef(uint32_t enumValue) {
assert(enumValue < _refs.size());
+ assert(_refCounts[enumValue] < std::numeric_limits<uint32_t>::max());
++_refCounts[enumValue];
return _refs[enumValue];
}
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_entry_base.h b/vespalib/src/vespa/vespalib/datastore/unique_store_entry_base.h
index 2b5bff45d79..79ebae9389f 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_entry_base.h
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_entry_base.h
@@ -4,6 +4,8 @@
#include <cstring>
#include <cstdint>
+#include <cassert>
+#include <limits>
namespace search::datastore {
@@ -20,8 +22,14 @@ protected:
public:
uint32_t get_ref_count() const { return _ref_count; }
void set_ref_count(uint32_t ref_count) const { _ref_count = ref_count; }
- void inc_ref_count() const { ++_ref_count; }
- void dec_ref_count() const { --_ref_count; }
+ void inc_ref_count() const {
+ assert(_ref_count < std::numeric_limits<uint32_t>::max());
+ ++_ref_count;
+ }
+ void dec_ref_count() const {
+ assert(_ref_count > 0u);
+ --_ref_count;
+ }
};
}
diff --git a/vsm/src/vespa/vsm/vsm/vsm-adapter.cpp b/vsm/src/vespa/vsm/vsm/vsm-adapter.cpp
index f87c327e86a..8a480419aa5 100644
--- a/vsm/src/vespa/vsm/vsm/vsm-adapter.cpp
+++ b/vsm/src/vespa/vsm/vsm/vsm-adapter.cpp
@@ -2,12 +2,14 @@
#include "vsm-adapter.h"
#include "docsumconfig.h"
+#include <vespa/searchlib/common/matching_elements.h>
#include <vespa/log/log.h>
LOG_SETUP(".vsm.vsm-adapter");
using search::docsummary::ResConfigEntry;
using search::docsummary::KeywordExtractor;
+using search::MatchingElements;
using config::ConfigSnapshot;
namespace vsm {
@@ -45,6 +47,11 @@ void GetDocsumsStateCallback::FillDocumentLocations(GetDocsumsState *state, IDoc
(void) env;
}
+std::unique_ptr<MatchingElements>
+GetDocsumsStateCallback::fill_matching_elements()
+{
+ return std::make_unique<MatchingElements>();
+}
GetDocsumsStateCallback::~GetDocsumsStateCallback() = default;
diff --git a/vsm/src/vespa/vsm/vsm/vsm-adapter.h b/vsm/src/vespa/vsm/vsm/vsm-adapter.h
index 2bdd0248be9..2a5b1e1d47c 100644
--- a/vsm/src/vespa/vsm/vsm/vsm-adapter.h
+++ b/vsm/src/vespa/vsm/vsm/vsm-adapter.h
@@ -39,6 +39,7 @@ public:
void FillRankFeatures(GetDocsumsState * state, IDocsumEnvironment * env) override;
void ParseLocation(GetDocsumsState * state) override;
virtual void FillDocumentLocations(GetDocsumsState * state, IDocsumEnvironment * env);
+ virtual std::unique_ptr<search::MatchingElements> fill_matching_elements() override;
void setSummaryFeatures(const search::FeatureSet::SP & sf) { _summaryFeatures = sf; }
void setRankFeatures(const search::FeatureSet::SP & rf) { _rankFeatures = rf; }
~GetDocsumsStateCallback();
diff --git a/zkfacade/src/main/java/com/yahoo/vespa/curator/Curator.java b/zkfacade/src/main/java/com/yahoo/vespa/curator/Curator.java
index 4f9622de556..a543d43abcd 100644
--- a/zkfacade/src/main/java/com/yahoo/vespa/curator/Curator.java
+++ b/zkfacade/src/main/java/com/yahoo/vespa/curator/Curator.java
@@ -87,6 +87,7 @@ public class Curator implements AutoCloseable {
.connectionTimeoutMs(ZK_CONNECTION_TIMEOUT)
.connectString(connectionSpec)
.zookeeperFactory(new DNSResolvingFixerZooKeeperFactory(UNKNOWN_HOST_TIMEOUT_MILLIS))
+ .dontUseContainerParents() // TODO: Remove when we know ZooKeeper 3.5 works fine, consider waiting until Vespa 8
.build());
}
diff --git a/zookeeper-command-line-client/pom.xml b/zookeeper-command-line-client/pom.xml
index 1633f14e6dc..f4b75aba906 100644
--- a/zookeeper-command-line-client/pom.xml
+++ b/zookeeper-command-line-client/pom.xml
@@ -20,6 +20,11 @@
<groupId>commons-cli</groupId>
<artifactId>commons-cli</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ <version>1.7.25</version>
+ </dependency>
</dependencies>
<build>
<plugins>
diff --git a/zookeeper-command-line-client/src/main/resources/log4j-vespa.properties b/zookeeper-command-line-client/src/main/resources/log4j-vespa.properties
index 34a4b174335..add1f89ddb4 100644
--- a/zookeeper-command-line-client/src/main/resources/log4j-vespa.properties
+++ b/zookeeper-command-line-client/src/main/resources/log4j-vespa.properties
@@ -1,6 +1,7 @@
-log4j.rootLogger=WARN
+log4j.rootLogger=debug,console
-# CONSOLE is set to be a ConsoleAppender using a PatternLayout
-log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
-log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
-log4j.appender.CONSOLE.layout.ConversionPattern=[%-5p] %m%n
+# console is set to be a ConsoleAppender using a PatternLayout
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=[%-5p] %m%n
+log4j.appender.console.threshold=warn