-rw-r--r--  client/go/go.mod | 5
-rw-r--r--  client/go/go.sum | 4
-rw-r--r--  client/go/internal/cli/cmd/api_key.go | 4
-rw-r--r--  client/go/internal/cli/cmd/cert.go | 10
-rw-r--r--  client/go/internal/cli/cmd/config.go | 2
-rw-r--r--  client/go/internal/cli/cmd/config_test.go | 16
-rw-r--r--  client/go/internal/cli/cmd/login.go | 4
-rw-r--r--  client/go/internal/cli/cmd/logout.go | 4
-rw-r--r--  client/go/internal/cli/cmd/root.go | 66
-rw-r--r--  client/go/internal/cli/cmd/status_test.go | 4
-rw-r--r--  client/go/internal/mock/http.go | 2
-rw-r--r--  client/go/internal/util/http.go | 23
-rw-r--r--  client/go/internal/vespa/document/dispatcher.go | 12
-rw-r--r--  client/go/internal/vespa/document/dispatcher_test.go | 6
-rw-r--r--  client/go/internal/vespa/document/document.go | 250
-rw-r--r--  client/go/internal/vespa/document/document_test.go | 55
-rw-r--r--  client/go/internal/vespa/document/http.go | 93
-rw-r--r--  client/go/internal/vespa/document/http_test.go | 67
-rw-r--r--  client/go/internal/vespa/document/throttler.go | 33
-rw-r--r--  client/go/internal/vespa/system.go | 42
-rw-r--r--  client/go/internal/vespa/target_cloud.go | 18
-rw-r--r--  client/go/internal/vespa/target_custom.go | 13
-rw-r--r--  client/go/internal/vespa/target_test.go | 6
-rw-r--r--  config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java | 1
-rw-r--r--  config-model/src/main/java/com/yahoo/config/model/provision/InMemoryProvisioner.java | 13
-rw-r--r--  config-model/src/main/java/com/yahoo/schema/processing/AttributesImplicitWord.java | 6
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java | 5
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/application/validation/CloudHttpConnectorValidator.java | 45
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidator.java | 76
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/application/validation/QuotaValidator.java | 18
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validation.java | 5
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV4Builder.java | 4
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/NodesSpecification.java | 6
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/container/http/ConnectorFactory.java | 2
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java | 6
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/content/Content.java | 48
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java | 13
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java | 25
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/routing/DocumentProtocol.java | 7
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/search/IndexedSearchCluster.java | 60
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/search/IndexingDocproc.java | 68
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/search/NodeResourcesTuning.java | 7
-rw-r--r--  config-model/src/main/resources/schema/common.rnc | 6
-rw-r--r--  config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java | 44
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/application/validation/CloudHttpConnectorValidatorTest.java | 104
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidatorTest.java | 20
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/application/validation/QuotaValidatorTest.java | 58
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/application/validation/ValidationTester.java | 1
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/container/xml/JettyContainerModelBuilderTest.java | 10
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java | 38
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/content/IndexingAndDocprocRoutingTest.java | 2
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/search/NodeResourcesTuningTest.java | 24
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTester.java | 28
-rw-r--r--  config-model/src/test/schema-test-files/services-hosted.xml | 2
-rw-r--r--  config-provisioning/src/main/java/com/yahoo/config/provision/NodeResources.java | 9
-rw-r--r--  config-provisioning/src/main/java/com/yahoo/config/provision/Zone.java | 2
-rw-r--r--  config-provisioning/src/test/java/com/yahoo/config/provision/NodeResourcesTest.java | 35
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java | 3
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/provision/ProvisionerAdapter.java | 4
-rw-r--r--  container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpRequestDispatch.java | 3
-rw-r--r--  container-search/src/main/java/com/yahoo/prelude/statistics/StatisticsSearcher.java | 36
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationVersion.java | 25
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java | 11
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RevisionHistory.java | 2
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/Submission.java | 12
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java | 5
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java | 11
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java | 1
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/certificate/EndpointCertificatesHandler.java | 6
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java | 4
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/JobRunnerTest.java | 17
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java | 5
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java | 4
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-overview-2.json | 9
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-overview.json | 12
-rw-r--r--  document/src/vespa/document/annotation/spantree.h | 2
-rw-r--r--  document/src/vespa/document/base/testdocman.cpp | 1
-rw-r--r--  document/src/vespa/document/select/valuenodes.cpp | 1
-rw-r--r--  document/src/vespa/document/serialization/annotationdeserializer.cpp | 2
-rw-r--r--  eval/src/tests/eval/value_cache/dense-short1.json | 1
-rw-r--r--  eval/src/tests/eval/value_cache/dense-short2.json | 3
-rw-r--r--  eval/src/tests/eval/value_cache/sparse-short1.json | 5
-rw-r--r--  eval/src/tests/eval/value_cache/sparse-short2.json | 7
-rw-r--r--  eval/src/tests/eval/value_cache/tensor_loader_test.cpp | 24
-rw-r--r--  eval/src/vespa/eval/eval/value_cache/constant_tensor_loader.cpp | 77
-rw-r--r--  flags/src/main/java/com/yahoo/vespa/flags/Flags.java | 20
-rw-r--r--  linguistics-components/src/main/java/com/yahoo/language/huggingface/HuggingFaceTokenizer.java | 1
-rw-r--r--  linguistics/src/main/java/com/yahoo/language/simple/SimpleTokenizer.java | 4
-rw-r--r--  logd/src/logd/empty_forwarder.cpp | 1
-rw-r--r--  maven-plugins/allowed-maven-dependencies.txt | 2
-rw-r--r--  metrics/src/main/java/ai/vespa/metrics/ConfigServerMetrics.java | 8
-rw-r--r--  metrics/src/main/java/ai/vespa/metrics/Unit.java | 3
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/identity/AthenzCredentialsMaintainer.java | 4
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeList.java | 5
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java | 12
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java | 2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java | 54
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Limits.java | 8
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerInstance.java | 4
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java | 15
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java | 10
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsUpgrader.java | 2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RebuildingOsUpgrader.java | 2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java | 16
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java | 8
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java | 45
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java | 56
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingUsingBcpGroupInfoTest.java | 36
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java | 8
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java | 8
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java | 4
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java | 35
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java | 8
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java | 4
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java | 14
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/capacity-zone.json | 2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-container1.json | 2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node1.json | 2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node10.json | 2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node11.json | 2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node13.json | 2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node14.json | 2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node2.json | 2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node3.json | 2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-wg.json | 2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-with-hostnames.json | 2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4.json | 2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5-after-changes.json | 2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5.json | 2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node55.json | 2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node6.json | 2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node7.json | 2
-rw-r--r--  parent/pom.xml | 2
-rw-r--r--  screwdriver.yaml | 110
-rwxr-xr-x  screwdriver/release-container-image-docker.sh | 81
-rw-r--r--  searchcore/src/tests/grouping/grouping.cpp | 44
-rw-r--r--  searchcore/src/tests/proton/common/timer/timer_test.cpp | 19
-rw-r--r--  searchcore/src/tests/proton/flushengine/flushengine_test.cpp | 6
-rw-r--r--  searchcore/src/vespa/searchcore/proton/attribute/document_field_extractor.cpp | 1
-rw-r--r--  searchcore/src/vespa/searchcore/proton/matching/blueprintbuilder.cpp | 19
-rw-r--r--  searchcore/src/vespa/searchcore/proton/matching/partial_result.h | 2
-rw-r--r--  searchcore/src/vespa/searchcore/proton/matching/sessionmanager.cpp | 78
-rw-r--r--  searchcore/src/vespa/searchcore/proton/matching/sessionmanager.h | 3
-rw-r--r--  searchcore/src/vespa/searchcore/proton/server/proton.cpp | 4
-rw-r--r--  searchcore/src/vespa/searchcorespi/index/iindexmaintaineroperations.h | 2
-rw-r--r--  searchlib/src/apps/uniform/uniform.cpp | 1
-rw-r--r--  searchlib/src/tests/fef/termfieldmodel/termfieldmodel_test.cpp | 22
-rw-r--r--  searchlib/src/vespa/searchcommon/attribute/config.cpp | 7
-rw-r--r--  searchlib/src/vespa/searchcommon/attribute/config.h | 7
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/load_utils.hpp | 1
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/readerbase.cpp | 8
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/readerbase.h | 7
-rw-r--r--  searchlib/src/vespa/searchlib/bitcompression/compression.h | 2
-rw-r--r--  searchlib/src/vespa/searchlib/bitcompression/countcompression.cpp | 1
-rw-r--r--  searchlib/src/vespa/searchlib/bitcompression/pagedict4.h | 2
-rw-r--r--  searchlib/src/vespa/searchlib/bitcompression/posocc_fields_params.cpp | 7
-rw-r--r--  searchlib/src/vespa/searchlib/bitcompression/posocc_fields_params.h | 6
-rw-r--r--  searchlib/src/vespa/searchlib/bitcompression/posocccompression.cpp | 1
-rw-r--r--  searchlib/src/vespa/searchlib/diskindex/fieldreader.h | 8
-rw-r--r--  searchlib/src/vespa/searchlib/diskindex/fileheader.cpp | 1
-rw-r--r--  searchlib/src/vespa/searchlib/diskindex/fusion.cpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader.cpp | 5
-rw-r--r--  searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader_base.cpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer.cpp | 1
-rw-r--r--  searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer_base.cpp | 1
-rw-r--r--  searchlib/src/vespa/searchlib/diskindex/zcposocciterators.cpp | 3
-rw-r--r--  searchlib/src/vespa/searchlib/diskindex/zcposting.cpp | 1
-rw-r--r--  searchlib/src/vespa/searchlib/diskindex/zcpostingiterators.cpp | 1
-rw-r--r--  searchlib/src/vespa/searchlib/expression/documentfieldnode.cpp | 1
-rw-r--r--  searchlib/src/vespa/searchlib/fef/matchdatalayout.cpp | 8
-rw-r--r--  searchlib/src/vespa/searchlib/fef/matchdatalayout.h | 9
-rw-r--r--  searchlib/src/vespa/searchlib/fef/objectstore.h | 2
-rw-r--r--  searchlib/src/vespa/searchlib/fef/phrase_splitter_query_env.cpp | 1
-rw-r--r--  searchlib/src/vespa/searchlib/fef/phrasesplitter.cpp | 1
-rw-r--r--  searchlib/src/vespa/searchlib/fef/simpletermdata.h | 21
-rw-r--r--  searchlib/src/vespa/searchlib/fef/termfieldmatchdataarray.h | 2
-rw-r--r--  searchlib/src/vespa/searchlib/memoryindex/url_field_inverter.cpp | 1
-rw-r--r--  searchlib/src/vespa/searchlib/query/tree/node.h | 1
-rw-r--r--  searchlib/src/vespa/searchlib/query/tree/termnodes.cpp | 1
-rw-r--r--  searchlib/src/vespa/searchlib/queryeval/create_blueprint_visitor_helper.cpp | 7
-rw-r--r--  searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.cpp | 10
-rw-r--r--  searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.h | 1
-rw-r--r--  searchlib/src/vespa/searchlib/queryeval/equiv_blueprint.cpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/queryeval/fake_search.cpp | 11
-rw-r--r--  searchlib/src/vespa/searchlib/queryeval/fake_search.h | 8
-rw-r--r--  searchlib/src/vespa/searchlib/queryeval/same_element_blueprint.cpp | 5
-rw-r--r--  searchlib/src/vespa/searchlib/queryeval/same_element_search.cpp | 1
-rw-r--r--  searchlib/src/vespa/searchlib/queryeval/searchable.h | 27
-rw-r--r--  searchlib/src/vespa/searchlib/queryeval/simple_phrase_search.cpp | 1
-rw-r--r--  searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.cpp | 7
-rw-r--r--  searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.h | 3
-rw-r--r--  searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.cpp | 9
-rw-r--r--  searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.h | 1
-rw-r--r--  searchlib/src/vespa/searchlib/test/diskindex/threelevelcountbuffers.cpp | 1
-rw-r--r--  searchsummary/src/vespa/searchsummary/docsummary/docsumstate.cpp | 1
-rw-r--r--  searchsummary/src/vespa/searchsummary/docsummary/geoposdfw.cpp | 1
-rw-r--r--  storage/src/tests/distributor/check_condition_test.cpp | 16
-rw-r--r--  storage/src/tests/distributor/putoperationtest.cpp | 3
-rw-r--r--  storage/src/tests/distributor/removeoperationtest.cpp | 44
-rw-r--r--  storage/src/vespa/storage/distributor/distributormetricsset.cpp | 2
-rw-r--r--  storage/src/vespa/storage/distributor/distributormetricsset.h | 22
-rw-r--r--  storage/src/vespa/storage/distributor/externaloperationhandler.cpp | 7
-rw-r--r--  storage/src/vespa/storage/distributor/operations/external/check_condition.cpp | 15
-rw-r--r--  storage/src/vespa/storage/distributor/operations/external/check_condition.h | 6
-rw-r--r--  storage/src/vespa/storage/distributor/operations/external/putoperation.cpp | 5
-rw-r--r--  storage/src/vespa/storage/distributor/operations/external/putoperation.h | 3
-rw-r--r--  storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp | 14
-rw-r--r--  storage/src/vespa/storage/distributor/operations/external/removeoperation.h | 5
-rw-r--r--  storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp | 3
-rw-r--r--  storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.h | 1
-rw-r--r--  storage/src/vespa/storage/distributor/persistence_operation_metric_set.cpp | 14
-rw-r--r--  storage/src/vespa/storage/distributor/persistence_operation_metric_set.h | 6
-rw-r--r--  streamingvisitors/src/vespa/vsm/vsm/docsumfilter.cpp | 1
-rw-r--r--  tenant-cd-api/src/main/java/ai/vespa/hosted/cd/EnabledInRegions.java | 2
-rw-r--r--  vespa-dependencies-enforcer/allowed-maven-dependencies.txt | 2
-rw-r--r--  vespajlib/src/main/java/com/yahoo/tensor/serialization/JsonFormat.java | 18
-rw-r--r--  vespalib/src/tests/btree/btree_test.cpp | 7
-rw-r--r--  vespalib/src/vespa/vespalib/btree/btree.h | 111
-rw-r--r--  vespalib/src/vespa/vespalib/btree/btreeiterator.h | 361
-rw-r--r--  vespalib/src/vespa/vespalib/btree/btreenodeallocator.h | 6
-rw-r--r--  vespalib/src/vespa/vespalib/geo/zcurve.cpp | 28
-rw-r--r--  vespalib/src/vespa/vespalib/geo/zcurve.h | 31
-rw-r--r--  vespalib/src/vespa/vespalib/metrics/simple_metrics_manager.cpp | 1
-rw-r--r--  vespalib/src/vespa/vespalib/metrics/stable_store.h | 5
-rw-r--r--  vespalib/src/vespa/vespalib/testkit/time_bomb.cpp | 12
-rw-r--r--  vespalib/src/vespa/vespalib/testkit/time_bomb.h | 3
-rw-r--r--  vespalib/src/vespa/vespalib/util/fiddle.h | 6
-rw-r--r--  vespalib/src/vespa/vespalib/util/latch.h | 1
229 files changed, 2237 insertions, 1401 deletions
diff --git a/client/go/go.mod b/client/go/go.mod
index c70ee5b75c8..5d1f6175e55 100644
--- a/client/go/go.mod
+++ b/client/go/go.mod
@@ -1,12 +1,13 @@
module github.com/vespa-engine/vespa/client/go
-go 1.18
+go 1.19
require (
github.com/alessio/shellescape v1.4.1
github.com/briandowns/spinner v1.23.0
github.com/fatih/color v1.15.0
- github.com/goccy/go-json v0.10.2
+ // This is the most recent version compatible with Go 1.19. Upgrade when we upgrade our Go version
+ github.com/go-json-experiment/json v0.0.0-20230216065249-540f01442424
github.com/klauspost/compress v1.16.5
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.18
diff --git a/client/go/go.sum b/client/go/go.sum
index 9b79c215864..03206b0c5e8 100644
--- a/client/go/go.sum
+++ b/client/go/go.sum
@@ -11,8 +11,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
-github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
-github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/go-json-experiment/json v0.0.0-20230216065249-540f01442424 h1:I1EK0t+BDH+kvlozNqrvzKqsWeM2QUKxXH0iW2fjDDw=
+github.com/go-json-experiment/json v0.0.0-20230216065249-540f01442424/go.mod h1:I+I5/LT2lLP0eZsBNaVDrOrYASx9h7o7mRHmy+535/A=
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
diff --git a/client/go/internal/cli/cmd/api_key.go b/client/go/internal/cli/cmd/api_key.go
index 367a515f3c3..8b3780ab82b 100644
--- a/client/go/internal/cli/cmd/api_key.go
+++ b/client/go/internal/cli/cmd/api_key.go
@@ -58,11 +58,11 @@ func doApiKey(cli *CLI, overwriteKey bool, args []string) error {
if err != nil {
return err
}
- targetType, err := cli.config.targetType()
+ targetType, err := cli.targetType()
if err != nil {
return err
}
- system, err := cli.system(targetType)
+ system, err := cli.system(targetType.name)
if err != nil {
return err
}
diff --git a/client/go/internal/cli/cmd/cert.go b/client/go/internal/cli/cmd/cert.go
index 48bad974c3f..95206b7e77d 100644
--- a/client/go/internal/cli/cmd/cert.go
+++ b/client/go/internal/cli/cmd/cert.go
@@ -107,15 +107,15 @@ func doCert(cli *CLI, overwriteCertificate, noApplicationPackage bool, args []st
return err
}
}
- targetType, err := cli.config.targetType()
+ targetType, err := cli.targetType()
if err != nil {
return err
}
- privateKeyFile, err := cli.config.privateKeyPath(app, targetType)
+ privateKeyFile, err := cli.config.privateKeyPath(app, targetType.name)
if err != nil {
return err
}
- certificateFile, err := cli.config.certificatePath(app, targetType)
+ certificateFile, err := cli.config.certificatePath(app, targetType.name)
if err != nil {
return err
}
@@ -178,11 +178,11 @@ func doCertAdd(cli *CLI, overwriteCertificate bool, args []string) error {
if err != nil {
return err
}
- targetType, err := cli.config.targetType()
+ targetType, err := cli.targetType()
if err != nil {
return err
}
- certificateFile, err := cli.config.certificatePath(app, targetType)
+ certificateFile, err := cli.config.certificatePath(app, targetType.name)
if err != nil {
return err
}
diff --git a/client/go/internal/cli/cmd/config.go b/client/go/internal/cli/cmd/config.go
index e2132814386..0e120546c8b 100644
--- a/client/go/internal/cli/cmd/config.go
+++ b/client/go/internal/cli/cmd/config.go
@@ -329,7 +329,7 @@ func (c *Config) write() error {
return c.config.WriteFile(configFile)
}
-func (c *Config) targetType() (string, error) {
+func (c *Config) targetOrURL() (string, error) {
targetType, ok := c.get(targetFlag)
if !ok {
return "", fmt.Errorf("target is unset")
diff --git a/client/go/internal/cli/cmd/config_test.go b/client/go/internal/cli/cmd/config_test.go
index 66b65bf402b..3a81b93ea0d 100644
--- a/client/go/internal/cli/cmd/config_test.go
+++ b/client/go/internal/cli/cmd/config_test.go
@@ -261,6 +261,22 @@ func TestConfigReadTLSOptions(t *testing.T) {
)
}
+func TestConfigTargetResolving(t *testing.T) {
+ cli, _, _ := newTestCLI(t)
+ require.Nil(t, cli.Run("config", "set", "target", "https://example.com"))
+ assertTargetType(t, vespa.TargetCustom, cli)
+ require.Nil(t, cli.Run("config", "set", "target", "https://foo.bar.vespa-team.no-north-1.dev.z.vespa-app.cloud"))
+ assertTargetType(t, vespa.TargetCloud, cli)
+ require.Nil(t, cli.Run("config", "set", "target", "https://foo.bar.vespa-team.no-north-1.dev.z.vespa.oath.cloud:4443"))
+ assertTargetType(t, vespa.TargetHosted, cli)
+}
+
+func assertTargetType(t *testing.T, expected string, cli *CLI) {
+ targetType, err := cli.targetType()
+ require.Nil(t, err)
+ assert.Equal(t, expected, targetType.name)
+}
+
func assertTLSOptions(t *testing.T, homeDir string, app vespa.ApplicationID, target string, want vespa.TLSOptions, envVars ...string) {
t.Helper()
envVars = append(envVars, "VESPA_CLI_HOME="+homeDir)
diff --git a/client/go/internal/cli/cmd/login.go b/client/go/internal/cli/cmd/login.go
index 9ac2262e78d..d2075bdfcf0 100644
--- a/client/go/internal/cli/cmd/login.go
+++ b/client/go/internal/cli/cmd/login.go
@@ -27,11 +27,11 @@ func newLoginCmd(cli *CLI) *cobra.Command {
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
- targetType, err := cli.config.targetType()
+ targetType, err := cli.targetType()
if err != nil {
return err
}
- system, err := cli.system(targetType)
+ system, err := cli.system(targetType.name)
if err != nil {
return err
}
diff --git a/client/go/internal/cli/cmd/logout.go b/client/go/internal/cli/cmd/logout.go
index 32e7cd9783b..93f7cb6270f 100644
--- a/client/go/internal/cli/cmd/logout.go
+++ b/client/go/internal/cli/cmd/logout.go
@@ -14,11 +14,11 @@ func newLogoutCmd(cli *CLI) *cobra.Command {
DisableAutoGenTag: true,
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
- targetType, err := cli.config.targetType()
+ targetType, err := cli.targetType()
if err != nil {
return err
}
- system, err := cli.system(targetType)
+ system, err := cli.system(targetType.name)
if err != nil {
return err
}
diff --git a/client/go/internal/cli/cmd/root.go b/client/go/internal/cli/cmd/root.go
index c4012024426..17c4fc41625 100644
--- a/client/go/internal/cli/cmd/root.go
+++ b/client/go/internal/cli/cmd/root.go
@@ -6,6 +6,7 @@ import (
"fmt"
"io"
"log"
+ "net/url"
"os"
"os/exec"
"strings"
@@ -73,6 +74,11 @@ type targetOptions struct {
noCertificate bool
}
+type targetType struct {
+ name string
+ url string
+}
+
// errHint creates a new CLI error, with optional hints that will be printed after the error
func errHint(err error, hints ...string) ErrCLI { return ErrCLI{Status: 1, hints: hints, error: err} }
@@ -297,7 +303,19 @@ func (c *CLI) printWarning(msg interface{}, hints ...string) {
// target creates a target according the configuration of this CLI and given opts.
func (c *CLI) target(opts targetOptions) (vespa.Target, error) {
- target, err := c.createTarget(opts)
+ targetType, err := c.targetType()
+ if err != nil {
+ return nil, err
+ }
+ var target vespa.Target
+ switch targetType.name {
+ case vespa.TargetLocal, vespa.TargetCustom:
+ target, err = c.createCustomTarget(targetType.name, targetType.url)
+ case vespa.TargetCloud, vespa.TargetHosted:
+ target, err = c.createCloudTarget(targetType.name, opts, targetType.url)
+ default:
+ return nil, errHint(fmt.Errorf("invalid target: %s", targetType), "Valid targets are 'local', 'cloud', 'hosted' or an URL")
+ }
if err != nil {
return nil, err
}
@@ -309,24 +327,39 @@ func (c *CLI) target(opts targetOptions) (vespa.Target, error) {
return target, nil
}
-func (c *CLI) createTarget(opts targetOptions) (vespa.Target, error) {
- targetType, err := c.config.targetType()
+// targetType resolves the real target type and its custom URL (if any)
+func (c *CLI) targetType() (targetType, error) {
+ v, err := c.config.targetOrURL()
if err != nil {
- return nil, err
+ return targetType{}, err
}
- customURL := ""
- if strings.HasPrefix(targetType, "http") {
- customURL = targetType
- targetType = vespa.TargetCustom
+ tt := targetType{name: v}
+ if strings.HasPrefix(tt.name, "http://") || strings.HasPrefix(tt.name, "https://") {
+ tt.url = tt.name
+ tt.name, err = c.targetFromURL(tt.url)
+ if err != nil {
+ return targetType{}, err
+ }
}
- switch targetType {
- case vespa.TargetLocal, vespa.TargetCustom:
- return c.createCustomTarget(targetType, customURL)
- case vespa.TargetCloud, vespa.TargetHosted:
- return c.createCloudTarget(targetType, opts)
- default:
- return nil, errHint(fmt.Errorf("invalid target: %s", targetType), "Valid targets are 'local', 'cloud', 'hosted' or an URL")
+ return tt, nil
+}
+
+func (c *CLI) targetFromURL(customURL string) (string, error) {
+ u, err := url.Parse(customURL)
+ if err != nil {
+ return "", err
+ }
+ // Check if URL belongs to a cloud target
+ for _, cloudTarget := range []string{vespa.TargetHosted, vespa.TargetCloud} {
+ system, err := c.system(cloudTarget)
+ if err != nil {
+ return "", err
+ }
+ if strings.HasSuffix(u.Hostname(), "."+system.EndpointDomain) {
+ return cloudTarget, nil
+ }
}
+ return vespa.TargetCustom, nil
}
func (c *CLI) createCustomTarget(targetType, customURL string) (vespa.Target, error) {
@@ -344,7 +377,7 @@ func (c *CLI) createCustomTarget(targetType, customURL string) (vespa.Target, er
}
}
-func (c *CLI) createCloudTarget(targetType string, opts targetOptions) (vespa.Target, error) {
+func (c *CLI) createCloudTarget(targetType string, opts targetOptions, customURL string) (vespa.Target, error) {
system, err := c.system(targetType)
if err != nil {
return nil, err
@@ -409,6 +442,7 @@ func (c *CLI) createCloudTarget(targetType string, opts targetOptions) (vespa.Ta
deploymentOptions := vespa.CloudDeploymentOptions{
Deployment: deployment,
TLSOptions: deploymentTLSOptions,
+ CustomURL: customURL,
ClusterURLs: endpoints,
}
logLevel := opts.logLevel
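
The root.go change above stops treating every http(s) target as plain "custom": targetType() now parses the configured URL and classifies it as cloud or hosted when the hostname ends in that system's endpoint domain, falling back to custom otherwise. Below is a minimal, self-contained sketch of the same suffix check, using the example URLs from the config_test.go hunk above; in the real CLI the endpoint domains come from cli.system(), so the map here is only an illustrative stand-in.

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// classifyTarget mirrors the suffix check in targetFromURL: a URL whose
// hostname ends in a known cloud endpoint domain maps to that target,
// anything else is "custom". The domain values are assumptions taken
// from the test URLs, not the CLI's authoritative system definitions.
func classifyTarget(rawURL string) (string, error) {
	u, err := url.Parse(rawURL)
	if err != nil {
		return "", err
	}
	domains := map[string]string{
		"hosted": "vespa.oath.cloud", // assumed endpoint domain
		"cloud":  "vespa-app.cloud",  // assumed endpoint domain
	}
	for name, domain := range domains {
		if strings.HasSuffix(u.Hostname(), "."+domain) {
			return name, nil
		}
	}
	return "custom", nil
}

func main() {
	for _, t := range []string{
		"https://example.com",
		"https://foo.bar.vespa-team.no-north-1.dev.z.vespa-app.cloud",
	} {
		name, _ := classifyTarget(t)
		fmt.Println(t, "->", name)
	}
}
```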
diff --git a/client/go/internal/cli/cmd/status_test.go b/client/go/internal/cli/cmd/status_test.go
index a3cae7c3fe4..76efea55503 100644
--- a/client/go/internal/cli/cmd/status_test.go
+++ b/client/go/internal/cli/cmd/status_test.go
@@ -16,7 +16,7 @@ func TestStatusDeployCommand(t *testing.T) {
}
func TestStatusDeployCommandWithURLTarget(t *testing.T) {
- assertDeployStatus("http://mydeploytarget:19071", []string{"-t", "http://mydeploytarget"}, t)
+ assertDeployStatus("http://mydeploytarget:19071", []string{"-t", "http://mydeploytarget:19071"}, t)
}
func TestStatusDeployCommandWithLocalTarget(t *testing.T) {
@@ -28,7 +28,7 @@ func TestStatusQueryCommand(t *testing.T) {
}
func TestStatusQueryCommandWithUrlTarget(t *testing.T) {
- assertQueryStatus("http://mycontainertarget:8080", []string{"-t", "http://mycontainertarget"}, t)
+ assertQueryStatus("http://mycontainertarget:8080", []string{"-t", "http://mycontainertarget:8080"}, t)
}
func TestStatusQueryCommandWithLocalTarget(t *testing.T) {
diff --git a/client/go/internal/mock/http.go b/client/go/internal/mock/http.go
index 8a17d9996d6..f176870a940 100644
--- a/client/go/internal/mock/http.go
+++ b/client/go/internal/mock/http.go
@@ -61,6 +61,8 @@ func (c *HTTPClient) Do(request *http.Request, timeout time.Duration) (*http.Res
return nil, err
}
c.LastBody = body
+ } else {
+ c.LastBody = nil
}
c.Requests = append(c.Requests, request)
if response.Header == nil {
diff --git a/client/go/internal/util/http.go b/client/go/internal/util/http.go
index 546098a204d..35e35b16720 100644
--- a/client/go/internal/util/http.go
+++ b/client/go/internal/util/http.go
@@ -19,17 +19,20 @@ type HTTPClient interface {
}
type defaultHTTPClient struct {
- client *http.Client
+ client *http.Client
+ setUserAgent bool
}
func (c *defaultHTTPClient) Do(request *http.Request, timeout time.Duration) (response *http.Response, error error) {
if c.client.Timeout != timeout { // Set wanted timeout
c.client.Timeout = timeout
}
- if request.Header == nil {
- request.Header = make(http.Header)
+ if c.setUserAgent {
+ if request.Header == nil {
+ request.Header = make(http.Header)
+ }
+ request.Header.Set("User-Agent", fmt.Sprintf("Vespa CLI/%s", build.Version))
}
- request.Header.Set("User-Agent", fmt.Sprintf("Vespa CLI/%s", build.Version))
return c.client.Do(request)
}
@@ -65,6 +68,7 @@ func ForceHTTP2(client HTTPClient, certificates []tls.Certificate, caCertificate
if !ok {
return
}
+ c.setUserAgent = false // Let caller control all request headers
var dialFunc func(ctx context.Context, network, addr string, cfg *tls.Config) (net.Conn, error)
if certificates == nil {
// No certificate, so force H2C (HTTP/2 over clear-text) by using a non-TLS Dialer
@@ -86,8 +90,11 @@ func ForceHTTP2(client HTTPClient, certificates []tls.Certificate, caCertificate
}
func CreateClient(timeout time.Duration) HTTPClient {
- return &defaultHTTPClient{client: &http.Client{
- Timeout: timeout,
- Transport: http.DefaultTransport,
- }}
+ return &defaultHTTPClient{
+ client: &http.Client{
+ Timeout: timeout,
+ Transport: http.DefaultTransport,
+ },
+ setUserAgent: true,
+ }
}
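
Why ForceHTTP2 now disables the built-in User-Agent becomes clearer in document/http.go further down, where a single http.Header map is shared across requests: an http.Header is just a map, so a transport-level Set would silently mutate every request that shares it. A small standard-library-only illustration of that hazard (URLs and values are hypothetical):

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// One header map shared by two requests, like the shared
	// defaultHeaders/gzipHeaders in document/http.go.
	shared := http.Header{"Content-Type": {"application/json; charset=utf-8"}}

	a, _ := http.NewRequest("POST", "https://example.invalid/a", nil)
	b, _ := http.NewRequest("POST", "https://example.invalid/b", nil)
	a.Header = shared
	b.Header = shared

	// Mutating one request's headers mutates the other, because both
	// point at the same underlying map. Hence the transport must not
	// set User-Agent itself once the caller owns the headers.
	a.Header.Set("User-Agent", "Vespa CLI/dev")
	fmt.Println(b.Header.Get("User-Agent")) // also "Vespa CLI/dev"
}
```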
diff --git a/client/go/internal/vespa/document/dispatcher.go b/client/go/internal/vespa/document/dispatcher.go
index 7237a87b7e2..2ad5b841616 100644
--- a/client/go/internal/vespa/document/dispatcher.go
+++ b/client/go/internal/vespa/document/dispatcher.go
@@ -25,7 +25,7 @@ type Dispatcher struct {
msgs chan string
inflight map[string]*Queue[documentOp]
- inflightCount int64
+ inflightCount atomic.Int64
output io.Writer
verbose bool
@@ -76,7 +76,7 @@ func (d *Dispatcher) shouldRetry(op documentOp, result Result) bool {
}
if result.HTTPStatus == 429 || result.HTTPStatus == 503 {
d.msgs <- fmt.Sprintf("feed: %s was throttled with status %d: retrying", op.document, result.HTTPStatus)
- d.throttler.Throttled(atomic.LoadInt64(&d.inflightCount))
+ d.throttler.Throttled(d.inflightCount.Load())
return true
}
if result.Err != nil || result.HTTPStatus == 500 || result.HTTPStatus == 502 || result.HTTPStatus == 504 {
@@ -226,20 +226,20 @@ func (d *Dispatcher) acceptDocument() bool {
}
func (d *Dispatcher) acquireSlot() {
- for atomic.LoadInt64(&d.inflightCount) >= d.throttler.TargetInflight() {
+ for d.inflightCount.Load() >= d.throttler.TargetInflight() {
time.Sleep(time.Millisecond)
}
- atomic.AddInt64(&d.inflightCount, 1)
+ d.inflightCount.Add(1)
}
-func (d *Dispatcher) releaseSlot() { atomic.AddInt64(&d.inflightCount, -1) }
+func (d *Dispatcher) releaseSlot() { d.inflightCount.Add(-1) }
func (d *Dispatcher) Enqueue(doc Document) error { return d.enqueue(documentOp{document: doc}, false) }
func (d *Dispatcher) Stats() Stats {
d.statsMu.Lock()
defer d.statsMu.Unlock()
- d.stats.Inflight = atomic.LoadInt64(&d.inflightCount)
+ d.stats.Inflight = d.inflightCount.Load()
return d.stats
}
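
The dispatcher (and the throttler at the end of this diff) replaces a bare int64 guarded by atomic.LoadInt64/AddInt64 calls with the atomic.Int64 type that the go.mod bump to Go 1.19 makes available, so the counter can no longer be read or written non-atomically by accident. A minimal before/after sketch of the idiom:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type counters struct {
	// Old style: raw int64, every access must remember to go through
	// atomic.LoadInt64 / atomic.AddInt64 on &oldCount.
	oldCount int64
	// New style (Go 1.19+): the type itself enforces atomic access.
	newCount atomic.Int64
}

func main() {
	var c counters
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			atomic.AddInt64(&c.oldCount, 1)
			c.newCount.Add(1)
		}()
	}
	wg.Wait()
	fmt.Println(atomic.LoadInt64(&c.oldCount), c.newCount.Load())
}
```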
diff --git a/client/go/internal/vespa/document/dispatcher_test.go b/client/go/internal/vespa/document/dispatcher_test.go
index 252bd94dff9..382d21501c3 100644
--- a/client/go/internal/vespa/document/dispatcher_test.go
+++ b/client/go/internal/vespa/document/dispatcher_test.go
@@ -61,8 +61,8 @@ func TestDispatcher(t *testing.T) {
breaker := NewCircuitBreaker(time.Second, 0)
dispatcher := NewDispatcher(feeder, throttler, breaker, io.Discard, false)
docs := []Document{
- {Id: mustParseId("id:ns:type::doc1"), Operation: OperationPut, Fields: []byte(`{"foo": "123"}`)},
- {Id: mustParseId("id:ns:type::doc2"), Operation: OperationPut, Fields: []byte(`{"bar": "456"}`)},
+ {Id: mustParseId("id:ns:type::doc1"), Operation: OperationPut, Body: []byte(`{"fields": {"foo": "123"}}`)},
+ {Id: mustParseId("id:ns:type::doc2"), Operation: OperationPut, Body: []byte(`{"fields": {"bar": "456"}}`)},
}
for _, d := range docs {
dispatcher.Enqueue(d)
@@ -192,7 +192,7 @@ func BenchmarkDocumentDispatching(b *testing.B) {
throttler := newThrottler(8, clock.now)
breaker := NewCircuitBreaker(time.Second, 0)
dispatcher := NewDispatcher(feeder, throttler, breaker, io.Discard, false)
- doc := Document{Id: mustParseId("id:ns:type::doc1"), Operation: OperationPut, Fields: []byte(`{"foo": "123"}`)}
+ doc := Document{Id: mustParseId("id:ns:type::doc1"), Operation: OperationPut, Body: []byte(`{"fields": {"foo": "123"}}`)}
b.ResetTimer() // ignore setup time
for n := 0; n < b.N; n++ {
diff --git a/client/go/internal/vespa/document/document.go b/client/go/internal/vespa/document/document.go
index ce8b22b24f0..8f884b223d7 100644
--- a/client/go/internal/vespa/document/document.go
+++ b/client/go/internal/vespa/document/document.go
@@ -11,17 +11,32 @@ import (
"time"
- "github.com/goccy/go-json"
+ // Why do we use an experimental parser? This appears to be the only JSON library that satisfies the following
+ // requirements:
+ // - Faster than the std parser
+ // - Supports parsing from a io.Reader
+ // - Supports parsing token-by-token
+ // - Few allocations during parsing (especially for large objects)
+ "github.com/go-json-experiment/json"
)
-var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1}
-
type Operation int
const (
OperationPut Operation = iota
OperationUpdate
OperationRemove
+
+ jsonArrayStart json.Kind = '['
+ jsonArrayEnd json.Kind = ']'
+ jsonObjectStart json.Kind = '{'
+ jsonObjectEnd json.Kind = '}'
+ jsonString json.Kind = '"'
+)
+
+var (
+ fieldsPrefix = []byte(`{"fields":`)
+ fieldsSuffix = []byte("}")
)
// Id represents a Vespa document ID.
@@ -98,27 +113,20 @@ func ParseId(serialized string) (Id, error) {
type Document struct {
Id Id
Condition string
- Fields []byte
+ Body []byte
Operation Operation
Create bool
}
-type jsonDocument struct {
- IdString string `json:"id"`
- PutId string `json:"put"`
- UpdateId string `json:"update"`
- RemoveId string `json:"remove"`
- Condition string `json:"condition"`
- Fields json.RawMessage `json:"fields"`
- Create bool `json:"create"`
-}
-
// Decoder decodes documents from a JSON structure which is either an array of objects, or objects separated by newline.
type Decoder struct {
- buf *bufio.Reader
- dec *json.Decoder
+ dec *json.Decoder
+ buf bytes.Buffer
+
array bool
jsonl bool
+
+ fieldsEnd int64
}
func (d Document) String() string {
@@ -139,49 +147,61 @@ func (d Document) String() string {
if d.Create {
sb.WriteString(", create=true")
}
+ if d.Body != nil {
+ sb.WriteString(", body=")
+ sb.WriteString(string(d.Body))
+ }
return sb.String()
}
func (d *Decoder) guessMode() error {
- for !d.array && !d.jsonl {
- b, err := d.buf.ReadByte()
- if err != nil {
- return err
- }
- // Skip leading whitespace
- if b < 0x80 && asciiSpace[b] != 0 {
- continue
- }
- switch rune(b) {
- case '{':
- d.jsonl = true
- case '[':
- d.array = true
- default:
- return fmt.Errorf("unexpected token: %q", string(b))
- }
- if err := d.buf.UnreadByte(); err != nil {
- return err
- }
- if err := d.readArrayToken(true); err != nil {
+ if d.array || d.jsonl {
+ return nil
+ }
+ kind := d.dec.PeekKind()
+ switch kind {
+ case jsonArrayStart:
+ if _, err := d.readNext(jsonArrayStart); err != nil {
return err
}
+ d.array = true
+ case jsonObjectStart:
+ d.jsonl = true
+ default:
+ return fmt.Errorf("expected %s or %s, got %s", jsonArrayStart, jsonObjectStart, kind)
}
return nil
}
-func (d *Decoder) readArrayToken(open bool) error {
- if !d.array {
- return nil
+func (d *Decoder) readNext(kind json.Kind) (json.Token, error) {
+ t, err := d.dec.ReadToken()
+ if err != nil {
+ return json.Token{}, err
}
- t, err := d.dec.Token()
+ if t.Kind() != kind {
+ return json.Token{}, fmt.Errorf("unexpected json kind: %q: want %q", t, kind)
+ }
+ return t, nil
+}
+
+func (d *Decoder) readString() (string, error) {
+ t, err := d.readNext(jsonString)
if err != nil {
- return err
+ return "", err
}
- if (open && t == json.Delim('[')) || (!open && t == json.Delim(']')) {
- return nil
+ return t.String(), nil
+}
+
+func (d *Decoder) readBool() (bool, error) {
+ t, err := d.dec.ReadToken()
+ if err != nil {
+ return false, err
+ }
+ kind := t.Kind()
+ if kind != 't' && kind != 'f' {
+ return false, fmt.Errorf("unexpected json kind: %q: want %q or %q", t, 't', 'f')
}
- return fmt.Errorf("invalid array token: %q", t)
+ return t.Bool(), nil
}
func (d *Decoder) Decode() (Document, error) {
@@ -192,60 +212,118 @@ func (d *Decoder) Decode() (Document, error) {
return doc, err
}
+func (d *Decoder) readField(name string, offset int64, doc *Document) error {
+ readId := false
+ switch name {
+ case "id", "put":
+ readId = true
+ doc.Operation = OperationPut
+ case "update":
+ readId = true
+ doc.Operation = OperationUpdate
+ case "remove":
+ readId = true
+ doc.Operation = OperationRemove
+ case "condition":
+ condition, err := d.readString()
+ if err != nil {
+ return err
+ }
+ doc.Condition = condition
+ case "create":
+ create, err := d.readBool()
+ if err != nil {
+ return err
+ }
+ doc.Create = create
+ case "fields":
+ if _, err := d.readNext(jsonObjectStart); err != nil {
+ return err
+ }
+ // Skip data between start of operation and start of fields
+ fieldsStart := d.dec.InputOffset() - 1
+ d.buf.Next(int(fieldsStart - offset))
+ depth := 1
+ for depth > 0 {
+ t, err := d.dec.ReadToken()
+ if err != nil {
+ return err
+ }
+ switch t.Kind() {
+ case jsonObjectStart:
+ depth++
+ case jsonObjectEnd:
+ depth--
+ }
+ }
+ d.fieldsEnd = d.dec.InputOffset()
+ fields := d.buf.Next(int(d.fieldsEnd - fieldsStart))
+ doc.Body = make([]byte, 0, len(fieldsPrefix)+len(fields)+len(fieldsSuffix))
+ doc.Body = append(doc.Body, fieldsPrefix...)
+ doc.Body = append(doc.Body, fields...)
+ doc.Body = append(doc.Body, fieldsSuffix...)
+ }
+ if readId {
+ s, err := d.readString()
+ if err != nil {
+ return err
+ }
+ id, err := ParseId(s)
+ if err != nil {
+ return err
+ }
+ doc.Id = id
+ }
+ return nil
+}
+
func (d *Decoder) decode() (Document, error) {
+ start := d.dec.InputOffset()
if err := d.guessMode(); err != nil {
return Document{}, err
}
- if !d.dec.More() {
- if err := d.readArrayToken(false); err != nil {
+ if d.array && d.dec.PeekKind() == jsonArrayEnd {
+ // Reached end of the array holding document operations
+ if _, err := d.readNext(jsonArrayEnd); err != nil {
return Document{}, err
}
return Document{}, io.EOF
}
- doc := jsonDocument{}
- if err := d.dec.Decode(&doc); err != nil {
+ // Start of document operation
+ if _, err := d.readNext(jsonObjectStart); err != nil {
return Document{}, err
}
- return parseDocument(&doc)
-}
-
-func NewDecoder(r io.Reader) *Decoder {
- buf := bufio.NewReaderSize(r, 1<<26)
- return &Decoder{
- buf: buf,
- dec: json.NewDecoder(buf),
+ var doc Document
+loop:
+ for {
+ switch d.dec.PeekKind() {
+ case jsonString:
+ t, err := d.dec.ReadToken()
+ if err != nil {
+ return Document{}, err
+ }
+ if err := d.readField(t.String(), start, &doc); err != nil {
+ return Document{}, err
+ }
+ default:
+ if _, err := d.readNext(jsonObjectEnd); err != nil {
+ return Document{}, err
+ }
+ // Drop operation from the buffer
+ start = max(start, d.fieldsEnd)
+ end := d.dec.InputOffset()
+ d.buf.Next(int(end - start))
+ break loop
+ }
}
+ return doc, nil
}
-func parseDocument(d *jsonDocument) (Document, error) {
- id := ""
- var op Operation
- if d.IdString != "" {
- op = OperationPut
- id = d.IdString
- } else if d.PutId != "" {
- op = OperationPut
- id = d.PutId
- } else if d.UpdateId != "" {
- op = OperationUpdate
- id = d.UpdateId
- } else if d.RemoveId != "" {
- op = OperationRemove
- id = d.RemoveId
- } else {
- return Document{}, fmt.Errorf("invalid document: missing operation: %v", d)
- }
- docId, err := ParseId(id)
- if err != nil {
- return Document{}, err
- }
- return Document{
- Id: docId,
- Operation: op,
- Condition: d.Condition,
- Create: d.Create,
- Fields: d.Fields,
- }, nil
+func NewDecoder(r io.Reader) *Decoder {
+ br := bufio.NewReaderSize(r, 1<<26)
+ d := &Decoder{}
+ d.dec = json.NewDecoder(io.TeeReader(br, &d.buf))
+ return d
}
func parseError(value string) error {
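
The rewritten Decoder never unmarshals the fields object into an intermediate struct: NewDecoder tees every byte the tokenizer consumes into d.buf, and readField records the decoder's InputOffset before and after skipping the object so it can slice the raw bytes back out of that buffer and wrap them as {"fields": ...}. The following stripped-down sketch shows the same tee-and-offset technique, restricted to the decoder calls the patch itself uses (NewDecoder, PeekKind, ReadToken, InputOffset); the per-operation bookkeeping of the real Decoder is omitted.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/go-json-experiment/json"
)

// rawObject returns the raw bytes of the next JSON object in r without
// unmarshalling it: the decoder reads through a TeeReader so every byte
// it consumes also lands in buf, and InputOffset() gives the bounds to
// cut back out of that buffer.
func rawObject(r io.Reader) ([]byte, error) {
	var buf bytes.Buffer
	dec := json.NewDecoder(io.TeeReader(r, &buf))
	if k := dec.PeekKind(); k != '{' {
		return nil, fmt.Errorf("expected object, got %v", k)
	}
	start := dec.InputOffset()
	depth := 0
	for {
		t, err := dec.ReadToken()
		if err != nil {
			return nil, err
		}
		switch t.Kind() {
		case '{':
			depth++
		case '}':
			depth--
		}
		if depth == 0 {
			break
		}
	}
	end := dec.InputOffset()
	buf.Next(int(start))                    // drop anything consumed before the object
	raw := buf.Next(int(end - start))       // the object's raw bytes
	return append([]byte(nil), raw...), nil // copy: the slice is only valid until buf is used again
}

func main() {
	raw, err := rawObject(strings.NewReader(`{"foo": "123", "bar": {"a": [1, 2, 3]}}`))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", raw)
}
```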
diff --git a/client/go/internal/vespa/document/document_test.go b/client/go/internal/vespa/document/document_test.go
index 397136173bc..fbaa076ab9d 100644
--- a/client/go/internal/vespa/document/document_test.go
+++ b/client/go/internal/vespa/document/document_test.go
@@ -113,18 +113,31 @@ func feedInput(jsonl bool) string {
`
{
"put": "id:ns:type::doc1",
- "fields": {"foo": "123"}
+ "fields": { "foo" : "123", "bar": {"a": [1, 2, 3]}}
}`,
`
-{
+
+ {
"put": "id:ns:type::doc2",
+ "create": false,
+ "condition": "foo",
"fields": {"bar": "456"}
}`,
`
{
- "remove": "id:ns:type::doc1"
+ "remove": "id:ns:type::doc3"
}
-`}
+`,
+ `
+{
+ "fields": {"qux": "789"},
+ "put": "id:ns:type::doc4",
+ "create": true
+}`,
+ `
+{
+ "remove": "id:ns:type::doc5"
+}`}
if jsonl {
return strings.Join(operations, "\n")
}
@@ -133,15 +146,17 @@ func feedInput(jsonl bool) string {
func testDocumentDecoder(t *testing.T, jsonLike string) {
t.Helper()
- r := NewDecoder(strings.NewReader(jsonLike))
+ dec := NewDecoder(strings.NewReader(jsonLike))
want := []Document{
- {Id: mustParseId("id:ns:type::doc1"), Operation: OperationPut, Fields: []byte(`{"foo": "123"}`)},
- {Id: mustParseId("id:ns:type::doc2"), Operation: OperationPut, Fields: []byte(`{"bar": "456"}`)},
- {Id: mustParseId("id:ns:type::doc1"), Operation: OperationRemove},
+ {Id: mustParseId("id:ns:type::doc1"), Operation: OperationPut, Body: []byte(`{"fields":{ "foo" : "123", "bar": {"a": [1, 2, 3]}}}`)},
+ {Id: mustParseId("id:ns:type::doc2"), Operation: OperationPut, Condition: "foo", Body: []byte(`{"fields":{"bar": "456"}}`)},
+ {Id: mustParseId("id:ns:type::doc3"), Operation: OperationRemove},
+ {Id: mustParseId("id:ns:type::doc4"), Operation: OperationPut, Create: true, Body: []byte(`{"fields":{"qux": "789"}}`)},
+ {Id: mustParseId("id:ns:type::doc5"), Operation: OperationRemove},
}
got := []Document{}
for {
- doc, err := r.Decode()
+ doc, err := dec.Decode()
if err == io.EOF {
break
}
@@ -150,15 +165,23 @@ func testDocumentDecoder(t *testing.T, jsonLike string) {
}
got = append(got, doc)
}
+ wantBufLen := 0
+ if dec.array {
+ wantBufLen = 1
+ }
+ if l := dec.buf.Len(); l != wantBufLen {
+ t.Errorf("got dec.buf.Len() = %d, want %d", l, wantBufLen)
+ }
if !reflect.DeepEqual(got, want) {
t.Errorf("got %+v, want %+v", got, want)
}
}
-func TestDocumentDecoder(t *testing.T) {
- testDocumentDecoder(t, feedInput(false))
- testDocumentDecoder(t, feedInput(true))
+func TestDocumentDecoderArray(t *testing.T) { testDocumentDecoder(t, feedInput(false)) }
+
+func TestDocumentDecoderJSONL(t *testing.T) { testDocumentDecoder(t, feedInput(true)) }
+func TestDocumentDecoderInvalid(t *testing.T) {
jsonLike := `
{
"put": "id:ns:type::doc1",
@@ -169,13 +192,13 @@ func TestDocumentDecoder(t *testing.T) {
"fields": {"foo": "invalid
}
`
- r := NewDecoder(strings.NewReader(jsonLike))
- _, err := r.Decode() // first object is valid
+ dec := NewDecoder(strings.NewReader(jsonLike))
+ _, err := dec.Decode() // first object is valid
if err != nil {
t.Errorf("unexpected error: %s", err)
}
- _, err = r.Decode()
- wantErr := "invalid json at byte offset 122: json: string of object unexpected end of JSON input"
+ _, err = dec.Decode()
+ wantErr := "invalid json at byte offset 110: json: invalid character '\\n' within string (expecting non-control character)"
if err.Error() != wantErr {
t.Errorf("want error %q, got %q", wantErr, err.Error())
}
diff --git a/client/go/internal/vespa/document/http.go b/client/go/internal/vespa/document/http.go
index 3655bd020f4..ce57ac55f03 100644
--- a/client/go/internal/vespa/document/http.go
+++ b/client/go/internal/vespa/document/http.go
@@ -2,7 +2,6 @@ package document
import (
"bytes"
- "encoding/json"
"fmt"
"io"
"math"
@@ -15,8 +14,10 @@ import (
"sync/atomic"
"time"
+ "github.com/go-json-experiment/json"
"github.com/klauspost/compress/gzip"
+ "github.com/vespa-engine/vespa/client/go/internal/build"
"github.com/vespa-engine/vespa/client/go/internal/util"
)
@@ -29,8 +30,15 @@ const (
)
var (
- fieldsPrefix = []byte(`{"fields":`)
- fieldsSuffix = []byte("}")
+ defaultHeaders http.Header = map[string][]string{
+ "User-Agent": {fmt.Sprintf("Vespa CLI/%s", build.Version)},
+ "Content-Type": {"application/json; charset=utf-8"},
+ }
+ gzipHeaders http.Header = map[string][]string{
+ "User-Agent": {fmt.Sprintf("Vespa CLI/%s", build.Version)},
+ "Content-Type": {"application/json; charset=utf-8"},
+ "Content-Encoding": {"gzip"},
+ }
)
// Client represents a HTTP client for the /document/v1/ API.
@@ -106,7 +114,7 @@ func NewClient(options ClientOptions, httpClients []util.HTTPClient) (*Client, e
return c, nil
}
-func writeQueryParam(sb *strings.Builder, start int, escape bool, k, v string) {
+func writeQueryParam(sb *bytes.Buffer, start int, escape bool, k, v string) {
if sb.Len() == start {
sb.WriteString("?")
} else {
@@ -121,16 +129,7 @@ func writeQueryParam(sb *strings.Builder, start int, escape bool, k, v string) {
}
}
-func writeRequestBody(w io.Writer, body []byte) error {
- for _, b := range [][]byte{fieldsPrefix, body, fieldsSuffix} {
- if _, err := w.Write(b); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (c *Client) methodAndURL(d Document) (string, string) {
+func (c *Client) methodAndURL(d Document, sb *bytes.Buffer) (string, string) {
httpMethod := ""
switch d.Operation {
case OperationPut:
@@ -140,7 +139,6 @@ func (c *Client) methodAndURL(d Document) (string, string) {
case OperationRemove:
httpMethod = "DELETE"
}
- var sb strings.Builder
// Base URL and path
sb.WriteString(c.options.BaseURL)
if !strings.HasSuffix(c.options.BaseURL, "/") {
@@ -165,22 +163,22 @@ func (c *Client) methodAndURL(d Document) (string, string) {
// Query part
queryStart := sb.Len()
if c.options.Timeout > 0 {
- writeQueryParam(&sb, queryStart, false, "timeout", strconv.FormatInt(c.options.Timeout.Milliseconds(), 10)+"ms")
+ writeQueryParam(sb, queryStart, false, "timeout", strconv.FormatInt(c.options.Timeout.Milliseconds(), 10)+"ms")
}
if c.options.Route != "" {
- writeQueryParam(&sb, queryStart, true, "route", c.options.Route)
+ writeQueryParam(sb, queryStart, true, "route", c.options.Route)
}
if c.options.TraceLevel > 0 {
- writeQueryParam(&sb, queryStart, false, "tracelevel", strconv.Itoa(c.options.TraceLevel))
+ writeQueryParam(sb, queryStart, false, "tracelevel", strconv.Itoa(c.options.TraceLevel))
}
if c.options.Speedtest {
- writeQueryParam(&sb, queryStart, false, "dryRun", "true")
+ writeQueryParam(sb, queryStart, false, "dryRun", "true")
}
if d.Condition != "" {
- writeQueryParam(&sb, queryStart, true, "condition", d.Condition)
+ writeQueryParam(sb, queryStart, true, "condition", d.Condition)
}
if d.Create {
- writeQueryParam(&sb, queryStart, false, "create", "true")
+ writeQueryParam(sb, queryStart, false, "create", "true")
}
return httpMethod, sb.String()
}
@@ -217,9 +215,9 @@ func (c *Client) buffer() *bytes.Buffer {
func (c *Client) preparePending() {
for pd := range c.pending {
- method, url := c.methodAndURL(pd.document)
pd.buf = c.buffer()
- pd.request, pd.err = c.createRequest(method, url, pd.document.Fields, pd.buf)
+ method, url := c.methodAndURL(pd.document, pd.buf)
+ pd.request, pd.err = c.createRequest(method, url, pd.document.Body, pd.buf)
pd.prepared <- true
}
}
@@ -231,37 +229,41 @@ func (c *Client) prepare(document Document) (*http.Request, *bytes.Buffer, error
return pd.request, pd.buf, pd.err
}
+func newRequest(method, url string, body io.Reader, gzipped bool) (*http.Request, error) {
+ req, err := http.NewRequest(method, url, body)
+ if err != nil {
+ return nil, err
+ }
+ if gzipped {
+ req.Header = gzipHeaders
+ } else {
+ req.Header = defaultHeaders
+ }
+ return req, nil
+}
+
func (c *Client) createRequest(method, url string, body []byte, buf *bytes.Buffer) (*http.Request, error) {
+ buf.Reset()
if len(body) == 0 {
- req, err := http.NewRequest(method, url, nil)
- return req, err
+ return newRequest(method, url, nil, false)
}
- bodySize := len(fieldsPrefix) + len(body) + len(fieldsSuffix)
- useGzip := c.options.Compression == CompressionGzip || (c.options.Compression == CompressionAuto && bodySize > 512)
- buf.Grow(min(1024, bodySize))
+ useGzip := c.options.Compression == CompressionGzip || (c.options.Compression == CompressionAuto && len(body) > 512)
+ var r io.Reader
if useGzip {
+ buf.Grow(min(1024, len(body)))
zw := c.gzipWriter(buf)
defer c.gzippers.Put(zw)
- if err := writeRequestBody(zw, body); err != nil {
+ if _, err := zw.Write(body); err != nil {
return nil, err
}
if err := zw.Close(); err != nil {
return nil, err
}
+ r = buf
} else {
- if err := writeRequestBody(buf, body); err != nil {
- return nil, err
- }
- }
- req, err := http.NewRequest(method, url, buf)
- if err != nil {
- return nil, err
- }
- if useGzip {
- req.Header.Set("Content-Encoding", "gzip")
+ r = bytes.NewReader(body)
}
- req.Header.Set("Content-Type", "application/json; charset=utf-8")
- return req, nil
+ return newRequest(method, url, r, useGzip)
}
func (c *Client) clientTimeout() time.Duration {
@@ -280,7 +282,10 @@ func (c *Client) Send(document Document) Result {
if err != nil {
return resultWithErr(result, err)
}
- bodySize := buf.Len()
+ bodySize := len(document.Body)
+ if buf.Len() > 0 {
+ bodySize = buf.Len()
+ }
resp, err := c.leastBusyClient().Do(req, c.clientTimeout())
if err != nil {
return resultWithErr(result, err)
@@ -312,8 +317,8 @@ func resultWithResponse(resp *http.Response, sentBytes int, result Result, elaps
result.Status = StatusTransportFailure
}
var body struct {
- Message string `json:"message"`
- Trace json.RawMessage `json:"trace"`
+ Message string `json:"message"`
+ Trace json.RawValue `json:"trace"`
}
buf.Reset()
written, err := io.Copy(buf, resp.Body)
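
With the body now carried verbatim on Document.Body, createRequest only decides whether to gzip it: always under CompressionGzip, and under CompressionAuto only when the payload exceeds 512 bytes. A self-contained sketch of that decision using the standard compress/gzip; the gzip-writer and buffer pooling of the real client is left out.

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"net/http"
)

// requestBody returns a body reader and whether it was gzip-compressed,
// mimicking the auto heuristic above: compress only payloads larger
// than 512 bytes.
func requestBody(body []byte) (io.Reader, bool, error) {
	if len(body) <= 512 {
		return bytes.NewReader(body), false, nil
	}
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write(body); err != nil {
		return nil, false, err
	}
	if err := zw.Close(); err != nil {
		return nil, false, err
	}
	return &buf, true, nil
}

func main() {
	body := []byte(`{"fields": {"foo": "123"}}`)
	r, gzipped, err := requestBody(body)
	if err != nil {
		panic(err)
	}
	// Hypothetical endpoint, for illustration only.
	req, _ := http.NewRequest("POST", "https://example.invalid/document/v1/ns/type/docid/doc1", r)
	req.Header.Set("Content-Type", "application/json; charset=utf-8")
	if gzipped {
		req.Header.Set("Content-Encoding", "gzip")
	}
	fmt.Println(req.Method, req.URL, "gzip:", gzipped)
}
```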
diff --git a/client/go/internal/vespa/document/http_test.go b/client/go/internal/vespa/document/http_test.go
index 95af5f997f4..6eda5f04fd6 100644
--- a/client/go/internal/vespa/document/http_test.go
+++ b/client/go/internal/vespa/document/http_test.go
@@ -3,7 +3,6 @@ package document
import (
"bytes"
"fmt"
- "net/http"
"reflect"
"strings"
"testing"
@@ -57,10 +56,23 @@ func assertLeastBusy(t *testing.T, id int, client *Client) {
}
func TestClientSend(t *testing.T) {
- docs := []Document{
- {Create: true, Id: mustParseId("id:ns:type::doc1"), Operation: OperationUpdate, Fields: []byte(`{"foo": "123"}`)},
- {Create: true, Id: mustParseId("id:ns:type::doc2"), Operation: OperationUpdate, Fields: []byte(`{"foo": "456"}`)},
- {Create: true, Id: mustParseId("id:ns:type::doc3"), Operation: OperationUpdate, Fields: []byte(`{"baz": "789"}`)},
+ var tests = []struct {
+ in Document
+ method string
+ url string
+ }{
+ {Document{Create: true, Id: mustParseId("id:ns:type::doc1"), Operation: OperationUpdate, Body: []byte(`{"fields":{"foo": "123"}}`)},
+ "PUT",
+ "https://example.com:1337/document/v1/ns/type/docid/doc1?timeout=5000ms&create=true"},
+ {Document{Id: mustParseId("id:ns:type::doc2"), Operation: OperationUpdate, Body: []byte(`{"fields":{"foo": "456"}}`)},
+ "PUT",
+ "https://example.com:1337/document/v1/ns/type/docid/doc2?timeout=5000ms"},
+ {Document{Id: mustParseId("id:ns:type::doc3"), Operation: OperationRemove},
+ "DELETE",
+ "https://example.com:1337/document/v1/ns/type/docid/doc3?timeout=5000ms"},
+ {Document{Condition: "foo", Id: mustParseId("id:ns:type::doc4"), Operation: OperationUpdate, Body: []byte(`{"fields":{"baz": "789"}}`)},
+ "PUT",
+ "https://example.com:1337/document/v1/ns/type/docid/doc4?timeout=5000ms&condition=foo"},
}
httpClient := mock.HTTPClient{ReadBody: true}
client, _ := NewClient(ClientOptions{
@@ -70,7 +82,8 @@ func TestClientSend(t *testing.T) {
clock := manualClock{t: time.Now(), tick: time.Second}
client.now = clock.now
var stats Stats
- for i, doc := range docs {
+ for i, tt := range tests {
+ doc := tt.in
wantRes := Result{
Id: doc.Id,
Stats: Stats{
@@ -81,7 +94,7 @@ func TestClientSend(t *testing.T) {
MaxLatency: time.Second,
},
}
- if i < 2 {
+ if i < 3 {
httpClient.NextResponseString(200, `{"message":"All good!"}`)
wantRes.Status = StatusSuccess
wantRes.HTTPStatus = 200
@@ -104,35 +117,33 @@ func TestClientSend(t *testing.T) {
}
stats.Add(res.Stats)
r := httpClient.LastRequest
- if r.Method != http.MethodPut {
- t.Errorf("got r.Method = %q, want %q", r.Method, http.MethodPut)
+ if r.Method != tt.method {
+ t.Errorf("got r.Method = %q, want %q", r.Method, tt.method)
+ }
+ if !reflect.DeepEqual(r.Header, defaultHeaders) {
+ t.Errorf("got r.Header = %v, want %v", r.Header, defaultHeaders)
}
- wantURL := fmt.Sprintf("https://example.com:1337/document/v1/ns/type/docid/%s?timeout=5000ms&create=true", doc.Id.UserSpecific)
- if r.URL.String() != wantURL {
- t.Errorf("got r.URL = %q, want %q", r.URL, wantURL)
+ if r.URL.String() != tt.url {
+ t.Errorf("got r.URL = %q, want %q", r.URL, tt.url)
}
- var wantBody bytes.Buffer
- wantBody.WriteString(`{"fields":`)
- wantBody.Write(doc.Fields)
- wantBody.WriteString("}")
- if !bytes.Equal(httpClient.LastBody, wantBody.Bytes()) {
- t.Errorf("got r.Body = %q, want %q", string(httpClient.LastBody), wantBody.String())
+ if !bytes.Equal(httpClient.LastBody, doc.Body) {
+ t.Errorf("got r.Body = %q, want %q", string(httpClient.LastBody), doc.Body)
}
}
want := Stats{
- Requests: 3,
- Responses: 3,
+ Requests: 4,
+ Responses: 4,
ResponsesByCode: map[int]int64{
- 200: 2,
+ 200: 3,
502: 1,
},
Errors: 1,
Inflight: 0,
- TotalLatency: 3 * time.Second,
+ TotalLatency: 4 * time.Second,
MinLatency: time.Second,
MaxLatency: time.Second,
BytesSent: 75,
- BytesRecv: 82,
+ BytesRecv: 105,
}
if !reflect.DeepEqual(want, stats) {
t.Errorf("got %+v, want %+v", stats, want)
@@ -146,9 +157,9 @@ func TestClientSendCompressed(t *testing.T) {
Timeout: time.Duration(5 * time.Second),
}, []util.HTTPClient{httpClient})
- bigBody := fmt.Sprintf(`{"foo": "%s"}`, strings.Repeat("s", 512+1))
- bigDoc := Document{Create: true, Id: mustParseId("id:ns:type::doc1"), Operation: OperationUpdate, Fields: []byte(bigBody)}
- smallDoc := Document{Create: true, Id: mustParseId("id:ns:type::doc2"), Operation: OperationUpdate, Fields: []byte(`{"foo": "s"}`)}
+ bigBody := fmt.Sprintf(`{"fields": {"foo": "%s"}}`, strings.Repeat("s", 512+1))
+ bigDoc := Document{Create: true, Id: mustParseId("id:ns:type::doc1"), Operation: OperationUpdate, Body: []byte(bigBody)}
+ smallDoc := Document{Create: true, Id: mustParseId("id:ns:type::doc2"), Operation: OperationUpdate, Body: []byte(`{"fields": {"foo": "s"}}`)}
var result Result
client.options.Compression = CompressionNone
@@ -267,7 +278,7 @@ func TestClientMethodAndURL(t *testing.T) {
client.options.Route = tt.options.Route
client.options.TraceLevel = tt.options.TraceLevel
client.options.Speedtest = tt.options.Speedtest
- method, url := client.methodAndURL(tt.in)
+ method, url := client.methodAndURL(tt.in, &bytes.Buffer{})
if url != tt.url || method != tt.method {
t.Errorf("#%d: methodAndURL(doc) = (%s, %s), want (%s, %s)", i, method, url, tt.method, tt.url)
}
@@ -289,7 +300,7 @@ func benchmarkClientSend(b *testing.B, compression Compression, document Documen
}
func makeDocument(size int) Document {
- return Document{Id: mustParseId("id:ns:type::doc1"), Operation: OperationUpdate, Fields: []byte(fmt.Sprintf(`{"foo": "%s"}`, randString(size)))}
+ return Document{Id: mustParseId("id:ns:type::doc1"), Operation: OperationUpdate, Body: []byte(fmt.Sprintf(`{"fields": {"foo": "%s"}}`, randString(size)))}
}
func BenchmarkClientSendSmallUncompressed(b *testing.B) {
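
The table-driven test above encodes how the document/v1 request method and URL are expected to depend on the operation and the create/condition options. A minimal, self-contained sketch of that mapping follows; the helper and field names are assumptions for illustration, not the client's actual methodAndURL implementation.

package main

import (
    "fmt"
    "net/url"
)

// docRequest is a hypothetical reduction of the Document fields exercised by the test table.
type docRequest struct {
    namespace, docType, userSpecific string
    remove                           bool   // true for OperationRemove
    create                           bool   // create-if-missing, only meaningful for updates
    condition                        string // optional test-and-set condition
}

// methodAndURL sketches how the expected method and URL are derived in the test expectations.
func methodAndURL(base string, d docRequest, timeout string) (string, string) {
    method := "PUT"
    if d.remove {
        method = "DELETE"
    }
    q := url.Values{}
    q.Set("timeout", timeout)
    if d.create {
        q.Set("create", "true")
    }
    if d.condition != "" {
        q.Set("condition", d.condition)
    }
    u := fmt.Sprintf("%s/document/v1/%s/%s/docid/%s?%s",
        base, d.namespace, d.docType, d.userSpecific, q.Encode())
    return method, u
}

func main() {
    m, u := methodAndURL("https://example.com:1337",
        docRequest{namespace: "ns", docType: "type", userSpecific: "doc1", create: true}, "5000ms")
    // url.Values.Encode sorts parameters alphabetically, so the order differs from the literal test strings.
    fmt.Println(m, u) // PUT https://example.com:1337/document/v1/ns/type/docid/doc1?create=true&timeout=5000ms
}
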
diff --git a/client/go/internal/vespa/document/throttler.go b/client/go/internal/vespa/document/throttler.go
index 667a10d28e3..e32fb804b23 100644
--- a/client/go/internal/vespa/document/throttler.go
+++ b/client/go/internal/vespa/document/throttler.go
@@ -23,11 +23,11 @@ type Throttler interface {
type dynamicThrottler struct {
minInflight int64
maxInflight int64
- targetInflight int64
- targetTimesTen int64
+ targetInflight atomic.Int64
+ targetTimesTen atomic.Int64
throughputs []float64
- ok int64
+ ok atomic.Int64
sent int64
start time.Time
@@ -39,23 +39,24 @@ func newThrottler(connections int, nowFunc func() time.Time) *dynamicThrottler {
minInflight = 16 * int64(connections)
maxInflight = 256 * minInflight // 4096 max streams per connection on the server side
)
- return &dynamicThrottler{
- minInflight: minInflight,
- maxInflight: maxInflight,
- targetInflight: 8 * minInflight,
- targetTimesTen: 10 * maxInflight,
+ t := &dynamicThrottler{
+ minInflight: minInflight,
+ maxInflight: maxInflight,
throughputs: make([]float64, 128),
start: nowFunc(),
now: nowFunc,
}
+ t.targetInflight.Store(8 * minInflight)
+ t.targetTimesTen.Store(10 * maxInflight)
+ return t
}
func NewThrottler(connections int) Throttler { return newThrottler(connections, time.Now) }
func (t *dynamicThrottler) Sent() {
- currentInflight := atomic.LoadInt64(&t.targetInflight)
+ currentInflight := t.targetInflight.Load()
t.sent++
if t.sent*t.sent*t.sent < 100*currentInflight*currentInflight {
return
@@ -64,7 +65,7 @@ func (t *dynamicThrottler) Sent() {
now := t.now()
elapsed := now.Sub(t.start)
t.start = now
- currentThroughput := float64(atomic.SwapInt64(&t.ok, 0)) / float64(elapsed)
+ currentThroughput := float64(t.ok.Swap(0)) / float64(elapsed)
// Use buckets for throughput over inflight, along the log-scale, in [minInflight, maxInflight).
index := int(float64(len(t.throughputs)) * math.Log(max(1, min(255, float64(currentInflight)/float64(t.minInflight)))) / math.Log(256))
@@ -85,20 +86,20 @@ func (t *dynamicThrottler) Sent() {
}
}
target := int64((rand.Float64()*0.20 + 0.92) * choice) // Random walk, skewed towards increase
- atomic.StoreInt64(&t.targetInflight, max(t.minInflight, min(t.maxInflight, target)))
+ t.targetInflight.Store(max(t.minInflight, min(t.maxInflight, target)))
}
func (t *dynamicThrottler) Success() {
- atomic.AddInt64(&t.targetTimesTen, 1)
- atomic.AddInt64(&t.ok, 1)
+ t.targetTimesTen.Add(1)
+ t.ok.Add(1)
}
func (t *dynamicThrottler) Throttled(inflight int64) {
- atomic.StoreInt64(&t.targetTimesTen, max(inflight*5, t.minInflight*10))
+ t.targetTimesTen.Store(max(inflight*5, t.minInflight*10))
}
func (t *dynamicThrottler) TargetInflight() int64 {
- staticTargetInflight := min(t.maxInflight, atomic.LoadInt64(&t.targetTimesTen)/10)
- targetInflight := atomic.LoadInt64(&t.targetInflight)
+ staticTargetInflight := min(t.maxInflight, t.targetTimesTen.Load()/10)
+ targetInflight := t.targetInflight.Load()
return min(staticTargetInflight, targetInflight)
}
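
The throttler change above swaps the package-level atomic.LoadInt64/StoreInt64/AddInt64/SwapInt64 helpers on plain int64 fields for the typed sync/atomic.Int64 wrappers introduced in Go 1.19. A minimal sketch of the same migration pattern, independent of the throttler itself:

package main

import (
    "fmt"
    "sync/atomic"
)

// Old style: a plain int64 field manipulated through the package-level helpers.
type oldCounter struct{ n int64 }

func (c *oldCounter) inc() int64 { return atomic.AddInt64(&c.n, 1) }
func (c *oldCounter) get() int64 { return atomic.LoadInt64(&c.n) }

// New style: atomic.Int64 carries the atomicity in the type and removes the address-of noise.
type newCounter struct{ n atomic.Int64 }

func (c *newCounter) inc() int64 { return c.n.Add(1) }
func (c *newCounter) get() int64 { return c.n.Load() }

func main() {
    var a oldCounter
    var b newCounter
    a.inc()
    b.inc()
    fmt.Println(a.get(), b.get()) // 1 1
}

The typed wrapper also makes it harder to accidentally read or write the field without going through the atomic API.
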
diff --git a/client/go/internal/vespa/system.go b/client/go/internal/vespa/system.go
index b8263dbdec0..96795cc0ef8 100644
--- a/client/go/internal/vespa/system.go
+++ b/client/go/internal/vespa/system.go
@@ -4,36 +4,40 @@ import "fmt"
// PublicSystem represents the main Vespa Cloud system.
var PublicSystem = System{
- Name: "public",
- URL: "https://api-ctl.vespa-cloud.com:4443",
- ConsoleURL: "https://console.vespa-cloud.com",
- DefaultZone: ZoneID{Environment: "dev", Region: "aws-us-east-1c"},
+ Name: "public",
+ URL: "https://api-ctl.vespa-cloud.com:4443",
+ ConsoleURL: "https://console.vespa-cloud.com",
+ DefaultZone: ZoneID{Environment: "dev", Region: "aws-us-east-1c"},
+ EndpointDomain: "vespa-app.cloud",
}
// PublicCDSystem represents the CD variant of the Vespa Cloud system.
var PublicCDSystem = System{
- Name: "publiccd",
- URL: "https://api-ctl.cd.vespa-cloud.com:4443",
- ConsoleURL: "https://console.cd.vespa-cloud.com",
- DefaultZone: ZoneID{Environment: "dev", Region: "aws-us-east-1c"},
+ Name: "publiccd",
+ URL: "https://api-ctl.cd.vespa-cloud.com:4443",
+ ConsoleURL: "https://console.cd.vespa-cloud.com",
+ DefaultZone: ZoneID{Environment: "dev", Region: "aws-us-east-1c"},
+ EndpointDomain: "cd.vespa-app.cloud",
}
// MainSystem represents the main hosted Vespa system.
var MainSystem = System{
- Name: "main",
- URL: "https://api.vespa.ouryahoo.com:4443",
- ConsoleURL: "https://console.vespa.ouryahoo.com",
- DefaultZone: ZoneID{Environment: "dev", Region: "us-east-1"},
- AthenzDomain: "vespa.vespa",
+ Name: "main",
+ URL: "https://api.vespa.ouryahoo.com:4443",
+ ConsoleURL: "https://console.vespa.ouryahoo.com",
+ DefaultZone: ZoneID{Environment: "dev", Region: "us-east-1"},
+ AthenzDomain: "vespa.vespa",
+ EndpointDomain: "vespa.oath.cloud",
}
// CDSystem represents the CD variant of the hosted Vespa system.
var CDSystem = System{
- Name: "cd",
- URL: "https://api-cd.vespa.ouryahoo.com:4443",
- ConsoleURL: "https://console-cd.vespa.ouryahoo.com",
- DefaultZone: ZoneID{Environment: "dev", Region: "cd-us-west-1"},
- AthenzDomain: "vespa.vespa.cd",
+ Name: "cd",
+ URL: "https://api-cd.vespa.ouryahoo.com:4443",
+ ConsoleURL: "https://console-cd.vespa.ouryahoo.com",
+ DefaultZone: ZoneID{Environment: "dev", Region: "cd-us-west-1"},
+ AthenzDomain: "vespa.vespa.cd",
+ EndpointDomain: "cd.vespa.oath.cloud",
}
// System represents a Vespa system.
@@ -47,6 +51,8 @@ type System struct {
// AthenzDomain is the Athenz domain used by this system. This is empty for systems not using Athenz for tenant
// authentication.
AthenzDomain string
+ // EndpointDomain is the domain used for application endpoints in this system
+ EndpointDomain string
}
// IsPublic returns whether system s is a public (Vespa Cloud) system.
diff --git a/client/go/internal/vespa/target_cloud.go b/client/go/internal/vespa/target_cloud.go
index 928bb788494..c0169f1a9bd 100644
--- a/client/go/internal/vespa/target_cloud.go
+++ b/client/go/internal/vespa/target_cloud.go
@@ -26,6 +26,7 @@ type APIOptions struct {
type CloudDeploymentOptions struct {
Deployment Deployment
TLSOptions TLSOptions
+ CustomURL string
ClusterURLs map[string]string // Endpoints keyed on cluster name
}
@@ -73,7 +74,15 @@ func CloudTarget(httpClient util.HTTPClient, apiAuth Authenticator, deploymentAu
}, nil
}
-func (t *cloudTarget) findClusterURL(cluster string) (string, error) {
+func (t *cloudTarget) findClusterURL(cluster string, timeout time.Duration, runID int64) (string, error) {
+ if t.deploymentOptions.CustomURL != "" {
+ return t.deploymentOptions.CustomURL, nil
+ }
+ if t.deploymentOptions.ClusterURLs == nil {
+ if err := t.waitForEndpoints(timeout, runID); err != nil {
+ return "", err
+ }
+ }
clusters := make([]string, 0, len(t.deploymentOptions.ClusterURLs))
for c := range t.deploymentOptions.ClusterURLs {
clusters = append(clusters, c)
@@ -129,12 +138,7 @@ func (t *cloudTarget) Service(name string, timeout time.Duration, runID int64, c
}
return service, nil
case QueryService, DocumentService:
- if t.deploymentOptions.ClusterURLs == nil {
- if err := t.waitForEndpoints(timeout, runID); err != nil {
- return nil, err
- }
- }
- url, err := t.findClusterURL(cluster)
+ url, err := t.findClusterURL(cluster, timeout, runID)
if err != nil {
return nil, err
}
diff --git a/client/go/internal/vespa/target_custom.go b/client/go/internal/vespa/target_custom.go
index 0a3a9d48fed..0129b1e1153 100644
--- a/client/go/internal/vespa/target_custom.go
+++ b/client/go/internal/vespa/target_custom.go
@@ -41,7 +41,7 @@ func (t *customTarget) Deployment() Deployment { return Deployment{} }
func (t *customTarget) createService(name string) (*Service, error) {
switch name {
case DeployService, QueryService, DocumentService:
- url, err := t.urlWithPort(name)
+ url, err := t.serviceURL(name, t.targetType)
if err != nil {
return nil, err
}
@@ -79,20 +79,21 @@ func (t *customTarget) PrintLog(options LogOptions) error {
func (t *customTarget) CheckVersion(version version.Version) error { return nil }
-func (t *customTarget) urlWithPort(serviceName string) (string, error) {
+func (t *customTarget) serviceURL(name string, targetType string) (string, error) {
u, err := url.Parse(t.baseURL)
if err != nil {
return "", err
}
- port := u.Port()
- if port == "" {
- switch serviceName {
+ if targetType == TargetLocal {
+ // Use same ports as the vespaengine/vespa container image
+ port := ""
+ switch name {
case DeployService:
port = "19071"
case QueryService, DocumentService:
port = "8080"
default:
- return "", fmt.Errorf("unknown service: %s", serviceName)
+ return "", fmt.Errorf("unknown service: %s", name)
}
u.Host = u.Host + ":" + port
}
diff --git a/client/go/internal/vespa/target_test.go b/client/go/internal/vespa/target_test.go
index bf266e8f9ec..6dc97f496f5 100644
--- a/client/go/internal/vespa/target_test.go
+++ b/client/go/internal/vespa/target_test.go
@@ -76,9 +76,9 @@ func TestCustomTarget(t *testing.T) {
assertServiceURL(t, "http://127.0.0.1:8080", lt, "document")
ct := CustomTarget(&mock.HTTPClient{}, "http://192.0.2.42", TLSOptions{})
- assertServiceURL(t, "http://192.0.2.42:19071", ct, "deploy")
- assertServiceURL(t, "http://192.0.2.42:8080", ct, "query")
- assertServiceURL(t, "http://192.0.2.42:8080", ct, "document")
+ assertServiceURL(t, "http://192.0.2.42", ct, "deploy")
+ assertServiceURL(t, "http://192.0.2.42", ct, "query")
+ assertServiceURL(t, "http://192.0.2.42", ct, "document")
ct2 := CustomTarget(&mock.HTTPClient{}, "http://192.0.2.42:60000", TLSOptions{})
assertServiceURL(t, "http://192.0.2.42:60000", ct2, "deploy")
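
As the updated test shows, target_custom.go now appends the container image's default ports (19071 for deploy, 8080 for query and document) only when the target type is local; any other custom target keeps its URL exactly as given. A condensed sketch of that rule, with hypothetical names:

package main

import "fmt"

// defaultPortFor mirrors the port table in the serviceURL change above.
func defaultPortFor(service string) (string, bool) {
    switch service {
    case "deploy":
        return "19071", true
    case "query", "document":
        return "8080", true
    }
    return "", false
}

// serviceURL appends a default port only for local targets, as the updated test expects.
func serviceURL(baseURL, service string, local bool) string {
    if !local {
        return baseURL
    }
    if port, ok := defaultPortFor(service); ok {
        return baseURL + ":" + port
    }
    return baseURL
}

func main() {
    fmt.Println(serviceURL("http://127.0.0.1", "query", true))   // http://127.0.0.1:8080
    fmt.Println(serviceURL("http://192.0.2.42", "query", false)) // http://192.0.2.42
}
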
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
index 41ee47513a0..0d42df88d04 100644
--- a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
@@ -116,6 +116,7 @@ public interface ModelContext {
@ModelFeatureFlag(owners = {"baldersheim"}, comment = "Select summary decode type") default String summaryDecodePolicy() { return "eager"; }
@ModelFeatureFlag(owners = {"hmusum"}) default boolean allowMoreThanOneContentGroupDown(ClusterSpec.Id id) { return false; }
@ModelFeatureFlag(owners = {"vekterli", "havardpe"}) default boolean enableConditionalPutRemoveWriteRepair() { return false; }
+ @ModelFeatureFlag(owners = {"mortent", "olaa"}) default boolean enableDataplaneProxy() { return false; }
//Below are all flags that must be kept until 7 is out of the door
@ModelFeatureFlag(owners = {"arnej"}, removeAfter="7.last") default boolean ignoreThreadStackSizes() { return false; }
diff --git a/config-model/src/main/java/com/yahoo/config/model/provision/InMemoryProvisioner.java b/config-model/src/main/java/com/yahoo/config/model/provision/InMemoryProvisioner.java
index 41697e61bf2..ae9d696a9a3 100644
--- a/config-model/src/main/java/com/yahoo/config/model/provision/InMemoryProvisioner.java
+++ b/config-model/src/main/java/com/yahoo/config/model/provision/InMemoryProvisioner.java
@@ -160,7 +160,7 @@ public class InMemoryProvisioner implements HostProvisioner {
public List<HostSpec> prepare(ClusterSpec cluster, Capacity requested, ProvisionLogger logger) {
provisioned.add(cluster.id(), requested);
clusters.add(cluster);
- if (environment == Environment.dev) {
+ if (environment == Environment.dev && ! requested.isRequired()) {
requested = requested.withLimits(requested.minResources().withNodes(1),
requested.maxResources().withNodes(1));
}
@@ -232,9 +232,14 @@ public class InMemoryProvisioner implements HostProvisioner {
}
// Minimal capacity policies
- private NodeResources decideResources(NodeResources requestedResources) {
- if (requestedResources.isUnspecified()) return defaultNodeResources;
- return requestedResources;
+ private NodeResources decideResources(NodeResources resources) {
+ if (resources.vcpuIsUnspecified())
+ resources = resources.withVcpu(defaultNodeResources.vcpu());
+ if (resources.memoryGbIsUnspecified())
+ resources = resources.withMemoryGb(defaultNodeResources.memoryGb());
+ if (resources.diskGbIsUnspecified())
+ resources = resources.withDiskGb(defaultNodeResources.diskGb());
+ return resources;
}
private List<HostSpec> allocateHostGroup(ClusterSpec clusterGroup, NodeResources requestedResourcesOrUnspecified,
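
The InMemoryProvisioner change above stops replacing a partially specified NodeResources wholesale and instead fills in only the unspecified dimensions from the defaults. A small Go sketch of that fill-in pattern, with made-up type and field names:

package main

import (
    "fmt"
    "math"
)

// resources is a stand-in for NodeResources; NaN marks an unspecified dimension.
type resources struct{ vcpu, memoryGb, diskGb float64 }

func unspecified(v float64) bool { return math.IsNaN(v) }

// withDefaults fills only the unspecified dimensions from the defaults and keeps the rest.
func withDefaults(r, defaults resources) resources {
    if unspecified(r.vcpu) {
        r.vcpu = defaults.vcpu
    }
    if unspecified(r.memoryGb) {
        r.memoryGb = defaults.memoryGb
    }
    if unspecified(r.diskGb) {
        r.diskGb = defaults.diskGb
    }
    return r
}

func main() {
    defaults := resources{vcpu: 1, memoryGb: 3, diskGb: 50}
    requested := resources{vcpu: 2, memoryGb: math.NaN(), diskGb: math.NaN()}
    fmt.Println(withDefaults(requested, defaults)) // {2 3 50}
}
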
diff --git a/config-model/src/main/java/com/yahoo/schema/processing/AttributesImplicitWord.java b/config-model/src/main/java/com/yahoo/schema/processing/AttributesImplicitWord.java
index 415f23f2786..f0b673920da 100644
--- a/config-model/src/main/java/com/yahoo/schema/processing/AttributesImplicitWord.java
+++ b/config-model/src/main/java/com/yahoo/schema/processing/AttributesImplicitWord.java
@@ -44,10 +44,8 @@ public class AttributesImplicitWord extends Processor {
private boolean fieldImplicitlyWordMatch(ImmutableSDField field) {
// numeric types should not trigger exact-match query parsing
- DataType dt = field.getDataType().getPrimitiveType();
- if (dt != null && dt instanceof NumericDataType) {
- return false;
- }
+ if (field.getDataType().getPrimitiveType() instanceof NumericDataType) return false;
+
return (! field.hasIndex()
&& !field.getAttributes().isEmpty()
&& field.getIndices().isEmpty()
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
index 362bc7b0964..a60d4d45317 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
@@ -131,6 +131,11 @@ public class VespaMetricSet {
addMetric(metrics, ConfigServerMetrics.ZK_CONNECTIONS.last());
addMetric(metrics, ConfigServerMetrics.ZK_OUTSTANDING_REQUESTS.last());
+ addMetric(metrics, ConfigServerMetrics.CLUSTER_COST.last());
+ addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_CPU.last());
+ addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_MEMORY.last());
+ addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_DISK.last());
+
return metrics;
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/CloudHttpConnectorValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/CloudHttpConnectorValidator.java
new file mode 100644
index 00000000000..acfbaa0f485
--- /dev/null
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/CloudHttpConnectorValidator.java
@@ -0,0 +1,45 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.model.application.validation;
+
+import com.yahoo.config.model.deploy.DeployState;
+import com.yahoo.vespa.model.VespaModel;
+import com.yahoo.vespa.model.container.Container;
+import com.yahoo.vespa.model.container.http.ConnectorFactory;
+import com.yahoo.vespa.model.container.http.ssl.DefaultSslProvider;
+import com.yahoo.vespa.model.container.http.ssl.HostedSslConnectorFactory;
+
+/**
+ * Enforces that Cloud applications cannot
+ * 1) override connector-specific TLS configuration
+ * 2) add additional HTTP connectors
+ *
+ * @author bjorncs
+ */
+public class CloudHttpConnectorValidator extends Validator {
+ @Override
+ public void validate(VespaModel model, DeployState state) {
+ if (!state.isHostedTenantApplication(model.getAdmin().getApplicationType())) return;
+
+ model.getContainerClusters().forEach((__, cluster) -> {
+ var http = cluster.getHttp();
+ if (http == null) return;
+ var illegalConnectors = http.getHttpServer().stream().flatMap(s -> s.getConnectorFactories().stream()
+ .filter(c -> !isAllowedConnector(c)))
+ .map(cf -> "%s@%d".formatted(cf.getName(), cf.getListenPort()))
+ .toList();
+ if (illegalConnectors.isEmpty()) return;
+ throw new IllegalArgumentException(
+ ("Adding additional or modifying existing HTTPS connectors is not allowed for Vespa Cloud applications." +
+ " Violating connectors: %s. See https://cloud.vespa.ai/en/security/whitepaper, " +
+ "https://cloud.vespa.ai/en/security/guide#data-plane.")
+ .formatted(illegalConnectors));
+ });
+ }
+
+ private static boolean isAllowedConnector(ConnectorFactory cf) {
+ return cf instanceof HostedSslConnectorFactory
+ || cf.getClass().getSimpleName().endsWith("HealthCheckProxyConnector")
+ || (cf.getListenPort() == Container.BASEPORT && cf.sslProvider() instanceof DefaultSslProvider);
+ }
+}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidator.java
index 66da43856b1..eccb6910866 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidator.java
@@ -28,11 +28,14 @@ public class ConstantTensorJsonValidator {
private static final String FIELD_CELLS = "cells";
private static final String FIELD_ADDRESS = "address";
private static final String FIELD_VALUE = "value";
+ private static final String FIELD_VALUES = "values";
private static final JsonFactory jsonFactory = new JsonFactory();
private JsonParser parser;
private Map<String, TensorType.Dimension> tensorDimensions;
+ private boolean isSingleDenseType = false;
+ private boolean isSingleMappedType = false;
public void validate(String fileName, TensorType type, Reader tensorData) {
if (fileName.endsWith(".json")) {
@@ -57,19 +60,69 @@ public class ConstantTensorJsonValidator {
.dimensions()
.stream()
.collect(Collectors.toMap(TensorType.Dimension::name, Function.identity()));
+ if (type.dimensions().size() == 1) {
+ this.isSingleMappedType = (type.indexedSubtype() == TensorType.empty);
+ this.isSingleDenseType = (type.mappedSubtype() == TensorType.empty);
+ }
+ var top = parser.nextToken();
+ if (top == JsonToken.START_ARRAY) {
+ consumeValuesArray();
+ } else if (top == JsonToken.START_OBJECT) {
+ consumeTopObject();
+ }
+ });
+ }
- assertNextTokenIs(JsonToken.START_OBJECT);
- assertNextTokenIs(JsonToken.FIELD_NAME);
- assertFieldNameIs(FIELD_CELLS);
+ private void consumeValuesArray() throws IOException {
+ if (! isSingleDenseType) {
+ throw new InvalidConstantTensorException(parser, String.format("Field 'values' is only valid for simple vectors (1-d dense tensors)"));
+ }
+ assertCurrentTokenIs(JsonToken.START_ARRAY);
+ while (parser.nextToken() != JsonToken.END_ARRAY) {
+ validateNumeric(parser.getCurrentToken());
+ }
+ }
+ private void consumeTopObject() throws IOException {
+ assertCurrentTokenIs(JsonToken.START_OBJECT);
+ assertNextTokenIs(JsonToken.FIELD_NAME);
+ String fieldName = parser.getCurrentName();
+ if (fieldName.equals(FIELD_VALUES)) {
assertNextTokenIs(JsonToken.START_ARRAY);
+ consumeValuesArray();
+ } else if (fieldName.equals(FIELD_CELLS)) {
+ consumeCellsField();
+ } else {
+ throw new InvalidConstantTensorException(parser, String.format("Expected 'cells' or 'values', got '%s'", fieldName));
+ }
+ assertNextTokenIs(JsonToken.END_OBJECT);
+ }
- while (parser.nextToken() != JsonToken.END_ARRAY) {
- validateTensorCell();
- }
+ private void consumeCellsField() throws IOException {
+ var token = parser.nextToken();
+ if (token == JsonToken.START_ARRAY) {
+ consumeLiteralFormArray();
+ } else if (token == JsonToken.START_OBJECT) {
+ consumeSimpleMappedObject();
+ } else {
+ throw new InvalidConstantTensorException(parser, String.format("Field 'cells' must be object or array, but got %s", token.toString()));
+ }
+ }
- assertNextTokenIs(JsonToken.END_OBJECT);
- });
+ private void consumeLiteralFormArray() throws IOException {
+ while (parser.nextToken() != JsonToken.END_ARRAY) {
+ validateTensorCell();
+ }
+ }
+
+ private void consumeSimpleMappedObject() throws IOException {
+ if (! isSingleMappedType) {
+ throw new InvalidConstantTensorException(parser, String.format("Field 'cells' must be an array of address/value objects"));
+ }
+ while (parser.nextToken() != JsonToken.END_OBJECT) {
+ assertCurrentTokenIs(JsonToken.FIELD_NAME);
+ validateTensorCellValue();
+ }
}
private void validateTensorCell() {
@@ -87,7 +140,7 @@ public class ConstantTensorJsonValidator {
if (fieldName.equals(FIELD_ADDRESS)) {
validateTensorAddress();
} else if (fieldName.equals(FIELD_VALUE)) {
- validateTensorValue();
+ validateTensorCellValue();
}
} else {
throw new InvalidConstantTensorException(parser, "Only 'address' or 'value' fields are permitted within a cell object");
@@ -169,9 +222,12 @@ public class ConstantTensorJsonValidator {
throw new InvalidConstantTensorException(parser, String.format("Index '%s' for dimension '%s' is not an integer", value, dimensionName));
}
- private void validateTensorValue() throws IOException {
+ private void validateTensorCellValue() throws IOException {
JsonToken token = parser.nextToken();
+ validateNumeric(token);
+ }
+ private void validateNumeric(JsonToken token) throws IOException {
if (token != JsonToken.VALUE_NUMBER_FLOAT && token != JsonToken.VALUE_NUMBER_INT) {
throw new InvalidConstantTensorException(parser, String.format("Tensor value is not a number (%s)", token.toString()));
}
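
The validator changes above accept three JSON layouts for constant tensors: the literal 'cells' array of address/value objects (valid for any tensor type), a 'values' array, either bare or wrapped in an object, for one-dimensional dense tensors, and a 'cells' object keyed by label for one-dimensional mapped tensors. A small sketch listing those layouts and checking that each parses as JSON; the tensor types in the comments are assumptions for illustration:

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    examples := map[string]string{
        // Literal form: works for any tensor type.
        "cells array": `{"cells": [{"address": {"x": "a"}, "value": 2.0}]}`,
        // Only valid for 1-d dense tensors, e.g. tensor(x[3]).
        "values array": `{"values": [1.0, 2.0, 3.0]}`,
        "bare array":   `[1.0, 2.0, 3.0]`,
        // Only valid for 1-d mapped tensors, e.g. tensor(x{}).
        "cells object": `{"cells": {"a": 2.0, "b": 3.0}}`,
    }
    for name, doc := range examples {
        var parsed any
        if err := json.Unmarshal([]byte(doc), &parsed); err != nil {
            panic(err)
        }
        fmt.Printf("%-12s %s\n", name, doc)
    }
}
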
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/QuotaValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/QuotaValidator.java
index 4ea74147aaf..475a4174f9a 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/QuotaValidator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/QuotaValidator.java
@@ -53,8 +53,8 @@ public class QuotaValidator extends Validator {
}
throwIfBudgetNegative(actualSpend, budget, systemName);
- throwIfBudgetExceeded(actualSpend, budget, systemName);
- throwIfBudgetExceeded(maxSpend, budget, systemName);
+ throwIfBudgetExceeded(actualSpend, budget, systemName, true);
+ throwIfBudgetExceeded(maxSpend, budget, systemName, false);
}
private Set<ClusterSpec.Id> adminClusterIds(VespaModel model) {
@@ -86,18 +86,22 @@ public class QuotaValidator extends Validator {
private static void throwIfBudgetNegative(double spend, BigDecimal budget, SystemName systemName) {
if (budget.doubleValue() < 0) {
- throw new IllegalArgumentException(quotaMessage("Please free up some capacity.", systemName, spend, budget));
+ throw new IllegalArgumentException(quotaMessage("Please free up some capacity.", systemName, spend, budget, true));
}
}
- private static void throwIfBudgetExceeded(double spend, BigDecimal budget, SystemName systemName) {
+ private static void throwIfBudgetExceeded(double spend, BigDecimal budget, SystemName systemName, boolean actual) {
if (budget.doubleValue() < spend) {
- throw new IllegalArgumentException(quotaMessage("Contact support to upgrade your plan.", systemName, spend, budget));
+ throw new IllegalArgumentException(quotaMessage("Contact support to upgrade your plan.", systemName, spend, budget, actual));
}
}
- private static String quotaMessage(String message, SystemName system, double spend, BigDecimal budget) {
- String quotaDescription = String.format(Locale.ENGLISH, "The max resources specified cost $%.2f but your quota is $%.2f", spend, budget);
+ private static String quotaMessage(String message, SystemName system, double spend, BigDecimal budget, boolean actual) {
+ String quotaDescription = String.format(Locale.ENGLISH,
+ "The %s cost $%.2f but your quota is $%.2f",
+ actual ? "resources used" : "max resources specified",
+ spend,
+ budget);
return (system == SystemName.Public ? "" : system.value() + ": ") + quotaDescription + ": " + message;
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validation.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validation.java
index 4f2f8e7932c..efa02781f7e 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validation.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validation.java
@@ -1,7 +1,6 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.application.validation;
-import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.config.application.api.ValidationId;
import com.yahoo.config.application.api.ValidationOverrides;
import com.yahoo.config.model.api.ConfigChangeAction;
@@ -13,7 +12,6 @@ import com.yahoo.vespa.model.VespaModel;
import com.yahoo.vespa.model.application.validation.change.CertificateRemovalChangeValidator;
import com.yahoo.vespa.model.application.validation.change.ChangeValidator;
import com.yahoo.vespa.model.application.validation.change.CloudAccountChangeValidator;
-import com.yahoo.vespa.model.application.validation.change.ResourcesReductionValidator;
import com.yahoo.vespa.model.application.validation.change.ConfigValueChangeValidator;
import com.yahoo.vespa.model.application.validation.change.ContainerRestartValidator;
import com.yahoo.vespa.model.application.validation.change.ContentClusterRemovalValidator;
@@ -23,11 +21,11 @@ import com.yahoo.vespa.model.application.validation.change.IndexedSearchClusterC
import com.yahoo.vespa.model.application.validation.change.IndexingModeChangeValidator;
import com.yahoo.vespa.model.application.validation.change.NodeResourceChangeValidator;
import com.yahoo.vespa.model.application.validation.change.RedundancyIncreaseValidator;
+import com.yahoo.vespa.model.application.validation.change.ResourcesReductionValidator;
import com.yahoo.vespa.model.application.validation.change.StartupCommandChangeValidator;
import com.yahoo.vespa.model.application.validation.change.StreamingSearchClusterChangeValidator;
import com.yahoo.vespa.model.application.validation.first.RedundancyValidator;
-import java.time.Instant;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
@@ -88,6 +86,7 @@ public class Validation {
new CloudDataPlaneFilterValidator().validate(model, deployState);
new AccessControlFilterExcludeValidator().validate(model, deployState);
new CloudUserFilterValidator().validate(model, deployState);
+ new CloudHttpConnectorValidator().validate(model, deployState);
additionalValidators.forEach(v -> v.validate(model, deployState));
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV4Builder.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV4Builder.java
index 4990ddc9a53..588ecab537a 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV4Builder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV4Builder.java
@@ -94,9 +94,7 @@ public class DomAdminV4Builder extends DomAdminBuilderBase {
private NodesSpecification createNodesSpecificationForLogserver() {
DeployState deployState = context.getDeployState();
if ( deployState.getProperties().useDedicatedNodeForLogserver()
- && context.getApplicationType() == ConfigModelContext.ApplicationType.DEFAULT
- && deployState.isHosted()
- && ! deployState.getProperties().applicationId().instance().isTester())
+ && deployState.isHostedTenantApplication(context.getApplicationType()))
return NodesSpecification.dedicated(1, context);
else
return NodesSpecification.nonDedicated(1, context);
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/NodesSpecification.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/NodesSpecification.java
index c968e31325a..e70c555a366 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/NodesSpecification.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/NodesSpecification.java
@@ -307,9 +307,9 @@ public class NodesSpecification {
}
private static Pair<NodeResources, NodeResources> nodeResourcesFromResourcesElement(ModelElement element) {
- Pair<Double, Double> vcpu = toRange(element.requiredStringAttribute("vcpu"), .0, Double::parseDouble);
- Pair<Double, Double> memory = toRange(element.requiredStringAttribute("memory"), .0, s -> parseGbAmount(s, "B"));
- Pair<Double, Double> disk = toRange(element.requiredStringAttribute("disk"), .0, s -> parseGbAmount(s, "B"));
+ Pair<Double, Double> vcpu = toRange(element.stringAttribute("vcpu"), .0, Double::parseDouble);
+ Pair<Double, Double> memory = toRange(element.stringAttribute("memory"), .0, s -> parseGbAmount(s, "B"));
+ Pair<Double, Double> disk = toRange(element.stringAttribute("disk"), .0, s -> parseGbAmount(s, "B"));
Pair<Double, Double> bandwith = toRange(element.stringAttribute("bandwidth"), .3, s -> parseGbAmount(s, "BPS"));
NodeResources.DiskSpeed diskSpeed = parseOptionalDiskSpeed(element.stringAttribute("disk-speed"));
NodeResources.StorageType storageType = parseOptionalStorageType(element.stringAttribute("storage-type"));
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/http/ConnectorFactory.java b/config-model/src/main/java/com/yahoo/vespa/model/container/http/ConnectorFactory.java
index c76077e6c7b..697cfc95039 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/http/ConnectorFactory.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/http/ConnectorFactory.java
@@ -59,6 +59,8 @@ public class ConnectorFactory extends SimpleComponent implements ConnectorConfig
public void setDefaultResponseFilterChain(ComponentId filterChain) { this.defaultResponseFilterChain = filterChain; }
+ public SslProvider sslProvider() { return sslProviderComponent; }
+
public static class Builder {
private final String name;
private final int listenPort;
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
index 3d1c8ca1d76..e1d222e0546 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
@@ -136,7 +136,7 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
static final String HOSTED_VESPA_STATUS_FILE = Defaults.getDefaults().underVespaHome("var/vespa/load-balancer/status.html");
// Data plane port for hosted Vespa
- static final int HOSTED_VESPA_DATAPLANE_PORT = 4443;
+ public static final int HOSTED_VESPA_DATAPLANE_PORT = 4443;
//Path to vip status file for container in Hosted Vespa. Only used if set, else use HOSTED_VESPA_STATUS_FILE
private static final String HOSTED_VESPA_STATUS_FILE_SETTING = "VESPA_LB_STATUS_FILE";
@@ -932,9 +932,11 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
int nodeCount = deployState.zone().environment().isProduction() ? 2 : 1;
deployState.getDeployLogger().logApplicationPackage(Level.INFO, "Using " + nodeCount + " nodes in " + cluster);
var nodesSpec = NodesSpecification.dedicated(nodeCount, context);
+ ClusterSpec.Id clusterId = ClusterSpec.Id.from(cluster.getName());
var hosts = nodesSpec.provision(hostSystem,
ClusterSpec.Type.container,
- ClusterSpec.Id.from(cluster.getName()),
+ clusterId,
+ zoneEndpoint(context, clusterId),
deployState.getDeployLogger(),
false,
context.clusterInfo().build());
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/Content.java b/config-model/src/main/java/com/yahoo/vespa/model/content/Content.java
index e044b97546c..43f045940c9 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/Content.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/Content.java
@@ -28,6 +28,7 @@ import com.yahoo.vespa.model.container.docproc.DocprocChain;
import com.yahoo.vespa.model.container.docproc.DocprocChains;
import com.yahoo.vespa.model.content.cluster.ContentCluster;
import com.yahoo.vespa.model.search.IndexedSearchCluster;
+import com.yahoo.vespa.model.search.IndexingDocproc;
import com.yahoo.vespa.model.search.IndexingDocprocChain;
import com.yahoo.vespa.model.search.SearchCluster;
import com.yahoo.vespa.model.search.SearchNode;
@@ -213,13 +214,17 @@ public class Content extends ConfigModel {
/** Select/creates and initializes the indexing cluster coupled to this */
private void buildIndexingClusters(Content content, ConfigModelContext modelContext,
ApplicationConfigProducerRoot root) {
- if ( ! content.getCluster().getSearch().hasIndexedCluster()) return;
-
- IndexedSearchCluster indexedSearchCluster = content.getCluster().getSearch().getIndexed();
- if (indexedSearchCluster.hasExplicitIndexingCluster()) {
- setExistingIndexingCluster(indexedSearchCluster, content.containers);
+ var search = content.getCluster().getSearch();
+ if (!search.getIndexingDocproc().isPresent()) {
+ return;
+ }
+ var indexingDocproc = search.getIndexingDocproc().get();
+ if (indexingDocproc.hasExplicitCluster()) {
+ setExistingIndexingCluster(content, indexingDocproc, content.containers);
} else {
- setContainerAsIndexingCluster(indexedSearchCluster, content, modelContext, root);
+ if (search.hasIndexedCluster()) {
+ setContainerAsIndexingCluster(search.getIndexed(), content, modelContext, root);
+ }
}
}
@@ -237,18 +242,19 @@ public class Content extends ConfigModel {
targetCluster = content.containers.iterator().next().getCluster();
addDocproc(targetCluster);
- indexedSearchCluster.setIndexingClusterName(targetCluster.getName());
- addIndexingChainsTo(targetCluster, indexedSearchCluster);
+ var indexingDocproc = indexedSearchCluster.getIndexingDocproc();
+ indexingDocproc.setClusterName(targetCluster.getName());
+ addIndexingChainsTo(targetCluster, content, indexingDocproc);
}
}
- private void setExistingIndexingCluster(IndexedSearchCluster cluster, Collection<ContainerModel> containers) {
- String indexingClusterName = cluster.getIndexingClusterName();
+ private void setExistingIndexingCluster(Content content, IndexingDocproc indexingDocproc, Collection<ContainerModel> containers) {
+ String indexingClusterName = indexingDocproc.getClusterName(content.getCluster().getName());
ContainerModel containerModel = findByName(indexingClusterName, containers);
if (containerModel == null)
- throw new IllegalArgumentException("Content cluster '" + cluster.getClusterName() + "' refers to docproc " +
+ throw new IllegalArgumentException("Content cluster '" + content.getCluster().getName() + "' refers to docproc " +
"cluster '" + indexingClusterName + "', but this cluster does not exist.");
- addIndexingChainsTo(containerModel.getCluster(), cluster);
+ addIndexingChainsTo(containerModel.getCluster(), content, indexingDocproc);
}
private ContainerModel findByName(String name, Collection<ContainerModel> containers) {
@@ -258,19 +264,19 @@ public class Content extends ConfigModel {
return null;
}
- private void addIndexingChainsTo(ContainerCluster<?> indexer, IndexedSearchCluster cluster) {
+ private void addIndexingChainsTo(ContainerCluster<?> indexer, Content content, IndexingDocproc indexingDocproc) {
addIndexingChain(indexer);
DocprocChain indexingChain;
ComponentRegistry<DocprocChain> allChains = indexer.getDocprocChains().allChains();
- if (cluster.hasExplicitIndexingChain()) {
- indexingChain = allChains.getComponent(cluster.getIndexingChainName());
+ if (indexingDocproc.hasExplicitChain() && !indexingDocproc.getChainName().equals(IndexingDocprocChain.NAME)) {
+ indexingChain = allChains.getComponent(indexingDocproc.getChainName());
if (indexingChain == null) {
- throw new IllegalArgumentException(cluster + " refers to docproc " +
- "chain '" + cluster.getIndexingChainName() +
+ throw new IllegalArgumentException(content.getCluster() + " refers to docproc " +
+ "chain '" + indexingDocproc.getChainName() +
"' for indexing, but this chain does not exist");
}
else if (indexingChain.getId().getName().equals("default")) {
- throw new IllegalArgumentException(cluster + " specifies the chain " +
+ throw new IllegalArgumentException(content.getCluster() + " specifies the chain " +
"'default' as indexing chain. As the 'default' chain is run by default, " +
"using it as the indexing chain will run it twice. " +
"Use a different name for the indexing chain.");
@@ -282,7 +288,7 @@ public class Content extends ConfigModel {
indexingChain = allChains.getComponent(IndexingDocprocChain.NAME);
}
- cluster.setIndexingChain(indexingChain);
+ indexingDocproc.setChain(indexingChain);
}
private TreeConfigProducer<AnyConfigProducer> getDocProc(ApplicationConfigProducerRoot root) {
@@ -301,7 +307,7 @@ public class Content extends ConfigModel {
Content content,
ConfigModelContext modelContext,
ApplicationConfigProducerRoot root) {
- String indexerName = cluster.getIndexingClusterName();
+ String indexerName = cluster.getIndexingDocproc().getClusterName(content.getCluster().getName());
TreeConfigProducer<AnyConfigProducer> parent = getDocProc(root);
ApplicationContainerCluster indexingCluster = new ApplicationContainerCluster(parent, "cluster." + indexerName, indexerName, modelContext.getDeployState());
ContainerModel indexingClusterModel = new ContainerModel(modelContext.withParent(parent).withId(indexingCluster.getSubId()));
@@ -334,7 +340,7 @@ public class Content extends ConfigModel {
indexingCluster.addContainers(nodes);
addIndexingChain(indexingCluster);
- cluster.setIndexingChain(indexingCluster.getDocprocChains().allChains().getComponent(IndexingDocprocChain.NAME));
+ cluster.getIndexingDocproc().setChain(indexingCluster.getDocprocChains().allChains().getComponent(IndexingDocprocChain.NAME));
}
private ContainerCluster<?> getContainerWithDocproc(Collection<ContainerModel> containers) {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java
index a0240d28a3c..ec7acaf819f 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java
@@ -16,6 +16,7 @@ import com.yahoo.vespa.model.builder.xml.dom.ModelElement;
import com.yahoo.vespa.model.builder.xml.dom.VespaDomBuilder;
import com.yahoo.vespa.model.content.cluster.ContentCluster;
import com.yahoo.vespa.model.search.IndexedSearchCluster;
+import com.yahoo.vespa.model.search.IndexingDocproc;
import com.yahoo.vespa.model.search.NodeSpec;
import com.yahoo.vespa.model.search.SchemaDefinitionXMLHandler;
import com.yahoo.vespa.model.search.SearchCluster;
@@ -57,6 +58,7 @@ public class ContentSearchCluster extends TreeConfigProducer<AnyConfigProducer>
/** The single, indexed search cluster this sets up (supporting multiple document types), or null if none */
private IndexedSearchCluster indexedCluster;
+ private Optional<IndexingDocproc> indexingDocproc;
private Redundancy redundancy;
private final String clusterName;
@@ -206,6 +208,7 @@ public class ContentSearchCluster extends TreeConfigProducer<AnyConfigProducer>
double fractionOfMemoryReserved)
{
super(parent, "search");
+ this.indexingDocproc = Optional.empty();
this.clusterName = clusterName;
this.documentDefinitions = documentDefinitions;
this.globallyDistributedDocuments = globallyDistributedDocuments;
@@ -259,6 +262,10 @@ public class ContentSearchCluster extends TreeConfigProducer<AnyConfigProducer>
throw new IllegalArgumentException("Duplicate indexed cluster '" + indexedCluster.getClusterName() + "'");
}
indexedCluster = (IndexedSearchCluster)sc;
+ if (indexingDocproc.isPresent()) {
+ throw new IllegalArgumentException("Indexing docproc has previously been set up for streaming search");
+ }
+ indexingDocproc = Optional.of(indexedCluster.getIndexingDocproc());
}
clusters.put(sc.getClusterName(), sc);
}
@@ -458,6 +465,12 @@ public class ContentSearchCluster extends TreeConfigProducer<AnyConfigProducer>
public Map<String, SearchCluster> getClusters() { return clusters; }
public IndexedSearchCluster getIndexed() { return indexedCluster; }
public boolean hasIndexedCluster() { return indexedCluster != null; }
+ public Optional<IndexingDocproc> getIndexingDocproc() { return indexingDocproc; }
+ public void setupStreamingSearchIndexingDocProc() {
+ if (indexingDocproc.isEmpty()) {
+ indexingDocproc = Optional.of(new IndexingDocproc());
+ }
+ }
public String getClusterName() { return clusterName; }
@Override
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java
index 66a99e1993c..dfdfa9303a7 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java
@@ -203,19 +203,24 @@ public class ContentCluster extends TreeConfigProducer<AnyConfigProducer> implem
if (docprocCluster != null) {
docprocCluster = docprocCluster.trim();
}
- if (c.getSearch().hasIndexedCluster()) {
- if (docprocCluster != null && !docprocCluster.isEmpty()) {
- c.getSearch().getIndexed().setIndexingClusterName(docprocCluster);
- }
- }
-
String docprocChain = e.stringAttribute("chain");
if (docprocChain != null) {
docprocChain = docprocChain.trim();
}
- if (c.getSearch().hasIndexedCluster()) {
- if (docprocChain != null && !docprocChain.isEmpty()) {
- c.getSearch().getIndexed().setIndexingChainName(docprocChain);
+ if (docprocCluster != null && !docprocCluster.isEmpty()) {
+ if (!c.getSearch().hasIndexedCluster() && !c.getSearch().getIndexingDocproc().isPresent() &&
+ docprocChain != null && !docprocChain.isEmpty()) {
+ c.getSearch().setupStreamingSearchIndexingDocProc();
+ }
+ var indexingDocproc = c.getSearch().getIndexingDocproc();
+ if (indexingDocproc.isPresent()) {
+ indexingDocproc.get().setClusterName(docprocCluster);
+ }
+ }
+ if (docprocChain != null && !docprocChain.isEmpty()) {
+ var indexingDocproc = c.getSearch().getIndexingDocproc();
+ if (indexingDocproc.isPresent()) {
+ indexingDocproc.get().setChainName(docprocChain);
}
}
}
@@ -451,7 +456,7 @@ public class ContentCluster extends TreeConfigProducer<AnyConfigProducer> implem
@Override
public void getConfig(MessagetyperouteselectorpolicyConfig.Builder builder) {
- if ( ! getSearch().hasIndexedCluster()) return;
+ if ( ! getSearch().getIndexingDocproc().isPresent()) return;
DocumentProtocol.getConfig(builder, getConfigId());
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/routing/DocumentProtocol.java b/config-model/src/main/java/com/yahoo/vespa/model/routing/DocumentProtocol.java
index ad0312705ca..6623efb599d 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/routing/DocumentProtocol.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/routing/DocumentProtocol.java
@@ -110,7 +110,7 @@ public final class DocumentProtocol implements Protocol,
for (ContentCluster cluster : Content.getContentClusters(repo)) {
DocumentProtocolPoliciesConfig.Cluster.Builder clusterBuilder = new DocumentProtocolPoliciesConfig.Cluster.Builder();
addSelector(cluster.getConfigId(), cluster.getRoutingSelector(), clusterBuilder);
- if (cluster.getSearch().hasIndexedCluster())
+ if (cluster.getSearch().getIndexingDocproc().isPresent())
addRoutes(getDirectRouteName(cluster.getConfigId()), getIndexedRouteName(cluster.getConfigId()), clusterBuilder);
else
clusterBuilder.defaultRoute(cluster.getConfigId());
@@ -227,10 +227,11 @@ public final class DocumentProtocol implements Protocol,
for (ContentCluster cluster : content) {
RouteSpec spec = new RouteSpec(cluster.getConfigId());
- if (cluster.getSearch().hasIndexedCluster()) {
+ if (cluster.getSearch().getIndexingDocproc().isPresent()) {
+ var indexingDocproc = cluster.getSearch().getIndexingDocproc().get();
table.addRoute(spec.addHop("[MessageType:" + cluster.getConfigId() + "]"));
table.addRoute(new RouteSpec(getIndexedRouteName(cluster.getConfigId()))
- .addHop(cluster.getSearch().getIndexed().getIndexingServiceName())
+ .addHop(indexingDocproc.getServiceName())
.addHop("[Content:cluster=" + cluster.getName() + "]"));
table.addRoute(new RouteSpec(getDirectRouteName(cluster.getConfigId()))
.addHop("[Content:cluster=" + cluster.getName() + "]"));
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/search/IndexedSearchCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/search/IndexedSearchCluster.java
index 670460a9f9f..080a2ca43dc 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/search/IndexedSearchCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/search/IndexedSearchCluster.java
@@ -43,11 +43,7 @@ public class IndexedSearchCluster extends SearchCluster
DispatchNodesConfig.Producer,
ConfigInstance.Producer {
- private String indexingClusterName = null; // The name of the docproc cluster to run indexing, by config.
- private String indexingChainName = null;
-
- private DocprocChain indexingChain; // The actual docproc chain indexing for this.
-
+ private IndexingDocproc indexingDocproc;
private Tuning tuning;
private SearchCoverage searchCoverage;
@@ -77,6 +73,7 @@ public class IndexedSearchCluster extends SearchCluster
public IndexedSearchCluster(TreeConfigProducer<AnyConfigProducer> parent, String clusterName, int index, ModelContext.FeatureFlags featureFlags) {
super(parent, clusterName, index);
+ indexingDocproc = new IndexingDocproc();
documentDbsConfigProducer = new MultipleDocumentDatabasesConfigProducer(this, documentDbs);
rootDispatch = new DispatchGroup(this);
defaultDispatchPolicy = DispatchTuning.Builder.toDispatchPolicy(featureFlags.queryDispatchPolicy());
@@ -87,58 +84,7 @@ public class IndexedSearchCluster extends SearchCluster
@Override
protected IndexingMode getIndexingMode() { return IndexingMode.REALTIME; }
- public final boolean hasExplicitIndexingCluster() {
- return indexingClusterName != null;
- }
-
- public final boolean hasExplicitIndexingChain() {
- return indexingChainName != null;
- }
-
- /**
- * Returns the name of the docproc cluster running indexing for this search cluster. This is derived from the
- * services file on initialization, this can NOT be used at runtime to determine indexing chain. When initialization
- * is done, the {@link #getIndexingServiceName()} method holds the actual indexing docproc chain object.
- *
- * @return the name of the docproc cluster associated with this
- */
- public String getIndexingClusterName() {
- return hasExplicitIndexingCluster() ? indexingClusterName : getClusterName() + ".indexing";
- }
-
- public String getIndexingChainName() {
- return indexingChainName;
- }
-
- public void setIndexingChainName(String indexingChainName) {
- this.indexingChainName = indexingChainName;
- }
-
- /**
- * Sets the name of the docproc cluster running indexing for this search cluster. This is for initial configuration,
- * and will not reflect the actual indexing chain. See {@link #getIndexingClusterName} for more detail.
- *
- * @param name the name of the docproc cluster associated with this
- */
- public void setIndexingClusterName(String name) {
- indexingClusterName = name;
- }
-
- public String getIndexingServiceName() {
- return indexingChain.getServiceName();
- }
-
- /**
- * Sets the docproc chain that will be running indexing for this search cluster. This is set by the
- * {@link com.yahoo.vespa.model.content.Content} model during build.
- *
- * @param chain the chain that is to run indexing for this cluster
- * @return this, to allow chaining
- */
- public SearchCluster setIndexingChain(DocprocChain chain) {
- indexingChain = chain;
- return this;
- }
+ public IndexingDocproc getIndexingDocproc() { return indexingDocproc; }
public DispatchGroup getRootDispatch() { return rootDispatch; }
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/search/IndexingDocproc.java b/config-model/src/main/java/com/yahoo/vespa/model/search/IndexingDocproc.java
new file mode 100644
index 00000000000..46f3e6f459d
--- /dev/null
+++ b/config-model/src/main/java/com/yahoo/vespa/model/search/IndexingDocproc.java
@@ -0,0 +1,68 @@
+package com.yahoo.vespa.model.search;
+
+import com.yahoo.vespa.model.container.docproc.DocprocChain;
+
+/**
+ * Utility class that tracks which indexing docproc a search cluster should use.
+ */
+public class IndexingDocproc {
+ private String clusterName; // The name of the docproc cluster to run indexing, by config.
+ private String chainName;
+
+ private DocprocChain chain; // The actual docproc chain indexing for this.
+
+ public boolean hasExplicitCluster() {
+ return clusterName != null;
+ }
+
+ public boolean hasExplicitChain() {
+ return chainName != null;
+ }
+
+ /**
+ * Returns the name of the docproc cluster running indexing for this search cluster. This is derived from the
+ * services file on initialization; it can NOT be used at runtime to determine the indexing chain. Once initialization
+ * is done, {@link #getServiceName()} returns the service name of the actual indexing docproc chain.
+ *
+ * @return the name of the docproc cluster associated with this
+ */
+ public String getClusterName(String searchClusterName) {
+ return hasExplicitCluster() ? clusterName : searchClusterName + ".indexing";
+ }
+
+ public String getChainName() {
+ return chainName;
+ }
+
+ public void setChainName(String name) {
+ chainName = name;
+ }
+
+ /**
+ * Sets the name of the docproc cluster running indexing for this search cluster. This is for initial configuration,
+ * and will not reflect the actual indexing chain. See {@link #getClusterName} for more detail.
+ *
+ * @param name the name of the docproc cluster associated with this
+ */
+ public void setClusterName(String name) {
+ clusterName = name;
+ }
+
+ public String getServiceName() {
+ return chain.getServiceName();
+ }
+
+ /**
+ * Sets the docproc chain that will be running indexing for this search cluster. This is set by the
+ * {@link com.yahoo.vespa.model.content.Content} model during build.
+ *
+ * @param chain the chain that is to run indexing for this cluster
+ */
+ public void setChain(DocprocChain chain) { this.chain = chain; }
+
+ public IndexingDocproc() {
+ clusterName = null;
+ chainName = null;
+ chain = null;
+ }
+}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/search/NodeResourcesTuning.java b/config-model/src/main/java/com/yahoo/vespa/model/search/NodeResourcesTuning.java
index 5b747b93268..b444de5fb14 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/search/NodeResourcesTuning.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/search/NodeResourcesTuning.java
@@ -27,8 +27,9 @@ public class NodeResourcesTuning implements ProtonConfig.Producer {
private final int threadsPerSearch;
private final double fractionOfMemoryReserved;
- // "Reserve" 0.5GB of memory for other processes running on the content node (config-proxy, metrics-proxy).
- public static final double reservedMemoryGb = 0.7;
+ // Memory for other processes running on the node (config-proxy, metrics-proxy).
+ // Keep in sync with node-repository/ClusterModel
+ public static final double nodeMemoryOverheadGb = 0.7;
public NodeResourcesTuning(NodeResources resources,
int threadsPerSearch,
@@ -128,7 +129,7 @@ public class NodeResourcesTuning implements ProtonConfig.Producer {
/** Returns the memory we can expect will be available for the content node processes */
private double usableMemoryGb() {
- double usableMemoryGb = resources.memoryGb() - reservedMemoryGb;
+ double usableMemoryGb = resources.memoryGb() - nodeMemoryOverheadGb;
return usableMemoryGb * (1 - fractionOfMemoryReserved);
}
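
The renamed constant above feeds the proton memory sizing asserted in ModelProvisioningTest further down: usable memory is the node's memory minus the fixed per-node overhead, scaled by the fraction not reserved for other purposes. A one-function arithmetic sketch of that formula:

package main

import "fmt"

const nodeMemoryOverheadGb = 0.7 // matches the constant above

// usableMemoryGb mirrors the formula in NodeResourcesTuning.usableMemoryGb.
func usableMemoryGb(memoryGb, fractionOfMemoryReserved float64) float64 {
    return (memoryGb - nodeMemoryOverheadGb) * (1 - fractionOfMemoryReserved)
}

func main() {
    // A 3 GB node with 18% of memory reserved (the combined-cluster case in the test).
    fmt.Printf("%.3f GB\n", usableMemoryGb(3, 0.18)) // 1.886 GB
}
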
diff --git a/config-model/src/main/resources/schema/common.rnc b/config-model/src/main/resources/schema/common.rnc
index 538a8f069f5..21f3399a027 100644
--- a/config-model/src/main/resources/schema/common.rnc
+++ b/config-model/src/main/resources/schema/common.rnc
@@ -23,9 +23,9 @@ Nodes = element nodes {
}
Resources = element resources {
- attribute vcpu { xsd:double { minExclusive = "0.0" } | xsd:string } &
- attribute memory { xsd:string } &
- attribute disk { xsd:string } &
+ attribute vcpu { xsd:double { minExclusive = "0.0" } | xsd:string }? &
+ attribute memory { xsd:string }? &
+ attribute disk { xsd:string }? &
attribute disk-speed { xsd:string }? &
attribute storage-type { xsd:string }? &
attribute architecture { xsd:string }? &
diff --git a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
index f1dffe53ad7..2c6a0fb7826 100644
--- a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
+++ b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
@@ -57,7 +57,7 @@ import static com.yahoo.config.provision.NodeResources.DiskSpeed;
import static com.yahoo.config.provision.NodeResources.StorageType;
import static com.yahoo.vespa.defaults.Defaults.getDefaults;
import static com.yahoo.vespa.model.search.NodeResourcesTuning.GB;
-import static com.yahoo.vespa.model.search.NodeResourcesTuning.reservedMemoryGb;
+import static com.yahoo.vespa.model.search.NodeResourcesTuning.nodeMemoryOverheadGb;
import static com.yahoo.vespa.model.test.utils.ApplicationPackageUtils.generateSchemas;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -277,8 +277,8 @@ public class ModelProvisioningTest {
assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
assertEquals(2, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
assertEquals(18, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size is lowered with combined clusters");
- assertEquals((long) ((3 - reservedMemoryGb) * (Math.pow(1024, 3)) * (1 - 0.18)), protonMemorySize(model.getContentClusters()
- .get("content1")), "Memory for proton is lowered to account for the jvm heap");
+ assertEquals((long) ((3 - nodeMemoryOverheadGb) * (Math.pow(1024, 3)) * (1 - 0.18)), protonMemorySize(model.getContentClusters()
+ .get("content1")), "Memory for proton is lowered to account for the jvm heap");
assertProvisioned(0, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model);
assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Id.from("container1"), ClusterSpec.Type.combined, model);
assertEquals(1, logger.msgs().size());
@@ -314,8 +314,8 @@ public class ModelProvisioningTest {
assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
assertEquals(2, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
assertEquals(30, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size is lowered with combined clusters");
- assertEquals((long) ((3 - reservedMemoryGb) * (Math.pow(1024, 3)) * (1 - 0.30)), protonMemorySize(model.getContentClusters()
- .get("content1")), "Memory for proton is lowered to account for the jvm heap");
+ assertEquals((long) ((3 - nodeMemoryOverheadGb) * (Math.pow(1024, 3)) * (1 - 0.30)), protonMemorySize(model.getContentClusters()
+ .get("content1")), "Memory for proton is lowered to account for the jvm heap");
assertProvisioned(0, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model);
assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Id.from("container1"), ClusterSpec.Type.combined, model);
}
@@ -346,7 +346,7 @@ public class ModelProvisioningTest {
assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
assertEquals(2, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
assertEquals(ApplicationContainerCluster.defaultHeapSizePercentageOfTotalNodeMemory, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size is normal");
- assertEquals((long) ((3 - reservedMemoryGb) * (Math.pow(1024, 3))), protonMemorySize(model.getContentClusters().get("content1")), "Memory for proton is normal");
+ assertEquals((long) ((3 - nodeMemoryOverheadGb) * (Math.pow(1024, 3))), protonMemorySize(model.getContentClusters().get("content1")), "Memory for proton is normal");
}
@Test
@@ -1542,7 +1542,7 @@ public class ModelProvisioningTest {
tester.addHosts(new NodeResources(8, 200, 1000000, 0.3), 5); // Content-foo
tester.addHosts(new NodeResources(10, 64, 200, 0.3), 6); // Content-bar
tester.addHosts(new NodeResources(0.5, 2, 10, 0.3), 6); // Cluster-controller
- VespaModel model = tester.createModel(services, true, 0);
+ VespaModel model = tester.createModel(services, true, NodeResources.unspecified(), 0);
assertEquals(totalHosts, model.getRoot().hostSystem().getHosts().size());
}
@@ -2425,7 +2425,7 @@ public class ModelProvisioningTest {
assertTrue(config.build().server().stream().noneMatch(ZookeeperServerConfig.Server::joining), "Initial servers are not joining");
}
{
- VespaModel nextModel = tester.createModel(Zone.defaultZone(), servicesXml.apply(3), true, false, false, 0, Optional.of(model), new DeployState.Builder(), "node-1-3-50-04", "node-1-3-50-03");
+ VespaModel nextModel = tester.createModel(Zone.defaultZone(), servicesXml.apply(3), true, false, false, NodeResources.unspecified(), 0, Optional.of(model), new DeployState.Builder(), "node-1-3-50-04", "node-1-3-50-03");
ApplicationContainerCluster cluster = nextModel.getContainerClusters().get("zk");
ZookeeperServerConfig.Builder config = new ZookeeperServerConfig.Builder();
cluster.getContainers().forEach(c -> c.getConfig(config));
@@ -2491,13 +2491,35 @@ public class ModelProvisioningTest {
VespaModelTester tester = new VespaModelTester();
tester.addHosts(new NodeResources(1, 3, 10, 5, NodeResources.DiskSpeed.slow), 5);
- VespaModel model = tester.createModel(services, true, 0);
+ VespaModel model = tester.createModel(services, true, NodeResources.unspecified(), 0);
ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch();
assertEquals(2, cluster.getSearchNodes().size());
assertEquals(40, getProtonConfig(cluster, 0).hwinfo().disk().writespeed(), 0.001);
assertEquals(40, getProtonConfig(cluster, 1).hwinfo().disk().writespeed(), 0.001);
}
+ @Test
+ public void require_that_resources_can_be_partially_specified() {
+ String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>",
+ "<services>",
+ " <content version='1.0' id='test'>",
+ " <redundancy>2</redundancy>" +
+ " <documents>",
+ " <document type='type1' mode='index'/>",
+ " </documents>",
+ " <nodes count='2'>",
+ " <resources vcpu='1'/>",
+ " </nodes>",
+ " </content>",
+ "</services>");
+
+ VespaModelTester tester = new VespaModelTester();
+ tester.addHosts(new NodeResources(1, 3, 10, 5), 5);
+ VespaModel model = tester.createModel(services, true, new NodeResources(1.0, 3.0, 9.0, 1.0), 0);
+ ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch();
+ assertEquals(2, cluster.getSearchNodes().size());
+ }
+
private static ProtonConfig getProtonConfig(ContentSearchCluster cluster, int searchNodeIdx) {
ProtonConfig.Builder builder = new ProtonConfig.Builder();
List<SearchNode> searchNodes = cluster.getSearchNodes();
@@ -2542,12 +2564,12 @@ public class ModelProvisioningTest {
VespaModelTester tester = new VespaModelTester();
tester.addHosts(new NodeResources(1, 3, 10, 1), 4);
tester.addHosts(new NodeResources(1, 128, 100, 0.3), 1);
- VespaModel model = tester.createModel(services, true, 0);
+ VespaModel model = tester.createModel(services, true, NodeResources.unspecified(), 0);
ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch();
ProtonConfig cfg = getProtonConfig(model, cluster.getSearchNodes().get(0).getConfigId());
assertEquals(2000, cfg.flush().memory().maxtlssize()); // from config override
assertEquals(1000, cfg.flush().memory().maxmemory()); // from explicit tuning
- assertEquals((long) ((128 - reservedMemoryGb) * GB * 0.08), cfg.flush().memory().each().maxmemory()); // from default node flavor tuning
+ assertEquals((long) ((128 - nodeMemoryOverheadGb) * GB * 0.08), cfg.flush().memory().each().maxmemory()); // from default node flavor tuning
}
private static ProtonConfig getProtonConfig(VespaModel model, String configId) {
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/CloudHttpConnectorValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/CloudHttpConnectorValidatorTest.java
new file mode 100644
index 00000000000..2b47bd7910f
--- /dev/null
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/CloudHttpConnectorValidatorTest.java
@@ -0,0 +1,104 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.model.application.validation;
+
+import com.yahoo.config.model.NullConfigModelRegistry;
+import com.yahoo.config.model.deploy.DeployState;
+import com.yahoo.config.model.deploy.TestProperties;
+import com.yahoo.config.model.test.MockApplicationPackage;
+import com.yahoo.vespa.model.VespaModel;
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+/**
+ * @author bjorncs
+ */
+class CloudHttpConnectorValidatorTest {
+
+ private static final String CUSTOM_SSL_ON_8080 =
+ """
+ <server port='8080' id='default'>
+ <ssl>
+ <private-key-file>/foo/key</private-key-file>
+ <certificate-file>/foo/cert</certificate-file>
+ </ssl>
+ </server>
+ """;
+
+ private static final String DEFAULT_SSL_ON_8080 =
+ """
+ <server port='8080' id='default'/>
+ """;
+
+ private static final String ADDITIONAL_CONNECTOR =
+ """
+ <server port='8080' id='default'/>
+ <server port='1234' id='custom'/>
+ """;
+
+ @Test
+ void fails_on_custom_ssl_for_cloud_application() {
+ var exception = assertThrows(IllegalArgumentException.class, () -> runValidatorOnApp(true, "", CUSTOM_SSL_ON_8080));
+ var expected = "Adding additional or modifying existing HTTPS connectors is not allowed for Vespa Cloud applications. " +
+ "Violating connectors: [default@8080]. See https://cloud.vespa.ai/en/security/whitepaper, " +
+ "https://cloud.vespa.ai/en/security/guide#data-plane.";
+ assertEquals(expected, exception.getMessage());
+ }
+
+ @Test
+ void allows_custom_ssl_for_infra() {
+ assertDoesNotThrow(() -> runValidatorOnApp(true, " application-type='hosted-infrastructure'", CUSTOM_SSL_ON_8080));
+ }
+
+ @Test
+ void allows_custom_ssl_for_self_hosted() {
+ assertDoesNotThrow(() -> runValidatorOnApp(false, "", CUSTOM_SSL_ON_8080));
+ }
+
+ @Test
+ void fails_on_additional_connectors_for_cloud_application() {
+ var exception = assertThrows(IllegalArgumentException.class, () -> runValidatorOnApp(true, "", ADDITIONAL_CONNECTOR));
+ var expected = "Illegal port 1234 in http server 'custom': Port must be set to 8080"; // Currently fails earlier in model construction
+ assertEquals(expected, exception.getMessage());
+ }
+
+ @Test
+ void allows_additional_connectors_for_self_hosted() {
+ assertDoesNotThrow(() -> runValidatorOnApp(false, "", ADDITIONAL_CONNECTOR));
+ }
+
+ @Test
+ void allows_default_ssl_for_cloud_application() {
+ assertDoesNotThrow(() -> runValidatorOnApp(true, "", DEFAULT_SSL_ON_8080));
+ }
+
+ @Test
+ void allows_default_ssl_for_self_hosted() {
+ assertDoesNotThrow(() -> runValidatorOnApp(false, "", DEFAULT_SSL_ON_8080));
+ }
+
+ private static void runValidatorOnApp(boolean hosted, String appTypeAttribute, String serverXml) throws Exception {
+ String servicesXml = """
+ <services version='1.0'%s>
+ <container version='1.0'>
+ <http>
+ %s
+ </http>
+ </container>
+ </services>
+ """.formatted(appTypeAttribute, serverXml);
+ var state = new DeployState.Builder()
+ .applicationPackage(
+ new MockApplicationPackage.Builder()
+ .withServices(servicesXml)
+ .build())
+ .properties(new TestProperties().setHostedVespa(hosted))
+ .build();
+ var model = new VespaModel(new NullConfigModelRegistry(), state);
+ new CloudHttpConnectorValidator().validate(model, state);
+ }
+
+} \ No newline at end of file
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidatorTest.java
index 80643917a58..42be1592eca 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidatorTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidatorTest.java
@@ -281,7 +281,25 @@ public class ConstantTensorJsonValidatorTest {
" }",
"}"));
});
- assertTrue(exception.getMessage().contains("Expected field name 'cells', got 'stats'"));
+ System.err.println("msg: " + exception.getMessage());
+ assertTrue(exception.getMessage().contains("Expected 'cells' or 'values', got 'stats'"));
+ }
+
+ @Test
+ void ensure_that_values_array_for_vector_works() {
+ validateTensorJson(
+ TensorType.fromSpec("tensor(x[5])"),
+ inputJsonToReader("[5,4.0,3.1,-2,-1.0]"));
+ validateTensorJson(
+ TensorType.fromSpec("tensor(x[5])"),
+ inputJsonToReader("{'values':[5,4.0,3.1,-2,-1.0]}"));
+ }
+
+ @Test
+ void ensure_that_simple_object_for_map_works() {
+ validateTensorJson(
+ TensorType.fromSpec("tensor(x{})"),
+ inputJsonToReader("{'cells':{'a':5,'b':4.0,'c':3.1,'d':-2,'e':-1.0}}"));
}
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/QuotaValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/QuotaValidatorTest.java
index 1a7b3d62cb7..a1a3b40a858 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/QuotaValidatorTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/QuotaValidatorTest.java
@@ -21,19 +21,20 @@ public class QuotaValidatorTest {
private final Zone publicZone = new Zone(SystemName.Public, Environment.prod, RegionName.from("foo"));
private final Zone publicCdZone = new Zone(SystemName.PublicCd, Environment.prod, RegionName.from("foo"));
+ private final Zone devZone = new Zone(SystemName.Public, Environment.dev, RegionName.from("foo"));
private final Quota quota = Quota.unlimited().withClusterSize(10).withBudget(BigDecimal.valueOf(1.25));
@Test
void test_deploy_under_quota() {
var tester = new ValidationTester(8, false, new TestProperties().setHostedVespa(true).setQuota(quota).setZone(publicZone));
- tester.deploy(null, getServices("testCluster", 4), Environment.prod, null);
+ tester.deploy(null, getServices(4), Environment.prod, null);
}
@Test
void test_deploy_above_quota_clustersize() {
var tester = new ValidationTester(14, false, new TestProperties().setHostedVespa(true).setQuota(quota).setZone(publicZone));
try {
- tester.deploy(null, getServices("testCluster", 11), Environment.prod, null);
+ tester.deploy(null, getServices(11), Environment.prod, null);
fail();
} catch (RuntimeException e) {
assertEquals("Clusters testCluster exceeded max cluster size of 10", e.getMessage());
@@ -44,10 +45,10 @@ public class QuotaValidatorTest {
void test_deploy_above_quota_budget() {
var tester = new ValidationTester(13, false, new TestProperties().setHostedVespa(true).setQuota(quota).setZone(publicZone));
try {
- tester.deploy(null, getServices("testCluster", 10), Environment.prod, null);
+ tester.deploy(null, getServices(10), Environment.prod, null);
fail();
} catch (RuntimeException e) {
- assertEquals("The max resources specified cost $1.63 but your quota is $1.25: Contact support to upgrade your plan.", e.getMessage());
+ assertEquals("The resources used cost $1.63 but your quota is $1.25: Contact support to upgrade your plan.", e.getMessage());
}
}
@@ -55,10 +56,10 @@ public class QuotaValidatorTest {
void test_deploy_above_quota_budget_in_publiccd() {
var tester = new ValidationTester(13, false, new TestProperties().setHostedVespa(true).setQuota(quota.withBudget(BigDecimal.ONE)).setZone(publicCdZone));
try {
- tester.deploy(null, getServices("testCluster", 10), Environment.prod, null);
+ tester.deploy(null, getServices(10), Environment.prod, null);
fail();
} catch (RuntimeException e) {
- assertEquals("publiccd: The max resources specified cost $1.63 but your quota is $1.00: Contact support to upgrade your plan.", e.getMessage());
+ assertEquals("publiccd: The resources used cost $1.63 but your quota is $1.00: Contact support to upgrade your plan.", e.getMessage());
}
}
@@ -66,11 +67,33 @@ public class QuotaValidatorTest {
void test_deploy_max_resources_above_quota() {
var tester = new ValidationTester(13, false, new TestProperties().setHostedVespa(true).setQuota(quota).setZone(publicCdZone));
try {
- tester.deploy(null, getServices("testCluster", 10), Environment.prod, null);
+ tester.deploy(null, getServices(10), Environment.prod, null);
fail();
} catch (RuntimeException e) {
- assertEquals("publiccd: The max resources specified cost $1.63 but your quota is $1.25: Contact support to upgrade your plan.", e.getMessage());
+ assertEquals("publiccd: The resources used cost $1.63 but your quota is $1.25: Contact support to upgrade your plan.", e.getMessage());
+ }
+ }
+
+
+ @Test
+ void test_deploy_above_quota_budget_in_dev() {
+ var quota = Quota.unlimited().withBudget(BigDecimal.valueOf(0.01));
+ var tester = new ValidationTester(5, false, new TestProperties().setHostedVespa(true).setQuota(quota).setZone(devZone));
+ // There is downscaling to 1 node per cluster in dev
+ try {
+ tester.deploy(null, getServices(2, false), Environment.dev, null);
+ fail();
+ } catch (RuntimeException e) {
+ assertEquals("The resources used cost $0.16 but your quota is $0.01: Contact support to upgrade your plan.", e.getMessage());
+ }
+
+ // Override so that we will get 2 nodes in content cluster
+ try {
+ tester.deploy(null, getServices(2, true), Environment.dev, null);
+ fail();
+ } catch (RuntimeException e) {
+ assertEquals("The resources used cost $0.33 but your quota is $0.01: Contact support to upgrade your plan.", e.getMessage());
}
}
@@ -79,25 +102,26 @@ public class QuotaValidatorTest {
var quota = Quota.unlimited().withBudget(BigDecimal.valueOf(-1));
var tester = new ValidationTester(13, false, new TestProperties().setHostedVespa(true).setQuota(quota).setZone(publicZone));
try {
- tester.deploy(null, getServices("testCluster", 10), Environment.prod, null);
+ tester.deploy(null, getServices(10), Environment.prod, null);
fail();
} catch (RuntimeException e) {
- assertEquals("The max resources specified cost $-.-- but your quota is $--.--: Please free up some capacity.",
+ assertEquals("The resources used cost $-.-- but your quota is $--.--: Please free up some capacity.",
ValidationTester.censorNumbers(e.getMessage()));
}
}
- private static String getServices(String contentClusterId, int nodeCount) {
- return "<services version='1.0'>" +
- " <content id='" + contentClusterId + "' version='1.0'>" +
+ private static String getServices(int nodeCount) {
+ return getServices(nodeCount, false);
+ }
+
+ private static String getServices(int nodeCount, boolean devOverride) {
+ return "<services version='1.0' xmlns:deploy='vespa' xmlns:preprocess='properties'>" +
+ " <content id='" + "testCluster" + "' version='1.0'>" +
" <redundancy>1</redundancy>" +
- " <engine>" +
- " <proton/>" +
- " </engine>" +
" <documents>" +
" <document type='music' mode='index'/>" +
" </documents>" +
- " <nodes count='" + nodeCount + "'>" +
+ " <nodes count='" + nodeCount + "' " + (devOverride ? "required='true'" : "") + " >\n" +
" <resources vcpu=\"[0.5, 2]\" memory=\"[1Gb, 6Gb]\" disk=\"[1Gb, 18Gb]\"/>\n" +
" </nodes>" +
" </content>" +
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ValidationTester.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ValidationTester.java
index 78d3838d39d..1517f7971ed 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ValidationTester.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ValidationTester.java
@@ -57,6 +57,7 @@ public class ValidationTester {
public ValidationTester(InMemoryProvisioner hostProvisioner, TestProperties testProperties) {
this.hostProvisioner = hostProvisioner;
this.properties = testProperties;
+ hostProvisioner.setEnvironment(testProperties.zone().environment());
}
/**
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/JettyContainerModelBuilderTest.java b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/JettyContainerModelBuilderTest.java
index 8b1217758ab..89cce7feacb 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/JettyContainerModelBuilderTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/JettyContainerModelBuilderTest.java
@@ -243,11 +243,7 @@ public class JettyContainerModelBuilderTest extends ContainerModelBuilderTestBas
Element clusterElem = DomBuilderTest.parse(
"<container id='default' version='1.0'>",
" <http>",
- " <server port='8080' id='ssl'>",
- " <ssl>",
- " <private-key-file>/foo/key</private-key-file>",
- " <certificate-file>/foo/cert</certificate-file>",
- " </ssl>",
+ " <server port='8080' id='default'>",
" </server>",
" </http>",
multiNode,
@@ -272,8 +268,8 @@ public class JettyContainerModelBuilderTest extends ContainerModelBuilderTestBas
.build();
MockRoot root = new MockRoot("root", deployState);
createModel(root, deployState, null, clusterElem);
- ConnectorConfig sslProvider = root.getConfig(ConnectorConfig.class, "default/http/jdisc-jetty/ssl");
- assertTrue(sslProvider.ssl().enabled());
+ ConnectorConfig sslProvider = root.getConfig(ConnectorConfig.class, "default/http/jdisc-jetty/default");
+ assertFalse(sslProvider.ssl().enabled());
assertEquals("", sslProvider.ssl().certificate());
assertEquals("", sslProvider.ssl().privateKey());
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java
index 14fe7bbcc36..f9b1edf4f35 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java
@@ -1397,4 +1397,42 @@ public class ContentClusterTest extends ContentBaseTest {
assertEquals(2, fleetControllerConfigBuilder.build().max_number_of_groups_allowed_to_be_down());
}
+ private void assertIndexingDocprocEnabled(boolean indexed, boolean force, boolean expEnabled)
+ {
+ String services = "<?xml version='1.0' encoding='UTF-8' ?>" +
+ "<services version='1.0'>" +
+ " <container id='default' version='1.0'>" +
+ " <document-processing/>" +
+ " </container>" +
+ " <content id='search' version='1.0'>" +
+ " <redundancy>1</redundancy>" +
+ " <documents>" +
+ " <document-processing cluster='default'" + (force ? " chain='indexing'" : "") + "/>" +
+ " <document type='type1' mode='" + (indexed ? "index" : "streaming") + "'/>" +
+ " </documents>" +
+ " </content>" +
+ "</services>";
+ VespaModel model = createEnd2EndOneNode(new TestProperties(), services);
+ var searchCluster = model.getContentClusters().get("search").getSearch();
+ assertEquals(expEnabled, searchCluster.getIndexingDocproc().isPresent());
+ }
+
+ @Test
+ void testIndexingDocprocEnabledWhenIndexMode()
+ {
+ assertIndexingDocprocEnabled(true, false, true);
+ }
+
+ @Test
+ void testIndexingDocprocNotEnabledWhenStreamingMode()
+ {
+ assertIndexingDocprocEnabled(false, false, false);
+ }
+
+ @Test
+ void testIndexingDocprocEnabledWhenStreamingModeAndForced()
+ {
+ assertIndexingDocprocEnabled(false, true, true);
+ }
+
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/IndexingAndDocprocRoutingTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/IndexingAndDocprocRoutingTest.java
index 4476e128196..ac9d0ad8724 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/IndexingAndDocprocRoutingTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/IndexingAndDocprocRoutingTest.java
@@ -220,7 +220,7 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
fail("Expected exception");
}
catch (IllegalArgumentException e) {
- assertTrue(e.getMessage().startsWith("Indexing cluster 'musiccluster' specifies the chain 'default' as indexing chain"));
+ assertTrue(e.getMessage().startsWith("content cluster 'musiccluster' specifies the chain 'default' as indexing chain"));
}
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/search/NodeResourcesTuningTest.java b/config-model/src/test/java/com/yahoo/vespa/model/search/NodeResourcesTuningTest.java
index 5831090c261..ab138cb2e34 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/search/NodeResourcesTuningTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/search/NodeResourcesTuningTest.java
@@ -11,7 +11,7 @@ import org.junit.jupiter.api.Test;
import java.util.Arrays;
import java.util.List;
-import static com.yahoo.vespa.model.search.NodeResourcesTuning.reservedMemoryGb;
+import static com.yahoo.vespa.model.search.NodeResourcesTuning.nodeMemoryOverheadGb;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static com.yahoo.vespa.model.search.NodeResourcesTuning.MB;
import static com.yahoo.vespa.model.search.NodeResourcesTuning.GB;
@@ -33,13 +33,13 @@ public class NodeResourcesTuningTest {
@Test
void require_that_hwinfo_memory_size_is_set() {
- assertEquals(24 * GB, configFromMemorySetting(24 + reservedMemoryGb, 0).hwinfo().memory().size());
- assertEquals(combinedFactor * 24 * GB, configFromMemorySetting(24 + reservedMemoryGb, ApplicationContainerCluster.heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster * 0.01).hwinfo().memory().size(), 1000);
+ assertEquals(24 * GB, configFromMemorySetting(24 + nodeMemoryOverheadGb, 0).hwinfo().memory().size());
+ assertEquals(combinedFactor * 24 * GB, configFromMemorySetting(24 + nodeMemoryOverheadGb, ApplicationContainerCluster.heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster * 0.01).hwinfo().memory().size(), 1000);
}
@Test
void reserved_memory_on_content_node() {
- assertEquals(0.7, reservedMemoryGb, delta);
+ assertEquals(0.7, nodeMemoryOverheadGb, delta);
}
private ProtonConfig getProtonMemoryConfig(List<Pair<String, String>> sdAndMode, double gb) {
@@ -54,7 +54,7 @@ public class NodeResourcesTuningTest {
}
private void verify_that_initial_numdocs_is_dependent_of_mode() {
- ProtonConfig cfg = getProtonMemoryConfig(Arrays.asList(new Pair<>("a", "INDEX"), new Pair<>("b", "STREAMING"), new Pair<>("c", "STORE_ONLY")), 24 + reservedMemoryGb);
+ ProtonConfig cfg = getProtonMemoryConfig(Arrays.asList(new Pair<>("a", "INDEX"), new Pair<>("b", "STREAMING"), new Pair<>("c", "STORE_ONLY")), 24 + nodeMemoryOverheadGb);
assertEquals(3, cfg.documentdb().size());
assertEquals(1024, cfg.documentdb(0).allocation().initialnumdocs());
assertEquals("a", cfg.documentdb(0).inputdoctypename());
@@ -162,14 +162,14 @@ public class NodeResourcesTuningTest {
@Test
void require_that_summary_cache_max_bytes_is_set_based_on_memory() {
- assertEquals(1 * GB / 25, configFromMemorySetting(1 + reservedMemoryGb, 0).summary().cache().maxbytes());
- assertEquals(256 * GB / 25, configFromMemorySetting(256 + reservedMemoryGb, 0).summary().cache().maxbytes());
+ assertEquals(1 * GB / 25, configFromMemorySetting(1 + nodeMemoryOverheadGb, 0).summary().cache().maxbytes());
+ assertEquals(256 * GB / 25, configFromMemorySetting(256 + nodeMemoryOverheadGb, 0).summary().cache().maxbytes());
}
@Test
void require_that_summary_cache_memory_is_reduced_with_combined_cluster() {
- assertEquals(combinedFactor * 1 * GB / 25, configFromMemorySetting(1 + reservedMemoryGb, ApplicationContainerCluster.heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster * 0.01).summary().cache().maxbytes(), 1000);
- assertEquals(combinedFactor * 256 * GB / 25, configFromMemorySetting(256 + reservedMemoryGb, ApplicationContainerCluster.heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster * 0.01).summary().cache().maxbytes(), 1000);
+ assertEquals(combinedFactor * 1 * GB / 25, configFromMemorySetting(1 + nodeMemoryOverheadGb, ApplicationContainerCluster.heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster * 0.01).summary().cache().maxbytes(), 1000);
+ assertEquals(combinedFactor * 256 * GB / 25, configFromMemorySetting(256 + nodeMemoryOverheadGb, ApplicationContainerCluster.heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster * 0.01).summary().cache().maxbytes(), 1000);
}
@Test
@@ -191,12 +191,12 @@ public class NodeResourcesTuningTest {
}
private static void assertDocumentStoreMaxFileSize(long expFileSizeBytes, int wantedMemoryGb) {
- assertEquals(expFileSizeBytes, configFromMemorySetting(wantedMemoryGb + reservedMemoryGb, 0).summary().log().maxfilesize());
+ assertEquals(expFileSizeBytes, configFromMemorySetting(wantedMemoryGb + nodeMemoryOverheadGb, 0).summary().log().maxfilesize());
}
private static void assertFlushStrategyMemory(long expMemoryBytes, int wantedMemoryGb) {
- assertEquals(expMemoryBytes, configFromMemorySetting(wantedMemoryGb + reservedMemoryGb, 0).flush().memory().maxmemory());
- assertEquals(expMemoryBytes, configFromMemorySetting(wantedMemoryGb + reservedMemoryGb, 0).flush().memory().each().maxmemory());
+ assertEquals(expMemoryBytes, configFromMemorySetting(wantedMemoryGb + nodeMemoryOverheadGb, 0).flush().memory().maxmemory());
+ assertEquals(expMemoryBytes, configFromMemorySetting(wantedMemoryGb + nodeMemoryOverheadGb, 0).flush().memory().each().maxmemory());
}
private static void assertFlushStrategyTlsSize(long expTlsSizeBytes, int diskGb) {
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTester.java b/config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTester.java
index 500fb0838e1..e7d46e1c009 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTester.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTester.java
@@ -130,51 +130,60 @@ public class VespaModelTester {
/** Creates a model which uses 0 as start index */
public VespaModel createModel(String services, String hosts, boolean failOnOutOfCapacity, String ... retiredHostNames) {
- return createModel(Zone.defaultZone(), services, hosts, failOnOutOfCapacity, false, false, 0,
+ return createModel(Zone.defaultZone(), services, hosts, failOnOutOfCapacity, false, false,
+ NodeResources.unspecified(), 0,
Optional.empty(), new DeployState.Builder(), retiredHostNames);
}
/** Creates a model which uses 0 as start index */
public VespaModel createModel(String services, boolean failOnOutOfCapacity, DeployState.Builder builder) {
- return createModel(Zone.defaultZone(), services, failOnOutOfCapacity, false, false, 0, Optional.empty(), builder);
+ return createModel(Zone.defaultZone(), services, failOnOutOfCapacity, false, false,
+ NodeResources.unspecified(), 0, Optional.empty(), builder);
}
/** Creates a model which uses 0 as start index */
public VespaModel createModel(String services, boolean failOnOutOfCapacity, boolean useMaxResources, String ... retiredHostNames) {
- return createModel(Zone.defaultZone(), services, failOnOutOfCapacity, useMaxResources, false, 0,
+ return createModel(Zone.defaultZone(), services, failOnOutOfCapacity, useMaxResources, false,
+ NodeResources.unspecified(), 0,
Optional.empty(), new DeployState.Builder(), retiredHostNames);
}
/** Creates a model which uses 0 as start index */
public VespaModel createModel(String services, boolean failOnOutOfCapacity, boolean useMaxResources, boolean alwaysReturnOneNode, String ... retiredHostNames) {
- return createModel(Zone.defaultZone(), services, failOnOutOfCapacity, useMaxResources, alwaysReturnOneNode, 0,
+ return createModel(Zone.defaultZone(), services, failOnOutOfCapacity, useMaxResources, alwaysReturnOneNode,
+ NodeResources.unspecified(), 0,
Optional.empty(), new DeployState.Builder(), retiredHostNames);
}
/** Creates a model which uses 0 as start index */
- public VespaModel createModel(String services, boolean failOnOutOfCapacity, int startIndexForClusters, String ... retiredHostNames) {
- return createModel(Zone.defaultZone(), services, failOnOutOfCapacity, false, false, startIndexForClusters,
+ public VespaModel createModel(String services, boolean failOnOutOfCapacity, NodeResources defaultResources,
+ int startIndexForClusters, String ... retiredHostNames) {
+ return createModel(Zone.defaultZone(), services, failOnOutOfCapacity, false, false,
+ defaultResources, startIndexForClusters,
Optional.empty(), new DeployState.Builder(), retiredHostNames);
}
/** Creates a model which uses 0 as start index */
public VespaModel createModel(Zone zone, String services, boolean failOnOutOfCapacity, String ... retiredHostNames) {
- return createModel(zone, services, failOnOutOfCapacity, false, false, 0,
+ return createModel(zone, services, failOnOutOfCapacity, false, false, NodeResources.unspecified(), 0,
Optional.empty(), new DeployState.Builder(), retiredHostNames);
}
/** Creates a model which uses 0 as start index */
public VespaModel createModel(Zone zone, String services, boolean failOnOutOfCapacity,
DeployState.Builder deployStateBuilder, String ... retiredHostNames) {
- return createModel(zone, services, failOnOutOfCapacity, false, false, 0,
+ return createModel(zone, services, failOnOutOfCapacity, false, false,
+ NodeResources.unspecified(),0,
Optional.empty(), deployStateBuilder, retiredHostNames);
}
public VespaModel createModel(Zone zone, String services, boolean failOnOutOfCapacity, boolean useMaxResources,
boolean alwaysReturnOneNode,
+ NodeResources defaultResources,
int startIndexForClusters, Optional<VespaModel> previousModel,
DeployState.Builder deployStatebuilder, String ... retiredHostNames) {
return createModel(zone, services, null, failOnOutOfCapacity, useMaxResources, alwaysReturnOneNode,
+ defaultResources,
startIndexForClusters, previousModel, deployStatebuilder, retiredHostNames);
}
/**
@@ -189,6 +198,7 @@ public class VespaModelTester {
*/
public VespaModel createModel(Zone zone, String services, String hosts, boolean failOnOutOfCapacity, boolean useMaxResources,
boolean alwaysReturnOneNode,
+ NodeResources defaultResources,
int startIndexForClusters, Optional<VespaModel> previousModel,
DeployState.Builder deployStatebuilder, String ... retiredHostNames) {
VespaModelCreatorWithMockPkg modelCreatorWithMockPkg = new VespaModelCreatorWithMockPkg(hosts, services, generateSchemas("type1"));
@@ -200,7 +210,7 @@ public class VespaModelTester {
useMaxResources,
alwaysReturnOneNode,
false,
- NodeResources.unspecified(),
+ defaultResources,
startIndexForClusters,
retiredHostNames);
provisioner.setEnvironment(zone.environment());
diff --git a/config-model/src/test/schema-test-files/services-hosted.xml b/config-model/src/test/schema-test-files/services-hosted.xml
index 1246f06c58f..db4f9fa34ab 100644
--- a/config-model/src/test/schema-test-files/services-hosted.xml
+++ b/config-model/src/test/schema-test-files/services-hosted.xml
@@ -25,7 +25,7 @@
<content id="search" version="1.0">
<redundancy>2</redundancy>
<nodes count="7" flavor="large" groups="12" no-vespamalloc="proton distributord">
- <resources vcpu="3.0" memory="32000.0Mb" disk="300 Gb"/>
+ <resources memory="32000.0Mb" disk="300 Gb"/>
</nodes>
</content>
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/NodeResources.java b/config-provisioning/src/main/java/com/yahoo/config/provision/NodeResources.java
index 9ca10091129..36ba12c7cf8 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/NodeResources.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/NodeResources.java
@@ -210,6 +210,10 @@ public class NodeResources {
public Architecture architecture() { return architecture; }
public GpuResources gpuResources() { return gpuResources; }
+ public boolean vcpuIsUnspecified() { return vcpu == 0; }
+ public boolean memoryGbIsUnspecified() { return memoryGb == 0; }
+ public boolean diskGbIsUnspecified() { return diskGb == 0; }
+
/** Returns the standard cost of these resources, in dollars per hour */
public double cost() {
return (vcpu * cpuUnitCost) +
@@ -219,19 +223,16 @@ public class NodeResources {
}
public NodeResources withVcpu(double vcpu) {
- ensureSpecified();
if (vcpu == this.vcpu) return this;
return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType, architecture, gpuResources);
}
public NodeResources withMemoryGb(double memoryGb) {
- ensureSpecified();
if (memoryGb == this.memoryGb) return this;
return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType, architecture, gpuResources);
}
public NodeResources withDiskGb(double diskGb) {
- ensureSpecified();
if (diskGb == this.diskGb) return this;
return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType, architecture, gpuResources);
}
@@ -361,7 +362,7 @@ public class NodeResources {
appendDouble(sb, vcpu);
sb.append(", memory: ");
appendDouble(sb, memoryGb);
- sb.append(" Gb, disk ");
+ sb.append(" Gb, disk: ");
appendDouble(sb, diskGb);
sb.append(" Gb");
if (bandwidthGbps > 0) {
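The accessors added above, together with dropping ensureSpecified() from the with* setters, allow a partially specified NodeResources to be completed dimension by dimension (the model tester below gains a defaultResources parameter along the same lines). A minimal sketch of such a completion step, assuming the usual vcpu(), memoryGb() and diskGb() getters; this is an illustration, not the code in this commit:

import com.yahoo.config.provision.NodeResources;

// Hypothetical completion helper: fill in any unspecified dimensions of a requested
// NodeResources from a default flavor. The vcpu()/memoryGb()/diskGb() getters are
// assumed to exist alongside the *IsUnspecified() accessors added in this patch.
final class ResourceDefaults {

    static NodeResources completeFrom(NodeResources requested, NodeResources defaults) {
        NodeResources result = requested;
        if (requested.vcpuIsUnspecified())     result = result.withVcpu(defaults.vcpu());
        if (requested.memoryGbIsUnspecified()) result = result.withMemoryGb(defaults.memoryGb());
        if (requested.diskGbIsUnspecified())   result = result.withDiskGb(defaults.diskGb());
        return result;
    }
}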
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/Zone.java b/config-provisioning/src/main/java/com/yahoo/config/provision/Zone.java
index 17010fe3fd3..0b56d811712 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/Zone.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/Zone.java
@@ -27,7 +27,7 @@ public class Zone {
.name(CloudName.from(configserverConfig.cloud()))
.dynamicProvisioning(cloudConfig.dynamicProvisioning())
.allowHostSharing(cloudConfig.allowHostSharing())
- .allowEnclave(cloudConfig.dynamicProvisioning())
+ .allowEnclave(configserverConfig.cloud().equals("aws") || configserverConfig.cloud().equals("gcp"))
.requireAccessControl(cloudConfig.requireAccessControl())
.account(CloudAccount.from(cloudConfig.account()))
.build(),
diff --git a/config-provisioning/src/test/java/com/yahoo/config/provision/NodeResourcesTest.java b/config-provisioning/src/test/java/com/yahoo/config/provision/NodeResourcesTest.java
index ae052c03556..230d28e04aa 100644
--- a/config-provisioning/src/test/java/com/yahoo/config/provision/NodeResourcesTest.java
+++ b/config-provisioning/src/test/java/com/yahoo/config/provision/NodeResourcesTest.java
@@ -6,6 +6,8 @@ import org.junit.jupiter.api.Test;
import java.util.function.Supplier;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
/**
@@ -20,11 +22,11 @@ public class NodeResourcesTest {
@Test
void testToString() {
- assertEquals("[vcpu: 1.0, memory: 10.0 Gb, disk 100.0 Gb, architecture: any]",
+ assertEquals("[vcpu: 1.0, memory: 10.0 Gb, disk: 100.0 Gb, architecture: any]",
new NodeResources(1., 10., 100., 0).toString());
- assertEquals("[vcpu: 0.3, memory: 3.3 Gb, disk 33.3 Gb, bandwidth: 0.3 Gbps, architecture: any]",
+ assertEquals("[vcpu: 0.3, memory: 3.3 Gb, disk: 33.3 Gb, bandwidth: 0.3 Gbps, architecture: any]",
new NodeResources(1 / 3., 10 / 3., 100 / 3., 0.3).toString());
- assertEquals("[vcpu: 0.7, memory: 9.0 Gb, disk 66.7 Gb, bandwidth: 0.7 Gbps, architecture: any]",
+ assertEquals("[vcpu: 0.7, memory: 9.0 Gb, disk: 66.7 Gb, bandwidth: 0.7 Gbps, architecture: any]",
new NodeResources(2 / 3., 8.97, 200 / 3., 0.67).toString());
}
@@ -37,6 +39,33 @@ public class NodeResourcesTest {
}
@Test
+ void testSatisfies() {
+ var hostResources = new NodeResources(1, 2, 3, 1);
+ assertTrue(hostResources.satisfies(new NodeResources(1, 2, 3, 1)));
+ assertTrue(hostResources.satisfies(new NodeResources(1, 1, 1, 1)));
+ assertFalse(hostResources.satisfies(new NodeResources(2, 2, 3, 1)));
+ assertFalse(hostResources.satisfies(new NodeResources(1, 3, 3, 1)));
+ assertFalse(hostResources.satisfies(new NodeResources(1, 2, 4, 1)));
+
+ var gpuHostResources = new NodeResources(1, 2, 3, 1,
+ NodeResources.DiskSpeed.fast,
+ NodeResources.StorageType.local,
+ NodeResources.Architecture.x86_64,
+ new NodeResources.GpuResources(1, 16));
+ assertTrue(gpuHostResources.satisfies(new NodeResources(1, 2, 3, 1,
+ NodeResources.DiskSpeed.fast,
+ NodeResources.StorageType.local,
+ NodeResources.Architecture.x86_64,
+ new NodeResources.GpuResources(1, 16))));
+ assertFalse(gpuHostResources.satisfies(new NodeResources(1, 2, 3, 1,
+ NodeResources.DiskSpeed.fast,
+ NodeResources.StorageType.local,
+ NodeResources.Architecture.x86_64,
+ new NodeResources.GpuResources(1, 32))));
+ assertFalse(hostResources.satisfies(gpuHostResources));
+ }
+
+ @Test
void benchmark() {
NodeResources [] resouces = new NodeResources[100];
for (int i = 0; i < resouces.length; i++) {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
index 59ddb8b68ab..efdcaeec3aa 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
@@ -206,6 +206,7 @@ public class ModelContextImpl implements ModelContext {
private final String summaryDecodePolicy;
private final Predicate<ClusterSpec.Id> allowMoreThanOneContentGroupDown;
private final boolean enableConditionalPutRemoveWriteRepair;
+ private final boolean enableDataplaneProxy;
public FeatureFlags(FlagSource source, ApplicationId appId, Version version) {
this.defaultTermwiseLimit = flagValue(source, appId, version, Flags.DEFAULT_TERM_WISE_LIMIT);
@@ -254,6 +255,7 @@ public class ModelContextImpl implements ModelContext {
this.summaryDecodePolicy = flagValue(source, appId, version, Flags.SUMMARY_DECODE_POLICY);
this.allowMoreThanOneContentGroupDown = clusterId -> flagValue(source, appId, version, clusterId, Flags.ALLOW_MORE_THAN_ONE_CONTENT_GROUP_DOWN);
this.enableConditionalPutRemoveWriteRepair = flagValue(source, appId, version, Flags.ENABLE_CONDITIONAL_PUT_REMOVE_WRITE_REPAIR);
+ this.enableDataplaneProxy = flagValue(source, appId, version, Flags.ENABLE_DATAPLANE_PROXY);
}
@Override public int heapSizePercentage() { return heapPercentage; }
@@ -310,6 +312,7 @@ public class ModelContextImpl implements ModelContext {
@Override public boolean enableGlobalPhase() { return enableGlobalPhase; }
@Override public boolean allowMoreThanOneContentGroupDown(ClusterSpec.Id id) { return allowMoreThanOneContentGroupDown.test(id); }
@Override public boolean enableConditionalPutRemoveWriteRepair() { return enableConditionalPutRemoveWriteRepair; }
+ @Override public boolean enableDataplaneProxy() { return enableDataplaneProxy; }
private static <V> V flagValue(FlagSource source, ApplicationId appId, Version vespaVersion, UnboundFlag<? extends V, ?, ?> flag) {
return flag.bindTo(source)
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/provision/ProvisionerAdapter.java b/configserver/src/main/java/com/yahoo/vespa/config/server/provision/ProvisionerAdapter.java
index 29cd44b57a4..4d1f058b005 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/provision/ProvisionerAdapter.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/provision/ProvisionerAdapter.java
@@ -35,7 +35,9 @@ public class ProvisionerAdapter implements HostProvisioner {
@Override
public HostSpec allocateHost(String alias) {
// TODO: Remove this method since hosted/non-hosted needs different interfaces. See also ModelContextImpl.getHostProvisioner
- throw new UnsupportedOperationException("Clusters in hosted environments must have a <nodes count='N'> tag " +
+ // Illegal argument because we end up here by following a path that is not suitable for the environment,
+ // steered by the content of the nodes tag in services
+ throw new IllegalArgumentException("Clusters in hosted environments must have a <nodes count='N'> tag " +
"matching all zones, and having no <node> subtags, " +
"see https://cloud.vespa.ai/en/reference/services");
}
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpRequestDispatch.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpRequestDispatch.java
index f9a2e3bc1a6..b92a723fac8 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpRequestDispatch.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpRequestDispatch.java
@@ -163,7 +163,8 @@ class HttpRequestDispatch {
double maxConnectionLifeInSeconds = connectorConfig.maxConnectionLife();
if (maxConnectionLifeInSeconds > 0) {
long createdAt = connection.getCreatedTimeStamp();
- Instant expiredAt = Instant.ofEpochMilli((long) (createdAt + maxConnectionLifeInSeconds * 1000));
+ long tenPctVariance = connection.hashCode() % 10; // should be random enough, and must be consistent for a given connection
+ Instant expiredAt = Instant.ofEpochMilli((long) (createdAt + maxConnectionLifeInSeconds * 10 * (100 - tenPctVariance)));
boolean isExpired = Instant.now().isAfter(expiredAt);
if (isExpired) {
gracefulShutdown(connection);
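The hunk above staggers connection expiry: each connection keeps between roughly 91% and 100% of maxConnectionLife, with the fraction derived from its hash code, so connections opened at the same time do not all expire in the same instant. A self-contained sketch of that arithmetic (Math.floorMod is an assumption here, chosen to keep the variance in 0..9 even for negative hash codes):

import java.time.Instant;

// Standalone sketch of the expiry computation: the configured lifetime is shortened
// by a per-connection 0-9% factor, so a batch of connections created together drains
// gradually instead of all at once.
final class ConnectionExpirySketch {

    static Instant expiryOf(long createdAtMillis, double maxLifeSeconds, int connectionHash) {
        long variancePct = Math.floorMod(connectionHash, 10);            // 0..9
        double lifeMillis = maxLifeSeconds * 10 * (100 - variancePct);   // 91%..100% of maxLife, in ms
        return Instant.ofEpochMilli((long) (createdAtMillis + lifeMillis));
    }

    public static void main(String[] args) {
        Instant expiry = expiryOf(System.currentTimeMillis(), 3600, "conn-1".hashCode());
        System.out.println("connection would expire at " + expiry);
    }
}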
diff --git a/container-search/src/main/java/com/yahoo/prelude/statistics/StatisticsSearcher.java b/container-search/src/main/java/com/yahoo/prelude/statistics/StatisticsSearcher.java
index 04857e982a9..1702b1f4804 100644
--- a/container-search/src/main/java/com/yahoo/prelude/statistics/StatisticsSearcher.java
+++ b/container-search/src/main/java/com/yahoo/prelude/statistics/StatisticsSearcher.java
@@ -4,7 +4,6 @@ package com.yahoo.prelude.statistics;
import ai.vespa.metrics.ContainerMetrics;
import com.yahoo.component.chain.dependencies.Before;
import com.yahoo.concurrent.CopyOnWriteHashMap;
-import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.jdisc.Metric;
import com.yahoo.metrics.simple.MetricReceiver;
import com.yahoo.metrics.simple.MetricSettings;
@@ -25,6 +24,7 @@ import java.util.Map;
import java.util.Optional;
import java.util.PriorityQueue;
import java.util.Queue;
+import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import static com.yahoo.container.protect.Error.BACKEND_COMMUNICATION_ERROR;
@@ -224,7 +224,9 @@ public class StatisticsSearcher extends Searcher {
incrQueryCount(metricContext);
logQuery(query);
- long start_ns = getStartNanoTime(query);
+ // Timestamp when request was initially processed by Jetty
+ long startMs = Optional.ofNullable(query.getHttpRequest())
+ .map(r -> r.creationTime(TimeUnit.MILLISECONDS)).orElseGet(System::currentTimeMillis);
qps(metricContext);
metric.set(QUERY_TIMEOUT_METRIC, query.getTimeout(), metricContext);
Result result;
@@ -236,14 +238,14 @@ public class StatisticsSearcher extends Searcher {
throw e;
}
- long end_ns = System.nanoTime(); // End time, in nanoseconds
- long latency_ns = end_ns - start_ns;
- if (latency_ns >= 0) {
- addLatency(latency_ns, metricContext);
+ long endMs = System.currentTimeMillis();
+ long latencyMs = endMs - startMs;
+ if (latencyMs >= 0) {
+ addLatency(latencyMs, metricContext);
} else {
getLogger().log(Level.WARNING,
- "Apparently negative latency measure, start: " + start_ns
- + ", end: " + end_ns + ", for query: " + query);
+ "Apparently negative latency measure, start: " + startMs
+ + ", end: " + endMs + ", for query: " + query + ". Could be caused by NTP adjustments.");
}
if (result.hits().getError() != null) {
incrErrorCount(result, metricContext);
@@ -284,11 +286,10 @@ public class StatisticsSearcher extends Searcher {
}
}
- private void addLatency(long latency_ns, Metric.Context metricContext) {
- double latency = 0.000001 * latency_ns;
- metric.set(QUERY_LATENCY_METRIC, latency, metricContext);
- metric.set(MEAN_QUERY_LATENCY_METRIC, latency, metricContext);
- metric.set(MAX_QUERY_LATENCY_METRIC, latency, metricContext);
+ private void addLatency(long latencyMs, Metric.Context metricContext) {
+ metric.set(QUERY_LATENCY_METRIC, (double) latencyMs, metricContext);
+ metric.set(MEAN_QUERY_LATENCY_METRIC, (double) latencyMs, metricContext);
+ metric.set(MAX_QUERY_LATENCY_METRIC, (double) latencyMs, metricContext);
}
private void incrQueryCount(Metric.Context metricContext) {
@@ -409,14 +410,5 @@ public class StatisticsSearcher extends Searcher {
metric.set(QUERY_ITEM_COUNT, query.getModel().getQueryTree().treeSize(), context);
}
- /**
- * Returns the relative start time from request was received by jdisc
- */
- private static long getStartNanoTime(Query query) {
- return Optional.ofNullable(query.getHttpRequest())
- .map(HttpRequest::relativeCreatedAtNanoTime)
- .orElseGet(System::nanoTime);
- }
-
}
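With this change the searcher's latency metric measures wall-clock milliseconds from the instant Jetty created the request until the result is available, rather than a nanotime interval started inside the searcher. A minimal sketch of that measurement, using a stand-in Request interface that mirrors the creationTime accessor used above:

import java.util.Optional;
import java.util.concurrent.TimeUnit;

// Sketch only: latency is the wall-clock span from request creation to "now",
// falling back to the current time when no HTTP request is attached (e.g. an
// internally issued query), which yields a latency of ~0 rather than a negative value.
final class LatencySketch {

    interface Request { long creationTime(TimeUnit unit); }

    static long latencyMs(Optional<Request> httpRequest) {
        long startMs = httpRequest.map(r -> r.creationTime(TimeUnit.MILLISECONDS))
                                  .orElseGet(System::currentTimeMillis);
        return System.currentTimeMillis() - startMs;
    }

    public static void main(String[] args) {
        System.out.println("fallback latency (no request attached): " + latencyMs(Optional.empty()) + " ms");
    }
}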
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationVersion.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationVersion.java
index eb2005bf268..29e1d494ffc 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationVersion.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationVersion.java
@@ -35,12 +35,14 @@ public class ApplicationVersion implements Comparable<ApplicationVersion> {
private final boolean hasPackage;
private final boolean shouldSkip;
private final Optional<String> description;
+ private final Optional<Instant> submittedAt;
private final int risk;
public ApplicationVersion(RevisionId id, Optional<SourceRevision> source, Optional<String> authorEmail,
Optional<Version> compileVersion, Optional<Integer> allowedMajor, Optional<Instant> buildTime,
Optional<String> sourceUrl, Optional<String> commit, Optional<String> bundleHash,
- Optional<Instant> obsoleteAt, boolean hasPackage, boolean shouldSkip, Optional<String> description, int risk) {
+ Optional<Instant> obsoleteAt, boolean hasPackage, boolean shouldSkip, Optional<String> description,
+ Optional<Instant> submittedAt, int risk) {
if (commit.isPresent() && commit.get().length() > 128)
throw new IllegalArgumentException("Commit may not be longer than 128 characters");
@@ -64,6 +66,7 @@ public class ApplicationVersion implements Comparable<ApplicationVersion> {
this.hasPackage = hasPackage;
this.shouldSkip = shouldSkip;
this.description = description;
+ this.submittedAt = requireNonNull(submittedAt);
this.risk = requireAtLeast(risk, "application build risk", 0);
}
@@ -73,15 +76,18 @@ public class ApplicationVersion implements Comparable<ApplicationVersion> {
/** Creates a minimal version for a development build. */
public static ApplicationVersion forDevelopment(RevisionId id, Optional<Version> compileVersion, Optional<Integer> allowedMajor) {
- return new ApplicationVersion(id, Optional.empty(), Optional.empty(), compileVersion, allowedMajor, Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), true, false, Optional.empty(), 0);
+ return new ApplicationVersion(id, Optional.empty(), Optional.empty(), compileVersion, allowedMajor, Optional.empty(),
+ Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), true, false,
+ Optional.empty(), Optional.empty(), 0);
}
/** Creates a version from a completed build, an author email, and build metadata. */
public static ApplicationVersion forProduction(RevisionId id, Optional<SourceRevision> source, Optional<String> authorEmail,
Optional<Version> compileVersion, Optional<Integer> allowedMajor, Optional<Instant> buildTime, Optional<String> sourceUrl,
- Optional<String> commit, Optional<String> bundleHash, Optional<String> description, int risk) {
+ Optional<String> commit, Optional<String> bundleHash, Optional<String> description, Instant submittedAt, int risk) {
return new ApplicationVersion(id, source, authorEmail, compileVersion, allowedMajor, buildTime,
- sourceUrl, commit, bundleHash, Optional.empty(), true, false, description, risk);
+ sourceUrl, commit, bundleHash, Optional.empty(), true, false,
+ description, Optional.of(submittedAt), risk);
}
/** Returns a unique identifier for this version or "unknown" if version is not known */
@@ -140,12 +146,12 @@ public class ApplicationVersion implements Comparable<ApplicationVersion> {
/** Returns a copy of this without a package stored. */
public ApplicationVersion withoutPackage() {
- return new ApplicationVersion(id, source, authorEmail, compileVersion, allowedMajor, buildTime, sourceUrl, commit, bundleHash, obsoleteAt, false, shouldSkip, description, risk);
+ return new ApplicationVersion(id, source, authorEmail, compileVersion, allowedMajor, buildTime, sourceUrl, commit, bundleHash, obsoleteAt, false, shouldSkip, description, submittedAt, risk);
}
/** Returns a copy of this which is obsolete now. */
public ApplicationVersion obsoleteAt(Instant now) {
- return new ApplicationVersion(id, source, authorEmail, compileVersion, allowedMajor, buildTime, sourceUrl, commit, bundleHash, Optional.of(now), hasPackage, shouldSkip, description, risk);
+ return new ApplicationVersion(id, source, authorEmail, compileVersion, allowedMajor, buildTime, sourceUrl, commit, bundleHash, Optional.of(now), hasPackage, shouldSkip, description, submittedAt, risk);
}
/** Returns the instant at which this became obsolete, i.e., no longer relevant for automated deployments. */
@@ -160,7 +166,7 @@ public class ApplicationVersion implements Comparable<ApplicationVersion> {
/** Returns a copy of this which will not be rolled out to production. */
public ApplicationVersion skipped() {
- return new ApplicationVersion(id, source, authorEmail, compileVersion, allowedMajor, buildTime, sourceUrl, commit, bundleHash, obsoleteAt, hasPackage, true, description, risk);
+ return new ApplicationVersion(id, source, authorEmail, compileVersion, allowedMajor, buildTime, sourceUrl, commit, bundleHash, obsoleteAt, hasPackage, true, description, submittedAt, risk);
}
/** Whether we still have the package for this revision. */
@@ -178,6 +184,11 @@ public class ApplicationVersion implements Comparable<ApplicationVersion> {
return description;
}
+ /** Instant at which this version was submitted to the build system. */
+ public Optional<Instant> submittedAt() {
+ return submittedAt;
+ }
+
/** The assumed risk of rolling out this revision, relative to the previous. */
public int risk() {
return risk;
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java
index 91ece6733e1..ac896338643 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java
@@ -645,10 +645,16 @@ public class DeploymentStatus {
Optional<Instant> platformReadyAt = step.dependenciesCompletedAt(change.withoutApplication(), Optional.of(job));
Optional<Instant> revisionReadyAt = step.dependenciesCompletedAt(change.withoutPlatform(), Optional.of(job));
+ boolean failingUpgradeOnlyTests = ! jobs().type(systemTest(job.type()), stagingTest(job.type()))
+ .failingHardOn(Versions.from(change.withoutApplication(), application, deploymentFor(job), () -> systemVersion))
+ .isEmpty();
+
// If neither change is ready, we guess based on the specified rollout.
if (platformReadyAt.isEmpty() && revisionReadyAt.isEmpty()) {
return switch (rollout) {
- case separate -> List.of(change.withoutApplication(), change); // Platform should stay ahead.
+ case separate -> ! failingUpgradeOnlyTests
+ ? List.of(change.withoutApplication(), change) // Platform should stay ahead ...
+ : List.of(change); // ... unless upgrade-only is failing tests.
case leading -> List.of(change); // They should eventually join.
case simultaneous -> List.of(change.withoutPlatform(), change); // Revision should get ahead.
};
@@ -663,9 +669,6 @@ public class DeploymentStatus {
// Both changes are ready for this step, and we look to the specified rollout to decide.
boolean platformReadyFirst = platformReadyAt.get().isBefore(revisionReadyAt.get());
boolean revisionReadyFirst = revisionReadyAt.get().isBefore(platformReadyAt.get());
- boolean failingUpgradeOnlyTests = ! jobs().type(systemTest(job.type()), stagingTest(job.type()))
- .failingHardOn(Versions.from(change.withoutApplication(), application, deploymentFor(job), () -> systemVersion))
- .isEmpty();
return switch (rollout) {
case separate -> // Let whichever change rolled out first, keep rolling first, unless upgrade alone is failing.
(platformReadyFirst || platformReadyAt.get().equals(Instant.EPOCH)) // Assume platform was first if no jobs have run yet.
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RevisionHistory.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RevisionHistory.java
index 272417ba0ac..72fff51d6b2 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RevisionHistory.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RevisionHistory.java
@@ -93,7 +93,7 @@ public class RevisionHistory {
// Fallback for when an application version isn't known for the given key.
private static ApplicationVersion revisionOf(RevisionId id) {
- return new ApplicationVersion(id, Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), false, false, Optional.empty(), 0);
+ return new ApplicationVersion(id, Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), false, false, Optional.empty(), Optional.empty(), 0);
}
/** Returns the production {@link ApplicationVersion} with this revision ID. */
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/Submission.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/Submission.java
index 6c9de2fd584..e59b4eb0a07 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/Submission.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/Submission.java
@@ -5,8 +5,8 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.RevisionId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.SourceRevision;
import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
+import java.time.Instant;
import java.util.Optional;
-import java.util.OptionalInt;
import static com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage.calculateHash;
@@ -21,23 +21,22 @@ public class Submission {
private final Optional<SourceRevision> source;
private final Optional<String> authorEmail;
private final Optional<String> description;
+ private final Instant now;
private final int risk;
public Submission(ApplicationPackage applicationPackage, byte[] testPackage, Optional<String> sourceUrl,
- Optional<SourceRevision> source, Optional<String> authorEmail, Optional<String> description, int risk) {
+ Optional<SourceRevision> source, Optional<String> authorEmail, Optional<String> description,
+ Instant now, int risk) {
this.applicationPackage = applicationPackage;
this.testPackage = testPackage;
this.sourceUrl = sourceUrl;
this.source = source;
this.authorEmail = authorEmail;
this.description = description;
+ this.now = now;
this.risk = risk;
}
- public static Submission basic(ApplicationPackage applicationPackage, byte[] testPackage) {
- return new Submission(applicationPackage, testPackage, Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), 0);
- }
-
public ApplicationVersion toApplicationVersion(long number) {
return ApplicationVersion.forProduction(RevisionId.forProduction(number),
source,
@@ -49,6 +48,7 @@ public class Submission {
source.map(SourceRevision::commit),
Optional.of(applicationPackage.bundleHash() + calculateHash(testPackage)),
description,
+ now,
risk);
}
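
The constructor above now takes the submission instant from the caller (the controller clock) instead of defaulting it. A minimal, self-contained sketch of that pattern, using hypothetical names rather than the classes in this repository:

import java.time.Clock;
import java.time.Instant;

// Illustrative only: the creation time is injected through a Clock by the caller,
// so production code passes the real clock and tests can pass a fixed one.
record SubmissionSketch(String description, Instant submittedAt, int risk) {

    static SubmissionSketch at(Clock clock, String description, int risk) {
        return new SubmissionSketch(description, clock.instant(), risk);
    }

    public static void main(String[] args) {
        System.out.println(SubmissionSketch.at(Clock.systemUTC(), "example submission", 0));
    }
}
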
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java
index e5006ab9785..9890a5b361b 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java
@@ -116,6 +116,7 @@ public class ApplicationSerializer {
private static final String branchField = "branchField";
private static final String commitField = "commitField";
private static final String descriptionField = "description";
+ private static final String submittedAtField = "submittedAt";
private static final String riskField = "risk";
private static final String authorEmailField = "authorEmailField";
private static final String deployedDirectlyField = "deployedDirectly";
@@ -271,6 +272,7 @@ public class ApplicationSerializer {
object.setBool(hasPackageField, applicationVersion.hasPackage());
object.setBool(shouldSkipField, applicationVersion.shouldSkip());
applicationVersion.description().ifPresent(description -> object.setString(descriptionField, description));
+ applicationVersion.submittedAt().ifPresent(at -> object.setLong(submittedAtField, at.toEpochMilli()));
if (applicationVersion.risk() != 0) object.setLong(riskField, applicationVersion.risk());
applicationVersion.bundleHash().ifPresent(bundleHash -> object.setString(bundleHashField, bundleHash));
}
@@ -496,11 +498,12 @@ public class ApplicationSerializer {
boolean hasPackage = object.field(hasPackageField).asBool();
boolean shouldSkip = object.field(shouldSkipField).asBool();
Optional<String> description = SlimeUtils.optionalString(object.field(descriptionField));
+ Optional<Instant> submittedAt = SlimeUtils.optionalInstant(object.field(submittedAtField));
int risk = (int) object.field(riskField).asLong();
Optional<String> bundleHash = SlimeUtils.optionalString(object.field(bundleHashField));
return new ApplicationVersion(id, sourceRevision, authorEmail, compileVersion, allowedMajor, buildTime,
- sourceUrl, commit, bundleHash, obsoleteAt, hasPackage, shouldSkip, description, risk);
+ sourceUrl, commit, bundleHash, obsoleteAt, hasPackage, shouldSkip, description, submittedAt, risk);
}
private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
index f7cca7d1015..b16fadfd230 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
@@ -2428,6 +2428,8 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
if ( ! type.environment().isManuallyDeployed() && ! (isOperator(request) || controller.system().isCd()))
throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");
+ controller.applications().verifyPlan(id.tenant());
+
Map<String, byte[]> dataParts = parseDataParts(request);
if ( ! dataParts.containsKey("applicationZip"))
throw new IllegalArgumentException("Missing required form part 'applicationZip'");
@@ -3047,6 +3049,9 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
}
private HttpResponse submit(String tenant, String application, HttpRequest request) {
+ TenantName tenantName = TenantName.from(tenant);
+ controller.applications().verifyPlan(tenantName);
+
Map<String, byte[]> dataParts = parseDataParts(request);
Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
long projectId = submitOptions.field("projectId").asLong(); // Absence of this means it's not a prod app :/
@@ -3070,10 +3075,8 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
ApplicationPackage applicationPackage =
new ApplicationPackage(dataParts.get(APPLICATION_ZIP), true, controller.system().isPublic());
byte[] testPackage = dataParts.getOrDefault(APPLICATION_TEST_ZIP, new byte[0]);
- Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk);
+ Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, controller.clock().instant(), risk);
- TenantName tenantName = TenantName.from(tenant);
- controller.applications().verifyPlan(tenantName);
controller.applications().verifyApplicationIdentityConfiguration(tenantName,
Optional.empty(),
Optional.empty(),
@@ -3089,7 +3092,7 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
TenantAndApplicationId.from(tenant, application),
new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], Optional.empty(),
- Optional.empty(), Optional.empty(), Optional.empty(), 0),
+ Optional.empty(), Optional.empty(), Optional.empty(), controller.clock().instant(), 0),
0);
return new MessageResponse("All deployments removed");
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java
index 7f2b8c8a924..2f93ce999cd 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java
@@ -416,6 +416,7 @@ class JobControllerApiHandlerHelper {
version.description().ifPresent(description -> versionObject.setString("description", description));
if (version.risk() != 0) versionObject.setLong("risk", version.risk());
versionObject.setBool("deployable", version.isDeployable());
+ version.submittedAt().ifPresent(submittedAt -> versionObject.setLong("submittedAt", submittedAt.toEpochMilli()));
}
static void toSlime(Cursor versionObject, ApplicationVersion version) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/certificate/EndpointCertificatesHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/certificate/EndpointCertificatesHandler.java
index a00e80b17d4..9bee5623774 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/certificate/EndpointCertificatesHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/certificate/EndpointCertificatesHandler.java
@@ -42,7 +42,7 @@ public class EndpointCertificatesHandler extends ThreadedHttpRequestHandler {
public HttpResponse handle(HttpRequest request) {
if (request.getMethod().equals(GET)) return listEndpointCertificates();
- if (request.getMethod().equals(POST)) return reRequestEndpointCertificateFor(request.getProperty("application"));
+ if (request.getMethod().equals(POST)) return reRequestEndpointCertificateFor(request.getProperty("application"), request.getProperty("ignoreExistingMetadata") != null);
throw new RestApiException.MethodNotAllowed(request);
}
@@ -59,7 +59,7 @@ public class EndpointCertificatesHandler extends ThreadedHttpRequestHandler {
return new StringResponse(requestsWithNames);
}
- public StringResponse reRequestEndpointCertificateFor(String instanceId) {
+ public StringResponse reRequestEndpointCertificateFor(String instanceId, boolean ignoreExistingMetadata) {
ApplicationId applicationId = ApplicationId.fromFullString(instanceId);
try (var lock = curator.lock(TenantAndApplicationId.from(applicationId))) {
@@ -67,7 +67,7 @@ public class EndpointCertificatesHandler extends ThreadedHttpRequestHandler {
.orElseThrow(() -> new RestApiException.NotFound("No certificate found for application " + applicationId.serializedForm()));
EndpointCertificateMetadata reRequestedMetadata = endpointCertificateProvider.requestCaSignedCertificate(
- applicationId, endpointCertificateMetadata.requestedDnsSans(), Optional.of(endpointCertificateMetadata));
+ applicationId, endpointCertificateMetadata.requestedDnsSans(), ignoreExistingMetadata ? Optional.empty() : Optional.of(endpointCertificateMetadata));
curator.writeEndpointCertificateMetadata(applicationId, reRequestedMetadata);
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java
index 04c8c46e1ef..693a74f8651 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java
@@ -109,7 +109,7 @@ public class ControllerTest {
context.submit(applicationPackage);
RevisionId id = RevisionId.forProduction(1);
Version compileVersion = new Version("6.1");
- assertEquals(new ApplicationVersion(id, Optional.of(DeploymentContext.defaultSourceRevision), Optional.of("a@b"), Optional.of(compileVersion), Optional.empty(), Optional.of(Instant.ofEpochSecond(1)), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), true, false, Optional.empty(), 0),
+ assertEquals(new ApplicationVersion(id, Optional.of(DeploymentContext.defaultSourceRevision), Optional.of("a@b"), Optional.of(compileVersion), Optional.empty(), Optional.of(Instant.ofEpochSecond(1)), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), true, false, Optional.empty(), Optional.empty(), 0),
context.application().revisions().get(context.instance().change().revision().get()),
"Application version is known from completion of initial job");
context.runJob(systemTest);
@@ -1339,7 +1339,7 @@ public class ControllerTest {
ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml);
byte[] testPackage = ApplicationPackage.filesZip(Map.of("tests/staging-test/foo.json", new byte[0]));
var app = tester.newDeploymentContext();
- tester.jobs().submit(app.application().id(), Submission.basic(applicationPackage, testPackage), 1);
+ tester.jobs().submit(app.application().id(), new Submission(applicationPackage, testPackage, Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Instant.EPOCH, 0), 1);
assertEquals(List.of(new Notification(tester.clock().instant(),
Type.testPackage,
Level.warning,
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java
index d32ec03469e..da982fa67a8 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java
@@ -318,7 +318,7 @@ public class DeploymentContext {
.projectId()
.orElse(1000); // These are really set through submission, so just pick one if it hasn't been set.
var testerpackage = new byte[]{ (byte) (salt >> 56), (byte) (salt >> 48), (byte) (salt >> 40), (byte) (salt >> 32), (byte) (salt >> 24), (byte) (salt >> 16), (byte) (salt >> 8), (byte) salt };
- lastSubmission = jobs.submit(applicationId, new Submission(applicationPackage, testerpackage, Optional.empty(), sourceRevision, Optional.of("a@b"), Optional.empty(), risk), projectId).id();
+ lastSubmission = jobs.submit(applicationId, new Submission(applicationPackage, testerpackage, Optional.empty(), sourceRevision, Optional.of("a@b"), Optional.empty(), tester.clock().instant(), risk), projectId).id();
return this;
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/JobRunnerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/JobRunnerTest.java
index f761c605f9d..b0fe2867ab7 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/JobRunnerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/JobRunnerTest.java
@@ -24,6 +24,7 @@ import com.yahoo.vespa.hosted.controller.integration.MetricsMock;
import org.junit.jupiter.api.Test;
import java.time.Duration;
+import java.time.Instant;
import java.util.Collections;
import java.util.EnumMap;
import java.util.List;
@@ -92,7 +93,7 @@ public class JobRunnerTest {
TenantAndApplicationId appId = tester.createApplication("tenant", "real", "default").id();
ApplicationId id = appId.defaultInstance();
byte[] testPackageBytes = new byte[0];
- jobs.submit(appId, Submission.basic(applicationPackage, testPackageBytes), 2);
+ jobs.submit(appId, submission(applicationPackage, testPackageBytes), 2);
start(jobs, id, systemTest);
try {
start(jobs, id, systemTest);
@@ -128,7 +129,7 @@ public class JobRunnerTest {
TenantAndApplicationId appId = tester.createApplication("tenant", "real", "default").id();
ApplicationId id = appId.defaultInstance();
byte[] testPackageBytes = new byte[0];
- jobs.submit(appId, Submission.basic(applicationPackage, testPackageBytes), 2);
+ jobs.submit(appId, submission(applicationPackage, testPackageBytes), 2);
Supplier<Run> run = () -> jobs.last(id, systemTest).get();
start(jobs, id, systemTest);
@@ -236,7 +237,7 @@ public class JobRunnerTest {
TenantAndApplicationId appId = tester.createApplication("tenant", "real", "default").id();
ApplicationId id = appId.defaultInstance();
byte[] testPackageBytes = new byte[0];
- jobs.submit(appId, Submission.basic(applicationPackage, testPackageBytes), 2);
+ jobs.submit(appId, submission(applicationPackage, testPackageBytes), 2);
RunId runId = new RunId(id, systemTest, 1);
start(jobs, id, systemTest);
@@ -276,7 +277,7 @@ public class JobRunnerTest {
ApplicationId instanceId = appId.defaultInstance();
JobId jobId = new JobId(instanceId, systemTest);
byte[] testPackageBytes = new byte[0];
- jobs.submit(appId, Submission.basic(applicationPackage, testPackageBytes), 2);
+ jobs.submit(appId, submission(applicationPackage, testPackageBytes), 2);
assertFalse(jobs.lastSuccess(jobId).isPresent());
for (int i = 0; i < jobs.historyLength(); i++) {
@@ -372,7 +373,7 @@ public class JobRunnerTest {
TenantAndApplicationId appId = tester.createApplication("tenant", "real", "default").id();
ApplicationId id = appId.defaultInstance();
byte[] testPackageBytes = new byte[0];
- jobs.submit(appId, Submission.basic(applicationPackage, testPackageBytes), 2);
+ jobs.submit(appId, submission(applicationPackage, testPackageBytes), 2);
start(jobs, id, systemTest);
tester.clock().advance(JobRunner.jobTimeout.plus(Duration.ofSeconds(1)));
@@ -390,7 +391,7 @@ public class JobRunnerTest {
TenantAndApplicationId appId = tester.createApplication("tenant", "real", "default").id();
ApplicationId id = appId.defaultInstance();
byte[] testPackageBytes = new byte[0];
- jobs.submit(appId, Submission.basic(applicationPackage, testPackageBytes), 2);
+ jobs.submit(appId, submission(applicationPackage, testPackageBytes), 2);
for (Step step : JobProfile.of(systemTest).steps())
outcomes.put(step, running);
@@ -475,4 +476,8 @@ public class JobRunnerTest {
};
}
+ private static Submission submission(ApplicationPackage applicationPackage, byte[] testPackage) {
+ return new Submission(applicationPackage, testPackage, Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Instant.EPOCH, 0);
+ }
+
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
index 96c1d7c545d..f1e8697cf41 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
@@ -170,7 +170,10 @@ public class UpgraderTest {
// --- Failing application is repaired by changing the application, causing confidence to move above 'high' threshold
// Deploy application change
default0.submit(applicationPackage("default"));
- default0.deploy();
+ default0.runJob(systemTest)
+ .jobAborted(stagingTest) // New revision causes run with failing upgrade alone to be aborted.
+ .runJob(stagingTest)
+ .deploy();
tester.controllerTester().computeVersionStatus();
assertEquals(VespaVersion.Confidence.high, tester.controller().readVersionStatus().systemVersion().get().confidence());
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java
index b71d3cf838b..69b473dce87 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java
@@ -105,13 +105,14 @@ public class ApplicationSerializerTest {
true,
false,
Optional.of("~(˘▾˘)~"),
+ Optional.of(Instant.ofEpochMilli(496)),
3);
assertEquals("https://github/org/repo/tree/commit1", applicationVersion1.sourceUrl().get());
RevisionId id = RevisionId.forDevelopment(31, new JobId(id1, DeploymentContext.productionUsEast3));
SourceRevision source = new SourceRevision("repo1", "branch1", "commit1");
Version compileVersion = Version.fromString("6.3.1");
- ApplicationVersion applicationVersion2 = new ApplicationVersion(id, Optional.of(source), Optional.of("a@b"), Optional.of(compileVersion), Optional.empty(), Optional.of(Instant.ofEpochMilli(496)), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), true, false, Optional.empty(), 0);
+ ApplicationVersion applicationVersion2 = new ApplicationVersion(id, Optional.of(source), Optional.of("a@b"), Optional.of(compileVersion), Optional.empty(), Optional.of(Instant.ofEpochMilli(496)), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), true, false, Optional.empty(), Optional.empty(), 0);
Instant activityAt = Instant.parse("2018-06-01T10:15:30.00Z");
deployments.add(new Deployment(zone1, CloudAccount.empty, applicationVersion1.id(), Version.fromString("1.2.3"), Instant.ofEpochMilli(3),
DeploymentMetrics.none, DeploymentActivity.none, QuotaUsage.none, OptionalDouble.empty()));
@@ -179,6 +180,7 @@ public class ApplicationSerializerTest {
assertEquals(original.revisions().last().get().hasPackage(), serialized.revisions().last().get().hasPackage());
assertEquals(original.revisions().last().get().shouldSkip(), serialized.revisions().last().get().shouldSkip());
assertEquals(original.revisions().last().get().description(), serialized.revisions().last().get().description());
+ assertEquals(original.revisions().last().get().submittedAt(), serialized.revisions().last().get().submittedAt());
assertEquals(original.revisions().last().get().risk(), serialized.revisions().last().get().risk());
assertEquals(original.revisions().withPackage(), serialized.revisions().withPackage());
assertEquals(original.revisions().production(), serialized.revisions().production());
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-overview-2.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-overview-2.json
index b103b579166..19b3d5dc2d7 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-overview-2.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-overview-2.json
@@ -1266,21 +1266,24 @@
"compileVersion": "6.1.0",
"sourceUrl": "repository1/tree/commit1",
"commit": "commit1",
- "deployable": true
+ "deployable": true,
+ "submittedAt": 14403000
},
{
"build": 2,
"compileVersion": "6.1.0",
"sourceUrl": "repository1/tree/commit1",
"commit": "commit1",
- "deployable": true
+ "deployable": true,
+ "submittedAt": 1000
},
{
"build": 1,
"compileVersion": "6.1.0",
"sourceUrl": "repository1/tree/commit1",
"commit": "commit1",
- "deployable": true
+ "deployable": true,
+ "submittedAt": 0
}
]
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-overview.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-overview.json
index 500f66a7cdb..1d115049b35 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-overview.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-overview.json
@@ -706,7 +706,8 @@
"commit": "commit1",
"description": "my best commit yet",
"risk": 9001,
- "deployable": true
+ "deployable": true,
+ "submittedAt": 1600000000000
},
{
"build": 3,
@@ -715,7 +716,8 @@
"commit": "commit1",
"description": "my best commit yet",
"risk": 9001,
- "deployable": false
+ "deployable": false,
+ "submittedAt": 1600000000000
},
{
"build": 2,
@@ -724,7 +726,8 @@
"commit": "commit1",
"description": "my best commit yet",
"risk": 9001,
- "deployable": false
+ "deployable": false,
+ "submittedAt": 1600000000000
},
{
"build": 1,
@@ -733,7 +736,8 @@
"commit": "commit1",
"description": "my best commit yet",
"risk": 9001,
- "deployable": true
+ "deployable": true,
+ "submittedAt": 1600000000000
}
]
}
diff --git a/document/src/vespa/document/annotation/spantree.h b/document/src/vespa/document/annotation/spantree.h
index a0010fafa74..8745fb4421d 100644
--- a/document/src/vespa/document/annotation/spantree.h
+++ b/document/src/vespa/document/annotation/spantree.h
@@ -4,7 +4,6 @@
#include "annotation.h"
#include <vector>
-#include <cassert>
namespace document {
struct SpanNode;
@@ -24,7 +23,6 @@ public:
SpanTree(vespalib::stringref name, std::unique_ptr<T> root)
: _name(name),
_root(std::move(root)) {
- assert(_root.get());
}
~SpanTree();
diff --git a/document/src/vespa/document/base/testdocman.cpp b/document/src/vespa/document/base/testdocman.cpp
index 471a2f8c196..d5b24b51f24 100644
--- a/document/src/vespa/document/base/testdocman.cpp
+++ b/document/src/vespa/document/base/testdocman.cpp
@@ -7,6 +7,7 @@
#include <vespa/document/fieldvalue/stringfieldvalue.h>
#include <vespa/vespalib/util/rand48.h>
#include <sstream>
+#include <cassert>
namespace document {
diff --git a/document/src/vespa/document/select/valuenodes.cpp b/document/src/vespa/document/select/valuenodes.cpp
index b3052cc07e2..06205e6b7d1 100644
--- a/document/src/vespa/document/select/valuenodes.cpp
+++ b/document/src/vespa/document/select/valuenodes.cpp
@@ -11,6 +11,7 @@
#include <vespa/vespalib/util/md5.h>
#include <vespa/document/util/stringutil.h>
#include <vespa/vespalib/text/lowercase.h>
+#include <cassert>
#include <iomanip>
#include <sys/time.h>
diff --git a/document/src/vespa/document/serialization/annotationdeserializer.cpp b/document/src/vespa/document/serialization/annotationdeserializer.cpp
index 41bc9ec8aaa..c449029440f 100644
--- a/document/src/vespa/document/serialization/annotationdeserializer.cpp
+++ b/document/src/vespa/document/serialization/annotationdeserializer.cpp
@@ -40,7 +40,7 @@ unique_ptr<SpanTree> AnnotationDeserializer::readSpanTree() {
deserializer.read(tree_name);
_nodes.clear();
SpanNode::UP root = readSpanNode();
- unique_ptr<SpanTree> span_tree(new SpanTree(tree_name.getValue(), std::move(root)));
+ auto span_tree = std::make_unique<SpanTree>(tree_name.getValue(), std::move(root));
uint32_t annotation_count = getInt1_2_4Bytes(_stream);
span_tree->reserveAnnotations(annotation_count);
diff --git a/eval/src/tests/eval/value_cache/dense-short1.json b/eval/src/tests/eval/value_cache/dense-short1.json
new file mode 100644
index 00000000000..4e170001c96
--- /dev/null
+++ b/eval/src/tests/eval/value_cache/dense-short1.json
@@ -0,0 +1 @@
+[ 1, 2.0, 3.5 ]
diff --git a/eval/src/tests/eval/value_cache/dense-short2.json b/eval/src/tests/eval/value_cache/dense-short2.json
new file mode 100644
index 00000000000..40121135544
--- /dev/null
+++ b/eval/src/tests/eval/value_cache/dense-short2.json
@@ -0,0 +1,3 @@
+{
+ "values": [ 1, 2.0, 3.5 ]
+}
diff --git a/eval/src/tests/eval/value_cache/sparse-short1.json b/eval/src/tests/eval/value_cache/sparse-short1.json
new file mode 100644
index 00000000000..949b7b2b8bd
--- /dev/null
+++ b/eval/src/tests/eval/value_cache/sparse-short1.json
@@ -0,0 +1,5 @@
+{
+ "foo": 1.0,
+ "bar": 2.0,
+ "three": 3.0
+}
diff --git a/eval/src/tests/eval/value_cache/sparse-short2.json b/eval/src/tests/eval/value_cache/sparse-short2.json
new file mode 100644
index 00000000000..f10b1b6f9fb
--- /dev/null
+++ b/eval/src/tests/eval/value_cache/sparse-short2.json
@@ -0,0 +1,7 @@
+{
+ "cells": {
+ "foo": 1.0,
+ "bar": 2.0,
+ "three": 3.0
+ }
+}
diff --git a/eval/src/tests/eval/value_cache/tensor_loader_test.cpp b/eval/src/tests/eval/value_cache/tensor_loader_test.cpp
index 1a77cfe847b..4b4ba3fc0d3 100644
--- a/eval/src/tests/eval/value_cache/tensor_loader_test.cpp
+++ b/eval/src/tests/eval/value_cache/tensor_loader_test.cpp
@@ -19,12 +19,26 @@ TensorSpec make_dense_tensor() {
.add({{"x", 1}, {"y", 1}}, 4.0);
}
+TensorSpec make_simple_dense_tensor() {
+ return TensorSpec("tensor(z[3])")
+ .add({{"z", 0}}, 1.0)
+ .add({{"z", 1}}, 2.0)
+ .add({{"z", 2}}, 3.5);
+}
+
TensorSpec make_sparse_tensor() {
return TensorSpec("tensor(x{},y{})")
.add({{"x", "foo"}, {"y", "bar"}}, 1.0)
.add({{"x", "bar"}, {"y", "foo"}}, 2.0);
}
+TensorSpec make_simple_sparse_tensor() {
+ return TensorSpec("tensor(mydim{})")
+ .add({{"mydim", "foo"}}, 1.0)
+ .add({{"mydim", "three"}}, 3.0)
+ .add({{"mydim", "bar"}}, 2.0);
+}
+
TensorSpec make_mixed_tensor() {
return TensorSpec("tensor(x{},y[2])")
.add({{"x", "foo"}, {"y", 0}}, 1.0)
@@ -75,6 +89,16 @@ TEST_F("require that lz4 compressed sparse tensor can be loaded", ConstantTensor
TEST_DO(verify_tensor(make_sparse_tensor(), f1.create(TEST_PATH("sparse.json.lz4"), "tensor(x{},y{})")));
}
+TEST_F("require that sparse tensor short form can be loaded", ConstantTensorLoader(factory)) {
+ TEST_DO(verify_tensor(make_simple_sparse_tensor(), f1.create(TEST_PATH("sparse-short1.json"), "tensor(mydim{})")));
+ TEST_DO(verify_tensor(make_simple_sparse_tensor(), f1.create(TEST_PATH("sparse-short2.json"), "tensor(mydim{})")));
+}
+
+TEST_F("require that dense tensor short form can be loaded", ConstantTensorLoader(factory)) {
+ TEST_DO(verify_tensor(make_simple_dense_tensor(), f1.create(TEST_PATH("dense-short1.json"), "tensor(z[3])")));
+ TEST_DO(verify_tensor(make_simple_dense_tensor(), f1.create(TEST_PATH("dense-short2.json"), "tensor(z[3])")));
+}
+
TEST_F("require that bad lz4 file fails to load creating empty result", ConstantTensorLoader(factory)) {
TEST_DO(verify_tensor(sparse_tensor_nocells(), f1.create(TEST_PATH("bad_lz4.json.lz4"), "tensor(x{},y{})")));
}
diff --git a/eval/src/vespa/eval/eval/value_cache/constant_tensor_loader.cpp b/eval/src/vespa/eval/eval/value_cache/constant_tensor_loader.cpp
index 9af473f1f94..5654a3abcbe 100644
--- a/eval/src/vespa/eval/eval/value_cache/constant_tensor_loader.cpp
+++ b/eval/src/vespa/eval/eval/value_cache/constant_tensor_loader.cpp
@@ -41,6 +41,52 @@ struct AddressExtractor : ObjectTraverser {
}
};
+struct SingleMappedExtractor : ObjectTraverser {
+ const vespalib::string &dimension;
+ TensorSpec &spec;
+ SingleMappedExtractor(const vespalib::string &dimension_in, TensorSpec &spec_in)
+ : dimension(dimension_in),
+ spec(spec_in)
+ {}
+ void field(const Memory &symbol, const Inspector &inspector) override {
+ vespalib::string label = symbol.make_string();
+ double value = inspector.asDouble();
+ TensorSpec::Address address;
+ address.emplace(dimension, label);
+ spec.add(address, value);
+ }
+};
+
+
+void decodeSingleMappedForm(const Inspector &root, const ValueType &value_type, TensorSpec &spec) {
+ auto extractor = SingleMappedExtractor(value_type.dimensions()[0].name, spec);
+ root.traverse(extractor);
+}
+
+void decodeSingleDenseForm(const Inspector &values, const ValueType &value_type, TensorSpec &spec) {
+ const auto &dimension = value_type.dimensions()[0].name;
+ for (size_t i = 0; i < values.entries(); ++i) {
+ TensorSpec::Address address;
+ address.emplace(dimension, TensorSpec::Label(i));
+ spec.add(address, values[i].asDouble());
+ }
+}
+
+void decodeLiteralForm(const Inspector &cells, const ValueType &value_type, TensorSpec &spec) {
+ std::set<vespalib::string> indexed;
+ for (const auto &dimension: value_type.dimensions()) {
+ if (dimension.is_indexed()) {
+ indexed.insert(dimension.name);
+ }
+ }
+ for (size_t i = 0; i < cells.entries(); ++i) {
+ TensorSpec::Address address;
+ AddressExtractor extractor(indexed, address);
+ cells[i]["address"].traverse(extractor);
+ spec.add(address, cells[i]["value"].asDouble());
+ }
+}
+
void decode_json(const vespalib::string &path, Input &input, Slime &slime) {
if (slime::JsonFormat::decode(input, slime) == 0) {
LOG(warning, "file contains invalid json: %s", path.c_str());
@@ -90,19 +136,26 @@ ConstantTensorLoader::create(const vespalib::string &path, const vespalib::strin
}
Slime slime;
decode_json(path, slime);
- std::set<vespalib::string> indexed;
- for (const auto &dimension: value_type.dimensions()) {
- if (dimension.is_indexed()) {
- indexed.insert(dimension.name);
- }
- }
TensorSpec spec(type);
- const Inspector &cells = slime.get()["cells"];
- for (size_t i = 0; i < cells.entries(); ++i) {
- TensorSpec::Address address;
- AddressExtractor extractor(indexed, address);
- cells[i]["address"].traverse(extractor);
- spec.add(address, cells[i]["value"].asDouble());
+ bool isSingleDenseType = value_type.is_dense() && (value_type.count_indexed_dimensions() == 1);
+ bool isSingleMappedType = value_type.is_sparse() && (value_type.count_mapped_dimensions() == 1);
+ const Inspector &root = slime.get();
+ const Inspector &cells = root["cells"];
+ const Inspector &values = root["values"];
+ if (cells.type().getId() == vespalib::slime::ARRAY::ID) {
+ decodeLiteralForm(cells, value_type, spec);
+ }
+ else if (cells.type().getId() == vespalib::slime::OBJECT::ID && isSingleMappedType) {
+ decodeSingleMappedForm(cells, value_type, spec);
+ }
+ else if (values.type().getId() == vespalib::slime::ARRAY::ID && isSingleDenseType) {
+ decodeSingleDenseForm(values, value_type, spec);
+ }
+ else if (root.type().getId() == vespalib::slime::OBJECT::ID && isSingleMappedType) {
+ decodeSingleMappedForm(root, value_type, spec);
+ }
+ else if (root.type().getId() == vespalib::slime::ARRAY::ID && isSingleDenseType) {
+ decodeSingleDenseForm(root, value_type, spec);
}
try {
return std::make_unique<SimpleConstantValue>(value_from_spec(spec, _factory));
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
index f76eda57226..2429c1f6280 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
@@ -48,6 +48,12 @@ public class Flags {
private static volatile TreeMap<FlagId, FlagDefinition> flags = new TreeMap<>();
+ public static final UnboundBooleanFlag IPV6_IN_GCP = defineFeatureFlag(
+ "ipv6-in-gcp", false,
+ List.of("hakonhall"), "2023-05-15", "2023-06-15",
+ "Provision GCP hosts with external IPv6 addresses",
+ "Takes effect on the next host provisioning");
+
public static final UnboundBooleanFlag DROP_CACHES = defineFeatureFlag(
"drop-caches", false,
List.of("hakonhall", "baldersheim"), "2023-03-06", "2023-06-05",
@@ -414,6 +420,12 @@ public class Flags {
"Takes effect at next host-admin tick",
ZONE_ID);
+ public static final UnboundListFlag<String> WEIGHTED_ENDPOINT_RECORD_TTL = defineListFlag(
+ "weighted-endpoint-record-ttl", List.of(), String.class, List.of("jonmv"), "2023-05-16", "2023-06-16",
+ "A list of endpoints and custom TTLs, on the form \"endpoint-fqdn:TTL-seconds\". " +
+ "Where specified, CNAME records are used instead of the default ALIAS records, which have a default 60s TTL.",
+ "Takes effect at redeployment from controller");
+
public static final UnboundBooleanFlag ENABLE_CONDITIONAL_PUT_REMOVE_WRITE_REPAIR = defineFeatureFlag(
"enable-conditional-put-remove-write-repair", false,
List.of("vekterli", "havardpe"), "2023-05-10", "2023-07-01",
@@ -423,6 +435,14 @@ public class Flags {
"Takes effect at redeployment",
ZONE_ID, APPLICATION_ID);
+ public static final UnboundBooleanFlag ENABLE_DATAPLANE_PROXY = defineFeatureFlag(
+ "enable-dataplane-proxy", false,
+ List.of("mortent", "olaa"), "2023-05-15", "2023-08-01",
+ "Whether to enable dataplane proxy",
+ "Takes effect at redeployment",
+ ZONE_ID, APPLICATION_ID
+ );
+
/** WARNING: public for testing: All flags should be defined in {@link Flags}. */
public static UnboundBooleanFlag defineFeatureFlag(String flagId, boolean defaultValue, List<String> owners,
String createdAt, String expiresAt, String description,
diff --git a/linguistics-components/src/main/java/com/yahoo/language/huggingface/HuggingFaceTokenizer.java b/linguistics-components/src/main/java/com/yahoo/language/huggingface/HuggingFaceTokenizer.java
index b92e0678970..d812b85b82e 100644
--- a/linguistics-components/src/main/java/com/yahoo/language/huggingface/HuggingFaceTokenizer.java
+++ b/linguistics-components/src/main/java/com/yahoo/language/huggingface/HuggingFaceTokenizer.java
@@ -76,6 +76,7 @@ public class HuggingFaceTokenizer extends AbstractComponent implements Embedder,
public String decode(List<Long> tokens, Language language) { return resolve(language).decode(toArray(tokens)); }
@Override public void close() { models.forEach((__, model) -> model.close()); }
+ @Override public void deconstruct() { close(); }
private ai.djl.huggingface.tokenizers.HuggingFaceTokenizer resolve(Language language) {
// Disregard language if there is default model
diff --git a/linguistics/src/main/java/com/yahoo/language/simple/SimpleTokenizer.java b/linguistics/src/main/java/com/yahoo/language/simple/SimpleTokenizer.java
index b791c843357..2728249333e 100644
--- a/linguistics/src/main/java/com/yahoo/language/simple/SimpleTokenizer.java
+++ b/linguistics/src/main/java/com/yahoo/language/simple/SimpleTokenizer.java
@@ -57,7 +57,7 @@ public class SimpleTokenizer implements Tokenizer {
}
/** Tokenize the input, and apply the given transform to each token string. */
- public Iterable<Token> tokenize(String input, Function<String, String> tokenProocessor) {
+ public Iterable<Token> tokenize(String input, Function<String, String> tokenProcessor) {
if (input.isEmpty()) return List.of();
List<Token> tokens = new ArrayList<>();
@@ -71,7 +71,7 @@ public class SimpleTokenizer implements Tokenizer {
String original = input.substring(prev, next);
tokens.add(new SimpleToken(original).setOffset(prev)
.setType(tokenType)
- .setTokenString(tokenProocessor.apply(original)));
+ .setTokenString(tokenProcessor.apply(original)));
prev = next;
prevType = nextType;
tokenType = prevType;
diff --git a/logd/src/logd/empty_forwarder.cpp b/logd/src/logd/empty_forwarder.cpp
index dda03c46c01..b601ea6d890 100644
--- a/logd/src/logd/empty_forwarder.cpp
+++ b/logd/src/logd/empty_forwarder.cpp
@@ -5,6 +5,7 @@
#include <vespa/log/exceptions.h>
#include <vespa/log/log_message.h>
#include <vespa/vespalib/util/size_literals.h>
+#include <cassert>
#include <vespa/log/log.h>
LOG_SETUP(".logd.empty_forwarder");
diff --git a/maven-plugins/allowed-maven-dependencies.txt b/maven-plugins/allowed-maven-dependencies.txt
index 29c5fa69429..f2334a6ef00 100644
--- a/maven-plugins/allowed-maven-dependencies.txt
+++ b/maven-plugins/allowed-maven-dependencies.txt
@@ -15,7 +15,7 @@ commons-io:commons-io:2.11.0
javax.annotation:javax.annotation-api:1.2
javax.inject:javax.inject:1
org.apache.commons:commons-collections4:4.2
-org.apache.commons:commons-compress:1.22
+org.apache.commons:commons-compress:1.23.0
org.apache.commons:commons-lang3:3.12.0
org.apache.maven:maven-archiver:3.6.0
org.apache.maven:maven-artifact:3.8.7
diff --git a/metrics/src/main/java/ai/vespa/metrics/ConfigServerMetrics.java b/metrics/src/main/java/ai/vespa/metrics/ConfigServerMetrics.java
index 013c50e77cf..d323026e4ca 100644
--- a/metrics/src/main/java/ai/vespa/metrics/ConfigServerMetrics.java
+++ b/metrics/src/main/java/ai/vespa/metrics/ConfigServerMetrics.java
@@ -27,7 +27,6 @@ public enum ConfigServerMetrics implements VespaMetrics {
MAINTENANCE_DEPLOYMENT_TRANSIENT_FAILURE("maintenanceDeployment.transientFailure", Unit.OPERATION, "Number of maintenance deployments that failed with a transient failure"),
MAINTENANCE_DEPLOYMENT_FAILURE("maintenanceDeployment.failure", Unit.OPERATION, "Number of maintenance deployments that failed with a permanent failure"),
-
// ZooKeeper related metrics
ZK_CONNECTIONS_LOST("configserver.zkConnectionLost", Unit.CONNECTION, "Number of ZooKeeper connections lost"),
ZK_RECONNECTED("configserver.zkReconnected", Unit.CONNECTION, "Number of ZooKeeper reconnections"),
@@ -45,8 +44,13 @@ public enum ConfigServerMetrics implements VespaMetrics {
ORCHESTRATOR_LOCK_ACQUIRE_TIMEOUT("orchestrator.lock.acquire-timedout", Unit.OPERATION, "Number of times zookeeper lock couldn't be acquired within timeout"),
ORCHESTRATOR_LOCK_ACQUIRE("orchestrator.lock.acquire", Unit.OPERATION, "Number of attempts to acquire zookeeper lock"),
ORCHESTRATOR_LOCK_ACQUIRED("orchestrator.lock.acquired", Unit.OPERATION, "Number of times zookeeper lock was acquired"),
- ORCHESTRATOR_LOCK_HOLD_LATENCY("orchestrator.lock.hold-latency", Unit.SECOND, "Time zookeeper lock was held before it was released");
+ ORCHESTRATOR_LOCK_HOLD_LATENCY("orchestrator.lock.hold-latency", Unit.SECOND, "Time zookeeper lock was held before it was released"),
+ // Node repository metrics
+ CLUSTER_COST("cluster.cost", Unit.DOLLAR_PER_HOUR, "The cost of the nodes allocated to a certain cluster, in $/hr"),
+ CLUSTER_LOAD_IDEAL_CPU("cluster.load.ideal.cpu", Unit.FRACTION, "The ideal cpu load of a certain cluster"),
+ CLUSTER_LOAD_IDEAL_MEMORY("cluster.load.ideal.memory", Unit.FRACTION, "The ideal memory load of a certain cluster"),
+ CLUSTER_LOAD_IDEAL_DISK("cluster.load.ideal.disk", Unit.FRACTION, "The ideal disk load of a certain cluster");
private final String name;
private final Unit unit;
diff --git a/metrics/src/main/java/ai/vespa/metrics/Unit.java b/metrics/src/main/java/ai/vespa/metrics/Unit.java
index a2123d72246..ee6ea569fc4 100644
--- a/metrics/src/main/java/ai/vespa/metrics/Unit.java
+++ b/metrics/src/main/java/ai/vespa/metrics/Unit.java
@@ -12,6 +12,7 @@ public enum Unit {
CONNECTION(BaseUnit.CONNECTION, "A link used for communication between a client and a server"),
DOCUMENT(BaseUnit.DOCUMENT, "Vespa document, a collection of fields defined in a schema file"),
DOCUMENTID(BaseUnit.DOCUMENTID, "A unique document identifier"),
+ DOLLAR_PER_HOUR(BaseUnit.DOLLAR, BaseUnit.HOUR, "Total current cost of the cluster in $/hr"),
FAILURE(BaseUnit.FAILURE, "Failures, typically for requests, operations or nodes"),
FILE(BaseUnit.FILE, "Data file stored on the disk on a node"),
FRACTION(BaseUnit.FRACTION, "A value in the range [0..1]. Higher values can occur for some metrics, but would indicate the value is outside of the allowed range."),
@@ -80,10 +81,12 @@ public enum Unit {
CONNECTION("connection"),
DOCUMENT("document"),
DOCUMENTID("documentid"),
+ DOLLAR("dollar"),
FAILURE("failure"),
FILE("file"),
FRACTION("fraction"),
HIT("hit"),
+ HOUR("hour"),
INSTANCE("instance"),
ITEM("item"),
MILLISECOND("millisecond", "ms"),
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/identity/AthenzCredentialsMaintainer.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/identity/AthenzCredentialsMaintainer.java
index c684487b4f8..2463d293854 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/identity/AthenzCredentialsMaintainer.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/identity/AthenzCredentialsMaintainer.java
@@ -441,8 +441,8 @@ public class AthenzCredentialsMaintainer implements CredentialsMaintainer {
var certsDirectory = legacySiaDirectory.resolve("certs");
Files.createDirectories(keysDirectory);
Files.createDirectories(certsDirectory);
- Files.copy(certificateFile, certsDirectory.resolve(certificateFile.getFileName()), StandardCopyOption.values());
- Files.copy(privateKeyFile, keysDirectory.resolve(privateKeyFile.getFileName()), StandardCopyOption.values());
+ writeFile(certsDirectory.resolve(certificateFile.getFileName()), new String(Files.readAllBytes(certificateFile)));
+ writeFile(keysDirectory.resolve(privateKeyFile.getFileName()), new String(Files.readAllBytes(privateKeyFile)));
}
/*
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeList.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeList.java
index 1cf7bcfa4f2..d1b900b9969 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeList.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeList.java
@@ -86,10 +86,7 @@ public class NodeList extends AbstractFilteringList<Node, NodeList> {
/** Returns the subset of nodes which have a replaceable root disk */
public NodeList replaceableRootDisk() {
- // TODO(mpolden): Support any architecture if we change how cloud images for other
- // architectures are managed
- return matching(node -> node.resources().storageType() == NodeResources.StorageType.remote &&
- node.resources().architecture() == NodeResources.Architecture.x86_64);
+ return matching(node -> node.resources().storageType() == NodeResources.StorageType.remote);
}
/** Returns the subset of nodes which satisfy the given resources */
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java
index 6a81c17d362..65e0bc558b2 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java
@@ -197,9 +197,11 @@ public class Cluster {
Duration totalDuration = Duration.ZERO;
for (ScalingEvent event : scalingEvents()) {
if (event.duration().isEmpty()) continue;
+ // Assume we have missed timely recording completion if it is longer than 4 days, so ignore
+ if ( ! event.duration().get().minus(Duration.ofDays(4)).isNegative()) continue;
+
completedEventCount++;
- // Assume we have missed timely recording completion if it is longer than 4 days
- totalDuration = totalDuration.plus(maximum(Duration.ofDays(4), event.duration().get()));
+ totalDuration = totalDuration.plus(event.duration().get());
}
if (completedEventCount == 0) { // Use defaults
if (clusterSpec.isStateful()) return Duration.ofHours(12);
@@ -223,10 +225,4 @@ public class Cluster {
return duration;
}
- private static Duration maximum(Duration largestAllowed, Duration duration) {
- if ( ! duration.minus(largestAllowed).isNegative())
- return largestAllowed;
- return duration;
- }
-
}
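
The change above stops clamping long scaling events to four days and instead skips them when averaging, treating anything recorded as four days or longer as a missed completion. A self-contained sketch of that selection logic, using plain java.time rather than the repository's ScalingEvent type:

import java.time.Duration;
import java.util.List;
import java.util.Optional;

class ScalingDurationSketch {

    /** Average of completed event durations; events of 4 days or more are assumed mis-recorded and skipped. */
    static Optional<Duration> average(List<Duration> completedDurations) {
        Duration total = Duration.ZERO;
        int counted = 0;
        for (Duration duration : completedDurations) {
            if ( ! duration.minus(Duration.ofDays(4)).isNegative()) continue; // skip durations >= 4 days
            total = total.plus(duration);
            counted++;
        }
        return counted == 0 ? Optional.empty() : Optional.of(total.dividedBy(counted));
    }

    public static void main(String[] args) {
        // Prints Optional[PT3H]: the 5-day event is ignored, leaving the average of 2h and 4h.
        System.out.println(average(List.of(Duration.ofHours(2), Duration.ofDays(5), Duration.ofHours(4))));
    }
}
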
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
index 4020166a132..a7d5cc50828 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
@@ -55,7 +55,7 @@ public class Autoscaler {
}
private Autoscaling autoscale(Application application, Cluster cluster, NodeList clusterNodes, Limits limits) {
- ClusterModel clusterModel = new ClusterModel(nodeRepository.zone(),
+ ClusterModel clusterModel = new ClusterModel(nodeRepository,
application,
clusterNodes.not().retired().clusterSpec(),
cluster,
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
index 2f9ad28a072..bb599b69398 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.autoscale;
+import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.hosted.provision.Node;
@@ -8,6 +9,7 @@ import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Application;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
+import com.yahoo.vespa.hosted.provision.provisioning.CapacityPolicies;
import java.time.Clock;
import java.time.Duration;
@@ -42,12 +44,16 @@ public class ClusterModel {
static final double idealContainerDiskLoad = 0.95;
static final double idealContentDiskLoad = 0.6;
+ // Memory for other processes running on the node (config-proxy, metrics-proxy).
+ // Keep in sync with config-model/NodeResourcesTuning.
+ static final double nodeMemoryOverheadGb = 0.7;
+
// When a query is issued on a node the cost is the sum of a fixed cost component and a cost component
// proportional to document count. We must account for this when comparing configurations with more or fewer nodes.
// TODO: Measure this, and only take it into account with queries
private static final double fixedCpuCostFraction = 0.1;
- private final Zone zone;
+ private final NodeRepository nodeRepository;
private final Application application;
private final ClusterSpec clusterSpec;
private final Cluster cluster;
@@ -69,14 +75,14 @@ public class ClusterModel {
private Double maxQueryGrowthRate = null;
private OptionalDouble averageQueryRate = null;
- public ClusterModel(Zone zone,
+ public ClusterModel(NodeRepository nodeRepository,
Application application,
ClusterSpec clusterSpec,
Cluster cluster,
NodeList clusterNodes,
MetricsDb metricsDb,
Clock clock) {
- this.zone = zone;
+ this.nodeRepository = nodeRepository;
this.application = application;
this.clusterSpec = clusterSpec;
this.cluster = cluster;
@@ -88,7 +94,7 @@ public class ClusterModel {
this.at = clock.instant();
}
- ClusterModel(Zone zone,
+ ClusterModel(NodeRepository nodeRepository,
Application application,
ClusterSpec clusterSpec,
Cluster cluster,
@@ -96,7 +102,7 @@ public class ClusterModel {
Duration scalingDuration,
ClusterTimeseries clusterTimeseries,
ClusterNodesTimeseries nodeTimeseries) {
- this.zone = zone;
+ this.nodeRepository = nodeRepository;
this.application = application;
this.clusterSpec = clusterSpec;
this.cluster = cluster;
@@ -179,7 +185,7 @@ public class ClusterModel {
double queryCpu = queryCpuPerGroup * groupCount() / groups;
double writeCpu = (double)groupSize() / groupSize;
return new Load(queryCpuFraction() * queryCpu + (1 - queryCpuFraction()) * writeCpu,
- (double)groupSize() / groupSize,
+ (1 - fixedMemoryFraction()) * (double)groupSize() / groupSize + fixedMemoryFraction() * 1,
(double)groupSize() / groupSize);
}
else {
@@ -315,7 +321,7 @@ public class ClusterModel {
/** Returns the headroom for growth during organic traffic growth as a multiple of current resources. */
private double growthRateHeadroom() {
- if ( ! zone.environment().isProduction()) return 1;
+ if ( ! nodeRepository.zone().environment().isProduction()) return 1;
double growthRateHeadroom = 1 + maxQueryGrowthRate() * scalingDuration().toMinutes();
// Cap headroom at 10% above the historical observed peak
if (queryFractionOfMax() != 0)
@@ -329,7 +335,7 @@ public class ClusterModel {
* as a multiple of current resources.
*/
private double trafficShiftHeadroom() {
- if ( ! zone.environment().isProduction()) return 1;
+ if ( ! nodeRepository.zone().environment().isProduction()) return 1;
if (canRescaleWithinBcpDeadline()) return 1;
double trafficShiftHeadroom;
if (application.status().maxReadShare() == 0) // No traffic fraction data
@@ -369,6 +375,34 @@ public class ClusterModel {
return idealContentMemoryLoad;
}
+ /**
+ * Returns the fraction of memory of the current allocation which is currently consumed by
+ * fixed data structures which take the same amount of space regardless of document volume.
+ */
+ private double fixedMemoryFraction() {
+ if (clusterSpec().type().isContainer()) return 1.0;
+ double fixedMemory = nodeMemoryOverheadGb +
+ (averageRealMemory() - nodeMemoryOverheadGb) * 0.05; // TODO: Measure actual content node usage
+ return fixedMemory / averageRealMemory();
+ }
+
+ private double averageRealMemory() {
+ if (nodes.isEmpty()) { // we're estimating
+ var initialResources = new CapacityPolicies(nodeRepository).specifyFully(cluster.minResources().nodeResources(),
+ clusterSpec,
+ application.id());
+ return nodeRepository.resourcesCalculator().requestToReal(initialResources,
+ nodeRepository.exclusiveAllocation(clusterSpec),
+ false).memoryGb();
+ }
+ else {
+ return nodes.stream()
+ .mapToDouble(node -> nodeRepository.resourcesCalculator().realResourcesOf(node, nodeRepository).memoryGb())
+ .average()
+ .getAsDouble();
+ }
+ }
+
private double idealDiskLoad() {
// Stateless clusters are not expected to consume more disk over time -
// if they do it is due to logs which will be rotated away right before the disk is full
@@ -380,7 +414,7 @@ public class ClusterModel {
* This is useful in cases where it's possible to continue without the cluster model,
* as QuestDb is known to temporarily fail during reading of data.
*/
- public static Optional<ClusterModel> create(Zone zone,
+ public static Optional<ClusterModel> create(NodeRepository nodeRepository,
Application application,
ClusterSpec clusterSpec,
Cluster cluster,
@@ -388,7 +422,7 @@ public class ClusterModel {
MetricsDb metricsDb,
Clock clock) {
try {
- return Optional.of(new ClusterModel(zone, application, clusterSpec, cluster, clusterNodes, metricsDb, clock));
+ return Optional.of(new ClusterModel(nodeRepository, application, clusterSpec, cluster, clusterNodes, metricsDb, clock));
}
catch (Exception e) {
log.log(Level.WARNING, "Failed creating a cluster model for " + application + " " + cluster, e);
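
The new fixedMemoryFraction() above counts the fixed per-node overhead (0.7 GB, kept in sync with NodeResourcesTuning) plus an assumed 5% of the remaining real memory as independent of document volume. A worked sketch of that arithmetic, with the node size as a hypothetical input rather than a value read from the node repository:

class FixedMemorySketch {

    static final double nodeMemoryOverheadGb = 0.7; // other processes on the node (config-proxy, metrics-proxy)

    /** Fraction of a content node's real memory assumed not to scale with document volume. */
    static double fixedMemoryFraction(double averageRealMemoryGb) {
        double fixedMemoryGb = nodeMemoryOverheadGb
                               + (averageRealMemoryGb - nodeMemoryOverheadGb) * 0.05;
        return fixedMemoryGb / averageRealMemoryGb;
    }

    public static void main(String[] args) {
        // With 32 GB of real memory: 0.7 + 31.3 * 0.05 = 2.265 GB fixed, i.e. about 7% of the node.
        System.out.println(fixedMemoryFraction(32.0));
    }
}
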
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Limits.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Limits.java
index 70e42fe712f..46eb9e9014a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Limits.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Limits.java
@@ -68,10 +68,10 @@ public class Limits {
public Limits fullySpecified(ClusterSpec clusterSpec, NodeRepository nodeRepository, ApplicationId applicationId) {
if (this.isEmpty()) throw new IllegalStateException("Unspecified limits can not be made fully specified");
- var defaultResources = new CapacityPolicies(nodeRepository).defaultNodeResources(clusterSpec, applicationId);
- var specifiedMin = min.nodeResources().isUnspecified() ? min.with(defaultResources) : min;
- var specifiedMax = max.nodeResources().isUnspecified() ? max.with(defaultResources) : max;
- return new Limits(specifiedMin, specifiedMax, groupSize);
+ var capacityPolicies = new CapacityPolicies(nodeRepository);
+ return new Limits(capacityPolicies.specifyFully(min, clusterSpec, applicationId),
+ capacityPolicies.specifyFully(max, clusterSpec, applicationId),
+ groupSize);
}
private double between(double min, double max, double value) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerInstance.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerInstance.java
index e228d31384c..5284511af47 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerInstance.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerInstance.java
@@ -119,6 +119,10 @@ public class LoadBalancerInstance {
cloudAccount);
}
+ public LoadBalancerInstance with(CloudAccount cloudAccount) {
+ return new LoadBalancerInstance(hostname, ipAddress, dnsZone, ports, networks, reals, settings, serviceIds, cloudAccount);
+ }
+
/** Prepends the given service IDs, possibly changing the order of those we have in this. */
public LoadBalancerInstance withServiceIds(List<PrivateServiceId> serviceIds) {
List<PrivateServiceId> ids = new ArrayList<>(serviceIds);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java
index 4f94f0fab53..f01f5a30870 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.maintenance;
+import ai.vespa.metrics.ConfigServerMetrics;
import com.yahoo.collections.Pair;
import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
@@ -18,6 +19,7 @@ import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.Node.State;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
+import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.node.Allocation;
import com.yahoo.vespa.hosted.provision.node.ClusterId;
import com.yahoo.vespa.hosted.provision.persistence.CacheStats;
@@ -118,7 +120,7 @@ public class MetricsReporter extends NodeRepositoryMaintainer {
byCluster.forEach((clusterId, clusterNodes) -> {
Metric.Context context = getContext(dimensions(clusterId.application(), clusterId.cluster()));
updateExclusiveSwitchMetrics(clusterNodes, nodes, context);
- updateClusterCostMetrics(clusterNodes, context);
+ updateClusterCostMetrics(clusterId, clusterNodes, context);
});
}
@@ -129,9 +131,16 @@ public class MetricsReporter extends NodeRepositoryMaintainer {
metric.set("nodes.exclusiveSwitchFraction", exclusiveSwitchRatio,context);
}
- private void updateClusterCostMetrics(List<Node> clusterNodes, Metric.Context context) {
+ private void updateClusterCostMetrics(ClusterId clusterId,
+ List<Node> clusterNodes, Metric.Context context) {
+ var cluster = nodeRepository().applications().get(clusterId.application())
+ .flatMap(application -> application.cluster(clusterId.cluster()));
+ if (cluster.isEmpty()) return;
double cost = clusterNodes.stream().mapToDouble(node -> node.resources().cost()).sum();
- metric.set("cluster.cost", cost, context);
+ metric.set(ConfigServerMetrics.CLUSTER_COST.baseName(), cost, context);
+ metric.set(ConfigServerMetrics.CLUSTER_LOAD_IDEAL_CPU.baseName(), cluster.get().target().ideal().cpu(), context);
+ metric.set(ConfigServerMetrics.CLUSTER_LOAD_IDEAL_MEMORY.baseName(), cluster.get().target().ideal().memory(), context);
+ metric.set(ConfigServerMetrics.CLUSTER_LOAD_IDEAL_DISK.baseName(), cluster.get().target().ideal().disk(), context);
}
private void updateZoneMetrics() {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
index 722c9acfdc0..67ab36c725e 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
@@ -122,6 +122,8 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
private final NodeFailer.ThrottlePolicy throttlePolicy;
DefaultTimes(Zone zone, Deployer deployer) {
+ boolean isCdZone = zone.system().isCd();
+
autoscalingInterval = Duration.ofMinutes(5);
dynamicProvisionerInterval = Duration.ofMinutes(3);
hostDeprovisionerInterval = Duration.ofMinutes(3);
@@ -137,7 +139,7 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
nodeMetricsCollectionInterval = Duration.ofMinutes(1);
expeditedChangeRedeployInterval = Duration.ofMinutes(3);
// Vespa upgrade frequency is higher in CD so (de)activate OS upgrades more frequently as well
- osUpgradeActivatorInterval = zone.system().isCd() ? Duration.ofSeconds(30) : Duration.ofMinutes(5);
+ osUpgradeActivatorInterval = isCdZone ? Duration.ofSeconds(30) : Duration.ofMinutes(5);
periodicRedeployInterval = Duration.ofMinutes(60);
provisionedExpiry = zone.cloud().dynamicProvisioning() ? Duration.ofMinutes(40) : Duration.ofHours(4);
rebalancerInterval = Duration.ofMinutes(120);
@@ -150,7 +152,7 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
throttlePolicy = NodeFailer.ThrottlePolicy.hosted;
hostRetirerInterval = Duration.ofMinutes(30);
- if (zone.environment().isProduction() && ! zone.system().isCd()) {
+ if (zone.environment().isProduction() && ! isCdZone) {
inactiveExpiry = Duration.ofHours(4); // enough time for the application owner to discover and redeploy
retiredInterval = Duration.ofMinutes(15);
dirtyExpiry = Duration.ofHours(2); // enough time to clean the node
@@ -159,8 +161,10 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
// long enough that nodes aren't reused immediately and delete can happen on all config servers
// with time enough to clean up even with ZK connection issues on config servers
inactiveExpiry = Duration.ofMinutes(1);
- retiredInterval = Duration.ofMinutes(1);
dirtyExpiry = Duration.ofMinutes(30);
+ // Longer time in non-CD since we might end up with many deployments in a short time
+ // when retiring many hosts, e.g. when doing OS upgrades
+ retiredInterval = isCdZone ? Duration.ofMinutes(1) : Duration.ofMinutes(5);
retiredExpiry = Duration.ofDays(1);
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsUpgrader.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsUpgrader.java
index 32fe9ba9f7b..5def863113c 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsUpgrader.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsUpgrader.java
@@ -50,7 +50,7 @@ public abstract class OsUpgrader {
/** The duration this leaves new nodes alone before scheduling any upgrade */
private Duration gracePeriod() {
- return Duration.ofDays(1);
+ return nodeRepository.zone().system().isCd() ? Duration.ofHours(4) : Duration.ofDays(1);
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RebuildingOsUpgrader.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RebuildingOsUpgrader.java
index 212bf5ffb12..e0affaae666 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RebuildingOsUpgrader.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RebuildingOsUpgrader.java
@@ -55,7 +55,7 @@ public class RebuildingOsUpgrader extends OsUpgrader {
private List<Node> rebuildableHosts(OsVersionTarget target, NodeList allNodes, Instant now) {
NodeList hostsOfTargetType = allNodes.nodeType(target.nodeType());
if (softRebuild) {
- // Soft rebuild is enabled so this should act on hosts having replacable root disk
+ // Soft rebuild is enabled so this should act on hosts having replaceable root disk
hostsOfTargetType = hostsOfTargetType.replaceableRootDisk();
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
index 5732e94956a..8cff57e3005 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
@@ -84,7 +84,21 @@ public class CapacityPolicies {
return target;
}
- public NodeResources defaultNodeResources(ClusterSpec clusterSpec, ApplicationId applicationId) {
+ public ClusterResources specifyFully(ClusterResources resources, ClusterSpec clusterSpec, ApplicationId applicationId) {
+ return resources.with(specifyFully(resources.nodeResources(), clusterSpec, applicationId));
+ }
+
+ public NodeResources specifyFully(NodeResources resources, ClusterSpec clusterSpec, ApplicationId applicationId) {
+ if (resources.vcpuIsUnspecified())
+ resources = resources.withVcpu(defaultResources(clusterSpec, applicationId).vcpu());
+ if (resources.memoryGbIsUnspecified())
+ resources = resources.withMemoryGb(defaultResources(clusterSpec, applicationId).memoryGb());
+ if (resources.diskGbIsUnspecified())
+ resources = resources.withDiskGb(defaultResources(clusterSpec, applicationId).diskGb());
+ return resources;
+ }
+
+ private NodeResources defaultResources(ClusterSpec clusterSpec, ApplicationId applicationId) {
if (clusterSpec.type() == ClusterSpec.Type.admin) {
Architecture architecture = adminClusterArchitecture(applicationId);
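
specifyFully() above fills in only the resource dimensions the deployer left unspecified, preserving any explicitly requested values, and the cluster-level overload simply applies the same logic to the per-node resources. A self-contained sketch of that fill-from-defaults pattern, with a simplified stand-in type instead of the real NodeResources API:

public class SpecifyFullySketch {

    // Simplified stand-in: a negative value marks a dimension as unspecified
    record Resources(double vcpu, double memoryGb, double diskGb) {
        static final double UNSPECIFIED = -1;
        boolean vcpuIsUnspecified()     { return vcpu == UNSPECIFIED; }
        boolean memoryGbIsUnspecified() { return memoryGb == UNSPECIFIED; }
        boolean diskGbIsUnspecified()   { return diskGb == UNSPECIFIED; }
        Resources withVcpu(double v)     { return new Resources(v, memoryGb, diskGb); }
        Resources withMemoryGb(double m) { return new Resources(vcpu, m, diskGb); }
        Resources withDiskGb(double d)   { return new Resources(vcpu, memoryGb, d); }
    }

    static Resources specifyFully(Resources requested, Resources defaults) {
        if (requested.vcpuIsUnspecified())     requested = requested.withVcpu(defaults.vcpu());
        if (requested.memoryGbIsUnspecified()) requested = requested.withMemoryGb(defaults.memoryGb());
        if (requested.diskGbIsUnspecified())   requested = requested.withDiskGb(defaults.diskGb());
        return requested;
    }

    public static void main(String[] args) {
        Resources requested = new Resources(4, Resources.UNSPECIFIED, Resources.UNSPECIFIED);
        Resources defaults  = new Resources(2, 8, 50);
        System.out.println(specifyFully(requested, defaults)); // vcpu=4.0, memoryGb=8.0, diskGb=50.0
    }
}

Applying defaults per dimension, rather than only when the whole resource spec is unspecified, is what lets a request that pins vcpu but leaves memory and disk open still receive sensible values for the open dimensions.
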
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
index ae1edab7fad..fe40b2c5001 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
@@ -70,10 +70,16 @@ public class LoadBalancerProvisioner {
this.deactivateRouting = PermanentFlags.DEACTIVATE_ROUTING.bindTo(nodeRepository.flagSource());
// Read and write all load balancers to make sure they are stored in the latest version of the serialization format
+ CloudAccount zoneAccount = nodeRepository.zone().cloud().account();
for (var id : db.readLoadBalancerIds()) {
try (var lock = db.lock(id.application())) {
var loadBalancer = db.readLoadBalancer(id);
- loadBalancer.ifPresent(lb -> db.writeLoadBalancer(lb, lb.state()));
+ loadBalancer.ifPresent(lb -> {
+ // TODO (freva): Remove after 8.166
+ if (!zoneAccount.isUnspecified() && lb.instance().isPresent() && lb.instance().get().cloudAccount().isUnspecified())
+ lb = lb.with(Optional.of(lb.instance().get().with(zoneAccount)));
+ db.writeLoadBalancer(lb, lb.state());
+ });
}
}
}
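
The constructor change above backfills the zone's cloud account into stored load balancers created before that field existed, while rewriting every record to the latest serialization format. A rough, standalone sketch of that backfill-on-read pattern, with a plain in-memory map standing in for the curator-backed database:

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

public class BackfillSketch {

    // Hypothetical record shape; only the optional cloud account matters for the pattern
    record LoadBalancer(String id, Optional<String> cloudAccount) {
        LoadBalancer withCloudAccount(String account) { return new LoadBalancer(id, Optional.of(account)); }
    }

    public static void main(String[] args) {
        String zoneAccount = "aws-account-123"; // assumed zone-wide default account
        Map<String, LoadBalancer> db = new HashMap<>();
        db.put("lb1", new LoadBalancer("lb1", Optional.empty()));           // legacy record, missing account
        db.put("lb2", new LoadBalancer("lb2", Optional.of("byo-account"))); // already has an account

        // Rewrite every record; fill in the zone account only where it is missing
        db.replaceAll((id, lb) -> lb.cloudAccount().isEmpty() ? lb.withCloudAccount(zoneAccount) : lb);
        db.values().forEach(System.out::println);
    }
}
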
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
index 1eef438a64e..c8d20d89dfa 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
@@ -124,9 +124,7 @@ public class NodeRepositoryProvisioner implements Provisioner {
}
private NodeResources getNodeResources(ClusterSpec cluster, NodeResources nodeResources, ApplicationId applicationId) {
- return nodeResources.isUnspecified()
- ? capacityPolicies.defaultNodeResources(cluster, applicationId)
- : nodeResources;
+ return capacityPolicies.specifyFully(nodeResources, cluster, applicationId);
}
@Override
@@ -179,15 +177,12 @@ public class NodeRepositoryProvisioner implements Provisioner {
firstDeployment // start at min, preserve current resources otherwise
? new AllocatableClusterResources(initialResourcesFrom(requested, clusterSpec, application.id()), clusterSpec, nodeRepository)
: new AllocatableClusterResources(nodes, nodeRepository);
- var clusterModel = new ClusterModel(zone, application, clusterSpec, cluster, nodes, nodeRepository.metricsDb(), nodeRepository.clock());
+ var clusterModel = new ClusterModel(nodeRepository, application, clusterSpec, cluster, nodes, nodeRepository.metricsDb(), nodeRepository.clock());
return within(Limits.of(requested), currentResources, firstDeployment, clusterModel);
}
private ClusterResources initialResourcesFrom(Capacity requested, ClusterSpec clusterSpec, ApplicationId applicationId) {
- var initial = requested.minResources();
- if (initial.nodeResources().isUnspecified())
- initial = initial.with(capacityPolicies.defaultNodeResources(clusterSpec, applicationId));
- return initial;
+ return capacityPolicies.specifyFully(requested.minResources(), clusterSpec, applicationId);
}
@@ -274,34 +269,32 @@ public class NodeRepositoryProvisioner implements Provisioner {
private IllegalArgumentException newNoAllocationPossible(ClusterSpec spec, Limits limits) {
StringBuilder message = new StringBuilder("No allocation possible within ").append(limits);
- if (nodeRepository.exclusiveAllocation(spec))
- message.append(". Nearest allowed node resources: ").append(findNearestNodeResources(limits));
+ if (nodeRepository.exclusiveAllocation(spec) && findNearestNodeResources(limits).isPresent())
+ message.append(". Nearest allowed node resources: ").append(findNearestNodeResources(limits).get());
return new IllegalArgumentException(message.toString());
}
- private NodeResources findNearestNodeResources(Limits limits) {
- NodeResources nearestMin = nearestFlavorResources(limits.min().nodeResources());
- NodeResources nearestMax = nearestFlavorResources(limits.max().nodeResources());
- if (limits.min().nodeResources().distanceTo(nearestMin) < limits.max().nodeResources().distanceTo(nearestMax))
+ private Optional<NodeResources> findNearestNodeResources(Limits limits) {
+ Optional<NodeResources> nearestMin = nearestFlavorResources(limits.min().nodeResources());
+ Optional<NodeResources> nearestMax = nearestFlavorResources(limits.max().nodeResources());
+ if (nearestMin.isEmpty()) return nearestMax;
+ if (nearestMax.isEmpty()) return nearestMin;
+ if (limits.min().nodeResources().distanceTo(nearestMin.get()) < limits.max().nodeResources().distanceTo(nearestMax.get()))
return nearestMin;
else
return nearestMax;
}
/** Returns the advertised flavor resources which are nearest to the given resources */
- private NodeResources nearestFlavorResources(NodeResources requestedResources) {
- NodeResources nearestHostResources = nodeRepository.flavors().getFlavors().stream()
- .map(flavor -> nodeRepository.resourcesCalculator().advertisedResourcesOf(flavor))
- .filter(resources -> resources.diskSpeed().compatibleWith(requestedResources.diskSpeed()))
- .filter(resources -> resources.storageType().compatibleWith(requestedResources.storageType()))
- .filter(resources -> resources.architecture().compatibleWith(requestedResources.architecture()))
- .min(Comparator.comparingDouble(resources -> resources.distanceTo(requestedResources)))
- .orElseThrow()
- .withBandwidthGbps(requestedResources.bandwidthGbps());
- if ( nearestHostResources.storageType() == NodeResources.StorageType.remote)
- nearestHostResources = nearestHostResources.withDiskGb(requestedResources.diskGb());
- return nearestHostResources;
+ private Optional<NodeResources> nearestFlavorResources(NodeResources requestedResources) {
+ return nodeRepository.flavors().getFlavors().stream()
+ .map(flavor -> nodeRepository.resourcesCalculator().advertisedResourcesOf(flavor))
+ .filter(resources -> resources.satisfies(requestedResources))
+ .min(Comparator.comparingDouble(resources -> resources.distanceTo(requestedResources)))
+ .map(resources -> resources.withBandwidthGbps(requestedResources.bandwidthGbps()))
+ .map(resources -> resources.storageType() == NodeResources.StorageType.remote ?
+ resources.withDiskGb(requestedResources.diskGb()) : resources);
}
}
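
nearestFlavorResources() now returns Optional.empty() when no configured flavor satisfies the request, rather than throwing from orElseThrow(), and the separate disk-speed, storage-type and architecture filters are collapsed into a single satisfies() check; newNoAllocationPossible() then only appends the "nearest allowed" hint when such a flavor exists. A compact sketch of the stream-based nearest-match lookup, with simplified resource types assumed for illustration:

import java.util.Comparator;
import java.util.List;
import java.util.Optional;

public class NearestFlavorSketch {

    // Simplified stand-in for advertised flavor resources
    record Resources(double vcpu, double memoryGb) {
        boolean satisfies(Resources requested) {
            return vcpu >= requested.vcpu && memoryGb >= requested.memoryGb;
        }
        double distanceTo(Resources other) {
            double dCpu = vcpu - other.vcpu, dMem = memoryGb - other.memoryGb;
            return Math.sqrt(dCpu * dCpu + dMem * dMem);
        }
    }

    static Optional<Resources> nearestFlavor(List<Resources> flavors, Resources requested) {
        return flavors.stream()
                      .filter(flavor -> flavor.satisfies(requested))
                      .min(Comparator.comparingDouble(flavor -> flavor.distanceTo(requested)));
    }

    public static void main(String[] args) {
        List<Resources> flavors = List.of(new Resources(2, 8), new Resources(20, 40), new Resources(20, 64));
        System.out.println(nearestFlavor(flavors, new Resources(20, 37))); // nearest satisfying flavor
        System.out.println(nearestFlavor(flavors, new Resources(64, 37))); // Optional.empty: nothing satisfies
    }
}
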
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
index f1cf33d0477..b7d96dbe3d2 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
@@ -61,7 +61,7 @@ public class AutoscalingTest {
fixture.loader().applyCpuLoad(0.7f, 10);
var scaledResources = fixture.tester().assertResources("Scaling up since resource usage is too high",
- 9, 1, 3.6, 7.7, 31.7,
+ 8, 1, 4.0, 9.3, 36.2,
fixture.autoscale());
fixture.deploy(Capacity.from(scaledResources));
@@ -83,7 +83,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(7));
fixture.loader().applyCpuLoad(0.1f, 10);
fixture.tester().assertResources("Scaling cpu down since usage has gone down significantly",
- 8, 1, 1.0, 7.3, 22.1,
+ 7, 1, 1.1, 8.7, 25.4,
fixture.autoscale());
}
@@ -107,7 +107,7 @@ public class AutoscalingTest {
fixture.loader().applyLoad(new Load(0.1, 0.1, 0.1), 3);
fixture.loader().applyLoad(new Load(1.0, 1.0, 1.0), 1);
fixture.tester().assertResources("Scaling up since resource usage is too high",
- 9, 1, 4.7, 14.8, 66.0,
+ 8, 1, 5.3, 17.5, 75.4,
fixture.autoscale());
}
@@ -167,7 +167,7 @@ public class AutoscalingTest {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).build();
fixture.loader().applyLoad(new Load(1.0, 0.1, 1.0), 10);
fixture.tester().assertResources("Scaling up (only) since resource usage is too high",
- 8, 1, 7.1, 8.8, 75.4,
+ 8, 1, 7.1, 9.3, 75.4,
fixture.autoscale());
}
@@ -199,7 +199,7 @@ public class AutoscalingTest {
fixture.loader().applyCpuLoad(0.70, 1);
fixture.loader().applyCpuLoad(0.01, 100);
fixture.tester().assertResources("Scaling up since peak resource usage is too high",
- 9, 1, 3.8, 7.7, 31.7,
+ 8, 1, 4.3, 9.3, 36.2,
fixture.autoscale());
}
@@ -210,7 +210,7 @@ public class AutoscalingTest {
fixture.loader().applyCpuLoad(0.70, 1);
fixture.loader().applyCpuLoad(0.01, 100);
fixture.tester().assertResources("Scaling up since peak resource usage is too high",
- 10, 1, 4, 8.0, 22.7,
+ 9, 1, 4, 16.0, 25.5,
fixture.autoscale());
}
@@ -221,7 +221,7 @@ public class AutoscalingTest {
fixture.loader().applyCpuLoad(0.70, 1);
fixture.loader().applyCpuLoad(0.01, 100);
fixture.tester().assertResources("Scaling up since peak resource usage is too high",
- 9, 1, 3.8, 8.0, 37.5,
+ 8, 1, 4.3, 9.7, 42.9,
fixture.autoscale());
}
@@ -283,7 +283,7 @@ public class AutoscalingTest {
new NodeResources(100, 1000, 1000, 1, DiskSpeed.any));
var capacity = Capacity.from(min, max);
ClusterResources scaledResources = fixture.tester().assertResources("Scaling up",
- 13, 1, 1.5, 26.7, 26.7,
+ 13, 1, 1.5, 29.1, 26.7,
fixture.autoscale(capacity));
assertEquals("Disk speed from new capacity is used",
DiskSpeed.any, scaledResources.nodeResources().diskSpeed());
@@ -358,7 +358,7 @@ public class AutoscalingTest {
.build();
NodeResources defaultResources =
- new CapacityPolicies(fixture.tester().nodeRepository()).defaultNodeResources(fixture.clusterSpec, fixture.applicationId);
+ new CapacityPolicies(fixture.tester().nodeRepository()).specifyFully(NodeResources.unspecified(), fixture.clusterSpec, fixture.applicationId);
fixture.tester().assertResources("Min number of nodes and default resources",
2, 1, defaultResources,
@@ -401,7 +401,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(0.4, 240);
fixture.tester().assertResources("Scaling cpu up",
- 8, 4, 4.6, 4.0, 10.0,
+ 8, 4, 4.6, 4.2, 10.0,
fixture.autoscale());
}
@@ -446,7 +446,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(1.0, 120);
fixture.tester().assertResources("Suggesting above capacity limit",
- 8, 1, 6.2, 7.0, 29.0,
+ 8, 1, 6.2, 7.4, 29.0,
fixture.tester().suggest(fixture.applicationId, fixture.clusterSpec.id(), min, min));
}
@@ -492,7 +492,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(0.9, 120);
fixture.tester().assertResources("Scaling up to 2 nodes, scaling memory and disk down at the same time",
- 10, 5, 7.7, 39.3, 38.5,
+ 10, 5, 7.7, 41.5, 38.5,
fixture.autoscale());
}
@@ -528,7 +528,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(timePassed.negated());
fixture.loader().addLoadMeasurements(10, t -> t == 0 ? 200.0 : 100.0, t -> 10.0);
fixture.tester().assertResources("Scaling up cpu, others down, changing to 1 group is cheaper",
- 9, 1, 2.5, 30.7, 30.1,
+ 7, 1, 3.2, 43.3, 40.1,
fixture.autoscale());
}
@@ -548,7 +548,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(timePassed.negated());
fixture.loader().addLoadMeasurements(10, t -> t == 0 ? 20.0 : 10.0, t -> 100.0);
fixture.tester().assertResources("Scaling down since resource usage is too high, changing to 1 group is cheaper",
- 6, 1, 1.0, 49.1, 48.1,
+ 5, 1, 1.0, 62.6, 60.1,
fixture.autoscale());
}
@@ -565,7 +565,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(1));
fixture.loader().applyMemLoad(1.0, 1000);
fixture.tester().assertResources("Increase group size to reduce memory load",
- 8, 2, 13.9, 94.5, 60.1,
+ 8, 2, 13.9, 96.3, 60.1,
fixture.autoscale());
}
@@ -594,7 +594,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofHours(12 * 3 + 1));
fixture.loader().applyCpuLoad(0.02, 120);
fixture.tester().assertResources("Scaling down since enough time has passed",
- 3, 1, 1.0, 24.6, 101.4,
+ 3, 1, 1.0, 23.6, 101.4,
fixture.autoscale());
}
@@ -638,7 +638,7 @@ public class AutoscalingTest {
fixture.loader().addCpuMeasurements(0.25, 200);
fixture.tester().assertResources("Scale up since we assume we need 2x cpu for growth when no data scaling time data",
- 10, 1, 1.2, 5.5, 22.5,
+ 8, 1, 1.5, 7.4, 29.0,
fixture.autoscale());
fixture.setScalingDuration(Duration.ofMinutes(5));
@@ -647,7 +647,7 @@ public class AutoscalingTest {
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.25, 200);
fixture.tester().assertResources("Scale down since observed growth is slower than scaling time",
- 10, 1, 1.0, 5.5, 22.5,
+ 8, 1, 1.3, 7.4, 29.0,
fixture.autoscale());
fixture.setScalingDuration(Duration.ofMinutes(60));
@@ -658,7 +658,7 @@ public class AutoscalingTest {
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.25, 200);
fixture.tester().assertResources("Scale up since observed growth is faster than scaling time",
- 9, 1, 1.4, 6.1, 25.3,
+ 8, 1, 1.6, 7.4, 29.0,
fixture.autoscale());
}
@@ -670,12 +670,12 @@ public class AutoscalingTest {
fixture.setScalingDuration(Duration.ofMinutes(60));
fixture.tester().clock().advance(Duration.ofDays(2));
Duration timeAdded = fixture.loader().addLoadMeasurements(100,
- t -> scalingFactor * (100.0 + (t < 50 ? t * t * t : 125000 - (t - 49) * (t - 49) * (t - 49))),
+ t -> scalingFactor * (100.0 + (t < 50 ? t * t * t : 155000 - (t - 49) * (t - 49) * (t - 49))),
t -> 0.0);
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.7, 200);
fixture.tester().assertResources("Scale up slightly since observed growth is faster than scaling time, but we are not confident",
- 10, 1, 1.0, 5.5, 22.5,
+ 8, 1, 1.3, 7.4, 29.0,
fixture.autoscale());
}
@@ -693,7 +693,7 @@ public class AutoscalingTest {
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.4, 200);
fixture.tester.assertResources("Query and write load is equal -> scale up somewhat",
- 10, 1, 1.4, 5.5, 22.5,
+ 8, 1, 1.8, 7.4, 29.0,
fixture.autoscale());
fixture.tester().clock().advance(Duration.ofDays(2));
@@ -702,7 +702,7 @@ public class AutoscalingTest {
fixture.loader().addCpuMeasurements(0.4, 200);
// TODO: Actually, we scale down here - why?
fixture.tester().assertResources("Query load is 4x write load -> scale up more",
- 10, 1, 1.3, 5.5, 22.5,
+ 8, 1, 1.6, 7.4, 29.0,
fixture.autoscale());
fixture.tester().clock().advance(Duration.ofDays(2));
@@ -710,7 +710,7 @@ public class AutoscalingTest {
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.4, 200);
fixture.tester().assertResources("Write load is 10x query load -> scale down",
- 6, 1, 1.1, 9.8, 40.5,
+ 6, 1, 1.1, 10.0, 40.5,
fixture.autoscale());
fixture.tester().clock().advance(Duration.ofDays(2));
@@ -718,7 +718,7 @@ public class AutoscalingTest {
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.4, 200);
fixture.tester().assertResources("Query only -> largest possible",
- 9, 1, 2.7, 6.1, 25.3,
+ 8, 1, 3.1, 7.4, 29.0,
fixture.autoscale());
fixture.tester().clock().advance(Duration.ofDays(2));
@@ -726,7 +726,7 @@ public class AutoscalingTest {
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.4, 200);
fixture.tester().assertResources("Write only -> smallest possible",
- 4, 1, 1.1, 16.4, 67.6,
+ 4, 1, 1.1, 16.1, 67.6,
fixture.autoscale());
}
@@ -781,7 +781,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(1.0, 1.0, 1.0), 200);
fixture.tester().assertResources("We scale even in dev because resources are 'required'",
- 3, 1, 1.0, 12.3, 62.5,
+ 3, 1, 1.0, 13.4, 62.5,
fixture.autoscale());
}
@@ -851,7 +851,7 @@ public class AutoscalingTest {
fixture.loader().applyLoad(new Load(0.06, 0.52, 0.27), 100);
var autoscaling = fixture.autoscale();
fixture.tester().assertResources("Scaling down",
- 7, 1, 2, 14.7, 384.0,
+ 7, 1, 2, 14.5, 384.0,
autoscaling);
fixture.deploy(Capacity.from(autoscaling.resources().get()));
assertEquals("Initial nodes are kept", initialNodes, fixture.nodes().asList());
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingUsingBcpGroupInfoTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingUsingBcpGroupInfoTest.java
index 704491ed44f..d748280cba2 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingUsingBcpGroupInfoTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingUsingBcpGroupInfoTest.java
@@ -32,7 +32,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.1, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 9, 1, 3.6, 6.1, 25.3,
+ 8, 1, 4.0, 7.4, 29.0,
fixture.autoscale());
// Higher query rate
@@ -40,7 +40,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(200, 1.1, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 9, 1, 7.1, 6.1, 25.3,
+ 8, 1, 8.0, 7.4, 29.0,
fixture.autoscale());
// Higher headroom
@@ -48,7 +48,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.3, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 9, 1, 4.2, 6.1, 25.3,
+ 8, 1, 4.8, 7.4, 29.0,
fixture.autoscale());
// Higher per query cost
@@ -56,7 +56,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.1, 0.45));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 9, 1, 5.4, 6.1, 25.3,
+ 8, 1, 6.0, 7.4, 29.0,
fixture.autoscale());
// Bcp elsewhere is 0 - use local only
@@ -64,7 +64,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(0, 1.1, 0.45));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling using local info",
- 8, 1, 1, 7.0, 29.0,
+ 8, 1, 1, 7.4, 29.0,
fixture.autoscale());
}
@@ -85,7 +85,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.1, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 3, 3, 10.5, 41.0, 168.9,
+ 3, 3, 10.5, 38.4, 168.9,
fixture.autoscale());
// Higher query rate
@@ -93,7 +93,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(200, 1.1, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 3, 3, 20.9, 41.0, 168.9,
+ 3, 3, 20.9, 38.4, 168.9,
fixture.autoscale());
// Higher headroom
@@ -101,7 +101,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.3, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 3, 3, 12.4, 41.0, 168.9,
+ 3, 3, 12.4, 38.4, 168.9,
fixture.autoscale());
// Higher per query cost
@@ -109,7 +109,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.1, 0.45));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 3, 3, 15.7, 41.0, 168.9,
+ 3, 3, 15.7, 38.4, 168.9,
fixture.autoscale());
}
@@ -186,7 +186,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(200, 1.3, 0.45));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 8, 1, 14.2, 7.0, 29.0,
+ 8, 1, 14.2, 7.4, 29.0,
fixture.autoscale());
// Some local traffic
@@ -196,7 +196,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.tester().clock().advance(duration1.negated());
fixture.loader().addQueryRateMeasurements(10, __ -> 10.0);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 8, 1, 6.9, 7.0, 29.0,
+ 8, 1, 6.9, 7.4, 29.0,
fixture.autoscale());
// Enough local traffic to get half the votes
@@ -206,7 +206,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.tester().clock().advance(duration2.negated());
fixture.loader().addQueryRateMeasurements(10, __ -> 50.0);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 9, 1, 2.7, 6.1, 25.3,
+ 8, 1, 3.0, 7.4, 29.0,
fixture.autoscale());
// Mostly local
@@ -216,7 +216,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.tester().clock().advance(duration3.negated());
fixture.loader().addQueryRateMeasurements(10, __ -> 90.0);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 9, 1, 2.1, 6.1, 25.3,
+ 8, 1, 2.4, 7.4, 29.0,
fixture.autoscale());
// Local only
@@ -226,7 +226,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.tester().clock().advance(duration4.negated());
fixture.loader().addQueryRateMeasurements(10, __ -> 100.0);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 9, 1, 2.0, 6.1, 25.3,
+ 8, 1, 2.3, 7.4, 29.0,
fixture.autoscale());
// No group info, should be the same as the above
@@ -236,7 +236,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.tester().clock().advance(duration5.negated());
fixture.loader().addQueryRateMeasurements(10, __ -> 100.0);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 9, 1, 2.0, 6.1, 25.3,
+ 8, 1, 2.3, 7.4, 29.0,
fixture.autoscale());
// 40 query rate, no group info (for reference to the below)
@@ -246,7 +246,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.tester().clock().advance(duration6.negated());
fixture.loader().addQueryRateMeasurements(10, __ -> 40.0);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 9, 1, 1.4, 6.1, 25.3,
+ 8, 1, 1.6, 7.4, 29.0,
fixture.autoscale());
// Local query rate is too low but global is even lower so disregard it, giving the same as above
@@ -256,7 +256,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.tester().clock().advance(duration7.negated());
fixture.loader().addQueryRateMeasurements(10, __ -> 40.0);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 9, 1, 1.4, 6.1, 25.3,
+ 8, 1, 1.6, 7.4, 29.0,
fixture.autoscale());
// Local query rate is too low to be fully confident, and so is global but as it is slightly larger, incorporate it slightly
@@ -266,7 +266,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.tester().clock().advance(duration8.negated());
fixture.loader().addQueryRateMeasurements(10, __ -> 40.0);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 9, 1, 1.8, 6.1, 25.3,
+ 8, 1, 2.0, 7.4, 29.0,
fixture.autoscale());
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
index ed00134af55..ec084014a6a 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
@@ -5,12 +5,17 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.NodeFlavors;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.Zone;
import com.yahoo.test.ManualClock;
+import com.yahoo.vespa.curator.mock.MockCurator;
+import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Application;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.applications.Status;
+import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
+import com.yahoo.vespa.hosted.provision.testutils.MockNodeRepository;
import org.junit.Test;
import java.time.Duration;
@@ -84,12 +89,11 @@ public class ClusterModelTest {
private ClusterModel clusterModel(Status status, IntFunction<Double> queryRate, IntFunction<Double> writeRate) {
ManualClock clock = new ManualClock();
- Zone zone = Zone.defaultZone();
Application application = Application.empty(ApplicationId.from("t1", "a1", "i1"));
ClusterSpec clusterSpec = clusterSpec();
Cluster cluster = cluster(resources());
application = application.with(cluster);
- return new ClusterModel(zone,
+ return new ClusterModel(new ProvisioningTester.Builder().build().nodeRepository(),
application.with(status),
clusterSpec, cluster, clock, Duration.ofMinutes(10),
timeseries(cluster,100, queryRate, writeRate, clock),
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java
index 5d1fd58489b..b150b372fe8 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java
@@ -10,10 +10,12 @@ import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.Flavor;
+import com.yahoo.config.provision.NodeFlavors;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.Zone;
+import com.yahoo.vespa.curator.mock.MockCurator;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.flags.custom.HostResources;
@@ -27,6 +29,8 @@ import com.yahoo.vespa.hosted.provision.autoscale.awsnodes.AwsHostResourcesCalcu
import com.yahoo.vespa.hosted.provision.autoscale.awsnodes.AwsNodeTypes;
import com.yahoo.vespa.hosted.provision.provisioning.DynamicProvisioningTester;
import com.yahoo.vespa.hosted.provision.provisioning.HostResourcesCalculator;
+import com.yahoo.vespa.hosted.provision.testutils.MockNodeRepository;
+
import java.time.Duration;
import java.util.Arrays;
import java.util.List;
@@ -40,14 +44,12 @@ import java.util.Optional;
public class Fixture {
final DynamicProvisioningTester tester;
- final Zone zone;
final ApplicationId applicationId;
final ClusterSpec clusterSpec;
final Capacity capacity;
final Loader loader;
public Fixture(Fixture.Builder builder, Optional<ClusterResources> initialResources, int hostCount) {
- zone = builder.zone;
applicationId = builder.application;
clusterSpec = builder.cluster;
capacity = builder.capacity;
@@ -80,7 +82,7 @@ public class Fixture {
public Capacity capacity() { return capacity; }
public ClusterModel clusterModel() {
- return new ClusterModel(zone,
+ return new ClusterModel(tester.nodeRepository(),
application(),
clusterSpec,
cluster(),
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java
index d75f51680d7..3c459871490 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java
@@ -154,10 +154,10 @@ public class AutoscalingMaintainerTest {
@Test
public void test_toString() {
- assertEquals("4 nodes with [vcpu: 1.0, memory: 2.0 Gb, disk 4.0 Gb, bandwidth: 1.0 Gbps, architecture: any] (total: [vcpu: 4.0, memory: 8.0 Gb, disk 16.0 Gb, bandwidth: 4.0 Gbps, architecture: any])",
+ assertEquals("4 nodes with [vcpu: 1.0, memory: 2.0 Gb, disk: 4.0 Gb, bandwidth: 1.0 Gbps, architecture: any] (total: [vcpu: 4.0, memory: 8.0 Gb, disk: 16.0 Gb, bandwidth: 4.0 Gbps, architecture: any])",
AutoscalingMaintainer.toString(new ClusterResources(4, 1, new NodeResources(1, 2, 4, 1))));
- assertEquals("4 nodes (in 2 groups) with [vcpu: 1.0, memory: 2.0 Gb, disk 4.0 Gb, bandwidth: 1.0 Gbps, architecture: any] (total: [vcpu: 4.0, memory: 8.0 Gb, disk 16.0 Gb, bandwidth: 4.0 Gbps, architecture: any])",
+ assertEquals("4 nodes (in 2 groups) with [vcpu: 1.0, memory: 2.0 Gb, disk: 4.0 Gb, bandwidth: 1.0 Gbps, architecture: any] (total: [vcpu: 4.0, memory: 8.0 Gb, disk: 16.0 Gb, bandwidth: 4.0 Gbps, architecture: any])",
AutoscalingMaintainer.toString(new ClusterResources(4, 2, new NodeResources(1, 2, 4, 1))));
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java
index 487355a0b75..de2c060a0eb 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java
@@ -19,8 +19,11 @@ import com.yahoo.vespa.curator.stats.LockStats;
import com.yahoo.vespa.hosted.provision.LockedNodeList;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeRepository;
+import com.yahoo.vespa.hosted.provision.autoscale.Autoscaling;
+import com.yahoo.vespa.hosted.provision.autoscale.Load;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.Allocation;
+import com.yahoo.vespa.hosted.provision.node.ClusterId;
import com.yahoo.vespa.hosted.provision.node.Generation;
import com.yahoo.vespa.hosted.provision.node.IP;
import com.yahoo.vespa.hosted.provision.provisioning.FlavorConfigBuilder;
@@ -240,7 +243,7 @@ public class MetricsReporterTest {
}
@Test
- public void non_active_metric() {
+ public void node_and_cluster_metrics() {
ProvisioningTester tester = new ProvisioningTester.Builder().build();
tester.makeReadyHosts(5, new NodeResources(64, 256, 2000, 10));
tester.activateTenantHosts();
@@ -248,18 +251,36 @@ public class MetricsReporterTest {
MetricsReporter metricsReporter = metricsReporter(metric, tester);
// Application is deployed
- ApplicationId application = ApplicationId.from("t1", "a1", "default");
- Map<String, String> dimensions = Map.of("applicationId", application.toFullString());
+ ApplicationId applicationId = ApplicationId.from("t1", "a1", "default");
+ ClusterSpec clusterSpec = ProvisioningTester.contentClusterSpec();
NodeResources resources = new NodeResources(2, 8, 100, 1);
- List<Node> activeNodes = tester.deploy(application, ProvisioningTester.contentClusterSpec(), Capacity.from(new ClusterResources(4, 1, resources)));
+ Capacity capacity = Capacity.from(new ClusterResources(4, 1, resources));
+
+ List<Node> activeNodes = tester.deploy(applicationId, clusterSpec, capacity);
+ var application = tester.nodeRepository().applications().require(applicationId);
+ application = application.withCluster(clusterSpec.id(), false, capacity);
+ var cluster = application.cluster(clusterSpec.id()).get().withTarget(new Autoscaling(Autoscaling.Status.ideal,
+ "test",
+ Optional.empty(),
+ tester.clock().instant(),
+ Load.zero(),
+ new Load(0.1, 0.2, 0.3),
+ Autoscaling.Metrics.zero()));
+ tester.nodeRepository().applications().put(application.with(cluster), tester.nodeRepository().applications().lock(applicationId));
+
metricsReporter.maintain();
+ Map<String, String> dimensions = Map.of("applicationId", applicationId.toFullString());
assertEquals(0D, getMetric("nodes.nonActiveFraction", metric, dimensions));
assertEquals(4, getMetric("nodes.active", metric, dimensions));
assertEquals(0, getMetric("nodes.nonActive", metric, dimensions));
- Map<String, String> clusterDimensions = Map.of("applicationId", application.toFullString(),
- "clusterid", ProvisioningTester.contentClusterSpec().id().value());
+
+ Map<String, String> clusterDimensions = Map.of("applicationId", applicationId.toFullString(),
+ "clusterid", clusterSpec.id().value());
assertEquals(1.392, getMetric("cluster.cost", metric, clusterDimensions));
+ assertEquals(0.1, getMetric("cluster.load.ideal.cpu", metric, clusterDimensions));
+ assertEquals(0.2, getMetric("cluster.load.ideal.memory", metric, clusterDimensions));
+ assertEquals(0.3, getMetric("cluster.load.ideal.disk", metric, clusterDimensions));
// One node fails
tester.fail(activeNodes.get(0).hostname());
@@ -269,7 +290,7 @@ public class MetricsReporterTest {
assertEquals(1, getMetric("nodes.nonActive", metric, dimensions));
// Cluster is removed
- tester.deactivate(application);
+ tester.deactivate(applicationId);
metricsReporter.maintain();
assertEquals(1D, getMetric("nodes.nonActiveFraction", metric, dimensions).doubleValue(), Double.MIN_VALUE);
assertEquals(0, getMetric("nodes.active", metric, dimensions));
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
index f73d6f2ce01..1b677224295 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
@@ -73,9 +73,9 @@ public class ScalingSuggestionsMaintainerTest {
new TestMetric());
maintainer.maintain();
- assertEquals("8 nodes with [vcpu: 3.2, memory: 4.5 Gb, disk 10.0 Gb, bandwidth: 0.1 Gbps, architecture: any]",
+ assertEquals("8 nodes with [vcpu: 3.2, memory: 4.5 Gb, disk: 10.0 Gb, bandwidth: 0.1 Gbps, architecture: any]",
suggestionOf(app1, cluster1, tester).resources().get().toString());
- assertEquals("8 nodes with [vcpu: 3.6, memory: 4.4 Gb, disk 11.8 Gb, bandwidth: 0.1 Gbps, architecture: any]",
+ assertEquals("8 nodes with [vcpu: 3.6, memory: 4.7 Gb, disk: 11.8 Gb, bandwidth: 0.1 Gbps, architecture: any]",
suggestionOf(app2, cluster2, tester).resources().get().toString());
// Utilization goes way down
@@ -83,14 +83,14 @@ public class ScalingSuggestionsMaintainerTest {
addMeasurements(0.10f, 0.10f, 0.10f, 0, 500, app1, tester.nodeRepository());
maintainer.maintain();
assertEquals("Suggestion stays at the peak value observed",
- "8 nodes with [vcpu: 3.2, memory: 4.5 Gb, disk 10.0 Gb, bandwidth: 0.1 Gbps, architecture: any]",
+ "8 nodes with [vcpu: 3.2, memory: 4.5 Gb, disk: 10.0 Gb, bandwidth: 0.1 Gbps, architecture: any]",
suggestionOf(app1, cluster1, tester).resources().get().toString());
// Utilization is still way down and a week has passed
tester.clock().advance(Duration.ofDays(7));
addMeasurements(0.10f, 0.10f, 0.10f, 0, 500, app1, tester.nodeRepository());
maintainer.maintain();
assertEquals("Peak suggestion has been outdated",
- "3 nodes with [vcpu: 1.2, memory: 4.0 Gb, disk 10.0 Gb, bandwidth: 0.1 Gbps, architecture: any]",
+ "3 nodes with [vcpu: 1.2, memory: 4.0 Gb, disk: 10.0 Gb, bandwidth: 0.1 Gbps, architecture: any]",
suggestionOf(app1, cluster1, tester).resources().get().toString());
assertTrue(shouldSuggest(app1, cluster1, tester));
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
index 28cd3067155..3107d9738a9 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
@@ -525,14 +525,14 @@ public class ProvisioningTest {
tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 2, 10, 20),
resources(6, 3, 3, 15, 25)));
tester.assertNodes("Allocation preserving resources within new limits",
- 6, 2, 3, 8.0/4*21 / (6.0/2), 25,
+ 6, 2, 3, 14.57, 25,
app1, cluster1);
// Widening window does not change allocation
tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 1, 5, 15),
resources(8, 4, 4, 21, 30)));
tester.assertNodes("Same allocation",
- 6, 2, 3, 8.0/4*21 / (6.0/2), 25,
+ 6, 2, 3, 14.57, 25,
app1, cluster1);
// Changing limits in opposite directions cause a mixture of min and max
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java
index f40c8037f41..0b4d345b8a5 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java
@@ -440,7 +440,7 @@ public class VirtualNodeProvisioningTest {
catch (Exception e) {
assertEquals("No room for 3 nodes as 2 of 4 hosts are exclusive",
"Could not satisfy request for 3 nodes with " +
- "[vcpu: 2.0, memory: 4.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, architecture: any] " +
+ "[vcpu: 2.0, memory: 4.0 Gb, disk: 100.0 Gb, bandwidth: 1.0 Gbps, architecture: any] " +
"in tenant2.app2 container cluster 'my-container' 6.39: " +
"Node allocation failure on group 0: " +
"Not enough suitable nodes available due to host exclusivity constraints",
@@ -467,7 +467,7 @@ public class VirtualNodeProvisioningTest {
}
catch (NodeAllocationException e) {
assertEquals("Could not satisfy request for 2 nodes with " +
- "[vcpu: 1.0, memory: 4.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, storage type: remote, architecture: any] " +
+ "[vcpu: 1.0, memory: 4.0 Gb, disk: 100.0 Gb, bandwidth: 1.0 Gbps, storage type: remote, architecture: any] " +
"in tenant.app1 content cluster 'my-content'" +
" 6.42: Node allocation failure on group 0",
e.getMessage());
@@ -549,8 +549,8 @@ public class VirtualNodeProvisioningTest {
}
catch (IllegalArgumentException e) {
assertEquals("No allocation possible within limits: " +
- "from 2 nodes with [vcpu: 1.0, memory: 5.0 Gb, disk 10.0 Gb, bandwidth: 1.0 Gbps, architecture: any] " +
- "to 4 nodes with [vcpu: 1.0, memory: 5.0 Gb, disk 10.0 Gb, bandwidth: 1.0 Gbps, architecture: any]",
+ "from 2 nodes with [vcpu: 1.0, memory: 5.0 Gb, disk: 10.0 Gb, bandwidth: 1.0 Gbps, architecture: any] " +
+ "to 4 nodes with [vcpu: 1.0, memory: 5.0 Gb, disk: 10.0 Gb, bandwidth: 1.0 Gbps, architecture: any]",
e.getMessage());
}
}
@@ -573,9 +573,9 @@ public class VirtualNodeProvisioningTest {
}
catch (IllegalArgumentException e) {
assertEquals("No allocation possible within limits: " +
- "from 2 nodes with [vcpu: 20.0, memory: 37.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, architecture: any] " +
- "to 4 nodes with [vcpu: 20.0, memory: 37.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, architecture: any]. " +
- "Nearest allowed node resources: [vcpu: 20.0, memory: 40.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, storage type: remote, architecture: any]",
+ "from 2 nodes with [vcpu: 20.0, memory: 37.0 Gb, disk: 100.0 Gb, bandwidth: 1.0 Gbps, architecture: any] " +
+ "to 4 nodes with [vcpu: 20.0, memory: 37.0 Gb, disk: 100.0 Gb, bandwidth: 1.0 Gbps, architecture: any]. " +
+ "Nearest allowed node resources: [vcpu: 20.0, memory: 40.0 Gb, disk: 100.0 Gb, bandwidth: 1.0 Gbps, storage type: remote, architecture: any]",
e.getMessage());
}
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/capacity-zone.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/capacity-zone.json
index 0e14dd8b36f..87b823fbb33 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/capacity-zone.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/capacity-zone.json
@@ -3,7 +3,7 @@
"couldLoseHosts": 4,
"failedTenantParent": "dockerhost1.yahoo.com",
"failedTenant": "host4.yahoo.com",
- "failedTenantResources": "[vcpu: 1.0, memory: 4.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
+ "failedTenantResources": "[vcpu: 1.0, memory: 4.0 Gb, disk: 100.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
"failedTenantAllocation": "allocated to tenant3.application3.instance3 as 'content/id3/0/0/stateful'",
"hostCandidateRejectionReasons": {
"singularReasonFailures": {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-container1.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-container1.json
index 8ef88eae97d..b2212179647 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-container1.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-container1.json
@@ -5,7 +5,7 @@
"type": "tenant",
"hostname": "test-node-pool-102-2",
"parentHostname": "dockerhost3.yahoo.com",
- "flavor": "[vcpu: 1.0, memory: 4.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, storage type: remote, architecture: x86_64]",
+ "flavor": "[vcpu: 1.0, memory: 4.0 Gb, disk: 100.0 Gb, bandwidth: 1.0 Gbps, storage type: remote, architecture: x86_64]",
"resources":{"vcpu":1.0,"memoryGb":4.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
"realResources":{"vcpu":1.0,"memoryGb":4.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
"environment": "DOCKER_CONTAINER",
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node1.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node1.json
index d90ed692f1c..0cd34f3551e 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node1.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node1.json
@@ -4,7 +4,7 @@
"state": "active",
"type": "tenant",
"hostname": "host1.yahoo.com",
- "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
+ "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk: 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
"resources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
"realResources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
"environment": "DOCKER_CONTAINER",
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node10.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node10.json
index bec194ea325..4125f6e4913 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node10.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node10.json
@@ -5,7 +5,7 @@
"type": "tenant",
"hostname": "host10.yahoo.com",
"parentHostname": "parent1.yahoo.com",
- "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
+ "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk: 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
"resources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
"realResources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
"environment": "DOCKER_CONTAINER",
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node11.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node11.json
index d7e07f02f3a..892bb412edf 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node11.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node11.json
@@ -5,7 +5,7 @@
"type": "tenant",
"hostname": "host11.yahoo.com",
"parentHostname": "parent.host.yahoo.com",
- "flavor": "[vcpu: 1.0, memory: 1.0 Gb, disk 100.0 Gb, bandwidth: 0.3 Gbps, architecture: any]",
+ "flavor": "[vcpu: 1.0, memory: 1.0 Gb, disk: 100.0 Gb, bandwidth: 0.3 Gbps, architecture: any]",
"resources":{"vcpu":1.0,"memoryGb":1.0,"diskGb":100.0,"bandwidthGbps":0.3,"diskSpeed":"fast","storageType":"any","architecture":"any"},
"realResources":{"vcpu":1.0,"memoryGb":1.0,"diskGb":100.0,"bandwidthGbps":0.3,"diskSpeed":"fast","storageType":"any","architecture":"any"},
"environment": "DOCKER_CONTAINER",
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node13.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node13.json
index 73c34a7fa9e..f5152efd7cb 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node13.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node13.json
@@ -4,7 +4,7 @@
"state": "active",
"type": "tenant",
"hostname": "host13.yahoo.com",
- "flavor": "[vcpu: 10.0, memory: 48.0 Gb, disk 500.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
+ "flavor": "[vcpu: 10.0, memory: 48.0 Gb, disk: 500.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
"resources":{"vcpu":10.0,"memoryGb":48.0,"diskGb":500.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
"realResources":{"vcpu":10.0,"memoryGb":48.0,"diskGb":500.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
"environment": "DOCKER_CONTAINER",
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node14.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node14.json
index abb0ba57e49..f48e52b18bf 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node14.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node14.json
@@ -4,7 +4,7 @@
"state": "active",
"type": "tenant",
"hostname": "host14.yahoo.com",
- "flavor": "[vcpu: 10.0, memory: 48.0 Gb, disk 500.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
+ "flavor": "[vcpu: 10.0, memory: 48.0 Gb, disk: 500.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
"resources":{"vcpu":10.0,"memoryGb":48.0,"diskGb":500.0,"bandwidthGbps":1.0, "diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
"realResources":{"vcpu":10.0,"memoryGb":48.0,"diskGb":500.0,"bandwidthGbps":1.0, "diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
"environment": "DOCKER_CONTAINER",
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node2.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node2.json
index 9cd675163f0..815a9a3d2d2 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node2.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node2.json
@@ -4,7 +4,7 @@
"state": "active",
"type": "tenant",
"hostname": "host2.yahoo.com",
- "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
+ "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk: 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
"resources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
"realResources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
"environment": "DOCKER_CONTAINER",
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node3.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node3.json
index 1c560c2f95b..23f3594bba4 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node3.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node3.json
@@ -4,7 +4,7 @@
"state": "ready",
"type": "tenant",
"hostname": "host3.yahoo.com",
- "flavor": "[vcpu: 0.5, memory: 48.0 Gb, disk 500.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
+ "flavor": "[vcpu: 0.5, memory: 48.0 Gb, disk: 500.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
"resources":{"vcpu":0.5,"memoryGb":48.0,"diskGb":500.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
"realResources":{"vcpu":0.5,"memoryGb":48.0,"diskGb":500.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
"environment": "DOCKER_CONTAINER",
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-wg.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-wg.json
index a1883ba4b25..0993c3fbcc2 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-wg.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-wg.json
@@ -5,7 +5,7 @@
"type": "tenant",
"hostname": "host4.yahoo.com",
"parentHostname": "dockerhost1.yahoo.com",
- "flavor": "[vcpu: 1.0, memory: 4.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
+ "flavor": "[vcpu: 1.0, memory: 4.0 Gb, disk: 100.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
"resources":{"vcpu":1.0,"memoryGb":4.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
"realResources":{"vcpu":1.0,"memoryGb":4.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
"environment": "DOCKER_CONTAINER",
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-with-hostnames.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-with-hostnames.json
index 50007fd6610..8232e6c7085 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-with-hostnames.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-with-hostnames.json
@@ -5,7 +5,7 @@
"type": "tenant",
"hostname": "host4.yahoo.com",
"parentHostname": "dockerhost1.yahoo.com",
- "flavor": "[vcpu: 1.0, memory: 4.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
+ "flavor": "[vcpu: 1.0, memory: 4.0 Gb, disk: 100.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
"resources":{"vcpu":1.0,"memoryGb":4.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
"realResources":{"vcpu":1.0,"memoryGb":4.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
"environment": "DOCKER_CONTAINER",
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4.json
index f206adf4366..5f25bac926b 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4.json
@@ -5,7 +5,7 @@
"type": "tenant",
"hostname": "host4.yahoo.com",
"parentHostname": "dockerhost1.yahoo.com",
- "flavor": "[vcpu: 1.0, memory: 4.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
+ "flavor": "[vcpu: 1.0, memory: 4.0 Gb, disk: 100.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
"resources":{"vcpu":1.0,"memoryGb":4.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
"realResources":{"vcpu":1.0,"memoryGb":4.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
"environment": "DOCKER_CONTAINER",
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5-after-changes.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5-after-changes.json
index 77dd81c736a..18829647a2a 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5-after-changes.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5-after-changes.json
@@ -5,7 +5,7 @@
"type": "tenant",
"hostname": "host5.yahoo.com",
"parentHostname": "dockerhost2.yahoo.com",
- "flavor": "[vcpu: 1.0, memory: 8.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, disk speed: slow, storage type: remote, architecture: x86_64]",
+ "flavor": "[vcpu: 1.0, memory: 8.0 Gb, disk: 100.0 Gb, bandwidth: 1.0 Gbps, disk speed: slow, storage type: remote, architecture: x86_64]",
"resources":{"vcpu":1.0,"memoryGb":8.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"slow","storageType":"remote","architecture":"x86_64"},
"realResources":{"vcpu":1.0,"memoryGb":8.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"slow","storageType":"remote","architecture":"x86_64"},
"environment": "DOCKER_CONTAINER",
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5.json
index 8a397fe5faa..a2633c5d71c 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5.json
@@ -5,7 +5,7 @@
"type": "tenant",
"hostname": "host5.yahoo.com",
"parentHostname": "dockerhost2.yahoo.com",
- "flavor": "[vcpu: 1.0, memory: 8.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, disk speed: slow, storage type: remote, architecture: x86_64]",
+ "flavor": "[vcpu: 1.0, memory: 8.0 Gb, disk: 100.0 Gb, bandwidth: 1.0 Gbps, disk speed: slow, storage type: remote, architecture: x86_64]",
"resources":{"vcpu":1.0,"memoryGb":8.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"slow","storageType":"remote","architecture":"x86_64"},
"realResources":{"vcpu":1.0,"memoryGb":8.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"slow","storageType":"remote","architecture":"x86_64"},
"environment": "DOCKER_CONTAINER",
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node55.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node55.json
index 2bbe4a3024e..be0efad7f82 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node55.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node55.json
@@ -4,7 +4,7 @@
"state": "dirty",
"type": "tenant",
"hostname": "host55.yahoo.com",
- "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
+ "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk: 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
"resources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
"realResources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
"environment": "DOCKER_CONTAINER",
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node6.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node6.json
index 69316b1ca7f..b16b9dfe3cd 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node6.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node6.json
@@ -4,7 +4,7 @@
"state": "active",
"type": "tenant",
"hostname": "host6.yahoo.com",
- "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
+ "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk: 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
"resources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
"realResources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
"environment": "DOCKER_CONTAINER",
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node7.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node7.json
index 19fa81b82e0..806e1ddc705 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node7.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node7.json
@@ -4,7 +4,7 @@
"state": "provisioned",
"type": "tenant",
"hostname": "host7.yahoo.com",
- "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
+ "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk: 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
"resources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
"realResources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
"environment": "DOCKER_CONTAINER",
diff --git a/parent/pom.xml b/parent/pom.xml
index 71e0c35eb6c..f6fdaf20a7a 100644
--- a/parent/pom.xml
+++ b/parent/pom.xml
@@ -716,7 +716,7 @@
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-compress</artifactId>
- <version>1.22</version>
+ <version>1.23.0</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
diff --git a/screwdriver.yaml b/screwdriver.yaml
index 19fba104de6..76ec7f2aa71 100644
--- a/screwdriver.yaml
+++ b/screwdriver.yaml
@@ -48,13 +48,6 @@ shared:
fi
fi
- install-podman-multi-arch: &install-podman-multi-arch
- install-podman-multi-arch: |
- dnf install -y podman podman-docker buildah skopeo
- sed -i 's,.*netns.*=.*private.*,netns = "host",' /usr/share/containers/containers.conf
- touch /etc/containers/nodocker
- podman run --rm --quiet --cap-add SYS_ADMIN docker.io/multiarch/qemu-user-static --reset -p yes
-
jobs:
build-vespa:
requires: [~pr]
@@ -96,6 +89,10 @@ jobs:
screwdriver.cd/ram: 16
screwdriver.cd/disk: HIGH
screwdriver.cd/timeout: 90
+ screwdriver.cd/dockerEnabled: true
+ screwdriver.cd/dockerCpu: TURBO
+ screwdriver.cd/dockerRam: HIGH
+
environment:
LOCAL_MVN_REPO: "/tmp/vespa/mvnrepo"
VESPA_MAVEN_EXTRA_OPTS: "--show-version --batch-mode --no-snapshot-updates -Dmaven.repo.local=/tmp/vespa/mvnrepo"
@@ -117,7 +114,10 @@ jobs:
(got VESPA_VERSION=$VESPA_VERSION, VESPA_REF=$VESPA_REF, SYSTEM_TEST_REF=$SYSTEM_TEST_REF)."
exit 1
fi
- - *install-podman-multi-arch
+ - install-dependencies: |
+ dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
+ dnf -y install docker-ce docker-ce-cli containerd.io
+ docker system info
- checkout: |
mkdir -p workdir
cd workdir
@@ -170,22 +170,23 @@ jobs:
git archive HEAD --format tar | tar x -C docker/vespa-systemtests
cp -a $LOCAL_MVN_REPO docker/repository
cd docker
- buildah bud --file Dockerfile.systemtest \
- --build-arg VESPA_BASE_IMAGE=docker.io/vespaengine/vespa-systemtest-base-centos-stream8:latest \
- --build-arg SYSTEMTEST_BASE_IMAGE=vespa --build-arg SKIP_M2_POPULATE=false \
- --target systemtest \
- --tag docker.io/vespaengine/vespa-systemtest-centos-stream8:$VESPA_VERSION .
+ docker build --file Dockerfile.systemtest \
+ --build-arg VESPA_BASE_IMAGE=vespaengine/vespa-systemtest-base-centos-stream8:latest \
+ --build-arg SYSTEMTEST_BASE_IMAGE=vespa --build-arg SKIP_M2_POPULATE=false \
+ --target systemtest \
+ --tag vespaengine/vespa-systemtest-centos-stream8:$VESPA_VERSION \
+ --tag vespaengine/vespa-systemtest-centos-stream8:latest .
- verify-test-image: |
- podman run --rm -ti --entrypoint bash docker.io/vespaengine/vespa-systemtest-centos-stream8:$VESPA_VERSION -lc \
+ docker run --rm -ti --entrypoint bash vespaengine/vespa-systemtest-centos-stream8:$VESPA_VERSION -lc \
"/opt/vespa-systemtests/lib/node_server.rb & sleep 3 && ruby /opt/vespa-systemtests/tests/search/basicsearch/basic_search.rb --run test_basicsearch__ELASTIC"
- publish-test-image: |
if [[ -z "$SD_PULL_REQUEST" ]]; then
OPT_STATE="$(set +o)"
set +x
- buildah login --username aressem --password "$DOCKER_HUB_DEPLOY_KEY" docker.io
+ docker login --username aressem --password "$DOCKER_HUB_DEPLOY_KEY"
eval "$OPT_STATE"
- buildah push --format v2s2 docker.io/vespaengine/vespa-systemtest-centos-stream8:$VESPA_VERSION docker://docker.io/vespaengine/vespa-systemtest-centos-stream8:$VESPA_VERSION
- buildah push --format v2s2 docker.io/vespaengine/vespa-systemtest-centos-stream8:$VESPA_VERSION docker://docker.io/vespaengine/vespa-systemtest-centos-stream8:latest
+ docker push docker.io/vespaengine/vespa-systemtest-centos-stream8:$VESPA_VERSION
+ docker push docker.io/vespaengine/vespa-systemtest-centos-stream8:latest
fi
- *save-cache
- update-build-status: |
@@ -209,6 +210,10 @@ jobs:
screwdriver.cd/ram: 16
screwdriver.cd/disk: HIGH
screwdriver.cd/timeout: 300
+ screwdriver.cd/dockerEnabled: true
+ screwdriver.cd/dockerCpu: TURBO
+ screwdriver.cd/dockerRam: HIGH
+
screwdriver.cd/buildPeriodically: H 4,10,16,22 * * 1,2,3,4
secrets:
@@ -240,13 +245,16 @@ jobs:
return 1
fi
meta set vespa.version $VESPA_VERSION
- - *install-podman-multi-arch
+ - install-dependencies: |
+ dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
+ dnf install -y docker-ce docker-ce-cli containerd.io
+ docker system info
- release-java-artifacts: |
screwdriver/release-java-artifacts.sh $VESPA_VERSION $VESPA_REF
- release-rpms: |
screwdriver/release-rpms.sh $VESPA_VERSION $VESPA_REF
- release-container-image: |
- screwdriver/release-container-image.sh $VESPA_VERSION
+ screwdriver/release-container-image-docker.sh $VESPA_VERSION
- update-sample-apps: |
screwdriver/update-vespa-version-in-sample-apps.sh $VESPA_VERSION
- update-released-time: |
@@ -260,6 +268,9 @@ jobs:
screwdriver.cd/ram: 16
screwdriver.cd/disk: HIGH
screwdriver.cd/timeout: 300
+ screwdriver.cd/dockerEnabled: true
+ screwdriver.cd/dockerCpu: TURBO
+ screwdriver.cd/dockerRam: HIGH
screwdriver.cd/buildPeriodically: H 6 1 * *
environment:
@@ -276,7 +287,10 @@ jobs:
echo "Must have valid Vespa version to continue (got VESPA_VERSION=$VESPA_VERSION)."
return 1
fi
- - *install-podman-multi-arch
+ - install-dependencies: |
+ dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
+ dnf install -y docker-ce docker-ce-cli containerd.io
+ docker system info
- checkout: |
mkdir -p workdir
cd workdir
@@ -305,11 +319,11 @@ jobs:
RUN --mount=type=bind,target=/rpms/,source=. dnf reinstall -y /rpms/vespa*rpm && dnf clean all
USER vespa
EOF
- buildah bud --security-opt label=disable --network host --squash --build-arg VESPA_VERSION=$VESPA_VERSION --tag docker.io/$IMAGE_NAME:$VESPA_VERSION \
+ docker build --progress plain --build-arg VESPA_VERSION=$VESPA_VERSION --tag docker.io/$IMAGE_NAME:$VESPA_VERSION \
--tag docker.io/$IMAGE_NAME:latest --file Dockerfile .
- verify-container-image: |
# Trick to be able to use the documentation testing to verify the image built locally
- buildah tag docker.io/$IMAGE_NAME:$VESPA_VERSION vespaengine/vespa:latest
+ docker tag docker.io/$IMAGE_NAME:$VESPA_VERSION vespaengine/vespa:latest
# Run quick start guide
$SD_SOURCE_DIR/screwdriver/test-quick-start-guide.sh
- publish-image: |
@@ -319,10 +333,10 @@ jobs:
else
OPT_STATE="$(set +o)"
set +x
- buildah login --username aressem --password "$DOCKER_HUB_DEPLOY_KEY" docker.io
+ docker login --username aressem --password "$DOCKER_HUB_DEPLOY_KEY"
eval "$OPT_STATE"
- buildah push --format v2s2 docker.io/$IMAGE_NAME:$VESPA_VERSION docker://docker.io/$IMAGE_NAME:$VESPA_VERSION
- buildah push --format v2s2 docker.io/$IMAGE_NAME:$VESPA_VERSION docker://docker.io/$IMAGE_NAME:latest
+ docker push docker.io/$IMAGE_NAME:$VESPA_VERSION
+ docker push docker.io/$IMAGE_NAME:latest
fi
fi
@@ -333,6 +347,9 @@ jobs:
screwdriver.cd/ram: 16
screwdriver.cd/disk: HIGH
screwdriver.cd/timeout: 300
+ screwdriver.cd/dockerEnabled: true
+ screwdriver.cd/dockerCpu: TURBO
+ screwdriver.cd/dockerRam: HIGH
screwdriver.cd/buildPeriodically: H 6 1 * *
environment:
@@ -349,19 +366,32 @@ jobs:
echo "Must have valid Vespa version to continue (got VESPA_VERSION=$VESPA_VERSION)."
return 1
fi
- - *install-podman-multi-arch
+ - install-dependencies: |
+ dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
+ dnf -y install docker-ce docker-ce-cli containerd.io
+ docker system info
- checkout: |
git clone https://github.com/vespa-engine/docker-image
cd docker-image
- build-container-image: |
- buildah bud \
+ docker buildx install
+ unset DOCKER_HOST
+ docker context create vespa-context --docker "host=tcp://localhost:2376,ca=/certs/client/ca.pem,cert=/certs/client/cert.pem,key=/certs/client/key.pem"
+ docker context use vespa-context
+ docker buildx create --name vespa-builder --driver docker-container --use
+ docker buildx inspect --bootstrap
+ docker login --username aressem --password "$DOCKER_HUB_DEPLOY_KEY"
+ docker buildx build \
+ --progress plain \
+ --load \
+ --platform linux/amd64,linux/arm64 \
--build-arg VESPA_BASE_IMAGE=el9 \
--build-arg VESPA_VERSION=$VESPA_VERSION \
- --file Dockerfile \
- --jobs 2 \
- --layers=false \
- --manifest "$IMAGE_NAME:$VESPA_VERSION" \
- --platform linux/amd64,linux/arm64
+ --file Dockerfile \
+ --tag docker.io/vespaengine/$IMAGE_NAME:$VESPA_VERSION \
+ --tag docker.io/vespaengine/$IMAGE_NAME:$VESPA_MAJOR \
+ --tag docker.io/vespaengine/$IMAGE_NAME:latest \
+ .
- verify-container-image: |
# Trick to be able to use the documentation testing to verify the image built locally
buildah tag $IMAGE_NAME:$VESPA_VERSION vespaengine/vespa:latest
@@ -374,11 +404,19 @@ jobs:
else
OPT_STATE="$(set +o)"
set +x
- buildah login --username aressem --password "$DOCKER_HUB_DEPLOY_KEY" docker.io
+ docker login --username aressem --password "$DOCKER_HUB_DEPLOY_KEY"
eval "$OPT_STATE"
- buildah manifest push --all --format v2s2 $IMAGE_NAME:$VESPA_VERSION docker://docker.io/$IMAGE_NAME:$VESPA_VERSION
- buildah manifest push --all --format v2s2 $IMAGE_NAME:$VESPA_VERSION docker://docker.io/$IMAGE_NAME:$VESPA_MAJOR
- buildah manifest push --all --format v2s2 $IMAGE_NAME:$VESPA_VERSION docker://docker.io/$IMAGE_NAME:latest
+ docker buildx build \
+ --progress plain \
+ --push \
+ --platform linux/amd64,linux/arm64 \
+ --build-arg VESPA_BASE_IMAGE=el9 \
+ --build-arg VESPA_VERSION=$VESPA_VERSION \
+ --file Dockerfile \
+ --tag docker.io/vespaengine/$IMAGE_NAME:$VESPA_VERSION \
+ --tag docker.io/vespaengine/$IMAGE_NAME:$VESPA_MAJOR \
+ --tag docker.io/vespaengine/$IMAGE_NAME:latest \
+ .
fi
fi
diff --git a/screwdriver/release-container-image-docker.sh b/screwdriver/release-container-image-docker.sh
new file mode 100755
index 00000000000..6d8babe3dcc
--- /dev/null
+++ b/screwdriver/release-container-image-docker.sh
@@ -0,0 +1,81 @@
+#!/usr/bin/ssh-agent /bin/bash
+# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+set -euo pipefail
+
+if [[ $# -ne 1 ]]; then
+ echo "Usage: $0 <Vespa version>"
+ exit 1
+fi
+
+readonly VESPA_VERSION=$1
+readonly VESPA_MAJOR=$(echo $VESPA_VERSION | cut -d. -f1)
+
+if [[ -z "$DOCKER_HUB_DEPLOY_KEY" ]]; then
+ echo "Environment variable DOCKER_HUB_DEPLOY_KEY must be set, but is empty."
+ exit 1
+fi
+if [[ -z "$GHCR_DEPLOY_KEY" ]]; then
+ echo "Environment variable GHCR_DEPLOY_KEY must be set, but is empty."
+ exit 1
+fi
+
+BUILD_DIR=$(mktemp -d)
+trap "rm -rf $BUILD_DIR" EXIT
+cd $BUILD_DIR
+
+ssh-add -D
+ssh-add <(echo $DOCKER_IMAGE_DEPLOY_KEY | base64 -d)
+git clone git@github.com:vespa-engine/docker-image
+cd docker-image
+
+RELEASE_TAG="v$VESPA_VERSION"
+if git rev-parse $RELEASE_TAG &> /dev/null; then
+ git checkout $RELEASE_TAG
+else
+ git tag -a "$RELEASE_TAG" -m "Release version $VESPA_VERSION"
+ git push origin "$RELEASE_TAG"
+fi
+
+docker info
+docker version
+docker buildx version
+docker buildx install
+
+unset DOCKER_HOST
+docker context create vespa-context --docker "host=tcp://localhost:2376,ca=/certs/client/ca.pem,cert=/certs/client/cert.pem,key=/certs/client/key.pem"
+docker context use vespa-context
+
+docker buildx create --name vespa-builder --driver docker-container --use
+docker buildx inspect --bootstrap
+
+# The minimal image seems to have issues building on cd.screwdriver.cd. Needs investigation.
+#for data in "Dockerfile vespa" "Dockerfile.minimal vespa-minimal"; do
+
+for data in "Dockerfile vespa"; do
+ set -- $data
+ DOCKER_FILE=$1
+ IMAGE_NAME=$2
+
+ # Push to Docker Hub
+ if curl -fsSL https://index.docker.io/v1/repositories/vespaengine/$IMAGE_NAME/tags/$VESPA_VERSION &> /dev/null; then
+    echo "Container image docker.io/vespaengine/$IMAGE_NAME:$VESPA_VERSION already exists."
+ else
+ docker login --username aressem --password "$DOCKER_HUB_DEPLOY_KEY"
+ docker buildx build --progress plain --push --platform linux/amd64,linux/arm64 --build-arg VESPA_VERSION=$VESPA_VERSION \
+ --file $DOCKER_FILE --tag docker.io/vespaengine/$IMAGE_NAME:$VESPA_VERSION \
+ --tag docker.io/vespaengine/$IMAGE_NAME:$VESPA_MAJOR --tag docker.io/vespaengine/$IMAGE_NAME:latest .
+ fi
+done
+
+# Push to GitHub Container Registry
+JWT=$(curl -sSL -u aressem:$GHCR_DEPLOY_KEY "https://ghcr.io/token?service=ghcr.io&scope=repository:vespa-engine/vespa:pull" | jq -re '.token')
+IMAGE_TAGS=$(curl -sSL -H "Authorization: Bearer $JWT" https://ghcr.io/v2/vespa-engine/vespa/tags/list | jq -re '.tags[]')
+if grep $VESPA_VERSION <<< "$IMAGE_TAGS" &> /dev/null; then
+  echo "Container image ghcr.io/vespa-engine/vespa:$VESPA_VERSION already exists."
+else
+ docker login --username aressem --password "$GHCR_DEPLOY_KEY" ghcr.io
+ docker buildx build --progress plain --push --platform linux/amd64,linux/arm64 --build-arg VESPA_VERSION=$VESPA_VERSION \
+ --tag ghcr.io/vespa-engine/vespa:$VESPA_VERSION --tag ghcr.io/vespa-engine/vespa:$VESPA_MAJOR \
+ --tag ghcr.io/vespa-engine/vespa:latest .
+fi
diff --git a/searchcore/src/tests/grouping/grouping.cpp b/searchcore/src/tests/grouping/grouping.cpp
index f71912b7100..eabbaf3d50f 100644
--- a/searchcore/src/tests/grouping/grouping.cpp
+++ b/searchcore/src/tests/grouping/grouping.cpp
@@ -197,8 +197,8 @@ TEST_F("testGroupingContextUsage", DoomFixture()) {
.addLevel(createGL(MU<AttributeNode>("attr3"), MU<AttributeNode>("attr1")));
- GroupingContext::GroupingPtr r1(new Grouping(request1));
- GroupingContext::GroupingPtr r2(new Grouping(request2));
+ auto r1 = std::make_shared<Grouping>(request1);
+ auto r2 = std::make_shared<Grouping>(request2);
GroupingContext context(f1.clock.clock(), f1.timeOfDoom);
ASSERT_TRUE(context.empty());
context.addGrouping(r1);
@@ -222,7 +222,7 @@ TEST_F("testGroupingContextSerializing", DoomFixture()) {
baseRequest.serialize(nos);
GroupingContext context(f1.clock.clock(), f1.timeOfDoom);
- GroupingContext::GroupingPtr bp(new Grouping(baseRequest));
+ auto bp = std::make_shared<Grouping>(baseRequest);
context.addGrouping(bp);
context.serialize();
vespalib::nbostream & res(context.getResult());
@@ -240,7 +240,7 @@ TEST_F("testGroupingManager", DoomFixture()) {
.addLevel(createGL(MU<AttributeNode>("attr2"), MU<AttributeNode>("attr3")));
GroupingContext context(f1.clock.clock(), f1.timeOfDoom);
- GroupingContext::GroupingPtr bp(new Grouping(request1));
+ auto bp = std::make_shared<Grouping>(request1);
context.addGrouping(bp);
GroupingManager manager(context);
ASSERT_TRUE(!manager.empty());
@@ -272,8 +272,8 @@ TEST_F("testGroupingSession", DoomFixture()) {
request2.select(attrCheck, attrCheck);
EXPECT_EQUAL(0u, attrCheck._numrefs);
- GroupingContext::GroupingPtr r1(new Grouping(request1));
- GroupingContext::GroupingPtr r2(new Grouping(request2));
+ auto r1 = std::make_shared<Grouping>(request1);
+ auto r2 = std::make_shared<Grouping>(request2);
GroupingContext initContext(f1.clock.clock(), f1.timeOfDoom);
initContext.addGrouping(r1);
initContext.addGrouping(r2);
@@ -307,7 +307,7 @@ TEST_F("testGroupingSession", DoomFixture()) {
// Test second pass
{
GroupingContext context(f1.clock.clock(), f1.timeOfDoom);
- GroupingContext::GroupingPtr r(new Grouping(request1));
+ auto r = std::make_shared<Grouping>(request1);
r->setFirstLevel(1);
r->setLastLevel(1);
context.addGrouping(r);
@@ -318,7 +318,7 @@ TEST_F("testGroupingSession", DoomFixture()) {
// Test last pass. Session should be marked as finished
{
GroupingContext context(f1.clock.clock(), f1.timeOfDoom);
- GroupingContext::GroupingPtr r(new Grouping(request1));
+ auto r = std::make_shared<Grouping>(request1);
r->setFirstLevel(2);
r->setLastLevel(2);
context.addGrouping(r);
@@ -340,7 +340,7 @@ TEST_F("testEmptySessionId", DoomFixture()) {
.addLevel(createGL(MU<AttributeNode>("attr1"), MU<AttributeNode>("attr2")))
.addLevel(createGL(MU<AttributeNode>("attr2"), MU<AttributeNode>("attr3")));
- GroupingContext::GroupingPtr r1(new Grouping(request1));
+ auto r1 = std::make_shared<Grouping>(request1);
GroupingContext initContext(f1.clock.clock(), f1.timeOfDoom);
initContext.addGrouping(r1);
SessionId id;
@@ -373,7 +373,7 @@ TEST_F("testSessionManager", DoomFixture()) {
.setExpression(MU<AttributeNode>("attr0"))
.setResult(Int64ResultNode(0))));
- GroupingContext::GroupingPtr r1(new Grouping(request1));
+ auto r1 = std::make_shared<Grouping>(request1);
GroupingContext initContext(f1.clock.clock(), f1.timeOfDoom);
initContext.addGrouping(r1);
@@ -381,9 +381,9 @@ TEST_F("testSessionManager", DoomFixture()) {
SessionId id1("foo");
SessionId id2("bar");
SessionId id3("baz");
- GroupingSession::UP s1(new GroupingSession(id1, initContext, world.attributeContext));
- GroupingSession::UP s2(new GroupingSession(id2, initContext, world.attributeContext));
- GroupingSession::UP s3(new GroupingSession(id3, initContext, world.attributeContext));
+ auto s1 = std::make_unique<GroupingSession>(id1, initContext, world.attributeContext);
+ auto s2 = std::make_unique<GroupingSession>(id2, initContext, world.attributeContext);
+ auto s3 = std::make_unique<GroupingSession>(id3, initContext, world.attributeContext);
ASSERT_EQUAL(f1.timeOfDoom, s1->getTimeOfDoom());
mgr.insert(std::move(s1));
@@ -431,7 +431,7 @@ TEST_F("test grouping fork/join", DoomFixture()) {
.setFirstLevel(0)
.setLastLevel(1);
- GroupingContext::GroupingPtr g1(new Grouping(request));
+ auto g1 = std::make_shared<Grouping>(request);
GroupingContext context(f1.clock.clock(), f1.timeOfDoom);
context.addGrouping(g1);
GroupingSession session(SessionId(), context, world.attributeContext);
@@ -476,24 +476,20 @@ TEST_F("test session timeout", DoomFixture()) {
GroupingContext initContext1(f1.clock.clock(), steady_time(duration(10)));
GroupingContext initContext2(f1.clock.clock(), steady_time(duration(20)));
- GroupingSession::UP s1(new GroupingSession(id1, initContext1, world.attributeContext));
- GroupingSession::UP s2(new GroupingSession(id2, initContext2, world.attributeContext));
+ auto s1 = std::make_unique<GroupingSession>(id1, initContext1, world.attributeContext);
+ auto s2 = std::make_unique<GroupingSession>(id2, initContext2, world.attributeContext);
mgr.insert(std::move(s1));
mgr.insert(std::move(s2));
mgr.pruneTimedOutSessions(steady_time(5ns));
- SessionManager::Stats stats(mgr.getGroupingStats());
- ASSERT_EQUAL(2u, stats.numCached);
+ ASSERT_EQUAL(2u, mgr.getGroupingStats().numCached);
mgr.pruneTimedOutSessions(steady_time(10ns));
- stats = mgr.getGroupingStats();
- ASSERT_EQUAL(2u, stats.numCached);
+ ASSERT_EQUAL(2u, mgr.getGroupingStats().numCached);
mgr.pruneTimedOutSessions(steady_time(11ns));
- stats = mgr.getGroupingStats();
- ASSERT_EQUAL(1u, stats.numCached);
+ ASSERT_EQUAL(1u, mgr.getGroupingStats().numCached);
mgr.pruneTimedOutSessions(steady_time(21ns));
- stats = mgr.getGroupingStats();
- ASSERT_EQUAL(0u, stats.numCached);
+ ASSERT_EQUAL(0u, mgr.getGroupingStats().numCached);
}
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/common/timer/timer_test.cpp b/searchcore/src/tests/proton/common/timer/timer_test.cpp
index 4ff970df84e..56bad3981c9 100644
--- a/searchcore/src/tests/proton/common/timer/timer_test.cpp
+++ b/searchcore/src/tests/proton/common/timer/timer_test.cpp
@@ -5,9 +5,9 @@
#include <vespa/searchcore/proton/common/scheduledexecutor.h>
#include <vespa/vespalib/gtest/gtest.h>
#include <vespa/vespalib/util/count_down_latch.h>
-#include <vespa/vespalib/util/size_literals.h>
#include <vespa/vespalib/util/threadstackexecutor.h>
#include <vespa/vespalib/util/lambdatask.h>
+#include <vespa/vespalib/testkit/time_bomb.h>
#include <thread>
using vespalib::Executor;
@@ -84,29 +84,36 @@ TYPED_TEST(ScheduledExecutorTest, test_drop_handle) {
}
TYPED_TEST(ScheduledExecutorTest, test_only_one_instance_running) {
+ vespalib::TimeBomb time_bomb(60s);
vespalib::Gate latch;
std::atomic<uint64_t> counter = 0;
auto handleA = this->timer->scheduleAtFixedRate(makeLambdaTask([&]() { counter++; latch.await();}), 0ms, 1ms);
std::this_thread::sleep_for(2s);
EXPECT_EQ(1, counter);
latch.countDown();
- std::this_thread::sleep_for(2s);
+ while (counter <= 10) { std::this_thread::sleep_for(1ms); }
EXPECT_GT(counter, 10);
}
TYPED_TEST(ScheduledExecutorTest, test_sync_delete) {
+ vespalib::TimeBomb time_bomb(60s);
vespalib::Gate latch;
std::atomic<uint64_t> counter = 0;
std::atomic<uint64_t> reset_counter = 0;
+ std::mutex handleLock;
auto handleA = this->timer->scheduleAtFixedRate(makeLambdaTask([&]() { counter++; latch.await();}), 0ms, 1ms);
- auto handleB = this->timer->scheduleAtFixedRate(makeLambdaTask([&]() { handleA.reset(); reset_counter++; }), 0ms, 1ms);
- std::this_thread::sleep_for(2s);
+ auto handleB = this->timer->scheduleAtFixedRate(makeLambdaTask([&]() {
+ std::lock_guard guard(handleLock);
+ handleA.reset();
+ reset_counter++;
+ }), 0ms, 1ms);
+ while (counter < 1) { std::this_thread::sleep_for(1ms); }
EXPECT_EQ(1, counter);
EXPECT_EQ(0, reset_counter);
latch.countDown();
- std::this_thread::sleep_for(2s);
+ while (reset_counter <= 10) { std::this_thread::sleep_for(1ms); }
EXPECT_EQ(1, counter);
- EXPECT_GT(reset_counter, 10);
+ std::lock_guard guard(handleLock);
EXPECT_EQ(nullptr, handleA.get());
EXPECT_FALSE(nullptr == handleB.get());
}
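
The rewritten assertions above replace fixed two-second sleeps with short polling loops bounded by a 60s vespalib::TimeBomb, so each test waits only as long as needed and cannot hang indefinitely. A minimal, standalone sketch of that wait-until-condition pattern (the wait_for helper and its timeout parameter are illustrative assumptions, not part of the test code):

    #include <atomic>
    #include <chrono>
    #include <cstdint>
    #include <thread>

    // Poll 'done' every millisecond until it returns true or 'timeout' expires.
    // Returning false lets the caller fail the test instead of hanging forever.
    template <typename Condition>
    bool wait_for(Condition done, std::chrono::steady_clock::duration timeout) {
        auto deadline = std::chrono::steady_clock::now() + timeout;
        while (!done()) {
            if (std::chrono::steady_clock::now() >= deadline) {
                return false;
            }
            std::this_thread::sleep_for(std::chrono::milliseconds(1));
        }
        return true;
    }

    // Usage, mirroring the loop in the test above:
    // std::atomic<uint64_t> counter{0};
    // bool ok = wait_for([&] { return counter > 10; }, std::chrono::seconds(60));
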
diff --git a/searchcore/src/tests/proton/flushengine/flushengine_test.cpp b/searchcore/src/tests/proton/flushengine/flushengine_test.cpp
index 4033fe679ca..054d41be89a 100644
--- a/searchcore/src/tests/proton/flushengine/flushengine_test.cpp
+++ b/searchcore/src/tests/proton/flushengine/flushengine_test.cpp
@@ -548,7 +548,11 @@ TEST_F("require that GC targets are not considered when oldest serial is found",
// Before anything is flushed the oldest serial is 5.
// After 'foo' has been flushed the oldest serial is 20 as GC target 'bar' is not considered.
- EXPECT_EQUAL(FlushDoneHistory({ 5, 20, 20, 25 }), handler->getFlushDoneHistory());
+ FlushDoneHistory history = handler->getFlushDoneHistory();
+ EXPECT_TRUE(history.end() == std::find(history.begin(), history.end(), 10));
+ auto last_unique = std::unique(history.begin(), history.end());
+ history.erase(last_unique, history.end());
+ EXPECT_EQUAL(FlushDoneHistory({ 5, 20, 25 }), history);
}
TEST_F("require that oldest serial is found in group", Fixture(2, IINTERVAL))
diff --git a/searchcore/src/vespa/searchcore/proton/attribute/document_field_extractor.cpp b/searchcore/src/vespa/searchcore/proton/attribute/document_field_extractor.cpp
index c0c6f729509..9364be4570e 100644
--- a/searchcore/src/vespa/searchcore/proton/attribute/document_field_extractor.cpp
+++ b/searchcore/src/vespa/searchcore/proton/attribute/document_field_extractor.cpp
@@ -16,6 +16,7 @@
#include <vespa/searchcommon/common/undefinedvalues.h>
#include <vespa/vespalib/stllike/hash_map.hpp>
#include <vespa/vespalib/util/exceptions.h>
+#include <cassert>
using document::FieldValue;
using document::BoolFieldValue;
diff --git a/searchcore/src/vespa/searchcore/proton/matching/blueprintbuilder.cpp b/searchcore/src/vespa/searchcore/proton/matching/blueprintbuilder.cpp
index eee2b7a7203..03e15830ac5 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/blueprintbuilder.cpp
+++ b/searchcore/src/vespa/searchcore/proton/matching/blueprintbuilder.cpp
@@ -30,21 +30,21 @@ struct Mixer {
}
Blueprint::UP mix(Blueprint::UP indexes) {
- if (attributes.get() == 0) {
- if (indexes.get() == 0) {
+ if ( ! attributes) {
+ if ( ! indexes) {
return std::make_unique<EmptyBlueprint>();
}
- return Blueprint::UP(std::move(indexes));
+ return indexes;
}
- if (indexes.get() == 0) {
+ if ( ! indexes) {
if (attributes->childCnt() == 1) {
return attributes->removeChild(0);
} else {
- return Blueprint::UP(std::move(attributes));
+ return std::move(attributes);
}
}
- attributes->addChild(Blueprint::UP(std::move(indexes)));
- return Blueprint::UP(std::move(attributes));
+ attributes->addChild(std::move(indexes));
+ return std::move(attributes);
}
};
@@ -88,6 +88,7 @@ private:
void buildEquiv(ProtonEquiv &n) {
double eqw = n.getWeight().percent();
FieldSpecBaseList specs;
+ specs.reserve(n.numFields());
for (size_t i = 0; i < n.numFields(); ++i) {
specs.add(n.field(i).fieldSpec());
}
@@ -123,9 +124,7 @@ private:
assert(field.getFieldId() != search::fef::IllegalFieldId);
assert(field.getHandle() != search::fef::IllegalHandle);
if (field.attribute_field) {
- FieldSpecList attrField;
- attrField.add(field.fieldSpec());
- mixer.addAttribute(_context.getAttributes().createBlueprint(_requestContext, attrField, n));
+ mixer.addAttribute(_context.getAttributes().createBlueprint(_requestContext, field.fieldSpec(), n));
} else {
indexFields.add(field.fieldSpec());
}
diff --git a/searchcore/src/vespa/searchcore/proton/matching/partial_result.h b/searchcore/src/vespa/searchcore/proton/matching/partial_result.h
index f4dc2e31d4d..314fefa3cc0 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/partial_result.h
+++ b/searchcore/src/vespa/searchcore/proton/matching/partial_result.h
@@ -48,7 +48,7 @@ public:
_sortData.push_back(sd);
_sortDataSize += sd.second;
}
- virtual void merge(Source &rhs) override;
+ void merge(Source &rhs) override;
};
}
diff --git a/searchcore/src/vespa/searchcore/proton/matching/sessionmanager.cpp b/searchcore/src/vespa/searchcore/proton/matching/sessionmanager.cpp
index 1e74d83185f..b78d638d702 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/sessionmanager.cpp
+++ b/searchcore/src/vespa/searchcore/proton/matching/sessionmanager.cpp
@@ -3,6 +3,8 @@
#include "sessionmanager.h"
#include <vespa/vespalib/stllike/lrucache_map.hpp>
#include <vespa/vespalib/stllike/hash_map.hpp>
+#include <vespa/vespalib/util/lambdatask.h>
+#include <vespa/vespalib/util/foreground_thread_executor.h>
#include <mutex>
#include <algorithm>
@@ -50,10 +52,6 @@ struct SessionCache : SessionCacheBase {
}
return ret;
}
- void pruneTimedOutSessions(vespalib::steady_time currentTime) {
- std::vector<EntryUP> toDestruct = stealTimedOutSessions(currentTime);
- toDestruct.clear();
- }
std::vector<EntryUP> stealTimedOutSessions(vespalib::steady_time currentTime) {
std::vector<EntryUP> toDestruct;
std::lock_guard<std::mutex> guard(_lock);
@@ -103,10 +101,6 @@ struct SessionMap : SessionCacheBase {
}
return EntrySP();
}
- void pruneTimedOutSessions(vespalib::steady_time currentTime) {
- std::vector<EntrySP> toDestruct = stealTimedOutSessions(currentTime);
- toDestruct.clear();
- }
std::vector<EntrySP> stealTimedOutSessions(vespalib::steady_time currentTime) {
std::vector<EntrySP> toDestruct;
std::vector<SessionId> keys;
@@ -151,7 +145,8 @@ struct SessionMap : SessionCacheBase {
}
};
-void SessionCacheBase::entryDropped(const SessionId &id) {
+void
+SessionCacheBase::entryDropped(const SessionId &id) {
LOG(debug, "Session cache is full, dropping entry to fit session '%s'", id.c_str());
_stats.numDropped++;
}
@@ -179,19 +174,23 @@ SessionManager::~SessionManager() {
assert(_search_map->empty());
}
-void SessionManager::insert(search::grouping::GroupingSession::UP session) {
+void
+SessionManager::insert(search::grouping::GroupingSession::UP session) {
_grouping_cache->insert(std::move(session));
}
-void SessionManager::insert(SearchSession::SP session) {
+void
+SessionManager::insert(SearchSession::SP session) {
_search_map->insert(std::move(session));
}
-GroupingSession::UP SessionManager::pickGrouping(const SessionId &id) {
+GroupingSession::UP
+SessionManager::pickGrouping(const SessionId &id) {
return _grouping_cache->pick(id);
}
-SearchSession::SP SessionManager::pickSearch(const SessionId &id) {
+SearchSession::SP
+SessionManager::pickSearch(const SessionId &id) {
return _search_map->pick(id);
}
@@ -199,33 +198,56 @@ std::vector<SessionManager::SearchSessionInfo>
SessionManager::getSortedSearchSessionInfo() const
{
std::vector<SearchSessionInfo> sessions;
- _search_map->each([&sessions](const SearchSession &session)
- {
- sessions.emplace_back(session.getSessionId(),
- session.getCreateTime(),
- session.getTimeOfDoom());
- });
+ _search_map->each([&sessions](const SearchSession &session) {
+ sessions.emplace_back(session.getSessionId(), session.getCreateTime(), session.getTimeOfDoom());
+ });
std::sort(sessions.begin(), sessions.end(),
- [](const SearchSessionInfo &a,
- const SearchSessionInfo &b)
- {
+ [](const SearchSessionInfo &a, const SearchSessionInfo &b) {
return (a.created < b.created);
});
return sessions;
}
-void SessionManager::pruneTimedOutSessions(vespalib::steady_time currentTime) {
- _grouping_cache->pruneTimedOutSessions(currentTime);
- _search_map->pruneTimedOutSessions(currentTime);
+void
+SessionManager::pruneTimedOutSessions(vespalib::steady_time currentTime) {
+ vespalib::ForegroundThreadExecutor executor;
+ pruneTimedOutSessions(currentTime, executor);
+}
+
+namespace {
+
+template <typename T>
+void
+split_and_execute(std::vector<T> tasks, vespalib::ThreadExecutor & executor) {
+ size_t num_bundles = std::max(1ul, std::min(tasks.size(), 2*executor.getNumThreads()));
+ std::vector<std::vector<T>> bundles(num_bundles);
+ for (size_t i = 0; i < tasks.size(); i++) {
+ bundles[i%bundles.size()].push_back(std::move(tasks[i]));
+ }
+ for (size_t i = 0; i < bundles.size(); i++) {
+ executor.execute(vespalib::makeLambdaTask([part=std::move(bundles[i])]() {
+            // Objects are destroyed in the given executor.
+ }));
+ }
+}
+
+}
+void
+SessionManager::pruneTimedOutSessions(vespalib::steady_time currentTime, vespalib::ThreadExecutor & executor) {
+ split_and_execute(_grouping_cache->stealTimedOutSessions(currentTime), executor);
+ split_and_execute(_search_map->stealTimedOutSessions(currentTime), executor);
}
-SessionManager::Stats SessionManager::getGroupingStats() {
+SessionManager::Stats
+SessionManager::getGroupingStats() {
return _grouping_cache->getStats();
}
-SessionManager::Stats SessionManager::getSearchStats() {
+SessionManager::Stats
+SessionManager::getSearchStats() {
return _search_map->getStats();
}
-size_t SessionManager::getNumSearchSessions() const {
+size_t
+SessionManager::getNumSearchSessions() const {
return _search_map->size();
}
diff --git a/searchcore/src/vespa/searchcore/proton/matching/sessionmanager.h b/searchcore/src/vespa/searchcore/proton/matching/sessionmanager.h
index 3ed4760b52e..872f1a90bd1 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/sessionmanager.h
+++ b/searchcore/src/vespa/searchcore/proton/matching/sessionmanager.h
@@ -6,6 +6,7 @@
#include <vespa/searchcore/grouping/sessionid.h>
#include <vespa/vespalib/stllike/lrucache_map.h>
+namespace vespalib { class ThreadExecutor; }
namespace proton::matching {
using SessionId = vespalib::string;
@@ -58,6 +59,8 @@ public:
size_t getNumSearchSessions() const;
std::vector<SearchSessionInfo> getSortedSearchSessionInfo() const;
+ void pruneTimedOutSessions(vespalib::steady_time currentTime, vespalib::ThreadExecutor & executor);
+ // Only used for testing
void pruneTimedOutSessions(vespalib::steady_time currentTime);
};
diff --git a/searchcore/src/vespa/searchcore/proton/server/proton.cpp b/searchcore/src/vespa/searchcore/proton/server/proton.cpp
index 7f3dc02aba8..d70bff52ed4 100644
--- a/searchcore/src/vespa/searchcore/proton/server/proton.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/proton.cpp
@@ -380,7 +380,9 @@ Proton::init(const BootstrapConfig::SP & configSnapshot)
_flushEngine->start();
vespalib::duration pruneSessionsInterval = vespalib::from_s(protonConfig.grouping.sessionmanager.pruning.interval);
- _sessionPruneHandle = _scheduler->scheduleAtFixedRate(makeLambdaTask([&]() { _sessionManager->pruneTimedOutSessions(vespalib::steady_clock::now()); }), pruneSessionsInterval, pruneSessionsInterval);
+ _sessionPruneHandle = _scheduler->scheduleAtFixedRate(makeLambdaTask([&]() {
+ _sessionManager->pruneTimedOutSessions(vespalib::steady_clock::now(), _shared_service->shared());
+ }), pruneSessionsInterval, pruneSessionsInterval);
_isInitializing = false;
_protonConfigurer.setAllowReconfig(true);
_initComplete = true;
diff --git a/searchcore/src/vespa/searchcorespi/index/iindexmaintaineroperations.h b/searchcore/src/vespa/searchcorespi/index/iindexmaintaineroperations.h
index 9025b56dc27..0b089daff7c 100644
--- a/searchcore/src/vespa/searchcorespi/index/iindexmaintaineroperations.h
+++ b/searchcore/src/vespa/searchcorespi/index/iindexmaintaineroperations.h
@@ -19,7 +19,7 @@ struct IIndexMaintainerOperations {
using IFieldLengthInspector = search::index::IFieldLengthInspector;
using Schema = search::index::Schema;
using SelectorArray = search::diskindex::SelectorArray;
- virtual ~IIndexMaintainerOperations() {}
+ virtual ~IIndexMaintainerOperations() = default;
/**
* Creates a new memory index using the given schema.
diff --git a/searchlib/src/apps/uniform/uniform.cpp b/searchlib/src/apps/uniform/uniform.cpp
index 807b8d61a9e..95d2bb1a7d1 100644
--- a/searchlib/src/apps/uniform/uniform.cpp
+++ b/searchlib/src/apps/uniform/uniform.cpp
@@ -3,6 +3,7 @@
#include <vespa/vespalib/util/signalhandler.h>
#include <vespa/searchlib/bitcompression/compression.h>
#include <cinttypes>
+#include <cassert>
static uint64_t
maxExpGolombVal(uint64_t kValue, uint64_t maxBits)
diff --git a/searchlib/src/tests/fef/termfieldmodel/termfieldmodel_test.cpp b/searchlib/src/tests/fef/termfieldmodel/termfieldmodel_test.cpp
index 54a7d6ea286..4ac4c92f658 100644
--- a/searchlib/src/tests/fef/termfieldmodel/termfieldmodel_test.cpp
+++ b/searchlib/src/tests/fef/termfieldmodel/termfieldmodel_test.cpp
@@ -27,6 +27,26 @@ struct State {
State::State() : term(), md(), f3(nullptr), f5(nullptr), f7(nullptr), array() {}
State::~State() = default;
+/**
+ * convenience adapter for easy iteration
+ **/
+class SimpleTermFieldRangeAdapter
+{
+ SimpleTermData& _ref;
+ size_t _idx;
+ size_t _lim;
+public:
+ explicit SimpleTermFieldRangeAdapter(SimpleTermData& ref)
+ : _ref(ref), _idx(0), _lim(ref.numFields())
+ {}
+
+ [[nodiscard]] bool valid() const { return (_idx < _lim); }
+
+ [[nodiscard]] SimpleTermFieldData& get() const { return _ref.field(_idx); }
+
+ void next() { assert(valid()); ++_idx; }
+};
+
void testInvalidId() {
const TermFieldMatchData empty;
using search::queryeval::SearchIterator;
@@ -44,7 +64,7 @@ void testSetup(State &state) {
state.term.addField(5); // docfreq = 3
using FRA = search::fef::ITermFieldRangeAdapter;
- using SFR = search::fef::SimpleTermFieldRangeAdapter;
+ using SFR = SimpleTermFieldRangeAdapter;
// lookup terms
{
diff --git a/searchlib/src/vespa/searchcommon/attribute/config.cpp b/searchlib/src/vespa/searchcommon/attribute/config.cpp
index 70c2377289f..91495025dee 100644
--- a/searchlib/src/vespa/searchcommon/attribute/config.cpp
+++ b/searchlib/src/vespa/searchcommon/attribute/config.cpp
@@ -65,4 +65,11 @@ Config::operator==(const Config &b) const
_hnsw_index_params == b._hnsw_index_params;
}
+Config&
+Config::set_hnsw_index_params(const HnswIndexParams& params) {
+ assert(_distance_metric == params.distance_metric());
+ _hnsw_index_params = params;
+ return *this;
+}
+
}
diff --git a/searchlib/src/vespa/searchcommon/attribute/config.h b/searchlib/src/vespa/searchcommon/attribute/config.h
index 0102f362532..32cac7ec9d6 100644
--- a/searchlib/src/vespa/searchcommon/attribute/config.h
+++ b/searchlib/src/vespa/searchcommon/attribute/config.h
@@ -10,7 +10,6 @@
#include <vespa/searchcommon/common/dictionary_config.h>
#include <vespa/eval/eval/value_type.h>
#include <vespa/vespalib/datastore/compaction_strategy.h>
-#include <cassert>
#include <optional>
namespace search::attribute {
@@ -72,11 +71,7 @@ public:
_distance_metric = value;
return *this;
}
- Config& set_hnsw_index_params(const HnswIndexParams& params) {
- assert(_distance_metric == params.distance_metric());
- _hnsw_index_params = params;
- return *this;
- }
+ Config& set_hnsw_index_params(const HnswIndexParams& params);
Config& clear_hnsw_index_params() {
_hnsw_index_params.reset();
return *this;
diff --git a/searchlib/src/vespa/searchlib/attribute/load_utils.hpp b/searchlib/src/vespa/searchlib/attribute/load_utils.hpp
index 463a62ab01a..614e327942a 100644
--- a/searchlib/src/vespa/searchlib/attribute/load_utils.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/load_utils.hpp
@@ -5,6 +5,7 @@
#include "load_utils.h"
#include "attributevector.h"
#include <vespa/searchcommon/attribute/multivalue.h>
+#include <cassert>
namespace search::attribute {
diff --git a/searchlib/src/vespa/searchlib/attribute/readerbase.cpp b/searchlib/src/vespa/searchlib/attribute/readerbase.cpp
index e4bc2c02ad6..382d9ccb110 100644
--- a/searchlib/src/vespa/searchlib/attribute/readerbase.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/readerbase.cpp
@@ -6,6 +6,7 @@
#include <vespa/fastlib/io/bufferedfile.h>
#include <vespa/searchlib/util/filesizecalculator.h>
#include <vespa/vespalib/util/size_literals.h>
+#include <cassert>
#include <vespa/log/log.h>
LOG_SETUP(".search.attribute.readerbase");
@@ -73,6 +74,13 @@ ReaderBase::ReaderBase(AttributeVector &attr)
ReaderBase::~ReaderBase() = default;
+size_t
+ReaderBase::getEnumCount() const {
+ size_t dataSize = _datFile.data_size();
+ assert((dataSize % sizeof(uint32_t)) == 0);
+ return dataSize / sizeof(uint32_t);
+}
+
bool
ReaderBase::hasWeight() const {
return _weightFile.valid();
diff --git a/searchlib/src/vespa/searchlib/attribute/readerbase.h b/searchlib/src/vespa/searchlib/attribute/readerbase.h
index 070dc1f99fb..ff400acc824 100644
--- a/searchlib/src/vespa/searchlib/attribute/readerbase.h
+++ b/searchlib/src/vespa/searchlib/attribute/readerbase.h
@@ -4,7 +4,6 @@
#include <vespa/searchlib/util/file_with_header.h>
#include <vespa/searchlib/util/fileutil.h>
-#include <cassert>
namespace search {
@@ -25,11 +24,7 @@ public:
return (_idxFile.data_size()) /sizeof(uint32_t);
}
- size_t getEnumCount() const {
- size_t dataSize = _datFile.data_size();
- assert((dataSize % sizeof(uint32_t)) == 0);
- return dataSize / sizeof(uint32_t);
- }
+ size_t getEnumCount() const;
size_t getNumValues();
int32_t getNextWeight() { return _weightReader.readHostOrder(); }
diff --git a/searchlib/src/vespa/searchlib/bitcompression/compression.h b/searchlib/src/vespa/searchlib/bitcompression/compression.h
index a77d82d9e8f..2d6b8083d43 100644
--- a/searchlib/src/vespa/searchlib/bitcompression/compression.h
+++ b/searchlib/src/vespa/searchlib/bitcompression/compression.h
@@ -4,7 +4,6 @@
#include <vespa/searchlib/util/comprfile.h>
#include <vespa/vespalib/stllike/string.h>
-#include <cassert>
namespace vespalib {
@@ -1400,7 +1399,6 @@ public:
const uint8_t *
getByteCompr() const
{
- assert((_preRead & 7) == 0);
return reinterpret_cast<const uint8_t *>(getCompr()) +
(getBitOffset() >> 3);
}
diff --git a/searchlib/src/vespa/searchlib/bitcompression/countcompression.cpp b/searchlib/src/vespa/searchlib/bitcompression/countcompression.cpp
index cdae8058d76..7c38931df77 100644
--- a/searchlib/src/vespa/searchlib/bitcompression/countcompression.cpp
+++ b/searchlib/src/vespa/searchlib/bitcompression/countcompression.cpp
@@ -2,6 +2,7 @@
#include "countcompression.h"
#include <vespa/searchlib/index/postinglistcounts.h>
+#include <cassert>
namespace search::bitcompression {
diff --git a/searchlib/src/vespa/searchlib/bitcompression/pagedict4.h b/searchlib/src/vespa/searchlib/bitcompression/pagedict4.h
index 9208a5be3b8..b162bdc3f2b 100644
--- a/searchlib/src/vespa/searchlib/bitcompression/pagedict4.h
+++ b/searchlib/src/vespa/searchlib/bitcompression/pagedict4.h
@@ -3,8 +3,8 @@
#pragma once
#include "countcompression.h"
-#include <limits>
#include <vespa/vespalib/stllike/string.h>
+#include <cassert>
namespace search::bitcompression {
diff --git a/searchlib/src/vespa/searchlib/bitcompression/posocc_fields_params.cpp b/searchlib/src/vespa/searchlib/bitcompression/posocc_fields_params.cpp
index d2269787068..9d6258ce26f 100644
--- a/searchlib/src/vespa/searchlib/bitcompression/posocc_fields_params.cpp
+++ b/searchlib/src/vespa/searchlib/bitcompression/posocc_fields_params.cpp
@@ -5,6 +5,7 @@
#include <vespa/searchlib/index/schemautil.h>
#include <vespa/vespalib/data/fileheader.h>
#include <vespa/vespalib/stllike/asciistream.h>
+#include <cassert>
#include <vespa/log/log.h>
LOG_SETUP(".posocc_fields_params");
@@ -38,6 +39,12 @@ PosOccFieldsParams::operator=(const PosOccFieldsParams &rhs)
return *this;
}
+void
+PosOccFieldsParams::assertCachedParamsRef() const {
+ assert(_numFields == _params.size());
+ assert(_fieldParams == (_params.empty() ? nullptr : &_params[0]));
+}
+
bool
PosOccFieldsParams::operator==(const PosOccFieldsParams &rhs) const
diff --git a/searchlib/src/vespa/searchlib/bitcompression/posocc_fields_params.h b/searchlib/src/vespa/searchlib/bitcompression/posocc_fields_params.h
index 963a80f06dc..8748557e5a7 100644
--- a/searchlib/src/vespa/searchlib/bitcompression/posocc_fields_params.h
+++ b/searchlib/src/vespa/searchlib/bitcompression/posocc_fields_params.h
@@ -4,7 +4,6 @@
#include "posocc_field_params.h"
#include <vector>
-#include <cassert>
namespace search::bitcompression {
@@ -32,10 +31,7 @@ public:
_fieldParams = _params.empty() ? nullptr : &_params[0];
}
- void assertCachedParamsRef() const {
- assert(_numFields == _params.size());
- assert(_fieldParams == (_params.empty() ? nullptr : &_params[0]));
- }
+ void assertCachedParamsRef() const;
uint32_t getNumFields() const { return _numFields; }
const PosOccFieldParams *getFieldParams() const { return _fieldParams; }
diff --git a/searchlib/src/vespa/searchlib/bitcompression/posocccompression.cpp b/searchlib/src/vespa/searchlib/bitcompression/posocccompression.cpp
index fd6c723e901..8e1bfd2875c 100644
--- a/searchlib/src/vespa/searchlib/bitcompression/posocccompression.cpp
+++ b/searchlib/src/vespa/searchlib/bitcompression/posocccompression.cpp
@@ -7,6 +7,7 @@
#include <vespa/searchlib/index/postinglistparams.h>
#include <vespa/vespalib/stllike/asciistream.h>
#include <vespa/vespalib/data/fileheader.h>
+#include <cassert>
#include <vespa/log/log.h>
LOG_SETUP(".posocccompression");
diff --git a/searchlib/src/vespa/searchlib/diskindex/fieldreader.h b/searchlib/src/vespa/searchlib/diskindex/fieldreader.h
index 73b56559115..e97d5deb95c 100644
--- a/searchlib/src/vespa/searchlib/diskindex/fieldreader.h
+++ b/searchlib/src/vespa/searchlib/diskindex/fieldreader.h
@@ -1,14 +1,14 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
+#include "wordnummapper.h"
+#include "docidmapper.h"
+#include "fieldwriter.h"
#include <vespa/searchlib/index/postinglistcounts.h>
#include <vespa/searchlib/index/dictionaryfile.h>
#include <vespa/searchlib/index/docidandfeatures.h>
#include <vespa/searchlib/index/postinglistfile.h>
#include <vespa/searchlib/index/schemautil.h>
-#include "wordnummapper.h"
-#include "docidmapper.h"
-#include "fieldwriter.h"
namespace search::diskindex {
@@ -40,7 +40,7 @@ public:
using PostingListCounts = index::PostingListCounts;
using PostingListParams = index::PostingListParams;
- uint64_t _wordNum;
+ uint64_t _wordNum;
DocIdAndFeatures _docIdAndFeatures;
protected:
std::unique_ptr<DictionaryFileSeqRead> _dictFile;
diff --git a/searchlib/src/vespa/searchlib/diskindex/fileheader.cpp b/searchlib/src/vespa/searchlib/diskindex/fileheader.cpp
index 5399d70fbe7..432651278e0 100644
--- a/searchlib/src/vespa/searchlib/diskindex/fileheader.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/fileheader.cpp
@@ -6,6 +6,7 @@
#include <vespa/vespalib/data/fileheader.h>
#include <vespa/fastos/file.h>
#include <cinttypes>
+#include <cassert>
#include <arpa/inet.h>
#include <vespa/log/log.h>
diff --git a/searchlib/src/vespa/searchlib/diskindex/fusion.cpp b/searchlib/src/vespa/searchlib/diskindex/fusion.cpp
index 4fd9d116244..d12081ee89c 100644
--- a/searchlib/src/vespa/searchlib/diskindex/fusion.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/fusion.cpp
@@ -8,11 +8,9 @@
#include <vespa/searchlib/common/documentsummary.h>
#include <vespa/searchlib/common/i_flush_token.h>
#include <vespa/searchlib/index/schemautil.h>
-#include <vespa/vespalib/io/fileutil.h>
#include <vespa/vespalib/util/error.h>
#include <vespa/vespalib/util/exceptions.h>
#include <vespa/vespalib/util/lambdatask.h>
-#include <vespa/document/util/queue.h>
#include <filesystem>
#include <system_error>
diff --git a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader.cpp b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader.cpp
index 2170777dbd3..460fac36acc 100644
--- a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader.cpp
@@ -3,6 +3,7 @@
#include "zc4_posting_reader.h"
#include "zc4_posting_header.h"
#include <vespa/searchlib/index/docidandfeatures.h>
+#include <cassert>
namespace search::diskindex {
@@ -19,9 +20,7 @@ Zc4PostingReader<bigEndian>::Zc4PostingReader(bool dynamic_k)
}
template <bool bigEndian>
-Zc4PostingReader<bigEndian>::~Zc4PostingReader()
-{
-}
+Zc4PostingReader<bigEndian>::~Zc4PostingReader() = default;
template <bool bigEndian>
void
diff --git a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader_base.cpp b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader_base.cpp
index b50835a648d..c71404d449b 100644
--- a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader_base.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader_base.cpp
@@ -3,7 +3,7 @@
#include "zc4_posting_reader_base.h"
#include "zc4_posting_header.h"
#include <vespa/searchlib/index/docidandfeatures.h>
-
+#include <cassert>
namespace search::diskindex {
using index::PostingListCounts;
diff --git a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer.cpp b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer.cpp
index 202ed5a23cd..3a1b7928c93 100644
--- a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer.cpp
@@ -3,6 +3,7 @@
#include "zc4_posting_writer.h"
#include <vespa/searchlib/index/docidandfeatures.h>
#include <vespa/searchlib/index/postinglistcounts.h>
+#include <cassert>
using search::index::DocIdAndFeatures;
using search::index::PostingListCounts;
diff --git a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer_base.cpp b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer_base.cpp
index 3f44b56706a..8a84ccc5731 100644
--- a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer_base.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer_base.cpp
@@ -3,6 +3,7 @@
#include "zc4_posting_writer_base.h"
#include <vespa/searchlib/index/postinglistcounts.h>
#include <vespa/searchlib/index/postinglistparams.h>
+#include <cassert>
using search::index::PostingListCounts;
using search::index::PostingListParams;
diff --git a/searchlib/src/vespa/searchlib/diskindex/zcposocciterators.cpp b/searchlib/src/vespa/searchlib/diskindex/zcposocciterators.cpp
index b67a8409581..df33091a4e8 100644
--- a/searchlib/src/vespa/searchlib/diskindex/zcposocciterators.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/zcposocciterators.cpp
@@ -1,9 +1,10 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "zcposocciterators.h"
+#include "zc4_posting_params.h"
#include <vespa/searchlib/bitcompression/posocc_fields_params.h>
#include <vespa/searchlib/fef/termfieldmatchdata.h>
-#include "zc4_posting_params.h"
+#include <cassert>
namespace search::diskindex {
diff --git a/searchlib/src/vespa/searchlib/diskindex/zcposting.cpp b/searchlib/src/vespa/searchlib/diskindex/zcposting.cpp
index ef540365208..c9a1563a8e3 100644
--- a/searchlib/src/vespa/searchlib/diskindex/zcposting.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/zcposting.cpp
@@ -8,6 +8,7 @@
#include <vespa/searchlib/index/postinglistparams.h>
#include <vespa/searchlib/common/fileheadercontext.h>
#include <vespa/vespalib/data/fileheader.h>
+#include <cassert>
#include <vespa/log/log.h>
LOG_SETUP(".diskindex.zcposting");
diff --git a/searchlib/src/vespa/searchlib/diskindex/zcpostingiterators.cpp b/searchlib/src/vespa/searchlib/diskindex/zcpostingiterators.cpp
index 66404c7a0ff..83a4ae20db5 100644
--- a/searchlib/src/vespa/searchlib/diskindex/zcpostingiterators.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/zcpostingiterators.cpp
@@ -4,6 +4,7 @@
#include <vespa/searchlib/fef/termfieldmatchdata.h>
#include <vespa/searchlib/fef/termfieldmatchdataarray.h>
#include <vespa/searchlib/bitcompression/posocccompression.h>
+#include <cassert>
namespace search::diskindex {
diff --git a/searchlib/src/vespa/searchlib/expression/documentfieldnode.cpp b/searchlib/src/vespa/searchlib/expression/documentfieldnode.cpp
index bd13c032a03..16f5ee04be4 100644
--- a/searchlib/src/vespa/searchlib/expression/documentfieldnode.cpp
+++ b/searchlib/src/vespa/searchlib/expression/documentfieldnode.cpp
@@ -6,6 +6,7 @@
#include <vespa/document/datatype/documenttype.h>
#include <vespa/vespalib/encoding/base64.h>
#include <vespa/vespalib/locale/c.h>
+#include <cassert>
#include <vespa/log/log.h>
LOG_SETUP(".searchlib.documentfieldnode");
diff --git a/searchlib/src/vespa/searchlib/fef/matchdatalayout.cpp b/searchlib/src/vespa/searchlib/fef/matchdatalayout.cpp
index 99326b2c1e6..632bd422581 100644
--- a/searchlib/src/vespa/searchlib/fef/matchdatalayout.cpp
+++ b/searchlib/src/vespa/searchlib/fef/matchdatalayout.cpp
@@ -6,8 +6,7 @@
namespace search::fef {
MatchDataLayout::MatchDataLayout()
- : _numTermFields(0),
- _fieldIds()
+ : _fieldIds()
{
}
@@ -17,9 +16,8 @@ MatchDataLayout::~MatchDataLayout() = default;
MatchData::UP
MatchDataLayout::createMatchData() const
{
- assert(_numTermFields == _fieldIds.size());
- auto md = std::make_unique<MatchData>(MatchData::params().numTermFields(_numTermFields));
- for (size_t i = 0; i < _numTermFields; ++i) {
+ auto md = std::make_unique<MatchData>(MatchData::params().numTermFields(_fieldIds.size()));
+ for (size_t i = 0; i < _fieldIds.size(); ++i) {
md->resolveTermField(i)->setFieldId(_fieldIds[i]);
}
return md;
diff --git a/searchlib/src/vespa/searchlib/fef/matchdatalayout.h b/searchlib/src/vespa/searchlib/fef/matchdatalayout.h
index 05d25a322db..8f7717ce7ac 100644
--- a/searchlib/src/vespa/searchlib/fef/matchdatalayout.h
+++ b/searchlib/src/vespa/searchlib/fef/matchdatalayout.h
@@ -14,14 +14,16 @@ namespace search::fef {
class MatchDataLayout
{
private:
- uint32_t _numTermFields;
std::vector<uint32_t> _fieldIds;
-
public:
/**
* Create an empty object.
**/
MatchDataLayout();
+ MatchDataLayout(MatchDataLayout &&) noexcept = default;
+ MatchDataLayout & operator=(MatchDataLayout &&) noexcept = default;
+ MatchDataLayout(const MatchDataLayout &) = default;
+ MatchDataLayout & operator=(const MatchDataLayout &) = delete;
~MatchDataLayout();
/**
@@ -32,8 +34,9 @@ public:
**/
TermFieldHandle allocTermField(uint32_t fieldId) {
_fieldIds.push_back(fieldId);
- return _numTermFields++;
+ return _fieldIds.size() - 1;
}
+ void reserve(size_t sz) { _fieldIds.reserve(sz); }
/**
* Create a match data object with the layout described by this
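For context on the matchdatalayout change above: the redundant `_numTermFields` counter is dropped and the term-field count is derived from `_fieldIds.size()`, so the two can no longer drift apart, and a `reserve()` helper is added. A minimal, self-contained sketch of the same bookkeeping pattern (hypothetical `FieldLayout` type, not the Vespa class):

```cpp
// Illustrative sketch only (hypothetical FieldLayout, not the Vespa class):
// a layout that hands out handles and later needs the number of registered
// fields. The count is implied by the vector size, so it cannot drift out
// of sync with the stored field ids.
#include <cstddef>
#include <cstdint>
#include <vector>

class FieldLayout {
    std::vector<uint32_t> _fieldIds;   // one entry per allocated handle
public:
    void reserve(std::size_t n) { _fieldIds.reserve(n); }
    // Registers a field and returns its handle (the index just appended).
    uint32_t allocField(uint32_t fieldId) {
        _fieldIds.push_back(fieldId);
        return static_cast<uint32_t>(_fieldIds.size() - 1);
    }
    std::size_t numFields() const { return _fieldIds.size(); }
    uint32_t fieldIdAt(std::size_t handle) const { return _fieldIds[handle]; }
};
```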
diff --git a/searchlib/src/vespa/searchlib/fef/objectstore.h b/searchlib/src/vespa/searchlib/fef/objectstore.h
index 06575c61eb5..7ba08284111 100644
--- a/searchlib/src/vespa/searchlib/fef/objectstore.h
+++ b/searchlib/src/vespa/searchlib/fef/objectstore.h
@@ -2,7 +2,6 @@
#pragma once
#include <vespa/vespalib/stllike/hash_map.h>
-#include <cassert>
namespace search::fef {
@@ -66,7 +65,6 @@ const T &
as_value(const Anything &val) {
using WrapperType = AnyWrapper<T>;
const auto *wrapper = dynamic_cast<const WrapperType *>(&val);
- assert(wrapper != nullptr);
return wrapper->getValue();
}
diff --git a/searchlib/src/vespa/searchlib/fef/phrase_splitter_query_env.cpp b/searchlib/src/vespa/searchlib/fef/phrase_splitter_query_env.cpp
index dfa1d9886f7..bbbdbd69c67 100644
--- a/searchlib/src/vespa/searchlib/fef/phrase_splitter_query_env.cpp
+++ b/searchlib/src/vespa/searchlib/fef/phrase_splitter_query_env.cpp
@@ -43,7 +43,6 @@ PhraseSplitterQueryEnv::PhraseSplitterQueryEnv(const IQueryEnvironment & queryEn
TermFieldHandle numHandles = 0; // how many handles existed in underlying data
for (uint32_t i = 0; i < queryEnv.getNumTerms(); ++i) {
const ITermData *td = queryEnv.getTerm(i);
- assert(td != nullptr);
considerTerm(i, *td, fieldId);
numHandles += td->numFields();
}
diff --git a/searchlib/src/vespa/searchlib/fef/phrasesplitter.cpp b/searchlib/src/vespa/searchlib/fef/phrasesplitter.cpp
index 90a058eda00..b74f12bdb97 100644
--- a/searchlib/src/vespa/searchlib/fef/phrasesplitter.cpp
+++ b/searchlib/src/vespa/searchlib/fef/phrasesplitter.cpp
@@ -42,7 +42,6 @@ PhraseSplitter::update()
for (const auto &copy_info : _phrase_splitter_query_env.get_copy_info()) {
const TermFieldMatchData *src = _matchData->resolveTermField(copy_info.orig_handle);
TermFieldMatchData *dst = resolveSplittedTermField(copy_info.split_handle);
- assert(src != nullptr && dst != nullptr);
copyTermFieldMatchData(*dst, *src, copy_info.offsetInPhrase);
}
diff --git a/searchlib/src/vespa/searchlib/fef/simpletermdata.h b/searchlib/src/vespa/searchlib/fef/simpletermdata.h
index d501d0848e8..391a00e4c8a 100644
--- a/searchlib/src/vespa/searchlib/fef/simpletermdata.h
+++ b/searchlib/src/vespa/searchlib/fef/simpletermdata.h
@@ -7,7 +7,6 @@
#include "simpletermfielddata.h"
#include <vespa/searchlib/query/weight.h>
#include <vector>
-#include <cassert>
namespace search::fef {
@@ -128,24 +127,4 @@ public:
}
};
-/**
- * convenience adapter for easy iteration
- **/
-class SimpleTermFieldRangeAdapter
-{
- SimpleTermData& _ref;
- size_t _idx;
- size_t _lim;
-public:
- explicit SimpleTermFieldRangeAdapter(SimpleTermData& ref)
- : _ref(ref), _idx(0), _lim(ref.numFields())
- {}
-
- [[nodiscard]] bool valid() const { return (_idx < _lim); }
-
- [[nodiscard]] SimpleTermFieldData& get() const { return _ref.field(_idx); }
-
- void next() { assert(valid()); ++_idx; }
-};
-
}
diff --git a/searchlib/src/vespa/searchlib/fef/termfieldmatchdataarray.h b/searchlib/src/vespa/searchlib/fef/termfieldmatchdataarray.h
index 3c1b76ad40e..46d370ee8fe 100644
--- a/searchlib/src/vespa/searchlib/fef/termfieldmatchdataarray.h
+++ b/searchlib/src/vespa/searchlib/fef/termfieldmatchdataarray.h
@@ -3,7 +3,6 @@
#pragma once
#include <vector>
-#include <cassert>
#include <cstddef>
namespace search::fef {
@@ -43,7 +42,6 @@ public:
* @param value the pointer to be added
**/
TermFieldMatchDataArray &add(TermFieldMatchData *value) {
- assert(value != nullptr);
_array.push_back(value);
return *this;
}
diff --git a/searchlib/src/vespa/searchlib/memoryindex/url_field_inverter.cpp b/searchlib/src/vespa/searchlib/memoryindex/url_field_inverter.cpp
index c79d856676d..c1537c6b290 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/url_field_inverter.cpp
+++ b/searchlib/src/vespa/searchlib/memoryindex/url_field_inverter.cpp
@@ -10,6 +10,7 @@
#include <vespa/vespalib/text/utf8.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <stdexcept>
+#include <cassert>
#include <vespa/log/log.h>
LOG_SETUP(".memoryindex.url_field_inverter");
diff --git a/searchlib/src/vespa/searchlib/query/tree/node.h b/searchlib/src/vespa/searchlib/query/tree/node.h
index af9925e2ea3..7123d52a503 100644
--- a/searchlib/src/vespa/searchlib/query/tree/node.h
+++ b/searchlib/src/vespa/searchlib/query/tree/node.h
@@ -22,4 +22,3 @@ class Node {
};
}
-
diff --git a/searchlib/src/vespa/searchlib/query/tree/termnodes.cpp b/searchlib/src/vespa/searchlib/query/tree/termnodes.cpp
index 00f17f7963c..51882e6d185 100644
--- a/searchlib/src/vespa/searchlib/query/tree/termnodes.cpp
+++ b/searchlib/src/vespa/searchlib/query/tree/termnodes.cpp
@@ -3,6 +3,7 @@
#include "termnodes.h"
#include <vespa/vespalib/util/exceptions.h>
#include <charconv>
+#include <cassert>
using vespalib::IllegalArgumentException;
using vespalib::stringref;
diff --git a/searchlib/src/vespa/searchlib/queryeval/create_blueprint_visitor_helper.cpp b/searchlib/src/vespa/searchlib/queryeval/create_blueprint_visitor_helper.cpp
index d9338641a39..a2d244250cf 100644
--- a/searchlib/src/vespa/searchlib/queryeval/create_blueprint_visitor_helper.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/create_blueprint_visitor_helper.cpp
@@ -75,13 +75,12 @@ CreateBlueprintVisitorHelper::handleNumberTermAsText(query::NumberTerm &n)
template <typename WS, typename NODE>
void
CreateBlueprintVisitorHelper::createWeightedSet(std::unique_ptr<WS> bp, NODE &n) {
- FieldSpecList fields;
+ bp->reserve(n.getNumTerms());
for (size_t i = 0; i < n.getNumTerms(); ++i) {
- fields.clear();
- fields.add(bp->getNextChildField(_field));
auto term = n.getAsString(i);
query::SimpleStringTerm node(term.first, n.getView(), 0, term.second); // TODO Temporary
- bp->addTerm(_searchable.createBlueprint(_requestContext, fields, node), term.second.percent());
+ FieldSpec field = bp->getNextChildField(_field);
+ bp->addTerm(_searchable.createBlueprint(_requestContext, field, node), term.second.percent());
}
setResult(std::move(bp));
}
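The visitor loop above now reserves the blueprint's child containers up front and hands each term a single `FieldSpec` instead of a one-element `FieldSpecList`. A hedged sketch of the reserve-before-append pattern with placeholder types (`WeightedSetBuilder` and `Term` are illustrative, not the Vespa blueprint API):

```cpp
// Illustrative sketch with placeholder types (WeightedSetBuilder, Term are
// not the Vespa blueprint API): when the number of children is known before
// the build loop, reserving once avoids repeated reallocation of the
// parallel term/weight vectors.
#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>

struct Term {
    int32_t weight;
};

class WeightedSetBuilder {
    std::vector<std::unique_ptr<Term>> _terms;
    std::vector<int32_t> _weights;
public:
    void reserve(std::size_t n) {
        _terms.reserve(n);
        _weights.reserve(n);
    }
    void addTerm(std::unique_ptr<Term> term, int32_t weight) {
        _weights.push_back(weight);
        _terms.push_back(std::move(term));
    }
    std::size_t size() const { return _terms.size(); }
};

// Usage mirroring the visitor loop above: reserve once, then add each child.
inline WeightedSetBuilder buildSet(std::size_t numTerms) {
    WeightedSetBuilder bp;
    bp.reserve(numTerms);
    for (std::size_t i = 0; i < numTerms; ++i) {
        bp.addTerm(std::make_unique<Term>(Term{static_cast<int32_t>(i)}),
                   static_cast<int32_t>(i));
    }
    return bp;
}
```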
diff --git a/searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.cpp b/searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.cpp
index 61b717b1104..de5bdc33e3c 100644
--- a/searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.cpp
@@ -25,6 +25,13 @@ DotProductBlueprint::getNextChildField(const FieldSpec &outer)
}
void
+DotProductBlueprint::reserve(size_t num_children) {
+ _weights.reserve(num_children);
+ _terms.reserve(num_children);
+ _layout.reserve(num_children);
+}
+
+void
DotProductBlueprint::addTerm(Blueprint::UP term, int32_t weight)
{
HitEstimate childEst = term->getState().estimate();
@@ -41,8 +48,7 @@ DotProductBlueprint::addTerm(Blueprint::UP term, int32_t weight)
}
SearchIterator::UP
-DotProductBlueprint::createLeafSearch(const search::fef::TermFieldMatchDataArray &tfmda,
- bool) const
+DotProductBlueprint::createLeafSearch(const search::fef::TermFieldMatchDataArray &tfmda, bool) const
{
assert(tfmda.size() == 1);
assert(getState().numFields() == 1);
diff --git a/searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.h b/searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.h
index 4ba59ba755f..2975958b5af 100644
--- a/searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.h
+++ b/searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.h
@@ -26,6 +26,7 @@ public:
FieldSpec getNextChildField(const FieldSpec &outer);
// used by create visitor
+ void reserve(size_t num_children);
void addTerm(Blueprint::UP term, int32_t weight);
SearchIteratorUP createLeafSearch(const search::fef::TermFieldMatchDataArray &tfmda, bool strict) const override;
diff --git a/searchlib/src/vespa/searchlib/queryeval/equiv_blueprint.cpp b/searchlib/src/vespa/searchlib/queryeval/equiv_blueprint.cpp
index af6b59dd6ca..384dc0cd227 100644
--- a/searchlib/src/vespa/searchlib/queryeval/equiv_blueprint.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/equiv_blueprint.cpp
@@ -44,7 +44,7 @@ EquivBlueprint::EquivBlueprint(FieldSpecBaseList fields,
fef::MatchDataLayout subtree_mdl)
: ComplexLeafBlueprint(std::move(fields)),
_estimate(),
- _layout(subtree_mdl),
+ _layout(std::move(subtree_mdl)),
_terms(),
_exactness()
{
diff --git a/searchlib/src/vespa/searchlib/queryeval/fake_search.cpp b/searchlib/src/vespa/searchlib/queryeval/fake_search.cpp
index 94814dbb9b3..d2aa72011e6 100644
--- a/searchlib/src/vespa/searchlib/queryeval/fake_search.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/fake_search.cpp
@@ -5,9 +5,20 @@
#include <vespa/searchlib/fef/termfieldmatchdata.h>
#include <vespa/vespalib/objects/visit.h>
#include <vespa/searchcommon/attribute/i_search_context.h>
+#include <cassert>
namespace search::queryeval {
+FakeSearch::FakeSearch(const vespalib::string &tag, const vespalib::string &field,
+ const vespalib::string &term, const FakeResult &res,
+ fef::TermFieldMatchDataArray tfmda)
+ : _tag(tag), _field(field), _term(term),
+ _result(res), _offset(0), _tfmda(std::move(tfmda)),
+ _ctx(nullptr)
+{
+ assert(_tfmda.size() == 1);
+}
+
void
FakeSearch::doSeek(uint32_t docid)
{
diff --git a/searchlib/src/vespa/searchlib/queryeval/fake_search.h b/searchlib/src/vespa/searchlib/queryeval/fake_search.h
index 5cd04f80499..7b7fdf0f078 100644
--- a/searchlib/src/vespa/searchlib/queryeval/fake_search.h
+++ b/searchlib/src/vespa/searchlib/queryeval/fake_search.h
@@ -29,13 +29,7 @@ public:
const vespalib::string &field,
const vespalib::string &term,
const FakeResult &res,
- fef::TermFieldMatchDataArray tfmda)
- : _tag(tag), _field(field), _term(term),
- _result(res), _offset(0), _tfmda(std::move(tfmda)),
- _ctx(nullptr)
- {
- assert(_tfmda.size() == 1);
- }
+ fef::TermFieldMatchDataArray tfmda);
void attr_ctx(const attribute::ISearchContext *ctx) { _ctx = ctx; }
bool is_attr() const { return (_ctx != nullptr); }
void doSeek(uint32_t docid) override;
diff --git a/searchlib/src/vespa/searchlib/queryeval/same_element_blueprint.cpp b/searchlib/src/vespa/searchlib/queryeval/same_element_blueprint.cpp
index 3be28ab75de..9c3910b20f9 100644
--- a/searchlib/src/vespa/searchlib/queryeval/same_element_blueprint.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/same_element_blueprint.cpp
@@ -3,8 +3,6 @@
#include "same_element_blueprint.h"
#include "same_element_search.h"
#include "field_spec.hpp"
-#include "andsearch.h"
-#include "emptysearch.h"
#include <vespa/searchlib/fef/termfieldmatchdata.h>
#include <vespa/searchlib/attribute/searchcontextelementiterator.h>
#include <vespa/vespalib/objects/visit.hpp>
@@ -66,8 +64,7 @@ SameElementBlueprint::fetchPostings(const ExecuteInfo &execInfo)
std::unique_ptr<SameElementSearch>
SameElementBlueprint::create_same_element_search(search::fef::TermFieldMatchData& tfmd, bool strict) const
{
- fef::MatchDataLayout my_layout = _layout;
- fef::MatchData::UP md = my_layout.createMatchData();
+ fef::MatchData::UP md = _layout.createMatchData();
std::vector<ElementIterator::UP> children(_terms.size());
for (size_t i = 0; i < _terms.size(); ++i) {
const State &childState = _terms[i]->getState();
diff --git a/searchlib/src/vespa/searchlib/queryeval/same_element_search.cpp b/searchlib/src/vespa/searchlib/queryeval/same_element_search.cpp
index 98c51d7f1ca..5db1e0057cd 100644
--- a/searchlib/src/vespa/searchlib/queryeval/same_element_search.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/same_element_search.cpp
@@ -6,6 +6,7 @@
#include <vespa/vespalib/objects/visit.hpp>
#include <algorithm>
#include <functional>
+#include <cassert>
using TFMD = search::fef::TermFieldMatchData;
diff --git a/searchlib/src/vespa/searchlib/queryeval/searchable.h b/searchlib/src/vespa/searchlib/queryeval/searchable.h
index 2438cbf5a3b..a36a7f34e1c 100644
--- a/searchlib/src/vespa/searchlib/queryeval/searchable.h
+++ b/searchlib/src/vespa/searchlib/queryeval/searchable.h
@@ -22,23 +22,12 @@ class FieldSpecList;
**/
class Searchable
{
-protected:
- /**
- * Create a blueprint searching a single field.
- *
- * @return blueprint
- * @param requestContext that belongs to the query
- * @param field the field to search
- * @param term the query tree term
- **/
- virtual std::unique_ptr<Blueprint> createBlueprint(const IRequestContext & requestContext,
- const FieldSpec &field,
- const search::query::Node &term) = 0;
-
public:
using SP = std::shared_ptr<Searchable>;
Searchable() = default;
+ virtual ~Searchable() = default;
+
/**
* Create a blueprint searching a set of fields. The default
@@ -53,7 +42,17 @@ public:
virtual std::unique_ptr<Blueprint> createBlueprint(const IRequestContext & requestContext,
const FieldSpecList &fields,
const search::query::Node &term);
- virtual ~Searchable() = default;
+ /**
+ * Create a blueprint searching a single field.
+ *
+ * @return blueprint
+ * @param requestContext that belongs to the query
+ * @param field the field to search
+ * @param term the query tree term
+ **/
+ virtual std::unique_ptr<Blueprint> createBlueprint(const IRequestContext & requestContext,
+ const FieldSpec &field,
+ const search::query::Node &term) = 0;
};
}
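The `Searchable` reshuffle above promotes the single-field `createBlueprint` to a public pure virtual, while the multi-field overload keeps a default implementation (defined elsewhere, not shown in this hunk). A hypothetical sketch of that interface shape; the per-field delegation in the default is an assumption for illustration only:

```cpp
// Hypothetical sketch of the interface shape only. SearchableLike, FieldSpec,
// QueryTerm and the per-field delegation in the multi-field default are
// assumptions for illustration; the real default lives in searchable.cpp.
#include <memory>
#include <string>
#include <vector>

struct Blueprint { virtual ~Blueprint() = default; };
struct FieldSpec { std::string name; };
using FieldSpecList = std::vector<FieldSpec>;
struct QueryTerm { std::string text; };

class SearchableLike {
public:
    virtual ~SearchableLike() = default;
    // Multi-field entry point with a default implementation.
    virtual std::unique_ptr<Blueprint> createBlueprint(const FieldSpecList &fields,
                                                       const QueryTerm &term) {
        // Assumed behaviour: fall back to the first per-field blueprint so the
        // sketch stays self-contained; the real default may combine them.
        for (const auto &field : fields) {
            if (auto bp = createBlueprint(field, term)) {
                return bp;
            }
        }
        return {};
    }
    // Single-field entry point that concrete searchables must implement.
    virtual std::unique_ptr<Blueprint> createBlueprint(const FieldSpec &field,
                                                       const QueryTerm &term) = 0;
};
```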
diff --git a/searchlib/src/vespa/searchlib/queryeval/simple_phrase_search.cpp b/searchlib/src/vespa/searchlib/queryeval/simple_phrase_search.cpp
index f5069fd4f53..ea264935d42 100644
--- a/searchlib/src/vespa/searchlib/queryeval/simple_phrase_search.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/simple_phrase_search.cpp
@@ -4,6 +4,7 @@
#include <vespa/searchlib/fef/termfieldmatchdata.h>
#include <vespa/vespalib/objects/visit.h>
#include <functional>
+#include <cassert>
using search::fef::TermFieldMatchData;
using std::unique_ptr;
diff --git a/searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.cpp b/searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.cpp
index fe212666ec9..b4b55098eaa 100644
--- a/searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.cpp
@@ -56,6 +56,12 @@ ParallelWeakAndBlueprint::getNextChildField(const FieldSpec &outer)
}
void
+ParallelWeakAndBlueprint::reserve(size_t num_children) {
+ _weights.reserve(num_children);
+ _terms.reserve(num_children);
+}
+
+void
ParallelWeakAndBlueprint::addTerm(Blueprint::UP term, int32_t weight)
{
HitEstimate childEst = term->getState().estimate();
@@ -78,6 +84,7 @@ ParallelWeakAndBlueprint::createLeafSearch(const search::fef::TermFieldMatchData
assert(tfmda.size() == 1);
fef::MatchData::UP childrenMatchData = _layout.createMatchData();
wand::Terms terms;
+ terms.reserve(_terms.size());
for (size_t i = 0; i < _terms.size(); ++i) {
const State &childState = _terms[i]->getState();
assert(childState.numFields() == 1);
diff --git a/searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.h b/searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.h
index 1a481be5c32..a2c13f12485 100644
--- a/searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.h
+++ b/searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.h
@@ -44,7 +44,7 @@ public:
score_t scoreThreshold,
double thresholdBoostFactor,
uint32_t scoresAdjustFrequency);
- virtual ~ParallelWeakAndBlueprint() override;
+ ~ParallelWeakAndBlueprint() override;
const WeakAndHeap &getScores() const { return _scores; }
@@ -56,6 +56,7 @@ public:
FieldSpec getNextChildField(const FieldSpec &outer);
// Used by create visitor
+ void reserve(size_t num_children);
void addTerm(Blueprint::UP term, int32_t weight);
SearchIterator::UP createLeafSearch(const fef::TermFieldMatchDataArray &tfmda, bool strict) const override;
diff --git a/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.cpp b/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.cpp
index f855b72812a..ee55a89dcdc 100644
--- a/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.cpp
@@ -74,6 +74,13 @@ WeightedSetTermBlueprint::WeightedSetTermBlueprint(const FieldSpec &field)
WeightedSetTermBlueprint::~WeightedSetTermBlueprint() = default;
void
+WeightedSetTermBlueprint::reserve(size_t num_children) {
+ _weights.reserve(num_children);
+ _terms.reserve(num_children);
+ _layout.reserve(num_children);
+}
+
+void
WeightedSetTermBlueprint::addTerm(Blueprint::UP term, int32_t weight)
{
HitEstimate childEst = term->getState().estimate();
@@ -100,7 +107,7 @@ WeightedSetTermBlueprint::createLeafSearch(const fef::TermFieldMatchDataArray &t
// TODO: pass ownership with unique_ptr
children[i] = _terms[i]->createSearch(*md, true).release();
}
- return SearchIterator::UP(WeightedSetTermSearch::create(children, *tfmda[0], _children_field.isFilter(), _weights, std::move(md)));
+ return WeightedSetTermSearch::create(children, *tfmda[0], _children_field.isFilter(), _weights, std::move(md));
}
SearchIterator::UP
diff --git a/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.h b/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.h
index 2a3db3ec52d..3827dc8a35f 100644
--- a/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.h
+++ b/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.h
@@ -30,6 +30,7 @@ public:
FieldSpec getNextChildField(const FieldSpec &) { return _children_field; }
// used by create visitor
+ void reserve(size_t num_children);
void addTerm(Blueprint::UP term, int32_t weight);
SearchIteratorUP createLeafSearch(const fef::TermFieldMatchDataArray &tfmda, bool strict) const override;
diff --git a/searchlib/src/vespa/searchlib/test/diskindex/threelevelcountbuffers.cpp b/searchlib/src/vespa/searchlib/test/diskindex/threelevelcountbuffers.cpp
index bc92b691ce8..04c8b6b3904 100644
--- a/searchlib/src/vespa/searchlib/test/diskindex/threelevelcountbuffers.cpp
+++ b/searchlib/src/vespa/searchlib/test/diskindex/threelevelcountbuffers.cpp
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "threelevelcountbuffers.h"
+#include <cassert>
namespace search::diskindex {
diff --git a/searchsummary/src/vespa/searchsummary/docsummary/docsumstate.cpp b/searchsummary/src/vespa/searchsummary/docsummary/docsumstate.cpp
index db4ac0f32b0..f1fee7e9ac3 100644
--- a/searchsummary/src/vespa/searchsummary/docsummary/docsumstate.cpp
+++ b/searchsummary/src/vespa/searchsummary/docsummary/docsumstate.cpp
@@ -14,6 +14,7 @@
#include <vespa/searchlib/parsequery/stackdumpiterator.h>
#include <vespa/vespalib/stllike/hash_map.hpp>
#include <vespa/vespalib/util/issue.h>
+#include <cassert>
using search::common::GeoLocationParser;
using search::common::GeoLocationSpec;
diff --git a/searchsummary/src/vespa/searchsummary/docsummary/geoposdfw.cpp b/searchsummary/src/vespa/searchsummary/docsummary/geoposdfw.cpp
index 6d668561651..07e4bde54d0 100644
--- a/searchsummary/src/vespa/searchsummary/docsummary/geoposdfw.cpp
+++ b/searchsummary/src/vespa/searchsummary/docsummary/geoposdfw.cpp
@@ -10,6 +10,7 @@
#include <vespa/vespalib/stllike/asciistream.h>
#include <vespa/vespalib/util/issue.h>
#include <climits>
+#include <cassert>
#include <vespa/log/log.h>
LOG_SETUP(".searchlib.docsummary.geoposdfw");
diff --git a/storage/src/tests/distributor/check_condition_test.cpp b/storage/src/tests/distributor/check_condition_test.cpp
index 1b5cede8af6..ee8c9b888bb 100644
--- a/storage/src/tests/distributor/check_condition_test.cpp
+++ b/storage/src/tests/distributor/check_condition_test.cpp
@@ -253,4 +253,20 @@ TEST_F(CheckConditionTest, nested_get_traces_are_propagated_to_outcome) {
});
}
+TEST_F(CheckConditionTest, condition_evaluation_increments_probe_latency_metrics) {
+ getClock().setAbsoluteTimeInSeconds(1);
+ EXPECT_EQ(_metrics.latency.getLongValue("count"), 0);
+ EXPECT_EQ(_metrics.ok.getLongValue("last"), 0);
+ test_cond_with_2_gets_sent([&](auto& cond) {
+ cond.handle_reply(_sender, make_matched_reply(0));
+ getClock().setAbsoluteTimeInSeconds(3);
+ cond.handle_reply(_sender, make_matched_reply(1));
+ }, [&](auto& outcome) noexcept {
+ (void)outcome;
+ });
+ EXPECT_EQ(_metrics.latency.getLongValue("count"), 1);
+ EXPECT_EQ(_metrics.ok.getLongValue("last"), 1);
+ EXPECT_DOUBLE_EQ(_metrics.latency.getLast(), 2'000.0); // in millis
+}
+
}
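The new latency-metric test above advances the test clock from 1 s to 3 s between the two probe replies and expects the recorded latency to be 2000 ms. A standalone sketch of that fake-clock testing pattern (hypothetical `ManualClock` and `LatencyMetric`, not the Vespa metrics or test utilities):

```cpp
// Sketch of the testing pattern only (ManualClock and LatencyMetric are
// hypothetical stand-ins, not the Vespa metrics or test framework): advance
// a fake clock between start and completion, then assert on the recorded
// duration in milliseconds.
#include <cassert>
#include <chrono>
#include <cstdint>

class ManualClock {
    std::chrono::seconds _now{0};
public:
    void setAbsoluteTimeInSeconds(int64_t s) { _now = std::chrono::seconds(s); }
    std::chrono::steady_clock::time_point now() const {
        return std::chrono::steady_clock::time_point(
            std::chrono::duration_cast<std::chrono::steady_clock::duration>(_now));
    }
};

class LatencyMetric {
    uint64_t _count = 0;
    double _lastMillis = 0.0;
public:
    void addValueMillis(double ms) { ++_count; _lastMillis = ms; }
    uint64_t count() const { return _count; }
    double last() const { return _lastMillis; }
};

int main() {
    ManualClock clock;
    LatencyMetric latency;

    clock.setAbsoluteTimeInSeconds(1);
    auto start = clock.now();            // condition probe dispatched
    clock.setAbsoluteTimeInSeconds(3);   // final probe reply arrives 2 s later
    std::chrono::duration<double, std::milli> elapsed = clock.now() - start;
    latency.addValueMillis(elapsed.count());

    assert(latency.count() == 1);
    assert(latency.last() == 2000.0);    // in millis, as in the real test
    return 0;
}
```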
diff --git a/storage/src/tests/distributor/putoperationtest.cpp b/storage/src/tests/distributor/putoperationtest.cpp
index ff375e5b902..76b6741442e 100644
--- a/storage/src/tests/distributor/putoperationtest.cpp
+++ b/storage/src/tests/distributor/putoperationtest.cpp
@@ -73,7 +73,8 @@ public:
operation_context(),
getDistributorBucketSpace(),
msg,
- metrics().puts);
+ metrics().puts,
+ metrics().put_condition_probes);
op->start(_sender);
}
diff --git a/storage/src/tests/distributor/removeoperationtest.cpp b/storage/src/tests/distributor/removeoperationtest.cpp
index d88a0a574cc..d169c80a95d 100644
--- a/storage/src/tests/distributor/removeoperationtest.cpp
+++ b/storage/src/tests/distributor/removeoperationtest.cpp
@@ -6,7 +6,8 @@
#include <vespa/storage/distributor/distributor_stripe.h>
#include <vespa/storage/distributor/operations/external/removeoperation.h>
#include <vespa/storageapi/message/persistence.h>
-#include <vespa/vespalib/gtest/gtest.h>
+#include <gtest/gtest.h>
+#include <gmock/gmock.h>
using documentapi::TestAndSetCondition;
using document::test::makeDocumentBucket;
@@ -40,7 +41,8 @@ struct RemoveOperationTest : Test, DistributorStripeTestUtil {
operation_context(),
getDistributorBucketSpace(),
msg,
- metrics().removes);
+ metrics().removes,
+ metrics().remove_condition_probes);
op->start(_sender);
}
@@ -119,6 +121,7 @@ void ExtRemoveOperationTest::set_up_tas_remove_with_2_nodes(ReplicaState replica
auto remove = createRemove(docId);
remove->setCondition(TestAndSetCondition("test.foo"));
+ remove->getTrace().setLevel(9);
sendRemove(std::move(remove));
if (replica_state == ReplicaState::INCONSISTENT) {
ASSERT_EQ("Get => 1,Get => 0", _sender.getCommands(true));
@@ -304,4 +307,41 @@ TEST_F(ExtRemoveOperationTest, failed_condition_probe_fails_op_with_returned_err
_sender.getLastReply());
}
+TEST_F(ExtRemoveOperationTest, trace_is_propagated_from_condition_probe_gets_ok_probe_case) {
+ ASSERT_NO_FATAL_FAILURE(set_up_tas_remove_with_2_nodes(ReplicaState::INCONSISTENT));
+
+ ASSERT_EQ(sent_get_command(0)->getTrace().getLevel(), 9);
+ auto get_reply = make_get_reply(0, 50, false, true);
+ MBUS_TRACE(get_reply->getTrace(), 1, "a foo walks into a bar");
+
+ op->receive(_sender, get_reply);
+ op->receive(_sender, make_get_reply(1, 50, false, true));
+
+ ASSERT_EQ("Get => 1,Get => 0,Remove => 1,Remove => 0", _sender.getCommands(true));
+ reply_with(make_remove_reply(2, 50)); // remove from node 1
+ reply_with(make_remove_reply(3, 50)); // remove from node 0
+ ASSERT_EQ(_sender.replies().size(), 1);
+ auto remove_reply = sent_reply<api::RemoveReply>(0);
+
+ auto trace_str = remove_reply->getTrace().toString();
+ EXPECT_THAT(trace_str, HasSubstr("a foo walks into a bar"));
+}
+
+TEST_F(ExtRemoveOperationTest, trace_is_propagated_from_condition_probe_gets_failed_probe_case) {
+ ASSERT_NO_FATAL_FAILURE(set_up_tas_remove_with_2_nodes(ReplicaState::INCONSISTENT));
+
+ auto get_reply = make_get_reply(0, 50, false, false);
+ MBUS_TRACE(get_reply->getTrace(), 1, "a foo walks into a zoo");
+
+ op->receive(_sender, get_reply);
+ op->receive(_sender, make_get_reply(1, 50, false, false));
+
+ ASSERT_EQ("Get => 1,Get => 0", _sender.getCommands(true));
+ ASSERT_EQ(_sender.replies().size(), 1);
+ auto remove_reply = sent_reply<api::RemoveReply>(0);
+
+ auto trace_str = remove_reply->getTrace().toString();
+ EXPECT_THAT(trace_str, HasSubstr("a foo walks into a zoo"));
+}
+
} // storage::distributor
diff --git a/storage/src/vespa/storage/distributor/distributormetricsset.cpp b/storage/src/vespa/storage/distributor/distributormetricsset.cpp
index fad44782dd4..cbc0e6f6eef 100644
--- a/storage/src/vespa/storage/distributor/distributormetricsset.cpp
+++ b/storage/src/vespa/storage/distributor/distributormetricsset.cpp
@@ -16,11 +16,13 @@ BucketDbMetrics::~BucketDbMetrics() = default;
DistributorMetricSet::DistributorMetricSet()
: MetricSet("distributor", {{"distributor"}}, ""),
puts("puts", this),
+ put_condition_probes("put_condition_probes", this),
updates(this),
update_puts("update_puts", this),
update_gets("update_gets", this),
update_metadata_gets("update_metadata_gets", this),
removes("removes", this),
+ remove_condition_probes("remove_condition_probes", this),
removelocations("removelocations", this),
gets("gets", this),
stats("stats", this),
diff --git a/storage/src/vespa/storage/distributor/distributormetricsset.h b/storage/src/vespa/storage/distributor/distributormetricsset.h
index ac140b85282..739e84759f1 100644
--- a/storage/src/vespa/storage/distributor/distributormetricsset.h
+++ b/storage/src/vespa/storage/distributor/distributormetricsset.h
@@ -20,24 +20,26 @@ struct BucketDbMetrics : metrics::MetricSet {
class DistributorMetricSet : public metrics::MetricSet {
public:
PersistenceOperationMetricSet puts;
- UpdateMetricSet updates;
+ PersistenceOperationMetricSet put_condition_probes;
+ UpdateMetricSet updates;
PersistenceOperationMetricSet update_puts;
PersistenceOperationMetricSet update_gets;
PersistenceOperationMetricSet update_metadata_gets;
PersistenceOperationMetricSet removes;
+ PersistenceOperationMetricSet remove_condition_probes;
PersistenceOperationMetricSet removelocations;
PersistenceOperationMetricSet gets;
PersistenceOperationMetricSet stats;
PersistenceOperationMetricSet getbucketlists;
- VisitorMetricSet visits;
- metrics::DoubleAverageMetric stateTransitionTime;
- metrics::DoubleAverageMetric set_cluster_state_processing_time;
- metrics::DoubleAverageMetric activate_cluster_state_processing_time;
- metrics::DoubleAverageMetric recoveryModeTime;
- metrics::LongValueMetric docsStored;
- metrics::LongValueMetric bytesStored;
- BucketDbMetrics mutable_dbs;
- BucketDbMetrics read_only_dbs;
+ VisitorMetricSet visits;
+ metrics::DoubleAverageMetric stateTransitionTime;
+ metrics::DoubleAverageMetric set_cluster_state_processing_time;
+ metrics::DoubleAverageMetric activate_cluster_state_processing_time;
+ metrics::DoubleAverageMetric recoveryModeTime;
+ metrics::LongValueMetric docsStored;
+ metrics::LongValueMetric bytesStored;
+ BucketDbMetrics mutable_dbs;
+ BucketDbMetrics read_only_dbs;
explicit DistributorMetricSet();
~DistributorMetricSet() override;
diff --git a/storage/src/vespa/storage/distributor/externaloperationhandler.cpp b/storage/src/vespa/storage/distributor/externaloperationhandler.cpp
index 6cb404aaa0a..d6bb5562a07 100644
--- a/storage/src/vespa/storage/distributor/externaloperationhandler.cpp
+++ b/storage/src/vespa/storage/distributor/externaloperationhandler.cpp
@@ -332,7 +332,9 @@ bool ExternalOperationHandler::onPut(const std::shared_ptr<api::PutCommand>& cmd
if (allow) {
_op = std::make_shared<PutOperation>(_node_ctx, _op_ctx,
_op_ctx.bucket_space_repo().get(bucket_space),
- std::move(cmd), getMetrics().puts, std::move(handle));
+ std::move(cmd),
+ getMetrics().puts, getMetrics().put_condition_probes,
+ std::move(handle));
} else {
_msg_sender.sendUp(makeConcurrentMutationRejectionReply(*cmd, cmd->getDocumentId(), metrics));
}
@@ -386,7 +388,8 @@ bool ExternalOperationHandler::onRemove(const std::shared_ptr<api::RemoveCommand
auto &distributorBucketSpace(_op_ctx.bucket_space_repo().get(bucket_space));
_op = std::make_shared<RemoveOperation>(_node_ctx, _op_ctx, distributorBucketSpace, std::move(cmd),
- getMetrics().removes, std::move(handle));
+ getMetrics().removes, getMetrics().remove_condition_probes,
+ std::move(handle));
} else {
_msg_sender.sendUp(makeConcurrentMutationRejectionReply(*cmd, cmd->getDocumentId(), metrics));
}
diff --git a/storage/src/vespa/storage/distributor/operations/external/check_condition.cpp b/storage/src/vespa/storage/distributor/operations/external/check_condition.cpp
index 9f7dbcaa132..fc619d9eb23 100644
--- a/storage/src/vespa/storage/distributor/operations/external/check_condition.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/check_condition.cpp
@@ -58,7 +58,7 @@ CheckCondition::CheckCondition(const document::Bucket& bucket,
const documentapi::TestAndSetCondition& tas_condition,
const DistributorBucketSpace& bucket_space,
const DistributorNodeContext& node_ctx,
- PersistenceOperationMetricSet& metric,
+ PersistenceOperationMetricSet& condition_probe_metrics,
uint32_t trace_level,
private_ctor_tag)
: _doc_id_bucket(bucket),
@@ -66,7 +66,8 @@ CheckCondition::CheckCondition(const document::Bucket& bucket,
_node_ctx(node_ctx),
_cluster_state_version_at_creation_time(_bucket_space.getClusterState().getVersion()),
_cond_get_op(),
- _sent_message_map()
+ _sent_message_map(),
+ _outcome()
{
// Condition checks only return metadata back to the distributor and thus have an empty fieldset.
// Side note: the BucketId provided to the GetCommand is ignored; GetOperation computes it explicitly from the doc ID.
@@ -75,8 +76,8 @@ CheckCondition::CheckCondition(const document::Bucket& bucket,
get_cmd->getTrace().setLevel(trace_level);
_cond_get_op = std::make_shared<GetOperation>(_node_ctx, _bucket_space,
_bucket_space.getBucketDatabase().acquire_read_guard(),
- std::move(get_cmd),
- metric, api::InternalReadConsistency::Strong);
+ std::move(get_cmd), condition_probe_metrics,
+ api::InternalReadConsistency::Strong);
}
CheckCondition::~CheckCondition() = default;
@@ -220,7 +221,7 @@ CheckCondition::create_if_inconsistent_replicas(const document::Bucket& bucket,
const documentapi::TestAndSetCondition& tas_condition,
const DistributorNodeContext& node_ctx,
const DistributorStripeOperationContext& op_ctx,
- PersistenceOperationMetricSet& metric,
+ PersistenceOperationMetricSet& condition_probe_metrics,
uint32_t trace_level)
{
// TODO move this check to the caller?
@@ -237,8 +238,8 @@ CheckCondition::create_if_inconsistent_replicas(const document::Bucket& bucket,
if (!all_nodes_support_document_condition_probe(entries, op_ctx)) {
return {}; // Want write-repair, but one or more nodes are too old to use the feature
}
- return std::make_shared<CheckCondition>(bucket, doc_id, tas_condition, bucket_space,
- node_ctx, metric, trace_level, private_ctor_tag{});
+ return std::make_shared<CheckCondition>(bucket, doc_id, tas_condition, bucket_space, node_ctx,
+ condition_probe_metrics, trace_level, private_ctor_tag{});
}
}
diff --git a/storage/src/vespa/storage/distributor/operations/external/check_condition.h b/storage/src/vespa/storage/distributor/operations/external/check_condition.h
index 2a659c55081..382aec6242c 100644
--- a/storage/src/vespa/storage/distributor/operations/external/check_condition.h
+++ b/storage/src/vespa/storage/distributor/operations/external/check_condition.h
@@ -114,7 +114,7 @@ public:
const documentapi::TestAndSetCondition& tas_condition,
const DistributorBucketSpace& bucket_space,
const DistributorNodeContext& node_ctx,
- PersistenceOperationMetricSet& metric,
+ PersistenceOperationMetricSet& condition_probe_metrics,
uint32_t trace_level,
private_ctor_tag);
~CheckCondition();
@@ -135,8 +135,8 @@ public:
const documentapi::TestAndSetCondition& tas_condition,
const DistributorNodeContext& node_ctx,
const DistributorStripeOperationContext& op_ctx,
- PersistenceOperationMetricSet& metric,
- uint32_t trace_level = 0); // TODO remove default value
+ PersistenceOperationMetricSet& condition_probe_metrics,
+ uint32_t trace_level);
private:
[[nodiscard]] bool replica_set_changed_after_get_operation() const;
diff --git a/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp
index 952aeff0800..8c6fdb314f3 100644
--- a/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp
@@ -26,6 +26,7 @@ PutOperation::PutOperation(const DistributorNodeContext& node_ctx,
DistributorBucketSpace& bucket_space,
std::shared_ptr<api::PutCommand> msg,
PersistenceOperationMetricSet& metric,
+ PersistenceOperationMetricSet& condition_probe_metrics,
SequencingHandle sequencing_handle)
: SequencedOperation(std::move(sequencing_handle)),
_tracker_instance(metric, std::make_shared<api::PutReply>(*msg), node_ctx, op_ctx, msg->getTimestamp()),
@@ -34,7 +35,7 @@ PutOperation::PutOperation(const DistributorNodeContext& node_ctx,
_doc_id_bucket_id(document::BucketIdFactory{}.getBucketId(_msg->getDocumentId())),
_node_ctx(node_ctx),
_op_ctx(op_ctx),
- _temp_metric(metric), // TODO
+ _condition_probe_metrics(condition_probe_metrics),
_bucket_space(bucket_space)
{
}
@@ -156,7 +157,7 @@ void PutOperation::start_conditional_put(DistributorStripeMessageSender& sender)
document::Bucket bucket(_msg->getBucket().getBucketSpace(), _doc_id_bucket_id);
_check_condition = CheckCondition::create_if_inconsistent_replicas(bucket, _bucket_space, _msg->getDocumentId(),
_msg->getCondition(), _node_ctx, _op_ctx,
- _temp_metric, _msg->getTrace().getLevel());
+ _condition_probe_metrics, _msg->getTrace().getLevel());
if (!_check_condition) {
start_direct_put_dispatch(sender);
} else {
diff --git a/storage/src/vespa/storage/distributor/operations/external/putoperation.h b/storage/src/vespa/storage/distributor/operations/external/putoperation.h
index 6befb8d3e38..635accc1865 100644
--- a/storage/src/vespa/storage/distributor/operations/external/putoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/external/putoperation.h
@@ -28,6 +28,7 @@ public:
DistributorBucketSpace& bucketSpace,
std::shared_ptr<api::PutCommand> msg,
PersistenceOperationMetricSet& metric,
+ PersistenceOperationMetricSet& condition_probe_metrics,
SequencingHandle sequencingHandle = SequencingHandle());
~PutOperation() override;
@@ -44,7 +45,7 @@ private:
document::BucketId _doc_id_bucket_id;
const DistributorNodeContext& _node_ctx;
DistributorStripeOperationContext& _op_ctx;
- PersistenceOperationMetricSet& _temp_metric;
+ PersistenceOperationMetricSet& _condition_probe_metrics;
DistributorBucketSpace& _bucket_space;
std::shared_ptr<CheckCondition> _check_condition;
diff --git a/storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp
index d3001c37f7c..96182b0744f 100644
--- a/storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp
@@ -16,6 +16,7 @@ RemoveOperation::RemoveOperation(const DistributorNodeContext& node_ctx,
DistributorBucketSpace& bucketSpace,
std::shared_ptr<api::RemoveCommand> msg,
PersistenceOperationMetricSet& metric,
+ PersistenceOperationMetricSet& condition_probe_metrics,
SequencingHandle sequencingHandle)
: SequencedOperation(std::move(sequencingHandle)),
_tracker_instance(metric,
@@ -26,7 +27,7 @@ RemoveOperation::RemoveOperation(const DistributorNodeContext& node_ctx,
_doc_id_bucket_id(document::BucketIdFactory{}.getBucketId(_msg->getDocumentId())),
_node_ctx(node_ctx),
_op_ctx(op_ctx),
- _temp_metric(metric), // TODO
+ _condition_probe_metrics(condition_probe_metrics),
_bucket_space(bucketSpace),
_check_condition()
{
@@ -48,13 +49,13 @@ void RemoveOperation::start_conditional_remove(DistributorStripeMessageSender& s
document::Bucket bucket(_msg->getBucket().getBucketSpace(), _doc_id_bucket_id);
_check_condition = CheckCondition::create_if_inconsistent_replicas(bucket, _bucket_space, _msg->getDocumentId(),
_msg->getCondition(), _node_ctx, _op_ctx,
- _temp_metric);
+ _condition_probe_metrics, _msg->getTrace().getLevel());
if (!_check_condition) {
start_direct_remove_dispatch(sender);
} else {
// Inconsistent replicas; need write repair
_check_condition->start_and_send(sender);
- const auto& outcome = _check_condition->maybe_outcome(); // Might be done immediately
+ auto& outcome = _check_condition->maybe_outcome(); // Might be done immediately
if (outcome) {
on_completed_check_condition(*outcome, sender);
}
@@ -110,7 +111,7 @@ RemoveOperation::onReceive(DistributorStripeMessageSender& sender, const std::sh
{
if (_check_condition) {
_check_condition->handle_reply(sender, msg);
- const auto& outcome = _check_condition->maybe_outcome();
+ auto& outcome = _check_condition->maybe_outcome();
if (!outcome) {
return; // Condition check not done yet
}
@@ -131,9 +132,12 @@ RemoveOperation::onReceive(DistributorStripeMessageSender& sender, const std::sh
_tracker.receiveReply(sender, reply);
}
-void RemoveOperation::on_completed_check_condition(const CheckCondition::Outcome& outcome,
+void RemoveOperation::on_completed_check_condition(CheckCondition::Outcome& outcome,
DistributorStripeMessageSender& sender)
{
+ if (!outcome.trace().isEmpty()) {
+ _tracker.add_trace_tree_to_reply(outcome.steal_trace());
+ }
if (outcome.matched_condition()) {
_msg->clear_condition(); // Transform to unconditional Remove
start_direct_remove_dispatch(sender);
diff --git a/storage/src/vespa/storage/distributor/operations/external/removeoperation.h b/storage/src/vespa/storage/distributor/operations/external/removeoperation.h
index ba6d42c5108..9f3a98294ea 100644
--- a/storage/src/vespa/storage/distributor/operations/external/removeoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/external/removeoperation.h
@@ -19,6 +19,7 @@ public:
DistributorBucketSpace& bucketSpace,
std::shared_ptr<api::RemoveCommand> msg,
PersistenceOperationMetricSet& metric,
+ PersistenceOperationMetricSet& condition_probe_metrics,
SequencingHandle sequencingHandle = SequencingHandle());
~RemoveOperation() override;
@@ -36,13 +37,13 @@ private:
document::BucketId _doc_id_bucket_id;
const DistributorNodeContext& _node_ctx;
DistributorStripeOperationContext& _op_ctx;
- PersistenceOperationMetricSet& _temp_metric;
+ PersistenceOperationMetricSet& _condition_probe_metrics;
DistributorBucketSpace& _bucket_space;
std::shared_ptr<CheckCondition> _check_condition;
void start_direct_remove_dispatch(DistributorStripeMessageSender& sender);
void start_conditional_remove(DistributorStripeMessageSender& sender);
- void on_completed_check_condition(const CheckCondition::Outcome& outcome,
+ void on_completed_check_condition(CheckCondition::Outcome& outcome,
DistributorStripeMessageSender& sender);
[[nodiscard]] bool has_condition() const noexcept;
};
diff --git a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
index 0cb4b223c11..73c65f54b21 100644
--- a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
@@ -34,6 +34,7 @@ TwoPhaseUpdateOperation::TwoPhaseUpdateOperation(
: SequencedOperation(std::move(sequencingHandle)),
_updateMetric(metrics.updates),
_putMetric(metrics.update_puts),
+ _put_condition_probe_metrics(metrics.put_condition_probes), // Updates never trigger Put write-repair, so we simply borrow a reference to the Put condition probe metrics here
_getMetric(metrics.update_gets),
_metadata_get_metrics(metrics.update_metadata_gets),
_updateCmd(std::move(msg)),
@@ -263,7 +264,7 @@ TwoPhaseUpdateOperation::schedulePutsWithUpdatedDocument(std::shared_ptr<documen
document::Bucket bucket(_updateCmd->getBucket().getBucketSpace(), document::BucketId(0));
auto put = std::make_shared<api::PutCommand>(bucket, doc, putTimestamp);
copyMessageSettings(*_updateCmd, *put);
- auto putOperation = std::make_shared<PutOperation>(_node_ctx, _op_ctx, _bucketSpace, std::move(put), _putMetric);
+ auto putOperation = std::make_shared<PutOperation>(_node_ctx, _op_ctx, _bucketSpace, std::move(put), _putMetric, _put_condition_probe_metrics);
PutOperation & op = *putOperation;
IntermediateMessageSender intermediate(_sentMessageMap, std::move(putOperation), sender);
op.start(intermediate, _node_ctx.clock().getSystemTime());
diff --git a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.h b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.h
index 486ed766510..d2ad5359fa6 100644
--- a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.h
@@ -139,6 +139,7 @@ private:
UpdateMetricSet& _updateMetric;
PersistenceOperationMetricSet& _putMetric;
+ PersistenceOperationMetricSet& _put_condition_probe_metrics;
PersistenceOperationMetricSet& _getMetric;
PersistenceOperationMetricSet& _metadata_get_metrics;
std::shared_ptr<api::UpdateCommand> _updateCmd;
diff --git a/storage/src/vespa/storage/distributor/persistence_operation_metric_set.cpp b/storage/src/vespa/storage/distributor/persistence_operation_metric_set.cpp
index 944b4bafa0a..e66884c4060 100644
--- a/storage/src/vespa/storage/distributor/persistence_operation_metric_set.cpp
+++ b/storage/src/vespa/storage/distributor/persistence_operation_metric_set.cpp
@@ -58,8 +58,8 @@ PersistenceFailuresMetricSet::clone(std::vector<Metric::UP>& ownerList, CopyType
if (copyType == INACTIVE) {
return MetricSet::clone(ownerList, INACTIVE, owner, includeUnused);
}
- return (PersistenceFailuresMetricSet*)
- (new PersistenceFailuresMetricSet(owner))->assignValues(*this);
+ return dynamic_cast<PersistenceFailuresMetricSet*>(
+ (new PersistenceFailuresMetricSet(owner))->assignValues(*this));
}
PersistenceOperationMetricSet::PersistenceOperationMetricSet(const std::string& name, MetricSet* owner)
@@ -69,6 +69,11 @@ PersistenceOperationMetricSet::PersistenceOperationMetricSet(const std::string&
failures(this)
{ }
+PersistenceOperationMetricSet::PersistenceOperationMetricSet(const std::string& name)
+ : PersistenceOperationMetricSet(name, nullptr)
+{
+}
+
PersistenceOperationMetricSet::~PersistenceOperationMetricSet() = default;
MetricSet *
@@ -78,9 +83,8 @@ PersistenceOperationMetricSet::clone(std::vector<Metric::UP>& ownerList, CopyTyp
if (copyType == INACTIVE) {
return MetricSet::clone(ownerList, INACTIVE, owner, includeUnused);
}
- return (PersistenceOperationMetricSet*)
- (new PersistenceOperationMetricSet(getName(), owner))
- ->assignValues(*this);
+ return dynamic_cast<PersistenceOperationMetricSet*>(
+ (new PersistenceOperationMetricSet(getName(), owner))->assignValues(*this));
}
void
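The `clone` methods above switch from C-style casts to `dynamic_cast` on the pointer returned by `assignValues`. A minimal sketch of why the checked downcast is preferable, with hypothetical `MetricBase`/`LatencyMetricSet` types standing in for the real metric sets:

```cpp
// Minimal sketch with hypothetical MetricBase/LatencyMetricSet types:
// assignValues() returns a base pointer, and dynamic_cast makes the downcast
// checked at runtime instead of silently trusting a C-style cast.
#include <cassert>

class MetricBase {
public:
    virtual ~MetricBase() = default;
    // Copies values from rhs and returns this as the base type.
    virtual MetricBase *assignValues(const MetricBase &rhs) {
        _value = rhs._value;
        return this;
    }
    double value() const { return _value; }
protected:
    double _value = 0.0;
};

class LatencyMetricSet : public MetricBase {
public:
    // Caller takes ownership of the returned copy, mirroring clone().
    static LatencyMetricSet *cloneFrom(const LatencyMetricSet &rhs) {
        // dynamic_cast yields nullptr (and trips the assert) if the object is
        // not actually a LatencyMetricSet, unlike a C-style cast.
        auto *copy = dynamic_cast<LatencyMetricSet *>(
            (new LatencyMetricSet())->assignValues(rhs));
        assert(copy != nullptr);
        return copy;
    }
};
```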
diff --git a/storage/src/vespa/storage/distributor/persistence_operation_metric_set.h b/storage/src/vespa/storage/distributor/persistence_operation_metric_set.h
index b818d1bdd9f..eb1c3f57252 100644
--- a/storage/src/vespa/storage/distributor/persistence_operation_metric_set.h
+++ b/storage/src/vespa/storage/distributor/persistence_operation_metric_set.h
@@ -40,10 +40,11 @@ class PersistenceOperationMetricSet : public metrics::MetricSet
mutable std::mutex _mutex;
public:
metrics::DoubleAverageMetric latency;
- metrics::LongCountMetric ok;
+ metrics::LongCountMetric ok;
PersistenceFailuresMetricSet failures;
- PersistenceOperationMetricSet(const std::string& name, metrics::MetricSet* owner = nullptr);
+ PersistenceOperationMetricSet(const std::string& name, metrics::MetricSet* owner);
+ explicit PersistenceOperationMetricSet(const std::string& name);
~PersistenceOperationMetricSet() override;
MetricSet * clone(std::vector<Metric::UP>& ownerList, CopyType copyType,
@@ -57,7 +58,6 @@ public:
*/
void updateFromResult(const api::ReturnCode& result);
- friend class LockWrapper;
class LockWrapper {
std::unique_lock<std::mutex> _lock;
PersistenceOperationMetricSet& _self;
diff --git a/streamingvisitors/src/vespa/vsm/vsm/docsumfilter.cpp b/streamingvisitors/src/vespa/vsm/vsm/docsumfilter.cpp
index 596525e17d7..146dd487769 100644
--- a/streamingvisitors/src/vespa/vsm/vsm/docsumfilter.cpp
+++ b/streamingvisitors/src/vespa/vsm/vsm/docsumfilter.cpp
@@ -11,6 +11,7 @@
#include <vespa/document/datatype/datatype.h>
#include <vespa/document/fieldvalue/stringfieldvalue.h>
#include <vespa/vespalib/data/slime/inserter.h>
+#include <cassert>
#include <vespa/log/log.h>
LOG_SETUP(".vsm.docsumfilter");
diff --git a/tenant-cd-api/src/main/java/ai/vespa/hosted/cd/EnabledInRegions.java b/tenant-cd-api/src/main/java/ai/vespa/hosted/cd/EnabledInRegions.java
index db2e5ac5f95..9ccd0588d6d 100644
--- a/tenant-cd-api/src/main/java/ai/vespa/hosted/cd/EnabledInRegions.java
+++ b/tenant-cd-api/src/main/java/ai/vespa/hosted/cd/EnabledInRegions.java
@@ -35,7 +35,7 @@ class EnabledInRegionsCondition implements ExecutionCondition {
return ConditionEvaluationResult.enabled(EnabledInRegions.class.getSimpleName() + " is not present");
List<String> enablingRegions = List.of(annotation.get().value());
- String thisRegion = TestRuntime.get().application().instance();
+ String thisRegion = TestRuntime.get().zone().region();
String reason = "Enabled in: %s. Current region: %s.".formatted(enablingRegions.isEmpty() ? "no regions" : "regions " + String.join(", ", enablingRegions), thisRegion);
return enablingRegions.contains(thisRegion) ? ConditionEvaluationResult.enabled(reason) : ConditionEvaluationResult.disabled(reason);
}
diff --git a/vespa-dependencies-enforcer/allowed-maven-dependencies.txt b/vespa-dependencies-enforcer/allowed-maven-dependencies.txt
index 7c2db779693..de2ee3fb412 100644
--- a/vespa-dependencies-enforcer/allowed-maven-dependencies.txt
+++ b/vespa-dependencies-enforcer/allowed-maven-dependencies.txt
@@ -90,7 +90,7 @@ net.openhft:zero-allocation-hashing:0.16
org.antlr:antlr-runtime:3.5.3
org.antlr:antlr4-runtime:4.11.1
org.apache.aries.spifly:org.apache.aries.spifly.dynamic.bundle:1.3.6
-org.apache.commons:commons-compress:1.22
+org.apache.commons:commons-compress:1.23.0
org.apache.commons:commons-csv:1.8
org.apache.commons:commons-exec:1.3
org.apache.commons:commons-lang3:3.12.0
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/serialization/JsonFormat.java b/vespajlib/src/main/java/com/yahoo/tensor/serialization/JsonFormat.java
index 45e581d73e8..9c34875dfd7 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/serialization/JsonFormat.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/serialization/JsonFormat.java
@@ -213,7 +213,7 @@ public class JsonFormat {
if (root.field("cells").valid() && ! primitiveContent(root.field("cells")))
decodeCells(root.field("cells"), builder);
else if (root.field("values").valid() && builder.type().dimensions().stream().allMatch(d -> d.isIndexed()))
- decodeValues(root.field("values"), builder);
+ decodeValuesAtTop(root.field("values"), builder);
else if (root.field("blocks").valid())
decodeBlocks(root.field("blocks"), builder);
else
@@ -252,11 +252,11 @@ public class JsonFormat {
builder.cell(asAddress(key, builder.type()), decodeNumeric(value));
}
- private static void decodeValues(Inspector values, Tensor.Builder builder) {
- decodeValues(values, builder, new MutableInteger(0));
+ private static void decodeValuesAtTop(Inspector values, Tensor.Builder builder) {
+ decodeNestedValues(values, builder, new MutableInteger(0));
}
- private static void decodeValues(Inspector values, Tensor.Builder builder, MutableInteger index) {
+ private static void decodeNestedValues(Inspector values, Tensor.Builder builder, MutableInteger index) {
if ( ! (builder instanceof IndexedTensor.BoundBuilder indexedBuilder))
throw new IllegalArgumentException("An array of values can only be used with a dense tensor. Use a map instead");
if (values.type() == Type.STRING) {
@@ -275,7 +275,7 @@ public class JsonFormat {
values.traverse((ArrayTraverser) (__, value) -> {
if (value.type() == Type.ARRAY)
- decodeValues(value, builder, index);
+ decodeNestedValues(value, builder, index);
else if (value.type() == Type.LONG || value.type() == Type.DOUBLE)
indexedBuilder.cellByDirectIndex(index.next(), value.asDouble());
else
@@ -300,7 +300,7 @@ public class JsonFormat {
if (block.type() != Type.OBJECT)
throw new IllegalArgumentException("Expected an item in a blocks array to be an object, not " + block.type());
mixedBuilder.block(decodeAddress(block.field("address"), mixedBuilder.type().mappedSubtype()),
- decodeValues(block.field("values"), mixedBuilder));
+ decodeValuesInBlock(block.field("values"), mixedBuilder));
}
/** Decodes a tensor value directly at the root, where the format is decided by the tensor type. */
@@ -311,7 +311,7 @@ public class JsonFormat {
if (isArrayOfObjects(root))
decodeCells(root, builder);
else if ( ! hasMapped)
- decodeValues(root, builder);
+ decodeValuesAtTop(root, builder);
else if (hasMapped && hasIndexed)
decodeBlocks(root, builder);
else
@@ -330,7 +330,7 @@ public class JsonFormat {
if (value.type() != Type.ARRAY)
throw new IllegalArgumentException("Expected an item in a blocks array to be an array, not " + value.type());
mixedBuilder.block(asAddress(key, mixedBuilder.type().mappedSubtype()),
- decodeValues(value, mixedBuilder));
+ decodeValuesInBlock(value, mixedBuilder));
}
private static byte decodeHex(String input, int index) {
@@ -408,7 +408,7 @@ public class JsonFormat {
};
}
- private static double[] decodeValues(Inspector valuesField, MixedTensor.BoundBuilder mixedBuilder) {
+ private static double[] decodeValuesInBlock(Inspector valuesField, MixedTensor.BoundBuilder mixedBuilder) {
double[] values = new double[(int)mixedBuilder.denseSubspaceSize()];
if (valuesField.type() == Type.ARRAY) {
if (valuesField.entries() == 0) {
diff --git a/vespalib/src/tests/btree/btree_test.cpp b/vespalib/src/tests/btree/btree_test.cpp
index afad3523fa3..6305eca41ac 100644
--- a/vespalib/src/tests/btree/btree_test.cpp
+++ b/vespalib/src/tests/btree/btree_test.cpp
@@ -2,7 +2,6 @@
#include <string>
#include <vespa/vespalib/btree/btreeroot.h>
-#include <vespa/vespalib/btree/btreebuilder.h>
#include <vespa/vespalib/btree/btreenodeallocator.h>
#include <vespa/vespalib/btree/btree.h>
#include <vespa/vespalib/btree/btreestore.h>
@@ -297,6 +296,12 @@ BTreeTest::assertMemoryUsage(const vespalib::MemoryUsage & exp, const vespalib::
return result;
}
+TEST_F(BTreeTest, control_iterator_size) {
+ EXPECT_EQ(208u, sizeof(BTreeIteratorBase<uint32_t, uint32_t, NoAggregated>));
+ EXPECT_EQ(208u, sizeof(BTreeIteratorBase<uint32_t, BTreeNoLeafData, NoAggregated>));
+ EXPECT_EQ(544u, sizeof(MyTree::Iterator));
+}
+
TEST_F(BTreeTest, require_that_node_insert_works)
{
GenerationHandler g;
diff --git a/vespalib/src/vespa/vespalib/btree/btree.h b/vespalib/src/vespa/vespalib/btree/btree.h
index c2f5aac01b7..0099da718a3 100644
--- a/vespalib/src/vespa/vespalib/btree/btree.h
+++ b/vespalib/src/vespa/vespalib/btree/btree.h
@@ -39,50 +39,22 @@ public:
using ConstIterator = typename TreeType::ConstIterator;
using FrozenView = typename TreeType::FrozenView;
using AggrCalcType = typename TreeType::AggrCalcType;
-private:
- NodeAllocatorType _alloc;
- TreeType _tree;
-
- BTree(const BTree &rhs);
- BTree &
- operator=(BTree &rhs);
-
-public:
+ BTree(const BTree &rhs) = delete;
+ BTree & operator=(BTree &rhs) = delete;
BTree();
~BTree();
const NodeAllocatorType &getAllocator() const { return _alloc; }
NodeAllocatorType &getAllocator() { return _alloc; }
-
- void
- disableFreeLists() {
- _alloc.disableFreeLists();
- }
-
- void
- disable_entry_hold_list()
- {
- _alloc.disable_entry_hold_list();
- }
-
- // Inherit doc from BTreeRoot
- void clear() {
- _tree.clear(_alloc);
- }
- void assign(Builder & rhs) {
- _tree.assign(rhs, _alloc);
- }
+ void disableFreeLists() { _alloc.disableFreeLists(); }
+ void disable_entry_hold_list() { _alloc.disable_entry_hold_list(); }
+ void clear() { _tree.clear(_alloc); }
+ void assign(Builder & rhs) { _tree.assign(rhs, _alloc); }
bool insert(const KeyType & key, const DataType & data, CompareT comp = CompareT()) {
return _tree.insert(key, data, _alloc, comp);
}
-
- void
- insert(Iterator &itr,
- const KeyType &key, const DataType &data)
- {
- _tree.insert(itr, key, data);
- }
+ void insert(Iterator &itr, const KeyType &key, const DataType &data) { _tree.insert(itr, key, data); }
Iterator find(const KeyType & key, CompareT comp = CompareT()) const {
return _tree.find(key, _alloc, comp);
@@ -97,55 +69,23 @@ public:
return _tree.remove(key, _alloc, comp);
}
- void
- remove(Iterator &itr)
- {
- _tree.remove(itr);
- }
-
- Iterator begin() const {
- return _tree.begin(_alloc);
- }
- FrozenView getFrozenView() const {
- return _tree.getFrozenView(_alloc);
- }
- size_t size() const {
- return _tree.size(_alloc);
- }
- vespalib::string toString() const {
- return _tree.toString(_alloc);
- }
- bool isValid(CompareT comp = CompareT()) const {
- return _tree.isValid(_alloc, comp);
- }
- bool isValidFrozen(CompareT comp = CompareT()) const {
- return _tree.isValidFrozen(_alloc, comp);
- }
- size_t bitSize() const {
- return _tree.bitSize(_alloc);
- }
+ void remove(Iterator &itr) { _tree.remove(itr); }
+ Iterator begin() const { return _tree.begin(_alloc); }
+ FrozenView getFrozenView() const { return _tree.getFrozenView(_alloc); }
+ size_t size() const { return _tree.size(_alloc); }
+ vespalib::string toString() const { return _tree.toString(_alloc); }
+ bool isValid(CompareT comp = CompareT()) const { return _tree.isValid(_alloc, comp); }
+ bool isValidFrozen(CompareT comp = CompareT()) const { return _tree.isValidFrozen(_alloc, comp); }
+ size_t bitSize() const { return _tree.bitSize(_alloc); }
size_t bitSize(BTreeNode::Ref node) const {
return _tree.bitSize(node, _alloc);
}
- void setRoot(BTreeNode::Ref newRoot) {
- _tree.setRoot(newRoot, _alloc);
- }
- BTreeNode::Ref getRoot() const {
- return _tree.getRoot();
- }
- vespalib::MemoryUsage getMemoryUsage() const {
- return _alloc.getMemoryUsage();
- }
-
- const AggrT &
- getAggregated() const
- {
- return _tree.getAggregated(_alloc);
- }
+ void setRoot(BTreeNode::Ref newRoot) { _tree.setRoot(newRoot, _alloc); }
+ BTreeNode::Ref getRoot() const { return _tree.getRoot(); }
+ vespalib::MemoryUsage getMemoryUsage() const { return _alloc.getMemoryUsage(); }
+ const AggrT & getAggregated() const { return _tree.getAggregated(_alloc); }
- void
- thaw(Iterator &itr)
- {
+ void thaw(Iterator &itr) {
assert(&itr.getAllocator() == &getAllocator());
_tree.thaw(itr);
}
@@ -153,18 +93,17 @@ public:
void compact_worst(const datastore::CompactionStrategy& compaction_strategy);
template <typename FunctionType>
- void
- foreach_key(FunctionType func) const
- {
+ void foreach_key(FunctionType func) const {
_alloc.getNodeStore().foreach_key(_tree.getRoot(), func);
}
template <typename FunctionType>
- void
- foreach(FunctionType func) const
- {
+ void foreach(FunctionType func) const {
_alloc.getNodeStore().foreach(_tree.getRoot(), func);
}
+private:
+ NodeAllocatorType _alloc;
+ TreeType _tree;
};
}
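The `BTree` cleanup above replaces the old private, never-defined copy members with publicly deleted ones. A short sketch contrasting the two idioms:

```cpp
// Sketch of the idiom change only: publicly deleted copy operations give a
// clear compile-time error at the call site, instead of the old trick of
// private, never-defined members (which only fails at link time for friends
// and members).
class OldStyleNonCopyable {
private:
    OldStyleNonCopyable(const OldStyleNonCopyable &);            // never defined
    OldStyleNonCopyable &operator=(const OldStyleNonCopyable &); // never defined
public:
    OldStyleNonCopyable() = default;
};

class NewStyleNonCopyable {
public:
    NewStyleNonCopyable(const NewStyleNonCopyable &) = delete;
    NewStyleNonCopyable &operator=(const NewStyleNonCopyable &) = delete;
    NewStyleNonCopyable() = default;
};
```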
diff --git a/vespalib/src/vespa/vespalib/btree/btreeiterator.h b/vespalib/src/vespa/vespalib/btree/btreeiterator.h
index 4b99edf592a..7a754880aa3 100644
--- a/vespalib/src/vespa/vespalib/btree/btreeiterator.h
+++ b/vespalib/src/vespa/vespalib/btree/btreeiterator.h
@@ -36,115 +36,46 @@ class NodeElement
using KeyType = typename NodeType::KeyType;
using DataType = typename NodeType::DataType;
const NodeType *_node;
- uint32_t _idx;
-
- NodeType *
- getWNode() const
- {
- return const_cast<NodeType *>(_node);
- }
+ uint32_t _idx;
+ NodeType * getWNode() const { return const_cast<NodeType *>(_node); }
public:
- NodeElement()
- : _node(nullptr),
- _idx(0u)
- {
- }
+ NodeElement() : _node(nullptr), _idx(0u) { }
+ NodeElement(const NodeType *node, uint32_t idx) : _node(node), _idx(idx) { }
- NodeElement(const NodeType *node, uint32_t idx)
- : _node(node),
- _idx(idx)
- {
- }
+ void setNode(const NodeType *node) { _node = node; }
+ const NodeType * getNode() const { return _node; }
+ void setIdx(uint32_t idx) { _idx = idx; }
+ uint32_t getIdx() const { return _idx; }
+ void incIdx() { ++_idx; }
+ void decIdx() { --_idx; }
- void
- setNode(const NodeType *node)
- {
+ void setNodeAndIdx(const NodeType *node, uint32_t idx) {
_node = node;
- }
-
- const NodeType *
- getNode() const
- {
- return _node;
- }
-
- void
- setIdx(uint32_t idx)
- {
_idx = idx;
}
- uint32_t
- getIdx() const
- {
- return _idx;
- }
-
- void
- incIdx()
- {
- ++_idx;
- }
-
- void
- decIdx()
- {
- --_idx;
- }
-
- void
- setNodeAndIdx(const NodeType *node, uint32_t idx)
- {
- _node = node;
- _idx = idx;
- }
-
- const KeyType &
- getKey() const
- {
- return _node->getKey(_idx);
- }
-
- const DataType &
- getData() const
- {
- return _node->getData(_idx);
- }
-
+ const KeyType & getKey() const { return _node->getKey(_idx); }
+ const DataType & getData() const { return _node->getData(_idx); }
// Only use during compaction when changing reference to moved value
DataType &getWData() { return getWNode()->getWData(_idx); }
-
- bool
- valid() const
- {
- return _node != nullptr;
- }
-
- void
- adjustLeftVictimKilled()
- {
+ bool valid() const { return _node != nullptr; }
+ void adjustLeftVictimKilled() {
assert(_idx > 0);
--_idx;
}
- void
- adjustSteal(uint32_t stolen)
- {
+ void adjustSteal(uint32_t stolen) {
assert(_idx + stolen < _node->validSlots());
_idx += stolen;
}
- void
- adjustSplit(bool inRightSplit)
- {
+ void adjustSplit(bool inRightSplit) {
if (inRightSplit)
++_idx;
}
- bool
- adjustSplit(bool inRightSplit, const NodeType *splitNode)
- {
+ bool adjustSplit(bool inRightSplit, const NodeType *splitNode) {
adjustSplit(inRightSplit);
if (_idx >= _node->validSlots()) {
_idx -= _node->validSlots();
@@ -154,18 +85,13 @@ public:
return false;
}
- void
- swap(NodeElement &rhs)
- {
+ void swap(NodeElement &rhs) {
std::swap(_node, rhs._node);
std::swap(_idx, rhs._idx);
}
- bool
- operator!=(const NodeElement &rhs) const
- {
- return _node != rhs._node ||
- _idx != rhs._idx;
+ bool operator!=(const NodeElement &rhs) const {
+ return (_node != rhs._node) || (_idx != rhs._idx);
}
};
@@ -243,8 +169,7 @@ protected:
*
* @param pidx Number of levels above leaf nodes to take into account.
*/
- size_t
- position(uint32_t pidx) const;
+ size_t position(uint32_t pidx) const;
/**
* Create iterator pointing to first element in the tree referenced
@@ -273,8 +198,7 @@ protected:
/**
* Step iterator forwards. If at end then leave it at end.
*/
- BTreeIteratorBase &
- operator++() {
+ BTreeIteratorBase & operator++() {
if (_leaf.getNode() == nullptr) {
return *this;
}
@@ -290,8 +214,7 @@ protected:
* Step iterator backwards. If at end then place it at last valid
* position in tree (cf. rbegin())
*/
- BTreeIteratorBase &
- operator--();
+ BTreeIteratorBase & operator--();
~BTreeIteratorBase();
BTreeIteratorBase(const BTreeIteratorBase &other);
@@ -311,9 +234,7 @@ protected:
* from this iterator position to end of subtree.
*/
template <typename FunctionType>
- void
- foreach_key_range_start(uint32_t level, FunctionType func) const
- {
+ void foreach_key_range_start(uint32_t level, FunctionType func) const {
if (level > 0u) {
--level;
foreach_key_range_start(level, func);
@@ -332,9 +253,7 @@ protected:
* subtree before this iterator position).
*/
template <typename FunctionType>
- void
- foreach_key_range_end(uint32_t level, FunctionType func) const
- {
+ void foreach_key_range_end(uint32_t level, FunctionType func) const {
if (level > 0u) {
--level;
auto &store = _allocator->getNodeStore();
@@ -348,8 +267,7 @@ protected:
}
public:
- bool
- operator==(const BTreeIteratorBase & rhs) const {
+ bool operator==(const BTreeIteratorBase & rhs) const {
if (_leaf.getIdx() != rhs._leaf.getIdx()) {
return false;
}
@@ -367,83 +285,55 @@ public:
return true;
}
- bool
- operator!=(const BTreeIteratorBase & rhs) const
- {
- return !operator==(rhs);
- }
+ bool operator!=(const BTreeIteratorBase & rhs) const { return !operator==(rhs); }
/**
* Swap iterator with the other.
*
* @param rhs Other iterator.
*/
- void
- swap(BTreeIteratorBase & rhs);
+ void swap(BTreeIteratorBase & rhs);
/**
* Get key at current iterator location.
*/
- const KeyType &
- getKey() const
- {
- return _leaf.getKey();
- }
+ const KeyType & getKey() const { return _leaf.getKey(); }
/**
* Get data at current iterator location.
*/
- const DataType &
- getData() const
- {
- return _leaf.getData();
- }
+ const DataType & getData() const { return _leaf.getData(); }
/**
* Check if iterator is at a valid element, i.e. not at end.
*/
- bool
- valid() const
- {
- return _leaf.valid();
- }
+ bool valid() const { return _leaf.valid(); }
/**
* Return the number of elements in the tree.
*/
- size_t
- size() const;
+ size_t size() const;
/**
* Return the current position in the tree.
*/
- size_t
- position() const
- {
- return position(_pathSize);
- }
+ size_t position() const { return position(_pathSize); }
/**
* Return the distance between two positions in the tree.
*/
- ssize_t
- operator-(const BTreeIteratorBase &rhs) const;
+ ssize_t operator-(const BTreeIteratorBase &rhs) const;
/**
* Return if the tree has data or not (e.g. keys and data or only keys).
*/
- static bool
- hasData()
- {
- return LeafNodeType::hasData();
- }
+ static bool hasData() { return LeafNodeType::hasData(); }
/**
* Move the iterator directly to end. Used by findHelper method in BTree.
*/
- void
- setupEnd();
+ void setupEnd();
/**
* Setup iterator to be empty and not be associated with any tree.
@@ -453,50 +343,41 @@ public:
/**
* Move iterator to beyond last element in the current tree.
*/
- void
- end() __attribute__((noinline));
+ void end() __attribute__((noinline));
/**
* Move iterator to beyond last element in the given tree.
*
* @param rootRef Reference to root of tree.
*/
- void
- end(BTreeNode::Ref rootRef);
+ void end(BTreeNode::Ref rootRef);
/**
* Move iterator to first element in the current tree.
*/
- void
- begin();
+ void begin();
/**
* Move iterator to first element in the given tree.
*
* @param rootRef Reference to root of tree.
*/
- void
- begin(BTreeNode::Ref rootRef);
+ void begin(BTreeNode::Ref rootRef);
/**
* Move iterator to last element in the current tree.
*/
- void
- rbegin();
+ void rbegin();
/*
* Get aggregated values for the current tree.
*/
- const AggrT &
- getAggregated() const;
+ const AggrT & getAggregated() const;
- bool
- identical(const BTreeIteratorBase &rhs) const;
+ bool identical(const BTreeIteratorBase &rhs) const;
template <typename FunctionType>
- void
- foreach_key(FunctionType func) const
- {
+ void foreach_key(FunctionType func) const {
if (_pathSize > 0) {
_path[_pathSize - 1].getNode()->
foreach_key(_allocator->getNodeStore(), func);
@@ -511,9 +392,7 @@ public:
* range [this iterator, end_itr)).
*/
template <typename FunctionType>
- void
- foreach_key_range(const BTreeIteratorBase &end_itr, FunctionType func) const
- {
+ void foreach_key_range(const BTreeIteratorBase &end_itr, FunctionType func) const {
if (!valid()) {
return;
}
@@ -584,9 +463,7 @@ class BTreeConstIterator : public BTreeIteratorBase<KeyT, DataT, AggrT,
TraitsT::PATH_SIZE>
{
protected:
- using ParentType = BTreeIteratorBase<KeyT,
- DataT,
- AggrT,
+ using ParentType = BTreeIteratorBase<KeyT, DataT, AggrT,
TraitsT::INTERNAL_SLOTS,
TraitsT::LEAF_SLOTS,
TraitsT::PATH_SIZE>;
@@ -645,17 +522,12 @@ public:
/**
* Default constructor. Iterator is not associated with a tree.
*/
- BTreeConstIterator()
- : ParentType()
- {
- }
+ BTreeConstIterator() : ParentType() { }
/**
* Step iterator forwards. If at end then leave it at end.
*/
- BTreeConstIterator &
- operator++()
- {
+ BTreeConstIterator & operator++() {
ParentType::operator++();
return *this;
}
@@ -664,9 +536,7 @@ public:
* Step iterator backwards. If at end then place it at last valid
* position in tree (cf. rbegin())
*/
- BTreeConstIterator &
- operator--()
- {
+ BTreeConstIterator & operator--() {
ParentType::operator--();
return *this;
}
@@ -679,8 +549,7 @@ public:
* @param key Key to search for
* @param comp Comparator for the tree ordering.
*/
- void
- lower_bound(const KeyType & key, CompareT comp = CompareT());
+ void lower_bound(const KeyType & key, CompareT comp = CompareT());
/**
* Position iterator at first position with a key that is greater
@@ -689,9 +558,7 @@ public:
* @param key Key to search for
* @param comp Comparator for the tree ordering.
*/
- void
- lower_bound(BTreeNode::Ref rootRef,
- const KeyType & key, CompareT comp = CompareT());
+ void lower_bound(BTreeNode::Ref rootRef, const KeyType & key, CompareT comp = CompareT());
/**
* Step iterator forwards until it is at a position with a key
@@ -704,8 +571,7 @@ public:
* @param key Key to search for
* @param comp Comparator for the tree ordering.
*/
- void
- seek(const KeyType &key, CompareT comp = CompareT());
+ void seek(const KeyType &key, CompareT comp = CompareT());
/**
* Step iterator forwards until it is at a position with a key
@@ -717,8 +583,7 @@ public:
* @param key Key to search for
* @param comp Comparator for the tree ordering.
*/
- void
- binarySeek(const KeyType &key, CompareT comp = CompareT());
+ void binarySeek(const KeyType &key, CompareT comp = CompareT());
/**
* Step iterator forwards until it is at a position with a key
@@ -730,8 +595,7 @@ public:
* @param key Key to search for
* @param comp Comparator for the tree ordering.
*/
- void
- linearSeek(const KeyType &key, CompareT comp = CompareT());
+ void linearSeek(const KeyType &key, CompareT comp = CompareT());
/**
* Step iterator forwards until it is at a position with a key
@@ -744,8 +608,7 @@ public:
* @param key Key to search for
* @param comp Comparator for the tree ordering.
*/
- void
- seekPast(const KeyType &key, CompareT comp = CompareT());
+ void seekPast(const KeyType &key, CompareT comp = CompareT());
/**
* Step iterator forwards until it is at a position with a key
@@ -757,8 +620,7 @@ public:
* @param key Key to search for
* @param comp Comparator for the tree ordering.
*/
- void
- binarySeekPast(const KeyType &key, CompareT comp = CompareT());
+ void binarySeekPast(const KeyType &key, CompareT comp = CompareT());
/**
* Step iterator forwards until it is at a position with a key
@@ -770,8 +632,7 @@ public:
* @param key Key to search for
* @param comp Comparator for the tree ordering.
*/
- void
- linearSeekPast(const KeyType &key, CompareT comp = CompareT());
+ void linearSeekPast(const KeyType &key, CompareT comp = CompareT());
/**
* Validate the iterator as a valid iterator or positioned at
@@ -781,8 +642,7 @@ public:
* @param rootRef Reference to root of tree to operate on
* @param comp Comparator for the tree ordering.
*/
- void
- validate(BTreeNode::Ref rootRef, CompareT comp = CompareT());
+ void validate(BTreeNode::Ref rootRef, CompareT comp = CompareT());
};
@@ -795,15 +655,10 @@ template <typename KeyT,
typename AggrT = NoAggregated,
typename CompareT = std::less<KeyT>,
typename TraitsT = BTreeDefaultTraits>
-class BTreeIterator : public BTreeConstIterator<KeyT, DataT, AggrT,
- CompareT, TraitsT>
+class BTreeIterator : public BTreeConstIterator<KeyT, DataT, AggrT, CompareT, TraitsT>
{
public:
- using ParentType = BTreeConstIterator<KeyT,
- DataT,
- AggrT,
- CompareT,
- TraitsT>;
+ using ParentType = BTreeConstIterator<KeyT, DataT, AggrT, CompareT, TraitsT>;
using NodeAllocatorType = typename ParentType::NodeAllocatorType;
using InternalNodeType = typename ParentType::InternalNodeType;
using LeafNodeType = typename ParentType::LeafNodeType;
@@ -844,40 +699,27 @@ public:
{
}
- BTreeIterator()
- : ParentType()
- {
- }
+ BTreeIterator() : ParentType() { }
- BTreeIterator &
- operator++()
- {
+ BTreeIterator & operator++() {
ParentType::operator++();
return *this;
}
- BTreeIterator &
- operator--()
- {
+ BTreeIterator & operator--() {
ParentType::operator--();
return *this;
}
- NodeAllocatorType &
- getAllocator() const
- {
+ NodeAllocatorType & getAllocator() const {
return const_cast<NodeAllocatorType &>(*_allocator);
}
- BTreeNode::Ref
- moveFirstLeafNode(BTreeNode::Ref rootRef);
+ BTreeNode::Ref moveFirstLeafNode(BTreeNode::Ref rootRef);
- void
- moveNextLeafNode();
+ void moveNextLeafNode();
- void
- writeData(const DataType &data)
- {
+ void writeData(const DataType &data) {
_leaf.getWNode()->writeData(_leaf.getIdx(), data);
}
@@ -889,8 +731,7 @@ public:
* The new key must have the same semantic meaning as the old key.
* Typically used when compacting data store containing keys.
*/
- void
- writeKey(const KeyType &key);
+ void writeKey(const KeyType &key);
/**
* Update data at the current iterator position. The tree should
@@ -900,71 +741,33 @@ public:
* @param aggrCalc Calculator for updating aggregated information.
*/
template <class AggrCalcT>
- void
- updateData(const DataType &data, const AggrCalcT &aggrCalc);
+ void updateData(const DataType &data, const AggrCalcT &aggrCalc);
/**
* Thaw a path from the root node down to the current leaf node in
* the current tree, allowing for updates to be performed without
* disturbing the frozen version of the tree.
*/
- BTreeNode::Ref
- thaw(BTreeNode::Ref rootRef);
+ BTreeNode::Ref thaw(BTreeNode::Ref rootRef);
private:
/* Insert into empty tree */
template <class AggrCalcT>
- BTreeNode::Ref
- insertFirst(const KeyType &key, const DataType &data,
- const AggrCalcT &aggrCalc);
-
- LeafNodeType *
- getLeafNode() const
- {
- return _leaf.getWNode();
- }
-
- bool
- setLeafNodeIdx(uint32_t idx, const LeafNodeType *splitLeafNode);
-
- void
- setLeafNodeIdx(uint32_t idx)
- {
- _leaf.setIdx(idx);
- }
-
- uint32_t
- getLeafNodeIdx() const
- {
- return _leaf.getIdx();
- }
-
- uint32_t
- getPathSize() const
- {
- return _pathSize;
- }
-
- PathElement &
- getPath(uint32_t pidx)
- {
- return _path[pidx];
- }
+ BTreeNode::Ref insertFirst(const KeyType &key, const DataType &data, const AggrCalcT &aggrCalc);
+ LeafNodeType * getLeafNode() const { return _leaf.getWNode(); }
+ bool setLeafNodeIdx(uint32_t idx, const LeafNodeType *splitLeafNode);
+ void setLeafNodeIdx(uint32_t idx) { _leaf.setIdx(idx); }
+ uint32_t getLeafNodeIdx() const { return _leaf.getIdx(); }
+ uint32_t getPathSize() const { return _pathSize; }
+ PathElement & getPath(uint32_t pidx) { return _path[pidx]; }
template <class AggrCalcT>
- BTreeNode::Ref
- addLevel(BTreeNode::Ref rootRef, BTreeNode::Ref splitNodeRef,
- bool inRightSplit, const AggrCalcT &aggrCalc);
+ BTreeNode::Ref addLevel(BTreeNode::Ref rootRef, BTreeNode::Ref splitNodeRef, bool inRightSplit, const AggrCalcT &aggrCalc);
- BTreeNode::Ref
- removeLevel(BTreeNode::Ref rootRef, InternalNodeType *rootNode);
+ BTreeNode::Ref removeLevel(BTreeNode::Ref rootRef, InternalNodeType *rootNode);
+ void removeLast(BTreeNode::Ref rootRef);
- void
- removeLast(BTreeNode::Ref rootRef);
-
- void
- adjustSteal(uint32_t level, bool leftVictimKilled, uint32_t stolen)
- {
+ void adjustSteal(uint32_t level, bool leftVictimKilled, uint32_t stolen) {
assert(_pathSize > level);
if (leftVictimKilled) {
_path[level].adjustLeftVictimKilled();
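
Beyond collapsing declarations, the iterator hunk keeps the documented seek family: seek(), binarySeek() and linearSeek() only move forwards from the current position, which is what makes them cheap in merge-style traversals. A short sketch of walking a half-open key range using only members shown in this diff (the tree/iterator instantiation, the uint32_t keys and the visitor callback are assumptions):

    // Visit every element with key in [lo, hi). Uses begin() from the tree
    // wrapper above and seek()/valid()/getKey()/getData()/operator++ from the
    // iterator declared in this header.
    template <typename TreeT, typename VisitorT>
    void visit_range(const TreeT &tree, uint32_t lo, uint32_t hi, VisitorT visit) {
        auto itr = tree.begin();   // positioned at the first element
        itr.seek(lo);              // step forward to the first key >= lo
        while (itr.valid() && itr.getKey() < hi) {
            visit(itr.getKey(), itr.getData());
            ++itr;
        }
    }
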
diff --git a/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h b/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h
index b537602c703..c7c635b4471 100644
--- a/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h
+++ b/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h
@@ -34,10 +34,6 @@ public:
using DataStoreBase = datastore::DataStoreBase;
private:
- BTreeNodeAllocator(const BTreeNodeAllocator &rhs);
-
- BTreeNodeAllocator & operator=(const BTreeNodeAllocator &rhs);
-
NodeStore _nodeStore;
using RefVector = vespalib::Array<BTreeNode::Ref>;
@@ -53,6 +49,8 @@ private:
RefVector _leafHoldUntilFreeze;
public:
+ BTreeNodeAllocator(const BTreeNodeAllocator &rhs) = delete;
+ BTreeNodeAllocator & operator=(const BTreeNodeAllocator &rhs) = delete;
BTreeNodeAllocator();
~BTreeNodeAllocator();
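
The allocator change above replaces the old C++03 pattern of declaring a private, never-defined copy constructor and copy assignment with the C++11 = delete form in the public section, turning an accidental copy into a clear compile-time diagnostic instead of a link error. The same idiom in isolation (generic illustration, not Vespa code):

    class NonCopyable {
    public:
        NonCopyable() = default;
        NonCopyable(const NonCopyable &) = delete;             // copying is a compile error
        NonCopyable &operator=(const NonCopyable &) = delete;  // so is copy assignment
    };

    // NonCopyable a;
    // NonCopyable b = a;   // error: use of deleted function
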
diff --git a/vespalib/src/vespa/vespalib/geo/zcurve.cpp b/vespalib/src/vespa/vespalib/geo/zcurve.cpp
index c207f966704..d04a04fda0a 100644
--- a/vespalib/src/vespa/vespalib/geo/zcurve.cpp
+++ b/vespalib/src/vespa/vespalib/geo/zcurve.cpp
@@ -10,15 +10,38 @@ namespace vespalib::geo {
namespace {
+ /**
+ * An area defined by its upper left and lower right corners. The
+ * z-coordinates between these corners act as a spacial
+ * over-estimation of the actual area. These areas may never cross
+ * signed borders, since that would break the whole concept of
+ * hierarchical spatial partitioning.
+ **/
+struct Area {
+ const ZCurve::Point min;
+ const ZCurve::Point max;
+ Area(const Area &rhs) = default;
+ Area(int32_t min_x, int32_t min_y,
+ int32_t max_x, int32_t max_y)
+ : min(min_x, min_y), max(max_x, max_y)
+ {
+ assert((min_x <= max_x) && ((min_x < 0) == (max_x < 0)));
+ assert((min_y <= max_y) && ((min_y < 0) == (max_y < 0)));
+ }
+ Area &operator=(Area &&rhs) { new ((void*)this) Area(rhs); return *this; }
+ int64_t size() const { return (static_cast<int64_t>(max.x) - min.x + 1) * (static_cast<int64_t>(max.y) - min.y + 1); }
+ int64_t estimate() const { return (max.z - min.z + 1); }
+ int64_t error() const { return estimate() - size(); }
+};
+
class ZAreaQueue
{
private:
struct MaxAreaErrorCmp {
- bool operator()(const ZCurve::Area &a, const ZCurve::Area &b) const {
+ bool operator()(const Area &a, const Area &b) const {
return (a.error() > b.error());
}
};
- using Area = ZCurve::Area;
using Range = ZCurve::Range;
using RangeVector = ZCurve::RangeVector;
using Queue = PriorityQueue<Area, MaxAreaErrorCmp, LeftArrayHeap>;
@@ -61,7 +84,6 @@ public:
class ZAreaSplitter
{
private:
- using Area = ZCurve::Area;
using RangeVector = ZCurve::RangeVector;
ZAreaQueue _queue;
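
The Area helper moved into this translation unit quantifies how much a single z-curve interval [min.z, max.z] over-covers the rectangle it bounds: size() counts the points in the rectangle, estimate() the length of the z interval, and error() the over-estimation that ZAreaQueue uses as its split priority. A standalone sketch of the same arithmetic with a naive bit-interleaving encode (illustrative only; the library's encode() in zcurve.h is table-driven and also handles negative coordinates):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Naive z-curve encode: x occupies the even bit positions, y the odd ones.
    uint64_t encode_slow(uint32_t x, uint32_t y) {
        uint64_t z = 0;
        for (int i = 0; i < 32; ++i) {
            z |= uint64_t{(x >> i) & 1u} << (2 * i);
            z |= uint64_t{(y >> i) & 1u} << (2 * i + 1);
        }
        return z;
    }

    int main() {
        // Rectangle [2,5] x [1,3]; both corners in the same (non-negative)
        // quadrant, mirroring the sign-border assertion in the Area constructor.
        uint64_t zmin = encode_slow(2, 1);
        uint64_t zmax = encode_slow(5, 3);
        int64_t size     = int64_t(5 - 2 + 1) * int64_t(3 - 1 + 1);  // 12 points
        int64_t estimate = int64_t(zmax - zmin + 1);                 // z values covered
        int64_t error    = estimate - size;                          // over-estimation
        assert(error >= 0);
        printf("size=%lld estimate=%lld error=%lld\n",
               static_cast<long long>(size), static_cast<long long>(estimate),
               static_cast<long long>(error));
        return 0;
    }
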
diff --git a/vespalib/src/vespa/vespalib/geo/zcurve.h b/vespalib/src/vespa/vespalib/geo/zcurve.h
index 2f92b3a019b..c5fbdc08dce 100644
--- a/vespalib/src/vespa/vespalib/geo/zcurve.h
+++ b/vespalib/src/vespa/vespalib/geo/zcurve.h
@@ -3,7 +3,6 @@
#pragma once
#include <cstdint>
-#include <cassert>
#include <vector>
namespace vespalib::geo {
@@ -163,30 +162,6 @@ public:
Point(int32_t x_, int32_t y_) : x(x_), y(y_), z(encode(x_, y_)) {}
};
- /**
- * An area defined by its upper left and lower right corners. The
- * z-coordinates between these corners act as a spacial
- * over-estimation of the actual area. These areas may never cross
- * signed borders, since that would break the whole concept of
- * hierarchical spatial partitioning.
- **/
- struct Area {
- const Point min;
- const Point max;
- Area(const Area &rhs) = default;
- Area(int32_t min_x, int32_t min_y,
- int32_t max_x, int32_t max_y)
- : min(min_x, min_y), max(max_x, max_y)
- {
- assert((min_x <= max_x) && ((min_x < 0) == (max_x < 0)));
- assert((min_y <= max_y) && ((min_y < 0) == (max_y < 0)));
- }
- Area &operator=(Area &&rhs) { new ((void*)this) Area(rhs); return *this; }
- int64_t size() const { return (static_cast<int64_t>(max.x) - min.x + 1) * (static_cast<int64_t>(max.y) - min.y + 1); }
- int64_t estimate() const { return (max.z - min.z + 1); }
- int64_t error() const { return estimate() - size(); }
- };
-
class Range
{
private:
@@ -212,11 +187,9 @@ public:
static RangeVector find_ranges(int min_x, int min_y,
int max_x, int max_y);
- static int64_t
- encodeSlow(int32_t x, int32_t y);
+ static int64_t encodeSlow(int32_t x, int32_t y);
- static void
- decodeSlow(int64_t enc, int32_t *xp, int32_t *yp);
+ static void decodeSlow(int64_t enc, int32_t *xp, int32_t *yp);
};
}
diff --git a/vespalib/src/vespa/vespalib/metrics/simple_metrics_manager.cpp b/vespalib/src/vespa/vespalib/metrics/simple_metrics_manager.cpp
index 4b6b82697f7..534177e480a 100644
--- a/vespalib/src/vespa/vespalib/metrics/simple_metrics_manager.cpp
+++ b/vespalib/src/vespa/vespalib/metrics/simple_metrics_manager.cpp
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "simple_metrics_manager.h"
#include "simple_tick.h"
+#include <cassert>
#include <vespa/log/log.h>
LOG_SETUP(".vespalib.metrics.simple_metrics_manager");
diff --git a/vespalib/src/vespa/vespalib/metrics/stable_store.h b/vespalib/src/vespa/vespalib/metrics/stable_store.h
index f249fd7729e..d456150ab7e 100644
--- a/vespalib/src/vespa/vespalib/metrics/stable_store.h
+++ b/vespalib/src/vespa/vespalib/metrics/stable_store.h
@@ -4,7 +4,6 @@
#include <memory>
#include <vector>
-#include <assert.h>
namespace vespalib {
@@ -54,8 +53,8 @@ private:
StableStore(size_t sz, UP &&more, std::vector<T> &&mine);
- size_t _size;
- UP _more;
+ size_t _size;
+ UP _more;
std::vector<T> _mine;
};
diff --git a/vespalib/src/vespa/vespalib/testkit/time_bomb.cpp b/vespalib/src/vespa/vespalib/testkit/time_bomb.cpp
index 22cd535f434..9d4b2ea30c8 100644
--- a/vespalib/src/vespa/vespalib/testkit/time_bomb.cpp
+++ b/vespalib/src/vespa/vespalib/testkit/time_bomb.cpp
@@ -8,13 +8,13 @@ namespace vespalib {
namespace {
-void bomb(Gate &gate, size_t seconds) {
- if (seconds > 5) {
- if (gate.await(from_s(seconds - 5))) {
+void bomb(Gate &gate, vespalib::duration timeout) {
+ if (timeout > 5s) {
+ if (gate.await(timeout - 5s)) {
return;
}
}
- size_t countdown = std::min(seconds, size_t(5));
+ size_t countdown = std::min(count_s(timeout), 5l);
while (countdown > 0) {
fprintf(stderr, "...%zu...\n", countdown--);
if (gate.await(1s)) {
@@ -27,9 +27,9 @@ void bomb(Gate &gate, size_t seconds) {
} // namespace vespalib::<unnamed>
-TimeBomb::TimeBomb(size_t seconds)
+TimeBomb::TimeBomb(duration timeout)
: _gate(),
- _thread(bomb, std::ref(_gate), seconds)
+ _thread(bomb, std::ref(_gate), timeout)
{
}
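
bomb() now takes a vespalib::duration directly: it waits on the gate for all but the last five seconds, then prints a one-line countdown every remaining second until either the gate opens (the test finished) or time runs out and the process is terminated. A rough standalone equivalent of that shape, using std::shared_future as a stand-in for Gate (the names and the final abort are illustrative, not the library's behavior):

    #include <algorithm>
    #include <chrono>
    #include <cstdio>
    #include <cstdlib>
    #include <future>

    using namespace std::chrono_literals;

    // finished.wait_for(d) plays the role of Gate::await(d): it becomes ready
    // once the owning test signals completion.
    void bomb_sketch(std::shared_future<void> finished, std::chrono::seconds timeout) {
        if (timeout > 5s) {
            if (finished.wait_for(timeout - 5s) == std::future_status::ready) {
                return;                                  // test finished in time
            }
        }
        auto countdown = std::min<std::chrono::seconds::rep>(timeout.count(), 5);
        while (countdown > 0) {
            fprintf(stderr, "...%lld...\n", static_cast<long long>(countdown--));
            if (finished.wait_for(1s) == std::future_status::ready) {
                return;
            }
        }
        std::abort();                                    // time is up
    }
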
diff --git a/vespalib/src/vespa/vespalib/testkit/time_bomb.h b/vespalib/src/vespa/vespalib/testkit/time_bomb.h
index a88aeeadd9a..ca02e7cda43 100644
--- a/vespalib/src/vespa/vespalib/testkit/time_bomb.h
+++ b/vespalib/src/vespa/vespalib/testkit/time_bomb.h
@@ -21,7 +21,8 @@ private:
Gate _gate;
std::thread _thread;
public:
- TimeBomb(size_t seconds);
+ TimeBomb(size_t seconds) : TimeBomb(from_s(seconds)) {}
+ TimeBomb(vespalib::duration duration);
~TimeBomb(); // defuse the bomb
};
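
The header keeps the old integer constructor as a delegating overload, so existing TimeBomb(60) call sites compile unchanged while new code can pass a duration. A usage sketch (the include path and the surrounding test function are assumptions):

    #include <vespa/vespalib/testkit/time_bomb.h>   // assumed include path
    #include <chrono>

    void some_test() {
        using namespace std::chrono_literals;
        vespalib::TimeBomb legacy(60);    // size_t overload, delegates via from_s(60)
        vespalib::TimeBomb modern(30s);   // duration overload added above
        // ... test body; both bombs are defused when they go out of scope ...
    }
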
diff --git a/vespalib/src/vespa/vespalib/util/fiddle.h b/vespalib/src/vespa/vespalib/util/fiddle.h
index f4d2ac33695..b6799d9c778 100644
--- a/vespalib/src/vespa/vespalib/util/fiddle.h
+++ b/vespalib/src/vespa/vespalib/util/fiddle.h
@@ -4,8 +4,7 @@
#include <cassert>
-namespace vespalib {
-namespace bits {
+namespace vespalib::bits {
//-----------------------------------------------------------------------------
@@ -79,6 +78,5 @@ uint32_t split_range(uint32_t min, uint32_t max,
//-----------------------------------------------------------------------------
-} // namespace bits
-} // namespace vespalib
+}
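
The namespace change above is purely syntactic: a C++17 nested namespace definition opens and closes both levels in one declaration. In isolation (throwaway names, for illustration only):

    // Pre-C++17 spelling:
    namespace demo_outer {
    namespace demo_inner {
    int value_a;
    } // namespace demo_inner
    } // namespace demo_outer

    // C++17 spelling, as now used in fiddle.h; declares demo_outer::demo_inner::value_b.
    namespace demo_outer::demo_inner {
    int value_b;
    }
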
diff --git a/vespalib/src/vespa/vespalib/util/latch.h b/vespalib/src/vespa/vespalib/util/latch.h
index 3ae49aeb11f..9110b898372 100644
--- a/vespalib/src/vespa/vespalib/util/latch.h
+++ b/vespalib/src/vespa/vespalib/util/latch.h
@@ -4,7 +4,6 @@
#include <mutex>
#include <condition_variable>
-#include <cassert>
namespace vespalib {