-rw-r--r--ann_benchmark/src/vespa/ann_benchmark/vespa_ann_benchmark.cpp7
-rw-r--r--annotations/pom.xml1
-rw-r--r--application-model/pom.xml3
-rw-r--r--bundle-plugin-test/integration-test/pom.xml7
-rw-r--r--bundle-plugin-test/integration-test/src/test/java/com/yahoo/container/plugin/BundleTest.java31
-rw-r--r--bundle-plugin-test/integration-test/src/test/java/com/yahoo/container/plugin/NonPublicApiDetectionTest.java44
-rw-r--r--bundle-plugin-test/test-bundles/main/pom.xml3
-rw-r--r--bundle-plugin-test/test-bundles/main/src/main/java/com/yahoo/test/package-info.java2
-rw-r--r--bundle-plugin-test/test-bundles/non-public-api-usage/pom.xml45
-rw-r--r--bundle-plugin-test/test-bundles/non-public-api-usage/src/main/java/com/yahoo/test/UsingBothPublicApiAndNonPublicApiPackages.java15
-rw-r--r--bundle-plugin-test/test-bundles/pom.xml1
-rw-r--r--bundle-plugin/src/main/java/com/yahoo/container/plugin/bundle/AnalyzeBundle.java40
-rw-r--r--bundle-plugin/src/main/java/com/yahoo/container/plugin/classanalysis/AnalyzeClassVisitor.java8
-rw-r--r--bundle-plugin/src/main/java/com/yahoo/container/plugin/classanalysis/ClassFileMetaData.java16
-rw-r--r--bundle-plugin/src/main/java/com/yahoo/container/plugin/classanalysis/ExportPackageAnnotation.java2
-rw-r--r--bundle-plugin/src/main/java/com/yahoo/container/plugin/classanalysis/PackageInfo.java16
-rw-r--r--bundle-plugin/src/main/java/com/yahoo/container/plugin/classanalysis/PackageTally.java51
-rw-r--r--bundle-plugin/src/main/java/com/yahoo/container/plugin/classanalysis/Packages.java21
-rw-r--r--bundle-plugin/src/main/java/com/yahoo/container/plugin/mojo/AssembleContainerPluginMojo.java2
-rw-r--r--bundle-plugin/src/main/java/com/yahoo/container/plugin/mojo/GenerateOsgiManifestMojo.java104
-rw-r--r--bundle-plugin/src/test/java/com/yahoo/container/plugin/classanalysis/AnalyzeClassTest.java18
-rw-r--r--bundle-plugin/src/test/java/com/yahoo/container/plugin/classanalysis/sampleclasses/package-info.java2
-rw-r--r--client/go/Makefile3
-rw-r--r--client/go/go.mod5
-rw-r--r--client/go/go.sum4
-rw-r--r--client/go/internal/admin/vespa-wrapper/services/start.go1
-rw-r--r--client/go/internal/admin/vespa-wrapper/services/tuning.go30
-rw-r--r--client/go/internal/cli/cmd/api_key.go4
-rw-r--r--client/go/internal/cli/cmd/cert.go10
-rw-r--r--client/go/internal/cli/cmd/config.go4
-rw-r--r--client/go/internal/cli/cmd/config_test.go16
-rw-r--r--client/go/internal/cli/cmd/document.go190
-rw-r--r--client/go/internal/cli/cmd/document_test.go46
-rw-r--r--client/go/internal/cli/cmd/feed.go11
-rw-r--r--client/go/internal/cli/cmd/login.go4
-rw-r--r--client/go/internal/cli/cmd/logout.go4
-rw-r--r--client/go/internal/cli/cmd/root.go66
-rw-r--r--client/go/internal/cli/cmd/status_test.go4
-rw-r--r--client/go/internal/cli/cmd/testdata/A-Head-Full-of-Dreams-Put-Id.json15
-rw-r--r--client/go/internal/cli/cmd/visit.go20
-rw-r--r--client/go/internal/curl/curl.go2
-rw-r--r--client/go/internal/vespa/document.go197
-rw-r--r--client/go/internal/vespa/document/circuit_breaker.go33
-rw-r--r--client/go/internal/vespa/document/circuit_breaker_test.go6
-rw-r--r--client/go/internal/vespa/document/dispatcher.go129
-rw-r--r--client/go/internal/vespa/document/dispatcher_test.go17
-rw-r--r--client/go/internal/vespa/document/document.go307
-rw-r--r--client/go/internal/vespa/document/document_test.go81
-rw-r--r--client/go/internal/vespa/document/feeder_test.go34
-rw-r--r--client/go/internal/vespa/document/http.go166
-rw-r--r--client/go/internal/vespa/document/http_test.go105
-rw-r--r--client/go/internal/vespa/document/stats.go (renamed from client/go/internal/vespa/document/feeder.go)62
-rw-r--r--client/go/internal/vespa/document/stats_test.go43
-rw-r--r--client/go/internal/vespa/document/throttler.go33
-rw-r--r--client/go/internal/vespa/system.go42
-rw-r--r--client/go/internal/vespa/target_cloud.go18
-rw-r--r--client/go/internal/vespa/target_custom.go13
-rw-r--r--client/go/internal/vespa/target_test.go6
-rw-r--r--client/js/app/package.json4
-rw-r--r--client/js/app/yarn.lock2092
-rw-r--r--clustercontroller-apps/src/main/java/com/yahoo/vespa/clustercontroller/apps/clustercontroller/ClusterControllerClusterConfigurer.java1
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/AnnotatedClusterState.java9
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ContentCluster.java5
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/EventLog.java10
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java12
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetControllerOptions.java17
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/GroupAvailabilityCalculator.java6
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/MasterElectionHandler.java73
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/StateChangeHandler.java1
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/SystemStateBroadcaster.java11
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/DatabaseHandler.java50
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RpcServer.java43
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/LegacyIndexPageRequestHandler.java7
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFeedBlockTest.java16
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterStateGeneratorTest.java4
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ContentClusterHtmlRendererTest.java14
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DistributionBitCountTest.java2
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/EventLogTest.java9
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerTest.java35
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/MasterElectionTest.java106
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NoZooKeeperTest.java26
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NodeSlobrokConfigurationMembershipTest.java2
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/RpcServerTest.java105
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeTest.java231
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/rpc/RPCCommunicatorTest.java9
-rw-r--r--clustercontroller-utils/pom.xml3
-rwxr-xr-xcomponent/pom.xml4
-rw-r--r--config-application-package/pom.xml3
-rw-r--r--config-bundle/pom.xml4
-rw-r--r--config-lib/pom.xml4
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/application/api/Bcp.java27
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/application/api/DeploymentInstanceSpec.java43
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/application/api/DeploymentSpec.java78
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/application/api/xml/DeploymentSpecXmlReader.java64
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java1
-rw-r--r--config-model-api/src/test/java/com/yahoo/config/application/api/DeploymentSpecTest.java213
-rw-r--r--config-model-api/src/test/java/com/yahoo/config/application/api/DeploymentSpecWithoutInstanceTest.java110
-rw-r--r--config-model/src/main/java/com/yahoo/config/model/ConfigModelContext.java29
-rw-r--r--config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java2
-rw-r--r--config-model/src/main/java/com/yahoo/config/model/provision/InMemoryProvisioner.java11
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/Host.java6
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java3
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java29
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/application/validation/AccessControlFilterExcludeValidator.java2
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidator.java76
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/application/validation/QuotaValidator.java30
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomComponentBuilder.java28
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomSearchTuningBuilder.java16
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java35
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/ContainerModelEvaluation.java1
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/PlatformBundles.java3
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/component/BertEmbedder.java70
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/component/HuggingFaceEmbedder.java81
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/component/HuggingFaceTokenizer.java47
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/component/TypedComponent.java20
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java25
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/xml/ModelIdResolver.java47
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/Content.java48
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java13
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java25
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/routing/DocumentProtocol.java7
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/search/IndexedSearchCluster.java60
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/search/IndexingDocproc.java68
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/search/NodeResourcesTuning.java6
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/search/Tuning.java12
-rw-r--r--config-model/src/main/resources/schema/common.rnc55
-rw-r--r--config-model/src/main/resources/schema/content.rnc3
-rw-r--r--config-model/src/main/resources/schema/deployment.rnc10
-rw-r--r--config-model/src/test/cfg/application/embed/configdefinitions/embedding.bert-base-embedder.def30
-rw-r--r--config-model/src/test/cfg/application/embed/configdefinitions/sentence-embedder.def26
-rw-r--r--config-model/src/test/cfg/application/embed/services.xml47
-rw-r--r--config-model/src/test/cfg/application/embed_cloud_only/configdefinitions/embedding.bert-base-embedder.def30
-rw-r--r--config-model/src/test/cfg/application/embed_cloud_only/configdefinitions/sentence-embedder.def26
-rw-r--r--config-model/src/test/cfg/application/embed_cloud_only/services.xml13
-rw-r--r--config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java25
-rw-r--r--config-model/src/test/java/com/yahoo/schema/processing/NGramTestCase.java6
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidatorTest.java20
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/application/validation/QuotaValidatorTest.java58
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/application/validation/ValidationTester.java1
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/CloudAccountChangeValidatorTest.java6
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/DomComponentBuilderTest.java4
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/DomSchemaTuningBuilderTest.java10
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/container/ContainerClusterTest.java6
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/container/xml/EmbedderTestCase.java171
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java38
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/content/IndexingAndDocprocRoutingTest.java2
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/search/NodeResourcesTuningTest.java24
-rw-r--r--config-provisioning/src/main/java/com/yahoo/config/provision/Capacity.java32
-rw-r--r--config-provisioning/src/main/java/com/yahoo/config/provision/CloudAccount.java97
-rw-r--r--config-provisioning/src/main/java/com/yahoo/config/provision/CloudName.java17
-rw-r--r--config-provisioning/src/main/java/com/yahoo/config/provision/ClusterInfo.java20
-rw-r--r--config-provisioning/src/main/java/com/yahoo/config/provision/NodeResources.java18
-rw-r--r--config-provisioning/src/main/java/com/yahoo/config/provision/QuotaExceededException.java17
-rw-r--r--config-provisioning/src/test/java/com/yahoo/config/provision/CapacityTest.java23
-rw-r--r--config-provisioning/src/test/java/com/yahoo/config/provision/CloudAccountTest.java75
-rw-r--r--config-provisioning/src/test/java/com/yahoo/config/provision/CloudNameTest.java22
-rw-r--r--config-provisioning/src/test/java/com/yahoo/config/provision/NodeResourcesTest.java35
-rw-r--r--config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileReferencesAndDownloadsMaintainer.java2
-rwxr-xr-xconfig/pom.xml4
-rw-r--r--configdefinitions/pom.xml4
-rw-r--r--configdefinitions/src/main/java/com/yahoo/embedding/huggingface/package-info.java9
-rw-r--r--configdefinitions/src/main/java/com/yahoo/language/huggingface/config/package-info.java9
-rw-r--r--configdefinitions/src/vespa/CMakeLists.txt6
-rw-r--r--configdefinitions/src/vespa/bert-base-embedder.def (renamed from model-integration/src/main/resources/configdefinitions/embedding.bert-base-embedder.def)3
-rw-r--r--configdefinitions/src/vespa/fleetcontroller.def1
-rw-r--r--configdefinitions/src/vespa/hugging-face-embedder.def (renamed from model-integration/src/main/resources/configdefinitions/hugging-face-embedder.def)5
-rw-r--r--configdefinitions/src/vespa/hugging-face-tokenizer.def (renamed from linguistics-components/src/main/resources/configdefinitions/language.huggingface.hugging-face-tokenizer.def)6
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java47
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/application/ConfigConvergenceChecker.java4
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpErrorResponse.java7
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpHandler.java3
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationApiHandler.java14
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java3
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/deploy/HostedDeployNodeAllocationTest.java24
-rw-r--r--container-core/pom.xml3
-rw-r--r--container-core/src/main/java/com/yahoo/container/jdisc/state/HostLifeGatherer.java4
-rw-r--r--container-core/src/main/java/com/yahoo/container/jdisc/utils/MultiPartFormParser.java1
-rw-r--r--container-core/src/main/java/com/yahoo/container/logging/ConnectionLogEntry.java16
-rw-r--r--container-core/src/main/java/com/yahoo/container/logging/JsonConnectionLogWriter.java7
-rw-r--r--container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyConnectionLogger.java18
-rw-r--r--container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java2
-rw-r--r--container-disc/pom.xml1
-rw-r--r--container-documentapi/pom.xml4
-rw-r--r--container-onnxruntime/pom.xml3
-rw-r--r--container-search-and-docproc/pom.xml3
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/query/parser/Tokenizer.java4
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/querytransform/StemmingSearcher.java37
-rw-r--r--container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java22
-rw-r--r--container-search/src/main/java/com/yahoo/search/logging/AbstractSpoolingLogger.java9
-rw-r--r--container-search/src/main/java/com/yahoo/search/logging/LoggerEntry.java8
-rw-r--r--container-search/src/main/java/com/yahoo/search/logging/Spooler.java69
-rw-r--r--container-search/src/test/java/com/yahoo/prelude/query/parser/test/ParseTestCase.java12
-rw-r--r--container-search/src/test/java/com/yahoo/prelude/querytransform/test/StemmingSearcherTestCase.java42
-rw-r--r--container-search/src/test/java/com/yahoo/search/logging/SpoolerTest.java128
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServerException.java8
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/zone/ZoneRegistry.java8
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java12
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/Policy.java10
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java39
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java1
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageValidator.java5
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackage.java35
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java11
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java14
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobMetrics.java2
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RevisionHistory.java8
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RunStatus.java5
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveUriUpdater.java2
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintainer.java2
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializer.java8
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java3
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ZoneRoutingPolicySerializer.java2
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java32
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java2
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/deployment/DeploymentApiHandler.java1
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java47
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicy.java44
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/DeploymentRoutingContext.java3
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java39
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackageTest.java97
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java18
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java15
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java16
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneRegistryMock.java4
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/EnclaveAccessMaintainerTest.java2
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java5
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializerTest.java13
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java17
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-clusters.json18
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-nodes.json1
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/routing/responses/recursion/environment.json4
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java28
-rw-r--r--default_build_settings.cmake4
-rw-r--r--defaults/pom.xml20
-rw-r--r--document/src/vespa/document/annotation/spantree.h2
-rw-r--r--document/src/vespa/document/base/testdocman.cpp1
-rw-r--r--document/src/vespa/document/select/valuenodes.cpp1
-rw-r--r--document/src/vespa/document/serialization/annotationdeserializer.cpp2
-rw-r--r--eval/src/tests/eval/value_cache/dense-short1.json1
-rw-r--r--eval/src/tests/eval/value_cache/dense-short2.json3
-rw-r--r--eval/src/tests/eval/value_cache/sparse-short1.json5
-rw-r--r--eval/src/tests/eval/value_cache/sparse-short2.json7
-rw-r--r--eval/src/tests/eval/value_cache/tensor_loader_test.cpp24
-rw-r--r--eval/src/tests/instruction/dense_multi_matmul_function/dense_multi_matmul_function_test.cpp4
-rw-r--r--eval/src/vespa/eval/eval/test/cell_type_space.h2
-rw-r--r--eval/src/vespa/eval/eval/value_cache/constant_tensor_loader.cpp77
-rw-r--r--flags/pom.xml3
-rw-r--r--flags/src/main/java/com/yahoo/vespa/flags/Flags.java35
-rw-r--r--flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java6
-rw-r--r--fnet/src/vespa/fnet/connection.cpp2
-rw-r--r--fnet/src/vespa/fnet/connection.h2
-rw-r--r--fnet/src/vespa/fnet/transport.cpp12
-rw-r--r--fsa/pom.xml4
-rw-r--r--hosted-tenant-base/pom.xml2
-rw-r--r--hosted-zone-api/pom.xml3
-rw-r--r--http-client/pom.xml4
-rw-r--r--jaxrs_utils/pom.xml3
-rw-r--r--jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/cors/CorsLogic.java48
-rw-r--r--jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/cors/CorsPreflightRequestFilter.java9
-rw-r--r--jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/cors/CorsResponseFilter.java9
-rw-r--r--jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/cors/CorsLogicTest.java40
-rw-r--r--jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/cors/CorsResponseFilterTest.java6
-rw-r--r--jdisc_core/src/main/java/com/yahoo/jdisc/core/ContainerWatchdog.java17
-rw-r--r--jrt/pom.xml4
-rw-r--r--linguistics-components/pom.xml6
-rw-r--r--linguistics-components/src/main/java/com/yahoo/language/huggingface/HuggingFaceTokenizer.java20
-rw-r--r--linguistics-components/src/test/java/com/yahoo/language/huggingface/HuggingFaceTokenizerTest.java30
-rw-r--r--linguistics/abi-spec.json2
-rw-r--r--linguistics/pom.xml4
-rw-r--r--linguistics/src/main/java/com/yahoo/language/process/CharacterClasses.java10
-rw-r--r--linguistics/src/main/java/com/yahoo/language/process/GramSplitter.java40
-rw-r--r--linguistics/src/main/java/com/yahoo/language/process/TokenType.java9
-rw-r--r--linguistics/src/main/java/com/yahoo/language/simple/SimpleTokenType.java3
-rw-r--r--linguistics/src/main/java/com/yahoo/language/simple/SimpleTokenizer.java12
-rw-r--r--linguistics/src/test/java/com/yahoo/language/process/GramSplitterTestCase.java11
-rw-r--r--linguistics/src/test/java/com/yahoo/language/process/TokenTypeTestCase.java4
-rw-r--r--linguistics/src/test/java/com/yahoo/language/simple/SimpleTokenizerTestCase.java19
-rw-r--r--logd/src/logd/empty_forwarder.cpp1
-rw-r--r--logforwarder/src/apps/vespa-logforwarder-start/splunk-starter.cpp28
-rw-r--r--maven-plugins/allowed-maven-dependencies.txt2
-rw-r--r--messagebus/src/tests/routing/routing.cpp10
-rw-r--r--metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/MetricsManager.java2
-rw-r--r--metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/yamas/YamasHandler.java38
-rw-r--r--metrics-proxy/src/main/java/ai/vespa/metricsproxy/node/NodeMetricGatherer.java9
-rw-r--r--metrics-proxy/src/test/java/ai/vespa/metricsproxy/core/MetricsManagerTest.java1
-rw-r--r--metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/prometheus/PrometheusHandlerTest.java1
-rw-r--r--metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/yamas/YamasHandlerTest.java55
-rw-r--r--metrics/src/main/java/ai/vespa/metrics/ConfigServerMetrics.java66
-rw-r--r--metrics/src/main/java/ai/vespa/metrics/Unit.java12
-rw-r--r--model-evaluation/pom.xml3
-rw-r--r--model-integration/pom.xml10
-rw-r--r--model-integration/src/main/java/ai/vespa/embedding/BertBaseEmbedder.java20
-rw-r--r--model-integration/src/main/java/ai/vespa/embedding/PoolingStrategy.java48
-rw-r--r--model-integration/src/main/java/ai/vespa/embedding/huggingface/HuggingFaceEmbedder.java29
-rw-r--r--model-integration/src/test/java/ai/vespa/embedding/huggingface/HuggingFaceEmbedderTest.java36
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/ConfigServerApiImpl.java11
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java23
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java2
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java4
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepositoryTest.java9
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollectorTest.java26
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java158
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java29
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java205
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerInstance.java4
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainer.java9
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/DeprovisionedExpirer.java31
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Expirer.java5
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java126
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgrader.java99
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java13
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java123
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java20
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Rebalancer.java3
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Agent.java1
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java54
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Status.java42
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsUpgrader.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializer.java3
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java4
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java15
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java52
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java22
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisionRequest.java63
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java42
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java8
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java120
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java1
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java16
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java40
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java8
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java21
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisionedHost.java12
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ArchiveResponse.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodePatcher.java11
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java4
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/WireguardResponse.java25
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java90
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java64
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java56
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingUsingBcpGroupInfoTest.java36
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java8
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java8
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java4
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityCheckerTest.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DeprovisionedExpirerTest.java56
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DirtyExpirerTest.java11
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainerTest.java154
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgraderTest.java85
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/HostRetirerTest.java12
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java35
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java8
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializerTest.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializerTest.java13
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java79
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java22
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java18
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java14
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java18
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application1.json2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application2.json2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/archives.json2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/capacity-zone.json2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/controller1.json26
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-container1.json38
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-os-upgrade-complete.json43
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports-2.json40
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports-3.json35
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports-4.json35
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports.json40
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1.json43
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node2.json45
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node3.json43
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node4.json43
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node5.json43
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/dockerhost1-with-firmware-data.json43
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/dockerhost6.json22
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/maintenance.json3
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node1.json42
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node10.json39
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node11.json31
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node13.json2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node14.json2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node2.json39
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node3.json29
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-after-changes.json37
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-wg.json39
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-with-hostnames.json41
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4.json39
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5-after-changes.json29
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5.json29
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node55.json29
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node6.json39
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node7.json29
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node8.json2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node9.json27
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/parent2.json27
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/wireguard.json9
-rw-r--r--openai-client/pom.xml5
-rw-r--r--opennlp-linguistics/pom.xml4
-rw-r--r--opennlp-linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpTokenizer.java41
-rw-r--r--opennlp-linguistics/src/test/java/com/yahoo/language/opennlp/OpenNlpTokenizationTestCase.java31
-rw-r--r--orchestrator-restapi/pom.xml3
-rw-r--r--orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ClusterApiImpl.java3
-rw-r--r--orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicy.java9
-rw-r--r--parent/pom.xml3
-rw-r--r--screwdriver.yaml17
-rwxr-xr-xscrewdriver/release-container-image-docker.sh8
-rwxr-xr-xscrewdriver/release-java-artifacts.sh2
-rw-r--r--searchcore/src/apps/vespa-gen-testdocs/vespa-gen-testdocs.cpp4
-rw-r--r--searchcore/src/tests/grouping/grouping.cpp1
-rw-r--r--searchcore/src/tests/proton/attribute/attribute_test.cpp9
-rw-r--r--searchcore/src/tests/proton/matching/matching_test.cpp7
-rw-r--r--searchcore/src/tests/proton/matching/querynodes_test.cpp4
-rw-r--r--searchcore/src/vespa/searchcore/proton/attribute/attributemanager.cpp6
-rw-r--r--searchcore/src/vespa/searchcore/proton/attribute/document_field_extractor.cpp1
-rw-r--r--searchcore/src/vespa/searchcore/proton/attribute/imported_attributes_context.cpp30
-rw-r--r--searchcore/src/vespa/searchcore/proton/attribute/imported_attributes_context.h16
-rw-r--r--searchcore/src/vespa/searchcore/proton/common/scheduled_forward_executor.cpp14
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/CMakeLists.txt1
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/blueprintbuilder.cpp22
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/blueprintbuilder.h6
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/isearchcontext.h7
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/match_context.cpp19
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/match_context.h17
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/matcher.cpp12
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/partial_result.h2
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/querynodes.h2
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/same_element_builder.h13
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/search_session.cpp12
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/search_session.h14
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/matchview.cpp16
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/matchview.h27
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/searchview.cpp2
-rw-r--r--searchcore/src/vespa/searchcorespi/index/iindexmaintaineroperations.h2
-rw-r--r--searchlib/pom.xml4
-rw-r--r--searchlib/src/apps/uniform/uniform.cpp1
-rw-r--r--searchlib/src/tests/attribute/bitvector_search_cache/bitvector_search_cache_test.cpp7
-rw-r--r--searchlib/src/tests/attribute/imported_search_context/imported_search_context_test.cpp2
-rw-r--r--searchlib/src/tests/attribute/stringattribute/stringattribute_test.cpp4
-rw-r--r--searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp52
-rw-r--r--searchlib/src/tests/features/nns_closeness/nns_closeness_test.cpp20
-rw-r--r--searchlib/src/tests/fef/termfieldmodel/termfieldmodel_test.cpp22
-rw-r--r--searchlib/src/tests/tensor/distance_functions/distance_functions_test.cpp32
-rw-r--r--searchlib/src/tests/tensor/hnsw_index/hnsw_index_test.cpp35
-rw-r--r--searchlib/src/vespa/searchcommon/attribute/config.cpp7
-rw-r--r--searchlib/src/vespa/searchcommon/attribute/config.h7
-rw-r--r--searchlib/src/vespa/searchcommon/attribute/iattributecontext.h5
-rw-r--r--searchlib/src/vespa/searchlib/aggregation/groupinglevel.cpp9
-rw-r--r--searchlib/src/vespa/searchlib/aggregation/groupinglevel.h2
-rw-r--r--searchlib/src/vespa/searchlib/aggregation/perdocexpression.h1
-rw-r--r--searchlib/src/vespa/searchlib/attribute/CMakeLists.txt1
-rw-r--r--searchlib/src/vespa/searchlib/attribute/attribute_blueprint_factory.cpp4
-rw-r--r--searchlib/src/vespa/searchlib/attribute/attribute_header.cpp63
-rw-r--r--searchlib/src/vespa/searchlib/attribute/attribute_header.h9
-rw-r--r--searchlib/src/vespa/searchlib/attribute/attributecontext.cpp35
-rw-r--r--searchlib/src/vespa/searchlib/attribute/attributecontext.h10
-rw-r--r--searchlib/src/vespa/searchlib/attribute/attributemanager.cpp5
-rw-r--r--searchlib/src/vespa/searchlib/attribute/attributevector.h24
-rw-r--r--searchlib/src/vespa/searchlib/attribute/bitvector_search_cache.cpp40
-rw-r--r--searchlib/src/vespa/searchlib/attribute/bitvector_search_cache.h16
-rw-r--r--searchlib/src/vespa/searchlib/attribute/createsinglestd.cpp7
-rw-r--r--searchlib/src/vespa/searchlib/attribute/distance_metric_utils.cpp57
-rw-r--r--searchlib/src/vespa/searchlib/attribute/distance_metric_utils.h16
-rw-r--r--searchlib/src/vespa/searchlib/attribute/enumcomparator.cpp12
-rw-r--r--searchlib/src/vespa/searchlib/attribute/enumcomparator.h8
-rw-r--r--searchlib/src/vespa/searchlib/attribute/imported_attribute_vector_read_guard.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/attribute/imported_attribute_vector_read_guard.h2
-rw-r--r--searchlib/src/vespa/searchlib/attribute/imported_search_context.h2
-rw-r--r--searchlib/src/vespa/searchlib/attribute/load_utils.hpp1
-rw-r--r--searchlib/src/vespa/searchlib/attribute/postinglistsearchcontext.cpp24
-rw-r--r--searchlib/src/vespa/searchlib/attribute/postinglistsearchcontext.h19
-rw-r--r--searchlib/src/vespa/searchlib/attribute/postinglistsearchcontext.hpp5
-rw-r--r--searchlib/src/vespa/searchlib/attribute/readerbase.cpp8
-rw-r--r--searchlib/src/vespa/searchlib/attribute/readerbase.h7
-rw-r--r--searchlib/src/vespa/searchlib/attribute/search_context.cpp8
-rw-r--r--searchlib/src/vespa/searchlib/attribute/search_context.h7
-rw-r--r--searchlib/src/vespa/searchlib/attribute/singlenumericattribute.cpp3
-rw-r--r--searchlib/src/vespa/searchlib/attribute/string_search_helper.cpp10
-rw-r--r--searchlib/src/vespa/searchlib/attribute/string_search_helper.h11
-rw-r--r--searchlib/src/vespa/searchlib/bitcompression/compression.h2
-rw-r--r--searchlib/src/vespa/searchlib/bitcompression/countcompression.cpp1
-rw-r--r--searchlib/src/vespa/searchlib/bitcompression/pagedict4.h2
-rw-r--r--searchlib/src/vespa/searchlib/bitcompression/posocc_fields_params.cpp7
-rw-r--r--searchlib/src/vespa/searchlib/bitcompression/posocc_fields_params.h6
-rw-r--r--searchlib/src/vespa/searchlib/bitcompression/posocccompression.cpp1
-rw-r--r--searchlib/src/vespa/searchlib/common/bitvectorcache.cpp50
-rw-r--r--searchlib/src/vespa/searchlib/common/bitvectorcache.h18
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/fieldreader.h8
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/fileheader.cpp1
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/fusion.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader.cpp5
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader_base.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer.cpp1
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer_base.cpp1
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/zcposocciterators.cpp3
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/zcposting.cpp1
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/zcpostingiterators.cpp1
-rw-r--r--searchlib/src/vespa/searchlib/expression/arrayatlookupfunctionnode.cpp29
-rw-r--r--searchlib/src/vespa/searchlib/expression/arrayatlookupfunctionnode.h26
-rw-r--r--searchlib/src/vespa/searchlib/expression/attribute_map_lookup_node.cpp1
-rw-r--r--searchlib/src/vespa/searchlib/expression/attributenode.cpp9
-rw-r--r--searchlib/src/vespa/searchlib/expression/attributenode.h1
-rw-r--r--searchlib/src/vespa/searchlib/expression/debugwaitfunctionnode.h3
-rw-r--r--searchlib/src/vespa/searchlib/expression/documentfieldnode.cpp1
-rw-r--r--searchlib/src/vespa/searchlib/expression/expressiontree.cpp14
-rw-r--r--searchlib/src/vespa/searchlib/expression/fixedwidthbucketfunctionnode.cpp20
-rw-r--r--searchlib/src/vespa/searchlib/expression/fixedwidthbucketfunctionnode.h9
-rw-r--r--searchlib/src/vespa/searchlib/expression/functionnode.h9
-rw-r--r--searchlib/src/vespa/searchlib/expression/functionnodes.cpp205
-rw-r--r--searchlib/src/vespa/searchlib/expression/interpolatedlookupfunctionnode.cpp51
-rw-r--r--searchlib/src/vespa/searchlib/expression/interpolatedlookupfunctionnode.h11
-rw-r--r--searchlib/src/vespa/searchlib/expression/multiargfunctionnode.h11
-rw-r--r--searchlib/src/vespa/searchlib/expression/orfunctionnode.h6
-rw-r--r--searchlib/src/vespa/searchlib/expression/rangebucketpredef.h8
-rw-r--r--searchlib/src/vespa/searchlib/expression/relevancenode.h5
-rw-r--r--searchlib/src/vespa/searchlib/expression/resultvector.h101
-rw-r--r--searchlib/src/vespa/searchlib/expression/unaryfunctionnode.h7
-rw-r--r--searchlib/src/vespa/searchlib/features/closenessfeature.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/features/distance_calculator_bundle.cpp8
-rw-r--r--searchlib/src/vespa/searchlib/features/distance_calculator_bundle.h3
-rw-r--r--searchlib/src/vespa/searchlib/fef/itermdata.h2
-rw-r--r--searchlib/src/vespa/searchlib/fef/itermfielddata.h2
-rw-r--r--searchlib/src/vespa/searchlib/fef/matchdatalayout.cpp8
-rw-r--r--searchlib/src/vespa/searchlib/fef/matchdatalayout.h9
-rw-r--r--searchlib/src/vespa/searchlib/fef/objectstore.h2
-rw-r--r--searchlib/src/vespa/searchlib/fef/phrase_splitter_query_env.cpp1
-rw-r--r--searchlib/src/vespa/searchlib/fef/phrasesplitter.cpp1
-rw-r--r--searchlib/src/vespa/searchlib/fef/simpletermdata.h21
-rw-r--r--searchlib/src/vespa/searchlib/fef/termfieldmatchdataarray.h2
-rw-r--r--searchlib/src/vespa/searchlib/memoryindex/field_index.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/memoryindex/url_field_inverter.cpp1
-rw-r--r--searchlib/src/vespa/searchlib/parsequery/stackdumpiterator.cpp1
-rw-r--r--searchlib/src/vespa/searchlib/query/query_term_ucs4.cpp19
-rw-r--r--searchlib/src/vespa/searchlib/query/query_term_ucs4.h1
-rw-r--r--searchlib/src/vespa/searchlib/query/tree/node.h1
-rw-r--r--searchlib/src/vespa/searchlib/query/tree/termnodes.cpp1
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/blueprint.h1
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/create_blueprint_visitor_helper.cpp7
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.cpp10
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.h1
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/equiv_blueprint.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/fake_search.cpp11
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/fake_search.h8
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/field_spec.h2
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.cpp12
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.h4
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/same_element_blueprint.cpp5
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/same_element_search.cpp1
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/searchable.h27
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/simple_phrase_search.cpp1
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.cpp7
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.h3
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.cpp9
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.h1
-rw-r--r--searchlib/src/vespa/searchlib/tensor/angular_distance.cpp7
-rw-r--r--searchlib/src/vespa/searchlib/tensor/distance_calculator.h2
-rw-r--r--searchlib/src/vespa/searchlib/tensor/distance_function.h7
-rw-r--r--searchlib/src/vespa/searchlib/tensor/geo_degrees_distance.cpp8
-rw-r--r--searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp122
-rw-r--r--searchlib/src/vespa/searchlib/tensor/hnsw_index.h15
-rw-r--r--searchlib/src/vespa/searchlib/tensor/hnsw_multi_best_neighbors.h1
-rw-r--r--searchlib/src/vespa/searchlib/tensor/hnsw_single_best_neighbors.h1
-rw-r--r--searchlib/src/vespa/searchlib/tensor/mips_distance_transform.cpp3
-rw-r--r--searchlib/src/vespa/searchlib/tensor/mips_distance_transform.h18
-rw-r--r--searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index.h8
-rw-r--r--searchlib/src/vespa/searchlib/tensor/tensor_attribute.cpp5
-rw-r--r--searchlib/src/vespa/searchlib/tensor/tensor_attribute_loader.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/test/diskindex/threelevelcountbuffers.cpp1
-rw-r--r--searchlib/src/vespa/searchlib/test/features/distance_closeness_fixture.cpp13
-rw-r--r--searchlib/src/vespa/searchlib/test/features/distance_closeness_fixture.h12
-rw-r--r--searchlib/src/vespa/searchlib/util/rawbuf.cpp60
-rw-r--r--searchlib/src/vespa/searchlib/util/rawbuf.h39
-rw-r--r--searchsummary/src/vespa/searchsummary/docsummary/docsumstate.cpp1
-rw-r--r--searchsummary/src/vespa/searchsummary/docsummary/geoposdfw.cpp1
-rw-r--r--searchsummary/src/vespa/searchsummary/docsummary/juniper_query_adapter.cpp3
-rw-r--r--security-utils/src/main/java/com/yahoo/security/HKDF.java6
-rw-r--r--security-utils/src/main/java/com/yahoo/security/token/Token.java86
-rw-r--r--security-utils/src/main/java/com/yahoo/security/token/TokenCheckHash.java46
-rw-r--r--security-utils/src/main/java/com/yahoo/security/token/TokenDomain.java57
-rw-r--r--security-utils/src/main/java/com/yahoo/security/token/TokenFingerprint.java60
-rw-r--r--security-utils/src/main/java/com/yahoo/security/token/TokenGenerator.java39
-rw-r--r--security-utils/src/test/java/com/yahoo/security/token/TokenTest.java134
-rw-r--r--storage/src/tests/distributor/check_condition_test.cpp42
-rw-r--r--storage/src/tests/distributor/putoperationtest.cpp3
-rw-r--r--storage/src/tests/distributor/removeoperationtest.cpp3
-rw-r--r--storage/src/tests/storageserver/changedbucketownershiphandlertest.cpp57
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe_component.cpp5
-rw-r--r--storage/src/vespa/storage/distributor/distributormetricsset.cpp2
-rw-r--r--storage/src/vespa/storage/distributor/distributormetricsset.h22
-rw-r--r--storage/src/vespa/storage/distributor/externaloperationhandler.cpp7
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/check_condition.cpp27
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/check_condition.h5
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/putoperation.cpp5
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/putoperation.h3
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp5
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/removeoperation.h3
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp3
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.h1
-rw-r--r--storage/src/vespa/storage/distributor/persistence_operation_metric_set.cpp14
-rw-r--r--storage/src/vespa/storage/distributor/persistence_operation_metric_set.h6
-rw-r--r--storage/src/vespa/storage/persistence/filestorage/filestormanager.cpp3
-rw-r--r--storage/src/vespa/storage/persistence/persistenceutil.cpp3
-rw-r--r--storage/src/vespa/storage/storageserver/changedbucketownershiphandler.cpp147
-rw-r--r--storage/src/vespa/storage/storageserver/changedbucketownershiphandler.h50
-rw-r--r--streamingvisitors/src/vespa/searchvisitor/searchvisitor.cpp5
-rw-r--r--streamingvisitors/src/vespa/vsm/searcher/nearest_neighbor_field_searcher.cpp33
-rw-r--r--streamingvisitors/src/vespa/vsm/vsm/docsumfilter.cpp1
-rw-r--r--streamingvisitors/src/vespa/vsm/vsm/fieldsearchspec.cpp17
-rw-r--r--streamingvisitors/src/vespa/vsm/vsm/fieldsearchspec.h4
-rw-r--r--tenant-cd-api/src/main/java/ai/vespa/hosted/cd/EnabledInRegions.java2
-rw-r--r--vdslib/pom.xml4
-rw-r--r--vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/client/AthenzIdentityProviderImpl.java4
-rw-r--r--vespa-dependencies-enforcer/allowed-maven-dependencies.txt2
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpFeedClient.java7
-rw-r--r--vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/HttpFeedClientTest.java8
-rw-r--r--vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java13
-rw-r--r--vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java42
-rw-r--r--vespajlib/pom.xml4
-rw-r--r--vespajlib/src/main/java/com/yahoo/concurrent/maintenance/Maintainer.java6
-rw-r--r--vespajlib/src/main/java/com/yahoo/slime/SlimeUtils.java12
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/serialization/JsonFormat.java18
-rw-r--r--vespalib/src/apps/vespa-stress-and-validate-memory/stress_and_validate_memory.cpp6
-rw-r--r--vespalib/src/tests/btree/btree_test.cpp7
-rw-r--r--vespalib/src/tests/datastore/array_store/array_store_test.cpp10
-rw-r--r--vespalib/src/tests/util/mmap_file_allocator/mmap_file_allocator_test.cpp9
-rw-r--r--vespalib/src/vespa/vespalib/btree/btree.h111
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreebuilder.hpp1
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreeiterator.h402
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreeiterator.hpp34
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreenode.h76
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreenode.hpp10
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreenodeallocator.h6
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreeroot.h32
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreeroot.hpp115
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreerootbase.hpp1
-rw-r--r--vespalib/src/vespa/vespalib/geo/zcurve.cpp28
-rw-r--r--vespalib/src/vespa/vespalib/geo/zcurve.h31
-rw-r--r--vespalib/src/vespa/vespalib/metrics/simple_metrics_manager.cpp1
-rw-r--r--vespalib/src/vespa/vespalib/metrics/stable_store.h5
-rw-r--r--vespalib/src/vespa/vespalib/net/tls/peer_policies.cpp2
-rw-r--r--vespalib/src/vespa/vespalib/net/tls/peer_policies.h2
-rw-r--r--vespalib/src/vespa/vespalib/stllike/allocator.h2
-rw-r--r--vespalib/src/vespa/vespalib/test/memory_allocator_observer.cpp2
-rw-r--r--vespalib/src/vespa/vespalib/testkit/time_bomb.cpp3
-rw-r--r--vespalib/src/vespa/vespalib/util/CMakeLists.txt1
-rw-r--r--vespalib/src/vespa/vespalib/util/alloc.cpp64
-rw-r--r--vespalib/src/vespa/vespalib/util/alloc.h26
-rw-r--r--vespalib/src/vespa/vespalib/util/fake_doom.cpp16
-rw-r--r--vespalib/src/vespa/vespalib/util/fake_doom.h24
-rw-r--r--vespalib/src/vespa/vespalib/util/fiddle.h6
-rw-r--r--vespalib/src/vespa/vespalib/util/generationhandler.cpp50
-rw-r--r--vespalib/src/vespa/vespalib/util/generationhandler.h50
-rw-r--r--vespalib/src/vespa/vespalib/util/latch.h1
-rw-r--r--vespalib/src/vespa/vespalib/util/memory_allocator.h12
-rw-r--r--vespalib/src/vespa/vespalib/util/mmap_file_allocator.cpp20
-rw-r--r--vespalog/pom.xml4
-rw-r--r--vespalog/src/vespa/log/control-file.h2
-rw-r--r--vespamalloc/src/vespamalloc/malloc/common.h2
-rw-r--r--vespamalloc/src/vespamalloc/malloc/threadproxy.cpp1
-rw-r--r--vespamalloc/src/vespamalloc/util/osmem.cpp1
665 files changed, 10678 insertions, 6509 deletions
diff --git a/ann_benchmark/src/vespa/ann_benchmark/vespa_ann_benchmark.cpp b/ann_benchmark/src/vespa/ann_benchmark/vespa_ann_benchmark.cpp
index a52e0850b7d..730ee141f83 100644
--- a/ann_benchmark/src/vespa/ann_benchmark/vespa_ann_benchmark.cpp
+++ b/ann_benchmark/src/vespa/ann_benchmark/vespa_ann_benchmark.cpp
@@ -10,6 +10,7 @@
#include <vespa/searchcommon/attribute/config.h>
#include <vespa/eval/eval/value.h>
#include <vespa/vespalib/test/insertion_operators.h>
+#include <vespa/vespalib/util/fake_doom.h>
#include <iostream>
#include <sstream>
#include <limits>
@@ -67,6 +68,7 @@ class HnswIndex
const NearestNeighborIndex* _nearest_neighbor_index;
size_t _dim_size;
bool _normalize_vectors;
+ vespalib::FakeDoom _no_doom;
bool check_lid(uint32_t lid);
bool check_value(const char *op, const std::vector<float>& value);
@@ -87,7 +89,8 @@ HnswIndex::HnswIndex(uint32_t dim_size, const HnswIndexParams &hnsw_index_params
_tensor_attribute(nullptr),
_nearest_neighbor_index(nullptr),
_dim_size(0u),
- _normalize_vectors(normalize_vectors)
+ _normalize_vectors(normalize_vectors),
+ _no_doom()
{
Config cfg(BasicType::TENSOR, CollectionType::SINGLE);
_tensor_type = ValueType::from_spec(make_tensor_spec(dim_size));
@@ -208,7 +211,7 @@ HnswIndex::find_top_k(uint32_t k, const std::vector<float>& value, uint32_t expl
std::vector<float> normalized_value;
auto typed_cells = get_typed_cells(value, normalized_value);
auto df = _nearest_neighbor_index->distance_function_factory().for_query_vector(typed_cells);
- auto raw_result = _nearest_neighbor_index->find_top_k(k, *df, explore_k, std::numeric_limits<double>::max());
+ auto raw_result = _nearest_neighbor_index->find_top_k(k, *df, explore_k, _no_doom.get_doom(), std::numeric_limits<double>::max());
result.reserve(raw_result.size());
switch (_hnsw_index_params.distance_metric()) {
case DistanceMetric::Euclidean:
diff --git a/annotations/pom.xml b/annotations/pom.xml
index 067a6966a11..3e232a9c64f 100644
--- a/annotations/pom.xml
+++ b/annotations/pom.xml
@@ -64,6 +64,7 @@
<Bundle-SymbolicName>${project.artifactId}</Bundle-SymbolicName>
<Bundle-Version>${parsedVersion.majorVersion}.${parsedVersion.minorVersion}.${parsedVersion.incrementalVersion}</Bundle-Version>
<Export-Package>com.yahoo.component.annotation;version=1.0.0;-noimport:=true</Export-Package>
+ <X-JDisc-PublicApi-Package>com.yahoo.component.annotation</X-JDisc-PublicApi-Package>
<_nouses>true</_nouses> <!-- Don't include 'uses' directives for package exports -->
</instructions>
</configuration>
diff --git a/application-model/pom.xml b/application-model/pom.xml
index c47fccc51bc..9f5cd9c23c1 100644
--- a/application-model/pom.xml
+++ b/application-model/pom.xml
@@ -55,6 +55,9 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
+ <configuration>
+ <suppressWarningMissingImportPackages>true</suppressWarningMissingImportPackages>
+ </configuration>
</plugin>
<plugin>
diff --git a/bundle-plugin-test/integration-test/pom.xml b/bundle-plugin-test/integration-test/pom.xml
index 7384bf3aea6..acd075d0365 100644
--- a/bundle-plugin-test/integration-test/pom.xml
+++ b/bundle-plugin-test/integration-test/pom.xml
@@ -56,7 +56,14 @@
<classifier>bundle</classifier>
<version>${project.version}</version>
</dependency>
+ <dependency>
+ <groupId>com.yahoo.vespa.bundle-plugin</groupId>
+ <artifactId>non-public-api-usage</artifactId>
+ <classifier>bundle</classifier>
+ <version>${project.version}</version>
+ </dependency>
</dependencies>
+
<build>
<plugins>
<plugin>
diff --git a/bundle-plugin-test/integration-test/src/test/java/com/yahoo/container/plugin/BundleTest.java b/bundle-plugin-test/integration-test/src/test/java/com/yahoo/container/plugin/BundleTest.java
index 2e3a3204ef5..673d7d8e09e 100644
--- a/bundle-plugin-test/integration-test/src/test/java/com/yahoo/container/plugin/BundleTest.java
+++ b/bundle-plugin-test/integration-test/src/test/java/com/yahoo/container/plugin/BundleTest.java
@@ -30,9 +30,9 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
public class BundleTest {
static final String TEST_BUNDLE_PATH = System.getProperty("test.bundle.path", ".") + "/";
- // If bundle-plugin-test is compiled in a mvn command that also built dependencies, e.g. jrt,
- // the artifact is jrt.jar, otherwise the installed and versioned artifact
- // is used: jrt-7-SNAPSHOT.jar or e.g. jrt-7.123.45.jar.
+ // If bundle-plugin-test is compiled in a mvn command that also built dependencies, e.g. 'defaults',
+ // the artifact is defaults.jar, otherwise the installed and versioned artifact
+ // is used: defaults-7-SNAPSHOT.jar or e.g. defaults-7.123.45.jar.
private static final String snapshotOrVersionOrNone = "(-\\d+((-SNAPSHOT)|((\\.\\d+(\\.\\d+)?)?))?)?\\.jar";
private JarFile jarFile;
@@ -103,34 +103,37 @@ public class BundleTest {
assertTrue(exportPackage.contains("com.yahoo.test;version=1.2.3.RELEASE"));
}
- // TODO: use another jar than jrt, which now pulls in a lot of dependencies that pollute the manifest of the
- // generated bundle. (It's compile scoped in pom.xml to be added to the bundle-cp.)
+ @Test
+ void require_that_manifest_contains_public_api_for_this_bundle_and_embedded_bundles() {
+ assertEquals("com.yahoo.test,com.yahoo.vespa.defaults", mainAttributes.getValue("X-JDisc-PublicApi-Package"));
+ }
+
@Test
void require_that_manifest_contains_bundle_class_path() {
String bundleClassPath = mainAttributes.getValue("Bundle-ClassPath");
assertTrue(bundleClassPath.contains(".,"));
- Pattern jrtPattern = Pattern.compile("dependencies/jrt" + snapshotOrVersionOrNone);
- assertTrue(jrtPattern.matcher(bundleClassPath).find(), "Bundle class path did not contain jrt.");
+ Pattern jrtPattern = Pattern.compile("dependencies/defaults" + snapshotOrVersionOrNone);
+        assertTrue(jrtPattern.matcher(bundleClassPath).find(), "Bundle class path did not contain 'defaults'.");
}
@Test
void require_that_component_jar_file_contains_compile_artifacts() {
- String depJrt = "dependencies/jrt";
- Pattern jrtPattern = Pattern.compile(depJrt + snapshotOrVersionOrNone);
- ZipEntry jrtEntry = null;
+ String requiredDep = "dependencies/defaults";
+ Pattern depPattern = Pattern.compile(requiredDep + snapshotOrVersionOrNone);
+ ZipEntry depEntry = null;
Enumeration<JarEntry> entries = jarFile.entries();
while (entries.hasMoreElements()) {
var e = entries.nextElement();
- if (e.getName().startsWith(depJrt)) {
- if (jrtPattern.matcher(e.getName()).matches()) {
- jrtEntry = e;
+ if (e.getName().startsWith(requiredDep)) {
+ if (depPattern.matcher(e.getName()).matches()) {
+ depEntry = e;
break;
}
}
}
- assertNotNull(jrtEntry, "Component jar file did not contain jrt dependency.");
+ assertNotNull(depEntry, "Component jar file did not contain 'defaults' dependency.");
}
diff --git a/bundle-plugin-test/integration-test/src/test/java/com/yahoo/container/plugin/NonPublicApiDetectionTest.java b/bundle-plugin-test/integration-test/src/test/java/com/yahoo/container/plugin/NonPublicApiDetectionTest.java
new file mode 100644
index 00000000000..42ac99c65e5
--- /dev/null
+++ b/bundle-plugin-test/integration-test/src/test/java/com/yahoo/container/plugin/NonPublicApiDetectionTest.java
@@ -0,0 +1,44 @@
+package com.yahoo.container.plugin;
+
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Set;
+import java.util.jar.Attributes;
+import java.util.jar.JarFile;
+import java.util.stream.Collectors;
+
+import static com.yahoo.container.plugin.BundleTest.findBundleJar;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+/**
+ * @author gjoranv
+ */
+public class NonPublicApiDetectionTest {
+
+ private static Set<String> usedNonPublicApi;
+
+ @BeforeAll
+ public static void setup() {
+ try {
+ File componentJar = findBundleJar("non-public-api-usage");
+ Attributes mainAttributes = new JarFile(componentJar).getManifest().getMainAttributes();
+ var nonPublicApiAttribute = mainAttributes.getValue("X-JDisc-Non-PublicApi-Import-Package");
+ usedNonPublicApi = Arrays.stream(nonPublicApiAttribute.split(",")).collect(Collectors.toSet());
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ @Test
+ void usage_of_non_publicApi_packages_is_detected() {
+ assertEquals(2, usedNonPublicApi.size());
+ assertTrue(usedNonPublicApi.contains("ai.vespa.http"));
+ assertTrue(usedNonPublicApi.contains("com.yahoo.io"));
+ }
+
+}
diff --git a/bundle-plugin-test/test-bundles/main/pom.xml b/bundle-plugin-test/test-bundles/main/pom.xml
index b5f8f7b9a6a..a6cf45947f3 100644
--- a/bundle-plugin-test/test-bundles/main/pom.xml
+++ b/bundle-plugin-test/test-bundles/main/pom.xml
@@ -17,7 +17,7 @@
<dependencies>
<dependency>
<groupId>com.yahoo.vespa</groupId>
- <artifactId>jrt</artifactId>
+ <artifactId>defaults</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
@@ -34,6 +34,7 @@
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
<configuration>
+ <bundleType>INTERNAL</bundleType>
<Import-Package>
manualImport.withoutVersion,
manualImport.withVersion;version="12.3.4",
diff --git a/bundle-plugin-test/test-bundles/main/src/main/java/com/yahoo/test/package-info.java b/bundle-plugin-test/test-bundles/main/src/main/java/com/yahoo/test/package-info.java
index 852a8387cfe..0169574fe74 100644
--- a/bundle-plugin-test/test-bundles/main/src/main/java/com/yahoo/test/package-info.java
+++ b/bundle-plugin-test/test-bundles/main/src/main/java/com/yahoo/test/package-info.java
@@ -1,6 +1,8 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
@ExportPackage(version = @Version(major = 1, minor = 2, micro = 3, qualifier = "RELEASE"))
+@PublicApi
package com.yahoo.test;
+import com.yahoo.api.annotations.PublicApi;
import com.yahoo.osgi.annotation.ExportPackage;
import com.yahoo.osgi.annotation.Version;
diff --git a/bundle-plugin-test/test-bundles/non-public-api-usage/pom.xml b/bundle-plugin-test/test-bundles/non-public-api-usage/pom.xml
new file mode 100644
index 00000000000..5386346b8f7
--- /dev/null
+++ b/bundle-plugin-test/test-bundles/non-public-api-usage/pom.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0"?>
+<!-- Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+ http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>com.yahoo.vespa.bundle-plugin</groupId>
+ <artifactId>test-bundles</artifactId>
+ <version>8-SNAPSHOT</version>
+ <relativePath>../pom.xml</relativePath>
+ </parent>
+ <artifactId>non-public-api-usage</artifactId>
+ <version>8-SNAPSHOT</version>
+ <packaging>container-plugin</packaging>
+
+ <dependencies>
+ <dependency>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>defaults</artifactId>
+ <version>${project.version}</version>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>vespajlib</artifactId>
+ <version>${project.version}</version>
+ <scope>provided</scope>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>bundle-plugin</artifactId>
+ <extensions>true</extensions>
+ <configuration>
+ <failOnWarnings>false</failOnWarnings>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/bundle-plugin-test/test-bundles/non-public-api-usage/src/main/java/com/yahoo/test/UsingBothPublicApiAndNonPublicApiPackages.java b/bundle-plugin-test/test-bundles/non-public-api-usage/src/main/java/com/yahoo/test/UsingBothPublicApiAndNonPublicApiPackages.java
new file mode 100644
index 00000000000..f2c64661ad6
--- /dev/null
+++ b/bundle-plugin-test/test-bundles/non-public-api-usage/src/main/java/com/yahoo/test/UsingBothPublicApiAndNonPublicApiPackages.java
@@ -0,0 +1,15 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.test;
+
+public class UsingBothPublicApiAndNonPublicApiPackages {
+
+ com.yahoo.vespa.defaults.Defaults publicFromDefaults = null;
+
+ com.yahoo.text.BooleanParser publicFromVespajlib = null;
+
+
+ ai.vespa.http.DomainName nonPublic1 = null;
+
+ com.yahoo.io.ByteWriter nonPublic2 = null;
+
+}
diff --git a/bundle-plugin-test/test-bundles/pom.xml b/bundle-plugin-test/test-bundles/pom.xml
index 3af10826adc..34c6b2e4540 100644
--- a/bundle-plugin-test/test-bundles/pom.xml
+++ b/bundle-plugin-test/test-bundles/pom.xml
@@ -50,6 +50,7 @@
<modules>
<module>artifact-version-for-exports</module>
<module>artifact-version-for-exports-dep</module>
+ <module>non-public-api-usage</module>
<module>main</module>
</modules>
diff --git a/bundle-plugin/src/main/java/com/yahoo/container/plugin/bundle/AnalyzeBundle.java b/bundle-plugin/src/main/java/com/yahoo/container/plugin/bundle/AnalyzeBundle.java
index 2b5941cc5aa..af6c82023ab 100644
--- a/bundle-plugin/src/main/java/com/yahoo/container/plugin/bundle/AnalyzeBundle.java
+++ b/bundle-plugin/src/main/java/com/yahoo/container/plugin/bundle/AnalyzeBundle.java
@@ -7,12 +7,14 @@ import com.yahoo.container.plugin.util.JarFiles;
import java.io.File;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.jar.Manifest;
+import java.util.stream.Collectors;
/**
* Static utilities for analyzing jar files.
@@ -34,20 +36,42 @@ public class AnalyzeBundle {
}
static List<Export> exportedPackages(File jarFile) {
+ var manifest = getOsgiManifest(jarFile);
+ if (manifest == null) return Collections.emptyList();
try {
- Optional<Manifest> jarManifest = JarFiles.getManifest(jarFile);
- if (jarManifest.isPresent()) {
- Manifest manifest = jarManifest.get();
- if (isOsgiManifest(manifest)) {
- return parseExports(manifest);
- }
- }
- return Collections.emptyList();
+ return parseExports(manifest);
} catch (Exception e) {
throw new RuntimeException(String.format("Invalid manifest in bundle '%s'", jarFile.getPath()), e);
}
}
+ public static List<String> publicApiPackagesAggregated(Collection<File> jarFiles) {
+ return jarFiles.stream()
+ .map(AnalyzeBundle::publicApiPackages)
+ .flatMap(List::stream)
+ .distinct()
+ .toList();
+ }
+
+ static List<String> publicApiPackages(File jarFile) {
+ var manifest = getOsgiManifest(jarFile);
+ if (manifest == null) return Collections.emptyList();
+ return getMainAttributeValue(manifest, "X-JDisc-PublicApi-Package")
+ .map(s -> Arrays.asList(s.split(",")))
+ .orElseGet(ArrayList::new);
+ }
+
+ private static Manifest getOsgiManifest(File jarFile) {
+ Optional<Manifest> jarManifest = JarFiles.getManifest(jarFile);
+ if (jarManifest.isPresent()) {
+ Manifest manifest = jarManifest.get();
+ if (isOsgiManifest(manifest)) {
+ return manifest;
+ }
+ }
+ return null;
+ }
+
public static Optional<String> bundleSymbolicName(File jarFile) {
return JarFiles.getManifest(jarFile).flatMap(AnalyzeBundle::getBundleSymbolicName);
}
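
The new publicApiPackages helper above boils down to reading one comma-separated main attribute from a jar's OSGi manifest. A minimal standalone sketch of that pattern (not part of the patch), using only java.util.jar; the class name and the trimming of entries are illustrative assumptions:

import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.jar.JarFile;
import java.util.jar.Manifest;

public class ManifestAttributeSketch {

    // Returns the comma-separated values of a main manifest attribute, or an empty list
    // when the jar has no manifest or the attribute is missing.
    static List<String> commaSeparatedAttribute(File jar, String attribute) throws IOException {
        try (JarFile jarFile = new JarFile(jar)) {
            Manifest manifest = jarFile.getManifest();
            if (manifest == null) return List.of();
            String value = manifest.getMainAttributes().getValue(attribute);
            if (value == null || value.isBlank()) return List.of();
            return Arrays.stream(value.split(",")).map(String::trim).toList();
        }
    }

    public static void main(String[] args) throws IOException {
        // Pass the path to any built bundle jar; the attribute name matches the header introduced here.
        System.out.println(commaSeparatedAttribute(new File(args[0]), "X-JDisc-PublicApi-Package"));
    }
}
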
diff --git a/bundle-plugin/src/main/java/com/yahoo/container/plugin/classanalysis/AnalyzeClassVisitor.java b/bundle-plugin/src/main/java/com/yahoo/container/plugin/classanalysis/AnalyzeClassVisitor.java
index 46a35b07ea7..e57af606b3a 100644
--- a/bundle-plugin/src/main/java/com/yahoo/container/plugin/classanalysis/AnalyzeClassVisitor.java
+++ b/bundle-plugin/src/main/java/com/yahoo/container/plugin/classanalysis/AnalyzeClassVisitor.java
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.container.plugin.classanalysis;
+import com.yahoo.api.annotations.PublicApi;
import com.yahoo.osgi.annotation.ExportPackage;
import com.yahoo.osgi.annotation.Version;
import org.apache.maven.artifact.versioning.ArtifactVersion;
@@ -28,6 +29,7 @@ class AnalyzeClassVisitor extends ClassVisitor implements ImportCollector {
private String name = null;
private final Set<String> imports = new HashSet<>();
private Optional<ExportPackageAnnotation> exportPackageAnnotation = Optional.empty();
+ private boolean isPublicApi = false;
private final Optional<ArtifactVersion> defaultExportPackageVersion;
@@ -159,6 +161,9 @@ class AnalyzeClassVisitor extends ClassVisitor implements ImportCollector {
public AnnotationVisitor visitAnnotation(String desc, boolean visible) {
if (ExportPackage.class.getName().equals(Type.getType(desc).getClassName())) {
return visitExportPackage();
+        } else if (PublicApi.class.getName().equals(Type.getType(desc).getClassName())) {
+ isPublicApi = true;
+ return null;
} else {
if (visible) {
addImportWithTypeDesc(desc);
@@ -169,7 +174,8 @@ class AnalyzeClassVisitor extends ClassVisitor implements ImportCollector {
ClassFileMetaData result() {
assert (!imports.contains("int"));
- return new ClassFileMetaData(name, imports, exportPackageAnnotation);
+ var packageInfo = new PackageInfo(Packages.packageName(name), exportPackageAnnotation, isPublicApi);
+ return new ClassFileMetaData(name, imports, packageInfo);
}
}
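
For context on the comparison added in visitAnnotation above: ASM hands the visitor a JVM type descriptor, not a class name. A minimal sketch (not part of the patch) of that mapping, assuming the ASM library already used by bundle-plugin is on the classpath:

import org.objectweb.asm.Type;

public class AnnotationDescriptorSketch {
    public static void main(String[] args) {
        // visitAnnotation receives descriptors like this one:
        String desc = "Lcom/yahoo/api/annotations/PublicApi;";
        // Type.getType(desc).getClassName() yields the dotted name that the visitor
        // compares against PublicApi.class.getName().
        System.out.println(Type.getType(desc).getClassName()); // com.yahoo.api.annotations.PublicApi
    }
}
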
diff --git a/bundle-plugin/src/main/java/com/yahoo/container/plugin/classanalysis/ClassFileMetaData.java b/bundle-plugin/src/main/java/com/yahoo/container/plugin/classanalysis/ClassFileMetaData.java
index 5601430a27f..7e2f59c1e4d 100644
--- a/bundle-plugin/src/main/java/com/yahoo/container/plugin/classanalysis/ClassFileMetaData.java
+++ b/bundle-plugin/src/main/java/com/yahoo/container/plugin/classanalysis/ClassFileMetaData.java
@@ -14,12 +14,12 @@ public class ClassFileMetaData {
private final String name;
private final Set<String> referencedClasses;
- private final Optional<ExportPackageAnnotation> exportPackage;
+ private final PackageInfo packageInfo;
- public ClassFileMetaData(String name, Set<String> referencedClasses, Optional<ExportPackageAnnotation> exportPackage) {
+ public ClassFileMetaData(String name, Set<String> referencedClasses, PackageInfo packageInfo) {
this.name = name;
this.referencedClasses = referencedClasses;
- this.exportPackage = exportPackage;
+ this.packageInfo = packageInfo;
}
public String getName() {
@@ -30,8 +30,16 @@ public class ClassFileMetaData {
return referencedClasses;
}
+ public PackageInfo packageInfo() {
+ return packageInfo;
+ }
+
public Optional<ExportPackageAnnotation> getExportPackage() {
- return exportPackage;
+ return packageInfo.exportPackage();
+ }
+
+ public boolean isPublicApi() {
+ return packageInfo.isPublicApi();
}
}
diff --git a/bundle-plugin/src/main/java/com/yahoo/container/plugin/classanalysis/ExportPackageAnnotation.java b/bundle-plugin/src/main/java/com/yahoo/container/plugin/classanalysis/ExportPackageAnnotation.java
index 7f3fb9522f7..517a59a5a06 100644
--- a/bundle-plugin/src/main/java/com/yahoo/container/plugin/classanalysis/ExportPackageAnnotation.java
+++ b/bundle-plugin/src/main/java/com/yahoo/container/plugin/classanalysis/ExportPackageAnnotation.java
@@ -26,7 +26,7 @@ public class ExportPackageAnnotation {
requireNonNegative(major, "major");
requireNonNegative(minor, "minor");
requireNonNegative(micro, "micro");
- if (QUALIFIER_PATTERN.matcher(qualifier).matches() == false) {
+ if (! QUALIFIER_PATTERN.matcher(qualifier).matches()) {
throw new IllegalArgumentException(
exportPackageError(String.format("qualifier must follow the format (alpha|digit|'_'|'-')* but was '%s'.", qualifier)));
}
diff --git a/bundle-plugin/src/main/java/com/yahoo/container/plugin/classanalysis/PackageInfo.java b/bundle-plugin/src/main/java/com/yahoo/container/plugin/classanalysis/PackageInfo.java
new file mode 100644
index 00000000000..c19320b8e98
--- /dev/null
+++ b/bundle-plugin/src/main/java/com/yahoo/container/plugin/classanalysis/PackageInfo.java
@@ -0,0 +1,16 @@
+package com.yahoo.container.plugin.classanalysis;
+
+import java.util.Optional;
+
+/**
+ * The name, optional ExportPackage annotation, and public api status of an analyzed package.
+ *
+ * @author gjoranv
+ */
+record PackageInfo(String name, Optional<ExportPackageAnnotation> exportPackage, boolean isPublicApi) {
+
+ PackageInfo hasExportPackageOrElse(PackageInfo other) {
+ return exportPackage().isPresent() ? this : other;
+ }
+
+}
diff --git a/bundle-plugin/src/main/java/com/yahoo/container/plugin/classanalysis/PackageTally.java b/bundle-plugin/src/main/java/com/yahoo/container/plugin/classanalysis/PackageTally.java
index e2de90a6463..51fba228b41 100644
--- a/bundle-plugin/src/main/java/com/yahoo/container/plugin/classanalysis/PackageTally.java
+++ b/bundle-plugin/src/main/java/com/yahoo/container/plugin/classanalysis/PackageTally.java
@@ -17,16 +17,17 @@ import java.util.stream.Collectors;
* @author ollivir
*/
public class PackageTally {
- private final Map<String, Optional<ExportPackageAnnotation>> definedPackagesMap;
+
+ private final Map<String, PackageInfo> definedPackages;
private final Set<String> referencedPackagesUnfiltered;
- PackageTally(Map<String, Optional<ExportPackageAnnotation>> definedPackagesMap, Set<String> referencedPackagesUnfiltered) {
- this.definedPackagesMap = definedPackagesMap;
+ PackageTally(Map<String, PackageInfo> definedPackages, Set<String> referencedPackagesUnfiltered) {
+ this.definedPackages = definedPackages;
this.referencedPackagesUnfiltered = referencedPackagesUnfiltered;
}
public Set<String> definedPackages() {
- return definedPackagesMap.keySet();
+ return definedPackages.keySet();
}
public Set<String> referencedPackages() {
@@ -35,12 +36,19 @@ public class PackageTally {
public Map<String, ExportPackageAnnotation> exportedPackages() {
Map<String, ExportPackageAnnotation> ret = new HashMap<>();
- definedPackagesMap.forEach((k, v) -> {
- v.ifPresent(annotation -> ret.put(k, annotation));
+ definedPackages.forEach((pkg, pkgInfo) -> {
+ pkgInfo.exportPackage().ifPresent(a -> ret.put(pkg, a));
});
return ret;
}
+ public Set<String> publicApiPackages() {
+ return definedPackages.values().stream()
+ .filter(PackageInfo::isPublicApi)
+ .map(PackageInfo::name)
+ .collect(Collectors.toSet());
+ }
+
/**
* Returns the set of packages that is referenced from this tally, but not included in the given set of available packages.
*
@@ -58,36 +66,37 @@ public class PackageTally {
* Represents the classes for two package tallies that are deployed as a single unit.
* <p>
* ExportPackageAnnotations from this has precedence over the other.
+ * TODO: Add unit test and try using Map.merge (as in the functions below). Can't see how Maps.combine is any different.
*/
public PackageTally combine(PackageTally other) {
- Map<String, Optional<ExportPackageAnnotation>> map = Maps.combine(this.definedPackagesMap, other.definedPackagesMap,
- (l, r) -> l.isPresent() ? l : r);
+ var definedPkgs = Maps.combine(this.definedPackages, other.definedPackages, PackageInfo::hasExportPackageOrElse);
Set<String> referencedPkgs = new HashSet<>(this.referencedPackagesUnfiltered);
referencedPkgs.addAll(other.referencedPackagesUnfiltered);
- return new PackageTally(map, referencedPkgs);
+ return new PackageTally(definedPkgs, referencedPkgs);
}
public static PackageTally combine(Collection<PackageTally> packageTallies) {
- Map<String, Optional<ExportPackageAnnotation>> map = new HashMap<>();
+ var definedPkgs = new HashMap<String, PackageInfo>();
Set<String> referencedPkgs = new HashSet<>();
- for (PackageTally pt : packageTallies) {
- pt.definedPackagesMap.forEach((k, v) -> map.merge(k, v, (l, r) -> l.isPresent() ? l : r));
- referencedPkgs.addAll(pt.referencedPackagesUnfiltered);
+ for (PackageTally tally : packageTallies) {
+ tally.definedPackages.forEach((pkg, info) -> definedPkgs.merge(pkg, info, PackageInfo::hasExportPackageOrElse));
+ referencedPkgs.addAll(tally.referencedPackagesUnfiltered);
}
- return new PackageTally(map, referencedPkgs);
+ return new PackageTally(definedPkgs, referencedPkgs);
}
public static PackageTally fromAnalyzedClassFiles(Collection<ClassFileMetaData> analyzedClassFiles) {
- Map<String, Optional<ExportPackageAnnotation>> map = new HashMap<>();
- Set<String> referencedPkgs = new HashSet<>();
+ var definedPkgs = new HashMap<String, PackageInfo>();
+ var referencedPkgs = new HashSet<String>();
- for (ClassFileMetaData metaData : analyzedClassFiles) {
- String packageName = Packages.packageName(metaData.getName());
- map.merge(packageName, metaData.getExportPackage(), (l, r) -> l.isPresent() ? l : r);
- metaData.getReferencedClasses().forEach(className -> referencedPkgs.add(Packages.packageName(className)));
+ for (ClassFileMetaData classData : analyzedClassFiles) {
+ var pkgName = classData.packageInfo().name();
+ definedPkgs.merge(pkgName, classData.packageInfo(), PackageInfo::hasExportPackageOrElse);
+ classData.getReferencedClasses().forEach(className -> referencedPkgs.add(Packages.packageName(className)));
}
- return new PackageTally(map, referencedPkgs);
+ return new PackageTally(definedPkgs, referencedPkgs);
}
+
}
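
The three construction paths above now share one merge rule: when the same package is seen twice, the PackageInfo that carries an @ExportPackage annotation wins. A self-contained sketch (not part of the patch) of how that rule behaves with Map.merge; the nested record and its values are stand-ins for the real PackageInfo:

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

public class PackageMergeSketch {

    record Info(String name, Optional<String> exportVersion, boolean isPublicApi) {
        Info hasExportPackageOrElse(Info other) { return exportVersion().isPresent() ? this : other; }
    }

    public static void main(String[] args) {
        Map<String, Info> defined = new HashMap<>();
        defined.put("com.yahoo.test", new Info("com.yahoo.test", Optional.empty(), false));

        // Map.merge calls the function as (existing, incoming); an annotated incoming
        // entry replaces a plain existing one ...
        defined.merge("com.yahoo.test",
                      new Info("com.yahoo.test", Optional.of("1.2.3"), true),
                      Info::hasExportPackageOrElse);
        System.out.println(defined.get("com.yahoo.test").exportVersion()); // Optional[1.2.3]

        // ... while a later plain entry does not displace the annotated one.
        defined.merge("com.yahoo.test",
                      new Info("com.yahoo.test", Optional.empty(), false),
                      Info::hasExportPackageOrElse);
        System.out.println(defined.get("com.yahoo.test").exportVersion()); // Optional[1.2.3]
    }
}
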
diff --git a/bundle-plugin/src/main/java/com/yahoo/container/plugin/classanalysis/Packages.java b/bundle-plugin/src/main/java/com/yahoo/container/plugin/classanalysis/Packages.java
index 9eef8a55c01..48a128c2f0d 100644
--- a/bundle-plugin/src/main/java/com/yahoo/container/plugin/classanalysis/Packages.java
+++ b/bundle-plugin/src/main/java/com/yahoo/container/plugin/classanalysis/Packages.java
@@ -1,8 +1,13 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.container.plugin.classanalysis;
+import com.yahoo.container.plugin.osgi.ImportPackages;
+
import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
import java.util.Set;
+import java.util.stream.Collectors;
/**
* Utility methods related to packages.
@@ -31,6 +36,22 @@ public class Packages {
}
}
+ /**
+ * Returns the imported Vespa packages that don't exist in the given set of allowed packages.
+ */
+ public static List<String> disallowedVespaImports(Map<String, ImportPackages.Import> imports, List<String> allowed) {
+ if (imports == null || imports.isEmpty()) return List.of();
+
+ var publicApi = allowed == null ? Set.of() : new HashSet<>(allowed);
+
+ Set<String> yahooImports = imports.keySet().stream()
+ .filter(pkg -> pkg.startsWith("com.yahoo") || pkg.startsWith("ai.vespa."))
+ .collect(Collectors.toSet());
+
+ List<String> disallowedImports = yahooImports.stream().collect(Collectors.groupingBy(publicApi::contains)).get(false);
+ return disallowedImports == null ? List.of() : disallowedImports;
+ }
+
public static PackageMetaData analyzePackages(Set<ClassFileMetaData> allClasses) {
Set<String> definedPackages = new HashSet<>();
Set<String> referencedPackages = new HashSet<>();
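
disallowedVespaImports above narrows the calculated imports down to Vespa-owned packages (com.yahoo.* and ai.vespa.*) that are not covered by the aggregated public-api list. A self-contained sketch (not part of the patch) of the same filtering over plain strings instead of ImportPackages.Import, with made-up inputs:

import java.util.List;
import java.util.Set;

public class DisallowedImportsSketch {

    // Keep only Vespa-owned packages that are not in the public-api set.
    static List<String> disallowedVespaImports(Set<String> importedPackages, Set<String> publicApi) {
        return importedPackages.stream()
                .filter(pkg -> pkg.startsWith("com.yahoo") || pkg.startsWith("ai.vespa."))
                .filter(pkg -> ! publicApi.contains(pkg))
                .sorted()
                .toList();
    }

    public static void main(String[] args) {
        Set<String> imports = Set.of("com.yahoo.vespa.defaults", "com.yahoo.io", "ai.vespa.http", "org.slf4j");
        Set<String> publicApi = Set.of("com.yahoo.vespa.defaults", "com.yahoo.text");
        // Prints [ai.vespa.http, com.yahoo.io]; org.slf4j is not Vespa-owned, defaults is public api.
        System.out.println(disallowedVespaImports(imports, publicApi));
    }
}

With inputs like these the result is [ai.vespa.http, com.yahoo.io], the same two packages NonPublicApiDetectionTest expects to find in the generated X-JDisc-Non-PublicApi-Import-Package header.
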
diff --git a/bundle-plugin/src/main/java/com/yahoo/container/plugin/mojo/AssembleContainerPluginMojo.java b/bundle-plugin/src/main/java/com/yahoo/container/plugin/mojo/AssembleContainerPluginMojo.java
index a1d3cd13b3a..bb2d61932f3 100644
--- a/bundle-plugin/src/main/java/com/yahoo/container/plugin/mojo/AssembleContainerPluginMojo.java
+++ b/bundle-plugin/src/main/java/com/yahoo/container/plugin/mojo/AssembleContainerPluginMojo.java
@@ -25,7 +25,7 @@ import java.util.jar.JarFile;
*/
@Mojo(name = "assemble-container-plugin", requiresDependencyResolution = ResolutionScope.COMPILE, threadSafe = true)
public class AssembleContainerPluginMojo extends AbstractAssembleBundleMojo {
- private static enum Dependencies {
+ private enum Dependencies {
WITH, WITHOUT
}
diff --git a/bundle-plugin/src/main/java/com/yahoo/container/plugin/mojo/GenerateOsgiManifestMojo.java b/bundle-plugin/src/main/java/com/yahoo/container/plugin/mojo/GenerateOsgiManifestMojo.java
index f5d3259c537..d217273e42b 100644
--- a/bundle-plugin/src/main/java/com/yahoo/container/plugin/mojo/GenerateOsgiManifestMojo.java
+++ b/bundle-plugin/src/main/java/com/yahoo/container/plugin/mojo/GenerateOsgiManifestMojo.java
@@ -26,6 +26,8 @@ import java.util.stream.Collectors;
import java.util.stream.Stream;
import static com.yahoo.container.plugin.bundle.AnalyzeBundle.exportedPackagesAggregated;
+import static com.yahoo.container.plugin.bundle.AnalyzeBundle.publicApiPackagesAggregated;
+import static com.yahoo.container.plugin.classanalysis.Packages.disallowedVespaImports;
import static com.yahoo.container.plugin.osgi.ExportPackages.exportsByPackageName;
import static com.yahoo.container.plugin.osgi.ImportPackages.calculateImports;
import static com.yahoo.container.plugin.util.Files.allDescendantFiles;
@@ -38,6 +40,14 @@ import static com.yahoo.container.plugin.util.Files.allDescendantFiles;
@Mojo(name = "generate-osgi-manifest", requiresDependencyResolution = ResolutionScope.TEST, threadSafe = true)
public class GenerateOsgiManifestMojo extends AbstractGenerateOsgiManifestMojo {
+ private enum BundleType {
+ CORE, // up to container-dev
+ INTERNAL, // other vespa bundles (need not be set for groupId 'com.yahoo.vespa')
+ USER
+ }
+
+ private static final String VESPA_GROUP_ID = "com.yahoo.vespa";
+
@Parameter
private String discApplicationClass = null;
@@ -56,6 +66,19 @@ public class GenerateOsgiManifestMojo extends AbstractGenerateOsgiManifestMojo {
@Parameter(alias = "Main-Class")
private String mainClass = null;
+ @Parameter(alias = "Bundle-Type")
+ private BundleType bundleType = BundleType.USER;
+
+ @Parameter(defaultValue = "false")
+ private boolean suppressWarningMissingImportPackages;
+ @Parameter(defaultValue = "false")
+ private boolean suppressWarningPublicApi;
+ @Parameter(defaultValue = "false")
+ private boolean suppressWarningOverlappingPackages;
+
+ @Parameter(defaultValue = "false")
+ private boolean failOnWarnings;
+
@Parameter(defaultValue = "false")
private boolean buildLegacyVespaPlatformBundle;
@@ -69,10 +92,12 @@ public class GenerateOsgiManifestMojo extends AbstractGenerateOsgiManifestMojo {
if (! isContainerDiscArtifact(project.getArtifact()))
throwIfInternalContainerArtifactsAreIncluded(artifactSet.getJarArtifactsToInclude());
- List<Export> exportedPackagesFromProvidedJars = exportedPackagesAggregated(
- artifactSet.getJarArtifactsProvided().stream().map(Artifact::getFile).toList());
+ List<Artifact> providedJarArtifacts = artifactSet.getJarArtifactsProvided();
+ List<File> providedJarFiles = providedJarArtifacts.stream().map(Artifact::getFile).toList();
+ List<Export> exportedPackagesFromProvidedJars = exportedPackagesAggregated(providedJarFiles);
+ List<String> publicApiPackagesFromProvidedJars = publicApiPackagesAggregated(providedJarFiles);
- // Packages from Export-Package headers in provided scoped jars
+ // Packages from Export-Package/PublicApi headers in provided scoped jars
Set<String> exportedPackagesFromProvidedDeps = ExportPackages.packageNames(exportedPackagesFromProvidedJars);
// Packaged defined in this project's code
@@ -86,12 +111,12 @@ public class GenerateOsgiManifestMojo extends AbstractGenerateOsgiManifestMojo {
logDebugPackageSets(exportedPackagesFromProvidedJars, includedPackages);
- if (hasJdiscCoreProvided(artifactSet.getJarArtifactsProvided())) {
+ if (hasJdiscCoreProvided(providedJarArtifacts)) {
// jdisc_core being provided guarantees that log output does not contain its exported packages
logMissingPackages(exportedPackagesFromProvidedDeps, projectPackages, compileJarsPackages, includedPackages);
- } else {
- getLog().warn("This project does not have jdisc_core as provided dependency, so the " +
- "generated 'Import-Package' OSGi header may be missing important packages.");
+ } else if (! suppressWarningMissingImportPackages) {
+ warnOrThrow(("This project does not have '%s' as provided dependency, so the generated 'Import-Package' " +
+ "OSGi header may be missing important packages.").formatted(wantedProvidedDependency()));
}
logOverlappingPackages(projectPackages, exportedPackagesFromProvidedDeps);
logUnnecessaryPackages(compileJarsPackages, exportedPackagesFromProvidedDeps);
@@ -100,9 +125,12 @@ public class GenerateOsgiManifestMojo extends AbstractGenerateOsgiManifestMojo {
includedPackages.definedPackages(),
exportsByPackageName(exportedPackagesFromProvidedJars));
+ List<String> nonPublicApiUsed = disallowedVespaImports(calculatedImports, publicApiPackagesFromProvidedJars);
+ logNonPublicApiUsage(nonPublicApiUsed);
Map<String, String> manifestContent = generateManifestContent(artifactSet.getJarArtifactsToInclude(), calculatedImports, includedPackages);
- addAdditionalManifestProperties(manifestContent);
+ addAdditionalManifestProperties(manifestContent, includedPackages);
+ addManifestPropertiesForUserBundles(manifestContent, nonPublicApiUsed);
createManifestFile(Paths.get(project.getBuild().getOutputDirectory()), manifestContent);
} catch (Exception e) {
@@ -110,7 +138,21 @@ public class GenerateOsgiManifestMojo extends AbstractGenerateOsgiManifestMojo {
}
}
- private void addAdditionalManifestProperties(Map<String, String> manifestContent) {
+ private String wantedProvidedDependency() {
+ return switch (effectiveBundleType()) {
+ case CORE -> "jdisc_core";
+ case INTERNAL -> "container-dev";
+ case USER -> "container";
+ };
+ }
+
+ private BundleType effectiveBundleType() {
+ if (bundleType != BundleType.USER) return bundleType;
+ return isVespaInternalGroupId(project.getGroupId()) ? BundleType.INTERNAL : BundleType.USER;
+ }
+
+ private void addAdditionalManifestProperties(Map<String, String> manifestContent, PackageTally includedPackages) {
+ addIfNotEmpty(manifestContent, "X-JDisc-PublicApi-Package", publicApi(includedPackages));
addIfNotEmpty(manifestContent, "Bundle-Activator", bundleActivator);
addIfNotEmpty(manifestContent, "X-JDisc-Privileged-Activator", jdiscPrivilegedActivator);
addIfNotEmpty(manifestContent, "Main-Class", mainClass);
@@ -119,6 +161,20 @@ public class GenerateOsgiManifestMojo extends AbstractGenerateOsgiManifestMojo {
addIfNotEmpty(manifestContent, "WebInfUrl", webInfUrl);
}
+ private void addManifestPropertiesForUserBundles(Map<String, String> manifestContent, List<String> nonPublicApiUsed) {
+ if (effectiveBundleType() != BundleType.USER) return;
+ addIfNotEmpty(manifestContent, "X-JDisc-Non-PublicApi-Import-Package", String.join(",", nonPublicApiUsed));
+ }
+
+ private void logNonPublicApiUsage(List<String> nonPublicApiUsed) {
+ if (suppressWarningPublicApi || effectiveBundleType() != BundleType.USER || nonPublicApiUsed.isEmpty()) return;
+ warnOrThrow("This project uses packages that are not part of Vespa's public api: %s".formatted(nonPublicApiUsed));
+ }
+
+ private static String publicApi(PackageTally tally) {
+ return tally.publicApiPackages().stream().sorted().collect(Collectors.joining(","));
+ }
+
private void logDebugPackageSets(List<Export> exportedPackagesFromProvidedJars, PackageTally includedPackages) {
if (getLog().isDebugEnabled()) {
getLog().debug("Referenced packages = " + includedPackages.referencedPackages());
@@ -154,10 +210,12 @@ public class GenerateOsgiManifestMojo extends AbstractGenerateOsgiManifestMojo {
private void logOverlappingPackages(PackageTally projectPackages,
Set<String> exportedPackagesFromProvidedDeps) {
+ if (suppressWarningOverlappingPackages) return;
+
Set<String> overlappingProjectPackages = Sets.intersection(projectPackages.definedPackages(), exportedPackagesFromProvidedDeps);
if (! overlappingProjectPackages.isEmpty()) {
- getLog().warn("This project defines packages that are also defined in provided scoped dependencies " +
- "(overlapping packages are strongly discouraged): " + overlappingProjectPackages);
+ warnOrThrow("This project defines packages that are also defined in provided scoped dependencies " +
+ "(overlapping packages are strongly discouraged): " + overlappingProjectPackages);
}
}
@@ -184,9 +242,8 @@ public class GenerateOsgiManifestMojo extends AbstractGenerateOsgiManifestMojo {
List<Artifact> unsupportedArtifacts = nonJarArtifacts.stream().filter(a -> ! a.getType().equals("pom"))
.toList();
- unsupportedArtifacts.forEach(artifact -> getLog()
- .warn(String.format("Unsupported artifact '%s': Type '%s' is not supported. Please file a feature request.",
- artifact.getId(), artifact.getType())));
+ unsupportedArtifacts.forEach(artifact -> warnOrThrow(String.format("Unsupported artifact '%s': Type '%s' is not supported. Please file a feature request.",
+ artifact.getId(), artifact.getType())));
}
private void throwIfInternalContainerArtifactsAreIncluded(Collection<Artifact> includedArtifacts) throws MojoExecutionException {
@@ -201,12 +258,18 @@ public class GenerateOsgiManifestMojo extends AbstractGenerateOsgiManifestMojo {
}
}
+ private boolean isVespaInternalGroupId(String groupId) {
+ return groupId.equals(VESPA_GROUP_ID)
+ || groupId.equals(VESPA_GROUP_ID + ".hosted")
+ || groupId.equals(VESPA_GROUP_ID + ".hosted.controller");
+ }
+
private boolean isJdiscComponentArtifact(Artifact a) {
- return a.getArtifactId().equals("component") && a.getGroupId().equals("com.yahoo.vespa");
+ return a.getArtifactId().equals("component") && a.getGroupId().equals(VESPA_GROUP_ID);
}
private boolean isContainerDiscArtifact(Artifact a) {
- return a.getArtifactId().equals("container-disc") && a.getGroupId().equals("com.yahoo.vespa");
+ return a.getArtifactId().equals("container-disc") && a.getGroupId().equals(VESPA_GROUP_ID);
}
private PackageTally getProjectClassesTally() {
@@ -219,4 +282,13 @@ public class GenerateOsgiManifestMojo extends AbstractGenerateOsgiManifestMojo {
return PackageTally.fromAnalyzedClassFiles(analyzedClasses);
}
+
+    private void warnOrThrow(String... messages) {
+ String message = String.join("\n", messages);
+ if (failOnWarnings) {
+ throw new RuntimeException(message);
+ }
+ getLog().warn(message);
+ }
+
}
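
The new warnings and manifest headers hinge on the effective bundle type: an explicit <bundleType> wins, otherwise Vespa-internal group ids resolve to INTERNAL and everything else stays USER. A self-contained sketch (not part of the patch) of that resolution, with the enum and group ids redeclared so it compiles on its own:

public class BundleTypeSketch {

    enum BundleType { CORE, INTERNAL, USER }

    private static final String VESPA_GROUP_ID = "com.yahoo.vespa";

    static boolean isVespaInternalGroupId(String groupId) {
        return groupId.equals(VESPA_GROUP_ID)
                || groupId.equals(VESPA_GROUP_ID + ".hosted")
                || groupId.equals(VESPA_GROUP_ID + ".hosted.controller");
    }

    // Mirrors effectiveBundleType(): only the default USER value is overridden by group id.
    static BundleType effectiveBundleType(BundleType configured, String groupId) {
        if (configured != BundleType.USER) return configured;
        return isVespaInternalGroupId(groupId) ? BundleType.INTERNAL : BundleType.USER;
    }

    public static void main(String[] args) {
        System.out.println(effectiveBundleType(BundleType.USER, "com.yahoo.vespa"));   // INTERNAL
        System.out.println(effectiveBundleType(BundleType.USER, "com.example.myapp")); // USER
        System.out.println(effectiveBundleType(BundleType.CORE, "com.example.myapp")); // CORE
    }
}

Only USER bundles get the X-JDisc-Non-PublicApi-Import-Package header and the non-public-api warning, and failOnWarnings turns every warnOrThrow call into a build failure.
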
diff --git a/bundle-plugin/src/test/java/com/yahoo/container/plugin/classanalysis/AnalyzeClassTest.java b/bundle-plugin/src/test/java/com/yahoo/container/plugin/classanalysis/AnalyzeClassTest.java
index 9a7aade7ffb..11fe4a14d74 100644
--- a/bundle-plugin/src/test/java/com/yahoo/container/plugin/classanalysis/AnalyzeClassTest.java
+++ b/bundle-plugin/src/test/java/com/yahoo/container/plugin/classanalysis/AnalyzeClassTest.java
@@ -3,28 +3,27 @@ package com.yahoo.container.plugin.classanalysis;
import com.yahoo.container.plugin.classanalysis.sampleclasses.Base;
import com.yahoo.container.plugin.classanalysis.sampleclasses.ClassAnnotation;
-import com.yahoo.container.plugin.classanalysis.sampleclasses.InvisibleAnnotation;
import com.yahoo.container.plugin.classanalysis.sampleclasses.Derived;
import com.yahoo.container.plugin.classanalysis.sampleclasses.DummyAnnotation;
-import com.yahoo.container.plugin.classanalysis.sampleclasses.InvisibleDummyAnnotation;
import com.yahoo.container.plugin.classanalysis.sampleclasses.Fields;
import com.yahoo.container.plugin.classanalysis.sampleclasses.Interface1;
import com.yahoo.container.plugin.classanalysis.sampleclasses.Interface2;
-import com.yahoo.container.plugin.classanalysis.sampleclasses.RecordWithOverride;
-import com.yahoo.container.plugin.classanalysis.sampleclasses.SwitchStatement;
+import com.yahoo.container.plugin.classanalysis.sampleclasses.InvisibleAnnotation;
+import com.yahoo.container.plugin.classanalysis.sampleclasses.InvisibleDummyAnnotation;
import com.yahoo.container.plugin.classanalysis.sampleclasses.MethodAnnotation;
import com.yahoo.container.plugin.classanalysis.sampleclasses.MethodInvocation;
+import com.yahoo.container.plugin.classanalysis.sampleclasses.RecordWithOverride;
+import com.yahoo.container.plugin.classanalysis.sampleclasses.SwitchStatement;
import com.yahoo.osgi.annotation.ExportPackage;
import com.yahoo.osgi.annotation.Version;
import org.junit.jupiter.api.Test;
import javax.security.auth.login.LoginException;
-import java.awt.Image;
+import java.awt.*;
import java.awt.image.ImagingOpException;
import java.awt.image.Kernel;
import java.util.List;
import java.util.Optional;
-import java.util.Set;
import static com.yahoo.container.plugin.classanalysis.TestUtilities.analyzeClass;
import static com.yahoo.container.plugin.classanalysis.TestUtilities.classFile;
@@ -133,7 +132,12 @@ public class AnalyzeClassTest {
@Test
void export_annotations_are_processed() {
assertEquals(Optional.of(new ExportPackageAnnotation(3, 1, 4, "TEST_QUALIFIER-2")),
- Analyze.analyzeClass(classFile("com.yahoo.container.plugin.classanalysis.sampleclasses.package-info")).getExportPackage());
+ Analyze.analyzeClass(classFile("com.yahoo.container.plugin.classanalysis.sampleclasses.package-info")).getExportPackage());
+ }
+
+ @Test
+ void publicApi_annotations_are_processed() {
+ assertTrue(Analyze.analyzeClass(classFile("com.yahoo.container.plugin.classanalysis.sampleclasses.package-info")).isPublicApi());
}
@Test
diff --git a/bundle-plugin/src/test/java/com/yahoo/container/plugin/classanalysis/sampleclasses/package-info.java b/bundle-plugin/src/test/java/com/yahoo/container/plugin/classanalysis/sampleclasses/package-info.java
index d0d1db97c26..5f69032db17 100644
--- a/bundle-plugin/src/test/java/com/yahoo/container/plugin/classanalysis/sampleclasses/package-info.java
+++ b/bundle-plugin/src/test/java/com/yahoo/container/plugin/classanalysis/sampleclasses/package-info.java
@@ -1,6 +1,8 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
@ExportPackage(version = @Version(major = 3, minor = 1, micro = 4, qualifier = "TEST_QUALIFIER-2"))
+@PublicApi
package com.yahoo.container.plugin.classanalysis.sampleclasses;
+import com.yahoo.api.annotations.PublicApi;
import com.yahoo.osgi.annotation.ExportPackage;
import com.yahoo.osgi.annotation.Version;
diff --git a/client/go/Makefile b/client/go/Makefile
index 9edfc940151..95da52c2383 100644
--- a/client/go/Makefile
+++ b/client/go/Makefile
@@ -86,7 +86,8 @@ $(DIST_TARGETS): DIST_NAME=vespa-cli_$(VERSION)_$(GOOS)_$(GOARCH)
$(DIST_TARGETS): dist-version ci manpages
$(DIST_TARGETS):
mkdir -p $(DIST)/$(DIST_NAME)/bin
- env CGO_ENABLED=0 GOOS=$(GOOS) GOARCH=$(GOARCH) go build -o $(DIST)/$(DIST_NAME)/bin $(GO_FLAGS) ./...
+ env CGO_ENABLED=0 GOOS=$(GOOS) GOARCH=$(GOARCH) go build -o $(DIST)/$(DIST_NAME)/bin $(GO_FLAGS) \
+ github.com/vespa-engine/vespa/client/go/internal/cli/cmd/vespa
cp -a $(PROJECT_ROOT)/LICENSE $(DIST)/$(DIST_NAME)
if [ "$(GOOS)" = "windows" ]; then \
cd $(DIST) && zip -r $(DIST)/$(DIST_NAME).zip $(DIST_NAME); \
diff --git a/client/go/go.mod b/client/go/go.mod
index c70ee5b75c8..5d1f6175e55 100644
--- a/client/go/go.mod
+++ b/client/go/go.mod
@@ -1,12 +1,13 @@
module github.com/vespa-engine/vespa/client/go
-go 1.18
+go 1.19
require (
github.com/alessio/shellescape v1.4.1
github.com/briandowns/spinner v1.23.0
github.com/fatih/color v1.15.0
- github.com/goccy/go-json v0.10.2
+ // This is the most recent version compatible with Go 1.19. Upgrade when we upgrade our Go version
+ github.com/go-json-experiment/json v0.0.0-20230216065249-540f01442424
github.com/klauspost/compress v1.16.5
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.18
diff --git a/client/go/go.sum b/client/go/go.sum
index 9b79c215864..03206b0c5e8 100644
--- a/client/go/go.sum
+++ b/client/go/go.sum
@@ -11,8 +11,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
-github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
-github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/go-json-experiment/json v0.0.0-20230216065249-540f01442424 h1:I1EK0t+BDH+kvlozNqrvzKqsWeM2QUKxXH0iW2fjDDw=
+github.com/go-json-experiment/json v0.0.0-20230216065249-540f01442424/go.mod h1:I+I5/LT2lLP0eZsBNaVDrOrYASx9h7o7mRHmy+535/A=
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
diff --git a/client/go/internal/admin/vespa-wrapper/services/start.go b/client/go/internal/admin/vespa-wrapper/services/start.go
index f47d99714f1..5e17ddb8c8d 100644
--- a/client/go/internal/admin/vespa-wrapper/services/start.go
+++ b/client/go/internal/admin/vespa-wrapper/services/start.go
@@ -54,6 +54,7 @@ func VespaStartServices() int {
vespa.RunPreStart()
trace.Debug("prestart ok")
util.TuneResourceLimits()
+ increase_vm_max_map_count()
trace.Debug("resource limits ok")
checkjava()
trace.Debug("java ok")
diff --git a/client/go/internal/admin/vespa-wrapper/services/tuning.go b/client/go/internal/admin/vespa-wrapper/services/tuning.go
index f922495812f..11b4030c4bb 100644
--- a/client/go/internal/admin/vespa-wrapper/services/tuning.go
+++ b/client/go/internal/admin/vespa-wrapper/services/tuning.go
@@ -6,6 +6,8 @@ package services
import (
"fmt"
"os"
+ "strconv"
+ "strings"
"github.com/vespa-engine/vespa/client/go/internal/admin/trace"
)
@@ -16,6 +18,7 @@ const (
TRANSPARENT_HUGEPAGE_DEFRAG = "/sys/kernel/mm/transparent_hugepage/defrag"
TRANSPARENT_HUGEPAGE_KH_DEFRAG = "/sys/kernel/mm/transparent_hugepage/khugepaged/defrag"
ZONE_RECLAIM_CTL = "/proc/sys/vm/zone_reclaim_mode"
+ VM_MAX_MAP_COUNT = "/proc/sys/vm/max_map_count"
)
func maybeEcho(fileName, toWrite string) bool {
@@ -49,3 +52,30 @@ func drop_caches() {
trace.Debug("dropped caches")
}
}
+
+func increase_vm_max_map_count() {
+ const need_minimum = 262144
+ var min_as_text string = strconv.Itoa(need_minimum)
+ const name = "vm.max_map_count"
+ trace.Debug("Checking: " + VM_MAX_MAP_COUNT)
+ data, err := os.ReadFile(VM_MAX_MAP_COUNT)
+ if err != nil {
+ trace.Info("Could not check", name, " - assuming it is OK and proceeding")
+ return
+ }
+ line := strings.TrimSuffix(string(data), "\n")
+ qline := "[" + line + "]"
+ num, err := strconv.Atoi(line)
+ if err != nil || num <= 0 {
+ trace.Info("Bad data", qline, "checking", name, " - assuming it is OK and proceeding")
+ return
+ }
+ if num < need_minimum {
+ trace.Info("Too low", name, "["+line+"] - trying to increase it to", min_as_text)
+ if maybeEcho(VM_MAX_MAP_COUNT, min_as_text) {
+ trace.Debug("Increased:", name)
+ } else {
+ trace.Warning("Could not increase", name, "- current value", qline, "too low, should be at least", min_as_text)
+ }
+ }
+}
diff --git a/client/go/internal/cli/cmd/api_key.go b/client/go/internal/cli/cmd/api_key.go
index 367a515f3c3..8b3780ab82b 100644
--- a/client/go/internal/cli/cmd/api_key.go
+++ b/client/go/internal/cli/cmd/api_key.go
@@ -58,11 +58,11 @@ func doApiKey(cli *CLI, overwriteKey bool, args []string) error {
if err != nil {
return err
}
- targetType, err := cli.config.targetType()
+ targetType, err := cli.targetType()
if err != nil {
return err
}
- system, err := cli.system(targetType)
+ system, err := cli.system(targetType.name)
if err != nil {
return err
}
diff --git a/client/go/internal/cli/cmd/cert.go b/client/go/internal/cli/cmd/cert.go
index 48bad974c3f..95206b7e77d 100644
--- a/client/go/internal/cli/cmd/cert.go
+++ b/client/go/internal/cli/cmd/cert.go
@@ -107,15 +107,15 @@ func doCert(cli *CLI, overwriteCertificate, noApplicationPackage bool, args []st
return err
}
}
- targetType, err := cli.config.targetType()
+ targetType, err := cli.targetType()
if err != nil {
return err
}
- privateKeyFile, err := cli.config.privateKeyPath(app, targetType)
+ privateKeyFile, err := cli.config.privateKeyPath(app, targetType.name)
if err != nil {
return err
}
- certificateFile, err := cli.config.certificatePath(app, targetType)
+ certificateFile, err := cli.config.certificatePath(app, targetType.name)
if err != nil {
return err
}
@@ -178,11 +178,11 @@ func doCertAdd(cli *CLI, overwriteCertificate bool, args []string) error {
if err != nil {
return err
}
- targetType, err := cli.config.targetType()
+ targetType, err := cli.targetType()
if err != nil {
return err
}
- certificateFile, err := cli.config.certificatePath(app, targetType)
+ certificateFile, err := cli.config.certificatePath(app, targetType.name)
if err != nil {
return err
}
diff --git a/client/go/internal/cli/cmd/config.go b/client/go/internal/cli/cmd/config.go
index e2132814386..409254c4349 100644
--- a/client/go/internal/cli/cmd/config.go
+++ b/client/go/internal/cli/cmd/config.go
@@ -329,7 +329,7 @@ func (c *Config) write() error {
return c.config.WriteFile(configFile)
}
-func (c *Config) targetType() (string, error) {
+func (c *Config) targetOrURL() (string, error) {
targetType, ok := c.get(targetFlag)
if !ok {
return "", fmt.Errorf("target is unset")
@@ -515,7 +515,7 @@ func (c *Config) readAPIKey(cli *CLI, system vespa.System, tenantName string) ([
if _, err := os.Stat(c.authConfigPath()); err == nil {
return nil, nil // We have auth config, so we should prefer Auth0 over API key
}
- cli.printWarning("Authenticating with API key. This is discouraged in non-CI environments", "Authenticate with 'vespa auth login' instead")
+ cli.printWarning("Authenticating with API key, intended for use in CI environments.", "Authenticate with 'vespa auth login' instead")
}
return os.ReadFile(c.apiKeyPath(tenantName))
}
diff --git a/client/go/internal/cli/cmd/config_test.go b/client/go/internal/cli/cmd/config_test.go
index 66b65bf402b..3a81b93ea0d 100644
--- a/client/go/internal/cli/cmd/config_test.go
+++ b/client/go/internal/cli/cmd/config_test.go
@@ -261,6 +261,22 @@ func TestConfigReadTLSOptions(t *testing.T) {
)
}
+func TestConfigTargetResolving(t *testing.T) {
+ cli, _, _ := newTestCLI(t)
+ require.Nil(t, cli.Run("config", "set", "target", "https://example.com"))
+ assertTargetType(t, vespa.TargetCustom, cli)
+ require.Nil(t, cli.Run("config", "set", "target", "https://foo.bar.vespa-team.no-north-1.dev.z.vespa-app.cloud"))
+ assertTargetType(t, vespa.TargetCloud, cli)
+ require.Nil(t, cli.Run("config", "set", "target", "https://foo.bar.vespa-team.no-north-1.dev.z.vespa.oath.cloud:4443"))
+ assertTargetType(t, vespa.TargetHosted, cli)
+}
+
+func assertTargetType(t *testing.T, expected string, cli *CLI) {
+ targetType, err := cli.targetType()
+ require.Nil(t, err)
+ assert.Equal(t, expected, targetType.name)
+}
+
func assertTLSOptions(t *testing.T, homeDir string, app vespa.ApplicationID, target string, want vespa.TLSOptions, envVars ...string) {
t.Helper()
envVars = append(envVars, "VESPA_CLI_HOME="+homeDir)
diff --git a/client/go/internal/cli/cmd/document.go b/client/go/internal/cli/cmd/document.go
index b5b63fd32df..07a98d2e626 100644
--- a/client/go/internal/cli/cmd/document.go
+++ b/client/go/internal/cli/cmd/document.go
@@ -5,15 +5,22 @@
package cmd
import (
+ "bytes"
+ "errors"
"fmt"
"io"
+ "net/http"
+ "os"
+ "strconv"
"strings"
"time"
"github.com/fatih/color"
"github.com/spf13/cobra"
+ "github.com/vespa-engine/vespa/client/go/internal/curl"
"github.com/vespa-engine/vespa/client/go/internal/util"
"github.com/vespa-engine/vespa/client/go/internal/vespa"
+ "github.com/vespa-engine/vespa/client/go/internal/vespa/document"
)
func addDocumentFlags(cmd *cobra.Command, printCurl *bool, timeoutSecs *int) {
@@ -21,6 +28,128 @@ func addDocumentFlags(cmd *cobra.Command, printCurl *bool, timeoutSecs *int) {
cmd.PersistentFlags().IntVarP(timeoutSecs, "timeout", "T", 60, "Timeout for the document request in seconds")
}
+type serviceWithCurl struct {
+ curlCmdWriter io.Writer
+ bodyFile string
+ service *vespa.Service
+}
+
+func (s *serviceWithCurl) Do(request *http.Request, timeout time.Duration) (*http.Response, error) {
+ cmd, err := curl.RawArgs(request.URL.String())
+ if err != nil {
+ return nil, err
+ }
+ cmd.Method = request.Method
+ for k, vs := range request.Header {
+ for _, v := range vs {
+ cmd.Header(k, v)
+ }
+ }
+ if s.bodyFile != "" {
+ cmd.WithBodyFile(s.bodyFile)
+ }
+ cmd.Certificate = s.service.TLSOptions.CertificateFile
+ cmd.PrivateKey = s.service.TLSOptions.PrivateKeyFile
+ out := cmd.String() + "\n"
+ if _, err := io.WriteString(s.curlCmdWriter, out); err != nil {
+ return nil, err
+ }
+ return s.service.Do(request, timeout)
+}
+
+func documentClient(cli *CLI, timeoutSecs int, printCurl bool) (*document.Client, *serviceWithCurl, error) {
+ docService, err := documentService(cli)
+ if err != nil {
+ return nil, nil, err
+ }
+ service := &serviceWithCurl{curlCmdWriter: io.Discard, service: docService}
+ if printCurl {
+ service.curlCmdWriter = cli.Stderr
+ }
+ client, err := document.NewClient(document.ClientOptions{
+ Compression: document.CompressionAuto,
+ Timeout: time.Duration(timeoutSecs) * time.Second,
+ BaseURL: docService.BaseURL,
+ NowFunc: time.Now,
+ }, []util.HTTPClient{service})
+ if err != nil {
+ return nil, nil, err
+ }
+ return client, service, nil
+}
+
+func sendOperation(op document.Operation, args []string, timeoutSecs int, printCurl bool, cli *CLI) error {
+ client, service, err := documentClient(cli, timeoutSecs, printCurl)
+ if err != nil {
+ return err
+ }
+ id := ""
+ filename := args[0]
+ if len(args) > 1 {
+ id = args[0]
+ filename = args[1]
+ }
+ f, err := os.Open(filename)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ doc, err := document.NewDecoder(f).Decode()
+ if errors.Is(err, document.ErrMissingId) {
+ if id == "" {
+			return fmt.Errorf("no document id given, neither as argument nor as a 'put', 'update' or 'remove' key in the JSON file")
+ }
+ } else if err != nil {
+ return err
+ }
+ if id != "" {
+ docId, err := document.ParseId(id)
+ if err != nil {
+ return err
+ }
+ doc.Id = docId
+ }
+ if op > -1 {
+ if id == "" && op != doc.Operation {
+ return fmt.Errorf("wanted document operation is %s, but JSON file specifies %s", op, doc.Operation)
+ }
+ doc.Operation = op
+ }
+ if doc.Body != nil {
+ service.bodyFile = f.Name()
+ }
+ result := client.Send(doc)
+ return printResult(cli, operationResult(false, doc, service.service, result), false)
+}
+
+func readDocument(id string, timeoutSecs int, printCurl bool, cli *CLI) error {
+ client, service, err := documentClient(cli, timeoutSecs, printCurl)
+ if err != nil {
+ return err
+ }
+ docId, err := document.ParseId(id)
+ if err != nil {
+ return err
+ }
+ result := client.Get(docId)
+ return printResult(cli, operationResult(true, document.Document{Id: docId}, service.service, result), true)
+}
+
+func operationResult(read bool, doc document.Document, service *vespa.Service, result document.Result) util.OperationResult {
+ bodyReader := bytes.NewReader(result.Body)
+ if result.HTTPStatus == 200 {
+ if read {
+ return util.SuccessWithPayload("Read "+doc.Id.String(), util.ReaderToJSON(bodyReader))
+ } else {
+ return util.Success(doc.Operation.String() + " " + doc.Id.String())
+ }
+ }
+ if result.HTTPStatus/100 == 4 {
+ return util.FailureWithPayload("Invalid document operation: Status "+strconv.Itoa(result.HTTPStatus), util.ReaderToJSON(bodyReader))
+ }
+ return util.FailureWithPayload(service.Description()+" at "+service.BaseURL+": Status "+strconv.Itoa(result.HTTPStatus), util.ReaderToJSON(bodyReader))
+}
+
func newDocumentCmd(cli *CLI) *cobra.Command {
var (
printCurl bool
@@ -44,11 +173,7 @@ should be used instead of this.`,
SilenceUsage: true,
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
- service, err := documentService(cli)
- if err != nil {
- return err
- }
- return printResult(cli, vespa.Send(args[0], service, operationOptions(cli.Stderr, printCurl, timeoutSecs)), false)
+ return sendOperation(-1, args, timeoutSecs, printCurl, cli)
},
}
addDocumentFlags(cmd, &printCurl, &timeoutSecs)
@@ -72,15 +197,7 @@ $ vespa document put id:mynamespace:music::a-head-full-of-dreams src/test/resour
DisableAutoGenTag: true,
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
- service, err := documentService(cli)
- if err != nil {
- return err
- }
- if len(args) == 1 {
- return printResult(cli, vespa.Put("", args[0], service, operationOptions(cli.Stderr, printCurl, timeoutSecs)), false)
- } else {
- return printResult(cli, vespa.Put(args[0], args[1], service, operationOptions(cli.Stderr, printCurl, timeoutSecs)), false)
- }
+ return sendOperation(document.OperationPut, args, timeoutSecs, printCurl, cli)
},
}
addDocumentFlags(cmd, &printCurl, &timeoutSecs)
@@ -103,15 +220,7 @@ $ vespa document update id:mynamespace:music::a-head-full-of-dreams src/test/res
DisableAutoGenTag: true,
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
- service, err := documentService(cli)
- if err != nil {
- return err
- }
- if len(args) == 1 {
- return printResult(cli, vespa.Update("", args[0], service, operationOptions(cli.Stderr, printCurl, timeoutSecs)), false)
- } else {
- return printResult(cli, vespa.Update(args[0], args[1], service, operationOptions(cli.Stderr, printCurl, timeoutSecs)), false)
- }
+ return sendOperation(document.OperationUpdate, args, timeoutSecs, printCurl, cli)
},
}
addDocumentFlags(cmd, &printCurl, &timeoutSecs)
@@ -134,14 +243,20 @@ $ vespa document remove id:mynamespace:music::a-head-full-of-dreams`,
DisableAutoGenTag: true,
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
- service, err := documentService(cli)
- if err != nil {
- return err
- }
if strings.HasPrefix(args[0], "id:") {
- return printResult(cli, vespa.RemoveId(args[0], service, operationOptions(cli.Stderr, printCurl, timeoutSecs)), false)
+ client, service, err := documentClient(cli, timeoutSecs, printCurl)
+ if err != nil {
+ return err
+ }
+ id, err := document.ParseId(args[0])
+ if err != nil {
+ return err
+ }
+ doc := document.Document{Id: id, Operation: document.OperationRemove}
+ result := client.Send(doc)
+ return printResult(cli, operationResult(false, doc, service.service, result), false)
} else {
- return printResult(cli, vespa.RemoveOperation(args[0], service, operationOptions(cli.Stderr, printCurl, timeoutSecs)), false)
+ return sendOperation(document.OperationRemove, args, timeoutSecs, printCurl, cli)
}
},
}
@@ -162,11 +277,7 @@ func newDocumentGetCmd(cli *CLI) *cobra.Command {
SilenceUsage: true,
Example: `$ vespa document get id:mynamespace:music::a-head-full-of-dreams`,
RunE: func(cmd *cobra.Command, args []string) error {
- service, err := documentService(cli)
- if err != nil {
- return err
- }
- return printResult(cli, vespa.Get(args[0], service, operationOptions(cli.Stderr, printCurl, timeoutSecs)), true)
+ return readDocument(args[0], timeoutSecs, printCurl, cli)
},
}
addDocumentFlags(cmd, &printCurl, &timeoutSecs)
@@ -181,17 +292,6 @@ func documentService(cli *CLI) (*vespa.Service, error) {
return cli.service(target, vespa.DocumentService, 0, cli.config.cluster())
}
-func operationOptions(stderr io.Writer, printCurl bool, timeoutSecs int) vespa.OperationOptions {
- curlOutput := io.Discard
- if printCurl {
- curlOutput = stderr
- }
- return vespa.OperationOptions{
- CurlOutput: curlOutput,
- Timeout: time.Second * time.Duration(timeoutSecs),
- }
-}
-
func printResult(cli *CLI, result util.OperationResult, payloadOnlyOnSuccess bool) error {
out := cli.Stdout
if !result.Success {
diff --git a/client/go/internal/cli/cmd/document_test.go b/client/go/internal/cli/cmd/document_test.go
index bf9cc0404dc..00f98ee1333 100644
--- a/client/go/internal/cli/cmd/document_test.go
+++ b/client/go/internal/cli/cmd/document_test.go
@@ -5,6 +5,7 @@
package cmd
import (
+ "encoding/json"
"os"
"strconv"
"testing"
@@ -20,6 +21,11 @@ func TestDocumentSendPut(t *testing.T) {
"put", "POST", "id:mynamespace:music::a-head-full-of-dreams", "testdata/A-Head-Full-of-Dreams-Put.json", t)
}
+func TestDocumentSendPutWithIdInFile(t *testing.T) {
+ assertDocumentSend([]string{"document", "testdata/A-Head-Full-of-Dreams-Put-Id.json"},
+ "put", "POST", "id:mynamespace:music::a-head-full-of-dreams", "testdata/A-Head-Full-of-Dreams-Put-Id.json", t)
+}
+
func TestDocumentSendPutVerbose(t *testing.T) {
assertDocumentSend([]string{"document", "-v", "testdata/A-Head-Full-of-Dreams-Put.json"},
"put", "POST", "id:mynamespace:music::a-head-full-of-dreams", "testdata/A-Head-Full-of-Dreams-Put.json", t)
@@ -32,7 +38,7 @@ func TestDocumentSendUpdate(t *testing.T) {
func TestDocumentSendRemove(t *testing.T) {
assertDocumentSend([]string{"document", "testdata/A-Head-Full-of-Dreams-Remove.json"},
- "remove", "DELETE", "id:mynamespace:music::a-head-full-of-dreams", "testdata/A-Head-Full-of-Dreams-Remove.json", t)
+ "remove", "DELETE", "id:mynamespace:music::a-head-full-of-dreams", "", t)
}
func TestDocumentPutWithIdArg(t *testing.T) {
@@ -57,19 +63,24 @@ func TestDocumentUpdateWithoutIdArg(t *testing.T) {
func TestDocumentRemoveWithIdArg(t *testing.T) {
assertDocumentSend([]string{"document", "remove", "id:mynamespace:music::a-head-full-of-dreams"},
- "remove", "DELETE", "id:mynamespace:music::a-head-full-of-dreams", "testdata/A-Head-Full-of-Dreams-Remove.json", t)
+ "remove", "DELETE", "id:mynamespace:music::a-head-full-of-dreams", "", t)
}
func TestDocumentRemoveWithoutIdArg(t *testing.T) {
assertDocumentSend([]string{"document", "remove", "testdata/A-Head-Full-of-Dreams-Remove.json"},
- "remove", "DELETE", "id:mynamespace:music::a-head-full-of-dreams", "testdata/A-Head-Full-of-Dreams-Remove.json", t)
+ "remove", "DELETE", "id:mynamespace:music::a-head-full-of-dreams", "", t)
+}
+
+func TestDocumentRemoveWithoutIdArgVerbose(t *testing.T) {
+ assertDocumentSend([]string{"document", "remove", "-v", "testdata/A-Head-Full-of-Dreams-Remove.json"},
+ "remove", "DELETE", "id:mynamespace:music::a-head-full-of-dreams", "", t)
}
func TestDocumentSendMissingId(t *testing.T) {
cli, _, stderr := newTestCLI(t)
assert.NotNil(t, cli.Run("document", "put", "testdata/A-Head-Full-of-Dreams-Without-Operation.json"))
assert.Equal(t,
- "Error: No document id given neither as argument or as a 'put' key in the json file\n",
+ "Error: no document id given neither as argument or as a 'put', 'update' or 'remove' key in the JSON file\n",
stderr.String())
}
@@ -77,7 +88,7 @@ func TestDocumentSendWithDisagreeingOperations(t *testing.T) {
cli, _, stderr := newTestCLI(t)
assert.NotNil(t, cli.Run("document", "update", "testdata/A-Head-Full-of-Dreams-Put.json"))
assert.Equal(t,
- "Error: Wanted document operation is update but the JSON file specifies put\n",
+ "Error: wanted document operation is update, but JSON file specifies put\n",
stderr.String())
}
@@ -103,7 +114,7 @@ func assertDocumentSend(arguments []string, expectedOperation string, expectedMe
t.Fatal(err)
}
expectedPath, _ := vespa.IdToURLPath(expectedDocumentId)
- expectedURL := documentURL + "/document/v1/" + expectedPath
+ expectedURL := documentURL + "/document/v1/" + expectedPath + "?timeout=60000ms"
assert.Nil(t, cli.Run(arguments...))
verbose := false
@@ -113,16 +124,29 @@ func assertDocumentSend(arguments []string, expectedOperation string, expectedMe
}
}
if verbose {
- expectedCurl := "curl -X " + expectedMethod + " -H 'Content-Type: application/json' --data-binary @" + expectedPayloadFile + " " + expectedURL + "\n"
+ expectedCurl := "curl -X " + expectedMethod + " -H 'Content-Type: application/json; charset=utf-8' -H 'User-Agent: Vespa CLI/0.0.0-devel'"
+ if expectedPayloadFile != "" {
+ expectedCurl += " --data-binary @" + expectedPayloadFile
+ }
+ expectedCurl += " '" + expectedURL + "'\n"
assert.Equal(t, expectedCurl, stderr.String())
}
assert.Equal(t, "Success: "+expectedOperation+" "+expectedDocumentId+"\n", stdout.String())
assert.Equal(t, expectedURL, client.LastRequest.URL.String())
- assert.Equal(t, "application/json", client.LastRequest.Header.Get("Content-Type"))
+ assert.Equal(t, "application/json; charset=utf-8", client.LastRequest.Header.Get("Content-Type"))
assert.Equal(t, expectedMethod, client.LastRequest.Method)
- expectedPayload, _ := os.ReadFile(expectedPayloadFile)
- assert.Equal(t, string(expectedPayload), util.ReaderToString(client.LastRequest.Body))
+ if expectedPayloadFile != "" {
+ data, err := os.ReadFile(expectedPayloadFile)
+ assert.Nil(t, err)
+ var expectedPayload struct {
+ Fields json.RawMessage `json:"fields"`
+ }
+ assert.Nil(t, json.Unmarshal(data, &expectedPayload))
+ assert.Equal(t, `{"fields":`+string(expectedPayload.Fields)+"}", util.ReaderToString(client.LastRequest.Body))
+ } else {
+ assert.Nil(t, client.LastRequest.Body)
+ }
}
func assertDocumentGet(arguments []string, documentId string, t *testing.T) {
@@ -170,7 +194,7 @@ func assertDocumentServerError(t *testing.T, status int, errorMessage string) {
"id:mynamespace:music::a-head-full-of-dreams",
"testdata/A-Head-Full-of-Dreams-Put.json"))
assert.Equal(t,
- "Error: Container (document API) at 127.0.0.1:8080: Status "+strconv.Itoa(status)+"\n\n"+errorMessage+"\n",
+ "Error: Container (document API) at http://127.0.0.1:8080: Status "+strconv.Itoa(status)+"\n\n"+errorMessage+"\n",
stderr.String())
}
diff --git a/client/go/internal/cli/cmd/feed.go b/client/go/internal/cli/cmd/feed.go
index fa87c420f16..6d368cb210b 100644
--- a/client/go/internal/cli/cmd/feed.go
+++ b/client/go/internal/cli/cmd/feed.go
@@ -1,6 +1,7 @@
package cmd
import (
+ "bufio"
"encoding/json"
"fmt"
"io"
@@ -164,7 +165,7 @@ func feedFiles(files []string, dispatcher *document.Dispatcher, cli *CLI) {
}
func dispatchFrom(r io.ReadCloser, dispatcher *document.Dispatcher, cli *CLI) {
- dec := document.NewDecoder(r)
+ dec := document.NewDecoder(bufio.NewReaderSize(r, 1<<26)) // Buffer up to 64M of data at a time
defer r.Close()
for {
doc, err := dec.Decode()
@@ -244,7 +245,7 @@ type feedSummary struct {
RequestCount int64 `json:"http.request.count"`
RequestBytes int64 `json:"http.request.bytes"`
RequestRate number `json:"http.request.MBps"`
- ExceptionCount int64 `json:"http.exception.count"` // same as ErrorCount, for compatability with vespa-feed-client
+ ExceptionCount int64 `json:"http.exception.count"` // same as ErrorCount, for compatibility with vespa-feed-client output
ResponseCount int64 `json:"http.response.count"`
ResponseBytes int64 `json:"http.response.bytes"`
@@ -264,8 +265,8 @@ func mbps(bytes int64, duration time.Duration) float64 {
func writeSummaryJSON(w io.Writer, stats document.Stats, duration time.Duration) error {
summary := feedSummary{
Seconds: number(duration.Seconds()),
- SuccessCount: stats.Successes(),
- SuccessRate: number(float64(stats.Successes()) / math.Max(1, duration.Seconds())),
+ SuccessCount: stats.Successful(),
+ SuccessRate: number(float64(stats.Successful()) / math.Max(1, duration.Seconds())),
ErrorCount: stats.Errors,
InflightCount: stats.Inflight,
@@ -277,7 +278,7 @@ func writeSummaryJSON(w io.Writer, stats document.Stats, duration time.Duration)
ResponseCount: stats.Responses,
ResponseBytes: stats.BytesRecv,
ResponseRate: number(mbps(stats.BytesRecv, duration)),
- ResponseErrorCount: stats.Responses - stats.Successes(),
+ ResponseErrorCount: stats.Unsuccessful(),
ResponseMinLatency: stats.MinLatency.Milliseconds(),
ResponseAvgLatency: stats.AvgLatency().Milliseconds(),
ResponseMaxLatency: stats.MaxLatency.Milliseconds(),
diff --git a/client/go/internal/cli/cmd/login.go b/client/go/internal/cli/cmd/login.go
index 9ac2262e78d..d2075bdfcf0 100644
--- a/client/go/internal/cli/cmd/login.go
+++ b/client/go/internal/cli/cmd/login.go
@@ -27,11 +27,11 @@ func newLoginCmd(cli *CLI) *cobra.Command {
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
- targetType, err := cli.config.targetType()
+ targetType, err := cli.targetType()
if err != nil {
return err
}
- system, err := cli.system(targetType)
+ system, err := cli.system(targetType.name)
if err != nil {
return err
}
diff --git a/client/go/internal/cli/cmd/logout.go b/client/go/internal/cli/cmd/logout.go
index 32e7cd9783b..93f7cb6270f 100644
--- a/client/go/internal/cli/cmd/logout.go
+++ b/client/go/internal/cli/cmd/logout.go
@@ -14,11 +14,11 @@ func newLogoutCmd(cli *CLI) *cobra.Command {
DisableAutoGenTag: true,
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
- targetType, err := cli.config.targetType()
+ targetType, err := cli.targetType()
if err != nil {
return err
}
- system, err := cli.system(targetType)
+ system, err := cli.system(targetType.name)
if err != nil {
return err
}
diff --git a/client/go/internal/cli/cmd/root.go b/client/go/internal/cli/cmd/root.go
index c4012024426..17c4fc41625 100644
--- a/client/go/internal/cli/cmd/root.go
+++ b/client/go/internal/cli/cmd/root.go
@@ -6,6 +6,7 @@ import (
"fmt"
"io"
"log"
+ "net/url"
"os"
"os/exec"
"strings"
@@ -73,6 +74,11 @@ type targetOptions struct {
noCertificate bool
}
+type targetType struct {
+ name string
+ url string
+}
+
// errHint creates a new CLI error, with optional hints that will be printed after the error
func errHint(err error, hints ...string) ErrCLI { return ErrCLI{Status: 1, hints: hints, error: err} }
@@ -297,7 +303,19 @@ func (c *CLI) printWarning(msg interface{}, hints ...string) {
// target creates a target according the configuration of this CLI and given opts.
func (c *CLI) target(opts targetOptions) (vespa.Target, error) {
- target, err := c.createTarget(opts)
+ targetType, err := c.targetType()
+ if err != nil {
+ return nil, err
+ }
+ var target vespa.Target
+ switch targetType.name {
+ case vespa.TargetLocal, vespa.TargetCustom:
+ target, err = c.createCustomTarget(targetType.name, targetType.url)
+ case vespa.TargetCloud, vespa.TargetHosted:
+ target, err = c.createCloudTarget(targetType.name, opts, targetType.url)
+ default:
+ return nil, errHint(fmt.Errorf("invalid target: %s", targetType.name), "Valid targets are 'local', 'cloud', 'hosted' or a URL")
+ }
if err != nil {
return nil, err
}
@@ -309,24 +327,39 @@ func (c *CLI) target(opts targetOptions) (vespa.Target, error) {
return target, nil
}
-func (c *CLI) createTarget(opts targetOptions) (vespa.Target, error) {
- targetType, err := c.config.targetType()
+// targetType resolves the real target type and its custom URL (if any)
+func (c *CLI) targetType() (targetType, error) {
+ v, err := c.config.targetOrURL()
if err != nil {
- return nil, err
+ return targetType{}, err
}
- customURL := ""
- if strings.HasPrefix(targetType, "http") {
- customURL = targetType
- targetType = vespa.TargetCustom
+ tt := targetType{name: v}
+ if strings.HasPrefix(tt.name, "http://") || strings.HasPrefix(tt.name, "https://") {
+ tt.url = tt.name
+ tt.name, err = c.targetFromURL(tt.url)
+ if err != nil {
+ return targetType{}, err
+ }
}
- switch targetType {
- case vespa.TargetLocal, vespa.TargetCustom:
- return c.createCustomTarget(targetType, customURL)
- case vespa.TargetCloud, vespa.TargetHosted:
- return c.createCloudTarget(targetType, opts)
- default:
- return nil, errHint(fmt.Errorf("invalid target: %s", targetType), "Valid targets are 'local', 'cloud', 'hosted' or an URL")
+ return tt, nil
+}
+
+func (c *CLI) targetFromURL(customURL string) (string, error) {
+ u, err := url.Parse(customURL)
+ if err != nil {
+ return "", err
+ }
+ // Check if URL belongs to a cloud target
+ for _, cloudTarget := range []string{vespa.TargetHosted, vespa.TargetCloud} {
+ system, err := c.system(cloudTarget)
+ if err != nil {
+ return "", err
+ }
+ if strings.HasSuffix(u.Hostname(), "."+system.EndpointDomain) {
+ return cloudTarget, nil
+ }
}
+ return vespa.TargetCustom, nil
}
func (c *CLI) createCustomTarget(targetType, customURL string) (vespa.Target, error) {
@@ -344,7 +377,7 @@ func (c *CLI) createCustomTarget(targetType, customURL string) (vespa.Target, er
}
}
-func (c *CLI) createCloudTarget(targetType string, opts targetOptions) (vespa.Target, error) {
+func (c *CLI) createCloudTarget(targetType string, opts targetOptions, customURL string) (vespa.Target, error) {
system, err := c.system(targetType)
if err != nil {
return nil, err
@@ -409,6 +442,7 @@ func (c *CLI) createCloudTarget(targetType string, opts targetOptions) (vespa.Ta
deploymentOptions := vespa.CloudDeploymentOptions{
Deployment: deployment,
TLSOptions: deploymentTLSOptions,
+ CustomURL: customURL,
ClusterURLs: endpoints,
}
logLevel := opts.logLevel
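
A standalone sketch of the URL classification targetFromURL performs when -t is given a URL: if the hostname falls under a cloud system's endpoint domain, the target resolves to that cloud system; otherwise it stays a custom target. The endpoint domain used below is a made-up placeholder, not the real one.

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// classify applies the same suffix check as targetFromURL, with the endpoint
// domain passed in explicitly instead of being looked up from the system.
func classify(raw, endpointDomain string) (string, error) {
	u, err := url.Parse(raw)
	if err != nil {
		return "", err
	}
	if strings.HasSuffix(u.Hostname(), "."+endpointDomain) {
		return "cloud", nil
	}
	return "custom", nil
}

func main() {
	// "vespa-cloud.example" is a hypothetical endpoint domain for illustration.
	fmt.Println(classify("https://myapp.tenant.vespa-cloud.example", "vespa-cloud.example"))
	fmt.Println(classify("http://mycontainertarget:8080", "vespa-cloud.example"))
}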
diff --git a/client/go/internal/cli/cmd/status_test.go b/client/go/internal/cli/cmd/status_test.go
index a3cae7c3fe4..76efea55503 100644
--- a/client/go/internal/cli/cmd/status_test.go
+++ b/client/go/internal/cli/cmd/status_test.go
@@ -16,7 +16,7 @@ func TestStatusDeployCommand(t *testing.T) {
}
func TestStatusDeployCommandWithURLTarget(t *testing.T) {
- assertDeployStatus("http://mydeploytarget:19071", []string{"-t", "http://mydeploytarget"}, t)
+ assertDeployStatus("http://mydeploytarget:19071", []string{"-t", "http://mydeploytarget:19071"}, t)
}
func TestStatusDeployCommandWithLocalTarget(t *testing.T) {
@@ -28,7 +28,7 @@ func TestStatusQueryCommand(t *testing.T) {
}
func TestStatusQueryCommandWithUrlTarget(t *testing.T) {
- assertQueryStatus("http://mycontainertarget:8080", []string{"-t", "http://mycontainertarget"}, t)
+ assertQueryStatus("http://mycontainertarget:8080", []string{"-t", "http://mycontainertarget:8080"}, t)
}
func TestStatusQueryCommandWithLocalTarget(t *testing.T) {
diff --git a/client/go/internal/cli/cmd/testdata/A-Head-Full-of-Dreams-Put-Id.json b/client/go/internal/cli/cmd/testdata/A-Head-Full-of-Dreams-Put-Id.json
new file mode 100644
index 00000000000..d39b22782ab
--- /dev/null
+++ b/client/go/internal/cli/cmd/testdata/A-Head-Full-of-Dreams-Put-Id.json
@@ -0,0 +1,15 @@
+{
+ "id": "id:mynamespace:music::a-head-full-of-dreams",
+ "fields": {
+ "album": "A Head Full of Dreams",
+ "artist": "Coldplay",
+ "year": 2015,
+ "category_scores": {
+ "cells": [
+ { "address" : { "cat" : "pop" }, "value": 1 },
+ { "address" : { "cat" : "rock" }, "value": 0.2 },
+ { "address" : { "cat" : "jazz" }, "value": 0 }
+ ]
+ }
+ }
+}
diff --git a/client/go/internal/cli/cmd/visit.go b/client/go/internal/cli/cmd/visit.go
index 10fb2743c63..1875c768c60 100644
--- a/client/go/internal/cli/cmd/visit.go
+++ b/client/go/internal/cli/cmd/visit.go
@@ -138,6 +138,18 @@ $ vespa visit --field-set "[id]" # list document IDs
return cmd
}
+func getEpoch(timeStamp string) (int64, error) {
+ t, err := strconv.ParseInt(timeStamp, 10, 64)
+ if err != nil {
+ t, err := time.Parse(time.RFC3339, timeStamp)
+ if err != nil {
+ return 0, err
+ }
+ return t.Unix(), nil
+ }
+ return t, nil
+}
+
func checkArguments(vArgs visitArgs) (res util.OperationResult) {
if vArgs.slices > 0 || vArgs.sliceId > -1 {
if !(vArgs.slices > 0 && vArgs.sliceId > -1) {
@@ -149,13 +161,13 @@ func checkArguments(vArgs visitArgs) (res util.OperationResult) {
}
// to and from will support RFC3339 format soon, add more validation then
if vArgs.from != "" {
- _, err := strconv.ParseInt(vArgs.from, 10, 64)
+ _, err := getEpoch(vArgs.from)
if err != nil {
return util.Failure("Invalid 'from' argument: '" + vArgs.from + "': " + err.Error())
}
}
if vArgs.to != "" {
- _, err := strconv.ParseInt(vArgs.to, 10, 64)
+ _, err := getEpoch(vArgs.to)
if err != nil {
return util.Failure("Invalid 'to' argument: '" + vArgs.to + "': " + err.Error())
}
@@ -336,11 +348,11 @@ func runOneVisit(vArgs *visitArgs, service *vespa.Service, contToken string) (*V
urlPath = urlPath + fmt.Sprintf("&wantedDocumentCount=%d", vArgs.chunkCount)
}
if vArgs.from != "" {
- fromSeconds, _ := strconv.ParseInt(vArgs.from, 10, 64)
+ fromSeconds, _ := getEpoch(vArgs.from)
urlPath = urlPath + fmt.Sprintf("&fromTimestamp=%d", fromSeconds*1000000)
}
if vArgs.to != "" {
- toSeconds, _ := strconv.ParseInt(vArgs.to, 10, 64)
+ toSeconds, _ := getEpoch(vArgs.to)
urlPath = urlPath + fmt.Sprintf("&toTimestamp=%d", toSeconds*1000000)
}
if vArgs.slices > 0 {
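
A standalone sketch of the timestamp handling added above: getEpoch accepts either Unix seconds or an RFC3339 string, and runOneVisit multiplies the returned seconds by 1000000 to build the microsecond fromTimestamp/toTimestamp parameters.

package main

import (
	"fmt"
	"strconv"
	"time"
)

// getEpoch mirrors the helper above: plain integers are taken as Unix seconds,
// anything else must parse as RFC3339.
func getEpoch(timeStamp string) (int64, error) {
	secs, err := strconv.ParseInt(timeStamp, 10, 64)
	if err != nil {
		parsed, err := time.Parse(time.RFC3339, timeStamp)
		if err != nil {
			return 0, err
		}
		return parsed.Unix(), nil
	}
	return secs, nil
}

func main() {
	for _, ts := range []string{"1685097600", "2023-05-26T12:00:00Z"} {
		secs, err := getEpoch(ts)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s -> &fromTimestamp=%d\n", ts, secs*1000000)
	}
}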
diff --git a/client/go/internal/curl/curl.go b/client/go/internal/curl/curl.go
index daa60e6ff14..5f4b7928704 100644
--- a/client/go/internal/curl/curl.go
+++ b/client/go/internal/curl/curl.go
@@ -6,6 +6,7 @@ import (
"net/url"
"os/exec"
"runtime"
+ "sort"
"github.com/alessio/shellescape"
"github.com/vespa-engine/vespa/client/go/internal/util"
@@ -61,6 +62,7 @@ func (c *Command) Args() []string {
if c.Method != "" {
args = append(args, "-X", c.Method)
}
+ sort.Slice(c.headers, func(i, j int) bool { return c.headers[i].key < c.headers[j].key })
for _, header := range c.headers {
args = append(args, "-H", header.key+": "+header.value)
}
diff --git a/client/go/internal/vespa/document.go b/client/go/internal/vespa/document.go
deleted file mode 100644
index 9e4c8e7d136..00000000000
--- a/client/go/internal/vespa/document.go
+++ /dev/null
@@ -1,197 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-// vespa document API client
-// Author: bratseth
-
-package vespa
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io"
- "net/http"
- "net/url"
- "os"
- "time"
-
- "github.com/vespa-engine/vespa/client/go/internal/curl"
- "github.com/vespa-engine/vespa/client/go/internal/util"
-)
-
-// Sends the operation given in the file
-func Send(jsonFile string, service *Service, options OperationOptions) util.OperationResult {
- return sendOperation("", jsonFile, service, anyOperation, options)
-}
-
-func Put(documentId string, jsonFile string, service *Service, options OperationOptions) util.OperationResult {
- return sendOperation(documentId, jsonFile, service, putOperation, options)
-}
-
-func Update(documentId string, jsonFile string, service *Service, options OperationOptions) util.OperationResult {
- return sendOperation(documentId, jsonFile, service, updateOperation, options)
-}
-
-func RemoveId(documentId string, service *Service, options OperationOptions) util.OperationResult {
- return sendOperation(documentId, "", service, removeOperation, options)
-}
-
-func RemoveOperation(jsonFile string, service *Service, options OperationOptions) util.OperationResult {
- return sendOperation("", jsonFile, service, removeOperation, options)
-}
-
-const (
- anyOperation string = "any"
- putOperation string = "put"
- updateOperation string = "update"
- removeOperation string = "remove"
-)
-
-type OperationOptions struct {
- CurlOutput io.Writer
- Timeout time.Duration
-}
-
-func sendOperation(documentId string, jsonFile string, service *Service, operation string, options OperationOptions) util.OperationResult {
- header := http.Header{}
- header.Add("Content-Type", "application/json")
-
- var documentData []byte
- if operation == "remove" && jsonFile == "" {
- documentData = []byte("{\n \"remove\": \"" + documentId + "\"\n}\n")
- } else {
- fileReader, err := os.Open(jsonFile)
- if err != nil {
- return util.FailureWithDetail("Could not open file '"+jsonFile+"'", err.Error())
- }
- defer fileReader.Close()
- documentData, err = io.ReadAll(fileReader)
- if err != nil {
- return util.FailureWithDetail("Failed to read '"+jsonFile+"'", err.Error())
- }
- }
-
- var doc map[string]interface{}
- if err := json.Unmarshal(documentData, &doc); err != nil {
- return util.Failure(fmt.Sprintf("Document is not valid JSON: %s", err))
- }
-
- operationInFile := operationIn(doc)
- if operation == anyOperation { // Operation is decided by file content
- operation = operationInFile
- } else if operationInFile != "" && operationInFile != operation { // Otherwise operation must match
- return util.Failure("Wanted document operation is " + operation + " but the JSON file specifies " + operationInFile)
- }
-
- if documentId == "" { // Document id is decided by file content
- if doc[operation] == nil {
- return util.Failure("No document id given neither as argument or as a '" + operation + "' key in the json file")
- }
- documentId = doc[operation].(string) // document feeder format
- }
-
- documentPath, documentPathError := IdToURLPath(documentId)
- if documentPathError != nil {
- return util.Failure("Invalid document id '" + documentId + "': " + documentPathError.Error())
- }
-
- url, urlParseError := url.Parse(service.BaseURL + "/document/v1/" + documentPath)
- if urlParseError != nil {
- return util.Failure("Invalid request path: '" + service.BaseURL + "/document/v1/" + documentPath + "': " + urlParseError.Error())
- }
-
- request := &http.Request{
- URL: url,
- Method: operationToHTTPMethod(operation),
- Header: header,
- Body: io.NopCloser(bytes.NewReader(documentData)),
- }
- response, err := serviceDo(service, request, jsonFile, options)
- if err != nil {
- return util.Failure("Request failed: " + err.Error())
- }
-
- defer response.Body.Close()
- if response.StatusCode == 200 {
- return util.Success(operation + " " + documentId)
- } else if response.StatusCode/100 == 4 {
- return util.FailureWithPayload("Invalid document operation: "+response.Status, util.ReaderToJSON(response.Body))
- } else {
- return util.FailureWithPayload(service.Description()+" at "+request.URL.Host+": "+response.Status, util.ReaderToJSON(response.Body))
- }
-}
-
-func operationIn(doc map[string]interface{}) string {
- if doc["put"] != nil {
- return "put"
- } else if doc["update"] != nil {
- return "update"
- } else if doc["remove"] != nil {
- return "remove"
- } else {
- return ""
- }
-}
-
-func operationToHTTPMethod(operation string) string {
- switch operation {
- case "put":
- return "POST"
- case "update":
- return "PUT"
- case "remove":
- return "DELETE"
- }
- util.JustExitMsg("Unexpected document operation ''" + operation + "'")
- panic("unreachable")
-}
-
-func serviceDo(service *Service, request *http.Request, filename string, options OperationOptions) (*http.Response, error) {
- cmd, err := curl.RawArgs(request.URL.String())
- if err != nil {
- return nil, err
- }
- cmd.Method = request.Method
- for k, vs := range request.Header {
- for _, v := range vs {
- cmd.Header(k, v)
- }
- }
- cmd.WithBodyFile(filename)
- cmd.Certificate = service.TLSOptions.CertificateFile
- cmd.PrivateKey = service.TLSOptions.PrivateKeyFile
- out := cmd.String() + "\n"
- if _, err := io.WriteString(options.CurlOutput, out); err != nil {
- return nil, err
- }
- return service.Do(request, options.Timeout)
-}
-
-func Get(documentId string, service *Service, options OperationOptions) util.OperationResult {
- documentPath, documentPathError := IdToURLPath(documentId)
- if documentPathError != nil {
- return util.Failure("Invalid document id '" + documentId + "': " + documentPathError.Error())
- }
-
- url, urlParseError := url.Parse(service.BaseURL + "/document/v1/" + documentPath)
- if urlParseError != nil {
- return util.Failure("Invalid request path: '" + service.BaseURL + "/document/v1/" + documentPath + "': " + urlParseError.Error())
- }
-
- request := &http.Request{
- URL: url,
- Method: "GET",
- }
- response, err := serviceDo(service, request, "", options)
- if err != nil {
- return util.Failure("Request failed: " + err.Error())
- }
-
- defer response.Body.Close()
- if response.StatusCode == 200 {
- return util.SuccessWithPayload("Read "+documentId, util.ReaderToJSON(response.Body))
- } else if response.StatusCode/100 == 4 {
- return util.FailureWithPayload("Invalid document operation: "+response.Status, util.ReaderToJSON(response.Body))
- } else {
- return util.FailureWithPayload(service.Description()+" at "+request.URL.Host+": "+response.Status, util.ReaderToJSON(response.Body))
- }
-}
diff --git a/client/go/internal/vespa/document/circuit_breaker.go b/client/go/internal/vespa/document/circuit_breaker.go
index 17fc595d58f..9bcf2e3f619 100644
--- a/client/go/internal/vespa/document/circuit_breaker.go
+++ b/client/go/internal/vespa/document/circuit_breaker.go
@@ -19,7 +19,7 @@ const (
type CircuitBreaker interface {
Success()
- Error(error)
+ Failure()
State() CircuitState
}
@@ -27,38 +27,35 @@ type timeCircuitBreaker struct {
graceDuration time.Duration
doomDuration time.Duration
- failingSinceMillis int64
- lastError atomic.Value
- halfOpen atomic.Value
- open atomic.Value
+ failingSinceMillis atomic.Int64
+ halfOpen atomic.Bool
+ open atomic.Bool
now func() time.Time
}
func (b *timeCircuitBreaker) Success() {
- atomic.StoreInt64(&b.failingSinceMillis, math.MaxInt64)
- if !b.open.Load().(bool) {
+ b.failingSinceMillis.Store(math.MaxInt64)
+ if !b.open.Load() {
b.halfOpen.CompareAndSwap(true, false)
}
}
-func (b *timeCircuitBreaker) Error(err error) {
- if atomic.CompareAndSwapInt64(&b.failingSinceMillis, math.MaxInt64, b.now().UnixMilli()) {
- b.lastError.Store(err)
- }
+func (b *timeCircuitBreaker) Failure() {
+ b.failingSinceMillis.CompareAndSwap(math.MaxInt64, b.now().UnixMilli())
}
func (b *timeCircuitBreaker) State() CircuitState {
- failingDuration := b.now().Sub(time.UnixMilli(atomic.LoadInt64(&b.failingSinceMillis)))
+ failingDuration := b.now().Sub(time.UnixMilli(b.failingSinceMillis.Load()))
if failingDuration > b.graceDuration {
b.halfOpen.CompareAndSwap(false, true)
}
if b.doomDuration > 0 && failingDuration > b.doomDuration {
b.open.CompareAndSwap(false, true)
}
- if b.open.Load().(bool) {
+ if b.open.Load() {
return CircuitOpen
- } else if b.halfOpen.Load().(bool) {
+ } else if b.halfOpen.Load() {
return CircuitHalfOpen
}
return CircuitClosed
@@ -66,11 +63,11 @@ func (b *timeCircuitBreaker) State() CircuitState {
func NewCircuitBreaker(graceDuration, doomDuration time.Duration) *timeCircuitBreaker {
b := &timeCircuitBreaker{
- graceDuration: graceDuration,
- doomDuration: doomDuration,
- now: time.Now,
- failingSinceMillis: math.MaxInt64,
+ graceDuration: graceDuration,
+ doomDuration: doomDuration,
+ now: time.Now,
}
+ b.failingSinceMillis.Store(math.MaxInt64)
b.open.Store(false)
b.halfOpen.Store(false)
return b
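
An in-package usage sketch of the renamed interface: callers report Failure() on failed requests and Success() on good ones, and poll State() to decide whether to keep feeding. The durations are illustrative only.

package document

import (
	"fmt"
	"time"
)

// demoBreaker shows the intended call pattern for the circuit breaker.
func demoBreaker() {
	breaker := NewCircuitBreaker(10*time.Second, time.Minute) // grace duration, doom duration
	breaker.Failure()                                         // a failed request starts the failing window
	if breaker.State() == CircuitOpen {
		fmt.Println("giving up: too many errors")
	}
	breaker.Success() // a later success closes a half-open circuit again
}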
diff --git a/client/go/internal/vespa/document/circuit_breaker_test.go b/client/go/internal/vespa/document/circuit_breaker_test.go
index 7a4fffaae27..05dbd6da2f5 100644
--- a/client/go/internal/vespa/document/circuit_breaker_test.go
+++ b/client/go/internal/vespa/document/circuit_breaker_test.go
@@ -1,7 +1,6 @@
package document
import (
- "errors"
"testing"
"time"
@@ -12,7 +11,6 @@ func TestCircuitBreaker(t *testing.T) {
clock := &manualClock{}
breaker := NewCircuitBreaker(time.Second, time.Minute)
breaker.now = clock.now
- err := errors.New("error")
assert.Equal(t, CircuitClosed, breaker.State(), "Initial state is closed")
@@ -25,7 +23,7 @@ func TestCircuitBreaker(t *testing.T) {
clock.advance(100 * time.Second)
assert.Equal(t, CircuitClosed, breaker.State(), "State is closed some time after a success")
- breaker.Error(err)
+ breaker.Failure()
assert.Equal(t, CircuitClosed, breaker.State(), "State is closed right after a failure")
clock.advance(time.Second)
@@ -37,7 +35,7 @@ func TestCircuitBreaker(t *testing.T) {
breaker.Success()
assert.Equal(t, CircuitClosed, breaker.State(), "State is closed after a new success")
- breaker.Error(err)
+ breaker.Failure()
clock.advance(time.Minute)
assert.Equal(t, CircuitHalfOpen, breaker.State(), "State is half-open until doom duration has passed")
diff --git a/client/go/internal/vespa/document/dispatcher.go b/client/go/internal/vespa/document/dispatcher.go
index 7237a87b7e2..d9273d2f677 100644
--- a/client/go/internal/vespa/document/dispatcher.go
+++ b/client/go/internal/vespa/document/dispatcher.go
@@ -3,6 +3,7 @@ package document
import (
"fmt"
"io"
+ "strconv"
"strings"
"sync"
"sync/atomic"
@@ -12,6 +13,9 @@ import (
// maxAttempts controls the maximum number of times a document operation is attempted before giving up.
const maxAttempts = 10
+// Feeder is the interface for a consumer of documents.
+type Feeder interface{ Send(Document) Result }
+
// Dispatcher dispatches documents from a queue to a Feeder.
type Dispatcher struct {
feeder Feeder
@@ -20,16 +24,14 @@ type Dispatcher struct {
stats Stats
started bool
- ready chan documentOp
results chan documentOp
msgs chan string
inflight map[string]*Queue[documentOp]
- inflightCount int64
+ inflightCount atomic.Int64
output io.Writer
verbose bool
- queuePool sync.Pool
mu sync.Mutex
statsMu sync.Mutex
wg sync.WaitGroup
@@ -57,46 +59,55 @@ func NewDispatcher(feeder Feeder, throttler Throttler, breaker CircuitBreaker, o
output: output,
verbose: verbose,
}
- d.queuePool.New = func() any { return NewQueue[documentOp]() }
d.start()
return d
}
-func (d *Dispatcher) shouldRetry(op documentOp, result Result) bool {
+func (d *Dispatcher) logResult(doc Document, result Result, retry bool) {
if result.Trace != "" {
- d.msgs <- fmt.Sprintf("feed: trace for %s:\n%s", op.document, result.Trace)
+ d.msgs <- fmt.Sprintf("feed: trace for %s %s:\n%s", doc.Operation, doc.Id, result.Trace)
}
- if result.Success() {
- if d.verbose {
- d.msgs <- fmt.Sprintf("feed: %s succeeded with status %d", op.document, result.HTTPStatus)
- }
- d.throttler.Success()
- d.circuitBreaker.Success()
- return false
+ if !d.verbose && result.Success() {
+ return
}
- if result.HTTPStatus == 429 || result.HTTPStatus == 503 {
- d.msgs <- fmt.Sprintf("feed: %s was throttled with status %d: retrying", op.document, result.HTTPStatus)
- d.throttler.Throttled(atomic.LoadInt64(&d.inflightCount))
- return true
+ var msg strings.Builder
+ msg.WriteString("feed: got status ")
+ msg.WriteString(strconv.Itoa(result.HTTPStatus))
+ msg.WriteString(" (")
+ if result.Body != nil {
+ msg.Write(result.Body)
+ } else {
+ msg.WriteString("no body")
}
- if result.Err != nil || result.HTTPStatus == 500 || result.HTTPStatus == 502 || result.HTTPStatus == 504 {
- retry := op.attempts < maxAttempts
- var msg strings.Builder
- msg.WriteString("feed: ")
- msg.WriteString(op.document.String())
- msg.WriteString(" failed: ")
- if result.Err != nil {
- msg.WriteString(result.Err.Error())
- } else {
- msg.WriteString(fmt.Sprintf("status %d", result.HTTPStatus))
- }
+ msg.WriteString(")")
+ msg.WriteString(" for ")
+ msg.WriteString(doc.Operation.String())
+ msg.WriteString(" ")
+ msg.WriteString(doc.Id.String())
+ if !result.Success() {
if retry {
msg.WriteString(": retrying")
} else {
- msg.WriteString(fmt.Sprintf(": giving up after %d attempts", maxAttempts))
+ msg.WriteString(": giving up after ")
+ msg.WriteString(strconv.Itoa(maxAttempts))
+ msg.WriteString(" attempts")
}
- d.msgs <- msg.String()
- d.circuitBreaker.Error(fmt.Errorf("request failed with status %d", result.HTTPStatus))
+ }
+ d.msgs <- msg.String()
+}
+
+func (d *Dispatcher) shouldRetry(op documentOp, result Result) bool {
+ retry := op.attempts < maxAttempts
+ d.logResult(op.document, result, retry)
+ if result.Success() {
+ d.throttler.Success()
+ d.circuitBreaker.Success()
+ return false
+ } else if result.HTTPStatus == 429 || result.HTTPStatus == 503 {
+ d.throttler.Throttled(d.inflightCount.Load())
+ return true
+ } else if result.Err != nil || result.HTTPStatus == 500 || result.HTTPStatus == 502 || result.HTTPStatus == 504 {
+ d.circuitBreaker.Failure()
if retry {
return true
}
@@ -110,23 +121,14 @@ func (d *Dispatcher) start() {
if d.started {
return
}
- d.ready = make(chan documentOp, 4096)
d.results = make(chan documentOp, 4096)
d.msgs = make(chan string, 4096)
d.started = true
- d.wg.Add(3)
- go d.dispatchReady()
+ d.wg.Add(2)
go d.processResults()
go d.printMessages()
}
-func (d *Dispatcher) dispatchReady() {
- defer d.wg.Done()
- for op := range d.ready {
- d.dispatch(op)
- }
-}
-
func (d *Dispatcher) dispatch(op documentOp) {
if !d.acceptDocument() {
d.msgs <- fmt.Sprintf("refusing to dispatch document %s: too many errors", op.document.Id.String())
@@ -144,11 +146,12 @@ func (d *Dispatcher) processResults() {
defer d.wg.Done()
for op := range d.results {
d.statsMu.Lock()
- d.stats.Add(op.result.Stats)
+ d.stats.Add(op.result)
d.statsMu.Unlock()
if d.shouldRetry(op, op.result) {
d.enqueue(op.resetResult(), true)
} else {
+ op.document.Reset()
d.inflightWg.Done()
}
d.dispatchNext(op.document.Id)
@@ -163,13 +166,18 @@ func (d *Dispatcher) dispatchNext(id Id) {
if !ok {
panic("no queue exists for " + id.String() + ": this should not happen")
}
- if next, ok := q.Poll(); ok {
- // we have more operations with this ID: notify dispatcher about the next one
- d.ready <- next
- } else {
+ hasNext := q != nil
+ if hasNext {
+ if next, ok := q.Poll(); ok {
+ // we have more operations with this ID: dispatch the next one
+ d.dispatch(next)
+ } else {
+ hasNext = false
+ }
+ }
+ if !hasNext {
// no more operations with this ID: release slot
delete(d.inflight, k)
- d.queuePool.Put(q)
d.releaseSlot()
}
}
@@ -191,12 +199,15 @@ func (d *Dispatcher) enqueue(op documentOp, isRetry bool) error {
d.mu.Unlock()
return fmt.Errorf("refusing to enqueue document %s: too many errors", op.document.Id.String())
}
- key := op.document.Id.String()
- q, ok := d.inflight[key]
+ k := op.document.Id.String()
+ q, ok := d.inflight[k]
if !ok {
- q = d.queuePool.Get().(*Queue[documentOp])
- d.inflight[key] = q
+ d.inflight[k] = nil // track operation, but defer allocating queue until needed
} else {
+ if q == nil {
+ q = NewQueue[documentOp]()
+ d.inflight[k] = q
+ }
q.Add(op, isRetry)
}
if !isRetry {
@@ -204,9 +215,9 @@ func (d *Dispatcher) enqueue(op documentOp, isRetry bool) error {
}
d.mu.Unlock()
if !ok && !isRetry {
- // first operation with this ID: acquire slot
+ // first operation with this ID: acquire slot and dispatch
d.acquireSlot()
- d.ready <- op
+ d.dispatch(op)
d.throttler.Sent()
}
return nil
@@ -226,21 +237,22 @@ func (d *Dispatcher) acceptDocument() bool {
}
func (d *Dispatcher) acquireSlot() {
- for atomic.LoadInt64(&d.inflightCount) >= d.throttler.TargetInflight() {
+ for d.inflightCount.Load() >= d.throttler.TargetInflight() {
time.Sleep(time.Millisecond)
}
- atomic.AddInt64(&d.inflightCount, 1)
+ d.inflightCount.Add(1)
}
-func (d *Dispatcher) releaseSlot() { atomic.AddInt64(&d.inflightCount, -1) }
+func (d *Dispatcher) releaseSlot() { d.inflightCount.Add(-1) }
func (d *Dispatcher) Enqueue(doc Document) error { return d.enqueue(documentOp{document: doc}, false) }
func (d *Dispatcher) Stats() Stats {
d.statsMu.Lock()
defer d.statsMu.Unlock()
- d.stats.Inflight = atomic.LoadInt64(&d.inflightCount)
- return d.stats
+ statsCopy := d.stats.Clone()
+ statsCopy.Inflight = d.inflightCount.Load()
+ return statsCopy
}
// Close waits for all inflight operations to complete and closes the dispatcher.
@@ -248,7 +260,6 @@ func (d *Dispatcher) Close() error {
d.inflightWg.Wait() // Wait for all inflight operations to complete
d.mu.Lock()
if d.started {
- close(d.ready)
close(d.results)
close(d.msgs)
d.started = false
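
An in-package sketch, mirroring the test setup below (newThrottler's signature is taken from the benchmark there), of how the reworked Dispatcher is driven: operations are enqueued and now dispatched directly, since the ready channel and queue pool are gone, and aggregated stats are read after Close.

package document

import (
	"io"
	"time"
)

// demoDispatch feeds a single put through a dispatcher and returns its stats.
func demoDispatch(feeder Feeder) (Stats, error) {
	throttler := newThrottler(8, time.Now)
	breaker := NewCircuitBreaker(time.Second, 0)
	dispatcher := NewDispatcher(feeder, throttler, breaker, io.Discard, false)
	id, err := ParseId("id:ns:type::doc1")
	if err != nil {
		return Stats{}, err
	}
	doc := Document{Id: id, Operation: OperationPut, Body: []byte(`{"fields": {"foo": "123"}}`)}
	if err := dispatcher.Enqueue(doc); err != nil {
		return Stats{}, err
	}
	if err := dispatcher.Close(); err != nil {
		return Stats{}, err
	}
	return dispatcher.Stats(), nil
}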
diff --git a/client/go/internal/vespa/document/dispatcher_test.go b/client/go/internal/vespa/document/dispatcher_test.go
index 252bd94dff9..834ec8490a6 100644
--- a/client/go/internal/vespa/document/dispatcher_test.go
+++ b/client/go/internal/vespa/document/dispatcher_test.go
@@ -42,16 +42,13 @@ func (f *mockFeeder) Send(doc Document) Result {
} else {
f.documents = append(f.documents, doc)
}
- if !result.Success() {
- result.Stats.Errors = 1
- }
return result
}
type mockCircuitBreaker struct{ state CircuitState }
func (c *mockCircuitBreaker) Success() {}
-func (c *mockCircuitBreaker) Error(err error) {}
+func (c *mockCircuitBreaker) Failure() {}
func (c *mockCircuitBreaker) State() CircuitState { return c.state }
func TestDispatcher(t *testing.T) {
@@ -61,8 +58,8 @@ func TestDispatcher(t *testing.T) {
breaker := NewCircuitBreaker(time.Second, 0)
dispatcher := NewDispatcher(feeder, throttler, breaker, io.Discard, false)
docs := []Document{
- {Id: mustParseId("id:ns:type::doc1"), Operation: OperationPut, Fields: []byte(`{"foo": "123"}`)},
- {Id: mustParseId("id:ns:type::doc2"), Operation: OperationPut, Fields: []byte(`{"bar": "456"}`)},
+ {Id: mustParseId("id:ns:type::doc1"), Operation: OperationPut, Body: []byte(`{"fields": {"foo": "123"}}`)},
+ {Id: mustParseId("id:ns:type::doc2"), Operation: OperationPut, Body: []byte(`{"fields": {"bar": "456"}}`)},
}
for _, d := range docs {
dispatcher.Enqueue(d)
@@ -135,7 +132,7 @@ func TestDispatcherOrderingWithFailures(t *testing.T) {
dispatcher.Close()
wantDocs := docs[:2]
assert.Equal(t, wantDocs, feeder.documents)
- assert.Equal(t, int64(20), dispatcher.Stats().Errors)
+ assert.Equal(t, int64(20), dispatcher.Stats().Unsuccessful())
// Dispatching more documents for same ID succeed
feeder.failAfterN(0)
@@ -145,7 +142,7 @@ func TestDispatcherOrderingWithFailures(t *testing.T) {
dispatcher.Enqueue(Document{Id: mustParseId("id:ns:type::doc2"), Operation: OperationPut})
dispatcher.Enqueue(Document{Id: mustParseId("id:ns:type::doc3"), Operation: OperationPut})
dispatcher.Close()
- assert.Equal(t, int64(20), dispatcher.Stats().Errors)
+ assert.Equal(t, int64(20), dispatcher.Stats().Unsuccessful())
assert.Equal(t, 6, len(feeder.documents))
}
@@ -166,7 +163,7 @@ func TestDispatcherOrderingWithRetry(t *testing.T) {
}
dispatcher.Close()
assert.Equal(t, docs, feeder.documents)
- assert.Equal(t, int64(5), dispatcher.Stats().Errors)
+ assert.Equal(t, int64(5), dispatcher.Stats().Unsuccessful())
}
func TestDispatcherOpenCircuit(t *testing.T) {
@@ -192,7 +189,7 @@ func BenchmarkDocumentDispatching(b *testing.B) {
throttler := newThrottler(8, clock.now)
breaker := NewCircuitBreaker(time.Second, 0)
dispatcher := NewDispatcher(feeder, throttler, breaker, io.Discard, false)
- doc := Document{Id: mustParseId("id:ns:type::doc1"), Operation: OperationPut, Fields: []byte(`{"foo": "123"}`)}
+ doc := Document{Id: mustParseId("id:ns:type::doc1"), Operation: OperationPut, Body: []byte(`{"fields": {"foo": "123"}}`)}
b.ResetTimer() // ignore setup time
for n := 0; n < b.N; n++ {
diff --git a/client/go/internal/vespa/document/document.go b/client/go/internal/vespa/document/document.go
index ce8b22b24f0..a9b184190fb 100644
--- a/client/go/internal/vespa/document/document.go
+++ b/client/go/internal/vespa/document/document.go
@@ -1,29 +1,58 @@
package document
import (
- "bufio"
"bytes"
+ "errors"
"fmt"
"io"
"math/rand"
"strconv"
"strings"
+ "sync"
"time"
- "github.com/goccy/go-json"
+ // Why do we use an experimental parser? This appears to be the only JSON library that satisfies the following
+ // requirements:
+ // - Faster than the std parser
+ // - Supports parsing from an io.Reader
+ // - Supports parsing token-by-token
+ // - Few allocations during parsing (especially for large objects)
+ "github.com/go-json-experiment/json"
)
-var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1}
-
type Operation int
const (
OperationPut Operation = iota
OperationUpdate
OperationRemove
+
+ jsonArrayStart json.Kind = '['
+ jsonArrayEnd json.Kind = ']'
+ jsonObjectStart json.Kind = '{'
+ jsonObjectEnd json.Kind = '}'
+ jsonString json.Kind = '"'
)
+var (
+ ErrMissingId = errors.New("no id specified")
+ fieldsPrefix = []byte(`{"fields":`)
+ fieldsSuffix = []byte("}")
+)
+
+func (o Operation) String() string {
+ switch o {
+ case OperationPut:
+ return "put"
+ case OperationUpdate:
+ return "update"
+ case OperationRemove:
+ return "remove"
+ }
+ return ""
+}
+
// Id represents a Vespa document ID.
type Id struct {
id string
@@ -98,39 +127,46 @@ func ParseId(serialized string) (Id, error) {
type Document struct {
Id Id
Condition string
- Fields []byte
+ Body []byte
Operation Operation
Create bool
+
+ resetFunc func()
}
-type jsonDocument struct {
- IdString string `json:"id"`
- PutId string `json:"put"`
- UpdateId string `json:"update"`
- RemoveId string `json:"remove"`
- Condition string `json:"condition"`
- Fields json.RawMessage `json:"fields"`
- Create bool `json:"create"`
+func (d Document) Equal(o Document) bool {
+ return d.Id.Equal(o.Id) &&
+ d.Condition == o.Condition &&
+ bytes.Equal(d.Body, o.Body) &&
+ d.Operation == o.Operation &&
+ d.Create == o.Create
+}
+
+// Reset discards the body of this document.
+func (d *Document) Reset() {
+ d.Body = nil
+ if d.resetFunc != nil {
+ d.resetFunc()
+ }
}
// Decoder decodes documents from a JSON structure which is either an array of objects, or objects separated by newline.
type Decoder struct {
- buf *bufio.Reader
- dec *json.Decoder
+ dec *json.Decoder
+ buf bytes.Buffer
+
array bool
jsonl bool
+
+ fieldsEnd int64
+
+ documentBuffers sync.Pool
}
func (d Document) String() string {
var sb strings.Builder
- switch d.Operation {
- case OperationPut:
- sb.WriteString("put ")
- case OperationUpdate:
- sb.WriteString("update ")
- case OperationRemove:
- sb.WriteString("remove ")
- }
+ sb.WriteString(d.Operation.String())
+ sb.WriteString(" ")
sb.WriteString(d.Id.String())
if d.Condition != "" {
sb.WriteString(", condition=")
@@ -139,113 +175,196 @@ func (d Document) String() string {
if d.Create {
sb.WriteString(", create=true")
}
+ if d.Body != nil {
+ sb.WriteString(", body=")
+ sb.WriteString(string(d.Body))
+ }
return sb.String()
}
func (d *Decoder) guessMode() error {
- for !d.array && !d.jsonl {
- b, err := d.buf.ReadByte()
- if err != nil {
- return err
- }
- // Skip leading whitespace
- if b < 0x80 && asciiSpace[b] != 0 {
- continue
- }
- switch rune(b) {
- case '{':
- d.jsonl = true
- case '[':
- d.array = true
- default:
- return fmt.Errorf("unexpected token: %q", string(b))
- }
- if err := d.buf.UnreadByte(); err != nil {
- return err
- }
- if err := d.readArrayToken(true); err != nil {
+ if d.array || d.jsonl {
+ return nil
+ }
+ kind := d.dec.PeekKind()
+ switch kind {
+ case jsonArrayStart:
+ if _, err := d.readNext(jsonArrayStart); err != nil {
return err
}
+ d.array = true
+ case jsonObjectStart:
+ d.jsonl = true
+ default:
+ return fmt.Errorf("expected %s or %s, got %s", jsonArrayStart, jsonObjectStart, kind)
}
return nil
}
-func (d *Decoder) readArrayToken(open bool) error {
- if !d.array {
- return nil
+func (d *Decoder) readNext(kind json.Kind) (json.Token, error) {
+ t, err := d.dec.ReadToken()
+ if err != nil {
+ return json.Token{}, err
}
- t, err := d.dec.Token()
+ if t.Kind() != kind {
+ return json.Token{}, fmt.Errorf("unexpected json kind: %q: want %q", t, kind)
+ }
+ return t, nil
+}
+
+func (d *Decoder) readString() (string, error) {
+ t, err := d.readNext(jsonString)
if err != nil {
- return err
+ return "", err
}
- if (open && t == json.Delim('[')) || (!open && t == json.Delim(']')) {
- return nil
+ return t.String(), nil
+}
+
+func (d *Decoder) readBool() (bool, error) {
+ t, err := d.dec.ReadToken()
+ if err != nil {
+ return false, err
}
- return fmt.Errorf("invalid array token: %q", t)
+ kind := t.Kind()
+ if kind != 't' && kind != 'f' {
+ return false, fmt.Errorf("unexpected json kind: %q: want %q or %q", t, 't', 'f')
+ }
+ return t.Bool(), nil
}
func (d *Decoder) Decode() (Document, error) {
doc, err := d.decode()
if err != nil && err != io.EOF {
- return Document{}, fmt.Errorf("invalid json at byte offset %d: %w", d.dec.InputOffset(), err)
+ return doc, fmt.Errorf("invalid operation at byte offset %d: %w", d.dec.InputOffset(), err)
}
return doc, err
}
+func (d *Decoder) buffer() *bytes.Buffer {
+ buf := d.documentBuffers.Get().(*bytes.Buffer)
+ buf.Reset()
+ return buf
+}
+
+func (d *Decoder) readField(name string, offset int64, doc *Document) error {
+ readId := false
+ switch name {
+ case "id", "put":
+ readId = true
+ doc.Operation = OperationPut
+ case "update":
+ readId = true
+ doc.Operation = OperationUpdate
+ case "remove":
+ readId = true
+ doc.Operation = OperationRemove
+ case "condition":
+ condition, err := d.readString()
+ if err != nil {
+ return err
+ }
+ doc.Condition = condition
+ case "create":
+ create, err := d.readBool()
+ if err != nil {
+ return err
+ }
+ doc.Create = create
+ case "fields":
+ if _, err := d.readNext(jsonObjectStart); err != nil {
+ return err
+ }
+ // Skip data between start of operation and start of fields
+ fieldsStart := d.dec.InputOffset() - 1
+ d.buf.Next(int(fieldsStart - offset))
+ depth := 1
+ for depth > 0 {
+ t, err := d.dec.ReadToken()
+ if err != nil {
+ return err
+ }
+ switch t.Kind() {
+ case jsonObjectStart:
+ depth++
+ case jsonObjectEnd:
+ depth--
+ }
+ }
+ d.fieldsEnd = d.dec.InputOffset()
+ fields := d.buf.Next(int(d.fieldsEnd - fieldsStart))
+ // Try to re-use buffers holding the document body. The buffer is released by Document.Reset()
+ bodyBuf := d.buffer()
+ bodyBuf.Grow(len(fieldsPrefix) + len(fields) + len(fieldsSuffix))
+ bodyBuf.Write(fieldsPrefix)
+ bodyBuf.Write(fields)
+ bodyBuf.Write(fieldsSuffix)
+ doc.Body = bodyBuf.Bytes()
+ doc.resetFunc = func() { d.documentBuffers.Put(bodyBuf) }
+ }
+ if readId {
+ s, err := d.readString()
+ if err != nil {
+ return err
+ }
+ id, err := ParseId(s)
+ if err != nil {
+ return err
+ }
+ doc.Id = id
+ }
+ return nil
+}
+
func (d *Decoder) decode() (Document, error) {
+ start := d.dec.InputOffset()
if err := d.guessMode(); err != nil {
return Document{}, err
}
- if !d.dec.More() {
- if err := d.readArrayToken(false); err != nil {
+ if d.array && d.dec.PeekKind() == jsonArrayEnd {
+ // Reached end of the array holding document operations
+ if _, err := d.readNext(jsonArrayEnd); err != nil {
return Document{}, err
}
return Document{}, io.EOF
}
- doc := jsonDocument{}
- if err := d.dec.Decode(&doc); err != nil {
+ // Start of document operation
+ if _, err := d.readNext(jsonObjectStart); err != nil {
return Document{}, err
}
- return parseDocument(&doc)
-}
-
-func NewDecoder(r io.Reader) *Decoder {
- buf := bufio.NewReaderSize(r, 1<<26)
- return &Decoder{
- buf: buf,
- dec: json.NewDecoder(buf),
+ var doc Document
+loop:
+ for {
+ switch d.dec.PeekKind() {
+ case jsonString:
+ t, err := d.dec.ReadToken()
+ if err != nil {
+ return Document{}, err
+ }
+ if err := d.readField(t.String(), start, &doc); err != nil {
+ return Document{}, err
+ }
+ default:
+ if _, err := d.readNext(jsonObjectEnd); err != nil {
+ return Document{}, err
+ }
+ // Drop operation from the buffer
+ start = max(start, d.fieldsEnd)
+ end := d.dec.InputOffset()
+ d.buf.Next(int(end - start))
+ break loop
+ }
+ }
+ if doc.Id.id == "" {
+ return doc, ErrMissingId
}
+ return doc, nil
}
-func parseDocument(d *jsonDocument) (Document, error) {
- id := ""
- var op Operation
- if d.IdString != "" {
- op = OperationPut
- id = d.IdString
- } else if d.PutId != "" {
- op = OperationPut
- id = d.PutId
- } else if d.UpdateId != "" {
- op = OperationUpdate
- id = d.UpdateId
- } else if d.RemoveId != "" {
- op = OperationRemove
- id = d.RemoveId
- } else {
- return Document{}, fmt.Errorf("invalid document: missing operation: %v", d)
- }
- docId, err := ParseId(id)
- if err != nil {
- return Document{}, err
- }
- return Document{
- Id: docId,
- Operation: op,
- Condition: d.Condition,
- Create: d.Create,
- Fields: d.Fields,
- }, nil
+func NewDecoder(r io.Reader) *Decoder {
+ d := &Decoder{}
+ d.documentBuffers.New = func() any { return &bytes.Buffer{} }
+ d.dec = json.NewDecoder(io.TeeReader(r, &d.buf))
+ return d
}
func parseError(value string) error {
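
An in-package sketch of consuming the rewritten streaming decoder, mirroring dispatchFrom in feed.go: operations are decoded one at a time until io.EOF, each document's Body already carries the extracted fields wrapped as {"fields":...}, and Reset returns the pooled body buffer once the operation is done.

package document

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

// demoDecode streams document operations from r until EOF, printing each one.
func demoDecode(r io.Reader) error {
	dec := NewDecoder(bufio.NewReader(r))
	for {
		doc, err := dec.Decode()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		// For a put this prints e.g.: put id:ns:type::doc1 {"fields":{"foo": "123"}}
		fmt.Println(doc.Operation, doc.Id.String(), string(doc.Body))
		doc.Reset() // hand the pooled body buffer back to the decoder, as the dispatcher does after completion
	}
}

func demoDecodeJSONL() error {
	return demoDecode(strings.NewReader(`{"put": "id:ns:type::doc1", "fields": {"foo": "123"}}`))
}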
diff --git a/client/go/internal/vespa/document/document_test.go b/client/go/internal/vespa/document/document_test.go
index 397136173bc..d37febf3da8 100644
--- a/client/go/internal/vespa/document/document_test.go
+++ b/client/go/internal/vespa/document/document_test.go
@@ -1,9 +1,9 @@
package document
import (
+ "errors"
"fmt"
"io"
- "reflect"
"strings"
"testing"
"time"
@@ -113,18 +113,31 @@ func feedInput(jsonl bool) string {
`
{
"put": "id:ns:type::doc1",
- "fields": {"foo": "123"}
+ "fields": { "foo" : "123", "bar": {"a": [1, 2, 3]}}
}`,
`
-{
+
+ {
"put": "id:ns:type::doc2",
+ "create": false,
+ "condition": "foo",
"fields": {"bar": "456"}
}`,
`
{
- "remove": "id:ns:type::doc1"
+ "remove": "id:ns:type::doc3"
}
-`}
+`,
+ `
+{
+ "fields": {"qux": "789"},
+ "put": "id:ns:type::doc4",
+ "create": true
+}`,
+ `
+{
+ "remove": "id:ns:type::doc5"
+}`}
if jsonl {
return strings.Join(operations, "\n")
}
@@ -133,32 +146,49 @@ func feedInput(jsonl bool) string {
func testDocumentDecoder(t *testing.T, jsonLike string) {
t.Helper()
- r := NewDecoder(strings.NewReader(jsonLike))
- want := []Document{
- {Id: mustParseId("id:ns:type::doc1"), Operation: OperationPut, Fields: []byte(`{"foo": "123"}`)},
- {Id: mustParseId("id:ns:type::doc2"), Operation: OperationPut, Fields: []byte(`{"bar": "456"}`)},
- {Id: mustParseId("id:ns:type::doc1"), Operation: OperationRemove},
+ dec := NewDecoder(strings.NewReader(jsonLike))
+ docs := []Document{
+ {Id: mustParseId("id:ns:type::doc1"), Operation: OperationPut, Body: []byte(`{"fields":{ "foo" : "123", "bar": {"a": [1, 2, 3]}}}`)},
+ {Id: mustParseId("id:ns:type::doc2"), Operation: OperationPut, Condition: "foo", Body: []byte(`{"fields":{"bar": "456"}}`)},
+ {Id: mustParseId("id:ns:type::doc3"), Operation: OperationRemove},
+ {Id: mustParseId("id:ns:type::doc4"), Operation: OperationPut, Create: true, Body: []byte(`{"fields":{"qux": "789"}}`)},
+ {Id: mustParseId("id:ns:type::doc5"), Operation: OperationRemove},
}
- got := []Document{}
+ result := []Document{}
for {
- doc, err := r.Decode()
+ doc, err := dec.Decode()
if err == io.EOF {
break
}
if err != nil {
t.Fatal(err)
}
- got = append(got, doc)
+ result = append(result, doc)
+ }
+ wantBufLen := 0
+ if dec.array {
+ wantBufLen = 1
+ }
+ if l := dec.buf.Len(); l != wantBufLen {
+ t.Errorf("got dec.buf.Len() = %d, want %d", l, wantBufLen)
+ }
+ if len(docs) != len(result) {
+ t.Errorf("len(result) = %d, want %d", len(result), len(docs))
}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("got %+v, want %+v", got, want)
+ for i := 0; i < len(docs); i++ {
+ got := result[i]
+ want := docs[i]
+ if !got.Equal(want) {
+ t.Errorf("got %+v, want %+v", got, want)
+ }
}
}
-func TestDocumentDecoder(t *testing.T) {
- testDocumentDecoder(t, feedInput(false))
- testDocumentDecoder(t, feedInput(true))
+func TestDocumentDecoderArray(t *testing.T) { testDocumentDecoder(t, feedInput(false)) }
+
+func TestDocumentDecoderJSONL(t *testing.T) { testDocumentDecoder(t, feedInput(true)) }
+func TestDocumentDecoderInvalid(t *testing.T) {
jsonLike := `
{
"put": "id:ns:type::doc1",
@@ -169,16 +199,23 @@ func TestDocumentDecoder(t *testing.T) {
"fields": {"foo": "invalid
}
`
- r := NewDecoder(strings.NewReader(jsonLike))
- _, err := r.Decode() // first object is valid
+ dec := NewDecoder(strings.NewReader(jsonLike))
+ _, err := dec.Decode() // first object is valid
if err != nil {
t.Errorf("unexpected error: %s", err)
}
- _, err = r.Decode()
- wantErr := "invalid json at byte offset 122: json: string of object unexpected end of JSON input"
+ _, err = dec.Decode()
+ wantErr := "invalid operation at byte offset 110: json: invalid character '\\n' within string (expecting non-control character)"
if err.Error() != wantErr {
t.Errorf("want error %q, got %q", wantErr, err.Error())
}
+
+ dec = NewDecoder(strings.NewReader(`{}`))
+ _, err = dec.Decode()
+ wantErr = "invalid operation at byte offset 2: no id specified"
+ if !errors.Is(err, ErrMissingId) {
+ t.Errorf("want error %q, got %q", ErrMissingId, err.Error())
+ }
}
func benchmarkDocumentDecoder(b *testing.B, size int) {
diff --git a/client/go/internal/vespa/document/feeder_test.go b/client/go/internal/vespa/document/feeder_test.go
deleted file mode 100644
index a7d92495889..00000000000
--- a/client/go/internal/vespa/document/feeder_test.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package document
-
-import (
- "reflect"
- "testing"
- "time"
-)
-
-func TestStatsAdd(t *testing.T) {
- var got Stats
- got.Add(Stats{Requests: 1})
- got.Add(Stats{Requests: 1})
- got.Add(Stats{Responses: 1})
- got.Add(Stats{Responses: 1})
- got.Add(Stats{ResponsesByCode: map[int]int64{200: 2}})
- got.Add(Stats{ResponsesByCode: map[int]int64{200: 2}})
- got.Add(Stats{MinLatency: 200 * time.Millisecond})
- got.Add(Stats{MaxLatency: 400 * time.Millisecond})
- got.Add(Stats{MinLatency: 100 * time.Millisecond})
- got.Add(Stats{MaxLatency: 500 * time.Millisecond})
- got.Add(Stats{MaxLatency: 300 * time.Millisecond})
- got.Add(Stats{})
-
- want := Stats{
- Requests: 2,
- Responses: 2,
- ResponsesByCode: map[int]int64{200: 4},
- MinLatency: 100 * time.Millisecond,
- MaxLatency: 500 * time.Millisecond,
- }
- if !reflect.DeepEqual(got, want) {
- t.Errorf("got %+v, want %+v", got, want)
- }
-}
diff --git a/client/go/internal/vespa/document/http.go b/client/go/internal/vespa/document/http.go
index a389a82cee1..986659773f1 100644
--- a/client/go/internal/vespa/document/http.go
+++ b/client/go/internal/vespa/document/http.go
@@ -14,8 +14,7 @@ import (
"sync/atomic"
"time"
- "github.com/goccy/go-json"
-
+ "github.com/go-json-experiment/json"
"github.com/klauspost/compress/gzip"
"github.com/vespa-engine/vespa/client/go/internal/build"
@@ -31,9 +30,6 @@ const (
)
var (
- fieldsPrefix = []byte(`{"fields":`)
- fieldsSuffix = []byte("}")
-
defaultHeaders http.Header = map[string][]string{
"User-Agent": {fmt.Sprintf("Vespa CLI/%s", build.Version)},
"Content-Type": {"application/json; charset=utf-8"},
@@ -48,9 +44,9 @@ var (
// Client represents a HTTP client for the /document/v1/ API.
type Client struct {
options ClientOptions
- httpClients []countingHTTPClient
+ httpClients []*countingHTTPClient
now func() time.Time
- sendCount int32
+ sendCount atomic.Int32
gzippers sync.Pool
buffers sync.Pool
pending chan *pendingDocument
@@ -69,13 +65,11 @@ type ClientOptions struct {
type countingHTTPClient struct {
client util.HTTPClient
- inflight int64
+ inflight atomic.Int64
}
-func (c *countingHTTPClient) addInflight(n int64) { atomic.AddInt64(&c.inflight, n) }
-
func (c *countingHTTPClient) Do(req *http.Request, timeout time.Duration) (*http.Response, error) {
- defer c.addInflight(-1)
+ defer c.inflight.Add(-1)
return c.client.Do(req, timeout)
}
@@ -96,9 +90,9 @@ func NewClient(options ClientOptions, httpClients []util.HTTPClient) (*Client, e
if err != nil {
return nil, fmt.Errorf("invalid base url: %w", err)
}
- countingClients := make([]countingHTTPClient, 0, len(httpClients))
+ countingClients := make([]*countingHTTPClient, 0, len(httpClients))
for _, client := range httpClients {
- countingClients = append(countingClients, countingHTTPClient{client: client})
+ countingClients = append(countingClients, &countingHTTPClient{client: client})
}
nowFunc := options.NowFunc
if nowFunc == nil {
@@ -133,46 +127,38 @@ func writeQueryParam(sb *bytes.Buffer, start int, escape bool, k, v string) {
}
}
-func writeRequestBody(w io.Writer, body []byte) error {
- for _, b := range [][]byte{fieldsPrefix, body, fieldsSuffix} {
- if _, err := w.Write(b); err != nil {
- return err
- }
+func (c *Client) writeDocumentPath(id Id, sb *bytes.Buffer) {
+ sb.WriteString(strings.TrimSuffix(c.options.BaseURL, "/"))
+ sb.WriteString("/document/v1/")
+ sb.WriteString(url.PathEscape(id.Namespace))
+ sb.WriteString("/")
+ sb.WriteString(url.PathEscape(id.Type))
+ if id.Number != nil {
+ sb.WriteString("/number/")
+ n := uint64(*id.Number)
+ sb.WriteString(strconv.FormatUint(n, 10))
+ } else if id.Group != "" {
+ sb.WriteString("/group/")
+ sb.WriteString(url.PathEscape(id.Group))
+ } else {
+ sb.WriteString("/docid")
}
- return nil
+ sb.WriteString("/")
+ sb.WriteString(url.PathEscape(id.UserSpecific))
}
func (c *Client) methodAndURL(d Document, sb *bytes.Buffer) (string, string) {
httpMethod := ""
switch d.Operation {
case OperationPut:
- httpMethod = "POST"
+ httpMethod = http.MethodPost
case OperationUpdate:
- httpMethod = "PUT"
+ httpMethod = http.MethodPut
case OperationRemove:
- httpMethod = "DELETE"
+ httpMethod = http.MethodDelete
}
// Base URL and path
- sb.WriteString(c.options.BaseURL)
- if !strings.HasSuffix(c.options.BaseURL, "/") {
- sb.WriteString("/")
- }
- sb.WriteString("document/v1/")
- sb.WriteString(url.PathEscape(d.Id.Namespace))
- sb.WriteString("/")
- sb.WriteString(url.PathEscape(d.Id.Type))
- if d.Id.Number != nil {
- sb.WriteString("/number/")
- n := uint64(*d.Id.Number)
- sb.WriteString(strconv.FormatUint(n, 10))
- } else if d.Id.Group != "" {
- sb.WriteString("/group/")
- sb.WriteString(url.PathEscape(d.Id.Group))
- } else {
- sb.WriteString("/docid")
- }
- sb.WriteString("/")
- sb.WriteString(url.PathEscape(d.Id.UserSpecific))
+ c.writeDocumentPath(d.Id, sb)
// Query part
queryStart := sb.Len()
if c.options.Timeout > 0 {
@@ -199,19 +185,19 @@ func (c *Client) methodAndURL(d Document, sb *bytes.Buffer) (string, string) {
func (c *Client) leastBusyClient() *countingHTTPClient {
leastBusy := c.httpClients[0]
min := int64(math.MaxInt64)
- next := atomic.AddInt32(&c.sendCount, 1)
+ next := c.sendCount.Add(1)
start := int(next) % len(c.httpClients)
for i := range c.httpClients {
j := (i + start) % len(c.httpClients)
client := c.httpClients[j]
- inflight := atomic.LoadInt64(&client.inflight)
+ inflight := client.inflight.Load()
if inflight < min {
leastBusy = client
min = inflight
}
}
- leastBusy.addInflight(1)
- return &leastBusy
+ leastBusy.inflight.Add(1)
+ return leastBusy
}
func (c *Client) gzipWriter(w io.Writer) *gzip.Writer {
@@ -230,7 +216,7 @@ func (c *Client) preparePending() {
for pd := range c.pending {
pd.buf = c.buffer()
method, url := c.methodAndURL(pd.document, pd.buf)
- pd.request, pd.err = c.createRequest(method, url, pd.document.Fields, pd.buf)
+ pd.request, pd.err = c.createRequest(method, url, pd.document.Body, pd.buf)
pd.prepared <- true
}
}
@@ -260,24 +246,23 @@ func (c *Client) createRequest(method, url string, body []byte, buf *bytes.Buffe
if len(body) == 0 {
return newRequest(method, url, nil, false)
}
- bodySize := len(fieldsPrefix) + len(body) + len(fieldsSuffix)
- useGzip := c.options.Compression == CompressionGzip || (c.options.Compression == CompressionAuto && bodySize > 512)
- buf.Grow(min(1024, bodySize))
+ useGzip := c.options.Compression == CompressionGzip || (c.options.Compression == CompressionAuto && len(body) > 512)
+ var r io.Reader
if useGzip {
+ buf.Grow(min(1024, len(body)))
zw := c.gzipWriter(buf)
defer c.gzippers.Put(zw)
- if err := writeRequestBody(zw, body); err != nil {
+ if _, err := zw.Write(body); err != nil {
return nil, err
}
if err := zw.Close(); err != nil {
return nil, err
}
+ r = buf
} else {
- if err := writeRequestBody(buf, body); err != nil {
- return nil, err
- }
+ r = bytes.NewReader(body)
}
- return newRequest(method, url, buf, useGzip)
+ return newRequest(method, url, r, useGzip)
}
func (c *Client) clientTimeout() time.Duration {
@@ -290,33 +275,54 @@ func (c *Client) clientTimeout() time.Duration {
// Send given document to the endpoint configured in this client.
func (c *Client) Send(document Document) Result {
start := c.now()
- result := Result{Id: document.Id, Stats: Stats{Requests: 1}}
+ result := Result{Id: document.Id}
req, buf, err := c.prepare(document)
defer c.buffers.Put(buf)
if err != nil {
return resultWithErr(result, err)
}
- bodySize := buf.Len()
+ bodySize := len(document.Body)
+ if buf.Len() > 0 {
+ bodySize = buf.Len()
+ }
+ resp, err := c.leastBusyClient().Do(req, c.clientTimeout())
+ if err != nil {
+ return resultWithErr(result, err)
+ }
+ defer resp.Body.Close()
+ elapsed := c.now().Sub(start)
+ return c.resultWithResponse(resp, bodySize, result, elapsed, buf, false)
+}
+
+// Get retrieves the document with the given ID.
+func (c *Client) Get(id Id) Result {
+ start := c.now()
+ buf := c.buffer()
+ defer c.buffers.Put(buf)
+ c.writeDocumentPath(id, buf)
+ url := buf.String()
+ result := Result{Id: id}
+ req, err := http.NewRequest(http.MethodGet, url, nil)
+ if err != nil {
+ return resultWithErr(result, err)
+ }
resp, err := c.leastBusyClient().Do(req, c.clientTimeout())
if err != nil {
return resultWithErr(result, err)
}
defer resp.Body.Close()
elapsed := c.now().Sub(start)
- return resultWithResponse(resp, bodySize, result, elapsed, buf)
+ return c.resultWithResponse(resp, 0, result, elapsed, buf, true)
}
func resultWithErr(result Result, err error) Result {
- result.Stats.Errors++
result.Status = StatusTransportFailure
result.Err = err
return result
}
-func resultWithResponse(resp *http.Response, sentBytes int, result Result, elapsed time.Duration, buf *bytes.Buffer) Result {
+func (c *Client) resultWithResponse(resp *http.Response, sentBytes int, result Result, elapsed time.Duration, buf *bytes.Buffer, copyBody bool) Result {
result.HTTPStatus = resp.StatusCode
- result.Stats.Responses++
- result.Stats.ResponsesByCode = map[int]int64{resp.StatusCode: 1}
switch resp.StatusCode {
case 200:
result.Status = StatusSuccess
@@ -327,30 +333,28 @@ func resultWithResponse(resp *http.Response, sentBytes int, result Result, elaps
default:
result.Status = StatusTransportFailure
}
- var body struct {
- Message string `json:"message"`
- Trace json.RawMessage `json:"trace"`
- }
buf.Reset()
written, err := io.Copy(buf, resp.Body)
if err != nil {
- result.Status = StatusVespaFailure
- result.Err = err
+ result = resultWithErr(result, err)
} else {
- if err := json.Unmarshal(buf.Bytes(), &body); err != nil {
- result.Status = StatusVespaFailure
- result.Err = fmt.Errorf("failed to decode json response: %w", err)
+ if result.Success() && c.options.TraceLevel > 0 {
+ var jsonResponse struct {
+ Trace json.RawValue `json:"trace"`
+ }
+ if err := json.Unmarshal(buf.Bytes(), &jsonResponse); err != nil {
+ result = resultWithErr(result, fmt.Errorf("failed to decode json response: %w", err))
+ } else {
+ result.Trace = string(jsonResponse.Trace)
+ }
+ }
+ if !result.Success() || copyBody {
+ result.Body = make([]byte, buf.Len())
+ copy(result.Body, buf.Bytes())
}
}
- result.Message = body.Message
- result.Trace = string(body.Trace)
- result.Stats.BytesSent = int64(sentBytes)
- result.Stats.BytesRecv = int64(written)
- if !result.Success() {
- result.Stats.Errors++
- }
- result.Stats.TotalLatency = elapsed
- result.Stats.MinLatency = elapsed
- result.Stats.MaxLatency = elapsed
+ result.Latency = elapsed
+ result.BytesSent = int64(sentBytes)
+ result.BytesRecv = int64(written)
return result
}
diff --git a/client/go/internal/vespa/document/http_test.go b/client/go/internal/vespa/document/http_test.go
index 7d636aa8d5c..30bd8406f45 100644
--- a/client/go/internal/vespa/document/http_test.go
+++ b/client/go/internal/vespa/document/http_test.go
@@ -3,7 +3,6 @@ package document
import (
"bytes"
"fmt"
- "net/http"
"reflect"
"strings"
"testing"
@@ -37,13 +36,13 @@ func TestLeastBusyClient(t *testing.T) {
httpClients = append(httpClients, &mockHTTPClient{i, &httpClient})
}
client, _ := NewClient(ClientOptions{}, httpClients)
- client.httpClients[0].addInflight(1)
- client.httpClients[1].addInflight(1)
- assertLeastBusy(t, 2, client)
+ client.httpClients[0].inflight.Add(1)
+ client.httpClients[1].inflight.Add(1)
assertLeastBusy(t, 2, client)
assertLeastBusy(t, 3, client)
- client.httpClients[3].addInflight(1)
- client.httpClients[1].addInflight(-1)
+ assertLeastBusy(t, 3, client)
+ client.httpClients[3].inflight.Add(1)
+ client.httpClients[1].inflight.Add(-1)
assertLeastBusy(t, 1, client)
}
@@ -62,16 +61,16 @@ func TestClientSend(t *testing.T) {
method string
url string
}{
- {Document{Create: true, Id: mustParseId("id:ns:type::doc1"), Operation: OperationUpdate, Fields: []byte(`{"foo": "123"}`)},
+ {Document{Create: true, Id: mustParseId("id:ns:type::doc1"), Operation: OperationUpdate, Body: []byte(`{"fields":{"foo": "123"}}`)},
"PUT",
"https://example.com:1337/document/v1/ns/type/docid/doc1?timeout=5000ms&create=true"},
- {Document{Id: mustParseId("id:ns:type::doc2"), Operation: OperationUpdate, Fields: []byte(`{"foo": "456"}`)},
+ {Document{Id: mustParseId("id:ns:type::doc2"), Operation: OperationUpdate, Body: []byte(`{"fields":{"foo": "456"}}`)},
"PUT",
"https://example.com:1337/document/v1/ns/type/docid/doc2?timeout=5000ms"},
{Document{Id: mustParseId("id:ns:type::doc3"), Operation: OperationRemove},
"DELETE",
"https://example.com:1337/document/v1/ns/type/docid/doc3?timeout=5000ms"},
- {Document{Condition: "foo", Id: mustParseId("id:ns:type::doc4"), Operation: OperationUpdate, Fields: []byte(`{"baz": "789"}`)},
+ {Document{Condition: "foo", Id: mustParseId("id:ns:type::doc4"), Operation: OperationUpdate, Body: []byte(`{"fields":{"baz": "789"}}`)},
"PUT",
"https://example.com:1337/document/v1/ns/type/docid/doc4?timeout=5000ms&condition=foo"},
}
@@ -86,43 +85,29 @@ func TestClientSend(t *testing.T) {
for i, tt := range tests {
doc := tt.in
wantRes := Result{
- Id: doc.Id,
- Stats: Stats{
- Requests: 1,
- Responses: 1,
- TotalLatency: time.Second,
- MinLatency: time.Second,
- MaxLatency: time.Second,
- },
+ Id: doc.Id,
+ Latency: time.Second,
}
- var wantBody bytes.Buffer
if i < 3 {
- httpClient.NextResponseString(200, `{"message":"All good!"}`)
+ msg := `{"message":"All good!"}`
+ httpClient.NextResponseString(200, msg)
wantRes.Status = StatusSuccess
wantRes.HTTPStatus = 200
- wantRes.Message = "All good!"
- wantRes.Stats.ResponsesByCode = map[int]int64{200: 1}
- wantRes.Stats.BytesRecv = 23
+ wantRes.BytesRecv = 23
} else {
- httpClient.NextResponseString(502, `{"message":"Good bye, cruel world!"}`)
+ errMsg := `something went wrong`
+ httpClient.NextResponseString(502, errMsg)
wantRes.Status = StatusVespaFailure
wantRes.HTTPStatus = 502
- wantRes.Message = "Good bye, cruel world!"
- wantRes.Stats.ResponsesByCode = map[int]int64{502: 1}
- wantRes.Stats.Errors = 1
- wantRes.Stats.BytesRecv = 36
- }
- if tt.method == http.MethodPut {
- wantBody.WriteString(`{"fields":`)
- wantBody.Write(doc.Fields)
- wantBody.WriteString("}")
+ wantRes.Body = []byte(errMsg)
+ wantRes.BytesRecv = 20
}
res := client.Send(doc)
- wantRes.Stats.BytesSent = int64(len(httpClient.LastBody))
+ wantRes.BytesSent = int64(len(httpClient.LastBody))
if !reflect.DeepEqual(res, wantRes) {
t.Fatalf("got result %+v, want %+v", res, wantRes)
}
- stats.Add(res.Stats)
+ stats.Add(res)
r := httpClient.LastRequest
if r.Method != tt.method {
t.Errorf("got r.Method = %q, want %q", r.Method, tt.method)
@@ -133,8 +118,8 @@ func TestClientSend(t *testing.T) {
if r.URL.String() != tt.url {
t.Errorf("got r.URL = %q, want %q", r.URL, tt.url)
}
- if !bytes.Equal(httpClient.LastBody, wantBody.Bytes()) {
- t.Errorf("got r.Body = %q, want %q", string(httpClient.LastBody), wantBody.String())
+ if !bytes.Equal(httpClient.LastBody, doc.Body) {
+ t.Errorf("got r.Body = %q, want %q", string(httpClient.LastBody), doc.Body)
}
}
want := Stats{
@@ -144,19 +129,51 @@ func TestClientSend(t *testing.T) {
200: 3,
502: 1,
},
- Errors: 1,
+ Errors: 0,
Inflight: 0,
TotalLatency: 4 * time.Second,
MinLatency: time.Second,
MaxLatency: time.Second,
BytesSent: 75,
- BytesRecv: 105,
+ BytesRecv: 89,
}
if !reflect.DeepEqual(want, stats) {
t.Errorf("got %+v, want %+v", stats, want)
}
}
+func TestClientGet(t *testing.T) {
+ httpClient := mock.HTTPClient{ReadBody: true}
+ client, _ := NewClient(ClientOptions{
+ BaseURL: "https://example.com:1337",
+ Timeout: time.Duration(5 * time.Second),
+ }, []util.HTTPClient{&httpClient})
+ clock := manualClock{t: time.Now(), tick: time.Second}
+ client.now = clock.now
+ doc := `{
+ "pathId": "/document/v1/mynamespace/music/docid/doc1",
+ "id": "id:mynamespace:music::doc1",
+ "fields": {
+ "artist": "Metallica",
+ "album": "Master of Puppets"
+ }
+}`
+ id := Id{Namespace: "mynamespace", Type: "music", UserSpecific: "doc1"}
+ httpClient.NextResponseString(200, doc)
+ result := client.Get(id)
+ want := Result{
+ Id: id,
+ Body: []byte(doc),
+ Status: StatusSuccess,
+ HTTPStatus: 200,
+ Latency: time.Second,
+ BytesRecv: 192,
+ }
+ if !reflect.DeepEqual(want, result) {
+ t.Errorf("got %+v, want %+v", result, want)
+ }
+}
+
func TestClientSendCompressed(t *testing.T) {
httpClient := &mock.HTTPClient{ReadBody: true}
client, _ := NewClient(ClientOptions{
@@ -164,9 +181,9 @@ func TestClientSendCompressed(t *testing.T) {
Timeout: time.Duration(5 * time.Second),
}, []util.HTTPClient{httpClient})
- bigBody := fmt.Sprintf(`{"foo": "%s"}`, strings.Repeat("s", 512+1))
- bigDoc := Document{Create: true, Id: mustParseId("id:ns:type::doc1"), Operation: OperationUpdate, Fields: []byte(bigBody)}
- smallDoc := Document{Create: true, Id: mustParseId("id:ns:type::doc2"), Operation: OperationUpdate, Fields: []byte(`{"foo": "s"}`)}
+ bigBody := fmt.Sprintf(`{"fields": {"foo": "%s"}}`, strings.Repeat("s", 512+1))
+ bigDoc := Document{Create: true, Id: mustParseId("id:ns:type::doc1"), Operation: OperationUpdate, Body: []byte(bigBody)}
+ smallDoc := Document{Create: true, Id: mustParseId("id:ns:type::doc2"), Operation: OperationUpdate, Body: []byte(`{"fields": {"foo": "s"}}`)}
var result Result
client.options.Compression = CompressionNone
@@ -198,8 +215,8 @@ func assertCompressedRequest(t *testing.T, want bool, result Result, client *moc
if gotEnc != wantEnc {
t.Errorf("got Content-Encoding=%q, want %q", gotEnc, wantEnc)
}
- if result.Stats.BytesSent != int64(len(client.LastBody)) {
- t.Errorf("got BytesSent=%d, want %d", result.Stats.BytesSent, len(client.LastBody))
+ if result.BytesSent != int64(len(client.LastBody)) {
+ t.Errorf("got BytesSent=%d, want %d", result.BytesSent, len(client.LastBody))
}
compressed := bytes.HasPrefix(client.LastBody, []byte{0x1f, 0x8b})
if compressed != want {
@@ -307,7 +324,7 @@ func benchmarkClientSend(b *testing.B, compression Compression, document Documen
}
func makeDocument(size int) Document {
- return Document{Id: mustParseId("id:ns:type::doc1"), Operation: OperationUpdate, Fields: []byte(fmt.Sprintf(`{"foo": "%s"}`, randString(size)))}
+ return Document{Id: mustParseId("id:ns:type::doc1"), Operation: OperationUpdate, Body: []byte(fmt.Sprintf(`{"fields": {"foo": "%s"}}`, randString(size)))}
}
func BenchmarkClientSendSmallUncompressed(b *testing.B) {
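The compression tests above pin down the CompressionAuto rule from createRequest: bodies over 512 bytes are gzipped, smaller ones are sent as-is, and a compressed request is recognized by the 0x1f 0x8b gzip magic bytes (see assertCompressedRequest). A stand-alone sketch of the same rule, using stdlib gzip rather than the pooled klauspost writer the client uses:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
)

// encodeBody mirrors the CompressionAuto decision in createRequest above:
// gzip the body only when it exceeds 512 bytes. This is an illustrative
// stand-alone sketch, not the client's actual code path.
func encodeBody(body []byte) ([]byte, bool, error) {
	if len(body) <= 512 {
		return body, false, nil
	}
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write(body); err != nil {
		return nil, false, err
	}
	if err := zw.Close(); err != nil {
		return nil, false, err
	}
	return buf.Bytes(), true, nil
}

func isGzipped(b []byte) bool {
	// Same check as assertCompressedRequest above: the two-byte gzip magic header.
	return bytes.HasPrefix(b, []byte{0x1f, 0x8b})
}

func main() {
	small, _, _ := encodeBody([]byte(`{"fields":{"foo":"s"}}`))
	big, _, _ := encodeBody(bytes.Repeat([]byte("s"), 513))
	fmt.Println(isGzipped(small), isGzipped(big)) // false true
}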
diff --git a/client/go/internal/vespa/document/feeder.go b/client/go/internal/vespa/document/stats.go
index 6bcd4afe916..3e647d0f893 100644
--- a/client/go/internal/vespa/document/feeder.go
+++ b/client/go/internal/vespa/document/stats.go
@@ -4,6 +4,7 @@ import (
"time"
)
+// Status of a document operation.
type Status int
const (
@@ -23,11 +24,13 @@ const (
type Result struct {
Err error
Id Id
- Message string
Trace string
- Stats Stats
+ Body []byte
Status Status
HTTPStatus int
+ Latency time.Duration
+ BytesSent int64
+ BytesRecv int64
}
func (r Result) Success() bool {
@@ -57,39 +60,46 @@ func (s Stats) AvgLatency() time.Duration {
return s.TotalLatency / time.Duration(requests)
}
-func (s Stats) Successes() int64 {
+func (s Stats) Successful() int64 {
if s.ResponsesByCode == nil {
return 0
}
return s.ResponsesByCode[200]
}
-// Add all statistics contained in other to this.
-func (s *Stats) Add(other Stats) {
- s.Requests += other.Requests
- s.Responses += other.Responses
- if s.ResponsesByCode == nil && other.ResponsesByCode != nil {
+func (s Stats) Unsuccessful() int64 { return s.Requests - s.Successful() }
+
+func (s Stats) Clone() Stats {
+ if s.ResponsesByCode != nil {
+ mapCopy := make(map[int]int64)
+ for k, v := range s.ResponsesByCode {
+ mapCopy[k] = v
+ }
+ s.ResponsesByCode = mapCopy
+ }
+ return s
+}
+
+// Add adds statistics from the given result to this Stats.
+func (s *Stats) Add(result Result) {
+ s.Requests++
+ if s.ResponsesByCode == nil {
s.ResponsesByCode = make(map[int]int64)
}
- for code, count := range other.ResponsesByCode {
- _, ok := s.ResponsesByCode[code]
- if ok {
- s.ResponsesByCode[code] += count
- } else {
- s.ResponsesByCode[code] = count
- }
+ responsesByCode := s.ResponsesByCode[result.HTTPStatus]
+ s.ResponsesByCode[result.HTTPStatus] = responsesByCode + 1
+ if result.Err == nil {
+ s.Responses++
+ } else {
+ s.Errors++
}
- s.Errors += other.Errors
- s.TotalLatency += other.TotalLatency
- if s.MinLatency == 0 || (other.MinLatency > 0 && other.MinLatency < s.MinLatency) {
- s.MinLatency = other.MinLatency
+ s.TotalLatency += result.Latency
+ if result.Latency < s.MinLatency || s.MinLatency == 0 {
+ s.MinLatency = result.Latency
}
- if other.MaxLatency > s.MaxLatency {
- s.MaxLatency = other.MaxLatency
+ if result.Latency > s.MaxLatency {
+ s.MaxLatency = result.Latency
}
- s.BytesSent += other.BytesSent
- s.BytesRecv += other.BytesRecv
+ s.BytesSent += result.BytesSent
+ s.BytesRecv += result.BytesRecv
}
-
-// Feeder is the interface for a consumer of documents.
-type Feeder interface{ Send(Document) Result }
diff --git a/client/go/internal/vespa/document/stats_test.go b/client/go/internal/vespa/document/stats_test.go
new file mode 100644
index 00000000000..8788836f9ad
--- /dev/null
+++ b/client/go/internal/vespa/document/stats_test.go
@@ -0,0 +1,43 @@
+package document
+
+import (
+ "reflect"
+ "testing"
+ "time"
+)
+
+func TestStatsAdd(t *testing.T) {
+ var stats Stats
+ stats.Add(Result{HTTPStatus: 200, Latency: 200 * time.Millisecond})
+ stats.Add(Result{HTTPStatus: 200, Latency: 400 * time.Millisecond})
+ stats.Add(Result{HTTPStatus: 200, Latency: 100 * time.Millisecond})
+ stats.Add(Result{HTTPStatus: 200, Latency: 500 * time.Millisecond})
+ stats.Add(Result{HTTPStatus: 200, Latency: 300 * time.Millisecond})
+ stats.Add(Result{HTTPStatus: 500, Latency: 100 * time.Millisecond})
+ expected := Stats{
+ Requests: 6,
+ Responses: 6,
+ ResponsesByCode: map[int]int64{200: 5, 500: 1},
+ TotalLatency: 1600 * time.Millisecond,
+ MinLatency: 100 * time.Millisecond,
+ MaxLatency: 500 * time.Millisecond,
+ }
+ if !reflect.DeepEqual(stats, expected) {
+ t.Errorf("got %+v, want %+v", stats, expected)
+ }
+ if want, got := int64(1), stats.Unsuccessful(); want != got {
+ t.Errorf("got stats.Unsuccessful() = %d, want %d", got, want)
+ }
+}
+
+func TestStatsClone(t *testing.T) {
+ var a Stats
+ a.Add(Result{HTTPStatus: 200})
+ b := a.Clone()
+ a.Add(Result{HTTPStatus: 200})
+
+ want := Stats{Requests: 1, Responses: 1, ResponsesByCode: map[int]int64{200: 1}}
+ if !reflect.DeepEqual(b, want) {
+ t.Errorf("got %+v, want %+v", b, want)
+ }
+}
diff --git a/client/go/internal/vespa/document/throttler.go b/client/go/internal/vespa/document/throttler.go
index 667a10d28e3..e32fb804b23 100644
--- a/client/go/internal/vespa/document/throttler.go
+++ b/client/go/internal/vespa/document/throttler.go
@@ -23,11 +23,11 @@ type Throttler interface {
type dynamicThrottler struct {
minInflight int64
maxInflight int64
- targetInflight int64
- targetTimesTen int64
+ targetInflight atomic.Int64
+ targetTimesTen atomic.Int64
throughputs []float64
- ok int64
+ ok atomic.Int64
sent int64
start time.Time
@@ -39,23 +39,24 @@ func newThrottler(connections int, nowFunc func() time.Time) *dynamicThrottler {
minInflight = 16 * int64(connections)
maxInflight = 256 * minInflight // 4096 max streams per connection on the server side
)
- return &dynamicThrottler{
- minInflight: minInflight,
- maxInflight: maxInflight,
- targetInflight: 8 * minInflight,
- targetTimesTen: 10 * maxInflight,
+ t := &dynamicThrottler{
+ minInflight: minInflight,
+ maxInflight: maxInflight,
throughputs: make([]float64, 128),
start: nowFunc(),
now: nowFunc,
}
+ t.targetInflight.Store(8 * minInflight)
+ t.targetTimesTen.Store(10 * maxInflight)
+ return t
}
func NewThrottler(connections int) Throttler { return newThrottler(connections, time.Now) }
func (t *dynamicThrottler) Sent() {
- currentInflight := atomic.LoadInt64(&t.targetInflight)
+ currentInflight := t.targetInflight.Load()
t.sent++
if t.sent*t.sent*t.sent < 100*currentInflight*currentInflight {
return
@@ -64,7 +65,7 @@ func (t *dynamicThrottler) Sent() {
now := t.now()
elapsed := now.Sub(t.start)
t.start = now
- currentThroughput := float64(atomic.SwapInt64(&t.ok, 0)) / float64(elapsed)
+ currentThroughput := float64(t.ok.Swap(0)) / float64(elapsed)
// Use buckets for throughput over inflight, along the log-scale, in [minInflight, maxInflight).
index := int(float64(len(t.throughputs)) * math.Log(max(1, min(255, float64(currentInflight)/float64(t.minInflight)))) / math.Log(256))
@@ -85,20 +86,20 @@ func (t *dynamicThrottler) Sent() {
}
}
target := int64((rand.Float64()*0.20 + 0.92) * choice) // Random walk, skewed towards increase
- atomic.StoreInt64(&t.targetInflight, max(t.minInflight, min(t.maxInflight, target)))
+ t.targetInflight.Store(max(t.minInflight, min(t.maxInflight, target)))
}
func (t *dynamicThrottler) Success() {
- atomic.AddInt64(&t.targetTimesTen, 1)
- atomic.AddInt64(&t.ok, 1)
+ t.targetTimesTen.Add(1)
+ t.ok.Add(1)
}
func (t *dynamicThrottler) Throttled(inflight int64) {
- atomic.StoreInt64(&t.targetTimesTen, max(inflight*5, t.minInflight*10))
+ t.targetTimesTen.Store(max(inflight*5, t.minInflight*10))
}
func (t *dynamicThrottler) TargetInflight() int64 {
- staticTargetInflight := min(t.maxInflight, atomic.LoadInt64(&t.targetTimesTen)/10)
- targetInflight := atomic.LoadInt64(&t.targetInflight)
+ staticTargetInflight := min(t.maxInflight, t.targetTimesTen.Load()/10)
+ targetInflight := t.targetInflight.Load()
return min(staticTargetInflight, targetInflight)
}
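Like the HTTP client earlier in this diff, the throttler drops bare int64 fields manipulated through atomic.AddInt64/LoadInt64/StoreInt64/SwapInt64 in favour of Go 1.19's atomic.Int64, so the atomicity lives in the field's type rather than in a calling convention. A minimal before/after sketch of the pattern:

package main

import (
	"fmt"
	"sync/atomic"
)

// Before: a plain int64 that every caller must remember to touch atomically.
type counterOld struct{ n int64 }

func (c *counterOld) inc() int64 { return atomic.AddInt64(&c.n, 1) }
func (c *counterOld) get() int64 { return atomic.LoadInt64(&c.n) }

// After: atomic.Int64 (Go 1.19+) carries the guarantee in the type itself --
// the same shift applied above to inflight, sendCount, targetInflight and ok.
type counterNew struct{ n atomic.Int64 }

func (c *counterNew) inc() int64 { return c.n.Add(1) }
func (c *counterNew) get() int64 { return c.n.Load() }

func main() {
	var a counterOld
	var b counterNew
	a.inc()
	b.inc()
	fmt.Println(a.get(), b.get()) // 1 1
}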
diff --git a/client/go/internal/vespa/system.go b/client/go/internal/vespa/system.go
index b8263dbdec0..96795cc0ef8 100644
--- a/client/go/internal/vespa/system.go
+++ b/client/go/internal/vespa/system.go
@@ -4,36 +4,40 @@ import "fmt"
// PublicSystem represents the main Vespa Cloud system.
var PublicSystem = System{
- Name: "public",
- URL: "https://api-ctl.vespa-cloud.com:4443",
- ConsoleURL: "https://console.vespa-cloud.com",
- DefaultZone: ZoneID{Environment: "dev", Region: "aws-us-east-1c"},
+ Name: "public",
+ URL: "https://api-ctl.vespa-cloud.com:4443",
+ ConsoleURL: "https://console.vespa-cloud.com",
+ DefaultZone: ZoneID{Environment: "dev", Region: "aws-us-east-1c"},
+ EndpointDomain: "vespa-app.cloud",
}
// PublicCDSystem represents the CD variant of the Vespa Cloud system.
var PublicCDSystem = System{
- Name: "publiccd",
- URL: "https://api-ctl.cd.vespa-cloud.com:4443",
- ConsoleURL: "https://console.cd.vespa-cloud.com",
- DefaultZone: ZoneID{Environment: "dev", Region: "aws-us-east-1c"},
+ Name: "publiccd",
+ URL: "https://api-ctl.cd.vespa-cloud.com:4443",
+ ConsoleURL: "https://console.cd.vespa-cloud.com",
+ DefaultZone: ZoneID{Environment: "dev", Region: "aws-us-east-1c"},
+ EndpointDomain: "cd.vespa-app.cloud",
}
// MainSystem represents the main hosted Vespa system.
var MainSystem = System{
- Name: "main",
- URL: "https://api.vespa.ouryahoo.com:4443",
- ConsoleURL: "https://console.vespa.ouryahoo.com",
- DefaultZone: ZoneID{Environment: "dev", Region: "us-east-1"},
- AthenzDomain: "vespa.vespa",
+ Name: "main",
+ URL: "https://api.vespa.ouryahoo.com:4443",
+ ConsoleURL: "https://console.vespa.ouryahoo.com",
+ DefaultZone: ZoneID{Environment: "dev", Region: "us-east-1"},
+ AthenzDomain: "vespa.vespa",
+ EndpointDomain: "vespa.oath.cloud",
}
// CDSystem represents the CD variant of the hosted Vespa system.
var CDSystem = System{
- Name: "cd",
- URL: "https://api-cd.vespa.ouryahoo.com:4443",
- ConsoleURL: "https://console-cd.vespa.ouryahoo.com",
- DefaultZone: ZoneID{Environment: "dev", Region: "cd-us-west-1"},
- AthenzDomain: "vespa.vespa.cd",
+ Name: "cd",
+ URL: "https://api-cd.vespa.ouryahoo.com:4443",
+ ConsoleURL: "https://console-cd.vespa.ouryahoo.com",
+ DefaultZone: ZoneID{Environment: "dev", Region: "cd-us-west-1"},
+ AthenzDomain: "vespa.vespa.cd",
+ EndpointDomain: "cd.vespa.oath.cloud",
}
// System represents a Vespa system.
@@ -47,6 +51,8 @@ type System struct {
// AthenzDomain is the Athenz domain used by this system. This is empty for systems not using Athenz for tenant
// authentication.
AthenzDomain string
+ // EndpointDomain is the domain used for application endpoints in this system.
+ EndpointDomain string
}
// IsPublic returns whether system s is a public (Vespa Cloud) system.
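EndpointDomain is plain data on each built-in System; a small sketch that lists it next to the existing fields (importable only from inside the vespa module):

package main

import (
	"fmt"

	"github.com/vespa-engine/vespa/client/go/internal/vespa"
)

func main() {
	// Print the endpoint domain for each built-in system defined above.
	for _, s := range []vespa.System{vespa.PublicSystem, vespa.PublicCDSystem, vespa.MainSystem, vespa.CDSystem} {
		fmt.Printf("%-9s %s\n", s.Name, s.EndpointDomain)
	}
}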
diff --git a/client/go/internal/vespa/target_cloud.go b/client/go/internal/vespa/target_cloud.go
index 928bb788494..c0169f1a9bd 100644
--- a/client/go/internal/vespa/target_cloud.go
+++ b/client/go/internal/vespa/target_cloud.go
@@ -26,6 +26,7 @@ type APIOptions struct {
type CloudDeploymentOptions struct {
Deployment Deployment
TLSOptions TLSOptions
+ CustomURL string
ClusterURLs map[string]string // Endpoints keyed on cluster name
}
@@ -73,7 +74,15 @@ func CloudTarget(httpClient util.HTTPClient, apiAuth Authenticator, deploymentAu
}, nil
}
-func (t *cloudTarget) findClusterURL(cluster string) (string, error) {
+func (t *cloudTarget) findClusterURL(cluster string, timeout time.Duration, runID int64) (string, error) {
+ if t.deploymentOptions.CustomURL != "" {
+ return t.deploymentOptions.CustomURL, nil
+ }
+ if t.deploymentOptions.ClusterURLs == nil {
+ if err := t.waitForEndpoints(timeout, runID); err != nil {
+ return "", err
+ }
+ }
clusters := make([]string, 0, len(t.deploymentOptions.ClusterURLs))
for c := range t.deploymentOptions.ClusterURLs {
clusters = append(clusters, c)
@@ -129,12 +138,7 @@ func (t *cloudTarget) Service(name string, timeout time.Duration, runID int64, c
}
return service, nil
case QueryService, DocumentService:
- if t.deploymentOptions.ClusterURLs == nil {
- if err := t.waitForEndpoints(timeout, runID); err != nil {
- return nil, err
- }
- }
- url, err := t.findClusterURL(cluster)
+ url, err := t.findClusterURL(cluster, timeout, runID)
if err != nil {
return nil, err
}
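With this change the resolution order for a data-plane URL becomes: an explicit CustomURL wins, otherwise the known ClusterURLs are used, waiting for endpoint discovery first if none are known yet. The sketch below restates that order with hypothetical names (resolveURL, the waitForEndpoints signature, and an exact-match cluster lookup are all simplifications; the real cloudTarget internals are not shown in full here):

package main

import (
	"fmt"
	"time"
)

// resolveURL is a hypothetical stand-in for cloudTarget.findClusterURL: it only
// illustrates the resolution order introduced above, not the real implementation.
func resolveURL(customURL string, clusterURLs map[string]string, cluster string,
	waitForEndpoints func(time.Duration) (map[string]string, error), timeout time.Duration) (string, error) {
	if customURL != "" {
		return customURL, nil // 1. an explicit custom URL short-circuits discovery
	}
	if clusterURLs == nil {
		var err error
		if clusterURLs, err = waitForEndpoints(timeout); err != nil {
			return "", err // 2. otherwise wait for deployment endpoints to appear
		}
	}
	if url, ok := clusterURLs[cluster]; ok {
		return url, nil // 3. then pick the requested cluster
	}
	return "", fmt.Errorf("no endpoint found for cluster %q", cluster)
}

func main() {
	wait := func(time.Duration) (map[string]string, error) {
		return map[string]string{"default": "https://default.example.vespa-app.cloud"}, nil
	}
	url, _ := resolveURL("", nil, "default", wait, time.Minute)
	fmt.Println(url)
	url, _ = resolveURL("https://my-custom-endpoint.example.com", nil, "default", wait, time.Minute)
	fmt.Println(url)
}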
diff --git a/client/go/internal/vespa/target_custom.go b/client/go/internal/vespa/target_custom.go
index 0a3a9d48fed..0129b1e1153 100644
--- a/client/go/internal/vespa/target_custom.go
+++ b/client/go/internal/vespa/target_custom.go
@@ -41,7 +41,7 @@ func (t *customTarget) Deployment() Deployment { return Deployment{} }
func (t *customTarget) createService(name string) (*Service, error) {
switch name {
case DeployService, QueryService, DocumentService:
- url, err := t.urlWithPort(name)
+ url, err := t.serviceURL(name, t.targetType)
if err != nil {
return nil, err
}
@@ -79,20 +79,21 @@ func (t *customTarget) PrintLog(options LogOptions) error {
func (t *customTarget) CheckVersion(version version.Version) error { return nil }
-func (t *customTarget) urlWithPort(serviceName string) (string, error) {
+func (t *customTarget) serviceURL(name string, targetType string) (string, error) {
u, err := url.Parse(t.baseURL)
if err != nil {
return "", err
}
- port := u.Port()
- if port == "" {
- switch serviceName {
+ if targetType == TargetLocal {
+ // Use same ports as the vespaengine/vespa container image
+ port := ""
+ switch name {
case DeployService:
port = "19071"
case QueryService, DocumentService:
port = "8080"
default:
- return "", fmt.Errorf("unknown service: %s", serviceName)
+ return "", fmt.Errorf("unknown service: %s", name)
}
u.Host = u.Host + ":" + port
}
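serviceURL now appends the container image's default ports only when the target is local; any other custom base URL is used verbatim, as the updated test below confirms. A small stand-alone sketch of the local-target mapping (the plain service-name strings here are illustrative; the CLI uses the DeployService/QueryService/DocumentService constants):

package main

import (
	"fmt"
	"net/url"
)

// localServiceURL mirrors the TargetLocal branch of serviceURL above: append
// the vespaengine/vespa container image's default ports per service. This is
// an illustrative sketch, not the CLI's actual function.
func localServiceURL(baseURL, service string) (string, error) {
	u, err := url.Parse(baseURL)
	if err != nil {
		return "", err
	}
	port := ""
	switch service {
	case "deploy":
		port = "19071"
	case "query", "document":
		port = "8080"
	default:
		return "", fmt.Errorf("unknown service: %s", service)
	}
	u.Host = u.Host + ":" + port
	return u.String(), nil
}

func main() {
	for _, s := range []string{"deploy", "query", "document"} {
		u, _ := localServiceURL("http://127.0.0.1", s)
		fmt.Println(s, u)
	}
}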
diff --git a/client/go/internal/vespa/target_test.go b/client/go/internal/vespa/target_test.go
index bf266e8f9ec..6dc97f496f5 100644
--- a/client/go/internal/vespa/target_test.go
+++ b/client/go/internal/vespa/target_test.go
@@ -76,9 +76,9 @@ func TestCustomTarget(t *testing.T) {
assertServiceURL(t, "http://127.0.0.1:8080", lt, "document")
ct := CustomTarget(&mock.HTTPClient{}, "http://192.0.2.42", TLSOptions{})
- assertServiceURL(t, "http://192.0.2.42:19071", ct, "deploy")
- assertServiceURL(t, "http://192.0.2.42:8080", ct, "query")
- assertServiceURL(t, "http://192.0.2.42:8080", ct, "document")
+ assertServiceURL(t, "http://192.0.2.42", ct, "deploy")
+ assertServiceURL(t, "http://192.0.2.42", ct, "query")
+ assertServiceURL(t, "http://192.0.2.42", ct, "document")
ct2 := CustomTarget(&mock.HTTPClient{}, "http://192.0.2.42:60000", TLSOptions{})
assertServiceURL(t, "http://192.0.2.42:60000", ct2, "deploy")
diff --git a/client/js/app/package.json b/client/js/app/package.json
index 0fcb2d3ba1b..4aa1b8720c0 100644
--- a/client/js/app/package.json
+++ b/client/js/app/package.json
@@ -23,9 +23,7 @@
"@mantine/core": "^5",
"@mantine/hooks": "^5",
"@mantine/notifications": "^5",
- "@types/react": "^18",
- "@types/react-dom": "^18",
- "@vitejs/plugin-react": "^3",
+ "@vitejs/plugin-react": "^4",
"esbuild-jest": "^0",
"eslint": "^8",
"eslint-plugin-import": "^2",
diff --git a/client/js/app/yarn.lock b/client/js/app/yarn.lock
index b260b18c6da..3e180a63226 100644
--- a/client/js/app/yarn.lock
+++ b/client/js/app/yarn.lock
@@ -2,79 +2,80 @@
# yarn lockfile v1
-"@ampproject/remapping@^2.1.0":
- version "2.2.0"
- resolved "https://registry.yarnpkg.com/@ampproject/remapping/-/remapping-2.2.0.tgz#56c133824780de3174aed5ab6834f3026790154d"
- integrity sha512-qRmjj8nj9qmLTQXXmaR1cck3UXSRMPrbsLJAasZpF+t3riI71BXed5ebIOYwQntykeZuhjsdweEc9BxH5Jc26w==
+"@ampproject/remapping@^2.2.0":
+ version "2.2.1"
+ resolved "https://registry.yarnpkg.com/@ampproject/remapping/-/remapping-2.2.1.tgz#99e8e11851128b8702cd57c33684f1d0f260b630"
+ integrity sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==
dependencies:
- "@jridgewell/gen-mapping" "^0.1.0"
+ "@jridgewell/gen-mapping" "^0.3.0"
"@jridgewell/trace-mapping" "^0.3.9"
-"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.12.13", "@babel/code-frame@^7.18.6":
- version "7.18.6"
- resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.18.6.tgz#3b25d38c89600baa2dcc219edfa88a74eb2c427a"
- integrity sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q==
+"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.12.13", "@babel/code-frame@^7.21.4":
+ version "7.21.4"
+ resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.21.4.tgz#d0fa9e4413aca81f2b23b9442797bda1826edb39"
+ integrity sha512-LYvhNKfwWSPpocw8GI7gpK2nq3HSDuEPC/uSYaALSJu9xjsalaaYFOq0Pwt5KmVqwEbZlDu81aLXwBOmD/Fv9g==
dependencies:
"@babel/highlight" "^7.18.6"
-"@babel/compat-data@^7.20.5":
- version "7.20.14"
- resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.20.14.tgz#4106fc8b755f3e3ee0a0a7c27dde5de1d2b2baf8"
- integrity sha512-0YpKHD6ImkWMEINCyDAD0HLLUH/lPCefG8ld9it8DJB2wnApraKuhgYTvTY1z7UFIfBTGy5LwncZ+5HWWGbhFw==
-
-"@babel/core@^7.1.0", "@babel/core@^7.11.6", "@babel/core@^7.12.17", "@babel/core@^7.12.3", "@babel/core@^7.20.7":
- version "7.20.12"
- resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.20.12.tgz#7930db57443c6714ad216953d1356dac0eb8496d"
- integrity sha512-XsMfHovsUYHFMdrIHkZphTN/2Hzzi78R08NuHfDBehym2VsPDL6Zn/JAD/JQdnRvbSsbQc4mVaU1m6JgtTEElg==
- dependencies:
- "@ampproject/remapping" "^2.1.0"
- "@babel/code-frame" "^7.18.6"
- "@babel/generator" "^7.20.7"
- "@babel/helper-compilation-targets" "^7.20.7"
- "@babel/helper-module-transforms" "^7.20.11"
- "@babel/helpers" "^7.20.7"
- "@babel/parser" "^7.20.7"
- "@babel/template" "^7.20.7"
- "@babel/traverse" "^7.20.12"
- "@babel/types" "^7.20.7"
+"@babel/compat-data@^7.22.0":
+ version "7.22.3"
+ resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.22.3.tgz#cd502a6a0b6e37d7ad72ce7e71a7160a3ae36f7e"
+ integrity sha512-aNtko9OPOwVESUFp3MZfD8Uzxl7JzSeJpd7npIoxCasU37PFbAQRpKglkaKwlHOyeJdrREpo8TW8ldrkYWwvIQ==
+
+"@babel/core@^7.1.0", "@babel/core@^7.11.6", "@babel/core@^7.12.17", "@babel/core@^7.12.3", "@babel/core@^7.21.4":
+ version "7.22.1"
+ resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.22.1.tgz#5de51c5206f4c6f5533562838337a603c1033cfd"
+ integrity sha512-Hkqu7J4ynysSXxmAahpN1jjRwVJ+NdpraFLIWflgjpVob3KNyK3/tIUc7Q7szed8WMp0JNa7Qtd1E9Oo22F9gA==
+ dependencies:
+ "@ampproject/remapping" "^2.2.0"
+ "@babel/code-frame" "^7.21.4"
+ "@babel/generator" "^7.22.0"
+ "@babel/helper-compilation-targets" "^7.22.1"
+ "@babel/helper-module-transforms" "^7.22.1"
+ "@babel/helpers" "^7.22.0"
+ "@babel/parser" "^7.22.0"
+ "@babel/template" "^7.21.9"
+ "@babel/traverse" "^7.22.1"
+ "@babel/types" "^7.22.0"
convert-source-map "^1.7.0"
debug "^4.1.0"
gensync "^1.0.0-beta.2"
json5 "^2.2.2"
semver "^6.3.0"
-"@babel/generator@^7.20.7", "@babel/generator@^7.7.2":
- version "7.20.14"
- resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.20.14.tgz#9fa772c9f86a46c6ac9b321039400712b96f64ce"
- integrity sha512-AEmuXHdcD3A52HHXxaTmYlb8q/xMEhoRP67B3T4Oq7lbmSoqroMZzjnGj3+i1io3pdnF8iBYVu4Ilj+c4hBxYg==
+"@babel/generator@^7.22.0", "@babel/generator@^7.22.3", "@babel/generator@^7.7.2":
+ version "7.22.3"
+ resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.22.3.tgz#0ff675d2edb93d7596c5f6728b52615cfc0df01e"
+ integrity sha512-C17MW4wlk//ES/CJDL51kPNwl+qiBQyN7b9SKyVp11BLGFeSPoVaHrv+MNt8jwQFhQWowW88z1eeBx3pFz9v8A==
dependencies:
- "@babel/types" "^7.20.7"
+ "@babel/types" "^7.22.3"
"@jridgewell/gen-mapping" "^0.3.2"
+ "@jridgewell/trace-mapping" "^0.3.17"
jsesc "^2.5.1"
-"@babel/helper-compilation-targets@^7.20.7":
- version "7.20.7"
- resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.20.7.tgz#a6cd33e93629f5eb473b021aac05df62c4cd09bb"
- integrity sha512-4tGORmfQcrc+bvrjb5y3dG9Mx1IOZjsHqQVUz7XCNHO+iTmqxWnVg3KRygjGmpRLJGdQSKuvFinbIb0CnZwHAQ==
+"@babel/helper-compilation-targets@^7.22.1":
+ version "7.22.1"
+ resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.1.tgz#bfcd6b7321ffebe33290d68550e2c9d7eb7c7a58"
+ integrity sha512-Rqx13UM3yVB5q0D/KwQ8+SPfX/+Rnsy1Lw1k/UwOC4KC6qrzIQoY3lYnBu5EHKBlEHHcj0M0W8ltPSkD8rqfsQ==
dependencies:
- "@babel/compat-data" "^7.20.5"
- "@babel/helper-validator-option" "^7.18.6"
+ "@babel/compat-data" "^7.22.0"
+ "@babel/helper-validator-option" "^7.21.0"
browserslist "^4.21.3"
lru-cache "^5.1.1"
semver "^6.3.0"
-"@babel/helper-environment-visitor@^7.18.9":
- version "7.18.9"
- resolved "https://registry.yarnpkg.com/@babel/helper-environment-visitor/-/helper-environment-visitor-7.18.9.tgz#0c0cee9b35d2ca190478756865bb3528422f51be"
- integrity sha512-3r/aACDJ3fhQ/EVgFy0hpj8oHyHpQc+LPtJoY9SzTThAsStm4Ptegq92vqKoE3vD706ZVFWITnMnxucw+S9Ipg==
+"@babel/helper-environment-visitor@^7.22.1":
+ version "7.22.1"
+ resolved "https://registry.yarnpkg.com/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.1.tgz#ac3a56dbada59ed969d712cf527bd8271fe3eba8"
+ integrity sha512-Z2tgopurB/kTbidvzeBrc2To3PUP/9i5MUe+fU6QJCQDyPwSH2oRapkLw3KGECDYSjhQZCNxEvNvZlLw8JjGwA==
-"@babel/helper-function-name@^7.19.0":
- version "7.19.0"
- resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.19.0.tgz#941574ed5390682e872e52d3f38ce9d1bef4648c"
- integrity sha512-WAwHBINyrpqywkUH0nTnNgI5ina5TFn85HKS0pbPDfxFfhyR/aNQEn4hGi1P1JyT//I0t4OgXUlofzWILRvS5w==
+"@babel/helper-function-name@^7.21.0":
+ version "7.21.0"
+ resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.21.0.tgz#d552829b10ea9f120969304023cd0645fa00b1b4"
+ integrity sha512-HfK1aMRanKHpxemaY2gqBmL04iAPOPRj7DxtNbiDOrJK+gdwkiNRVpCpUJYbUT+aZyemKN8brqTOxzCaG6ExRg==
dependencies:
- "@babel/template" "^7.18.10"
- "@babel/types" "^7.19.0"
+ "@babel/template" "^7.20.7"
+ "@babel/types" "^7.21.0"
"@babel/helper-hoist-variables@^7.18.6":
version "7.18.6"
@@ -83,38 +84,38 @@
dependencies:
"@babel/types" "^7.18.6"
-"@babel/helper-module-imports@^7.16.7", "@babel/helper-module-imports@^7.18.6":
- version "7.18.6"
- resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.18.6.tgz#1e3ebdbbd08aad1437b428c50204db13c5a3ca6e"
- integrity sha512-0NFvs3VkuSYbFi1x2Vd6tKrywq+z/cLeYC/RJNFrIX/30Bf5aiGYbtvGXolEktzJH8o5E5KJ3tT+nkxuuZFVlA==
+"@babel/helper-module-imports@^7.16.7", "@babel/helper-module-imports@^7.21.4":
+ version "7.21.4"
+ resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.21.4.tgz#ac88b2f76093637489e718a90cec6cf8a9b029af"
+ integrity sha512-orajc5T2PsRYUN3ZryCEFeMDYwyw09c/pZeaQEZPH0MpKzSvn3e0uXsDBu3k03VI+9DBiRo+l22BfKTpKwa/Wg==
dependencies:
- "@babel/types" "^7.18.6"
+ "@babel/types" "^7.21.4"
-"@babel/helper-module-transforms@^7.20.11":
- version "7.20.11"
- resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.20.11.tgz#df4c7af713c557938c50ea3ad0117a7944b2f1b0"
- integrity sha512-uRy78kN4psmji1s2QtbtcCSaj/LILFDp0f/ymhpQH5QY3nljUZCaNWz9X1dEj/8MBdBEFECs7yRhKn8i7NjZgg==
+"@babel/helper-module-transforms@^7.21.5", "@babel/helper-module-transforms@^7.22.1":
+ version "7.22.1"
+ resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.22.1.tgz#e0cad47fedcf3cae83c11021696376e2d5a50c63"
+ integrity sha512-dxAe9E7ySDGbQdCVOY/4+UcD8M9ZFqZcZhSPsPacvCG4M+9lwtDDQfI2EoaSvmf7W/8yCBkGU0m7Pvt1ru3UZw==
dependencies:
- "@babel/helper-environment-visitor" "^7.18.9"
- "@babel/helper-module-imports" "^7.18.6"
- "@babel/helper-simple-access" "^7.20.2"
+ "@babel/helper-environment-visitor" "^7.22.1"
+ "@babel/helper-module-imports" "^7.21.4"
+ "@babel/helper-simple-access" "^7.21.5"
"@babel/helper-split-export-declaration" "^7.18.6"
"@babel/helper-validator-identifier" "^7.19.1"
- "@babel/template" "^7.20.7"
- "@babel/traverse" "^7.20.10"
- "@babel/types" "^7.20.7"
+ "@babel/template" "^7.21.9"
+ "@babel/traverse" "^7.22.1"
+ "@babel/types" "^7.22.0"
-"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.10.4", "@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.14.5", "@babel/helper-plugin-utils@^7.18.6", "@babel/helper-plugin-utils@^7.19.0", "@babel/helper-plugin-utils@^7.20.2", "@babel/helper-plugin-utils@^7.8.0":
- version "7.20.2"
- resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.20.2.tgz#d1b9000752b18d0877cff85a5c376ce5c3121629"
- integrity sha512-8RvlJG2mj4huQ4pZ+rU9lqKi9ZKiRmuvGuM2HlWmkmgOhbs6zEAw6IEiJ5cQqGbDzGZOhwuOQNtZMi/ENLjZoQ==
+"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.10.4", "@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.14.5", "@babel/helper-plugin-utils@^7.19.0", "@babel/helper-plugin-utils@^7.20.2", "@babel/helper-plugin-utils@^7.21.5", "@babel/helper-plugin-utils@^7.8.0":
+ version "7.21.5"
+ resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.21.5.tgz#345f2377d05a720a4e5ecfa39cbf4474a4daed56"
+ integrity sha512-0WDaIlXKOX/3KfBK/dwP1oQGiPh6rjMkT7HIRv7i5RR2VUMwrx5ZL0dwBkKx7+SW1zwNdgjHd34IMk5ZjTeHVg==
-"@babel/helper-simple-access@^7.20.2":
- version "7.20.2"
- resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.20.2.tgz#0ab452687fe0c2cfb1e2b9e0015de07fc2d62dd9"
- integrity sha512-+0woI/WPq59IrqDYbVGfshjT5Dmk/nnbdpcF8SnMhhXObpTq2KNBdLFRFrkVdbDOyUmHBCxzm5FHV1rACIkIbA==
+"@babel/helper-simple-access@^7.21.5":
+ version "7.21.5"
+ resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.21.5.tgz#d697a7971a5c39eac32c7e63c0921c06c8a249ee"
+ integrity sha512-ENPDAMC1wAjR0uaCUwliBdiSl1KBJAVnMTzXqi64c2MG8MPR6ii4qf7bSXDqSFbr4W6W028/rf5ivoHop5/mkg==
dependencies:
- "@babel/types" "^7.20.2"
+ "@babel/types" "^7.21.5"
"@babel/helper-split-export-declaration@^7.18.6":
version "7.18.6"
@@ -123,29 +124,29 @@
dependencies:
"@babel/types" "^7.18.6"
-"@babel/helper-string-parser@^7.19.4":
- version "7.19.4"
- resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.19.4.tgz#38d3acb654b4701a9b77fb0615a96f775c3a9e63"
- integrity sha512-nHtDoQcuqFmwYNYPz3Rah5ph2p8PFeFCsZk9A/48dPc/rGocJ5J3hAAZ7pb76VWX3fZKu+uEr/FhH5jLx7umrw==
+"@babel/helper-string-parser@^7.21.5":
+ version "7.21.5"
+ resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.21.5.tgz#2b3eea65443c6bdc31c22d037c65f6d323b6b2bd"
+ integrity sha512-5pTUx3hAJaZIdW99sJ6ZUUgWq/Y+Hja7TowEnLNMm1VivRgZQL3vpBY3qUACVsvw+yQU6+YgfBVmcbLaZtrA1w==
"@babel/helper-validator-identifier@^7.18.6", "@babel/helper-validator-identifier@^7.19.1":
version "7.19.1"
resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.19.1.tgz#7eea834cf32901ffdc1a7ee555e2f9c27e249ca2"
integrity sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w==
-"@babel/helper-validator-option@^7.18.6":
- version "7.18.6"
- resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.18.6.tgz#bf0d2b5a509b1f336099e4ff36e1a63aa5db4db8"
- integrity sha512-XO7gESt5ouv/LRJdrVjkShckw6STTaB7l9BrpBaAHDeF5YZT+01PCwmR0SJHnkW6i8OwW/EVWRShfi4j2x+KQw==
+"@babel/helper-validator-option@^7.21.0":
+ version "7.21.0"
+ resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.21.0.tgz#8224c7e13ace4bafdc4004da2cf064ef42673180"
+ integrity sha512-rmL/B8/f0mKS2baE9ZpyTcTavvEuWhTTW8amjzXNvYG4AwBsqTLikfXsEofsJEfKHf+HQVQbFOHy6o+4cnC/fQ==
-"@babel/helpers@^7.20.7":
- version "7.20.13"
- resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.20.13.tgz#e3cb731fb70dc5337134cadc24cbbad31cc87ad2"
- integrity sha512-nzJ0DWCL3gB5RCXbUO3KIMMsBY2Eqbx8mBpKGE/02PgyRQFcPQLbkQ1vyy596mZLaP+dAfD+R4ckASzNVmW3jg==
+"@babel/helpers@^7.22.0":
+ version "7.22.3"
+ resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.22.3.tgz#53b74351da9684ea2f694bf0877998da26dd830e"
+ integrity sha512-jBJ7jWblbgr7r6wYZHMdIqKc73ycaTcCaWRq4/2LpuPHcx7xMlZvpGQkOYc9HeSjn6rcx15CPlgVcBtZ4WZJ2w==
dependencies:
- "@babel/template" "^7.20.7"
- "@babel/traverse" "^7.20.13"
- "@babel/types" "^7.20.7"
+ "@babel/template" "^7.21.9"
+ "@babel/traverse" "^7.22.1"
+ "@babel/types" "^7.22.3"
"@babel/highlight@^7.18.6":
version "7.18.6"
@@ -156,10 +157,10 @@
chalk "^2.0.0"
js-tokens "^4.0.0"
-"@babel/parser@^7.1.0", "@babel/parser@^7.14.7", "@babel/parser@^7.20.13", "@babel/parser@^7.20.7":
- version "7.20.13"
- resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.20.13.tgz#ddf1eb5a813588d2fb1692b70c6fce75b945c088"
- integrity sha512-gFDLKMfpiXCsjt4za2JA9oTMn70CeseCehb11kRZgvd7+F67Hih3OHOK24cRrWECJ/ljfPGac6ygXAs/C8kIvw==
+"@babel/parser@^7.1.0", "@babel/parser@^7.14.7", "@babel/parser@^7.20.7", "@babel/parser@^7.21.9", "@babel/parser@^7.22.0", "@babel/parser@^7.22.4":
+ version "7.22.4"
+ resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.22.4.tgz#a770e98fd785c231af9d93f6459d36770993fb32"
+ integrity sha512-VLLsx06XkEYqBtE5YGPwfSGwfrjnyPP5oiGty3S8pQLFDFLaS8VwWSIxkTXpcvr5zeYLE6+MBNl2npl/YnfofA==
"@babel/plugin-syntax-async-generators@^7.8.4":
version "7.8.4"
@@ -196,12 +197,12 @@
dependencies:
"@babel/helper-plugin-utils" "^7.8.0"
-"@babel/plugin-syntax-jsx@^7.17.12", "@babel/plugin-syntax-jsx@^7.7.2":
- version "7.18.6"
- resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.18.6.tgz#a8feef63b010150abd97f1649ec296e849943ca0"
- integrity sha512-6mmljtAedFGTWu2p/8WIORGwy+61PLgOMPOdazc7YoJ9ZCWUyFy3A6CpPkRKLKD1ToAesxX8KGEViAiLo9N+7Q==
+"@babel/plugin-syntax-jsx@^7.7.2":
+ version "7.21.4"
+ resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.21.4.tgz#f264ed7bf40ffc9ec239edabc17a50c4f5b6fea2"
+ integrity sha512-5hewiLct5OKyh6PLKEYaFclcqtIgCb6bmELouxjF6up5q3Sov7rOayW4RwhbaBL0dit8rA80GNfY+UuDp2mBbQ==
dependencies:
- "@babel/helper-plugin-utils" "^7.18.6"
+ "@babel/helper-plugin-utils" "^7.20.2"
"@babel/plugin-syntax-logical-assignment-operators@^7.8.3":
version "7.10.4"
@@ -253,27 +254,27 @@
"@babel/helper-plugin-utils" "^7.14.5"
"@babel/plugin-syntax-typescript@^7.7.2":
- version "7.20.0"
- resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.20.0.tgz#4e9a0cfc769c85689b77a2e642d24e9f697fc8c7"
- integrity sha512-rd9TkG+u1CExzS4SM1BlMEhMXwFLKVjOAFFCDx9PbX5ycJWDoWMcwdJH9RhkPu1dOgn5TrxLot/Gx6lWFuAUNQ==
+ version "7.21.4"
+ resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.21.4.tgz#2751948e9b7c6d771a8efa59340c15d4a2891ff8"
+ integrity sha512-xz0D39NvhQn4t4RNsHmDnnsaQizIlUkdtYvLs8La1BlfjQ6JEwxkJGeqJMW2tAXx+q6H+WFuUTXNdYVpEya0YA==
dependencies:
- "@babel/helper-plugin-utils" "^7.19.0"
+ "@babel/helper-plugin-utils" "^7.20.2"
"@babel/plugin-transform-modules-commonjs@^7.12.13":
- version "7.20.11"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.20.11.tgz#8cb23010869bf7669fd4b3098598b6b2be6dc607"
- integrity sha512-S8e1f7WQ7cimJQ51JkAaDrEtohVEitXjgCGAS2N8S31Y42E+kWwfSz83LYz57QdBm7q9diARVqanIaH2oVgQnw==
+ version "7.21.5"
+ resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.21.5.tgz#d69fb947eed51af91de82e4708f676864e5e47bc"
+ integrity sha512-OVryBEgKUbtqMoB7eG2rs6UFexJi6Zj6FDXx+esBLPTCxCNxAY9o+8Di7IsUGJ+AVhp5ncK0fxWUBd0/1gPhrQ==
dependencies:
- "@babel/helper-module-transforms" "^7.20.11"
- "@babel/helper-plugin-utils" "^7.20.2"
- "@babel/helper-simple-access" "^7.20.2"
+ "@babel/helper-module-transforms" "^7.21.5"
+ "@babel/helper-plugin-utils" "^7.21.5"
+ "@babel/helper-simple-access" "^7.21.5"
-"@babel/plugin-transform-react-jsx-self@^7.18.6":
- version "7.18.6"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.18.6.tgz#3849401bab7ae8ffa1e3e5687c94a753fc75bda7"
- integrity sha512-A0LQGx4+4Jv7u/tWzoJF7alZwnBDQd6cGLh9P+Ttk4dpiL+J5p7NSNv/9tlEFFJDq3kjxOavWmbm6t0Gk+A3Ig==
+"@babel/plugin-transform-react-jsx-self@^7.21.0":
+ version "7.21.0"
+ resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.21.0.tgz#ec98d4a9baafc5a1eb398da4cf94afbb40254a54"
+ integrity sha512-f/Eq+79JEu+KUANFks9UZCcvydOOGMgF7jBrcwjHa5jTZD8JivnhCJYvmlhR/WTXBWonDExPoW0eO/CR4QJirA==
dependencies:
- "@babel/helper-plugin-utils" "^7.18.6"
+ "@babel/helper-plugin-utils" "^7.20.2"
"@babel/plugin-transform-react-jsx-source@^7.19.6":
version "7.19.6"
@@ -283,43 +284,43 @@
"@babel/helper-plugin-utils" "^7.19.0"
"@babel/runtime@^7.10.2", "@babel/runtime@^7.12.5", "@babel/runtime@^7.13.10", "@babel/runtime@^7.18.3", "@babel/runtime@^7.5.5", "@babel/runtime@^7.8.7":
- version "7.20.13"
- resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.20.13.tgz#7055ab8a7cff2b8f6058bf6ae45ff84ad2aded4b"
- integrity sha512-gt3PKXs0DBoL9xCvOIIZ2NEqAGZqHjAnmVbfQtB620V0uReIQutpel14KcneZuer7UioY8ALKZ7iocavvzTNFA==
+ version "7.22.3"
+ resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.22.3.tgz#0a7fce51d43adbf0f7b517a71f4c3aaca92ebcbb"
+ integrity sha512-XsDuspWKLUsxwCp6r7EhsExHtYfbe5oAGQ19kqngTdCPUoPQzOPdUbD/pB9PJiwb2ptYKQDjSJT3R6dC+EPqfQ==
dependencies:
regenerator-runtime "^0.13.11"
-"@babel/template@^7.18.10", "@babel/template@^7.20.7", "@babel/template@^7.3.3":
- version "7.20.7"
- resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.20.7.tgz#a15090c2839a83b02aa996c0b4994005841fd5a8"
- integrity sha512-8SegXApWe6VoNw0r9JHpSteLKTpTiLZ4rMlGIm9JQ18KiCtyQiAMEazujAHrUS5flrcqYZa75ukev3P6QmUwUw==
+"@babel/template@^7.20.7", "@babel/template@^7.21.9", "@babel/template@^7.3.3":
+ version "7.21.9"
+ resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.21.9.tgz#bf8dad2859130ae46088a99c1f265394877446fb"
+ integrity sha512-MK0X5k8NKOuWRamiEfc3KEJiHMTkGZNUjzMipqCGDDc6ijRl/B7RGSKVGncu4Ro/HdyzzY6cmoXuKI2Gffk7vQ==
dependencies:
- "@babel/code-frame" "^7.18.6"
- "@babel/parser" "^7.20.7"
- "@babel/types" "^7.20.7"
+ "@babel/code-frame" "^7.21.4"
+ "@babel/parser" "^7.21.9"
+ "@babel/types" "^7.21.5"
-"@babel/traverse@^7.20.10", "@babel/traverse@^7.20.12", "@babel/traverse@^7.20.13", "@babel/traverse@^7.7.2":
- version "7.20.13"
- resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.20.13.tgz#817c1ba13d11accca89478bd5481b2d168d07473"
- integrity sha512-kMJXfF0T6DIS9E8cgdLCSAL+cuCK+YEZHWiLK0SXpTo8YRj5lpJu3CDNKiIBCne4m9hhTIqUg6SYTAI39tAiVQ==
+"@babel/traverse@^7.22.1", "@babel/traverse@^7.7.2":
+ version "7.22.4"
+ resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.22.4.tgz#c3cf96c5c290bd13b55e29d025274057727664c0"
+ integrity sha512-Tn1pDsjIcI+JcLKq1AVlZEr4226gpuAQTsLMorsYg9tuS/kG7nuwwJ4AB8jfQuEgb/COBwR/DqJxmoiYFu5/rQ==
dependencies:
- "@babel/code-frame" "^7.18.6"
- "@babel/generator" "^7.20.7"
- "@babel/helper-environment-visitor" "^7.18.9"
- "@babel/helper-function-name" "^7.19.0"
+ "@babel/code-frame" "^7.21.4"
+ "@babel/generator" "^7.22.3"
+ "@babel/helper-environment-visitor" "^7.22.1"
+ "@babel/helper-function-name" "^7.21.0"
"@babel/helper-hoist-variables" "^7.18.6"
"@babel/helper-split-export-declaration" "^7.18.6"
- "@babel/parser" "^7.20.13"
- "@babel/types" "^7.20.7"
+ "@babel/parser" "^7.22.4"
+ "@babel/types" "^7.22.4"
debug "^4.1.0"
globals "^11.1.0"
-"@babel/types@^7.0.0", "@babel/types@^7.18.6", "@babel/types@^7.19.0", "@babel/types@^7.20.2", "@babel/types@^7.20.7", "@babel/types@^7.3.0", "@babel/types@^7.3.3":
- version "7.20.7"
- resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.20.7.tgz#54ec75e252318423fc07fb644dc6a58a64c09b7f"
- integrity sha512-69OnhBxSSgK0OzTJai4kyPDiKTIe3j+ctaHdIGVbRahTLAT7L3R9oeXHC2aVSuGYt3cVnoAMDmOCgJ2yaiLMvg==
+"@babel/types@^7.0.0", "@babel/types@^7.18.6", "@babel/types@^7.20.7", "@babel/types@^7.21.0", "@babel/types@^7.21.4", "@babel/types@^7.21.5", "@babel/types@^7.22.0", "@babel/types@^7.22.3", "@babel/types@^7.22.4", "@babel/types@^7.3.3":
+ version "7.22.4"
+ resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.22.4.tgz#56a2653ae7e7591365dabf20b76295410684c071"
+ integrity sha512-Tx9x3UBHTTsMSW85WB2kphxYQVvrZ/t1FxD88IpSgIjiUJlCm9z+xWIDwyo1vffTwSqteqyznB8ZE9vYYk16zA==
dependencies:
- "@babel/helper-string-parser" "^7.19.4"
+ "@babel/helper-string-parser" "^7.21.5"
"@babel/helper-validator-identifier" "^7.19.1"
to-fast-properties "^2.0.0"
@@ -336,213 +337,224 @@
exec-sh "^0.3.2"
minimist "^1.2.0"
-"@emotion/babel-plugin@^11.10.5":
- version "11.10.5"
- resolved "https://registry.yarnpkg.com/@emotion/babel-plugin/-/babel-plugin-11.10.5.tgz#65fa6e1790ddc9e23cc22658a4c5dea423c55c3c"
- integrity sha512-xE7/hyLHJac7D2Ve9dKroBBZqBT7WuPQmWcq7HSGb84sUuP4mlOWoB8dvVfD9yk5DHkU1m6RW7xSoDtnQHNQeA==
+"@emotion/babel-plugin@^11.11.0":
+ version "11.11.0"
+ resolved "https://registry.yarnpkg.com/@emotion/babel-plugin/-/babel-plugin-11.11.0.tgz#c2d872b6a7767a9d176d007f5b31f7d504bb5d6c"
+ integrity sha512-m4HEDZleaaCH+XgDDsPF15Ht6wTLsgDTeR3WYj9Q/k76JtWhrJjcP4+/XlG8LGT/Rol9qUfOIztXeA84ATpqPQ==
dependencies:
"@babel/helper-module-imports" "^7.16.7"
- "@babel/plugin-syntax-jsx" "^7.17.12"
"@babel/runtime" "^7.18.3"
- "@emotion/hash" "^0.9.0"
- "@emotion/memoize" "^0.8.0"
- "@emotion/serialize" "^1.1.1"
+ "@emotion/hash" "^0.9.1"
+ "@emotion/memoize" "^0.8.1"
+ "@emotion/serialize" "^1.1.2"
babel-plugin-macros "^3.1.0"
convert-source-map "^1.5.0"
escape-string-regexp "^4.0.0"
find-root "^1.1.0"
source-map "^0.5.7"
- stylis "4.1.3"
+ stylis "4.2.0"
-"@emotion/cache@^11.10.5":
- version "11.10.5"
- resolved "https://registry.yarnpkg.com/@emotion/cache/-/cache-11.10.5.tgz#c142da9351f94e47527ed458f7bbbbe40bb13c12"
- integrity sha512-dGYHWyzTdmK+f2+EnIGBpkz1lKc4Zbj2KHd4cX3Wi8/OWr5pKslNjc3yABKH4adRGCvSX4VDC0i04mrrq0aiRA==
+"@emotion/cache@^11.11.0":
+ version "11.11.0"
+ resolved "https://registry.yarnpkg.com/@emotion/cache/-/cache-11.11.0.tgz#809b33ee6b1cb1a625fef7a45bc568ccd9b8f3ff"
+ integrity sha512-P34z9ssTCBi3e9EI1ZsWpNHcfY1r09ZO0rZbRO2ob3ZQMnFI35jB536qoXbkdesr5EUhYi22anuEJuyxifaqAQ==
dependencies:
- "@emotion/memoize" "^0.8.0"
- "@emotion/sheet" "^1.2.1"
- "@emotion/utils" "^1.2.0"
- "@emotion/weak-memoize" "^0.3.0"
- stylis "4.1.3"
+ "@emotion/memoize" "^0.8.1"
+ "@emotion/sheet" "^1.2.2"
+ "@emotion/utils" "^1.2.1"
+ "@emotion/weak-memoize" "^0.3.1"
+ stylis "4.2.0"
-"@emotion/hash@^0.9.0":
- version "0.9.0"
- resolved "https://registry.yarnpkg.com/@emotion/hash/-/hash-0.9.0.tgz#c5153d50401ee3c027a57a177bc269b16d889cb7"
- integrity sha512-14FtKiHhy2QoPIzdTcvh//8OyBlknNs2nXRwIhG904opCby3l+9Xaf/wuPvICBF0rc1ZCNBd3nKe9cd2mecVkQ==
+"@emotion/hash@^0.9.1":
+ version "0.9.1"
+ resolved "https://registry.yarnpkg.com/@emotion/hash/-/hash-0.9.1.tgz#4ffb0055f7ef676ebc3a5a91fb621393294e2f43"
+ integrity sha512-gJB6HLm5rYwSLI6PQa+X1t5CFGrv1J1TWG+sOyMCeKz2ojaj6Fnl/rZEspogG+cvqbt4AE/2eIyD2QfLKTBNlQ==
-"@emotion/memoize@^0.8.0":
- version "0.8.0"
- resolved "https://registry.yarnpkg.com/@emotion/memoize/-/memoize-0.8.0.tgz#f580f9beb67176fa57aae70b08ed510e1b18980f"
- integrity sha512-G/YwXTkv7Den9mXDO7AhLWkE3q+I92B+VqAE+dYG4NGPaHZGvt3G8Q0p9vmE+sq7rTGphUbAvmQ9YpbfMQGGlA==
+"@emotion/memoize@^0.8.1":
+ version "0.8.1"
+ resolved "https://registry.yarnpkg.com/@emotion/memoize/-/memoize-0.8.1.tgz#c1ddb040429c6d21d38cc945fe75c818cfb68e17"
+ integrity sha512-W2P2c/VRW1/1tLox0mVUalvnWXxavmv/Oum2aPsRcoDJuob75FC3Y8FbpfLwUegRcxINtGUMPq0tFCvYNTBXNA==
"@emotion/react@^11":
- version "11.10.5"
- resolved "https://registry.yarnpkg.com/@emotion/react/-/react-11.10.5.tgz#95fff612a5de1efa9c0d535384d3cfa115fe175d"
- integrity sha512-TZs6235tCJ/7iF6/rvTaOH4oxQg2gMAcdHemjwLKIjKz4rRuYe1HJ2TQJKnAcRAfOUDdU8XoDadCe1rl72iv8A==
+ version "11.11.1"
+ resolved "https://registry.yarnpkg.com/@emotion/react/-/react-11.11.1.tgz#b2c36afac95b184f73b08da8c214fdf861fa4157"
+ integrity sha512-5mlW1DquU5HaxjLkfkGN1GA/fvVGdyHURRiX/0FHl2cfIfRxSOfmxEH5YS43edp0OldZrZ+dkBKbngxcNCdZvA==
dependencies:
"@babel/runtime" "^7.18.3"
- "@emotion/babel-plugin" "^11.10.5"
- "@emotion/cache" "^11.10.5"
- "@emotion/serialize" "^1.1.1"
- "@emotion/use-insertion-effect-with-fallbacks" "^1.0.0"
- "@emotion/utils" "^1.2.0"
- "@emotion/weak-memoize" "^0.3.0"
+ "@emotion/babel-plugin" "^11.11.0"
+ "@emotion/cache" "^11.11.0"
+ "@emotion/serialize" "^1.1.2"
+ "@emotion/use-insertion-effect-with-fallbacks" "^1.0.1"
+ "@emotion/utils" "^1.2.1"
+ "@emotion/weak-memoize" "^0.3.1"
hoist-non-react-statics "^3.3.1"
-"@emotion/serialize@^1.1.1":
- version "1.1.1"
- resolved "https://registry.yarnpkg.com/@emotion/serialize/-/serialize-1.1.1.tgz#0595701b1902feded8a96d293b26be3f5c1a5cf0"
- integrity sha512-Zl/0LFggN7+L1liljxXdsVSVlg6E/Z/olVWpfxUTxOAmi8NU7YoeWeLfi1RmnB2TATHoaWwIBRoL+FvAJiTUQA==
+"@emotion/serialize@^1.1.2":
+ version "1.1.2"
+ resolved "https://registry.yarnpkg.com/@emotion/serialize/-/serialize-1.1.2.tgz#017a6e4c9b8a803bd576ff3d52a0ea6fa5a62b51"
+ integrity sha512-zR6a/fkFP4EAcCMQtLOhIgpprZOwNmCldtpaISpvz348+DP4Mz8ZoKaGGCQpbzepNIUWbq4w6hNZkwDyKoS+HA==
dependencies:
- "@emotion/hash" "^0.9.0"
- "@emotion/memoize" "^0.8.0"
- "@emotion/unitless" "^0.8.0"
- "@emotion/utils" "^1.2.0"
+ "@emotion/hash" "^0.9.1"
+ "@emotion/memoize" "^0.8.1"
+ "@emotion/unitless" "^0.8.1"
+ "@emotion/utils" "^1.2.1"
csstype "^3.0.2"
-"@emotion/sheet@^1.2.1":
- version "1.2.1"
- resolved "https://registry.yarnpkg.com/@emotion/sheet/-/sheet-1.2.1.tgz#0767e0305230e894897cadb6c8df2c51e61a6c2c"
- integrity sha512-zxRBwl93sHMsOj4zs+OslQKg/uhF38MB+OMKoCrVuS0nyTkqnau+BM3WGEoOptg9Oz45T/aIGs1qbVAsEFo3nA==
+"@emotion/sheet@^1.2.2":
+ version "1.2.2"
+ resolved "https://registry.yarnpkg.com/@emotion/sheet/-/sheet-1.2.2.tgz#d58e788ee27267a14342303e1abb3d508b6d0fec"
+ integrity sha512-0QBtGvaqtWi+nx6doRwDdBIzhNdZrXUppvTM4dtZZWEGTXL/XE/yJxLMGlDT1Gt+UHH5IX1n+jkXyytE/av7OA==
-"@emotion/unitless@^0.8.0":
- version "0.8.0"
- resolved "https://registry.yarnpkg.com/@emotion/unitless/-/unitless-0.8.0.tgz#a4a36e9cbdc6903737cd20d38033241e1b8833db"
- integrity sha512-VINS5vEYAscRl2ZUDiT3uMPlrFQupiKgHz5AA4bCH1miKBg4qtwkim1qPmJj/4WG6TreYMY111rEFsjupcOKHw==
+"@emotion/unitless@^0.8.1":
+ version "0.8.1"
+ resolved "https://registry.yarnpkg.com/@emotion/unitless/-/unitless-0.8.1.tgz#182b5a4704ef8ad91bde93f7a860a88fd92c79a3"
+ integrity sha512-KOEGMu6dmJZtpadb476IsZBclKvILjopjUii3V+7MnXIQCYh8W3NgNcgwo21n9LXZX6EDIKvqfjYxXebDwxKmQ==
-"@emotion/use-insertion-effect-with-fallbacks@^1.0.0":
- version "1.0.0"
- resolved "https://registry.yarnpkg.com/@emotion/use-insertion-effect-with-fallbacks/-/use-insertion-effect-with-fallbacks-1.0.0.tgz#ffadaec35dbb7885bd54de3fa267ab2f860294df"
- integrity sha512-1eEgUGmkaljiBnRMTdksDV1W4kUnmwgp7X9G8B++9GYwl1lUdqSndSriIrTJ0N7LQaoauY9JJ2yhiOYK5+NI4A==
+"@emotion/use-insertion-effect-with-fallbacks@^1.0.1":
+ version "1.0.1"
+ resolved "https://registry.yarnpkg.com/@emotion/use-insertion-effect-with-fallbacks/-/use-insertion-effect-with-fallbacks-1.0.1.tgz#08de79f54eb3406f9daaf77c76e35313da963963"
+ integrity sha512-jT/qyKZ9rzLErtrjGgdkMBn2OP8wl0G3sQlBb3YPryvKHsjvINUhVaPFfP+fpBcOkmrVOVEEHQFJ7nbj2TH2gw==
-"@emotion/utils@^1.2.0":
- version "1.2.0"
- resolved "https://registry.yarnpkg.com/@emotion/utils/-/utils-1.2.0.tgz#9716eaccbc6b5ded2ea5a90d65562609aab0f561"
- integrity sha512-sn3WH53Kzpw8oQ5mgMmIzzyAaH2ZqFEbozVVBSYp538E06OSE6ytOp7pRAjNQR+Q/orwqdQYJSe2m3hCOeznkw==
+"@emotion/utils@^1.2.1":
+ version "1.2.1"
+ resolved "https://registry.yarnpkg.com/@emotion/utils/-/utils-1.2.1.tgz#bbab58465738d31ae4cb3dbb6fc00a5991f755e4"
+ integrity sha512-Y2tGf3I+XVnajdItskUCn6LX+VUDmP6lTL4fcqsXAv43dnlbZiuW4MWQW38rW/BVWSE7Q/7+XQocmpnRYILUmg==
-"@emotion/weak-memoize@^0.3.0":
- version "0.3.0"
- resolved "https://registry.yarnpkg.com/@emotion/weak-memoize/-/weak-memoize-0.3.0.tgz#ea89004119dc42db2e1dba0f97d553f7372f6fcb"
- integrity sha512-AHPmaAx+RYfZz0eYu6Gviiagpmiyw98ySSlQvCUhVGDRtDFe4DBS0x1bSjdF3gqUDYOczB+yYvBTtEylYSdRhg==
-
-"@esbuild/android-arm64@0.16.17":
- version "0.16.17"
- resolved "https://registry.yarnpkg.com/@esbuild/android-arm64/-/android-arm64-0.16.17.tgz#cf91e86df127aa3d141744edafcba0abdc577d23"
- integrity sha512-MIGl6p5sc3RDTLLkYL1MyL8BMRN4tLMRCn+yRJJmEDvYZ2M7tmAf80hx1kbNEUX2KJ50RRtxZ4JHLvCfuB6kBg==
-
-"@esbuild/android-arm@0.16.17":
- version "0.16.17"
- resolved "https://registry.yarnpkg.com/@esbuild/android-arm/-/android-arm-0.16.17.tgz#025b6246d3f68b7bbaa97069144fb5fb70f2fff2"
- integrity sha512-N9x1CMXVhtWEAMS7pNNONyA14f71VPQN9Cnavj1XQh6T7bskqiLLrSca4O0Vr8Wdcga943eThxnVp3JLnBMYtw==
-
-"@esbuild/android-x64@0.16.17":
- version "0.16.17"
- resolved "https://registry.yarnpkg.com/@esbuild/android-x64/-/android-x64-0.16.17.tgz#c820e0fef982f99a85c4b8bfdd582835f04cd96e"
- integrity sha512-a3kTv3m0Ghh4z1DaFEuEDfz3OLONKuFvI4Xqczqx4BqLyuFaFkuaG4j2MtA6fuWEFeC5x9IvqnX7drmRq/fyAQ==
-
-"@esbuild/darwin-arm64@0.16.17":
- version "0.16.17"
- resolved "https://registry.yarnpkg.com/@esbuild/darwin-arm64/-/darwin-arm64-0.16.17.tgz#edef4487af6b21afabba7be5132c26d22379b220"
- integrity sha512-/2agbUEfmxWHi9ARTX6OQ/KgXnOWfsNlTeLcoV7HSuSTv63E4DqtAc+2XqGw1KHxKMHGZgbVCZge7HXWX9Vn+w==
-
-"@esbuild/darwin-x64@0.16.17":
- version "0.16.17"
- resolved "https://registry.yarnpkg.com/@esbuild/darwin-x64/-/darwin-x64-0.16.17.tgz#42829168730071c41ef0d028d8319eea0e2904b4"
- integrity sha512-2By45OBHulkd9Svy5IOCZt376Aa2oOkiE9QWUK9fe6Tb+WDr8hXL3dpqi+DeLiMed8tVXspzsTAvd0jUl96wmg==
-
-"@esbuild/freebsd-arm64@0.16.17":
- version "0.16.17"
- resolved "https://registry.yarnpkg.com/@esbuild/freebsd-arm64/-/freebsd-arm64-0.16.17.tgz#1f4af488bfc7e9ced04207034d398e793b570a27"
- integrity sha512-mt+cxZe1tVx489VTb4mBAOo2aKSnJ33L9fr25JXpqQqzbUIw/yzIzi+NHwAXK2qYV1lEFp4OoVeThGjUbmWmdw==
-
-"@esbuild/freebsd-x64@0.16.17":
- version "0.16.17"
- resolved "https://registry.yarnpkg.com/@esbuild/freebsd-x64/-/freebsd-x64-0.16.17.tgz#636306f19e9bc981e06aa1d777302dad8fddaf72"
- integrity sha512-8ScTdNJl5idAKjH8zGAsN7RuWcyHG3BAvMNpKOBaqqR7EbUhhVHOqXRdL7oZvz8WNHL2pr5+eIT5c65kA6NHug==
-
-"@esbuild/linux-arm64@0.16.17":
- version "0.16.17"
- resolved "https://registry.yarnpkg.com/@esbuild/linux-arm64/-/linux-arm64-0.16.17.tgz#a003f7ff237c501e095d4f3a09e58fc7b25a4aca"
- integrity sha512-7S8gJnSlqKGVJunnMCrXHU9Q8Q/tQIxk/xL8BqAP64wchPCTzuM6W3Ra8cIa1HIflAvDnNOt2jaL17vaW+1V0g==
-
-"@esbuild/linux-arm@0.16.17":
- version "0.16.17"
- resolved "https://registry.yarnpkg.com/@esbuild/linux-arm/-/linux-arm-0.16.17.tgz#b591e6a59d9c4fe0eeadd4874b157ab78cf5f196"
- integrity sha512-iihzrWbD4gIT7j3caMzKb/RsFFHCwqqbrbH9SqUSRrdXkXaygSZCZg1FybsZz57Ju7N/SHEgPyaR0LZ8Zbe9gQ==
-
-"@esbuild/linux-ia32@0.16.17":
- version "0.16.17"
- resolved "https://registry.yarnpkg.com/@esbuild/linux-ia32/-/linux-ia32-0.16.17.tgz#24333a11027ef46a18f57019450a5188918e2a54"
- integrity sha512-kiX69+wcPAdgl3Lonh1VI7MBr16nktEvOfViszBSxygRQqSpzv7BffMKRPMFwzeJGPxcio0pdD3kYQGpqQ2SSg==
-
-"@esbuild/linux-loong64@0.16.17":
- version "0.16.17"
- resolved "https://registry.yarnpkg.com/@esbuild/linux-loong64/-/linux-loong64-0.16.17.tgz#d5ad459d41ed42bbd4d005256b31882ec52227d8"
- integrity sha512-dTzNnQwembNDhd654cA4QhbS9uDdXC3TKqMJjgOWsC0yNCbpzfWoXdZvp0mY7HU6nzk5E0zpRGGx3qoQg8T2DQ==
-
-"@esbuild/linux-mips64el@0.16.17":
- version "0.16.17"
- resolved "https://registry.yarnpkg.com/@esbuild/linux-mips64el/-/linux-mips64el-0.16.17.tgz#4e5967a665c38360b0a8205594377d4dcf9c3726"
- integrity sha512-ezbDkp2nDl0PfIUn0CsQ30kxfcLTlcx4Foz2kYv8qdC6ia2oX5Q3E/8m6lq84Dj/6b0FrkgD582fJMIfHhJfSw==
-
-"@esbuild/linux-ppc64@0.16.17":
- version "0.16.17"
- resolved "https://registry.yarnpkg.com/@esbuild/linux-ppc64/-/linux-ppc64-0.16.17.tgz#206443a02eb568f9fdf0b438fbd47d26e735afc8"
- integrity sha512-dzS678gYD1lJsW73zrFhDApLVdM3cUF2MvAa1D8K8KtcSKdLBPP4zZSLy6LFZ0jYqQdQ29bjAHJDgz0rVbLB3g==
-
-"@esbuild/linux-riscv64@0.16.17":
- version "0.16.17"
- resolved "https://registry.yarnpkg.com/@esbuild/linux-riscv64/-/linux-riscv64-0.16.17.tgz#c351e433d009bf256e798ad048152c8d76da2fc9"
- integrity sha512-ylNlVsxuFjZK8DQtNUwiMskh6nT0vI7kYl/4fZgV1llP5d6+HIeL/vmmm3jpuoo8+NuXjQVZxmKuhDApK0/cKw==
-
-"@esbuild/linux-s390x@0.16.17":
- version "0.16.17"
- resolved "https://registry.yarnpkg.com/@esbuild/linux-s390x/-/linux-s390x-0.16.17.tgz#661f271e5d59615b84b6801d1c2123ad13d9bd87"
- integrity sha512-gzy7nUTO4UA4oZ2wAMXPNBGTzZFP7mss3aKR2hH+/4UUkCOyqmjXiKpzGrY2TlEUhbbejzXVKKGazYcQTZWA/w==
-
-"@esbuild/linux-x64@0.16.17":
- version "0.16.17"
- resolved "https://registry.yarnpkg.com/@esbuild/linux-x64/-/linux-x64-0.16.17.tgz#e4ba18e8b149a89c982351443a377c723762b85f"
- integrity sha512-mdPjPxfnmoqhgpiEArqi4egmBAMYvaObgn4poorpUaqmvzzbvqbowRllQ+ZgzGVMGKaPkqUmPDOOFQRUFDmeUw==
-
-"@esbuild/netbsd-x64@0.16.17":
- version "0.16.17"
- resolved "https://registry.yarnpkg.com/@esbuild/netbsd-x64/-/netbsd-x64-0.16.17.tgz#7d4f4041e30c5c07dd24ffa295c73f06038ec775"
- integrity sha512-/PzmzD/zyAeTUsduZa32bn0ORug+Jd1EGGAUJvqfeixoEISYpGnAezN6lnJoskauoai0Jrs+XSyvDhppCPoKOA==
-
-"@esbuild/openbsd-x64@0.16.17":
- version "0.16.17"
- resolved "https://registry.yarnpkg.com/@esbuild/openbsd-x64/-/openbsd-x64-0.16.17.tgz#970fa7f8470681f3e6b1db0cc421a4af8060ec35"
- integrity sha512-2yaWJhvxGEz2RiftSk0UObqJa/b+rIAjnODJgv2GbGGpRwAfpgzyrg1WLK8rqA24mfZa9GvpjLcBBg8JHkoodg==
-
-"@esbuild/sunos-x64@0.16.17":
- version "0.16.17"
- resolved "https://registry.yarnpkg.com/@esbuild/sunos-x64/-/sunos-x64-0.16.17.tgz#abc60e7c4abf8b89fb7a4fe69a1484132238022c"
- integrity sha512-xtVUiev38tN0R3g8VhRfN7Zl42YCJvyBhRKw1RJjwE1d2emWTVToPLNEQj/5Qxc6lVFATDiy6LjVHYhIPrLxzw==
-
-"@esbuild/win32-arm64@0.16.17":
- version "0.16.17"
- resolved "https://registry.yarnpkg.com/@esbuild/win32-arm64/-/win32-arm64-0.16.17.tgz#7b0ff9e8c3265537a7a7b1fd9a24e7bd39fcd87a"
- integrity sha512-ga8+JqBDHY4b6fQAmOgtJJue36scANy4l/rL97W+0wYmijhxKetzZdKOJI7olaBaMhWt8Pac2McJdZLxXWUEQw==
-
-"@esbuild/win32-ia32@0.16.17":
- version "0.16.17"
- resolved "https://registry.yarnpkg.com/@esbuild/win32-ia32/-/win32-ia32-0.16.17.tgz#e90fe5267d71a7b7567afdc403dfd198c292eb09"
- integrity sha512-WnsKaf46uSSF/sZhwnqE4L/F89AYNMiD4YtEcYekBt9Q7nj0DiId2XH2Ng2PHM54qi5oPrQ8luuzGszqi/veig==
-
-"@esbuild/win32-x64@0.16.17":
- version "0.16.17"
- resolved "https://registry.yarnpkg.com/@esbuild/win32-x64/-/win32-x64-0.16.17.tgz#c5a1a4bfe1b57f0c3e61b29883525c6da3e5c091"
- integrity sha512-y+EHuSchhL7FjHgvQL/0fnnFmO4T1bhvWANX6gcnqTjtnKWbTvUMCpGnv2+t+31d7RzyEAYAd4u2fnIhHL6N/Q==
-
-"@eslint/eslintrc@^1.4.1":
- version "1.4.1"
- resolved "https://registry.yarnpkg.com/@eslint/eslintrc/-/eslintrc-1.4.1.tgz#af58772019a2d271b7e2d4c23ff4ddcba3ccfb3e"
- integrity sha512-XXrH9Uarn0stsyldqDYq8r++mROmWRI1xKMXa640Bb//SY1+ECYX6VzT6Lcx5frD0V30XieqJ0oX9I2Xj5aoMA==
+"@emotion/weak-memoize@^0.3.1":
+ version "0.3.1"
+ resolved "https://registry.yarnpkg.com/@emotion/weak-memoize/-/weak-memoize-0.3.1.tgz#d0fce5d07b0620caa282b5131c297bb60f9d87e6"
+ integrity sha512-EsBwpc7hBUJWAsNPBmJy4hxWx12v6bshQsldrVmjxJoc3isbxhOrF2IcCpaXxfvq03NwkI7sbsOLXbYuqF/8Ww==
+
+"@esbuild/android-arm64@0.17.19":
+ version "0.17.19"
+ resolved "https://registry.yarnpkg.com/@esbuild/android-arm64/-/android-arm64-0.17.19.tgz#bafb75234a5d3d1b690e7c2956a599345e84a2fd"
+ integrity sha512-KBMWvEZooR7+kzY0BtbTQn0OAYY7CsiydT63pVEaPtVYF0hXbUaOyZog37DKxK7NF3XacBJOpYT4adIJh+avxA==
+
+"@esbuild/android-arm@0.17.19":
+ version "0.17.19"
+ resolved "https://registry.yarnpkg.com/@esbuild/android-arm/-/android-arm-0.17.19.tgz#5898f7832c2298bc7d0ab53701c57beb74d78b4d"
+ integrity sha512-rIKddzqhmav7MSmoFCmDIb6e2W57geRsM94gV2l38fzhXMwq7hZoClug9USI2pFRGL06f4IOPHHpFNOkWieR8A==
+
+"@esbuild/android-x64@0.17.19":
+ version "0.17.19"
+ resolved "https://registry.yarnpkg.com/@esbuild/android-x64/-/android-x64-0.17.19.tgz#658368ef92067866d95fb268719f98f363d13ae1"
+ integrity sha512-uUTTc4xGNDT7YSArp/zbtmbhO0uEEK9/ETW29Wk1thYUJBz3IVnvgEiEwEa9IeLyvnpKrWK64Utw2bgUmDveww==
+
+"@esbuild/darwin-arm64@0.17.19":
+ version "0.17.19"
+ resolved "https://registry.yarnpkg.com/@esbuild/darwin-arm64/-/darwin-arm64-0.17.19.tgz#584c34c5991b95d4d48d333300b1a4e2ff7be276"
+ integrity sha512-80wEoCfF/hFKM6WE1FyBHc9SfUblloAWx6FJkFWTWiCoht9Mc0ARGEM47e67W9rI09YoUxJL68WHfDRYEAvOhg==
+
+"@esbuild/darwin-x64@0.17.19":
+ version "0.17.19"
+ resolved "https://registry.yarnpkg.com/@esbuild/darwin-x64/-/darwin-x64-0.17.19.tgz#7751d236dfe6ce136cce343dce69f52d76b7f6cb"
+ integrity sha512-IJM4JJsLhRYr9xdtLytPLSH9k/oxR3boaUIYiHkAawtwNOXKE8KoU8tMvryogdcT8AU+Bflmh81Xn6Q0vTZbQw==
+
+"@esbuild/freebsd-arm64@0.17.19":
+ version "0.17.19"
+ resolved "https://registry.yarnpkg.com/@esbuild/freebsd-arm64/-/freebsd-arm64-0.17.19.tgz#cacd171665dd1d500f45c167d50c6b7e539d5fd2"
+ integrity sha512-pBwbc7DufluUeGdjSU5Si+P3SoMF5DQ/F/UmTSb8HXO80ZEAJmrykPyzo1IfNbAoaqw48YRpv8shwd1NoI0jcQ==
+
+"@esbuild/freebsd-x64@0.17.19":
+ version "0.17.19"
+ resolved "https://registry.yarnpkg.com/@esbuild/freebsd-x64/-/freebsd-x64-0.17.19.tgz#0769456eee2a08b8d925d7c00b79e861cb3162e4"
+ integrity sha512-4lu+n8Wk0XlajEhbEffdy2xy53dpR06SlzvhGByyg36qJw6Kpfk7cp45DR/62aPH9mtJRmIyrXAS5UWBrJT6TQ==
+
+"@esbuild/linux-arm64@0.17.19":
+ version "0.17.19"
+ resolved "https://registry.yarnpkg.com/@esbuild/linux-arm64/-/linux-arm64-0.17.19.tgz#38e162ecb723862c6be1c27d6389f48960b68edb"
+ integrity sha512-ct1Tg3WGwd3P+oZYqic+YZF4snNl2bsnMKRkb3ozHmnM0dGWuxcPTTntAF6bOP0Sp4x0PjSF+4uHQ1xvxfRKqg==
+
+"@esbuild/linux-arm@0.17.19":
+ version "0.17.19"
+ resolved "https://registry.yarnpkg.com/@esbuild/linux-arm/-/linux-arm-0.17.19.tgz#1a2cd399c50040184a805174a6d89097d9d1559a"
+ integrity sha512-cdmT3KxjlOQ/gZ2cjfrQOtmhG4HJs6hhvm3mWSRDPtZ/lP5oe8FWceS10JaSJC13GBd4eH/haHnqf7hhGNLerA==
+
+"@esbuild/linux-ia32@0.17.19":
+ version "0.17.19"
+ resolved "https://registry.yarnpkg.com/@esbuild/linux-ia32/-/linux-ia32-0.17.19.tgz#e28c25266b036ce1cabca3c30155222841dc035a"
+ integrity sha512-w4IRhSy1VbsNxHRQpeGCHEmibqdTUx61Vc38APcsRbuVgK0OPEnQ0YD39Brymn96mOx48Y2laBQGqgZ0j9w6SQ==
+
+"@esbuild/linux-loong64@0.17.19":
+ version "0.17.19"
+ resolved "https://registry.yarnpkg.com/@esbuild/linux-loong64/-/linux-loong64-0.17.19.tgz#0f887b8bb3f90658d1a0117283e55dbd4c9dcf72"
+ integrity sha512-2iAngUbBPMq439a+z//gE+9WBldoMp1s5GWsUSgqHLzLJ9WoZLZhpwWuym0u0u/4XmZ3gpHmzV84PonE+9IIdQ==
+
+"@esbuild/linux-mips64el@0.17.19":
+ version "0.17.19"
+ resolved "https://registry.yarnpkg.com/@esbuild/linux-mips64el/-/linux-mips64el-0.17.19.tgz#f5d2a0b8047ea9a5d9f592a178ea054053a70289"
+ integrity sha512-LKJltc4LVdMKHsrFe4MGNPp0hqDFA1Wpt3jE1gEyM3nKUvOiO//9PheZZHfYRfYl6AwdTH4aTcXSqBerX0ml4A==
+
+"@esbuild/linux-ppc64@0.17.19":
+ version "0.17.19"
+ resolved "https://registry.yarnpkg.com/@esbuild/linux-ppc64/-/linux-ppc64-0.17.19.tgz#876590e3acbd9fa7f57a2c7d86f83717dbbac8c7"
+ integrity sha512-/c/DGybs95WXNS8y3Ti/ytqETiW7EU44MEKuCAcpPto3YjQbyK3IQVKfF6nbghD7EcLUGl0NbiL5Rt5DMhn5tg==
+
+"@esbuild/linux-riscv64@0.17.19":
+ version "0.17.19"
+ resolved "https://registry.yarnpkg.com/@esbuild/linux-riscv64/-/linux-riscv64-0.17.19.tgz#7f49373df463cd9f41dc34f9b2262d771688bf09"
+ integrity sha512-FC3nUAWhvFoutlhAkgHf8f5HwFWUL6bYdvLc/TTuxKlvLi3+pPzdZiFKSWz/PF30TB1K19SuCxDTI5KcqASJqA==
+
+"@esbuild/linux-s390x@0.17.19":
+ version "0.17.19"
+ resolved "https://registry.yarnpkg.com/@esbuild/linux-s390x/-/linux-s390x-0.17.19.tgz#e2afd1afcaf63afe2c7d9ceacd28ec57c77f8829"
+ integrity sha512-IbFsFbxMWLuKEbH+7sTkKzL6NJmG2vRyy6K7JJo55w+8xDk7RElYn6xvXtDW8HCfoKBFK69f3pgBJSUSQPr+4Q==
+
+"@esbuild/linux-x64@0.17.19":
+ version "0.17.19"
+ resolved "https://registry.yarnpkg.com/@esbuild/linux-x64/-/linux-x64-0.17.19.tgz#8a0e9738b1635f0c53389e515ae83826dec22aa4"
+ integrity sha512-68ngA9lg2H6zkZcyp22tsVt38mlhWde8l3eJLWkyLrp4HwMUr3c1s/M2t7+kHIhvMjglIBrFpncX1SzMckomGw==
+
+"@esbuild/netbsd-x64@0.17.19":
+ version "0.17.19"
+ resolved "https://registry.yarnpkg.com/@esbuild/netbsd-x64/-/netbsd-x64-0.17.19.tgz#c29fb2453c6b7ddef9a35e2c18b37bda1ae5c462"
+ integrity sha512-CwFq42rXCR8TYIjIfpXCbRX0rp1jo6cPIUPSaWwzbVI4aOfX96OXY8M6KNmtPcg7QjYeDmN+DD0Wp3LaBOLf4Q==
+
+"@esbuild/openbsd-x64@0.17.19":
+ version "0.17.19"
+ resolved "https://registry.yarnpkg.com/@esbuild/openbsd-x64/-/openbsd-x64-0.17.19.tgz#95e75a391403cb10297280d524d66ce04c920691"
+ integrity sha512-cnq5brJYrSZ2CF6c35eCmviIN3k3RczmHz8eYaVlNasVqsNY+JKohZU5MKmaOI+KkllCdzOKKdPs762VCPC20g==
+
+"@esbuild/sunos-x64@0.17.19":
+ version "0.17.19"
+ resolved "https://registry.yarnpkg.com/@esbuild/sunos-x64/-/sunos-x64-0.17.19.tgz#722eaf057b83c2575937d3ffe5aeb16540da7273"
+ integrity sha512-vCRT7yP3zX+bKWFeP/zdS6SqdWB8OIpaRq/mbXQxTGHnIxspRtigpkUcDMlSCOejlHowLqII7K2JKevwyRP2rg==
+
+"@esbuild/win32-arm64@0.17.19":
+ version "0.17.19"
+ resolved "https://registry.yarnpkg.com/@esbuild/win32-arm64/-/win32-arm64-0.17.19.tgz#9aa9dc074399288bdcdd283443e9aeb6b9552b6f"
+ integrity sha512-yYx+8jwowUstVdorcMdNlzklLYhPxjniHWFKgRqH7IFlUEa0Umu3KuYplf1HUZZ422e3NU9F4LGb+4O0Kdcaag==
+
+"@esbuild/win32-ia32@0.17.19":
+ version "0.17.19"
+ resolved "https://registry.yarnpkg.com/@esbuild/win32-ia32/-/win32-ia32-0.17.19.tgz#95ad43c62ad62485e210f6299c7b2571e48d2b03"
+ integrity sha512-eggDKanJszUtCdlVs0RB+h35wNlb5v4TWEkq4vZcmVt5u/HiDZrTXe2bWFQUez3RgNHwx/x4sk5++4NSSicKkw==
+
+"@esbuild/win32-x64@0.17.19":
+ version "0.17.19"
+ resolved "https://registry.yarnpkg.com/@esbuild/win32-x64/-/win32-x64-0.17.19.tgz#8cfaf2ff603e9aabb910e9c0558c26cf32744061"
+ integrity sha512-lAhycmKnVOuRYNtRtatQR1LPQf2oYCkRGkSFnseDAKPl8lu5SOsK/e1sXe5a0Pc5kHIHe6P2I/ilntNv2xf3cA==
+
+"@eslint-community/eslint-utils@^4.2.0":
+ version "4.4.0"
+ resolved "https://registry.yarnpkg.com/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz#a23514e8fb9af1269d5f7788aa556798d61c6b59"
+ integrity sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==
+ dependencies:
+ eslint-visitor-keys "^3.3.0"
+
+"@eslint-community/regexpp@^4.4.0":
+ version "4.5.1"
+ resolved "https://registry.yarnpkg.com/@eslint-community/regexpp/-/regexpp-4.5.1.tgz#cdd35dce4fa1a89a4fd42b1599eb35b3af408884"
+ integrity sha512-Z5ba73P98O1KUYCCJTUeVpja9RcGoMdncZ6T49FCUl2lN38JtCJ+3WgIDBv0AuY4WChU5PmtJmOCTlN6FZTFKQ==
+
+"@eslint/eslintrc@^2.0.3":
+ version "2.0.3"
+ resolved "https://registry.yarnpkg.com/@eslint/eslintrc/-/eslintrc-2.0.3.tgz#4910db5505f4d503f27774bf356e3704818a0331"
+ integrity sha512-+5gy6OQfk+xx3q0d6jGZZC3f3KzAkXc/IanVxd1is/VIIziRqqt3ongQz0FiTUXqTk0c7aDB3OaFuKnuSoJicQ==
dependencies:
ajv "^6.12.4"
debug "^4.3.2"
- espree "^9.4.0"
+ espree "^9.5.2"
globals "^13.19.0"
ignore "^5.2.0"
import-fresh "^3.2.1"
@@ -550,58 +562,64 @@
minimatch "^3.1.2"
strip-json-comments "^3.1.1"
-"@floating-ui/core@^1.1.0":
- version "1.1.1"
- resolved "https://registry.yarnpkg.com/@floating-ui/core/-/core-1.1.1.tgz#cf8b4cdd8987c687329a6099561764d8a16f2f22"
- integrity sha512-PL7g3dhA4dHgZfujkuD8Q+tfJJynEtnNQSPzmucCnxMvkxf4cLBJw/ZYqZUn4HCh33U3WHrAfv2R2tbi9UCSmw==
+"@eslint/js@8.42.0":
+ version "8.42.0"
+ resolved "https://registry.yarnpkg.com/@eslint/js/-/js-8.42.0.tgz#484a1d638de2911e6f5a30c12f49c7e4a3270fb6"
+ integrity sha512-6SWlXpWU5AvId8Ac7zjzmIOqMOba/JWY8XZ4A7q7Gn1Vlfg/SFFIlrtHXt9nPn4op9ZPAkl91Jao+QQv3r/ukw==
-"@floating-ui/dom@^1.1.1":
- version "1.1.1"
- resolved "https://registry.yarnpkg.com/@floating-ui/dom/-/dom-1.1.1.tgz#66aa747e15894910869bf9144fc54fc7d6e9f975"
- integrity sha512-TpIO93+DIujg3g7SykEAGZMDtbJRrmnYRCNYSjJlvIbGhBjRSNTLVbNeDQBrzy9qDgUbiWdc7KA0uZHZ2tJmiw==
+"@floating-ui/core@^1.2.6":
+ version "1.2.6"
+ resolved "https://registry.yarnpkg.com/@floating-ui/core/-/core-1.2.6.tgz#d21ace437cc919cdd8f1640302fa8851e65e75c0"
+ integrity sha512-EvYTiXet5XqweYGClEmpu3BoxmsQ4hkj3QaYA6qEnigCWffTP3vNRwBReTdrwDwo7OoJ3wM8Uoe9Uk4n+d4hfg==
+
+"@floating-ui/dom@^1.2.1":
+ version "1.2.9"
+ resolved "https://registry.yarnpkg.com/@floating-ui/dom/-/dom-1.2.9.tgz#b9ed1c15d30963419a6736f1b7feb350dd49c603"
+ integrity sha512-sosQxsqgxMNkV3C+3UqTS6LxP7isRLwX8WMepp843Rb3/b0Wz8+MdUkxJksByip3C2WwLugLHN1b4ibn//zKwQ==
dependencies:
- "@floating-ui/core" "^1.1.0"
+ "@floating-ui/core" "^1.2.6"
-"@floating-ui/react-dom-interactions@^0.10.1":
- version "0.10.3"
- resolved "https://registry.yarnpkg.com/@floating-ui/react-dom-interactions/-/react-dom-interactions-0.10.3.tgz#1d988aad169bf752b54c688db942f12e4fed61c5"
- integrity sha512-UEHqdnzyoiWNU5az/tAljr9iXFzN18DcvpMqW+/cXz4FEhDEB1ogLtWldOWCujLerPBnSRocADALafelOReMpw==
+"@floating-ui/react-dom@^1.3.0":
+ version "1.3.0"
+ resolved "https://registry.yarnpkg.com/@floating-ui/react-dom/-/react-dom-1.3.0.tgz#4d35d416eb19811c2b0e9271100a6aa18c1579b3"
+ integrity sha512-htwHm67Ji5E/pROEAr7f8IKFShuiCKHwUC/UY4vC3I5jiSvGFAYnSYiZO5MlGmads+QqvUkR9ANHEguGrDv72g==
dependencies:
- "@floating-ui/react-dom" "^1.0.0"
- aria-hidden "^1.1.3"
+ "@floating-ui/dom" "^1.2.1"
-"@floating-ui/react-dom@^1.0.0":
- version "1.2.2"
- resolved "https://registry.yarnpkg.com/@floating-ui/react-dom/-/react-dom-1.2.2.tgz#ed256992fd44fcfcddc96da68b4b92f123d61871"
- integrity sha512-DbmFBLwFrZhtXgCI2ra7wXYT8L2BN4/4AMQKyu05qzsVji51tXOfF36VE2gpMB6nhJGHa85PdEg75FB4+vnLFQ==
+"@floating-ui/react@^0.19.1":
+ version "0.19.2"
+ resolved "https://registry.yarnpkg.com/@floating-ui/react/-/react-0.19.2.tgz#c6e4d2097ed0dca665a7c042ddf9cdecc95e9412"
+ integrity sha512-JyNk4A0Ezirq8FlXECvRtQOX/iBe5Ize0W/pLkrZjfHW9GUV7Xnq6zm6fyZuQzaHHqEnVizmvlA96e1/CkZv+w==
dependencies:
- "@floating-ui/dom" "^1.1.1"
+ "@floating-ui/react-dom" "^1.3.0"
+ aria-hidden "^1.1.3"
+ tabbable "^6.0.1"
-"@fortawesome/fontawesome-common-types@6.2.1":
- version "6.2.1"
- resolved "https://registry.yarnpkg.com/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.2.1.tgz#411e02a820744d3f7e0d8d9df9d82b471beaa073"
- integrity sha512-Sz07mnQrTekFWLz5BMjOzHl/+NooTdW8F8kDQxjWwbpOJcnoSg4vUDng8d/WR1wOxM0O+CY9Zw0nR054riNYtQ==
+"@fortawesome/fontawesome-common-types@6.4.0":
+ version "6.4.0"
+ resolved "https://registry.yarnpkg.com/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.4.0.tgz#88da2b70d6ca18aaa6ed3687832e11f39e80624b"
+ integrity sha512-HNii132xfomg5QVZw0HwXXpN22s7VBHQBv9CeOu9tfJnhsWQNd2lmTNi8CSrnw5B+5YOmzu1UoPAyxaXsJ6RgQ==
"@fortawesome/fontawesome-svg-core@^6":
- version "6.2.1"
- resolved "https://registry.yarnpkg.com/@fortawesome/fontawesome-svg-core/-/fontawesome-svg-core-6.2.1.tgz#e87e905e444b5e7b715af09b64d27b53d4c8f9d9"
- integrity sha512-HELwwbCz6C1XEcjzyT1Jugmz2NNklMrSPjZOWMlc+ZsHIVk+XOvOXLGGQtFBwSyqfJDNgRq4xBCwWOaZ/d9DEA==
+ version "6.4.0"
+ resolved "https://registry.yarnpkg.com/@fortawesome/fontawesome-svg-core/-/fontawesome-svg-core-6.4.0.tgz#3727552eff9179506e9203d72feb5b1063c11a21"
+ integrity sha512-Bertv8xOiVELz5raB2FlXDPKt+m94MQ3JgDfsVbrqNpLU9+UE2E18GKjLKw+d3XbeYPqg1pzyQKGsrzbw+pPaw==
dependencies:
- "@fortawesome/fontawesome-common-types" "6.2.1"
+ "@fortawesome/fontawesome-common-types" "6.4.0"
"@fortawesome/free-regular-svg-icons@^6":
- version "6.2.1"
- resolved "https://registry.yarnpkg.com/@fortawesome/free-regular-svg-icons/-/free-regular-svg-icons-6.2.1.tgz#650e56d937755a8341f2eef258ecb6f95458820f"
- integrity sha512-wiqcNDNom75x+pe88FclpKz7aOSqS2lOivZeicMV5KRwOAeypxEYWAK/0v+7r+LrEY30+qzh8r2XDaEHvoLsMA==
+ version "6.4.0"
+ resolved "https://registry.yarnpkg.com/@fortawesome/free-regular-svg-icons/-/free-regular-svg-icons-6.4.0.tgz#cacc53bd8d832d46feead412d9ea9ce80a55e13a"
+ integrity sha512-ZfycI7D0KWPZtf7wtMFnQxs8qjBXArRzczABuMQqecA/nXohquJ5J/RCR77PmY5qGWkxAZDxpnUFVXKwtY/jPw==
dependencies:
- "@fortawesome/fontawesome-common-types" "6.2.1"
+ "@fortawesome/fontawesome-common-types" "6.4.0"
"@fortawesome/free-solid-svg-icons@^6":
- version "6.2.1"
- resolved "https://registry.yarnpkg.com/@fortawesome/free-solid-svg-icons/-/free-solid-svg-icons-6.2.1.tgz#2290ea5adcf1537cbd0c43de6feb38af02141d27"
- integrity sha512-oKuqrP5jbfEPJWTij4sM+/RvgX+RMFwx3QZCZcK9PrBDgxC35zuc7AOFsyMjMd/PIFPeB2JxyqDr5zs/DZFPPw==
+ version "6.4.0"
+ resolved "https://registry.yarnpkg.com/@fortawesome/free-solid-svg-icons/-/free-solid-svg-icons-6.4.0.tgz#48c0e790847fa56299e2f26b82b39663b8ad7119"
+ integrity sha512-kutPeRGWm8V5dltFP1zGjQOEAzaLZj4StdQhWVZnfGFCvAPVvHh8qk5bRrU4KXnRRRNni5tKQI9PBAdI6MP8nQ==
dependencies:
- "@fortawesome/fontawesome-common-types" "6.2.1"
+ "@fortawesome/fontawesome-common-types" "6.4.0"
"@fortawesome/react-fontawesome@^0":
version "0.2.0"
@@ -610,10 +628,10 @@
dependencies:
prop-types "^15.8.1"
-"@humanwhocodes/config-array@^0.11.8":
- version "0.11.8"
- resolved "https://registry.yarnpkg.com/@humanwhocodes/config-array/-/config-array-0.11.8.tgz#03595ac2075a4dc0f191cc2131de14fbd7d410b9"
- integrity sha512-UybHIJzJnR5Qc/MsD9Kr+RpO2h+/P1GhOwdiLPXK5TWk5sgTdu88bTD9UP+CKbPPh5Rni1u0GjAdYQLemG8g+g==
+"@humanwhocodes/config-array@^0.11.10":
+ version "0.11.10"
+ resolved "https://registry.yarnpkg.com/@humanwhocodes/config-array/-/config-array-0.11.10.tgz#5a3ffe32cc9306365fb3fd572596cd602d5e12d2"
+ integrity sha512-KVVjQmNUepDVGXNuoRRdmmEjruj0KfiGSbS8LVc12LMsWDQzRXJ0qdhN8L8uUigKpfEHRhlaQFY0ib1tnUbNeQ==
dependencies:
"@humanwhocodes/object-schema" "^1.2.1"
debug "^4.1.1"
@@ -645,109 +663,109 @@
resolved "https://registry.yarnpkg.com/@istanbuljs/schema/-/schema-0.1.3.tgz#e45e384e4b8ec16bce2fd903af78450f6bf7ec98"
integrity sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==
-"@jest/console@^29.4.1":
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/@jest/console/-/console-29.4.1.tgz#cbc31d73f6329f693b3d34b365124de797704fff"
- integrity sha512-m+XpwKSi3PPM9znm5NGS8bBReeAJJpSkL1OuFCqaMaJL2YX9YXLkkI+MBchMPwu+ZuM2rynL51sgfkQteQ1CKQ==
+"@jest/console@^29.5.0":
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/@jest/console/-/console-29.5.0.tgz#593a6c5c0d3f75689835f1b3b4688c4f8544cb57"
+ integrity sha512-NEpkObxPwyw/XxZVLPmAGKE89IQRp4puc6IQRPru6JKd1M3fW9v1xM1AnzIJE65hbCkzQAdnL8P47e9hzhiYLQ==
dependencies:
- "@jest/types" "^29.4.1"
+ "@jest/types" "^29.5.0"
"@types/node" "*"
chalk "^4.0.0"
- jest-message-util "^29.4.1"
- jest-util "^29.4.1"
+ jest-message-util "^29.5.0"
+ jest-util "^29.5.0"
slash "^3.0.0"
-"@jest/core@^29.4.1":
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/@jest/core/-/core-29.4.1.tgz#91371179b5959951e211dfaeea4277a01dcca14f"
- integrity sha512-RXFTohpBqpaTebNdg5l3I5yadnKo9zLBajMT0I38D0tDhreVBYv3fA8kywthI00sWxPztWLD3yjiUkewwu/wKA==
+"@jest/core@^29.5.0":
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/@jest/core/-/core-29.5.0.tgz#76674b96904484e8214614d17261cc491e5f1f03"
+ integrity sha512-28UzQc7ulUrOQw1IsN/kv1QES3q2kkbl/wGslyhAclqZ/8cMdB5M68BffkIdSJgKBUt50d3hbwJ92XESlE7LiQ==
dependencies:
- "@jest/console" "^29.4.1"
- "@jest/reporters" "^29.4.1"
- "@jest/test-result" "^29.4.1"
- "@jest/transform" "^29.4.1"
- "@jest/types" "^29.4.1"
+ "@jest/console" "^29.5.0"
+ "@jest/reporters" "^29.5.0"
+ "@jest/test-result" "^29.5.0"
+ "@jest/transform" "^29.5.0"
+ "@jest/types" "^29.5.0"
"@types/node" "*"
ansi-escapes "^4.2.1"
chalk "^4.0.0"
ci-info "^3.2.0"
exit "^0.1.2"
graceful-fs "^4.2.9"
- jest-changed-files "^29.4.0"
- jest-config "^29.4.1"
- jest-haste-map "^29.4.1"
- jest-message-util "^29.4.1"
- jest-regex-util "^29.2.0"
- jest-resolve "^29.4.1"
- jest-resolve-dependencies "^29.4.1"
- jest-runner "^29.4.1"
- jest-runtime "^29.4.1"
- jest-snapshot "^29.4.1"
- jest-util "^29.4.1"
- jest-validate "^29.4.1"
- jest-watcher "^29.4.1"
+ jest-changed-files "^29.5.0"
+ jest-config "^29.5.0"
+ jest-haste-map "^29.5.0"
+ jest-message-util "^29.5.0"
+ jest-regex-util "^29.4.3"
+ jest-resolve "^29.5.0"
+ jest-resolve-dependencies "^29.5.0"
+ jest-runner "^29.5.0"
+ jest-runtime "^29.5.0"
+ jest-snapshot "^29.5.0"
+ jest-util "^29.5.0"
+ jest-validate "^29.5.0"
+ jest-watcher "^29.5.0"
micromatch "^4.0.4"
- pretty-format "^29.4.1"
+ pretty-format "^29.5.0"
slash "^3.0.0"
strip-ansi "^6.0.0"
-"@jest/environment@^29.4.1":
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/@jest/environment/-/environment-29.4.1.tgz#52d232a85cdc995b407a940c89c86568f5a88ffe"
- integrity sha512-pJ14dHGSQke7Q3mkL/UZR9ZtTOxqskZaC91NzamEH4dlKRt42W+maRBXiw/LWkdJe+P0f/zDR37+SPMplMRlPg==
+"@jest/environment@^29.5.0":
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/@jest/environment/-/environment-29.5.0.tgz#9152d56317c1fdb1af389c46640ba74ef0bb4c65"
+ integrity sha512-5FXw2+wD29YU1d4I2htpRX7jYnAyTRjP2CsXQdo9SAM8g3ifxWPSV0HnClSn71xwctr0U3oZIIH+dtbfmnbXVQ==
dependencies:
- "@jest/fake-timers" "^29.4.1"
- "@jest/types" "^29.4.1"
+ "@jest/fake-timers" "^29.5.0"
+ "@jest/types" "^29.5.0"
"@types/node" "*"
- jest-mock "^29.4.1"
+ jest-mock "^29.5.0"
-"@jest/expect-utils@^29.4.1":
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/@jest/expect-utils/-/expect-utils-29.4.1.tgz#105b9f3e2c48101f09cae2f0a4d79a1b3a419cbb"
- integrity sha512-w6YJMn5DlzmxjO00i9wu2YSozUYRBhIoJ6nQwpMYcBMtiqMGJm1QBzOf6DDgRao8dbtpDoaqLg6iiQTvv0UHhQ==
+"@jest/expect-utils@^29.5.0":
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/@jest/expect-utils/-/expect-utils-29.5.0.tgz#f74fad6b6e20f924582dc8ecbf2cb800fe43a036"
+ integrity sha512-fmKzsidoXQT2KwnrwE0SQq3uj8Z763vzR8LnLBwC2qYWEFpjX8daRsk6rHUM1QvNlEW/UJXNXm59ztmJJWs2Mg==
dependencies:
- jest-get-type "^29.2.0"
+ jest-get-type "^29.4.3"
-"@jest/expect@^29.4.1":
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/@jest/expect/-/expect-29.4.1.tgz#3338fa20f547bb6e550c4be37d6f82711cc13c38"
- integrity sha512-ZxKJP5DTUNF2XkpJeZIzvnzF1KkfrhEF6Rz0HGG69fHl6Bgx5/GoU3XyaeFYEjuuKSOOsbqD/k72wFvFxc3iTw==
+"@jest/expect@^29.5.0":
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/@jest/expect/-/expect-29.5.0.tgz#80952f5316b23c483fbca4363ce822af79c38fba"
+ integrity sha512-PueDR2HGihN3ciUNGr4uelropW7rqUfTiOn+8u0leg/42UhblPxHkfoh0Ruu3I9Y1962P3u2DY4+h7GVTSVU6g==
dependencies:
- expect "^29.4.1"
- jest-snapshot "^29.4.1"
+ expect "^29.5.0"
+ jest-snapshot "^29.5.0"
-"@jest/fake-timers@^29.4.1":
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/@jest/fake-timers/-/fake-timers-29.4.1.tgz#7b673131e8ea2a2045858f08241cace5d518b42b"
- integrity sha512-/1joI6rfHFmmm39JxNfmNAO3Nwm6Y0VoL5fJDy7H1AtWrD1CgRtqJbN9Ld6rhAkGO76qqp4cwhhxJ9o9kYjQMw==
+"@jest/fake-timers@^29.5.0":
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/@jest/fake-timers/-/fake-timers-29.5.0.tgz#d4d09ec3286b3d90c60bdcd66ed28d35f1b4dc2c"
+ integrity sha512-9ARvuAAQcBwDAqOnglWq2zwNIRUDtk/SCkp/ToGEhFv5r86K21l+VEs0qNTaXtyiY0lEePl3kylijSYJQqdbDg==
dependencies:
- "@jest/types" "^29.4.1"
+ "@jest/types" "^29.5.0"
"@sinonjs/fake-timers" "^10.0.2"
"@types/node" "*"
- jest-message-util "^29.4.1"
- jest-mock "^29.4.1"
- jest-util "^29.4.1"
+ jest-message-util "^29.5.0"
+ jest-mock "^29.5.0"
+ jest-util "^29.5.0"
-"@jest/globals@^29.4.1":
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/@jest/globals/-/globals-29.4.1.tgz#3cd78c5567ab0249f09fbd81bf9f37a7328f4713"
- integrity sha512-znoK2EuFytbHH0ZSf2mQK2K1xtIgmaw4Da21R2C/NE/+NnItm5mPEFQmn8gmF3f0rfOlmZ3Y3bIf7bFj7DHxAA==
+"@jest/globals@^29.5.0":
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/@jest/globals/-/globals-29.5.0.tgz#6166c0bfc374c58268677539d0c181f9c1833298"
+ integrity sha512-S02y0qMWGihdzNbUiqSAiKSpSozSuHX5UYc7QbnHP+D9Lyw8DgGGCinrN9uSuHPeKgSSzvPom2q1nAtBvUsvPQ==
dependencies:
- "@jest/environment" "^29.4.1"
- "@jest/expect" "^29.4.1"
- "@jest/types" "^29.4.1"
- jest-mock "^29.4.1"
+ "@jest/environment" "^29.5.0"
+ "@jest/expect" "^29.5.0"
+ "@jest/types" "^29.5.0"
+ jest-mock "^29.5.0"
-"@jest/reporters@^29.4.1":
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/@jest/reporters/-/reporters-29.4.1.tgz#50d509c08575c75e3cd2176d72ec3786419d5e04"
- integrity sha512-AISY5xpt2Xpxj9R6y0RF1+O6GRy9JsGa8+vK23Lmzdy1AYcpQn5ItX79wJSsTmfzPKSAcsY1LNt/8Y5Xe5LOSg==
+"@jest/reporters@^29.5.0":
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/@jest/reporters/-/reporters-29.5.0.tgz#985dfd91290cd78ddae4914ba7921bcbabe8ac9b"
+ integrity sha512-D05STXqj/M8bP9hQNSICtPqz97u7ffGzZu+9XLucXhkOFBqKcXe04JLZOgIekOxdb73MAoBUFnqvf7MCpKk5OA==
dependencies:
"@bcoe/v8-coverage" "^0.2.3"
- "@jest/console" "^29.4.1"
- "@jest/test-result" "^29.4.1"
- "@jest/transform" "^29.4.1"
- "@jest/types" "^29.4.1"
+ "@jest/console" "^29.5.0"
+ "@jest/test-result" "^29.5.0"
+ "@jest/transform" "^29.5.0"
+ "@jest/types" "^29.5.0"
"@jridgewell/trace-mapping" "^0.3.15"
"@types/node" "*"
chalk "^4.0.0"
@@ -760,48 +778,48 @@
istanbul-lib-report "^3.0.0"
istanbul-lib-source-maps "^4.0.0"
istanbul-reports "^3.1.3"
- jest-message-util "^29.4.1"
- jest-util "^29.4.1"
- jest-worker "^29.4.1"
+ jest-message-util "^29.5.0"
+ jest-util "^29.5.0"
+ jest-worker "^29.5.0"
slash "^3.0.0"
string-length "^4.0.1"
strip-ansi "^6.0.0"
v8-to-istanbul "^9.0.1"
-"@jest/schemas@^29.4.0":
- version "29.4.0"
- resolved "https://registry.yarnpkg.com/@jest/schemas/-/schemas-29.4.0.tgz#0d6ad358f295cc1deca0b643e6b4c86ebd539f17"
- integrity sha512-0E01f/gOZeNTG76i5eWWSupvSHaIINrTie7vCyjiYFKgzNdyEGd12BUv4oNBFHOqlHDbtoJi3HrQ38KCC90NsQ==
+"@jest/schemas@^29.4.3":
+ version "29.4.3"
+ resolved "https://registry.yarnpkg.com/@jest/schemas/-/schemas-29.4.3.tgz#39cf1b8469afc40b6f5a2baaa146e332c4151788"
+ integrity sha512-VLYKXQmtmuEz6IxJsrZwzG9NvtkQsWNnWMsKxqWNu3+CnfzJQhp0WDDKWLVV9hLKr0l3SLLFRqcYHjhtyuDVxg==
dependencies:
"@sinclair/typebox" "^0.25.16"
-"@jest/source-map@^29.2.0":
- version "29.2.0"
- resolved "https://registry.yarnpkg.com/@jest/source-map/-/source-map-29.2.0.tgz#ab3420c46d42508dcc3dc1c6deee0b613c235744"
- integrity sha512-1NX9/7zzI0nqa6+kgpSdKPK+WU1p+SJk3TloWZf5MzPbxri9UEeXX5bWZAPCzbQcyuAzubcdUHA7hcNznmRqWQ==
+"@jest/source-map@^29.4.3":
+ version "29.4.3"
+ resolved "https://registry.yarnpkg.com/@jest/source-map/-/source-map-29.4.3.tgz#ff8d05cbfff875d4a791ab679b4333df47951d20"
+ integrity sha512-qyt/mb6rLyd9j1jUts4EQncvS6Yy3PM9HghnNv86QBlV+zdL2inCdK1tuVlL+J+lpiw2BI67qXOrX3UurBqQ1w==
dependencies:
"@jridgewell/trace-mapping" "^0.3.15"
callsites "^3.0.0"
graceful-fs "^4.2.9"
-"@jest/test-result@^29.4.1":
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/@jest/test-result/-/test-result-29.4.1.tgz#997f19695e13b34779ceb3c288a416bd26c3238d"
- integrity sha512-WRt29Lwt+hEgfN8QDrXqXGgCTidq1rLyFqmZ4lmJOpVArC8daXrZWkWjiaijQvgd3aOUj2fM8INclKHsQW9YyQ==
+"@jest/test-result@^29.5.0":
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/@jest/test-result/-/test-result-29.5.0.tgz#7c856a6ca84f45cc36926a4e9c6b57f1973f1408"
+ integrity sha512-fGl4rfitnbfLsrfx1uUpDEESS7zM8JdgZgOCQuxQvL1Sn/I6ijeAVQWGfXI9zb1i9Mzo495cIpVZhA0yr60PkQ==
dependencies:
- "@jest/console" "^29.4.1"
- "@jest/types" "^29.4.1"
+ "@jest/console" "^29.5.0"
+ "@jest/types" "^29.5.0"
"@types/istanbul-lib-coverage" "^2.0.0"
collect-v8-coverage "^1.0.0"
-"@jest/test-sequencer@^29.4.1":
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/@jest/test-sequencer/-/test-sequencer-29.4.1.tgz#f7a006ec7058b194a10cf833c88282ef86d578fd"
- integrity sha512-v5qLBNSsM0eHzWLXsQ5fiB65xi49A3ILPSFQKPXzGL4Vyux0DPZAIN7NAFJa9b4BiTDP9MBF/Zqc/QA1vuiJ0w==
+"@jest/test-sequencer@^29.5.0":
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/@jest/test-sequencer/-/test-sequencer-29.5.0.tgz#34d7d82d3081abd523dbddc038a3ddcb9f6d3cc4"
+ integrity sha512-yPafQEcKjkSfDXyvtgiV4pevSeyuA6MQr6ZIdVkWJly9vkqjnFfcfhRQqpD5whjoU8EORki752xQmjaqoFjzMQ==
dependencies:
- "@jest/test-result" "^29.4.1"
+ "@jest/test-result" "^29.5.0"
graceful-fs "^4.2.9"
- jest-haste-map "^29.4.1"
+ jest-haste-map "^29.5.0"
slash "^3.0.0"
"@jest/transform@^26.6.2":
@@ -825,26 +843,26 @@
source-map "^0.6.1"
write-file-atomic "^3.0.0"
-"@jest/transform@^29.4.1":
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/@jest/transform/-/transform-29.4.1.tgz#e4f517841bb795c7dcdee1ba896275e2c2d26d4a"
- integrity sha512-5w6YJrVAtiAgr0phzKjYd83UPbCXsBRTeYI4BXokv9Er9CcrH9hfXL/crCvP2d2nGOcovPUnlYiLPFLZrkG5Hg==
+"@jest/transform@^29.5.0":
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/@jest/transform/-/transform-29.5.0.tgz#cf9c872d0965f0cbd32f1458aa44a2b1988b00f9"
+ integrity sha512-8vbeZWqLJOvHaDfeMuoHITGKSz5qWc9u04lnWrQE3VyuSw604PzQM824ZeX9XSjUCeDiE3GuxZe5UKa8J61NQw==
dependencies:
"@babel/core" "^7.11.6"
- "@jest/types" "^29.4.1"
+ "@jest/types" "^29.5.0"
"@jridgewell/trace-mapping" "^0.3.15"
babel-plugin-istanbul "^6.1.1"
chalk "^4.0.0"
convert-source-map "^2.0.0"
fast-json-stable-stringify "^2.1.0"
graceful-fs "^4.2.9"
- jest-haste-map "^29.4.1"
- jest-regex-util "^29.2.0"
- jest-util "^29.4.1"
+ jest-haste-map "^29.5.0"
+ jest-regex-util "^29.4.3"
+ jest-util "^29.5.0"
micromatch "^4.0.4"
pirates "^4.0.4"
slash "^3.0.0"
- write-file-atomic "^5.0.0"
+ write-file-atomic "^4.0.2"
"@jest/types@^26.6.2":
version "26.6.2"
@@ -857,30 +875,22 @@
"@types/yargs" "^15.0.0"
chalk "^4.0.0"
-"@jest/types@^29.4.1":
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/@jest/types/-/types-29.4.1.tgz#f9f83d0916f50696661da72766132729dcb82ecb"
- integrity sha512-zbrAXDUOnpJ+FMST2rV7QZOgec8rskg2zv8g2ajeqitp4tvZiyqTCYXANrKsM+ryj5o+LI+ZN2EgU9drrkiwSA==
+"@jest/types@^29.5.0":
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/@jest/types/-/types-29.5.0.tgz#f59ef9b031ced83047c67032700d8c807d6e1593"
+ integrity sha512-qbu7kN6czmVRc3xWFQcAN03RAUamgppVUdXrvl1Wr3jlNF93o9mJbGcDWrwGB6ht44u7efB1qCFgVQmca24Uog==
dependencies:
- "@jest/schemas" "^29.4.0"
+ "@jest/schemas" "^29.4.3"
"@types/istanbul-lib-coverage" "^2.0.0"
"@types/istanbul-reports" "^3.0.0"
"@types/node" "*"
"@types/yargs" "^17.0.8"
chalk "^4.0.0"
-"@jridgewell/gen-mapping@^0.1.0":
- version "0.1.1"
- resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.1.1.tgz#e5d2e450306a9491e3bd77e323e38d7aff315996"
- integrity sha512-sQXCasFk+U8lWYEe66WxRDOE9PjVz4vSM51fTu3Hw+ClTpUSQb718772vH3pyS5pShp6lvQM7SxgIDXXXmOX7w==
- dependencies:
- "@jridgewell/set-array" "^1.0.0"
- "@jridgewell/sourcemap-codec" "^1.4.10"
-
-"@jridgewell/gen-mapping@^0.3.2":
- version "0.3.2"
- resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.2.tgz#c1aedc61e853f2bb9f5dfe6d4442d3b565b253b9"
- integrity sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A==
+"@jridgewell/gen-mapping@^0.3.0", "@jridgewell/gen-mapping@^0.3.2":
+ version "0.3.3"
+ resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz#7e02e6eb5df901aaedb08514203b096614024098"
+ integrity sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==
dependencies:
"@jridgewell/set-array" "^1.0.1"
"@jridgewell/sourcemap-codec" "^1.4.10"
@@ -891,60 +901,65 @@
resolved "https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz#2203b118c157721addfe69d47b70465463066d78"
integrity sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==
-"@jridgewell/set-array@^1.0.0", "@jridgewell/set-array@^1.0.1":
+"@jridgewell/set-array@^1.0.1":
version "1.1.2"
resolved "https://registry.yarnpkg.com/@jridgewell/set-array/-/set-array-1.1.2.tgz#7c6cf998d6d20b914c0a55a91ae928ff25965e72"
integrity sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==
-"@jridgewell/sourcemap-codec@1.4.14", "@jridgewell/sourcemap-codec@^1.4.10", "@jridgewell/sourcemap-codec@^1.4.13":
+"@jridgewell/sourcemap-codec@1.4.14":
version "1.4.14"
resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz#add4c98d341472a289190b424efbdb096991bb24"
integrity sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==
-"@jridgewell/trace-mapping@^0.3.12", "@jridgewell/trace-mapping@^0.3.15", "@jridgewell/trace-mapping@^0.3.9":
- version "0.3.17"
- resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.17.tgz#793041277af9073b0951a7fe0f0d8c4c98c36985"
- integrity sha512-MCNzAp77qzKca9+W/+I0+sEpaUnZoeasnghNeVc41VZCEKaCH73Vq3BZZ/SzWIgrqE4H4ceI+p+b6C0mHf9T4g==
+"@jridgewell/sourcemap-codec@^1.4.10":
+ version "1.4.15"
+ resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz#d7c6e6755c78567a951e04ab52ef0fd26de59f32"
+ integrity sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==
+
+"@jridgewell/trace-mapping@^0.3.12", "@jridgewell/trace-mapping@^0.3.15", "@jridgewell/trace-mapping@^0.3.17", "@jridgewell/trace-mapping@^0.3.9":
+ version "0.3.18"
+ resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.18.tgz#25783b2086daf6ff1dcb53c9249ae480e4dd4cd6"
+ integrity sha512-w+niJYzMHdd7USdiH2U6869nqhD2nbfZXND5Yp93qIbEmnDNk7PD48o+YchRVpzMU7M6jVCbenTR7PA1FLQ9pA==
dependencies:
"@jridgewell/resolve-uri" "3.1.0"
"@jridgewell/sourcemap-codec" "1.4.14"
"@mantine/core@^5":
- version "5.10.2"
- resolved "https://registry.yarnpkg.com/@mantine/core/-/core-5.10.2.tgz#d1099736e2a98d24b19d4dc9e35dfea2cff02774"
- integrity sha512-DyvrkTj2U63Kvt3YD6kECA/PltcKwNa0Zr0IIPnEmhYM7bxpdae9pMRpRjwZ/o3JxkIh8k6vL2+7sxoec+IeKw==
+ version "5.10.5"
+ resolved "https://registry.yarnpkg.com/@mantine/core/-/core-5.10.5.tgz#071e14dcf8b94a36d0243f1f4b30305ac0074afd"
+ integrity sha512-F4tqHSEVM9D6/iSqHfPda+Xl5XgSEPHAAkT01Zwzj4Jnbd10qGrlqr/SFUop2CIcuKYnmra9XltUahUPXBC2BQ==
dependencies:
- "@floating-ui/react-dom-interactions" "^0.10.1"
- "@mantine/styles" "5.10.2"
- "@mantine/utils" "5.10.2"
+ "@floating-ui/react" "^0.19.1"
+ "@mantine/styles" "5.10.5"
+ "@mantine/utils" "5.10.5"
"@radix-ui/react-scroll-area" "1.0.2"
react-textarea-autosize "8.3.4"
"@mantine/hooks@^5":
- version "5.10.2"
- resolved "https://registry.yarnpkg.com/@mantine/hooks/-/hooks-5.10.2.tgz#6bb94d895bf59a10997171df1ac33b4f56f764b9"
- integrity sha512-H26lZ+P2HasLXt3coXYUiN5hVRNXPYNwF93oP9TfmOOA+EEzbGtGQR/dBv51BF53UJ+rzgJ0W5lLi1CbZPdMnA==
+ version "5.10.5"
+ resolved "https://registry.yarnpkg.com/@mantine/hooks/-/hooks-5.10.5.tgz#568586a0fa649be46f057ddc920bf98761017ffb"
+ integrity sha512-hFQp71QZDfivPzfIUOQZfMKLiOL/Cn2EnzacRlbUr55myteTfzYN8YMt+nzniE/6c4IRopFHEAdbKEtfyQc6kg==
"@mantine/notifications@^5":
- version "5.10.2"
- resolved "https://registry.yarnpkg.com/@mantine/notifications/-/notifications-5.10.2.tgz#4e9477a08f0b0bfa423ee6206dda368d07edad98"
- integrity sha512-JO92oVLRVbeD1Lz3zFlAFaK8uCWxev4R+RfhuAf3/MJgMLMvHT44hAJtnRUYC4jqnW0pweOKKflyQvnTZM7/Hg==
+ version "5.10.5"
+ resolved "https://registry.yarnpkg.com/@mantine/notifications/-/notifications-5.10.5.tgz#2f3f2d013ce4637e64e935aa5dd8c1df1a7acec0"
+ integrity sha512-IzTAXA7Zb9DcI94Mv5O2OinhLmI7fvs/VutDw9uCpp6OHtLuF/XN1d262jrsGhMZT0c4nuUsotSLFZF3GWZwXg==
dependencies:
- "@mantine/utils" "5.10.2"
+ "@mantine/utils" "5.10.5"
react-transition-group "4.4.2"
-"@mantine/styles@5.10.2":
- version "5.10.2"
- resolved "https://registry.yarnpkg.com/@mantine/styles/-/styles-5.10.2.tgz#5c95f7619df67f1c5b895fb5e30c34a748aae906"
- integrity sha512-/LMGgiBJc+gDkh61gMgj5srPsapl2ZFbklF1dBxBF+77j7o1tORJWyAl3IOUD+po9P6jwfTjlDoNDNLESUJhyQ==
+"@mantine/styles@5.10.5":
+ version "5.10.5"
+ resolved "https://registry.yarnpkg.com/@mantine/styles/-/styles-5.10.5.tgz#ace82a71b4fe3d14ee14638f1735d5680d93d36d"
+ integrity sha512-0NXk8c/XGzuTUkZc6KceF2NaTCMEu5mHR4ru0x+ttb9DGnLpHuGWduTHjSfr4hl6eAJgedD0zauO+VAhDzO9zA==
dependencies:
clsx "1.1.1"
csstype "3.0.9"
-"@mantine/utils@5.10.2":
- version "5.10.2"
- resolved "https://registry.yarnpkg.com/@mantine/utils/-/utils-5.10.2.tgz#207a4e8434b521305bb63563244765db1022b005"
- integrity sha512-mpXTPG9X2nPfHGg3ejcWkSlYVxuiTx6RxWbbFHEk/U2fSikEAQdMNdVPaKI2c0/S2jksqJXlODZJv2Qt9HAFMQ==
+"@mantine/utils@5.10.5":
+ version "5.10.5"
+ resolved "https://registry.yarnpkg.com/@mantine/utils/-/utils-5.10.5.tgz#ad620d714e545c6efb7f69d94ce46e3fd2fe01fb"
+ integrity sha512-FGMq4dGs5HhDAtI0z46uzxzKKPmZ3h5uKUyKg1ZHoFR1mBtcUMbB6FylFmHqKFRWlJ5IXqX9dwmiVrLYUOfTmA==
"@nodelib/fs.scandir@2.1.5":
version "2.1.5"
@@ -1057,34 +1072,34 @@
dependencies:
"@babel/runtime" "^7.13.10"
-"@remix-run/router@1.3.1":
- version "1.3.1"
- resolved "https://registry.yarnpkg.com/@remix-run/router/-/router-1.3.1.tgz#3bb0b6ddc0a276e8dc1138d08f63035e4e23e8bf"
- integrity sha512-+eun1Wtf72RNRSqgU7qM2AMX/oHp+dnx7BHk1qhK5ZHzdHTUU4LA1mGG1vT+jMc8sbhG3orvsfOmryjzx2PzQw==
+"@remix-run/router@1.6.3":
+ version "1.6.3"
+ resolved "https://registry.yarnpkg.com/@remix-run/router/-/router-1.6.3.tgz#8205baf6e17ef93be35bf62c37d2d594e9be0dad"
+ integrity sha512-EXJysQ7J3veRECd0kZFQwYYd5sJMcq2O/m60zu1W2l3oVQ9xtub8jTOtYRE0+M2iomyG/W3Ps7+vp2kna0C27Q==
"@sinclair/typebox@^0.25.16":
- version "0.25.21"
- resolved "https://registry.yarnpkg.com/@sinclair/typebox/-/typebox-0.25.21.tgz#763b05a4b472c93a8db29b2c3e359d55b29ce272"
- integrity sha512-gFukHN4t8K4+wVC+ECqeqwzBDeFeTzBXroBTqE6vcWrQGbEUpHO7LYdG0f4xnvYq4VOEwITSlHlp0JBAIFMS/g==
+ version "0.25.24"
+ resolved "https://registry.yarnpkg.com/@sinclair/typebox/-/typebox-0.25.24.tgz#8c7688559979f7079aacaf31aa881c3aa410b718"
+ integrity sha512-XJfwUVUKDHF5ugKwIcxEgc9k8b7HbznCp6eUfWgu710hMPNIO4aw4/zB5RogDQz8nd6gyCDpU9O/m6qYEWY6yQ==
-"@sinonjs/commons@^2.0.0":
- version "2.0.0"
- resolved "https://registry.yarnpkg.com/@sinonjs/commons/-/commons-2.0.0.tgz#fd4ca5b063554307e8327b4564bd56d3b73924a3"
- integrity sha512-uLa0j859mMrg2slwQYdO/AkrOfmH+X6LTVmNTS9CqexuE2IvVORIkSpJLqePAbEnKJ77aMmCwr1NUZ57120Xcg==
+"@sinonjs/commons@^3.0.0":
+ version "3.0.0"
+ resolved "https://registry.yarnpkg.com/@sinonjs/commons/-/commons-3.0.0.tgz#beb434fe875d965265e04722ccfc21df7f755d72"
+ integrity sha512-jXBtWAF4vmdNmZgD5FoKsVLv3rPgDnLgPbU84LIJ3otV44vJlDRokVng5v8NFJdCf/da9legHcKaRuZs4L7faA==
dependencies:
type-detect "4.0.8"
"@sinonjs/fake-timers@^10.0.2":
- version "10.0.2"
- resolved "https://registry.yarnpkg.com/@sinonjs/fake-timers/-/fake-timers-10.0.2.tgz#d10549ed1f423d80639c528b6c7f5a1017747d0c"
- integrity sha512-SwUDyjWnah1AaNl7kxsa7cfLhlTYoiyhDAIgyh+El30YvXs/o7OLXpYH88Zdhyx9JExKrmHDJ+10bwIcY80Jmw==
+ version "10.2.0"
+ resolved "https://registry.yarnpkg.com/@sinonjs/fake-timers/-/fake-timers-10.2.0.tgz#b3e322a34c5f26e3184e7f6115695f299c1b1194"
+ integrity sha512-OPwQlEdg40HAj5KNF8WW6q2KG4Z+cBCZb3m4ninfTZKaBmbIJodviQsDBoYMPHkOyJJMHnOJo5j2+LKDOhOACg==
dependencies:
- "@sinonjs/commons" "^2.0.0"
+ "@sinonjs/commons" "^3.0.0"
"@types/babel__core@^7.0.0", "@types/babel__core@^7.1.14", "@types/babel__core@^7.1.7":
- version "7.20.0"
- resolved "https://registry.yarnpkg.com/@types/babel__core/-/babel__core-7.20.0.tgz#61bc5a4cae505ce98e1e36c5445e4bee060d8891"
- integrity sha512-+n8dL/9GWblDO0iU6eZAwEIJVr5DWigtle+Q6HLOrh/pdbXOhOtqzq8VPPE2zvNJzSKY4vH/z3iT3tn0A3ypiQ==
+ version "7.20.1"
+ resolved "https://registry.yarnpkg.com/@types/babel__core/-/babel__core-7.20.1.tgz#916ecea274b0c776fec721e333e55762d3a9614b"
+ integrity sha512-aACu/U/omhdk15O4Nfb+fHgH/z3QsfQzpnvRZhYhThms83ZnAOZz7zZAWO7mn2yyNQaA4xTO8GLK3uqFU4bYYw==
dependencies:
"@babel/parser" "^7.20.7"
"@babel/types" "^7.20.7"
@@ -1108,11 +1123,11 @@
"@babel/types" "^7.0.0"
"@types/babel__traverse@*", "@types/babel__traverse@^7.0.6":
- version "7.18.3"
- resolved "https://registry.yarnpkg.com/@types/babel__traverse/-/babel__traverse-7.18.3.tgz#dfc508a85781e5698d5b33443416b6268c4b3e8d"
- integrity sha512-1kbcJ40lLB7MHsj39U4Sh1uTd2E7rLEa79kmDpI6cy+XiXsteB3POdQomoq4FxszMrO3ZYchkhYJw7A2862b3w==
+ version "7.20.1"
+ resolved "https://registry.yarnpkg.com/@types/babel__traverse/-/babel__traverse-7.20.1.tgz#dd6f1d2411ae677dcb2db008c962598be31d6acf"
+ integrity sha512-MitHFXnhtgwsGZWtT68URpOvLN4EREih1u3QtQiN4VdAxWKRVvGCSvw/Qth0M0Qq3pJpnGOu5JaM/ydK7OGbqg==
dependencies:
- "@babel/types" "^7.3.0"
+ "@babel/types" "^7.20.7"
"@types/graceful-fs@^4.1.2", "@types/graceful-fs@^4.1.3":
version "4.1.6"
@@ -1151,9 +1166,9 @@
integrity sha512-Klz949h02Gz2uZCMGwDUSDS1YBlTdDDgbWHi+81l29tQALUtvz4rAYi5uoVhE5Lagoq6DeqAUlbrHvW/mXDgdQ==
"@types/node@*":
- version "18.11.18"
- resolved "https://registry.yarnpkg.com/@types/node/-/node-18.11.18.tgz#8dfb97f0da23c2293e554c5a50d61ef134d7697f"
- integrity sha512-DHQpWGjyQKSHj3ebjFI/wRKcqQcdR+MoFBygntYOZytCqNfkd2ZC4ARDJ2DQqhjH5p85Nnd3jhUJIXrszFX/JA==
+ version "20.2.5"
+ resolved "https://registry.yarnpkg.com/@types/node/-/node-20.2.5.tgz#26d295f3570323b2837d322180dfbf1ba156fefb"
+ integrity sha512-JJulVEQXmiY9Px5axXHeYGLSjhkZEnD+MDPDGbCbIAbMslkKwmygtZFy1X6s/075Yo94sf8GuSlFfPzysQrWZQ==
"@types/parse-json@^4.0.0":
version "4.0.0"
@@ -1161,35 +1176,9 @@
integrity sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA==
"@types/prettier@^2.1.5":
- version "2.7.2"
- resolved "https://registry.yarnpkg.com/@types/prettier/-/prettier-2.7.2.tgz#6c2324641cc4ba050a8c710b2b251b377581fbf0"
- integrity sha512-KufADq8uQqo1pYKVIYzfKbJfBAc0sOeXqGbFaSpv8MRmC/zXgowNZmFcbngndGk922QDmOASEXUZCaY48gs4cg==
-
-"@types/prop-types@*":
- version "15.7.5"
- resolved "https://registry.yarnpkg.com/@types/prop-types/-/prop-types-15.7.5.tgz#5f19d2b85a98e9558036f6a3cacc8819420f05cf"
- integrity sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w==
-
-"@types/react-dom@^18":
- version "18.0.10"
- resolved "https://registry.yarnpkg.com/@types/react-dom/-/react-dom-18.0.10.tgz#3b66dec56aa0f16a6cc26da9e9ca96c35c0b4352"
- integrity sha512-E42GW/JA4Qv15wQdqJq8DL4JhNpB3prJgjgapN3qJT9K2zO5IIAQh4VXvCEDupoqAwnz0cY4RlXeC/ajX5SFHg==
- dependencies:
- "@types/react" "*"
-
-"@types/react@*", "@types/react@^18":
- version "18.0.27"
- resolved "https://registry.yarnpkg.com/@types/react/-/react-18.0.27.tgz#d9425abe187a00f8a5ec182b010d4fd9da703b71"
- integrity sha512-3vtRKHgVxu3Jp9t718R9BuzoD4NcQ8YJ5XRzsSKxNDiDonD2MXIT1TmSkenxuCycZJoQT5d2vE8LwWJxBC1gmA==
- dependencies:
- "@types/prop-types" "*"
- "@types/scheduler" "*"
- csstype "^3.0.2"
-
-"@types/scheduler@*":
- version "0.16.2"
- resolved "https://registry.yarnpkg.com/@types/scheduler/-/scheduler-0.16.2.tgz#1a62f89525723dde24ba1b01b092bf5df8ad4d39"
- integrity sha512-hppQEBDmlwhFAXKJX2KnWLYu5yMfi91yazPb2l+lbJiwW+wdo1gNeRA+3RgNSO39WYX2euey41KEwnqesU2Jew==
+ version "2.7.3"
+ resolved "https://registry.yarnpkg.com/@types/prettier/-/prettier-2.7.3.tgz#3e51a17e291d01d17d3fc61422015a933af7a08f"
+ integrity sha512-+68kP9yzs4LMp7VNh8gdzMSPZFL44MLGqiHWvttYJe+6qnuVr4Ek9wSBQoveqY/r+LwjCcU29kNVkidwim+kYA==
"@types/stack-utils@^2.0.0":
version "2.0.1"
@@ -1209,21 +1198,20 @@
"@types/yargs-parser" "*"
"@types/yargs@^17.0.8":
- version "17.0.20"
- resolved "https://registry.yarnpkg.com/@types/yargs/-/yargs-17.0.20.tgz#107f0fcc13bd4a524e352b41c49fe88aab5c54d5"
- integrity sha512-eknWrTHofQuPk2iuqDm1waA7V6xPlbgBoaaXEgYkClhLOnB0TtbW+srJaOToAgawPxPlHQzwypFA2bhZaUGP5A==
+ version "17.0.24"
+ resolved "https://registry.yarnpkg.com/@types/yargs/-/yargs-17.0.24.tgz#b3ef8d50ad4aa6aecf6ddc97c580a00f5aa11902"
+ integrity sha512-6i0aC7jV6QzQB8ne1joVZ0eSFIstHsCrobmOtghM11yGlH0j43FKL2UhWdELkyps0zuf7qVTUVCCR+tgSlyLLw==
dependencies:
"@types/yargs-parser" "*"
-"@vitejs/plugin-react@^3":
- version "3.0.1"
- resolved "https://registry.yarnpkg.com/@vitejs/plugin-react/-/plugin-react-3.0.1.tgz#ad21fb81377970dd4021a31cd95a03eb6f5c4c48"
- integrity sha512-mx+QvYwIbbpOIJw+hypjnW1lAbKDHtWK5ibkF/V1/oMBu8HU/chb+SnqJDAsLq1+7rGqjktCEomMTM5KShzUKQ==
+"@vitejs/plugin-react@^4":
+ version "4.0.0"
+ resolved "https://registry.yarnpkg.com/@vitejs/plugin-react/-/plugin-react-4.0.0.tgz#46d1c37c507447d10467be1c111595174555ef28"
+ integrity sha512-HX0XzMjL3hhOYm+0s95pb0Z7F8O81G7joUHgfDd/9J/ZZf5k4xX6QAMFkKsHFxaHlf6X7GD7+XuaZ66ULiJuhQ==
dependencies:
- "@babel/core" "^7.20.7"
- "@babel/plugin-transform-react-jsx-self" "^7.18.6"
+ "@babel/core" "^7.21.4"
+ "@babel/plugin-transform-react-jsx-self" "^7.21.0"
"@babel/plugin-transform-react-jsx-source" "^7.19.6"
- magic-string "^0.27.0"
react-refresh "^0.14.0"
acorn-jsx@^5.3.2:
@@ -1306,9 +1294,9 @@ argparse@^2.0.1:
integrity sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==
aria-hidden@^1.1.3:
- version "1.2.2"
- resolved "https://registry.yarnpkg.com/aria-hidden/-/aria-hidden-1.2.2.tgz#8c4f7cc88d73ca42114106fdf6f47e68d31475b8"
- integrity sha512-6y/ogyDTk/7YAe91T3E2PR1ALVKyM2QbTio5HwM+N1Q6CMlCKhvClyIjkckBswa0f2xJhjsfzIGa1yVSe1UMVA==
+ version "1.2.3"
+ resolved "https://registry.yarnpkg.com/aria-hidden/-/aria-hidden-1.2.3.tgz#14aeb7fb692bbb72d69bebfa47279c1fd725e954"
+ integrity sha512-xcLxITLe2HYa1cnYnwCjkOO1PqUHQpozB8x9AR0OgWN2woOBi5kSDVxKfd0b7sb1hw5qFeJhXm9H1nu3xSfLeQ==
dependencies:
tslib "^2.0.0"
@@ -1327,6 +1315,14 @@ arr-union@^3.1.0:
resolved "https://registry.yarnpkg.com/arr-union/-/arr-union-3.1.0.tgz#e39b09aea9def866a8f206e288af63919bae39c4"
integrity sha512-sKpyeERZ02v1FeCZT8lrfJq5u6goHCtpTAzPwJYe7c8SPFOboNjNg1vz2L4VTn9T4PQxEx13TbXLmYUcS6Ug7Q==
+array-buffer-byte-length@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.yarnpkg.com/array-buffer-byte-length/-/array-buffer-byte-length-1.0.0.tgz#fabe8bc193fea865f317fe7807085ee0dee5aead"
+ integrity sha512-LPuwb2P+NrQw3XhxGc36+XSvuBPopovXYTR9Ew++Du9Yb/bx5AzBfrIsBoj0EZUifjQU+sHL21sseZ3jerWO/A==
+ dependencies:
+ call-bind "^1.0.2"
+ is-array-buffer "^3.0.1"
+
array-differ@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/array-differ/-/array-differ-3.0.0.tgz#3cbb3d0f316810eafcc47624734237d6aee4ae6b"
@@ -1418,15 +1414,15 @@ babel-jest@^26.6.3:
graceful-fs "^4.2.4"
slash "^3.0.0"
-babel-jest@^29.4.1:
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/babel-jest/-/babel-jest-29.4.1.tgz#01fa167e27470b35c2d4a1b841d9586b1764da19"
- integrity sha512-xBZa/pLSsF/1sNpkgsiT3CmY7zV1kAsZ9OxxtrFqYucnOuRftXAfcJqcDVyOPeN4lttWTwhLdu0T9f8uvoPEUg==
+babel-jest@^29.5.0:
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/babel-jest/-/babel-jest-29.5.0.tgz#3fe3ddb109198e78b1c88f9ebdecd5e4fc2f50a5"
+ integrity sha512-mA4eCDh5mSo2EcA9xQjVTpmbbNk32Zb3Q3QFQsNhaK56Q+yoXowzFodLux30HRgyOho5rsQ6B0P9QpMkvvnJ0Q==
dependencies:
- "@jest/transform" "^29.4.1"
+ "@jest/transform" "^29.5.0"
"@types/babel__core" "^7.1.14"
babel-plugin-istanbul "^6.1.1"
- babel-preset-jest "^29.4.0"
+ babel-preset-jest "^29.5.0"
chalk "^4.0.0"
graceful-fs "^4.2.9"
slash "^3.0.0"
@@ -1452,10 +1448,10 @@ babel-plugin-jest-hoist@^26.6.2:
"@types/babel__core" "^7.0.0"
"@types/babel__traverse" "^7.0.6"
-babel-plugin-jest-hoist@^29.4.0:
- version "29.4.0"
- resolved "https://registry.yarnpkg.com/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.4.0.tgz#3fd3dfcedf645932df6d0c9fc3d9a704dd860248"
- integrity sha512-a/sZRLQJEmsmejQ2rPEUe35nO1+C9dc9O1gplH1SXmJxveQSRUYdBk8yGZG/VOUuZs1u2aHZJusEGoRMbhhwCg==
+babel-plugin-jest-hoist@^29.5.0:
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.5.0.tgz#a97db437936f441ec196990c9738d4b88538618a"
+ integrity sha512-zSuuuAlTMT4mzLj2nPnUm6fsE6270vdOfnpbJ+RmruU75UhLFvL0N2NgI7xpeS7NaB6hGqmd5pVpGTDYvi4Q3w==
dependencies:
"@babel/template" "^7.3.3"
"@babel/types" "^7.3.3"
@@ -1497,12 +1493,12 @@ babel-preset-jest@^26.6.2:
babel-plugin-jest-hoist "^26.6.2"
babel-preset-current-node-syntax "^1.0.0"
-babel-preset-jest@^29.4.0:
- version "29.4.0"
- resolved "https://registry.yarnpkg.com/babel-preset-jest/-/babel-preset-jest-29.4.0.tgz#c2b03c548b02dea0a18ae21d5759c136f9251ee4"
- integrity sha512-fUB9vZflUSM3dO/6M2TCAepTzvA4VkOvl67PjErcrQMGt9Eve7uazaeyCZ2th3UtI7ljpiBJES0F7A1vBRsLZA==
+babel-preset-jest@^29.5.0:
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/babel-preset-jest/-/babel-preset-jest-29.5.0.tgz#57bc8cc88097af7ff6a5ab59d1cd29d52a5916e2"
+ integrity sha512-JOMloxOqdiBSxMAzjRaH023/vvcaSaec49zvg+2LmNsktC7ei39LTJGw02J+9uUtTZUq6xbLyJ4dxe9sSmIuAg==
dependencies:
- babel-plugin-jest-hoist "^29.4.0"
+ babel-plugin-jest-hoist "^29.5.0"
babel-preset-current-node-syntax "^1.0.0"
balanced-match@^1.0.0:
@@ -1555,14 +1551,14 @@ braces@^3.0.2:
fill-range "^7.0.1"
browserslist@^4.21.3:
- version "4.21.4"
- resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.21.4.tgz#e7496bbc67b9e39dd0f98565feccdcb0d4ff6987"
- integrity sha512-CBHJJdDmgjl3daYjN5Cp5kbTf1mUhZoS+beLklHIvkOWscs83YAhLlF3Wsh/lciQYAcbBJgTOD44VtG31ZM4Hw==
+ version "4.21.7"
+ resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.21.7.tgz#e2b420947e5fb0a58e8f4668ae6e23488127e551"
+ integrity sha512-BauCXrQ7I2ftSqd2mvKHGo85XR0u7Ru3C/Hxsy/0TkfCtjrmAbPdzLGasmoiBxplpDXlPvdjX9u7srIMfgasNA==
dependencies:
- caniuse-lite "^1.0.30001400"
- electron-to-chromium "^1.4.251"
- node-releases "^2.0.6"
- update-browserslist-db "^1.0.9"
+ caniuse-lite "^1.0.30001489"
+ electron-to-chromium "^1.4.411"
+ node-releases "^2.0.12"
+ update-browserslist-db "^1.0.11"
bser@2.1.1:
version "2.1.1"
@@ -1614,10 +1610,10 @@ camelcase@^6.2.0:
resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-6.3.0.tgz#5685b95eb209ac9c0c177467778c9c84df58ba9a"
integrity sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==
-caniuse-lite@^1.0.30001400:
- version "1.0.30001449"
- resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001449.tgz#a8d11f6a814c75c9ce9d851dc53eb1d1dfbcd657"
- integrity sha512-CPB+UL9XMT/Av+pJxCKGhdx+yg1hzplvFJQlJ2n68PyQGMz9L/E2zCyLdOL8uasbouTUgnPl+y0tccI/se+BEw==
+caniuse-lite@^1.0.30001489:
+ version "1.0.30001495"
+ resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001495.tgz#64a0ccef1911a9dcff647115b4430f8eff1ef2d9"
+ integrity sha512-F6x5IEuigtUfU5ZMQK2jsy5JqUUlEFRVZq8bO2a+ysq5K7jD6PPc9YXZj78xDNS3uNchesp1Jw47YXEqr+Viyg==
capture-exit@^2.0.0:
version "2.0.0"
@@ -1662,9 +1658,9 @@ ci-info@^2.0.0:
integrity sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ==
ci-info@^3.2.0:
- version "3.7.1"
- resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-3.7.1.tgz#708a6cdae38915d597afdf3b145f2f8e1ff55f3f"
- integrity sha512-4jYS4MOAaCIStSRwiuxc4B8MYhIe676yO1sYGzARnjXkWpmzZMMYxY6zu8WYWDhSuth5zhrQ1rhNSibyyvv4/w==
+ version "3.8.0"
+ resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-3.8.0.tgz#81408265a5380c929f0bc665d62256628ce9ef91"
+ integrity sha512-eXTggHWSooYhq49F2opQhuHWgzucfF2YgODK4e1566GQs5BIfP30B0oenwBJHfWxAs2fyPB1s7Mg949zLf61Yw==
cjs-module-lexer@^1.0.0:
version "1.2.2"
@@ -1799,9 +1795,9 @@ csstype@3.0.9:
integrity sha512-rpw6JPxK6Rfg1zLOYCSwle2GFOOsnjmDYDaBwEcwoOg4qlsIVCN789VkBZDJAGi4T07gI4YSutR43t9Zz4Lzuw==
csstype@^3.0.2:
- version "3.1.1"
- resolved "https://registry.yarnpkg.com/csstype/-/csstype-3.1.1.tgz#841b532c45c758ee546a11d5bd7b7b473c8c30b9"
- integrity sha512-DJR/VvkAvSZW9bTouZue2sSxDwdTN92uHjqeKVm+0dAqdfNykRzQ95tay8aXMBAAPpUiq4Qcug2L7neoRh2Egw==
+ version "3.1.2"
+ resolved "https://registry.yarnpkg.com/csstype/-/csstype-3.1.2.tgz#1d4bf9d572f11c14031f0436e1c10bc1f571f50b"
+ integrity sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ==
debug@^2.2.0, debug@^2.3.3:
version "2.6.9"
@@ -1840,14 +1836,14 @@ deep-is@^0.1.3:
integrity sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==
deepmerge@^4.2.2:
- version "4.3.0"
- resolved "https://registry.yarnpkg.com/deepmerge/-/deepmerge-4.3.0.tgz#65491893ec47756d44719ae520e0e2609233b59b"
- integrity sha512-z2wJZXrmeHdvYJp/Ux55wIjqo81G5Bp4c+oELTW+7ar6SogWHajt5a9gO3s3IDaGSAXjDk0vlQKN3rms8ab3og==
+ version "4.3.1"
+ resolved "https://registry.yarnpkg.com/deepmerge/-/deepmerge-4.3.1.tgz#44b5f2147cd3b00d4b56137685966f26fd25dd4a"
+ integrity sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==
-define-properties@^1.1.3, define-properties@^1.1.4:
- version "1.1.4"
- resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.1.4.tgz#0b14d7bd7fbeb2f3572c3a7eda80ea5d57fb05b1"
- integrity sha512-uckOqKcfaVvtBdsVkdPv3XjveQJsNQqmhXgRi8uhvWWuPYZCNlzT8qAyblUgNoXdHdjMTzAqeGjAoli8f+bzPA==
+define-properties@^1.1.3, define-properties@^1.1.4, define-properties@^1.2.0:
+ version "1.2.0"
+ resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.2.0.tgz#52988570670c9eacedd8064f4a990f2405849bd5"
+ integrity sha512-xvqAVKGfT1+UAvPwKTVw/njhdQ8ZhXK4lI0bCIuCMrp2up9nPnaDftrLtmpTazqd1o+UY4zgzU+avtMbDP+ldA==
dependencies:
has-property-descriptors "^1.0.0"
object-keys "^1.1.1"
@@ -1879,10 +1875,10 @@ detect-newline@^3.0.0:
resolved "https://registry.yarnpkg.com/detect-newline/-/detect-newline-3.1.0.tgz#576f5dfc63ae1a192ff192d8ad3af6308991b651"
integrity sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==
-diff-sequences@^29.3.1:
- version "29.3.1"
- resolved "https://registry.yarnpkg.com/diff-sequences/-/diff-sequences-29.3.1.tgz#104b5b95fe725932421a9c6e5b4bef84c3f2249e"
- integrity sha512-hlM3QR272NXCi4pq+N4Kok4kOp6EsgOM3ZSpJI7Da3UAs+Ttsi8MRmB6trM/lhyzUxGfOgnpkHtgqm5Q/CTcfQ==
+diff-sequences@^29.4.3:
+ version "29.4.3"
+ resolved "https://registry.yarnpkg.com/diff-sequences/-/diff-sequences-29.4.3.tgz#9314bc1fabe09267ffeca9cbafc457d8499a13f2"
+ integrity sha512-ofrBgwpPhCD85kMKtE9RYFFq6OC1A89oW2vvgWZNCwxrUpRUILopY7lsYyMDSjc8g6U6aiO0Qubg6r4Wgt5ZnA==
doctrine@^2.1.0:
version "2.1.0"
@@ -1906,10 +1902,10 @@ dom-helpers@^5.0.1:
"@babel/runtime" "^7.8.7"
csstype "^3.0.2"
-electron-to-chromium@^1.4.251:
- version "1.4.284"
- resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.284.tgz#61046d1e4cab3a25238f6bf7413795270f125592"
- integrity sha512-M8WEXFuKXMYMVr45fo8mq0wUrrJHheiKZf6BArTKk9ZBYCKJEOU5H8cdWgDT+qCVZf7Na4lVUaZsA+h6uA9+PA==
+electron-to-chromium@^1.4.411:
+ version "1.4.423"
+ resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.423.tgz#99567f3a0563fe0d1d0931e9ce851bca239f6658"
+ integrity sha512-y4A7YfQcDGPAeSWM1IuoWzXpg9RY1nwHzHSwRtCSQFp9FgAVDgdWlFf0RbdWfLWQ2WUI+bddUgk5RgTjqRE6FQ==
emittery@^0.13.1:
version "0.13.1"
@@ -1936,17 +1932,17 @@ error-ex@^1.3.1:
is-arrayish "^0.2.1"
es-abstract@^1.19.0, es-abstract@^1.20.4:
- version "1.21.1"
- resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.21.1.tgz#e6105a099967c08377830a0c9cb589d570dd86c6"
- integrity sha512-QudMsPOz86xYz/1dG1OuGBKOELjCh99IIWHLzy5znUB6j8xG2yMA7bfTV86VSqKF+Y/H08vQPR+9jyXpuC6hfg==
+ version "1.21.2"
+ resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.21.2.tgz#a56b9695322c8a185dc25975aa3b8ec31d0e7eff"
+ integrity sha512-y/B5POM2iBnIxCiernH1G7rC9qQoM77lLIMQLuob0zhp8C56Po81+2Nj0WFKnd0pNReDTnkYryc+zhOzpEIROg==
dependencies:
+ array-buffer-byte-length "^1.0.0"
available-typed-arrays "^1.0.5"
call-bind "^1.0.2"
es-set-tostringtag "^2.0.1"
es-to-primitive "^1.2.1"
- function-bind "^1.1.1"
function.prototype.name "^1.1.5"
- get-intrinsic "^1.1.3"
+ get-intrinsic "^1.2.0"
get-symbol-description "^1.0.0"
globalthis "^1.0.3"
gopd "^1.0.1"
@@ -1954,8 +1950,8 @@ es-abstract@^1.19.0, es-abstract@^1.20.4:
has-property-descriptors "^1.0.0"
has-proto "^1.0.1"
has-symbols "^1.0.3"
- internal-slot "^1.0.4"
- is-array-buffer "^3.0.1"
+ internal-slot "^1.0.5"
+ is-array-buffer "^3.0.2"
is-callable "^1.2.7"
is-negative-zero "^2.0.2"
is-regex "^1.1.4"
@@ -1963,11 +1959,12 @@ es-abstract@^1.19.0, es-abstract@^1.20.4:
is-string "^1.0.7"
is-typed-array "^1.1.10"
is-weakref "^1.0.2"
- object-inspect "^1.12.2"
+ object-inspect "^1.12.3"
object-keys "^1.1.1"
object.assign "^4.1.4"
regexp.prototype.flags "^1.4.3"
safe-regex-test "^1.0.0"
+ string.prototype.trim "^1.2.7"
string.prototype.trimend "^1.0.6"
string.prototype.trimstart "^1.0.6"
typed-array-length "^1.0.4"
@@ -2008,33 +2005,33 @@ esbuild-jest@^0:
"@babel/plugin-transform-modules-commonjs" "^7.12.13"
babel-jest "^26.6.3"
-esbuild@^0.16.3:
- version "0.16.17"
- resolved "https://registry.yarnpkg.com/esbuild/-/esbuild-0.16.17.tgz#fc2c3914c57ee750635fee71b89f615f25065259"
- integrity sha512-G8LEkV0XzDMNwXKgM0Jwu3nY3lSTwSGY6XbxM9cr9+s0T/qSV1q1JVPBGzm3dcjhCic9+emZDmMffkwgPeOeLg==
+esbuild@^0.17.5:
+ version "0.17.19"
+ resolved "https://registry.yarnpkg.com/esbuild/-/esbuild-0.17.19.tgz#087a727e98299f0462a3d0bcdd9cd7ff100bd955"
+ integrity sha512-XQ0jAPFkK/u3LcVRcvVHQcTIqD6E2H1fvZMA5dQPSOWb3suUbWbfbRf94pjc0bNzRYLfIrDRQXr7X+LHIm5oHw==
optionalDependencies:
- "@esbuild/android-arm" "0.16.17"
- "@esbuild/android-arm64" "0.16.17"
- "@esbuild/android-x64" "0.16.17"
- "@esbuild/darwin-arm64" "0.16.17"
- "@esbuild/darwin-x64" "0.16.17"
- "@esbuild/freebsd-arm64" "0.16.17"
- "@esbuild/freebsd-x64" "0.16.17"
- "@esbuild/linux-arm" "0.16.17"
- "@esbuild/linux-arm64" "0.16.17"
- "@esbuild/linux-ia32" "0.16.17"
- "@esbuild/linux-loong64" "0.16.17"
- "@esbuild/linux-mips64el" "0.16.17"
- "@esbuild/linux-ppc64" "0.16.17"
- "@esbuild/linux-riscv64" "0.16.17"
- "@esbuild/linux-s390x" "0.16.17"
- "@esbuild/linux-x64" "0.16.17"
- "@esbuild/netbsd-x64" "0.16.17"
- "@esbuild/openbsd-x64" "0.16.17"
- "@esbuild/sunos-x64" "0.16.17"
- "@esbuild/win32-arm64" "0.16.17"
- "@esbuild/win32-ia32" "0.16.17"
- "@esbuild/win32-x64" "0.16.17"
+ "@esbuild/android-arm" "0.17.19"
+ "@esbuild/android-arm64" "0.17.19"
+ "@esbuild/android-x64" "0.17.19"
+ "@esbuild/darwin-arm64" "0.17.19"
+ "@esbuild/darwin-x64" "0.17.19"
+ "@esbuild/freebsd-arm64" "0.17.19"
+ "@esbuild/freebsd-x64" "0.17.19"
+ "@esbuild/linux-arm" "0.17.19"
+ "@esbuild/linux-arm64" "0.17.19"
+ "@esbuild/linux-ia32" "0.17.19"
+ "@esbuild/linux-loong64" "0.17.19"
+ "@esbuild/linux-mips64el" "0.17.19"
+ "@esbuild/linux-ppc64" "0.17.19"
+ "@esbuild/linux-riscv64" "0.17.19"
+ "@esbuild/linux-s390x" "0.17.19"
+ "@esbuild/linux-x64" "0.17.19"
+ "@esbuild/netbsd-x64" "0.17.19"
+ "@esbuild/openbsd-x64" "0.17.19"
+ "@esbuild/sunos-x64" "0.17.19"
+ "@esbuild/win32-arm64" "0.17.19"
+ "@esbuild/win32-ia32" "0.17.19"
+ "@esbuild/win32-x64" "0.17.19"
escalade@^3.1.1:
version "3.1.1"
@@ -2066,9 +2063,9 @@ eslint-import-resolver-node@^0.3.7:
resolve "^1.22.1"
eslint-module-utils@^2.7.4:
- version "2.7.4"
- resolved "https://registry.yarnpkg.com/eslint-module-utils/-/eslint-module-utils-2.7.4.tgz#4f3e41116aaf13a20792261e61d3a2e7e0583974"
- integrity sha512-j4GT+rqzCoRKHwURX7pddtIPGySnX9Si/cgMI5ztrcqOPtk5dDEeZ34CQVPphnqkJytlc97Vuk05Um2mJ3gEQA==
+ version "2.8.0"
+ resolved "https://registry.yarnpkg.com/eslint-module-utils/-/eslint-module-utils-2.8.0.tgz#e439fee65fc33f6bba630ff621efc38ec0375c49"
+ integrity sha512-aWajIYfsqCKRDgUfjEXNN/JlrzauMuSEy5sbd7WXbtW3EH6A6MpwEh42c7qD+MqQo9QMJ6fWLAeIJynx0g6OAw==
dependencies:
debug "^3.2.7"
@@ -2143,38 +2140,29 @@ eslint-rule-composer@^0.3.0:
resolved "https://registry.yarnpkg.com/eslint-rule-composer/-/eslint-rule-composer-0.3.0.tgz#79320c927b0c5c0d3d3d2b76c8b4a488f25bbaf9"
integrity sha512-bt+Sh8CtDmn2OajxvNO+BX7Wn4CIWMpTRm3MaiKPCQcnnlm0CS2mhui6QaoeQugs+3Kj2ESKEEGJUdVafwhiCg==
-eslint-scope@^7.1.1:
- version "7.1.1"
- resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-7.1.1.tgz#fff34894c2f65e5226d3041ac480b4513a163642"
- integrity sha512-QKQM/UXpIiHcLqJ5AOyIW7XZmzjkzQXYE54n1++wb0u9V/abW3l9uQnxX8Z5Xd18xyKIMTUAyQ0k1e8pz6LUrw==
+eslint-scope@^7.2.0:
+ version "7.2.0"
+ resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-7.2.0.tgz#f21ebdafda02352f103634b96dd47d9f81ca117b"
+ integrity sha512-DYj5deGlHBfMt15J7rdtyKNq/Nqlv5KfU4iodrQ019XESsRnwXH9KAE0y3cwtUHDo2ob7CypAnCqefh6vioWRw==
dependencies:
esrecurse "^4.3.0"
estraverse "^5.2.0"
-eslint-utils@^3.0.0:
- version "3.0.0"
- resolved "https://registry.yarnpkg.com/eslint-utils/-/eslint-utils-3.0.0.tgz#8aebaface7345bb33559db0a1f13a1d2d48c3672"
- integrity sha512-uuQC43IGctw68pJA1RgbQS8/NP7rch6Cwd4j3ZBtgo4/8Flj4eGE7ZYSZRN3iq5pVUv6GPdW5Z1RFleo84uLDA==
- dependencies:
- eslint-visitor-keys "^2.0.0"
-
-eslint-visitor-keys@^2.0.0:
- version "2.1.0"
- resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-2.1.0.tgz#f65328259305927392c938ed44eb0a5c9b2bd303"
- integrity sha512-0rSmRBzXgDzIsD6mGdJgevzgezI534Cer5L/vyMX0kHzT/jiB43jRhd9YUlMGYLQy2zprNmoT8qasCGtY+QaKw==
-
-eslint-visitor-keys@^3.3.0:
- version "3.3.0"
- resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-3.3.0.tgz#f6480fa6b1f30efe2d1968aa8ac745b862469826"
- integrity sha512-mQ+suqKJVyeuwGYHAdjMFqjCyfl8+Ldnxuyp3ldiMBFKkvytrXUZWaiPCEav8qDHKty44bD+qV1IP4T+w+xXRA==
+eslint-visitor-keys@^3.3.0, eslint-visitor-keys@^3.4.1:
+ version "3.4.1"
+ resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-3.4.1.tgz#c22c48f48942d08ca824cc526211ae400478a994"
+ integrity sha512-pZnmmLwYzf+kWaM/Qgrvpen51upAktaaiI01nsJD/Yr3lMOdNtq0cxkrrg16w64VtisN6okbs7Q8AfGqj4c9fA==
eslint@^8:
- version "8.33.0"
- resolved "https://registry.yarnpkg.com/eslint/-/eslint-8.33.0.tgz#02f110f32998cb598c6461f24f4d306e41ca33d7"
- integrity sha512-WjOpFQgKK8VrCnAtl8We0SUOy/oVZ5NHykyMiagV1M9r8IFpIJX7DduK6n1mpfhlG7T1NLWm2SuD8QB7KFySaA==
- dependencies:
- "@eslint/eslintrc" "^1.4.1"
- "@humanwhocodes/config-array" "^0.11.8"
+ version "8.42.0"
+ resolved "https://registry.yarnpkg.com/eslint/-/eslint-8.42.0.tgz#7bebdc3a55f9ed7167251fe7259f75219cade291"
+ integrity sha512-ulg9Ms6E1WPf67PHaEY4/6E2tEn5/f7FXGzr3t9cBMugOmf1INYvuUwwh1aXQN4MfJ6a5K2iNwP3w4AColvI9A==
+ dependencies:
+ "@eslint-community/eslint-utils" "^4.2.0"
+ "@eslint-community/regexpp" "^4.4.0"
+ "@eslint/eslintrc" "^2.0.3"
+ "@eslint/js" "8.42.0"
+ "@humanwhocodes/config-array" "^0.11.10"
"@humanwhocodes/module-importer" "^1.0.1"
"@nodelib/fs.walk" "^1.2.8"
ajv "^6.10.0"
@@ -2183,24 +2171,22 @@ eslint@^8:
debug "^4.3.2"
doctrine "^3.0.0"
escape-string-regexp "^4.0.0"
- eslint-scope "^7.1.1"
- eslint-utils "^3.0.0"
- eslint-visitor-keys "^3.3.0"
- espree "^9.4.0"
- esquery "^1.4.0"
+ eslint-scope "^7.2.0"
+ eslint-visitor-keys "^3.4.1"
+ espree "^9.5.2"
+ esquery "^1.4.2"
esutils "^2.0.2"
fast-deep-equal "^3.1.3"
file-entry-cache "^6.0.1"
find-up "^5.0.0"
glob-parent "^6.0.2"
globals "^13.19.0"
- grapheme-splitter "^1.0.4"
+ graphemer "^1.4.0"
ignore "^5.2.0"
import-fresh "^3.0.0"
imurmurhash "^0.1.4"
is-glob "^4.0.0"
is-path-inside "^3.0.3"
- js-sdsl "^4.1.4"
js-yaml "^4.1.0"
json-stable-stringify-without-jsonify "^1.0.1"
levn "^0.4.1"
@@ -2208,29 +2194,28 @@ eslint@^8:
minimatch "^3.1.2"
natural-compare "^1.4.0"
optionator "^0.9.1"
- regexpp "^3.2.0"
strip-ansi "^6.0.1"
strip-json-comments "^3.1.0"
text-table "^0.2.0"
-espree@^9.4.0:
- version "9.4.1"
- resolved "https://registry.yarnpkg.com/espree/-/espree-9.4.1.tgz#51d6092615567a2c2cff7833445e37c28c0065bd"
- integrity sha512-XwctdmTO6SIvCzd9810yyNzIrOrqNYV9Koizx4C/mRhf9uq0o4yHoCEU/670pOxOL/MSraektvSAji79kX90Vg==
+espree@^9.5.2:
+ version "9.5.2"
+ resolved "https://registry.yarnpkg.com/espree/-/espree-9.5.2.tgz#e994e7dc33a082a7a82dceaf12883a829353215b"
+ integrity sha512-7OASN1Wma5fum5SrNhFMAMJxOUAbhyfQ8dQ//PJaJbNw0URTPWqIghHWt1MmAANKhHZIYOHruW4Kw4ruUWOdGw==
dependencies:
acorn "^8.8.0"
acorn-jsx "^5.3.2"
- eslint-visitor-keys "^3.3.0"
+ eslint-visitor-keys "^3.4.1"
esprima@^4.0.0:
version "4.0.1"
resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71"
integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==
-esquery@^1.4.0:
- version "1.4.0"
- resolved "https://registry.yarnpkg.com/esquery/-/esquery-1.4.0.tgz#2148ffc38b82e8c7057dfed48425b3e61f0f24a5"
- integrity sha512-cCDispWt5vHHtwMY2YrAQ4ibFkAL8RbH5YGBnZBc90MolvvfkkQcJro/aZiAQUlQ3qgrYS6D6v8Gc5G5CQsc9w==
+esquery@^1.4.2:
+ version "1.5.0"
+ resolved "https://registry.yarnpkg.com/esquery/-/esquery-1.5.0.tgz#6ce17738de8577694edd7361c57182ac8cb0db0b"
+ integrity sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==
dependencies:
estraverse "^5.1.0"
@@ -2317,16 +2302,16 @@ expand-brackets@^2.1.4:
snapdragon "^0.8.1"
to-regex "^3.0.1"
-expect@^29.4.1:
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/expect/-/expect-29.4.1.tgz#58cfeea9cbf479b64ed081fd1e074ac8beb5a1fe"
- integrity sha512-OKrGESHOaMxK3b6zxIq9SOW8kEXztKff/Dvg88j4xIJxur1hspEbedVkR3GpHe5LO+WB2Qw7OWN0RMTdp6as5A==
+expect@^29.5.0:
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/expect/-/expect-29.5.0.tgz#68c0509156cb2a0adb8865d413b137eeaae682f7"
+ integrity sha512-yM7xqUrCO2JdpFo4XpM82t+PJBFybdqoQuJLDGeDX2ij8NZzqRHyu3Hp188/JX7SWqud+7t4MUdvcgGBICMHZg==
dependencies:
- "@jest/expect-utils" "^29.4.1"
- jest-get-type "^29.2.0"
- jest-matcher-utils "^29.4.1"
- jest-message-util "^29.4.1"
- jest-util "^29.4.1"
+ "@jest/expect-utils" "^29.5.0"
+ jest-get-type "^29.4.3"
+ jest-matcher-utils "^29.5.0"
+ jest-message-util "^29.5.0"
+ jest-util "^29.5.0"
extend-shallow@^2.0.1:
version "2.0.1"
@@ -2363,9 +2348,9 @@ fast-deep-equal@^3.1.1, fast-deep-equal@^3.1.3:
integrity sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==
fast-diff@^1.1.2:
- version "1.2.0"
- resolved "https://registry.yarnpkg.com/fast-diff/-/fast-diff-1.2.0.tgz#73ee11982d86caaf7959828d519cfe927fac5f03"
- integrity sha512-xJuoT5+L99XlZ8twedaRf6Ax2TgQVxvgZOYoPKqZufmJib0tL2tegPBOZb1pVNgIhlqDlA0eO0c3wBvQcmzx4w==
+ version "1.3.0"
+ resolved "https://registry.yarnpkg.com/fast-diff/-/fast-diff-1.3.0.tgz#ece407fa550a64d638536cd727e129c61616e0f0"
+ integrity sha512-VxPP4NqbUjj6MaAOafWeUn2cXWLcCtljklUtZf0Ind4XQ+QPtmA0b18zZy0jIQx+ExRVCR/ZQpBmik5lXshNsw==
fast-json-stable-stringify@^2.0.0, fast-json-stable-stringify@^2.1.0:
version "2.1.0"
@@ -2493,7 +2478,7 @@ function.prototype.name@^1.1.5:
es-abstract "^1.19.0"
functions-have-names "^1.2.2"
-functions-have-names@^1.2.2:
+functions-have-names@^1.2.2, functions-have-names@^1.2.3:
version "1.2.3"
resolved "https://registry.yarnpkg.com/functions-have-names/-/functions-have-names-1.2.3.tgz#0404fe4ee2ba2f607f0e0ec3c80bae994133b834"
integrity sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==
@@ -2508,13 +2493,14 @@ get-caller-file@^2.0.5:
resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e"
integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==
-get-intrinsic@^1.0.2, get-intrinsic@^1.1.1, get-intrinsic@^1.1.3:
- version "1.2.0"
- resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.2.0.tgz#7ad1dc0535f3a2904bba075772763e5051f6d05f"
- integrity sha512-L049y6nFOuom5wGyRc3/gdTLO94dySVKRACj1RmJZBQXlbTMhtNIgkWkUHq+jYmZvKf14EW1EoJnnjbmoHij0Q==
+get-intrinsic@^1.0.2, get-intrinsic@^1.1.1, get-intrinsic@^1.1.3, get-intrinsic@^1.2.0:
+ version "1.2.1"
+ resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.2.1.tgz#d295644fed4505fc9cde952c37ee12b477a83d82"
+ integrity sha512-2DcsyfABl+gVHEfCOaTrWgyt+tb6MSEGmKq+kI5HwLbIYgjgmMcV8KQ41uaKz1xxUcn9tJtgFbQUEVcEbd0FYw==
dependencies:
function-bind "^1.1.1"
has "^1.0.3"
+ has-proto "^1.0.1"
has-symbols "^1.0.3"
get-package-type@^0.1.0:
@@ -2600,14 +2586,14 @@ gopd@^1.0.1:
get-intrinsic "^1.1.3"
graceful-fs@^4.2.4, graceful-fs@^4.2.9:
- version "4.2.10"
- resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.10.tgz#147d3a006da4ca3ce14728c7aefc287c367d7a6c"
- integrity sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==
+ version "4.2.11"
+ resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.11.tgz#4183e4e8bf08bb6e05bbb2f7d2e0c8f712ca40e3"
+ integrity sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==
-grapheme-splitter@^1.0.4:
- version "1.0.4"
- resolved "https://registry.yarnpkg.com/grapheme-splitter/-/grapheme-splitter-1.0.4.tgz#9cf3a665c6247479896834af35cf1dbb4400767e"
- integrity sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ==
+graphemer@^1.4.0:
+ version "1.4.0"
+ resolved "https://registry.yarnpkg.com/graphemer/-/graphemer-1.4.0.tgz#fb2f1d55e0e3a1849aeffc90c4fa0dd53a0e66c6"
+ integrity sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==
has-bigints@^1.0.1, has-bigints@^1.0.2:
version "1.0.2"
@@ -2752,12 +2738,12 @@ inherits@2:
resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c"
integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==
-internal-slot@^1.0.3, internal-slot@^1.0.4:
- version "1.0.4"
- resolved "https://registry.yarnpkg.com/internal-slot/-/internal-slot-1.0.4.tgz#8551e7baf74a7a6ba5f749cfb16aa60722f0d6f3"
- integrity sha512-tA8URYccNzMo94s5MQZgH8NB/XTa6HsOo0MLfXTKKEnHVVdegzaQoFZ7Jp44bdvLvY2waT5dc+j5ICEswhi7UQ==
+internal-slot@^1.0.3, internal-slot@^1.0.5:
+ version "1.0.5"
+ resolved "https://registry.yarnpkg.com/internal-slot/-/internal-slot-1.0.5.tgz#f2a2ee21f668f8627a4667f309dc0f4fb6674986"
+ integrity sha512-Y+R5hJrzs52QCG2laLn4udYVnxsfny9CpOhNhUvk/SSSVyF6T27FzRbF0sroPidSu3X8oEAkOn2K804mjpt6UQ==
dependencies:
- get-intrinsic "^1.1.3"
+ get-intrinsic "^1.2.0"
has "^1.0.3"
side-channel "^1.0.4"
@@ -2775,13 +2761,13 @@ is-accessor-descriptor@^1.0.0:
dependencies:
kind-of "^6.0.0"
-is-array-buffer@^3.0.1:
- version "3.0.1"
- resolved "https://registry.yarnpkg.com/is-array-buffer/-/is-array-buffer-3.0.1.tgz#deb1db4fcae48308d54ef2442706c0393997052a"
- integrity sha512-ASfLknmY8Xa2XtB4wmbz13Wu202baeA18cJBCeCy0wXUHZF0IPyVEXqKEcd+t2fNSLLL1vC6k7lxZEojNbISXQ==
+is-array-buffer@^3.0.1, is-array-buffer@^3.0.2:
+ version "3.0.2"
+ resolved "https://registry.yarnpkg.com/is-array-buffer/-/is-array-buffer-3.0.2.tgz#f2653ced8412081638ecb0ebbd0c41c6e0aecbbe"
+ integrity sha512-y+FyyR/w8vfIRq4eQcM1EYgSTnmHXPqaF+IgzgraytCFq5Xh8lllDVmAZolPJiZttZLeFSINPYMaEJ7/vWUa1w==
dependencies:
call-bind "^1.0.2"
- get-intrinsic "^1.1.3"
+ get-intrinsic "^1.2.0"
is-typed-array "^1.1.10"
is-arrayish@^0.2.1:
@@ -2822,9 +2808,9 @@ is-ci@^2.0.0:
ci-info "^2.0.0"
is-core-module@^2.11.0, is-core-module@^2.9.0:
- version "2.11.0"
- resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.11.0.tgz#ad4cb3e3863e814523c96f3f58d26cc570ff0144"
- integrity sha512-RRjxlvLDkD1YJwDbroBHMb+cukurkDWNyHx7D3oNB5x9rb5ogcksMC5wHCadcXoo67gVr/+3GFySh3134zi6rw==
+ version "2.12.1"
+ resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.12.1.tgz#0c0b6885b6f80011c71541ce15c8d66cf5a4f9fd"
+ integrity sha512-Q4ZuBAe2FUsKtyQJoQHlvP8OvBERxO3jEmy1I7hcRXcJBGGHFh/aJBswbXuS9sgrDH2QUO8ilkwNPHvHMd8clg==
dependencies:
has "^1.0.3"
@@ -3068,129 +3054,130 @@ istanbul-reports@^3.1.3:
html-escaper "^2.0.0"
istanbul-lib-report "^3.0.0"
-jest-changed-files@^29.4.0:
- version "29.4.0"
- resolved "https://registry.yarnpkg.com/jest-changed-files/-/jest-changed-files-29.4.0.tgz#ac2498bcd394228f7eddcadcf928b3583bf2779d"
- integrity sha512-rnI1oPxgFghoz32Y8eZsGJMjW54UlqT17ycQeCEktcxxwqqKdlj9afl8LNeO0Pbu+h2JQHThQP0BzS67eTRx4w==
+jest-changed-files@^29.5.0:
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/jest-changed-files/-/jest-changed-files-29.5.0.tgz#e88786dca8bf2aa899ec4af7644e16d9dcf9b23e"
+ integrity sha512-IFG34IUMUaNBIxjQXF/iu7g6EcdMrGRRxaUSw92I/2g2YC6vCdTltl4nHvt7Ci5nSJwXIkCu8Ka1DKF+X7Z1Ag==
dependencies:
execa "^5.0.0"
p-limit "^3.1.0"
-jest-circus@^29.4.1:
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/jest-circus/-/jest-circus-29.4.1.tgz#ff1b63eb04c3b111cefea9489e8dbadd23ce49bd"
- integrity sha512-v02NuL5crMNY4CGPHBEflLzl4v91NFb85a+dH9a1pUNx6Xjggrd8l9pPy4LZ1VYNRXlb+f65+7O/MSIbLir6pA==
+jest-circus@^29.5.0:
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/jest-circus/-/jest-circus-29.5.0.tgz#b5926989449e75bff0d59944bae083c9d7fb7317"
+ integrity sha512-gq/ongqeQKAplVxqJmbeUOJJKkW3dDNPY8PjhJ5G0lBRvu0e3EWGxGy5cI4LAGA7gV2UHCtWBI4EMXK8c9nQKA==
dependencies:
- "@jest/environment" "^29.4.1"
- "@jest/expect" "^29.4.1"
- "@jest/test-result" "^29.4.1"
- "@jest/types" "^29.4.1"
+ "@jest/environment" "^29.5.0"
+ "@jest/expect" "^29.5.0"
+ "@jest/test-result" "^29.5.0"
+ "@jest/types" "^29.5.0"
"@types/node" "*"
chalk "^4.0.0"
co "^4.6.0"
dedent "^0.7.0"
is-generator-fn "^2.0.0"
- jest-each "^29.4.1"
- jest-matcher-utils "^29.4.1"
- jest-message-util "^29.4.1"
- jest-runtime "^29.4.1"
- jest-snapshot "^29.4.1"
- jest-util "^29.4.1"
+ jest-each "^29.5.0"
+ jest-matcher-utils "^29.5.0"
+ jest-message-util "^29.5.0"
+ jest-runtime "^29.5.0"
+ jest-snapshot "^29.5.0"
+ jest-util "^29.5.0"
p-limit "^3.1.0"
- pretty-format "^29.4.1"
+ pretty-format "^29.5.0"
+ pure-rand "^6.0.0"
slash "^3.0.0"
stack-utils "^2.0.3"
-jest-cli@^29.4.1:
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/jest-cli/-/jest-cli-29.4.1.tgz#7abef96944f300feb9b76f68b1eb2d68774fe553"
- integrity sha512-jz7GDIhtxQ37M+9dlbv5K+/FVcIo1O/b1sX3cJgzlQUf/3VG25nvuWzlDC4F1FLLzUThJeWLu8I7JF9eWpuURQ==
+jest-cli@^29.5.0:
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/jest-cli/-/jest-cli-29.5.0.tgz#b34c20a6d35968f3ee47a7437ff8e53e086b4a67"
+ integrity sha512-L1KcP1l4HtfwdxXNFCL5bmUbLQiKrakMUriBEcc1Vfz6gx31ORKdreuWvmQVBit+1ss9NNR3yxjwfwzZNdQXJw==
dependencies:
- "@jest/core" "^29.4.1"
- "@jest/test-result" "^29.4.1"
- "@jest/types" "^29.4.1"
+ "@jest/core" "^29.5.0"
+ "@jest/test-result" "^29.5.0"
+ "@jest/types" "^29.5.0"
chalk "^4.0.0"
exit "^0.1.2"
graceful-fs "^4.2.9"
import-local "^3.0.2"
- jest-config "^29.4.1"
- jest-util "^29.4.1"
- jest-validate "^29.4.1"
+ jest-config "^29.5.0"
+ jest-util "^29.5.0"
+ jest-validate "^29.5.0"
prompts "^2.0.1"
yargs "^17.3.1"
-jest-config@^29.4.1:
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/jest-config/-/jest-config-29.4.1.tgz#e62670c6c980ec21d75941806ec4d0c0c6402728"
- integrity sha512-g7p3q4NuXiM4hrS4XFATTkd+2z0Ml2RhFmFPM8c3WyKwVDNszbl4E7cV7WIx1YZeqqCtqbtTtZhGZWJlJqngzg==
+jest-config@^29.5.0:
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/jest-config/-/jest-config-29.5.0.tgz#3cc972faec8c8aaea9ae158c694541b79f3748da"
+ integrity sha512-kvDUKBnNJPNBmFFOhDbm59iu1Fii1Q6SxyhXfvylq3UTHbg6o7j/g8k2dZyXWLvfdKB1vAPxNZnMgtKJcmu3kA==
dependencies:
"@babel/core" "^7.11.6"
- "@jest/test-sequencer" "^29.4.1"
- "@jest/types" "^29.4.1"
- babel-jest "^29.4.1"
+ "@jest/test-sequencer" "^29.5.0"
+ "@jest/types" "^29.5.0"
+ babel-jest "^29.5.0"
chalk "^4.0.0"
ci-info "^3.2.0"
deepmerge "^4.2.2"
glob "^7.1.3"
graceful-fs "^4.2.9"
- jest-circus "^29.4.1"
- jest-environment-node "^29.4.1"
- jest-get-type "^29.2.0"
- jest-regex-util "^29.2.0"
- jest-resolve "^29.4.1"
- jest-runner "^29.4.1"
- jest-util "^29.4.1"
- jest-validate "^29.4.1"
+ jest-circus "^29.5.0"
+ jest-environment-node "^29.5.0"
+ jest-get-type "^29.4.3"
+ jest-regex-util "^29.4.3"
+ jest-resolve "^29.5.0"
+ jest-runner "^29.5.0"
+ jest-util "^29.5.0"
+ jest-validate "^29.5.0"
micromatch "^4.0.4"
parse-json "^5.2.0"
- pretty-format "^29.4.1"
+ pretty-format "^29.5.0"
slash "^3.0.0"
strip-json-comments "^3.1.1"
-jest-diff@^29.4.1:
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/jest-diff/-/jest-diff-29.4.1.tgz#9a6dc715037e1fa7a8a44554e7d272088c4029bd"
- integrity sha512-uazdl2g331iY56CEyfbNA0Ut7Mn2ulAG5vUaEHXycf1L6IPyuImIxSz4F0VYBKi7LYIuxOwTZzK3wh5jHzASMw==
+jest-diff@^29.5.0:
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/jest-diff/-/jest-diff-29.5.0.tgz#e0d83a58eb5451dcc1fa61b1c3ee4e8f5a290d63"
+ integrity sha512-LtxijLLZBduXnHSniy0WMdaHjmQnt3g5sa16W4p0HqukYTTsyTW3GD1q41TyGl5YFXj/5B2U6dlh5FM1LIMgxw==
dependencies:
chalk "^4.0.0"
- diff-sequences "^29.3.1"
- jest-get-type "^29.2.0"
- pretty-format "^29.4.1"
+ diff-sequences "^29.4.3"
+ jest-get-type "^29.4.3"
+ pretty-format "^29.5.0"
-jest-docblock@^29.2.0:
- version "29.2.0"
- resolved "https://registry.yarnpkg.com/jest-docblock/-/jest-docblock-29.2.0.tgz#307203e20b637d97cee04809efc1d43afc641e82"
- integrity sha512-bkxUsxTgWQGbXV5IENmfiIuqZhJcyvF7tU4zJ/7ioTutdz4ToB5Yx6JOFBpgI+TphRY4lhOyCWGNH/QFQh5T6A==
+jest-docblock@^29.4.3:
+ version "29.4.3"
+ resolved "https://registry.yarnpkg.com/jest-docblock/-/jest-docblock-29.4.3.tgz#90505aa89514a1c7dceeac1123df79e414636ea8"
+ integrity sha512-fzdTftThczeSD9nZ3fzA/4KkHtnmllawWrXO69vtI+L9WjEIuXWs4AmyME7lN5hU7dB0sHhuPfcKofRsUb/2Fg==
dependencies:
detect-newline "^3.0.0"
-jest-each@^29.4.1:
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/jest-each/-/jest-each-29.4.1.tgz#05ce9979e7486dbd0f5d41895f49ccfdd0afce01"
- integrity sha512-QlYFiX3llJMWUV0BtWht/esGEz9w+0i7BHwODKCze7YzZzizgExB9MOfiivF/vVT0GSQ8wXLhvHXh3x2fVD4QQ==
+jest-each@^29.5.0:
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/jest-each/-/jest-each-29.5.0.tgz#fc6e7014f83eac68e22b7195598de8554c2e5c06"
+ integrity sha512-HM5kIJ1BTnVt+DQZ2ALp3rzXEl+g726csObrW/jpEGl+CDSSQpOJJX2KE/vEg8cxcMXdyEPu6U4QX5eruQv5hA==
dependencies:
- "@jest/types" "^29.4.1"
+ "@jest/types" "^29.5.0"
chalk "^4.0.0"
- jest-get-type "^29.2.0"
- jest-util "^29.4.1"
- pretty-format "^29.4.1"
-
-jest-environment-node@^29.4.1:
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/jest-environment-node/-/jest-environment-node-29.4.1.tgz#22550b7d0f8f0b16228639c9f88ca04bbf3c1974"
- integrity sha512-x/H2kdVgxSkxWAIlIh9MfMuBa0hZySmfsC5lCsWmWr6tZySP44ediRKDUiNggX/eHLH7Cd5ZN10Rw+XF5tXsqg==
- dependencies:
- "@jest/environment" "^29.4.1"
- "@jest/fake-timers" "^29.4.1"
- "@jest/types" "^29.4.1"
+ jest-get-type "^29.4.3"
+ jest-util "^29.5.0"
+ pretty-format "^29.5.0"
+
+jest-environment-node@^29.5.0:
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/jest-environment-node/-/jest-environment-node-29.5.0.tgz#f17219d0f0cc0e68e0727c58b792c040e332c967"
+ integrity sha512-ExxuIK/+yQ+6PRGaHkKewYtg6hto2uGCgvKdb2nfJfKXgZ17DfXjvbZ+jA1Qt9A8EQSfPnt5FKIfnOO3u1h9qw==
+ dependencies:
+ "@jest/environment" "^29.5.0"
+ "@jest/fake-timers" "^29.5.0"
+ "@jest/types" "^29.5.0"
"@types/node" "*"
- jest-mock "^29.4.1"
- jest-util "^29.4.1"
+ jest-mock "^29.5.0"
+ jest-util "^29.5.0"
-jest-get-type@^29.2.0:
- version "29.2.0"
- resolved "https://registry.yarnpkg.com/jest-get-type/-/jest-get-type-29.2.0.tgz#726646f927ef61d583a3b3adb1ab13f3a5036408"
- integrity sha512-uXNJlg8hKFEnDgFsrCjznB+sTxdkuqiCL6zMgA75qEbAJjJYTs9XPrvDctrEig2GDow22T/LvHgO57iJhXB/UA==
+jest-get-type@^29.4.3:
+ version "29.4.3"
+ resolved "https://registry.yarnpkg.com/jest-get-type/-/jest-get-type-29.4.3.tgz#1ab7a5207c995161100b5187159ca82dd48b3dd5"
+ integrity sha512-J5Xez4nRRMjk8emnTpWrlkyb9pfRQQanDrvWHhsR1+VUfbwxi30eVcZFlcdGInRibU4G5LwHXpI7IRHU0CY+gg==
jest-haste-map@^26.6.2:
version "26.6.2"
@@ -3213,66 +3200,66 @@ jest-haste-map@^26.6.2:
optionalDependencies:
fsevents "^2.1.2"
-jest-haste-map@^29.4.1:
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/jest-haste-map/-/jest-haste-map-29.4.1.tgz#b0579dc82d94b40ed9041af56ad25c2f80bedaeb"
- integrity sha512-imTjcgfVVTvg02khXL11NNLTx9ZaofbAWhilrMg/G8dIkp+HYCswhxf0xxJwBkfhWb3e8dwbjuWburvxmcr58w==
+jest-haste-map@^29.5.0:
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/jest-haste-map/-/jest-haste-map-29.5.0.tgz#69bd67dc9012d6e2723f20a945099e972b2e94de"
+ integrity sha512-IspOPnnBro8YfVYSw6yDRKh/TiCdRngjxeacCps1cQ9cgVN6+10JUcuJ1EabrgYLOATsIAigxA0rLR9x/YlrSA==
dependencies:
- "@jest/types" "^29.4.1"
+ "@jest/types" "^29.5.0"
"@types/graceful-fs" "^4.1.3"
"@types/node" "*"
anymatch "^3.0.3"
fb-watchman "^2.0.0"
graceful-fs "^4.2.9"
- jest-regex-util "^29.2.0"
- jest-util "^29.4.1"
- jest-worker "^29.4.1"
+ jest-regex-util "^29.4.3"
+ jest-util "^29.5.0"
+ jest-worker "^29.5.0"
micromatch "^4.0.4"
walker "^1.0.8"
optionalDependencies:
fsevents "^2.3.2"
-jest-leak-detector@^29.4.1:
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/jest-leak-detector/-/jest-leak-detector-29.4.1.tgz#632186c546e084da2b490b7496fee1a1c9929637"
- integrity sha512-akpZv7TPyGMnH2RimOCgy+hPmWZf55EyFUvymQ4LMsQP8xSPlZumCPtXGoDhFNhUE2039RApZkTQDKU79p/FiQ==
+jest-leak-detector@^29.5.0:
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/jest-leak-detector/-/jest-leak-detector-29.5.0.tgz#cf4bdea9615c72bac4a3a7ba7e7930f9c0610c8c"
+ integrity sha512-u9YdeeVnghBUtpN5mVxjID7KbkKE1QU4f6uUwuxiY0vYRi9BUCLKlPEZfDGR67ofdFmDz9oPAy2G92Ujrntmow==
dependencies:
- jest-get-type "^29.2.0"
- pretty-format "^29.4.1"
+ jest-get-type "^29.4.3"
+ pretty-format "^29.5.0"
-jest-matcher-utils@^29.4.1:
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/jest-matcher-utils/-/jest-matcher-utils-29.4.1.tgz#73d834e305909c3b43285fbc76f78bf0ad7e1954"
- integrity sha512-k5h0u8V4nAEy6lSACepxL/rw78FLDkBnXhZVgFneVpnJONhb2DhZj/Gv4eNe+1XqQ5IhgUcqj745UwH0HJmMnA==
+jest-matcher-utils@^29.5.0:
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/jest-matcher-utils/-/jest-matcher-utils-29.5.0.tgz#d957af7f8c0692c5453666705621ad4abc2c59c5"
+ integrity sha512-lecRtgm/rjIK0CQ7LPQwzCs2VwW6WAahA55YBuI+xqmhm7LAaxokSB8C97yJeYyT+HvQkH741StzpU41wohhWw==
dependencies:
chalk "^4.0.0"
- jest-diff "^29.4.1"
- jest-get-type "^29.2.0"
- pretty-format "^29.4.1"
+ jest-diff "^29.5.0"
+ jest-get-type "^29.4.3"
+ pretty-format "^29.5.0"
-jest-message-util@^29.4.1:
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/jest-message-util/-/jest-message-util-29.4.1.tgz#522623aa1df9a36ebfdffb06495c7d9d19e8a845"
- integrity sha512-H4/I0cXUaLeCw6FM+i4AwCnOwHRgitdaUFOdm49022YD5nfyr8C/DrbXOBEyJaj+w/y0gGJ57klssOaUiLLQGQ==
+jest-message-util@^29.5.0:
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/jest-message-util/-/jest-message-util-29.5.0.tgz#1f776cac3aca332ab8dd2e3b41625435085c900e"
+ integrity sha512-Kijeg9Dag6CKtIDA7O21zNTACqD5MD/8HfIV8pdD94vFyFuer52SigdC3IQMhab3vACxXMiFk+yMHNdbqtyTGA==
dependencies:
"@babel/code-frame" "^7.12.13"
- "@jest/types" "^29.4.1"
+ "@jest/types" "^29.5.0"
"@types/stack-utils" "^2.0.0"
chalk "^4.0.0"
graceful-fs "^4.2.9"
micromatch "^4.0.4"
- pretty-format "^29.4.1"
+ pretty-format "^29.5.0"
slash "^3.0.0"
stack-utils "^2.0.3"
-jest-mock@^29.4.1:
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/jest-mock/-/jest-mock-29.4.1.tgz#a218a2abf45c99c501d4665207748a6b9e29afbd"
- integrity sha512-MwA4hQ7zBOcgVCVnsM8TzaFLVUD/pFWTfbkY953Y81L5ret3GFRZtmPmRFAjKQSdCKoJvvqOu6Bvfpqlwwb0dQ==
+jest-mock@^29.5.0:
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/jest-mock/-/jest-mock-29.5.0.tgz#26e2172bcc71d8b0195081ff1f146ac7e1518aed"
+ integrity sha512-GqOzvdWDE4fAV2bWQLQCkujxYWL7RxjCnj71b5VhDAGOevB3qj3Ovg26A5NI84ZpODxyzaozXLOh2NCgkbvyaw==
dependencies:
- "@jest/types" "^29.4.1"
+ "@jest/types" "^29.5.0"
"@types/node" "*"
- jest-util "^29.4.1"
+ jest-util "^29.5.0"
jest-pnp-resolver@^1.2.2:
version "1.2.3"
@@ -3284,87 +3271,86 @@ jest-regex-util@^26.0.0:
resolved "https://registry.yarnpkg.com/jest-regex-util/-/jest-regex-util-26.0.0.tgz#d25e7184b36e39fd466c3bc41be0971e821fee28"
integrity sha512-Gv3ZIs/nA48/Zvjrl34bf+oD76JHiGDUxNOVgUjh3j890sblXryjY4rss71fPtD/njchl6PSE2hIhvyWa1eT0A==
-jest-regex-util@^29.2.0:
- version "29.2.0"
- resolved "https://registry.yarnpkg.com/jest-regex-util/-/jest-regex-util-29.2.0.tgz#82ef3b587e8c303357728d0322d48bbfd2971f7b"
- integrity sha512-6yXn0kg2JXzH30cr2NlThF+70iuO/3irbaB4mh5WyqNIvLLP+B6sFdluO1/1RJmslyh/f9osnefECflHvTbwVA==
+jest-regex-util@^29.4.3:
+ version "29.4.3"
+ resolved "https://registry.yarnpkg.com/jest-regex-util/-/jest-regex-util-29.4.3.tgz#a42616141e0cae052cfa32c169945d00c0aa0bb8"
+ integrity sha512-O4FglZaMmWXbGHSQInfXewIsd1LMn9p3ZXB/6r4FOkyhX2/iP/soMG98jGvk/A3HAN78+5VWcBGO0BJAPRh4kg==
-jest-resolve-dependencies@^29.4.1:
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/jest-resolve-dependencies/-/jest-resolve-dependencies-29.4.1.tgz#02420a2e055da105e5fca8218c471d8b9553c904"
- integrity sha512-Y3QG3M1ncAMxfjbYgtqNXC5B595zmB6e//p/qpA/58JkQXu/IpLDoLeOa8YoYfsSglBKQQzNUqtfGJJT/qLmJg==
+jest-resolve-dependencies@^29.5.0:
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/jest-resolve-dependencies/-/jest-resolve-dependencies-29.5.0.tgz#f0ea29955996f49788bf70996052aa98e7befee4"
+ integrity sha512-sjV3GFr0hDJMBpYeUuGduP+YeCRbd7S/ck6IvL3kQ9cpySYKqcqhdLLC2rFwrcL7tz5vYibomBrsFYWkIGGjOg==
dependencies:
- jest-regex-util "^29.2.0"
- jest-snapshot "^29.4.1"
+ jest-regex-util "^29.4.3"
+ jest-snapshot "^29.5.0"
-jest-resolve@^29.4.1:
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/jest-resolve/-/jest-resolve-29.4.1.tgz#4c6bf71a07b8f0b79c5fdf4f2a2cf47317694c5e"
- integrity sha512-j/ZFNV2lm9IJ2wmlq1uYK0Y/1PiyDq9g4HEGsNTNr3viRbJdV+8Lf1SXIiLZXFvyiisu0qUyIXGBnw+OKWkJwQ==
+jest-resolve@^29.5.0:
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/jest-resolve/-/jest-resolve-29.5.0.tgz#b053cc95ad1d5f6327f0ac8aae9f98795475ecdc"
+ integrity sha512-1TzxJ37FQq7J10jPtQjcc+MkCkE3GBpBecsSUWJ0qZNJpmg6m0D9/7II03yJulm3H/fvVjgqLh/k2eYg+ui52w==
dependencies:
chalk "^4.0.0"
graceful-fs "^4.2.9"
- jest-haste-map "^29.4.1"
+ jest-haste-map "^29.5.0"
jest-pnp-resolver "^1.2.2"
- jest-util "^29.4.1"
- jest-validate "^29.4.1"
+ jest-util "^29.5.0"
+ jest-validate "^29.5.0"
resolve "^1.20.0"
resolve.exports "^2.0.0"
slash "^3.0.0"
-jest-runner@^29.4.1:
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/jest-runner/-/jest-runner-29.4.1.tgz#57460d9ebb0eea2e27eeddca1816cf8537469661"
- integrity sha512-8d6XXXi7GtHmsHrnaqBKWxjKb166Eyj/ksSaUYdcBK09VbjPwIgWov1VwSmtupCIz8q1Xv4Qkzt/BTo3ZqiCeg==
+jest-runner@^29.5.0:
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/jest-runner/-/jest-runner-29.5.0.tgz#6a57c282eb0ef749778d444c1d758c6a7693b6f8"
+ integrity sha512-m7b6ypERhFghJsslMLhydaXBiLf7+jXy8FwGRHO3BGV1mcQpPbwiqiKUR2zU2NJuNeMenJmlFZCsIqzJCTeGLQ==
dependencies:
- "@jest/console" "^29.4.1"
- "@jest/environment" "^29.4.1"
- "@jest/test-result" "^29.4.1"
- "@jest/transform" "^29.4.1"
- "@jest/types" "^29.4.1"
+ "@jest/console" "^29.5.0"
+ "@jest/environment" "^29.5.0"
+ "@jest/test-result" "^29.5.0"
+ "@jest/transform" "^29.5.0"
+ "@jest/types" "^29.5.0"
"@types/node" "*"
chalk "^4.0.0"
emittery "^0.13.1"
graceful-fs "^4.2.9"
- jest-docblock "^29.2.0"
- jest-environment-node "^29.4.1"
- jest-haste-map "^29.4.1"
- jest-leak-detector "^29.4.1"
- jest-message-util "^29.4.1"
- jest-resolve "^29.4.1"
- jest-runtime "^29.4.1"
- jest-util "^29.4.1"
- jest-watcher "^29.4.1"
- jest-worker "^29.4.1"
+ jest-docblock "^29.4.3"
+ jest-environment-node "^29.5.0"
+ jest-haste-map "^29.5.0"
+ jest-leak-detector "^29.5.0"
+ jest-message-util "^29.5.0"
+ jest-resolve "^29.5.0"
+ jest-runtime "^29.5.0"
+ jest-util "^29.5.0"
+ jest-watcher "^29.5.0"
+ jest-worker "^29.5.0"
p-limit "^3.1.0"
source-map-support "0.5.13"
-jest-runtime@^29.4.1:
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/jest-runtime/-/jest-runtime-29.4.1.tgz#9a50f9c69d3a391690897c01b0bfa8dc5dd45808"
- integrity sha512-UXTMU9uKu2GjYwTtoAw5rn4STxWw/nadOfW7v1sx6LaJYa3V/iymdCLQM6xy3+7C6mY8GfX22vKpgxY171UIoA==
- dependencies:
- "@jest/environment" "^29.4.1"
- "@jest/fake-timers" "^29.4.1"
- "@jest/globals" "^29.4.1"
- "@jest/source-map" "^29.2.0"
- "@jest/test-result" "^29.4.1"
- "@jest/transform" "^29.4.1"
- "@jest/types" "^29.4.1"
+jest-runtime@^29.5.0:
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/jest-runtime/-/jest-runtime-29.5.0.tgz#c83f943ee0c1da7eb91fa181b0811ebd59b03420"
+ integrity sha512-1Hr6Hh7bAgXQP+pln3homOiEZtCDZFqwmle7Ew2j8OlbkIu6uE3Y/etJQG8MLQs3Zy90xrp2C0BRrtPHG4zryw==
+ dependencies:
+ "@jest/environment" "^29.5.0"
+ "@jest/fake-timers" "^29.5.0"
+ "@jest/globals" "^29.5.0"
+ "@jest/source-map" "^29.4.3"
+ "@jest/test-result" "^29.5.0"
+ "@jest/transform" "^29.5.0"
+ "@jest/types" "^29.5.0"
"@types/node" "*"
chalk "^4.0.0"
cjs-module-lexer "^1.0.0"
collect-v8-coverage "^1.0.0"
glob "^7.1.3"
graceful-fs "^4.2.9"
- jest-haste-map "^29.4.1"
- jest-message-util "^29.4.1"
- jest-mock "^29.4.1"
- jest-regex-util "^29.2.0"
- jest-resolve "^29.4.1"
- jest-snapshot "^29.4.1"
- jest-util "^29.4.1"
- semver "^7.3.5"
+ jest-haste-map "^29.5.0"
+ jest-message-util "^29.5.0"
+ jest-mock "^29.5.0"
+ jest-regex-util "^29.4.3"
+ jest-resolve "^29.5.0"
+ jest-snapshot "^29.5.0"
+ jest-util "^29.5.0"
slash "^3.0.0"
strip-bom "^4.0.0"
@@ -3376,10 +3362,10 @@ jest-serializer@^26.6.2:
"@types/node" "*"
graceful-fs "^4.2.4"
-jest-snapshot@^29.4.1:
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/jest-snapshot/-/jest-snapshot-29.4.1.tgz#5692210b3690c94f19317913d4082b123bd83dd9"
- integrity sha512-l4iV8EjGgQWVz3ee/LR9sULDk2pCkqb71bjvlqn+qp90lFwpnulHj4ZBT8nm1hA1C5wowXLc7MGnw321u0tsYA==
+jest-snapshot@^29.5.0:
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/jest-snapshot/-/jest-snapshot-29.5.0.tgz#c9c1ce0331e5b63cd444e2f95a55a73b84b1e8ce"
+ integrity sha512-x7Wolra5V0tt3wRs3/ts3S6ciSQVypgGQlJpz2rsdQYoUKxMxPNaoHMGJN6qAuPJqS+2iQ1ZUn5kl7HCyls84g==
dependencies:
"@babel/core" "^7.11.6"
"@babel/generator" "^7.7.2"
@@ -3387,23 +3373,22 @@ jest-snapshot@^29.4.1:
"@babel/plugin-syntax-typescript" "^7.7.2"
"@babel/traverse" "^7.7.2"
"@babel/types" "^7.3.3"
- "@jest/expect-utils" "^29.4.1"
- "@jest/transform" "^29.4.1"
- "@jest/types" "^29.4.1"
+ "@jest/expect-utils" "^29.5.0"
+ "@jest/transform" "^29.5.0"
+ "@jest/types" "^29.5.0"
"@types/babel__traverse" "^7.0.6"
"@types/prettier" "^2.1.5"
babel-preset-current-node-syntax "^1.0.0"
chalk "^4.0.0"
- expect "^29.4.1"
+ expect "^29.5.0"
graceful-fs "^4.2.9"
- jest-diff "^29.4.1"
- jest-get-type "^29.2.0"
- jest-haste-map "^29.4.1"
- jest-matcher-utils "^29.4.1"
- jest-message-util "^29.4.1"
- jest-util "^29.4.1"
+ jest-diff "^29.5.0"
+ jest-get-type "^29.4.3"
+ jest-matcher-utils "^29.5.0"
+ jest-message-util "^29.5.0"
+ jest-util "^29.5.0"
natural-compare "^1.4.0"
- pretty-format "^29.4.1"
+ pretty-format "^29.5.0"
semver "^7.3.5"
jest-util@^26.6.2:
@@ -3418,42 +3403,42 @@ jest-util@^26.6.2:
is-ci "^2.0.0"
micromatch "^4.0.2"
-jest-util@^29.4.1:
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/jest-util/-/jest-util-29.4.1.tgz#2eeed98ff4563b441b5a656ed1a786e3abc3e4c4"
- integrity sha512-bQy9FPGxVutgpN4VRc0hk6w7Hx/m6L53QxpDreTZgJd9gfx/AV2MjyPde9tGyZRINAUrSv57p2inGBu2dRLmkQ==
+jest-util@^29.5.0:
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/jest-util/-/jest-util-29.5.0.tgz#24a4d3d92fc39ce90425311b23c27a6e0ef16b8f"
+ integrity sha512-RYMgG/MTadOr5t8KdhejfvUU82MxsCu5MF6KuDUHl+NuwzUt+Sm6jJWxTJVrDR1j5M/gJVCPKQEpWXY+yIQ6lQ==
dependencies:
- "@jest/types" "^29.4.1"
+ "@jest/types" "^29.5.0"
"@types/node" "*"
chalk "^4.0.0"
ci-info "^3.2.0"
graceful-fs "^4.2.9"
picomatch "^2.2.3"
-jest-validate@^29.4.1:
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/jest-validate/-/jest-validate-29.4.1.tgz#0d5174510415083ec329d4f981bf6779211f17e9"
- integrity sha512-qNZXcZQdIQx4SfUB/atWnI4/I2HUvhz8ajOSYUu40CSmf9U5emil8EDHgE7M+3j9/pavtk3knlZBDsgFvv/SWw==
+jest-validate@^29.5.0:
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/jest-validate/-/jest-validate-29.5.0.tgz#8e5a8f36178d40e47138dc00866a5f3bd9916ffc"
+ integrity sha512-pC26etNIi+y3HV8A+tUGr/lph9B18GnzSRAkPaaZJIE1eFdiYm6/CewuiJQ8/RlfHd1u/8Ioi8/sJ+CmbA+zAQ==
dependencies:
- "@jest/types" "^29.4.1"
+ "@jest/types" "^29.5.0"
camelcase "^6.2.0"
chalk "^4.0.0"
- jest-get-type "^29.2.0"
+ jest-get-type "^29.4.3"
leven "^3.1.0"
- pretty-format "^29.4.1"
+ pretty-format "^29.5.0"
-jest-watcher@^29.4.1:
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/jest-watcher/-/jest-watcher-29.4.1.tgz#6e3e2486918bd778849d4d6e67fd77b814f3e6ed"
- integrity sha512-vFOzflGFs27nU6h8dpnVRER3O2rFtL+VMEwnG0H3KLHcllLsU8y9DchSh0AL/Rg5nN1/wSiQ+P4ByMGpuybaVw==
+jest-watcher@^29.5.0:
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/jest-watcher/-/jest-watcher-29.5.0.tgz#cf7f0f949828ba65ddbbb45c743a382a4d911363"
+ integrity sha512-KmTojKcapuqYrKDpRwfqcQ3zjMlwu27SYext9pt4GlF5FUgB+7XE1mcCnSm6a4uUpFyQIkb6ZhzZvHl+jiBCiA==
dependencies:
- "@jest/test-result" "^29.4.1"
- "@jest/types" "^29.4.1"
+ "@jest/test-result" "^29.5.0"
+ "@jest/types" "^29.5.0"
"@types/node" "*"
ansi-escapes "^4.2.1"
chalk "^4.0.0"
emittery "^0.13.1"
- jest-util "^29.4.1"
+ jest-util "^29.5.0"
string-length "^4.0.1"
jest-worker@^26.6.2:
@@ -3465,30 +3450,25 @@ jest-worker@^26.6.2:
merge-stream "^2.0.0"
supports-color "^7.0.0"
-jest-worker@^29.4.1:
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/jest-worker/-/jest-worker-29.4.1.tgz#7cb4a99a38975679600305650f86f4807460aab1"
- integrity sha512-O9doU/S1EBe+yp/mstQ0VpPwpv0Clgn68TkNwGxL6/usX/KUW9Arnn4ag8C3jc6qHcXznhsT5Na1liYzAsuAbQ==
+jest-worker@^29.5.0:
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/jest-worker/-/jest-worker-29.5.0.tgz#bdaefb06811bd3384d93f009755014d8acb4615d"
+ integrity sha512-NcrQnevGoSp4b5kg+akIpthoAFHxPBcb5P6mYPY0fUNT+sSvmtu6jlkEle3anczUKIKEbMxFimk9oTP/tpIPgA==
dependencies:
"@types/node" "*"
- jest-util "^29.4.1"
+ jest-util "^29.5.0"
merge-stream "^2.0.0"
supports-color "^8.0.0"
jest@^29:
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/jest/-/jest-29.4.1.tgz#bb34baca8e05901b49c02c62f1183a6182ea1785"
- integrity sha512-cknimw7gAXPDOmj0QqztlxVtBVCw2lYY9CeIE5N6kD+kET1H4H79HSNISJmijb1HF+qk+G+ploJgiDi5k/fRlg==
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/jest/-/jest-29.5.0.tgz#f75157622f5ce7ad53028f2f8888ab53e1f1f24e"
+ integrity sha512-juMg3he2uru1QoXX078zTa7pO85QyB9xajZc6bU+d9yEGwrKX6+vGmJQ3UdVZsvTEUARIdObzH68QItim6OSSQ==
dependencies:
- "@jest/core" "^29.4.1"
- "@jest/types" "^29.4.1"
+ "@jest/core" "^29.5.0"
+ "@jest/types" "^29.5.0"
import-local "^3.0.2"
- jest-cli "^29.4.1"
-
-js-sdsl@^4.1.4:
- version "4.3.0"
- resolved "https://registry.yarnpkg.com/js-sdsl/-/js-sdsl-4.3.0.tgz#aeefe32a451f7af88425b11fdb5f58c90ae1d711"
- integrity sha512-mifzlm2+5nZ+lEcLJMoBK0/IH/bDg8XnJfd/Wq6IP+xoCjLZsTOnV2QpxlVbX9bMnkl5PdEjNtBJ9Cj1NjifhQ==
+ jest-cli "^29.5.0"
"js-tokens@^3.0.0 || ^4.0.0", js-tokens@^4.0.0:
version "4.0.0"
@@ -3530,7 +3510,7 @@ json-stable-stringify-without-jsonify@^1.0.1:
resolved "https://registry.yarnpkg.com/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz#9db7b59496ad3f3cfef30a75142d2d930ad72651"
integrity sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==
-json5@^1.0.1:
+json5@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/json5/-/json5-1.0.2.tgz#63d98d60f21b313b77c4d6da18bfa69d80e1d593"
integrity sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==
@@ -3642,13 +3622,6 @@ lru-cache@^6.0.0:
dependencies:
yallist "^4.0.0"
-magic-string@^0.27.0:
- version "0.27.0"
- resolved "https://registry.yarnpkg.com/magic-string/-/magic-string-0.27.0.tgz#e4a3413b4bab6d98d2becffd48b4a257effdbbf3"
- integrity sha512-8UnnX2PeRAPZuN12svgR9j7M1uWMovg/CEnIwIG0LFkXSJJe4PdfUGiTGl8V9bsBHFUtfVINcSyYxd7q+kx9fA==
- dependencies:
- "@jridgewell/sourcemap-codec" "^1.4.13"
-
make-dir@^3.0.0:
version "3.1.0"
resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-3.1.0.tgz#415e967046b3a7f1d185277d84aa58203726a13f"
@@ -3720,9 +3693,9 @@ minimatch@^3.0.4, minimatch@^3.0.5, minimatch@^3.1.1, minimatch@^3.1.2:
brace-expansion "^1.1.7"
minimist@^1.1.1, minimist@^1.2.0, minimist@^1.2.6:
- version "1.2.7"
- resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.7.tgz#daa1c4d91f507390437c6a8bc01078e7000c4d18"
- integrity sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g==
+ version "1.2.8"
+ resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c"
+ integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==
mixin-deep@^1.2.0:
version "1.3.2"
@@ -3763,10 +3736,10 @@ multimatch@^4.0.0:
arrify "^2.0.1"
minimatch "^3.0.4"
-nanoid@^3.3.4:
- version "3.3.4"
- resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.4.tgz#730b67e3cd09e2deacf03c027c81c9d9dbc5e8ab"
- integrity sha512-MqBkQh/OHTS2egovRtLk45wEyNXwF+cokD+1YPf9u5VfJiRdAiRwB2froX5Co9Rh20xs4siNPm8naNotSD6RBw==
+nanoid@^3.3.6:
+ version "3.3.6"
+ resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.6.tgz#443380c856d6e9f9824267d960b4236ad583ea4c"
+ integrity sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==
nanomatch@^1.2.9:
version "1.2.13"
@@ -3800,10 +3773,10 @@ node-int64@^0.4.0:
resolved "https://registry.yarnpkg.com/node-int64/-/node-int64-0.4.0.tgz#87a9065cdb355d3182d8f94ce11188b825c68a3b"
integrity sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==
-node-releases@^2.0.6:
- version "2.0.8"
- resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.8.tgz#0f349cdc8fcfa39a92ac0be9bc48b7706292b9ae"
- integrity sha512-dFSmB8fFHEH/s81Xi+Y/15DQY6VHW81nXRj86EMSL3lmuTmK1e+aT4wrFCkTbm+gSwkw4KpX+rT/pMM2c1mF+A==
+node-releases@^2.0.12:
+ version "2.0.12"
+ resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.12.tgz#35627cc224a23bfb06fb3380f2b3afaaa7eb1039"
+ integrity sha512-QzsYKWhXTWx8h1kIvqfnC++o0pEmpRQA/aenALsL2F4pqNVr7YzcdMlDij5WBnwftRbJCNJL/O7zdKaxKPHqgQ==
normalize-path@^2.1.1:
version "2.1.1"
@@ -3845,7 +3818,7 @@ object-copy@^0.1.0:
define-property "^0.2.5"
kind-of "^3.0.3"
-object-inspect@^1.12.2, object-inspect@^1.9.0:
+object-inspect@^1.12.3, object-inspect@^1.9.0:
version "1.12.3"
resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.12.3.tgz#ba62dffd67ee256c8c086dfae69e016cd1f198b9"
integrity sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g==
@@ -4057,12 +4030,12 @@ posix-character-classes@^0.1.0:
resolved "https://registry.yarnpkg.com/posix-character-classes/-/posix-character-classes-0.1.1.tgz#01eac0fe3b5af71a2a6c02feabb8c1fef7e00eab"
integrity sha512-xTgYBc3fuo7Yt7JbiuFxSYGToMoz8fLoE6TC9Wx1P/u+LfeThMOAqmuyECnlBaaJb+u1m9hHiXUEtwW4OzfUJg==
-postcss@^8.4.20:
- version "8.4.21"
- resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.4.21.tgz#c639b719a57efc3187b13a1d765675485f4134f4"
- integrity sha512-tP7u/Sn/dVxK2NnruI4H9BG+x+Wxz6oeZ1cJ8P6G/PZY0IKk4k/63TDsQf2kQq3+qoJeLm2kIBUNlZe3zgb4Zg==
+postcss@^8.4.23:
+ version "8.4.24"
+ resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.4.24.tgz#f714dba9b2284be3cc07dbd2fc57ee4dc972d2df"
+ integrity sha512-M0RzbcI0sO/XJNucsGjvWU9ERWxb/ytp1w6dKtxTKgixdtQDq4rmx/g8W1hnaheq9jgwL/oyEdH5Bc4WwJKMqg==
dependencies:
- nanoid "^3.3.4"
+ nanoid "^3.3.6"
picocolors "^1.0.0"
source-map-js "^1.0.2"
@@ -4079,16 +4052,16 @@ prettier-linter-helpers@^1.0.0:
fast-diff "^1.1.2"
prettier@2:
- version "2.8.3"
- resolved "https://registry.yarnpkg.com/prettier/-/prettier-2.8.3.tgz#ab697b1d3dd46fb4626fbe2f543afe0cc98d8632"
- integrity sha512-tJ/oJ4amDihPoufT5sM0Z1SKEuKay8LfVAMlbbhnnkvt6BUserZylqo2PN+p9KeljLr0OHa2rXHU1T8reeoTrw==
+ version "2.8.8"
+ resolved "https://registry.yarnpkg.com/prettier/-/prettier-2.8.8.tgz#e8c5d7e98a4305ffe3de2e1fc4aca1a71c28b1da"
+ integrity sha512-tdN8qQGvNjw4CHbY+XXk0JgCXn9QiF21a55rBe5LJAU+kDyC4WQn4+awm2Xfk2lQMk5fKup9XgzTZtGkjBdP9Q==
-pretty-format@^29.4.1:
- version "29.4.1"
- resolved "https://registry.yarnpkg.com/pretty-format/-/pretty-format-29.4.1.tgz#0da99b532559097b8254298da7c75a0785b1751c"
- integrity sha512-dt/Z761JUVsrIKaY215o1xQJBGlSmTx/h4cSqXqjHLnU1+Kt+mavVE7UgqJJO5ukx5HjSswHfmXz4LjS2oIJfg==
+pretty-format@^29.5.0:
+ version "29.5.0"
+ resolved "https://registry.yarnpkg.com/pretty-format/-/pretty-format-29.5.0.tgz#283134e74f70e2e3e7229336de0e4fce94ccde5a"
+ integrity sha512-V2mGkI31qdttvTFX7Mt4efOqHXqJWMu4/r66Xh3Z3BwZaPfPJgp6/gbwoujRpPUtfEF6AUUWx3Jim3GCw5g/Qw==
dependencies:
- "@jest/schemas" "^29.4.0"
+ "@jest/schemas" "^29.4.3"
ansi-styles "^5.0.0"
react-is "^18.0.0"
@@ -4134,6 +4107,11 @@ punycode@^2.1.0:
resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.3.0.tgz#f67fa67c94da8f4d0cfff981aee4118064199b8f"
integrity sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==
+pure-rand@^6.0.0:
+ version "6.0.2"
+ resolved "https://registry.yarnpkg.com/pure-rand/-/pure-rand-6.0.2.tgz#a9c2ddcae9b68d736a8163036f088a2781c8b306"
+ integrity sha512-6Yg0ekpKICSjPswYOuC5sku/TSWaRYlA0qsXqJgM/d/4pLPHPuTxK7Nbf7jFKzAeedUhR8C7K9Uv63FBsSo8xQ==
+
queue-microtask@^1.2.2:
version "1.2.3"
resolved "https://registry.yarnpkg.com/queue-microtask/-/queue-microtask-1.2.3.tgz#4929228bbc724dfac43e0efb058caf7b6cfb6243"
@@ -4163,19 +4141,19 @@ react-refresh@^0.14.0:
integrity sha512-wViHqhAd8OHeLS/IRMJjTSDHF3U9eWi62F/MledQGPdJGDhodXJ9PBLNGr6WWL7qlH12Mt3TyTpbS+hGXMjCzQ==
react-router-dom@^6:
- version "6.8.0"
- resolved "https://registry.yarnpkg.com/react-router-dom/-/react-router-dom-6.8.0.tgz#5e5f4c4b15fdec3965d2ad9d7460d0c61971e744"
- integrity sha512-hQouduSTywGJndE86CXJ2h7YEy4HYC6C/uh19etM+79FfQ6cFFFHnHyDlzO4Pq0eBUI96E4qVE5yUjA00yJZGQ==
+ version "6.12.0"
+ resolved "https://registry.yarnpkg.com/react-router-dom/-/react-router-dom-6.12.0.tgz#372279caaaa1ffb0204926c83e93a139b112d861"
+ integrity sha512-UzLwZ3ZVaDr6YV0HdjwxuwtDKgwpJx9o1ea9fU0HV4tTvzdB8WPHzlLFMo5orchpIS84e8G4Erlhu7Rl84XDFQ==
dependencies:
- "@remix-run/router" "1.3.1"
- react-router "6.8.0"
+ "@remix-run/router" "1.6.3"
+ react-router "6.12.0"
-react-router@6.8.0:
- version "6.8.0"
- resolved "https://registry.yarnpkg.com/react-router/-/react-router-6.8.0.tgz#dd61fd1ec44daa2cceaef8e6baa00f99a01a650f"
- integrity sha512-760bk7y3QwabduExtudhWbd88IBbuD1YfwzpuDUAlJUJ7laIIcqhMvdhSVh1Fur1PE8cGl84L0dxhR3/gvHF7A==
+react-router@6.12.0:
+ version "6.12.0"
+ resolved "https://registry.yarnpkg.com/react-router/-/react-router-6.12.0.tgz#1afae9219c24c8611809469d7a386c8023ade39a"
+ integrity sha512-/tCGtLq9umxRvbYeIx3j94CmpQfue0E3qnetVm9luKhu58cR4t+3O4ZrQXBdXfJrBATOAj+wF/1ihJJQI8AoTw==
dependencies:
- "@remix-run/router" "1.3.1"
+ "@remix-run/router" "1.6.3"
react-textarea-autosize@8.3.4:
version "8.3.4"
@@ -4217,18 +4195,13 @@ regex-not@^1.0.0, regex-not@^1.0.2:
safe-regex "^1.1.0"
regexp.prototype.flags@^1.4.3:
- version "1.4.3"
- resolved "https://registry.yarnpkg.com/regexp.prototype.flags/-/regexp.prototype.flags-1.4.3.tgz#87cab30f80f66660181a3bb7bf5981a872b367ac"
- integrity sha512-fjggEOO3slI6Wvgjwflkc4NFRCTZAu5CnNfBd5qOMYhWdn67nJBBu34/TkD++eeFmd8C9r9jfXJ27+nSiRkSUA==
+ version "1.5.0"
+ resolved "https://registry.yarnpkg.com/regexp.prototype.flags/-/regexp.prototype.flags-1.5.0.tgz#fe7ce25e7e4cca8db37b6634c8a2c7009199b9cb"
+ integrity sha512-0SutC3pNudRKgquxGoRGIz946MZVHqbNfPjBdxeOhBrdgDKlRoXmYLQN9xRbrR09ZXWeGAdPuif7egofn6v5LA==
dependencies:
call-bind "^1.0.2"
- define-properties "^1.1.3"
- functions-have-names "^1.2.2"
-
-regexpp@^3.2.0:
- version "3.2.0"
- resolved "https://registry.yarnpkg.com/regexpp/-/regexpp-3.2.0.tgz#0425a2768d8f23bad70ca4b90461fa2f1213e1b2"
- integrity sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg==
+ define-properties "^1.2.0"
+ functions-have-names "^1.2.3"
remove-trailing-separator@^1.0.1:
version "1.1.0"
@@ -4273,16 +4246,16 @@ resolve-url@^0.2.1:
integrity sha512-ZuF55hVUQaaczgOIwqWzkEcEidmlD/xl44x1UZnhOXcYuFN2S6+rcxpG+C1N3So0wvNI3DmJICUFfu2SxhBmvg==
resolve.exports@^2.0.0:
- version "2.0.0"
- resolved "https://registry.yarnpkg.com/resolve.exports/-/resolve.exports-2.0.0.tgz#c1a0028c2d166ec2fbf7d0644584927e76e7400e"
- integrity sha512-6K/gDlqgQscOlg9fSRpWstA8sYe8rbELsSTNpx+3kTrsVCzvSl0zIvRErM7fdl9ERWDsKnrLnwB+Ne89918XOg==
+ version "2.0.2"
+ resolved "https://registry.yarnpkg.com/resolve.exports/-/resolve.exports-2.0.2.tgz#f8c934b8e6a13f539e38b7098e2e36134f01e800"
+ integrity sha512-X2UW6Nw3n/aMgDVy+0rSqgHlv39WZAlZrXCdnbyEiKm17DSqHX4MmQMaST3FbeWR5FTuRcUwYAziZajji0Y7mg==
resolve@^1.19.0, resolve@^1.20.0, resolve@^1.22.1:
- version "1.22.1"
- resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.1.tgz#27cb2ebb53f91abb49470a928bba7558066ac177"
- integrity sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==
+ version "1.22.2"
+ resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.2.tgz#0ed0943d4e301867955766c9f3e1ae6d01c6845f"
+ integrity sha512-Sb+mjNHOULsBv818T40qSPeRiuWLyaGMa5ewydRLFimneixmVy2zdivRl+AF6jaYPC8ERxGDmFSiqui6SfPd+g==
dependencies:
- is-core-module "^2.9.0"
+ is-core-module "^2.11.0"
path-parse "^1.0.7"
supports-preserve-symlinks-flag "^1.0.0"
@@ -4312,10 +4285,10 @@ rimraf@^3.0.2:
dependencies:
glob "^7.1.3"
-rollup@^3.7.0:
- version "3.12.0"
- resolved "https://registry.yarnpkg.com/rollup/-/rollup-3.12.0.tgz#813d88ec11e36108da788fc471b3c81b365a7c29"
- integrity sha512-4MZ8kA2HNYahIjz63rzrMMRvDqQDeS9LoriJvMuV0V6zIGysP36e9t4yObUfwdT9h/szXoHQideICftcdZklWg==
+rollup@^3.21.0:
+ version "3.24.0"
+ resolved "https://registry.yarnpkg.com/rollup/-/rollup-3.24.0.tgz#865dee1fe0bb528747b59914dfab25e6f480e370"
+ integrity sha512-OgraHOIg2YpHQTjl0/ymWfFNBEyPucB7lmhXrQUh38qNOegxLapSPFs9sNr0qKR75awW41D93XafoR2QfhBdUQ==
optionalDependencies:
fsevents "~2.3.2"
@@ -4380,9 +4353,9 @@ semver@^6.0.0, semver@^6.3.0:
integrity sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==
semver@^7.3.5:
- version "7.3.8"
- resolved "https://registry.yarnpkg.com/semver/-/semver-7.3.8.tgz#07a78feafb3f7b32347d725e33de7e2a2df67798"
- integrity sha512-NB1ctGL5rlHrPJtFDVIVzTyQylMLu9N9VICA6HSFJo8MCGVTMW6gfpicwKmmK/dAjTOrqu5l63JJOpDSrAis3A==
+ version "7.5.1"
+ resolved "https://registry.yarnpkg.com/semver/-/semver-7.5.1.tgz#c90c4d631cf74720e46b21c1d37ea07edfab91ec"
+ integrity sha512-Wvss5ivl8TMRZXXESstBA4uR5iXgEN/VC5/sOcuXdVLzcdkz4HWetIoRfG5gb5X+ij/G9rw9YoGn3QoQ8OCSpw==
dependencies:
lru-cache "^6.0.0"
@@ -4571,6 +4544,15 @@ string.prototype.matchall@^4.0.8:
regexp.prototype.flags "^1.4.3"
side-channel "^1.0.4"
+string.prototype.trim@^1.2.7:
+ version "1.2.7"
+ resolved "https://registry.yarnpkg.com/string.prototype.trim/-/string.prototype.trim-1.2.7.tgz#a68352740859f6893f14ce3ef1bb3037f7a90533"
+ integrity sha512-p6TmeT1T3411M8Cgg9wBTMRtY2q9+PNy9EV1i2lIXUN/btt763oIfxwN3RR8VU6wHX8j/1CFy0L+YuThm6bgOg==
+ dependencies:
+ call-bind "^1.0.2"
+ define-properties "^1.1.4"
+ es-abstract "^1.20.4"
+
string.prototype.trimend@^1.0.6:
version "1.0.6"
resolved "https://registry.yarnpkg.com/string.prototype.trimend/-/string.prototype.trimend-1.0.6.tgz#c4a27fa026d979d79c04f17397f250a462944533"
@@ -4621,10 +4603,10 @@ strip-json-comments@^3.1.0, strip-json-comments@^3.1.1:
resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-3.1.1.tgz#31f1281b3832630434831c310c01cccda8cbe006"
integrity sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==
-stylis@4.1.3:
- version "4.1.3"
- resolved "https://registry.yarnpkg.com/stylis/-/stylis-4.1.3.tgz#fd2fbe79f5fed17c55269e16ed8da14c84d069f7"
- integrity sha512-GP6WDNWf+o403jrEp9c5jibKavrtLW+/qYGhFxFrG8maXhwTBI7gLLhiBb0o7uFccWN+EOS9aMO6cGHWAO07OA==
+stylis@4.2.0:
+ version "4.2.0"
+ resolved "https://registry.yarnpkg.com/stylis/-/stylis-4.2.0.tgz#79daee0208964c8fe695a42fcffcac633a211a51"
+ integrity sha512-Orov6g6BB1sDfYgzWfTHDOxamtX1bE/zo104Dh9e6fqJ3PooipYyfJ0pUmrZO2wAvO8YbEyeFrkV91XTsGMSrw==
supports-color@^5.3.0:
version "5.5.0"
@@ -4652,6 +4634,11 @@ supports-preserve-symlinks-flag@^1.0.0:
resolved "https://registry.yarnpkg.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz#6eda4bd344a3c94aea376d4cc31bc77311039e09"
integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==
+tabbable@^6.0.1:
+ version "6.1.2"
+ resolved "https://registry.yarnpkg.com/tabbable/-/tabbable-6.1.2.tgz#b0d3ca81d582d48a80f71b267d1434b1469a3703"
+ integrity sha512-qCN98uP7i9z0fIS4amQ5zbGBOq+OSigYeGvPy7NDk8Y9yncqDZ9pRPgfsc2PJIVM9RrJj7GIfuRgmjoUU9zTHQ==
+
test-exclude@^6.0.0:
version "6.0.0"
resolved "https://registry.yarnpkg.com/test-exclude/-/test-exclude-6.0.0.tgz#04a8698661d805ea6fa293b6cb9e63ac044ef15e"
@@ -4709,19 +4696,19 @@ to-regex@^3.0.1, to-regex@^3.0.2:
safe-regex "^1.1.0"
tsconfig-paths@^3.14.1:
- version "3.14.1"
- resolved "https://registry.yarnpkg.com/tsconfig-paths/-/tsconfig-paths-3.14.1.tgz#ba0734599e8ea36c862798e920bcf163277b137a"
- integrity sha512-fxDhWnFSLt3VuTwtvJt5fpwxBHg5AdKWMsgcPOOIilyjymcYVZoCQF8fvFRezCNfblEXmi+PcM1eYHeOAgXCOQ==
+ version "3.14.2"
+ resolved "https://registry.yarnpkg.com/tsconfig-paths/-/tsconfig-paths-3.14.2.tgz#6e32f1f79412decd261f92d633a9dc1cfa99f088"
+ integrity sha512-o/9iXgCYc5L/JxCHPe3Hvh8Q/2xm5Z+p18PESBU6Ff33695QnCHBEjcytY2q19ua7Mbl/DavtBOLq+oG0RCL+g==
dependencies:
"@types/json5" "^0.0.29"
- json5 "^1.0.1"
+ json5 "^1.0.2"
minimist "^1.2.6"
strip-bom "^3.0.0"
tslib@^2.0.0:
- version "2.5.0"
- resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.5.0.tgz#42bfed86f5787aeb41d031866c8f402429e0fddf"
- integrity sha512-336iVw3rtn2BUK7ORdIAHTyxHGRIHVReokCR3XjbckJMK7ms8FysBfhLR8IXnAgy7T0PTPNBWKiH514FOW/WSg==
+ version "2.5.3"
+ resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.5.3.tgz#24944ba2d990940e6e982c4bea147aba80209913"
+ integrity sha512-mSxlJJwl3BMEQCUNnxXBU9jP4JBktcEGhURcPR6VQVlnP0FdDEsIaz0C35dXNGLyRfrATNofF0F5p2KPxQgB+w==
type-check@^0.4.0, type-check@~0.4.0:
version "0.4.0"
@@ -4789,10 +4776,10 @@ unset-value@^1.0.0:
has-value "^0.3.1"
isobject "^3.0.0"
-update-browserslist-db@^1.0.9:
- version "1.0.10"
- resolved "https://registry.yarnpkg.com/update-browserslist-db/-/update-browserslist-db-1.0.10.tgz#0f54b876545726f17d00cd9a2561e6dade943ff3"
- integrity sha512-OztqDenkfFkbSG+tRxBeAnCVPckDBcvibKd35yDONx6OU8N7sqgwc7rCbkJ/WcYtVRZ4ba68d6byhC21GFh7sQ==
+update-browserslist-db@^1.0.11:
+ version "1.0.11"
+ resolved "https://registry.yarnpkg.com/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz#9a2a641ad2907ae7b3616506f4b977851db5b940"
+ integrity sha512-dCwEFf0/oT85M1fHBg4F0jtLwJrutGoHSQXCh7u4o2t1drG+c0a9Flnqww6XUKSfQMPpJBRjU8d4RXB09qtvaA==
dependencies:
escalade "^3.1.1"
picocolors "^1.0.0"
@@ -4837,23 +4824,22 @@ use@^3.1.0:
integrity sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ==
v8-to-istanbul@^9.0.1:
- version "9.0.1"
- resolved "https://registry.yarnpkg.com/v8-to-istanbul/-/v8-to-istanbul-9.0.1.tgz#b6f994b0b5d4ef255e17a0d17dc444a9f5132fa4"
- integrity sha512-74Y4LqY74kLE6IFyIjPtkSTWzUZmj8tdHT9Ii/26dvQ6K9Dl2NbEfj0XgU2sHCtKgt5VupqhlO/5aWuqS+IY1w==
+ version "9.1.0"
+ resolved "https://registry.yarnpkg.com/v8-to-istanbul/-/v8-to-istanbul-9.1.0.tgz#1b83ed4e397f58c85c266a570fc2558b5feb9265"
+ integrity sha512-6z3GW9x8G1gd+JIIgQQQxXuiJtCXeAjp6RaPEPLv62mH3iPHPxV6W3robxtCzNErRo6ZwTmzWhsbNvjyEBKzKA==
dependencies:
"@jridgewell/trace-mapping" "^0.3.12"
"@types/istanbul-lib-coverage" "^2.0.1"
convert-source-map "^1.6.0"
vite@^4:
- version "4.0.4"
- resolved "https://registry.yarnpkg.com/vite/-/vite-4.0.4.tgz#4612ce0b47bbb233a887a54a4ae0c6e240a0da31"
- integrity sha512-xevPU7M8FU0i/80DMR+YhgrzR5KS2ORy1B4xcX/cXLsvnUWvfHuqMmVU6N0YiJ4JWGRJJsLCgjEzKjG9/GKoSw==
+ version "4.3.9"
+ resolved "https://registry.yarnpkg.com/vite/-/vite-4.3.9.tgz#db896200c0b1aa13b37cdc35c9e99ee2fdd5f96d"
+ integrity sha512-qsTNZjO9NoJNW7KnOrgYwczm0WctJ8m/yqYAMAK9Lxt4SoySUfS5S8ia9K7JHpa3KEeMfyF8LoJ3c5NeBJy6pg==
dependencies:
- esbuild "^0.16.3"
- postcss "^8.4.20"
- resolve "^1.22.1"
- rollup "^3.7.0"
+ esbuild "^0.17.5"
+ postcss "^8.4.23"
+ rollup "^3.21.0"
optionalDependencies:
fsevents "~2.3.2"
@@ -4930,10 +4916,10 @@ write-file-atomic@^3.0.0:
signal-exit "^3.0.2"
typedarray-to-buffer "^3.1.5"
-write-file-atomic@^5.0.0:
- version "5.0.0"
- resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-5.0.0.tgz#54303f117e109bf3d540261125c8ea5a7320fab0"
- integrity sha512-R7NYMnHSlV42K54lwY9lvW6MnSm1HSJqZL3xiSgi9E7//FYaI74r2G0rd+/X6VAMkHEdzxQaU5HUOXWUz5kA/w==
+write-file-atomic@^4.0.2:
+ version "4.0.2"
+ resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-4.0.2.tgz#a9df01ae5b77858a027fd2e80768ee433555fcfd"
+ integrity sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==
dependencies:
imurmurhash "^0.1.4"
signal-exit "^3.0.7"
@@ -4964,9 +4950,9 @@ yargs-parser@^21.1.1:
integrity sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==
yargs@^17.3.1:
- version "17.6.2"
- resolved "https://registry.yarnpkg.com/yargs/-/yargs-17.6.2.tgz#2e23f2944e976339a1ee00f18c77fedee8332541"
- integrity sha512-1/9UrdHjDZc0eOU0HxOHoS78C69UD3JRMvzlJ7S79S2nTaWRA/whGCTV8o9e/N/1Va9YIV7Q4sOxD8VV4pCWOw==
+ version "17.7.2"
+ resolved "https://registry.yarnpkg.com/yargs/-/yargs-17.7.2.tgz#991df39aca675a192b816e1e0363f9d75d2aa269"
+ integrity sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==
dependencies:
cliui "^8.0.1"
escalade "^3.1.1"
diff --git a/clustercontroller-apps/src/main/java/com/yahoo/vespa/clustercontroller/apps/clustercontroller/ClusterControllerClusterConfigurer.java b/clustercontroller-apps/src/main/java/com/yahoo/vespa/clustercontroller/apps/clustercontroller/ClusterControllerClusterConfigurer.java
index 786928391a5..a893abb519e 100644
--- a/clustercontroller-apps/src/main/java/com/yahoo/vespa/clustercontroller/apps/clustercontroller/ClusterControllerClusterConfigurer.java
+++ b/clustercontroller-apps/src/main/java/com/yahoo/vespa/clustercontroller/apps/clustercontroller/ClusterControllerClusterConfigurer.java
@@ -69,7 +69,6 @@ public class ClusterControllerClusterConfigurer extends AbstractComponent {
builder.setCount(config.fleet_controller_count());
builder.setZooKeeperSessionTimeout((int) (config.zookeeper_session_timeout() * 1000));
builder.setMasterZooKeeperCooldownPeriod((int) (config.master_zookeeper_cooldown_period() * 1000));
- builder.setStateGatherCount(config.state_gather_count());
builder.setRpcPort(config.rpc_port());
builder.setHttpPort(config.http_port());
builder.setMaxTransitionTime(NodeType.STORAGE, config.storage_transition_time());
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/AnnotatedClusterState.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/AnnotatedClusterState.java
index 7654f9bc5a4..a7602bbfd41 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/AnnotatedClusterState.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/AnnotatedClusterState.java
@@ -44,11 +44,10 @@ public class AnnotatedClusterState implements Cloneable {
public AnnotatedClusterState(ClusterState clusterState,
Optional<ClusterStateReason> clusterStateReason,
- Map<Node, NodeStateReason> nodeStateReasons)
- {
- this.clusterState = clusterState;
- this.clusterStateReason = clusterStateReason;
- this.nodeStateReasons = nodeStateReasons;
+ Map<Node, NodeStateReason> nodeStateReasons) {
+ this.clusterState = Objects.requireNonNull(clusterState, "Cluster state cannot be null");
+ this.clusterStateReason = Objects.requireNonNull(clusterStateReason, "Cluster state reason cannot be null");
+ this.nodeStateReasons = Objects.requireNonNull(nodeStateReasons, "Node state reasons cannot be null");
}
public static AnnotatedClusterState emptyState() {
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ContentCluster.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ContentCluster.java
index 2535589395d..2e8e2707166 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ContentCluster.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ContentCluster.java
@@ -25,12 +25,11 @@ public class ContentCluster {
private final String clusterName;
private final ClusterInfo clusterInfo = new ClusterInfo();
private final Map<Node, Long> nodeStartTimestamps = new TreeMap<>();
+ private final int maxNumberOfGroupsAllowedToBeDown;
private int slobrokGenerationCount = 0;
private Distribution distribution;
- private final int maxNumberOfGroupsAllowedToBeDown;
-
public ContentCluster(String clusterName, Collection<ConfiguredNode> configuredNodes, Distribution distribution) {
this(clusterName, configuredNodes, distribution, -1);
}
@@ -45,7 +44,7 @@ public class ContentCluster {
int maxNumberOfGroupsAllowedToBeDown) {
if (configuredNodes == null) throw new IllegalArgumentException("Nodes must be set");
this.clusterName = clusterName;
- this.distribution = distribution;
+ this.distribution = Objects.requireNonNull(distribution, "distribution must be non-null");
setNodes(configuredNodes, new NodeListener() {});
this.maxNumberOfGroupsAllowedToBeDown = maxNumberOfGroupsAllowedToBeDown;
}
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/EventLog.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/EventLog.java
index e64e684ed70..0203ae060d8 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/EventLog.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/EventLog.java
@@ -8,6 +8,7 @@ import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import java.util.TimeZone;
import java.util.TreeMap;
import java.util.logging.Level;
@@ -20,18 +21,17 @@ public class EventLog implements EventLogInterface {
private final Timer timer;
private final LinkedList<Event> eventLog = new LinkedList<>();
private final Map<Node, LinkedList<NodeEvent>> nodeLog = new TreeMap<>();
- private final MetricUpdater metricUpdater; // may be null
+ private final MetricUpdater metricUpdater;
private long eventsSeen = 0;
private final long startTime;
private int maxSize = 1024;
private int maxNodeSize = 1024;
private final long recentTimePeriod = 7 * 24 * 60 * 60 * 1000; // millisecs - 1 week
- /** Note: metricReporter may be null. */
public EventLog(Timer timer, MetricUpdater metricUpdater) {
this.timer = timer;
this.startTime = timer.getCurrentTimeInMillis();
- this.metricUpdater = metricUpdater;
+ this.metricUpdater = Objects.requireNonNull(metricUpdater, "metricUpdater must be non-null");
}
public void setMaxSize(int size, int nodesize) {
@@ -70,9 +70,7 @@ public class EventLog implements EventLogInterface {
public void addNodeOnlyEvent(NodeEvent e, java.util.logging.Level level) {
log.log(level, "Added node only event: " + e.toString());
- if (metricUpdater != null) {
- metricUpdater.recordNewNodeEvent();
- }
+ metricUpdater.recordNewNodeEvent();
LinkedList<NodeEvent> nodeList = nodeLog.get(e.getNode().getNode());
if (nodeList == null) {
nodeList = new LinkedList<>();
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java
index 5f2a6daf39e..8027cec4e3c 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java
@@ -207,15 +207,11 @@ public class FleetController implements NodeListener, SlobrokListener, SystemSta
public void addSystemStateListener(SystemStateListener listener) {
systemStateListeners.add(listener);
// Always give cluster state listeners the current state, in case an acceptable state arrived before the listener was registered.
- com.yahoo.vdslib.state.ClusterState state = getSystemState();
- if (state == null) {
- throw new NullPointerException("Cluster state should never be null at this point");
- }
+ var state = getSystemState();
listener.handleNewPublishedState(ClusterStateBundle.ofBaselineOnly(AnnotatedClusterState.withoutAnnotations(state)));
ClusterStateBundle convergedState = systemStateBroadcaster.getLastClusterStateBundleConverged();
- if (convergedState != null) {
+ if (convergedState != null)
listener.handleStateConvergedInCluster(convergedState);
- }
}
public FleetControllerOptions getOptions() {
@@ -489,11 +485,9 @@ public class FleetController implements NodeListener, SlobrokListener, SystemSta
// TODO: remove as many temporal parameter dependencies as possible here. Currently duplication of state.
stateChangeHandler.reconfigureFromOptions(options);
- stateChangeHandler.setStateChangedFlag(); // Always trigger state recomputation after reconfig
masterElectionHandler.setFleetControllerCount(options.fleetControllerCount());
masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod());
- masterElectionHandler.setUsingZooKeeper(options.zooKeeperServerAddress() != null && !options.zooKeeperServerAddress().isEmpty());
if (rpcServer != null) {
rpcServer.setMasterElectionHandler(masterElectionHandler);
@@ -565,7 +559,7 @@ public class FleetController implements NodeListener, SlobrokListener, SystemSta
if ( ! isRunning()) { return; }
- if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount())) {
+ if (masterElectionHandler.isFirstInLine()) {
didWork |= resyncLocallyCachedState(); // Calls to metricUpdate.forWork inside method
} else {
stepDownAsStateGatherer();
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetControllerOptions.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetControllerOptions.java
index 1541e1a4218..e116bb28e46 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetControllerOptions.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetControllerOptions.java
@@ -24,7 +24,6 @@ public class FleetControllerOptions {
private final String clusterName;
private final int fleetControllerIndex;
private final int fleetControllerCount;
- private final int stateGatherCount;
private final String[] slobrokConnectionSpecs;
private final int rpcPort;
@@ -132,7 +131,6 @@ public class FleetControllerOptions {
private FleetControllerOptions(String clusterName,
int fleetControllerIndex,
int fleetControllerCount,
- int stateGatherCount,
String[] slobrokConnectionSpecs,
int rpcPort,
int httpPort,
@@ -174,14 +172,13 @@ public class FleetControllerOptions {
this.clusterName = clusterName;
this.fleetControllerIndex = fleetControllerIndex;
this.fleetControllerCount = fleetControllerCount;
- this.stateGatherCount = stateGatherCount;
this.slobrokConnectionSpecs = slobrokConnectionSpecs;
this.rpcPort = rpcPort;
this.httpPort = httpPort;
this.distributionBits = distributionBits;
this.zooKeeperSessionTimeout = zooKeeperSessionTimeout;
this.masterZooKeeperCooldownPeriod = masterZooKeeperCooldownPeriod;
- this.zooKeeperServerAddress = zooKeeperServerAddress;
+ this.zooKeeperServerAddress = Objects.requireNonNull(zooKeeperServerAddress, "zooKeeperServerAddress cannot be null");
this.maxTransitionTime = maxTransitionTime;
this.maxInitProgressTime = maxInitProgressTime;
this.maxPrematureCrashes = maxPrematureCrashes;
@@ -235,10 +232,6 @@ public class FleetControllerOptions {
return fleetControllerCount;
}
- public int stateGatherCount() {
- return stateGatherCount;
- }
-
public String[] slobrokConnectionSpecs() {
return slobrokConnectionSpecs;
}
@@ -394,7 +387,6 @@ public class FleetControllerOptions {
private String clusterName;
private int index = 0;
private int count = 1;
- private int stateGatherCount = 2;
private String[] slobrokConnectionSpecs;
private int rpcPort = 0;
private int httpPort = 0;
@@ -464,11 +456,6 @@ public class FleetControllerOptions {
return this;
}
- public Builder setStateGatherCount(int stateGatherCount) {
- this.stateGatherCount = stateGatherCount;
- return this;
- }
-
public Builder setSlobrokConnectionSpecs(String[] slobrokConnectionSpecs) {
Objects.requireNonNull(slobrokConnectionSpecs, "slobrokConnectionSpecs cannot be null");
this.slobrokConnectionSpecs = slobrokConnectionSpecs;
@@ -694,7 +681,6 @@ public class FleetControllerOptions {
return new FleetControllerOptions(clusterName,
index,
count,
- stateGatherCount,
slobrokConnectionSpecs,
rpcPort,
httpPort,
@@ -740,7 +726,6 @@ public class FleetControllerOptions {
builder.clusterName = options.clusterName;
builder.index = options.fleetControllerIndex;
builder.count = options.fleetControllerCount;
- builder.stateGatherCount = options.stateGatherCount;
builder.slobrokConnectionSpecs = options.slobrokConnectionSpecs;
builder.rpcPort = options.rpcPort;
builder.httpPort = options.httpPort;
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/GroupAvailabilityCalculator.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/GroupAvailabilityCalculator.java
index a697d8f9868..35e3ae8d063 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/GroupAvailabilityCalculator.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/GroupAvailabilityCalculator.java
@@ -12,6 +12,7 @@ import com.yahoo.vdslib.state.NodeType;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
+import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
@@ -27,7 +28,7 @@ class GroupAvailabilityCalculator {
double minNodeRatioPerGroup,
int safeMaintenanceGroupThreshold,
List<Integer> nodesSafelySetToMaintenance) {
- this.distribution = distribution;
+ this.distribution = Objects.requireNonNull(distribution, "distribution must be non-null");
this.minNodeRatioPerGroup = minNodeRatioPerGroup;
this.safeMaintenanceGroupThreshold = safeMaintenanceGroupThreshold;
this.nodesSafelySetToMaintenance = nodesSafelySetToMaintenance;
@@ -181,9 +182,6 @@ class GroupAvailabilityCalculator {
}
public Result calculate(ClusterState state) {
- if (distribution == null) { // FIXME: for tests that don't set distribution properly!
- return new Result();
- }
if (isFlatCluster(distribution.getRootGroup())) {
// Implicit group takedown only applies to hierarchic cluster setups.
return new Result();
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/MasterElectionHandler.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/MasterElectionHandler.java
index b041e6b14f8..68b132e34b4 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/MasterElectionHandler.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/MasterElectionHandler.java
@@ -26,7 +26,6 @@ public class MasterElectionHandler implements MasterInterface {
private Map<Integer, Integer> nextMasterData;
private long masterGoneFromZooKeeperTime; // Set to the time the master fleet controller disappeared from ZooKeeper
private long masterZooKeeperCooldownPeriod; // The period in ms during which we won't take over unless the master comes back.
- private boolean usingZooKeeper = false; // Unit tests may not use ZooKeeper at all.
public MasterElectionHandler(FleetControllerContext context, int index, int totalCount, Object monitor, Timer timer) {
this.context = context;
@@ -34,7 +33,8 @@ public class MasterElectionHandler implements MasterInterface {
this.timer = timer;
this.index = index;
this.totalCount = totalCount;
- this.nextInLineCount = Integer.MAX_VALUE;
+ // nextInLineCount should/will always be 0 when we have one controller
+ this.nextInLineCount = totalCount == 1 ? 0 : Integer.MAX_VALUE;
if (cannotBecomeMaster())
context.log(logger, Level.FINE, () -> "We can never become master and will always stay a follower.");
// Tag current time as when we have not seen any other master. Make sure we're not taking over at once for master that is on the way down
@@ -43,25 +43,12 @@ public class MasterElectionHandler implements MasterInterface {
public void setFleetControllerCount(int count) {
totalCount = count;
- if (count == 1 && !usingZooKeeper) {
- masterCandidate = 0;
- followers = 1;
- nextInLineCount = 0;
- }
}
public void setMasterZooKeeperCooldownPeriod(int period) {
masterZooKeeperCooldownPeriod = period;
}
- public void setUsingZooKeeper(boolean usingZK) {
- if (!usingZooKeeper && usingZK) {
- // Reset any shortcuts taken by non-ZK election logic.
- resetElectionProgress();
- }
- usingZooKeeper = usingZK;
- }
-
@Override
public boolean isMaster() {
Integer master = getMaster();
@@ -90,46 +77,20 @@ public class MasterElectionHandler implements MasterInterface {
return masterCandidate;
}
- public String getMasterReason() {
- if (masterCandidate == null) {
- return "There is currently no master candidate.";
- }
- if (tooFewFollowersToHaveAMaster()) {
- return "More than half of the nodes must agree for there to be a master. Only " + followers + " of "
- + totalCount + " nodes agree on current master candidate (" + masterCandidate + ").";
- }
- // If all are following master candidate, it is master if it exists.
- if (followers == totalCount) {
- return "All " + totalCount + " nodes agree that " + masterCandidate + " is current master.";
- }
-
- // If not all are following we only accept master candidate if old master
- // disappeared sufficient time ago
- if (masterGoneFromZooKeeperTime + masterZooKeeperCooldownPeriod > timer.getCurrentTimeInMillis()) {
- return followers + " of " + totalCount + " nodes agree " + masterCandidate + " should be master, "
- + "but old master cooldown period of " + masterZooKeeperCooldownPeriod + " ms has not passed yet. "
- + "To ensure it has got time to realize it is no longer master before we elect a new one, "
- + "currently there is no master.";
- }
- return followers + " of " + totalCount + " nodes agree " + masterCandidate + " is master.";
- }
-
private boolean tooFewFollowersToHaveAMaster() {
return 2 * followers <= totalCount;
}
- public boolean isAmongNthFirst(int first) { return (nextInLineCount < first); }
+ public boolean isFirstInLine() { return (nextInLineCount < 1); }
public boolean watchMasterElection(DatabaseHandler database, DatabaseHandler.DatabaseContext dbContext) {
- if (totalCount == 1 && !usingZooKeeper) {
- return false; // Allow single configured node to become master implicitly if no ZK configured
- }
if (nextMasterData == null) {
if (masterCandidate == null) {
context.log(logger, Level.FINEST, () -> "No current master candidate. Waiting for data to do master election.");
}
return false; // Nothing has happened since last time.
}
+
// Move next data to temporary, such that we don't need to keep lock, and such that we don't retry
// if we happen to fail processing the data.
Map<Integer, Integer> state;
@@ -140,6 +101,7 @@ public class MasterElectionHandler implements MasterInterface {
}
context.log(logger, Level.INFO, "Got master election state " + toString(state) + ".");
if (state.isEmpty()) throw new IllegalStateException("Database has no master data. We should at least have data for ourselves.");
+
Map.Entry<Integer, Integer> first = state.entrySet().iterator().next();
Integer currentMaster = getMaster();
if (currentMaster != null && first.getKey().intValue() != currentMaster.intValue()) {
@@ -194,9 +156,8 @@ public class MasterElectionHandler implements MasterInterface {
}
if (nextInLineCount != ourPosition) {
nextInLineCount = ourPosition;
- if (ourPosition > 0) {
- context.log(logger, Level.FINE, () -> "We are now " + getPosition(nextInLineCount) + " in queue to take over being master.");
- }
+ if (nextInLineCount > 0)
+ context.log(logger, Level.FINE, () -> "We are now in position " + nextInLineCount + " in queue to take over being master.");
}
}
masterData = state;
@@ -221,14 +182,6 @@ public class MasterElectionHandler implements MasterInterface {
return sb.toString();
}
- private String getPosition(int val) {
- if (val < 1) return "invalid(" + val + ")";
- if (val == 1) { return "first"; }
- if (val == 2) { return "second"; }
- if (val == 3) { return "third"; }
- return val + "th";
- }
-
public void handleFleetData(Map<Integer, Integer> data) {
context.log(logger, Level.INFO, "Got new fleet data with " + data.size() + " entries: " + data);
synchronized (monitor) {
@@ -238,20 +191,14 @@ public class MasterElectionHandler implements MasterInterface {
}
public void lostDatabaseConnection() {
- if (totalCount > 1 || usingZooKeeper) {
- context.log(logger, Level.INFO, "Clearing master data as we lost connection on node " + index);
- resetElectionProgress();
- }
- }
-
- private void resetElectionProgress() {
+ context.log(logger, Level.INFO, "Clearing master data as we lost connection on node " + index);
masterData = null;
masterCandidate = null;
followers = 0;
nextMasterData = null;
}
- public void writeHtmlState(StringBuilder sb, int stateGatherCount) {
+ public void writeHtmlState(StringBuilder sb) {
sb.append("<h2>Master state</h2>\n");
Integer master = getMaster();
if (master != null) {
@@ -270,7 +217,7 @@ public class MasterElectionHandler implements MasterInterface {
.append(" before electing new master unless all possible master candidates are online.</p>");
}
}
- if ((master == null || master != index) && nextInLineCount < stateGatherCount) {
+ if ((master == null || master != index) && nextInLineCount < 1) {
sb.append("<p>As we are number ").append(nextInLineCount)
.append(" in line for taking over as master, we're gathering state from nodes.</p>");
sb.append("<p><font color=\"red\">As we are not the master, we don't know about nodes current system state"
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/StateChangeHandler.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/StateChangeHandler.java
index 28149477e36..2317777e43d 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/StateChangeHandler.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/StateChangeHandler.java
@@ -234,6 +234,7 @@ public class StateChangeHandler {
setMaxInitProgressTime(options.maxInitProgressTime());
setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod());
setMaxTransitionTime(options.maxTransitionTime());
+ setStateChangedFlag(); // Always trigger state recomputation after reconfig
}
// TODO too many hidden behavior dependencies between this and the actually
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/SystemStateBroadcaster.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/SystemStateBroadcaster.java
index 72bd5148f11..cc121a8b120 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/SystemStateBroadcaster.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/SystemStateBroadcaster.java
@@ -288,27 +288,26 @@ public class SystemStateBroadcaster {
return false;
}
- ClusterState baselineState = clusterStateBundle.getBaselineClusterState();
+ int baselineStateVersion = clusterStateBundle.getBaselineClusterState().getVersion();
if (!currentBundleVersionIsTaggedOfficial()) {
- context.log(log, Level.INFO, "Publishing cluster state version " + baselineState.getVersion());
+ context.log(log, Level.INFO, "Publishing cluster state version " + baselineStateVersion);
tagCurrentBundleVersionAsOfficial();
}
List<NodeInfo> recipients = resolveStateVersionSendSet(dbContext);
+ ClusterStateBundle modifiedBundle = clusterStateBundle.cloneWithMapper(state -> buildModifiedClusterState(state, dbContext));
for (NodeInfo node : recipients) {
if (nodeNeedsToObserveStartupTimestamps(node)) {
- // TODO this is the same for all nodes, compute only once
- ClusterStateBundle modifiedBundle = clusterStateBundle.cloneWithMapper(state -> buildModifiedClusterState(state, dbContext));
context.log(log,
Level.FINE,
- () -> "Sending modified cluster state version " + baselineState.getVersion() +
+ () -> "Sending modified cluster state version " + baselineStateVersion +
" to node " + node + ": " + modifiedBundle);
communicator.setSystemState(modifiedBundle, node, setClusterStateWaiter);
} else {
context.log(log,
Level.FINE,
- () -> "Sending system state version " + baselineState.getVersion() +
+ () -> "Sending system state version " + baselineStateVersion +
" to node " + node + ". (went down time " + node.getWentDownWithStartTime() +
", node start time " + node.getStartTimestamp() + ")");
communicator.setSystemState(clusterStateBundle, node, setClusterStateWaiter);
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/DatabaseHandler.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/DatabaseHandler.java
index efb97a4a69e..ed194776d78 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/DatabaseHandler.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/DatabaseHandler.java
@@ -15,6 +15,7 @@ import org.apache.zookeeper.KeeperException;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.Map;
+import java.util.Objects;
import java.util.TreeMap;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -101,8 +102,7 @@ public class DatabaseHandler {
this.timer = timer;
pendingStore.masterVote = fleetControllerContext.id().index(); // To begin with we'll vote for ourselves.
this.monitor = monitor;
- // TODO: Require non-null, not possible now since at least ClusterFeedBlockTest uses null address
- this.zooKeeperAddress = zooKeeperAddress;
+ this.zooKeeperAddress = Objects.requireNonNull(zooKeeperAddress, "zooKeeperAddress cannot be null");
}
private boolean isDatabaseClosedSafe() {
@@ -161,11 +161,9 @@ public class DatabaseHandler {
}
public void setZooKeeperAddress(String address, DatabaseContext databaseContext) {
- if (address == null && zooKeeperAddress == null) return;
- if (address != null && address.equals(zooKeeperAddress)) return;
- if (zooKeeperAddress != null) {
- fleetControllerContext.log(logger, Level.INFO, "" + (address == null ? "Stopped using ZooKeeper." : "Got new ZooKeeper address to use: " + address));
- }
+ Objects.requireNonNull(address, "address cannot be null");
+ if (address.equals(zooKeeperAddress)) return;
+ fleetControllerContext.log(logger, Level.INFO, "Got new ZooKeeper address to use: " + address);
zooKeeperAddress = address;
reset(databaseContext);
}
@@ -177,8 +175,6 @@ public class DatabaseHandler {
reset(databaseContext);
}
- private boolean usingZooKeeper() { return (zooKeeperAddress != null); }
-
private void connect(long currentTime) {
try {
lastZooKeeperConnectionAttempt = currentTime;
@@ -245,7 +241,7 @@ public class DatabaseHandler {
didWork = true;
}
}
- if (isDatabaseClosedSafe() && zooKeeperIsConfigured()) {
+ if (isDatabaseClosedSafe()) {
long currentTime = timer.getCurrentTimeInMillis();
if (currentTime - lastZooKeeperConnectionAttempt < minimumWaitBetweenFailedConnectionAttempts) {
return false; // Not time to attempt connection yet.
@@ -270,11 +266,6 @@ public class DatabaseHandler {
return didWork;
}
- private boolean zooKeeperIsConfigured() {
- // This should only ever be null during unit testing.
- return zooKeeperAddress != null;
- }
-
private void relinquishDatabaseConnectivity(DatabaseContext databaseContext) {
// reset() will handle both session clearing and trigger a database loss callback into the CC.
reset(databaseContext);
@@ -383,9 +374,7 @@ public class DatabaseHandler {
}
Integer version = currentlyStored.lastSystemStateVersion;
if (version == null) {
- if (usingZooKeeper()) {
- fleetControllerContext.log(logger, Level.WARNING, "Failed to retrieve latest system state version from ZooKeeper. Returning version 0.");
- }
+ fleetControllerContext.log(logger, Level.WARNING, "Failed to retrieve latest system state version from ZooKeeper. Returning version 0.");
return 0; // FIXME "fail-oblivious" is not a good error handling mode for such a critical component!
}
return version;
@@ -395,22 +384,13 @@ public class DatabaseHandler {
fleetControllerContext.log(logger, Level.FINE, () -> "Scheduling bundle " + clusterStateBundle + " to be saved to ZooKeeper");
pendingStore.clusterStateBundle = clusterStateBundle;
doNextZooKeeperTask(databaseContext);
- // FIXME this is a nasty hack to get around the fact that a massive amount of unit tests
- // set up the system with a null ZooKeeper server address. If we don't fake that we have
- // written the state version, the tests will never progress past waiting for state broadcasts.
- if (zooKeeperAddress == null) {
- logger.warning(() -> "Simulating ZK write of version " + clusterStateBundle.getVersion() +
- ". This should not happen in production!");
- lastKnownStateBundleVersionWrittenBySelf = clusterStateBundle.getVersion();
- }
}
// TODO should we expand this to cover _any_ pending ZK write?
public boolean hasPendingClusterStateMetaDataStore() {
synchronized (databaseMonitor) {
- return ((zooKeeperAddress != null) &&
- ((pendingStore.clusterStateBundle != null) ||
- (pendingStore.lastSystemStateVersion != null)));
+ return ((pendingStore.clusterStateBundle != null) ||
+ (pendingStore.lastSystemStateVersion != null));
}
}
@@ -458,11 +438,9 @@ public class DatabaseHandler {
}
Map<Node, NodeState> wantedStates = currentlyStored.wantedStates;
if (wantedStates == null) {
- if (usingZooKeeper()) {
- // We get here if the ZooKeeper client has lost the connection to ZooKeeper.
- // TODO: Should instead fail the tick until connected!?
- fleetControllerContext.log(logger, Level.FINE, () -> "Failed to retrieve wanted states from ZooKeeper. Assuming UP for all nodes.");
- }
+ // We get here if the ZooKeeper client has lost connection to ZooKeeper.
+ // TODO: Should instead fail the tick until connected!?
+ fleetControllerContext.log(logger, Level.FINE, () -> "Failed to retrieve wanted states from ZooKeeper. Assuming UP for all nodes.");
wantedStates = new TreeMap<>();
}
boolean altered = false;
@@ -510,9 +488,7 @@ public class DatabaseHandler {
}
Map<Node, Long> startTimestamps = currentlyStored.startTimestamps;
if (startTimestamps == null) {
- if (usingZooKeeper()) {
- fleetControllerContext.log(logger, Level.WARNING, "Failed to retrieve start timestamps from ZooKeeper. Cluster state will be bloated with timestamps until we get them set.");
- }
+ fleetControllerContext.log(logger, Level.WARNING, "Failed to retrieve start timestamps from ZooKeeper. Cluster state will be bloated with timestamps until we get them set.");
startTimestamps = new TreeMap<>();
}
for (Map.Entry<Node, Long> e : startTimestamps.entrySet()) {
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RpcServer.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RpcServer.java
index 9358ae0d298..fa8550efb93 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RpcServer.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RpcServer.java
@@ -3,12 +3,10 @@ package com.yahoo.vespa.clustercontroller.core.rpc;
import com.yahoo.jrt.Acceptor;
import com.yahoo.jrt.ErrorCode;
-import com.yahoo.jrt.Int32Value;
import com.yahoo.jrt.ListenFailedException;
import com.yahoo.jrt.Method;
import com.yahoo.jrt.Request;
import com.yahoo.jrt.Spec;
-import com.yahoo.jrt.StringArray;
import com.yahoo.jrt.StringValue;
import com.yahoo.jrt.Supervisor;
import com.yahoo.jrt.Transport;
@@ -24,9 +22,9 @@ import com.yahoo.vespa.clustercontroller.core.ContentCluster;
import com.yahoo.vespa.clustercontroller.core.MasterElectionHandler;
import com.yahoo.vespa.clustercontroller.core.NodeInfo;
import com.yahoo.vespa.clustercontroller.core.listeners.NodeListener;
+
import java.io.PrintWriter;
import java.io.StringWriter;
-import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.LinkedList;
@@ -122,19 +120,7 @@ public class RpcServer {
}
public void addMethods() {
- Method m = new Method("getMaster", "", "is", this::queueRpcRequest);
- m.methodDesc("Get index of current fleetcontroller master");
- m.returnDesc(0, "masterindex", "The index of the current master according to this node, or -1 if there is none.");
- m.returnDesc(1, "description", "A textual field, used for additional information, such as why there is no master.");
- supervisor.addMethod(m);
-
- m = new Method("getNodeList", "", "SS", this::queueRpcRequest);
- m.methodDesc("Get list of connection-specs to all nodes in the system");
- m.returnDesc(0, "distributors", "connection-spec of all distributor-nodes (empty string for unknown nodes)");
- m.returnDesc(1, "storagenodes", "connection-spec of all storage-nodes, (empty string for unknown nodes)");
- supervisor.addMethod(m);
-
- m = new Method("getSystemState", "", "ss", this::queueRpcRequest);
+ Method m = new Method("getSystemState", "", "ss", this::queueRpcRequest);
m.methodDesc("Get nodeState of all nodes and the system itself");
m.returnDesc(0, "systemstate", "nodeState string of system");
m.returnDesc(1, "nodestate", "nodeState-string for distributor and storage-nodes");
@@ -182,33 +168,10 @@ public class RpcServer {
handledAnyRequests = true;
}
try{
- if (req.methodName().equals("getMaster")) {
- log.log(Level.FINE, "Resolving RPC getMaster request");
- Integer master = masterHandler.getMaster();
- String masterReason = masterHandler.getMasterReason();
- req.returnValues().add(new Int32Value(master == null ? -1 : master));
- req.returnValues().add(new StringValue(masterReason == null ? "No reason given" : masterReason));
- req.returnRequest();
- continue;
- }
if (!masterHandler.isMaster()) {
throw new IllegalStateException("Refusing to answer RPC calls as we are not the master fleetcontroller.");
}
- if (req.methodName().equals("getNodeList")) {
- log.log(Level.FINE, "Resolving RPC getNodeList request");
- List<String> slobrok = new ArrayList<>();
- List<String> rpc = new ArrayList<>();
- for(NodeInfo node : cluster.getNodeInfos()) {
- String s1 = node.getSlobrokAddress();
- String s2 = node.getRpcAddress();
- assert(s1 != null);
- slobrok.add(s1);
- rpc.add(s2 == null ? "" : s2);
- }
- req.returnValues().add(new StringArray(slobrok.toArray(new String[0])));
- req.returnValues().add(new StringArray(rpc.toArray(new String[0])));
- req.returnRequest();
- } else if (req.methodName().equals("getSystemState")) {
+ if (req.methodName().equals("getSystemState")) {
log.log(Level.FINE, "Resolving RPC getSystemState request");
req.returnValues().add(new StringValue(""));
req.returnValues().add(new StringValue(systemState.toString(true)));
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/LegacyIndexPageRequestHandler.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/LegacyIndexPageRequestHandler.java
index 3297d511469..7a9bea91b9c 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/LegacyIndexPageRequestHandler.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/LegacyIndexPageRequestHandler.java
@@ -81,7 +81,7 @@ public class LegacyIndexPageRequestHandler implements StatusPageServer.RequestHa
.append(" ]</font></p>\n");
content.append("<table><tr><td>UTC time when creating this page:</td><td align=\"right\">").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</td></tr>");
content.append("<tr><td>Cluster controller uptime:</td><td align=\"right\">" + RealTimer.printDuration(currentTime - startedTime) + "</td></tr></table>");
- if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount())) {
+ if (masterElectionHandler.isFirstInLine()) {
// Table overview of all the nodes
writeHtmlState(cluster, content, timer, stateVersionTracker, options, eventLog);
// Current cluster state and cluster state history
@@ -91,7 +91,7 @@ public class LegacyIndexPageRequestHandler implements StatusPageServer.RequestHa
writeHtmlState(content, options);
}
// State of master election
- masterElectionHandler.writeHtmlState(content, options.stateGatherCount());
+ masterElectionHandler.writeHtmlState(content);
// Overview of current config
writeHtmlState(content, options);
// Event log
@@ -223,13 +223,12 @@ public class LegacyIndexPageRequestHandler implements StatusPageServer.RequestHa
sb.append("<tr><td><nobr>Cluster name</nobr></td><td align=\"right\">").append(options.clusterName()).append("</td></tr>");
sb.append("<tr><td><nobr>Fleet controller index</nobr></td><td align=\"right\">").append(options.fleetControllerIndex()).append("/").append(options.fleetControllerCount()).append("</td></tr>");
- sb.append("<tr><td><nobr>Number of fleetcontrollers gathering states from nodes</nobr></td><td align=\"right\">").append(options.stateGatherCount()).append("</td></tr>");
sb.append("<tr><td><nobr>Slobrok connection spec</nobr></td><td align=\"right\">").append(slobrokspecs).append("</td></tr>");
sb.append("<tr><td><nobr>RPC port</nobr></td><td align=\"right\">").append(options.rpcPort() == 0 ? "Pick random available" : options.rpcPort()).append("</td></tr>");
sb.append("<tr><td><nobr>HTTP port</nobr></td><td align=\"right\">").append(options.httpPort() == 0 ? "Pick random available" : options.httpPort()).append("</td></tr>");
sb.append("<tr><td><nobr>Master cooldown period</nobr></td><td align=\"right\">").append(RealTimer.printDuration(options.masterZooKeeperCooldownPeriod())).append("</td></tr>");
- String zooKeeperAddress = (options.zooKeeperServerAddress() == null ? "Not using Zookeeper" : splitZooKeeperAddress(options.zooKeeperServerAddress()));
+ String zooKeeperAddress = splitZooKeeperAddress(options.zooKeeperServerAddress());
sb.append("<tr><td><nobr>Zookeeper server address</nobr></td><td align=\"right\">").append(zooKeeperAddress).append("</td></tr>");
sb.append("<tr><td><nobr>Zookeeper session timeout</nobr></td><td align=\"right\">").append(RealTimer.printDuration(options.zooKeeperSessionTimeout())).append("</td></tr>");
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFeedBlockTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFeedBlockTest.java
index d4eea261767..55e256cf89c 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFeedBlockTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFeedBlockTest.java
@@ -32,15 +32,17 @@ public class ClusterFeedBlockTest extends FleetControllerTest {
private FleetController ctrl;
private DummyCommunicator communicator;
- private void initialize(FleetControllerOptions options) throws Exception {
+ private void initialize(FleetControllerOptions.Builder builder) throws Exception {
List<Node> nodes = new ArrayList<>();
- for (int i = 0; i < options.nodes().size(); ++i) {
+ for (int i = 0; i < builder.nodes().size(); ++i) {
nodes.add(new Node(NodeType.STORAGE, i));
nodes.add(new Node(NodeType.DISTRIBUTOR, i));
}
- var context = new TestFleetControllerContext(options);
communicator = new DummyCommunicator(nodes, timer);
+ setUpZooKeeperServer(builder);
+ options = builder.build();
+ var context = new TestFleetControllerContext(options);
boolean start = false;
ctrl = createFleetController(timer, options, context, communicator, communicator, null, start);
@@ -57,16 +59,16 @@ public class ClusterFeedBlockTest extends FleetControllerTest {
ctrl.tick();
}
- private static FleetControllerOptions createOptions(Map<String, Double> feedBlockLimits, double clusterFeedBlockNoiseLevel) {
+ private static FleetControllerOptions.Builder createOptions(Map<String, Double> feedBlockLimits, double clusterFeedBlockNoiseLevel) {
return defaultOptions()
.setStorageDistribution(DistributionBuilder.forFlatCluster(NODE_COUNT))
.setNodes(new HashSet<>(DistributionBuilder.buildConfiguredNodes(NODE_COUNT)))
.setClusterFeedBlockEnabled(true)
.setClusterFeedBlockLimit(feedBlockLimits)
- .setClusterFeedBlockNoiseLevel(clusterFeedBlockNoiseLevel).build();
+ .setClusterFeedBlockNoiseLevel(clusterFeedBlockNoiseLevel);
}
- private static FleetControllerOptions createOptions(Map<String, Double> feedBlockLimits) {
+ private static FleetControllerOptions.Builder createOptions(Map<String, Double> feedBlockLimits) {
return createOptions(feedBlockLimits, 0.0);
}
@@ -109,7 +111,7 @@ public class ClusterFeedBlockTest extends FleetControllerTest {
assertTrue(ctrl.getClusterStateBundle().clusterFeedIsBlocked());
// Increase cheese allowance. Should now automatically unblock since reported usage is lower.
- ctrl.updateOptions(createOptions(mapOf(usage("cheese", 0.9), usage("wine", 0.4))));
+ ctrl.updateOptions(createOptions(mapOf(usage("cheese", 0.9), usage("wine", 0.4))).build());
ctrl.tick(); // Options propagation
ctrl.tick(); // State recomputation
assertFalse(ctrl.getClusterStateBundle().clusterFeedIsBlocked());
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterStateGeneratorTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterStateGeneratorTest.java
index 30c90ee0664..b5aebadd82b 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterStateGeneratorTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterStateGeneratorTest.java
@@ -870,7 +870,9 @@ public class ClusterStateGeneratorTest {
.setMinNodeRatioPerGroup(0.6)
.setDistributionBits(7)
.setMaxTransitionTime(NodeType.DISTRIBUTOR, 1000)
- .setMaxTransitionTime(NodeType.STORAGE, 2000).build();
+ .setMaxTransitionTime(NodeType.STORAGE, 2000)
+ .setZooKeeperServerAddress("localhost:2181")
+ .build();
final ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options);
assertThat(params.maxPrematureCrashes, equalTo(options.maxPrematureCrashes()));
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ContentClusterHtmlRendererTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ContentClusterHtmlRendererTest.java
index 8d5c2f685f8..68a740988ad 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ContentClusterHtmlRendererTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ContentClusterHtmlRendererTest.java
@@ -6,6 +6,7 @@ import com.google.common.collect.Sets;
import com.yahoo.vdslib.state.ClusterState;
import com.yahoo.vespa.clustercontroller.core.hostinfo.HostInfo;
import com.yahoo.vespa.clustercontroller.core.status.statuspage.VdsClusterHtmlRenderer;
+import com.yahoo.vespa.clustercontroller.utils.util.NoMetricReporter;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -27,14 +28,13 @@ public class ContentClusterHtmlRendererTest {
@BeforeEach
public void before() throws IOException {
- final ClusterStateBundle stateBundle = ClusterStateBundle.ofBaselineOnly(
+ ClusterStateBundle stateBundle = ClusterStateBundle.ofBaselineOnly(
AnnotatedClusterState.withoutAnnotations(
ClusterState.stateFromString("version:34633 bits:24 distributor:211 storage:211")));
- final EventLog eventLog = new EventLog(new FakeTimer(), null);
-
- final VdsClusterHtmlRenderer.Table table = renderer.createNewClusterHtmlTable(clusterName, slobrokGeneration);
-
- final ContentCluster contentCluster = mock(ContentCluster.class);
+ var metricUpdater = new MetricUpdater(new NoMetricReporter(), 0, clusterName);
+ EventLog eventLog = new EventLog(new FakeTimer(), metricUpdater);
+ VdsClusterHtmlRenderer.Table table = renderer.createNewClusterHtmlTable(clusterName, slobrokGeneration);
+ ContentCluster contentCluster = mock(ContentCluster.class);
for (int x = 0; x < 10; x++) {
NodeInfo nodeInfo = new DistributorNodeInfo(contentCluster, x, "dist " + x, null);
@@ -57,7 +57,7 @@ public class ContentClusterHtmlRendererTest {
eventLog,
"pathPrefix",
"name");
- final StringBuilder stringBuilder = new StringBuilder();
+ StringBuilder stringBuilder = new StringBuilder();
table.addTable(stringBuilder, 34);
result = stringBuilder.toString();
}
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DistributionBitCountTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DistributionBitCountTest.java
index 95b9d13cad5..11bdb6ec1c8 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DistributionBitCountTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DistributionBitCountTest.java
@@ -22,7 +22,7 @@ public class DistributionBitCountTest extends FleetControllerTest {
for (int i = 0 ; i < 10; i++) {
configuredNodes.add(new ConfiguredNode(i, false));
}
- var builder = defaultOptions("mycluster", configuredNodes);
+ var builder = defaultOptions(configuredNodes);
builder.setDistributionBits(17);
Timer timer = new RealTimer();
setUpFleetController(timer, builder);
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/EventLogTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/EventLogTest.java
index eee0cb41eeb..015fd78ac91 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/EventLogTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/EventLogTest.java
@@ -46,15 +46,6 @@ public class EventLogTest {
}
@Test
- void testNullMetricReporter() {
- initialize(null);
-
- eventLog.addNodeOnlyEvent(nodeEvent, Level.INFO);
-
- verifyNoMoreInteractions(metricUpdater);
- }
-
- @Test
void testNoEventsDoNotThrowException() {
initialize(metricUpdater);
StringBuilder builder = new StringBuilder();
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerTest.java
index 238dfd42da5..fb59df7e433 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerTest.java
@@ -25,6 +25,8 @@ import com.yahoo.vespa.clustercontroller.core.testutils.WaitTask;
import com.yahoo.vespa.clustercontroller.core.testutils.Waiter;
import com.yahoo.vespa.clustercontroller.utils.util.NoMetricReporter;
import org.junit.jupiter.api.AfterEach;
+
+import java.io.IOException;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
@@ -72,30 +74,24 @@ public abstract class FleetControllerTest implements Waiter {
}
protected static FleetControllerOptions.Builder defaultOptions() {
- return defaultOptions(DEFAULT_NODE_COUNT);
- }
-
- protected static FleetControllerOptions.Builder defaultOptions(int nodeCount) {
- return defaultOptions("mycluster", IntStream.range(0, nodeCount)
- .mapToObj(i -> new ConfiguredNode(i, false))
- .collect(Collectors.toSet()));
+ return defaultOptions(IntStream.range(0, DEFAULT_NODE_COUNT)
+ .mapToObj(i -> new ConfiguredNode(i, false))
+ .collect(Collectors.toSet()));
}
- protected static FleetControllerOptions.Builder defaultOptions(String clusterName, Collection<ConfiguredNode> nodes) {
- var builder = new FleetControllerOptions.Builder(clusterName, nodes);
+ protected static FleetControllerOptions.Builder defaultOptions(Collection<ConfiguredNode> nodes) {
+ var builder = new FleetControllerOptions.Builder("mycluster", nodes);
builder.enableTwoPhaseClusterStateActivation(true); // Enable by default, tests can explicitly disable.
+ builder.setStorageDistribution(DistributionBuilder.forFlatCluster(builder.nodes().size()));
+ builder.setZooKeeperServerAddress("localhost:2181");
return builder;
}
- private void setUpSystem(FleetControllerOptions.Builder builder) throws Exception {
- log.log(Level.FINE, "Setting up system");
- if (builder.zooKeeperServerAddress() != null) {
- zooKeeperServer = new ZooKeeperTestServer();
- // Need to set zookeeper address again, as port number is not known until ZooKeeperTestServer has been created
- builder.setZooKeeperServerAddress(zooKeeperServer.getAddress());
- log.log(Level.FINE, "Set up new zookeeper server at " + zooKeeperServer.getAddress());
- }
- builder.setSlobrokConnectionSpecs(getSlobrokConnectionSpecs(slobrok));
+ protected void setUpZooKeeperServer(FleetControllerOptions.Builder builder) throws IOException {
+ zooKeeperServer = new ZooKeeperTestServer();
+ // Need to set zookeeper address again, as port number is not known until ZooKeeperTestServer has been created
+ builder.setZooKeeperServerAddress(zooKeeperServer.getAddress());
+ log.log(Level.FINE, "Set up new zookeeper server at " + zooKeeperServer.getAddress());
}
FleetController createFleetController(Timer timer, FleetControllerOptions options) {
@@ -143,7 +139,8 @@ public abstract class FleetControllerTest implements Waiter {
}
protected FleetControllerOptions setUpFleetController(Timer timer, FleetControllerOptions.Builder builder) throws Exception {
- setUpSystem(builder);
+ setUpZooKeeperServer(builder);
+ builder.setSlobrokConnectionSpecs(getSlobrokConnectionSpecs(slobrok));
options = builder.build();
startFleetController(timer);
return options;
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/MasterElectionTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/MasterElectionTest.java
index 77c89d77ba5..f930c694a34 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/MasterElectionTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/MasterElectionTest.java
@@ -1,12 +1,8 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.clustercontroller.core;
-import com.yahoo.jrt.Request;
-import com.yahoo.jrt.Spec;
import com.yahoo.jrt.Supervisor;
-import com.yahoo.jrt.Target;
import com.yahoo.jrt.Transport;
-import com.yahoo.jrt.slobrok.server.Slobrok;
import com.yahoo.vdslib.state.ClusterState;
import com.yahoo.vdslib.state.NodeState;
import com.yahoo.vdslib.state.NodeType;
@@ -17,10 +13,9 @@ import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.extension.ExtendWith;
+
import java.time.Instant;
-import java.util.ArrayList;
import java.util.List;
-import java.util.Objects;
import java.util.concurrent.TimeoutException;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -53,7 +48,6 @@ public class MasterElectionTest extends FleetControllerTest {
if (zooKeeperServer == null) {
zooKeeperServer = new ZooKeeperTestServer();
}
- slobrok = new Slobrok();
builder.setZooKeeperSessionTimeout(defaultZkSessionTimeoutInMillis())
.setZooKeeperServerAddress(zooKeeperServer.getAddress())
.setSlobrokConnectionSpecs(getSlobrokConnectionSpecs(slobrok))
@@ -283,33 +277,6 @@ public class MasterElectionTest extends FleetControllerTest {
waitForMaster(1);
}
- private void waitForNoMasterWithExpectedReason(String reason, List<Target> connections, int[] nodes) {
- Objects.requireNonNull(reason, "reason cannot be null");
- Instant endTime = Instant.now().plus(timeout());
- while (Instant.now().isBefore(endTime)) {
- boolean allOk = true;
- for (int node : nodes) {
- Request req = new Request("getMaster");
- connections.get(node).invokeSync(req, timeout());
- if (req.isError()) {
- allOk = false;
- break;
- }
- if (req.returnValues().get(0).asInt32() != -1) { // -1 means no master, which we are waiting for
- allOk = false;
- break;
- }
- if ( ! reason.equals(req.returnValues().get(1).asString())) {
- allOk = false;
- break;
- }
- }
- if (allOk) return;
- try { Thread.sleep(100); } catch (InterruptedException e) { /* ignore */ }
- }
- throw new IllegalStateException("Did not get master reason '" + reason + "' within timeout of " + timeout());
- }
-
@Test
void testGetMaster() throws Exception {
FleetControllerOptions.Builder options = defaultOptions();
@@ -318,32 +285,12 @@ public class MasterElectionTest extends FleetControllerTest {
setUpFleetControllers(3, timer, options);
waitForMaster(0);
- List<Target> connections = new ArrayList<>();
- for (FleetController fleetController : fleetControllers) {
- int rpcPort = fleetController.getRpcPort();
- Target connection = supervisor.connect(new Spec("localhost", rpcPort));
- assertTrue(connection.isValid());
- connections.add(connection);
- }
-
timer.advanceTime(24 * 3600 * 1000); // A day
waitForCompleteCycles();
- Request req = new Request("getMaster");
-
long maxRetries = timeout().toMillis() / 100;
for (int nodeIndex = 0; nodeIndex < 3; ++nodeIndex) {
- for (int retry = 0; retry < maxRetries; ++retry) {
- req = new Request("getMaster");
- connections.get(nodeIndex).invokeSync(req, timeout());
- assertFalse(req.isError(), req.errorMessage());
- if (req.returnValues().get(0).asInt32() == 0 &&
- req.returnValues().get(1).asString().equals("All 3 nodes agree that 0 is current master.")) {
- break;
- }
- }
- assertEquals(0, req.returnValues().get(0).asInt32(), req.toString());
- assertEquals("All 3 nodes agree that 0 is current master.", req.returnValues().get(1).asString(), req.toString());
+ assertEquals((nodeIndex == 0), fleetControllers.get(nodeIndex).isMaster());
}
log.log(Level.INFO, "SHUTTING DOWN FLEET CONTROLLER 0");
@@ -354,41 +301,20 @@ public class MasterElectionTest extends FleetControllerTest {
// 5 minutes is not long enough period to wait before letting this node be master.
timer.advanceTime(300 * 1000); // 5 minutes
- int[] remainingNodes = {1, 2};
- waitForNoMasterWithExpectedReason(
- "2 of 3 nodes agree 1 should be master, but old master cooldown period of 3600000 ms has not passed yet. To ensure it has got time to realize it is no longer master before we elect a new one, currently there is no master.",
- connections,
- remainingNodes);
- // Verify that fc 1 is not master, and the correct reasons for why not
+ List<Integer> remainingNodes = List.of(1, 2);
+ waitForNoMaster(remainingNodes);
+ // Verify that fc 1 is not master
assertFalse(fleetControllers.get(1).isMaster());
// But after an hour it should become one.
timer.advanceTime(3600 * 1000); // 60 minutes
waitForMaster(1);
- req = new Request("getMaster");
- connections.get(0).invokeSync(req, timeout());
- assertEquals(104, req.errorCode(), req.toString());
- assertEquals("Connection error", req.errorMessage(), req.toString());
-
for (int i = 0; i < maxRetries; ++i) {
- req = new Request("getMaster");
- connections.get(1).invokeSync(req, timeout());
- assertFalse(req.isError(), req.errorMessage());
- if (req.returnValues().get(0).asInt32() != -1) break;
+ if (fleetControllers.get(1).isMaster()) break;
// We may have bad timing causing node not to have realized it is master yet
}
- assertEquals(1, req.returnValues().get(0).asInt32(), req.toString());
- assertEquals("2 of 3 nodes agree 1 is master.", req.returnValues().get(1).asString(), req.toString());
-
- for (int i = 0; i < maxRetries; ++i) {
- req = new Request("getMaster");
- connections.get(2).invokeSync(req, timeout());
- assertFalse(req.isError(), req.errorMessage());
- if (req.returnValues().get(0).asInt32() != -1) break;
- }
- assertEquals(1, req.returnValues().get(0).asInt32(), req.toString());
- assertEquals("2 of 3 nodes agree 1 is master.", req.returnValues().get(1).asString(), req.toString());
+ assertTrue(fleetControllers.get(1).isMaster());
}
@Test
@@ -517,4 +443,22 @@ public class MasterElectionTest extends FleetControllerTest {
waitForStateInAllSpaces("version:\\d+ distributor:10 storage:10");
}
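+ // Waits until none of the given fleet controllers reports being master, failing if that does not happen within timeout().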
+ private void waitForNoMaster(List<Integer> nodes) {
+ Instant endTime = Instant.now().plus(timeout());
+ while (Instant.now().isBefore(endTime)) {
+ boolean allOk = true;
+ for (int node : nodes) {
+ if (fleetControllers.get(node).isMaster()) { // a master still exists; keep waiting until there is none
+ allOk = false;
+ break;
+ }
+ }
+ if (allOk) return;
+ try {
+ Thread.sleep(100);
+ } catch (InterruptedException e) { /* ignore */ }
+ }
+ throw new IllegalStateException("Did not reach a state with no master within timeout of " + timeout());
+ }
+
}
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NoZooKeeperTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NoZooKeeperTest.java
deleted file mode 100644
index 3d3a38aacd4..00000000000
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NoZooKeeperTest.java
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.clustercontroller.core;
-
-import org.junit.jupiter.api.Test;
-
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-public class NoZooKeeperTest extends FleetControllerTest {
-
- @Test
- void testWantedStatesInZooKeeper() throws Exception {
- // Null is the default for zooKeeperServerAddress
- FleetControllerOptions.Builder builder = defaultOptions();
- Timer timer = new FakeTimer();
- setUpFleetController(timer, builder);
- setUpVdsNodes(timer);
- waitForStableSystem();
-
- assertTrue(nodes.get(0).isDistributor());
- nodes.get(0).disconnect();
- waitForState("version:\\d+ distributor:10 .0.s:d storage:10");
-
- nodes.get(0).connect();
- waitForState("version:\\d+ distributor:10 storage:10");
- }
-}
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NodeSlobrokConfigurationMembershipTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NodeSlobrokConfigurationMembershipTest.java
index 2c77767d6b4..e432efc1447 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NodeSlobrokConfigurationMembershipTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NodeSlobrokConfigurationMembershipTest.java
@@ -30,7 +30,7 @@ public class NodeSlobrokConfigurationMembershipTest extends FleetControllerTest
}
private FleetControllerOptions.Builder optionsForConfiguredNodes(Set<ConfiguredNode> configuredNodes) {
- return defaultOptions("mycluster", configuredNodes)
+ return defaultOptions(configuredNodes)
.setMaxSlobrokDisconnectGracePeriod(60 * 1000)
.setNodeStateRequestTimeoutMS(10000 * 60 * 1000)
.setMaxTransitionTime(NodeType.DISTRIBUTOR, 0)
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/RpcServerTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/RpcServerTest.java
index cc9c3f84de3..82422762e88 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/RpcServerTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/RpcServerTest.java
@@ -23,6 +23,7 @@ import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
+
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
@@ -33,7 +34,6 @@ import java.util.stream.Collectors;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
@@ -131,7 +131,7 @@ public class RpcServerTest extends FleetControllerTest {
Set<ConfiguredNode> configuredNodes = new TreeSet<>();
for (int i = 0; i < 10; i++)
configuredNodes.add(new ConfiguredNode(i, false));
- FleetControllerOptions.Builder builder = defaultOptions("mycluster", configuredNodes);
+ FleetControllerOptions.Builder builder = defaultOptions(configuredNodes);
builder.setMinRatioOfStorageNodesUp(0);
builder.setMaxInitProgressTime(30000);
builder.setStableStateTimePeriod(60000);
@@ -224,7 +224,7 @@ public class RpcServerTest extends FleetControllerTest {
for (int i = 0; i < 4; i++)
configuredNodes.add(new ConfiguredNode(i, false));
configuredNodes.add(new ConfiguredNode(4, true)); // Last node is configured retired
- FleetControllerOptions.Builder builder = defaultOptions("mycluster", configuredNodes)
+ FleetControllerOptions.Builder builder = defaultOptions(configuredNodes)
.setMinRatioOfStorageNodesUp(0)
.setMaxInitProgressTime(30000)
.setStableStateTimePeriod(60000);
@@ -257,7 +257,7 @@ public class RpcServerTest extends FleetControllerTest {
List<ConfiguredNode> configuredNodes = new ArrayList<>();
for (int i = 0; i < 5; i++)
configuredNodes.add(new ConfiguredNode(i, false));
- FleetControllerOptions.Builder builder = defaultOptions("mycluster", configuredNodes)
+ FleetControllerOptions.Builder builder = defaultOptions(configuredNodes)
.setMaxInitProgressTime(30000)
.setStableStateTimePeriod(60000);
setUpFleetController(timer, builder);
@@ -281,10 +281,8 @@ public class RpcServerTest extends FleetControllerTest {
configuredNodes.add(new ConfiguredNode(i, true));
configuredNodes.add(new ConfiguredNode(5, false));
configuredNodes.add(new ConfiguredNode(6, false));
- FleetControllerOptions.Builder builder = defaultOptions("mycluster", configuredNodes)
- .setSlobrokConnectionSpecs(this.options.slobrokConnectionSpecs())
- .setMaxInitProgressTime(30000)
- .setStableStateTimePeriod(60000);
+ var builder = FleetControllerOptions.Builder.copy(fleetController().getOptions())
+ .setNodes(configuredNodes);
fleetController().updateOptions(builder.build());
waitForState("version:\\d+ distributor:7 storage:7 .0.s:m .1.s:m .2.s:r .3.s:r .4.s:r");
}
@@ -311,10 +309,8 @@ public class RpcServerTest extends FleetControllerTest {
Set<ConfiguredNode> configuredNodes = new TreeSet<>();
for (int i = 0; i < 7; i++)
configuredNodes.add(new ConfiguredNode(i, false));
- FleetControllerOptions.Builder builder = defaultOptions("mycluster", configuredNodes)
- .setSlobrokConnectionSpecs(this.options.slobrokConnectionSpecs())
- .setMaxInitProgressTime(30000)
- .setStableStateTimePeriod(60000);
+ var builder = FleetControllerOptions.Builder.copy(fleetController().getOptions())
+ .setNodes(configuredNodes);
fleetController().updateOptions(builder.build());
waitForState("version:\\d+ distributor:7 storage:7 .0.s:m .1.s:m");
}
@@ -336,7 +332,7 @@ public class RpcServerTest extends FleetControllerTest {
List<ConfiguredNode> configuredNodes = new ArrayList<>();
for (int i = 0; i < 5; i++)
configuredNodes.add(new ConfiguredNode(i, false));
- FleetControllerOptions.Builder builder = defaultOptions("mycluster", configuredNodes)
+ FleetControllerOptions.Builder builder = defaultOptions(configuredNodes)
.setMaxInitProgressTime(30000)
.setStableStateTimePeriod(60000);
options = builder.build();
@@ -349,10 +345,8 @@ public class RpcServerTest extends FleetControllerTest {
Set<ConfiguredNode> configuredNodes = new TreeSet<>();
for (int i = 0; i < 5; i++)
configuredNodes.add(new ConfiguredNode(i, false));
- FleetControllerOptions.Builder builder = defaultOptions("mycluster", configuredNodes)
- .setSlobrokConnectionSpecs(options.slobrokConnectionSpecs())
- .setMaxInitProgressTime(30000)
- .setStableStateTimePeriod(60000);
+ var builder = FleetControllerOptions.Builder.copy(fleetController().getOptions())
+ .setNodes(configuredNodes);
fleetController().updateOptions(builder.build());
waitForState("version:\\d+ distributor:5 storage:5");
}
@@ -364,10 +358,8 @@ public class RpcServerTest extends FleetControllerTest {
configuredNodes.add(new ConfiguredNode(i, true));
configuredNodes.add(new ConfiguredNode(5, false));
configuredNodes.add(new ConfiguredNode(6, false));
- FleetControllerOptions.Builder builder = defaultOptions("mycluster", configuredNodes)
- .setSlobrokConnectionSpecs(options.slobrokConnectionSpecs())
- .setMaxInitProgressTime(30000)
- .setStableStateTimePeriod(60000);
+ var builder = FleetControllerOptions.Builder.copy(fleetController().getOptions())
+ .setNodes(configuredNodes);
fleetController().updateOptions(builder.build());
waitForState("version:\\d+ distributor:7 storage:7 .0.s:r .1.s:r .2.s:r .3.s:r .4.s:r");
}
@@ -378,10 +370,8 @@ public class RpcServerTest extends FleetControllerTest {
configuredNodes.add(new ConfiguredNode(i, true));
configuredNodes.add(new ConfiguredNode(5, false));
configuredNodes.add(new ConfiguredNode(6, false));
- FleetControllerOptions.Builder builder = defaultOptions("mycluster", configuredNodes)
- .setSlobrokConnectionSpecs(options.slobrokConnectionSpecs())
- .setMaxInitProgressTime(30000)
- .setStableStateTimePeriod(60000);
+ var builder = FleetControllerOptions.Builder.copy(fleetController().getOptions())
+ .setNodes(configuredNodes);
fleetController().updateOptions(builder.build());
waitForState("version:\\d+ distributor:7 storage:7 .0.s:r .1.s:r .2.s:r .3.s:r .4.s:r");
}
@@ -411,7 +401,7 @@ public class RpcServerTest extends FleetControllerTest {
void testSetNodeState() throws Exception {
Set<Integer> nodeIndexes = new TreeSet<>(List.of(4, 6, 9, 10, 14, 16, 21, 22, 23, 25));
Set<ConfiguredNode> configuredNodes = nodeIndexes.stream().map(i -> new ConfiguredNode(i, false)).collect(Collectors.toSet());
- FleetControllerOptions.Builder options = defaultOptions("mycluster", configuredNodes);
+ FleetControllerOptions.Builder options = defaultOptions(configuredNodes);
//options.setStorageDistribution(new Distribution(getDistConfig(nodeIndexes)));
setUpFleetController(timer, options);
setUpVdsNodes(timer, false, nodeIndexes);
@@ -472,69 +462,6 @@ public class RpcServerTest extends FleetControllerTest {
waitForState("version:\\d+ distributor:10 storage:10 .9.s:m");
}
- @Test
- void testGetMaster() throws Exception {
- FleetControllerOptions.Builder options = defaultOptions();
- options.setStorageDistribution(new Distribution(Distribution.getDefaultDistributionConfig(2, 10)));
- setUpFleetController(timer, options);
- setUpVdsNodes(timer);
- waitForStableSystem();
-
- int rpcPort = fleetController().getRpcPort();
- Target connection = supervisor.connect(new Spec("localhost", rpcPort));
- assertTrue(connection.isValid());
-
- Request req = new Request("getMaster");
- connection.invokeSync(req, timeout());
- assertEquals(0, req.returnValues().get(0).asInt32(), req.toString());
- assertEquals("All 1 nodes agree that 0 is current master.", req.returnValues().get(1).asString(), req.toString());
-
- // Note that this feature is tested better in MasterElectionTest.testGetMaster as it has multiple fleetcontrollers
- }
-
- @Test
- void testGetNodeList() throws Exception {
- setUpFleetController(timer, defaultOptions(5));
- final int nodeCount = 5;
- setUpVdsNodes(timer, false, nodeCount);
- waitForStableSystem();
-
- assertTrue(nodes.get(0).isDistributor());
- nodes.get(0).disconnect();
- waitForState("version:\\d+ distributor:5 .0.s:d storage:5");
-
- int rpcPort = fleetController().getRpcPort();
- Target connection = supervisor.connect(new Spec("localhost", rpcPort));
- assertTrue(connection.isValid());
-
- Request req = new Request("getNodeList");
- connection.invokeSync(req, timeout());
- assertEquals(ErrorCode.NONE, req.errorCode(), req.errorMessage());
- assertTrue(req.checkReturnTypes("SS"), req.toString());
- String[] slobrok = req.returnValues().get(0).asStringArray().clone();
- String[] rpc = req.returnValues().get(1).asStringArray().clone();
-
- assertEquals(2 * nodeCount, slobrok.length);
- assertEquals(2 * nodeCount, rpc.length);
-
- // Verify that we can connect to all addresses returned.
- for (int i = 0; i < 2 * nodeCount; ++i) {
- if (slobrok[i].equals("storage/cluster.mycluster/distributor/0")) {
- if (i < nodeCount && !"".equals(rpc[i])) {
- continue;
- }
- assertEquals("", rpc[i], slobrok[i]);
- continue;
- }
- assertNotEquals("", rpc[i]);
- Request req2 = new Request("getnodestate3");
- req2.parameters().add(new StringValue("unknown"));
- Target connection2 = supervisor.connect(new Spec(rpc[i]));
- connection2.invokeSync(req2, timeout());
- assertEquals(ErrorCode.NONE, req.errorCode(), req2.toString());
- }
- }
-
private Request setNodeState(String node, NodeState newNodeState, Target connection) {
return setNodeState(node, newNodeState.serialize(true), connection);
}
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeTest.java
index c0e116ef5fe..f2261794b75 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeTest.java
@@ -31,16 +31,18 @@ public class StateChangeTest extends FleetControllerTest {
private FleetController ctrl;
private DummyCommunicator communicator;
- private void initialize(FleetControllerOptions options) throws Exception {
+ private void initialize(FleetControllerOptions.Builder builder) throws Exception {
List<Node> nodes = new ArrayList<>();
- for (int i = 0; i < options.nodes().size(); ++i) {
+ for (int i = 0; i < builder.nodes().size(); ++i) {
nodes.add(new Node(NodeType.STORAGE, i));
nodes.add(new Node(NodeType.DISTRIBUTOR, i));
}
- var context = new TestFleetControllerContext(options);
+ setUpZooKeeperServer(builder);
communicator = new DummyCommunicator(nodes, timer);
boolean start = false;
+ FleetControllerOptions options = builder.build();
+ var context = new TestFleetControllerContext(options);
ctrl = createFleetController(timer, options, context, communicator, communicator, null, start);
ctrl.tick();
@@ -72,7 +74,7 @@ public class StateChangeTest extends FleetControllerTest {
FleetControllerOptions.Builder options = defaultOptions();
options.setMaxInitProgressTime(50000);
- initialize(options.build());
+ initialize(options);
// Should now pick up previous node states
ctrl.tick();
@@ -95,7 +97,7 @@ public class StateChangeTest extends FleetControllerTest {
// Regular init progress does not update the cluster state until the node is done initializing (or goes down,
// whichever comes first).
- assertEquals("version:6 distributor:10 .0.s:i .0.i:0.0 .1.s:i .1.i:0.0 .2.s:i .2.i:0.0 .3.s:i .3.i:0.0 " +
+ assertEquals("version:5 distributor:10 .0.s:i .0.i:0.0 .1.s:i .1.i:0.0 .2.s:i .2.i:0.0 .3.s:i .3.i:0.0 " +
".4.s:i .4.i:0.0 .5.s:i .5.i:0.0 .6.s:i .6.i:0.0 .7.s:i .7.i:0.0 .8.s:i .8.i:0.0 " +
".9.s:i .9.i:0.0 storage:10 .0.s:i .0.i:0.1 .1.s:i .1.i:0.1 .2.s:i .2.i:0.1 .3.s:i .3.i:0.1 " +
".4.s:i .4.i:0.1 .5.s:i .5.i:0.1 .6.s:i .6.i:0.1 .7.s:i .7.i:0.1 .8.s:i .8.i:0.1 .9.s:i .9.i:0.1",
@@ -118,12 +120,12 @@ public class StateChangeTest extends FleetControllerTest {
timer.advanceTime(options.maxInitProgressTime() / 20);
ctrl.tick();
- assertEquals("version:8 distributor:10 storage:10", ctrl.getSystemState().toString());
+ assertEquals("version:7 distributor:10 storage:10", ctrl.getSystemState().toString());
verifyNodeEvents(new Node(NodeType.DISTRIBUTOR, 0),
"""
Event: distributor.0: Now reporting state U
- Event: distributor.0: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'
+ Event: distributor.0: Altered node state in cluster state from 'D' to 'U'
Event: distributor.0: Now reporting state I, i 0.00
Event: distributor.0: Altered node state in cluster state from 'U' to 'I, i 0.00'
Event: distributor.0: Now reporting state U
@@ -133,7 +135,7 @@ public class StateChangeTest extends FleetControllerTest {
verifyNodeEvents(new Node(NodeType.STORAGE, 0),
"""
Event: storage.0: Now reporting state U
- Event: storage.0: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'
+ Event: storage.0: Altered node state in cluster state from 'D' to 'U'
Event: storage.0: Now reporting state I, i 0.00 (ls)
Event: storage.0: Altered node state in cluster state from 'U' to 'D'
Event: storage.0: Now reporting state I, i 0.100 (read)
@@ -153,7 +155,7 @@ public class StateChangeTest extends FleetControllerTest {
// Two-phase cluster state activation changes this quite a bit, so disable it. At least for now.
.enableTwoPhaseClusterStateActivation(false);
- initialize(builder.build());
+ initialize(builder);
ctrl.tick();
@@ -164,7 +166,7 @@ public class StateChangeTest extends FleetControllerTest {
String desc = ctrl.getReportedNodeState(new Node(NodeType.DISTRIBUTOR, 0)).getDescription();
assertTrue(desc.contains("Closed at other end"), desc);
- assertEquals("version:4 distributor:10 .0.s:d storage:10", ctrl.getSystemState().toString());
+ assertEquals("version:3 distributor:10 .0.s:d storage:10", ctrl.getSystemState().toString());
timer.advanceTime(1000);
@@ -175,7 +177,7 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
- assertEquals("version:5 distributor:10 .0.t:12345678 storage:10 .0.s:m", ctrl.getSystemState().toString());
+ assertEquals("version:4 distributor:10 .0.t:12345678 storage:10 .0.s:m", ctrl.getSystemState().toString());
assert(!ctrl.getReportedNodeState(new Node(NodeType.DISTRIBUTOR, 0)).hasDescription());
desc = ctrl.getReportedNodeState(new Node(NodeType.STORAGE, 0)).getDescription();
@@ -185,7 +187,7 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
- assertEquals("version:6 distributor:10 .0.t:12345678 storage:10 .0.s:d", ctrl.getSystemState().toString());
+ assertEquals("version:5 distributor:10 .0.t:12345678 storage:10 .0.s:d", ctrl.getSystemState().toString());
desc = ctrl.getReportedNodeState(new Node(NodeType.STORAGE, 0)).getDescription();
assertTrue(desc.contains("Closed at other end"), desc);
@@ -198,14 +200,14 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
- assertEquals("version:7 distributor:10 storage:10 .0.t:12345679", ctrl.getSystemState().toString());
+ assertEquals("version:6 distributor:10 storage:10 .0.t:12345679", ctrl.getSystemState().toString());
assert(!ctrl.getReportedNodeState(new Node(NodeType.STORAGE, 0)).hasDescription());
verifyNodeEvents(new Node(NodeType.DISTRIBUTOR, 0),
"""
Event: distributor.0: Now reporting state U
- Event: distributor.0: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'
+ Event: distributor.0: Altered node state in cluster state from 'D' to 'U'
Event: distributor.0: Failed to get node state: D: Closed at other end
Event: distributor.0: Stopped or possibly crashed after 0 ms, which is before stable state time period. Premature crash count is now 1.
Event: distributor.0: Altered node state in cluster state from 'U' to 'D: Closed at other end'
@@ -217,7 +219,7 @@ public class StateChangeTest extends FleetControllerTest {
verifyNodeEvents(new Node(NodeType.STORAGE, 0),
"""
Event: storage.0: Now reporting state U
- Event: storage.0: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'
+ Event: storage.0: Altered node state in cluster state from 'D' to 'U'
Event: storage.0: Failed to get node state: D: Closed at other end
Event: storage.0: Stopped or possibly crashed after 1000 ms, which is before stable state time period. Premature crash count is now 1.
Event: storage.0: Altered node state in cluster state from 'U' to 'M: Closed at other end'
@@ -243,7 +245,7 @@ public class StateChangeTest extends FleetControllerTest {
.setNodeStateRequestTimeoutMS(60 * 60 * 1000)
.setMaxSlobrokDisconnectGracePeriod(100000);
- initialize(builder.build());
+ initialize(builder);
ctrl.tick();
@@ -264,7 +266,7 @@ public class StateChangeTest extends FleetControllerTest {
tick(1000);
- assertEquals("version:5 distributor:10 storage:10 .0.s:m", ctrl.getSystemState().toString());
+ assertEquals("version:4 distributor:10 storage:10 .0.s:m", ctrl.getSystemState().toString());
assert(!ctrl.getReportedNodeState(new Node(NodeType.DISTRIBUTOR, 0)).hasDescription());
desc = ctrl.getReportedNodeState(new Node(NodeType.STORAGE, 0)).getDescription();
@@ -273,7 +275,7 @@ public class StateChangeTest extends FleetControllerTest {
tick(builder.maxTransitionTime().get(NodeType.STORAGE) + 1);
- assertEquals("version:6 distributor:10 storage:10 .0.s:d", ctrl.getSystemState().toString());
+ assertEquals("version:5 distributor:10 storage:10 .0.s:d", ctrl.getSystemState().toString());
desc = ctrl.getReportedNodeState(new Node(NodeType.STORAGE, 0)).getDescription();
assertTrue(desc.contains("Received signal 15 (SIGTERM - Termination signal)")
|| desc.contains("controlled shutdown"), desc);
@@ -282,7 +284,7 @@ public class StateChangeTest extends FleetControllerTest {
tick(1000);
- assertEquals("version:7 distributor:10 storage:10", ctrl.getSystemState().toString());
+ assertEquals("version:6 distributor:10 storage:10", ctrl.getSystemState().toString());
assert(!ctrl.getReportedNodeState(new Node(NodeType.STORAGE, 0)).hasDescription());
assertEquals(0, ctrl.getCluster().getNodeInfo(new Node(NodeType.DISTRIBUTOR, 0)).getPrematureCrashCount());
@@ -291,7 +293,7 @@ public class StateChangeTest extends FleetControllerTest {
verifyNodeEvents(new Node(NodeType.DISTRIBUTOR, 0),
"""
Event: distributor.0: Now reporting state U
- Event: distributor.0: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'
+ Event: distributor.0: Altered node state in cluster state from 'D' to 'U'
Event: distributor.0: Failed to get node state: D: controlled shutdown
Event: distributor.0: Altered node state in cluster state from 'U' to 'D: controlled shutdown'
Event: distributor.0: Now reporting state U
@@ -301,7 +303,7 @@ public class StateChangeTest extends FleetControllerTest {
verifyNodeEvents(new Node(NodeType.STORAGE, 0),
"""
Event: storage.0: Now reporting state U
- Event: storage.0: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'
+ Event: storage.0: Altered node state in cluster state from 'D' to 'U'
Event: storage.0: Failed to get node state: D: controlled shutdown
Event: storage.0: Altered node state in cluster state from 'U' to 'M: controlled shutdown'
Event: storage.0: Exceeded implicit maintenance mode grace period of 5000 milliseconds. Marking node down.
@@ -317,7 +319,7 @@ public class StateChangeTest extends FleetControllerTest {
FleetControllerOptions.Builder builder = defaultOptions()
.setMaxSlobrokDisconnectGracePeriod(60 * 1000);
- initialize(builder.build());
+ initialize(builder);
ctrl.tick();
@@ -333,7 +335,7 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
ctrl.tick();
- assertEquals("version:3 distributor:10 storage:10", ctrl.getSystemState().toString());
+ assertEquals("version:2 distributor:10 storage:10", ctrl.getSystemState().toString());
nodes = new ArrayList<>();
for (int i = 0; i < 10; ++i) {
@@ -345,12 +347,12 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
- assertEquals("version:3 distributor:10 storage:10", ctrl.getSystemState().toString());
+ assertEquals("version:2 distributor:10 storage:10", ctrl.getSystemState().toString());
verifyNodeEvents(new Node(NodeType.STORAGE, 0),
"""
Event: storage.0: Now reporting state U
- Event: storage.0: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'
+ Event: storage.0: Altered node state in cluster state from 'D' to 'U'
Event: storage.0: Node is no longer in slobrok, but we still have a pending state request.
""");
}
@@ -360,13 +362,13 @@ public class StateChangeTest extends FleetControllerTest {
FleetControllerOptions.Builder builder = defaultOptions()
.setMaxSlobrokDisconnectGracePeriod(60 * 1000);
- initialize(builder.build());
+ initialize(builder);
communicator.setNodeState(new Node(NodeType.STORAGE, 6), State.DOWN, "Connection error: Closed at other end");
ctrl.tick();
- assertEquals("version:4 distributor:10 storage:10 .6.s:m", ctrl.getSystemState().toString());
+ assertEquals("version:3 distributor:10 storage:10 .6.s:m", ctrl.getSystemState().toString());
NodeState ns = ctrl.getReportedNodeState(new Node(NodeType.STORAGE, 6));
assertTrue(ns.getDescription().contains("Connection error: Closed at other end"), ns.toString());
@@ -378,14 +380,14 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
// Still maintenance since .i progress 0.0 is really down.
- assertEquals("version:4 distributor:10 storage:10 .6.s:m", ctrl.getSystemState().toString());
+ assertEquals("version:3 distributor:10 storage:10 .6.s:m", ctrl.getSystemState().toString());
communicator.setNodeState(new Node(NodeType.STORAGE, 6), new NodeState(NodeType.STORAGE, State.INITIALIZING).setInitProgress(0.6f), "");
ctrl.tick();
// Now it's OK
- assertEquals("version:5 distributor:10 storage:10 .6.s:i .6.i:0.6", ctrl.getSystemState().toString());
+ assertEquals("version:4 distributor:10 storage:10 .6.s:i .6.i:0.6", ctrl.getSystemState().toString());
tick(1000);
@@ -393,13 +395,13 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
- assertEquals("version:6 distributor:10 storage:10", ctrl.getSystemState().toString());
+ assertEquals("version:5 distributor:10 storage:10", ctrl.getSystemState().toString());
assert(!ctrl.getReportedNodeState(new Node(NodeType.STORAGE, 6)).hasDescription());
verifyNodeEvents(new Node(NodeType.STORAGE, 6),
"""
Event: storage.6: Now reporting state U
- Event: storage.6: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'
+ Event: storage.6: Altered node state in cluster state from 'D' to 'U'
Event: storage.6: Failed to get node state: D: Connection error: Closed at other end
Event: storage.6: Stopped or possibly crashed after 0 ms, which is before stable state time period. Premature crash count is now 1.
Event: storage.6: Altered node state in cluster state from 'U' to 'M: Connection error: Closed at other end'
@@ -419,16 +421,16 @@ public class StateChangeTest extends FleetControllerTest {
nodes.add(new ConfiguredNode(i, retired));
}
- FleetControllerOptions.Builder builder = defaultOptions("mycluster", nodes)
+ FleetControllerOptions.Builder builder = defaultOptions(nodes)
.setMaxSlobrokDisconnectGracePeriod(60 * 1000);
- initialize(builder.build());
+ initialize(builder);
communicator.setNodeState(new Node(NodeType.STORAGE, 6), State.DOWN, "Connection error: Closed at other end");
ctrl.tick();
- assertEquals("version:4 distributor:10 storage:10 .6.s:m", ctrl.getSystemState().toString());
+ assertEquals("version:3 distributor:10 storage:10 .6.s:m", ctrl.getSystemState().toString());
NodeState ns = ctrl.getReportedNodeState(new Node(NodeType.STORAGE, 6));
assertTrue(ns.getDescription().contains("Connection error: Closed at other end"), ns.toString());
@@ -440,14 +442,14 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
// Still maintenance since .i progress 0.0 is really down.
- assertEquals("version:4 distributor:10 storage:10 .6.s:m", ctrl.getSystemState().toString());
+ assertEquals("version:3 distributor:10 storage:10 .6.s:m", ctrl.getSystemState().toString());
communicator.setNodeState(new Node(NodeType.STORAGE, 6), new NodeState(NodeType.STORAGE, State.INITIALIZING).setInitProgress(0.6f), "");
ctrl.tick();
// Still maintenance since configured.
- assertEquals("version:4 distributor:10 storage:10 .6.s:m", ctrl.getSystemState().toString());
+ assertEquals("version:3 distributor:10 storage:10 .6.s:m", ctrl.getSystemState().toString());
tick(1000);
@@ -455,13 +457,13 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
- assertEquals("version:5 distributor:10 storage:10 .6.s:r", ctrl.getSystemState().toString());
+ assertEquals("version:4 distributor:10 storage:10 .6.s:r", ctrl.getSystemState().toString());
assert(!ctrl.getReportedNodeState(new Node(NodeType.STORAGE, 6)).hasDescription());
verifyNodeEvents(new Node(NodeType.STORAGE, 6),
"""
Event: storage.6: Now reporting state U
- Event: storage.6: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'R'
+ Event: storage.6: Altered node state in cluster state from 'D' to 'R'
Event: storage.6: Failed to get node state: D: Connection error: Closed at other end
Event: storage.6: Stopped or possibly crashed after 0 ms, which is before stable state time period. Premature crash count is now 1.
Event: storage.6: Altered node state in cluster state from 'R' to 'M: Connection error: Closed at other end'
@@ -480,21 +482,21 @@ public class StateChangeTest extends FleetControllerTest {
nodes.add(new ConfiguredNode(i, retired));
}
- FleetControllerOptions.Builder builder = defaultOptions("mycluster", nodes)
+ FleetControllerOptions.Builder builder = defaultOptions(nodes)
.setMaxSlobrokDisconnectGracePeriod(60 * 1000);
- initialize(builder.build());
+ initialize(builder);
communicator.setNodeState(new Node(NodeType.STORAGE, 6), State.DOWN, "Connection error: Closed at other end");
ctrl.tick();
- assertEquals("version:4 distributor:10 storage:10 .6.s:m", ctrl.getSystemState().toString());
+ assertEquals("version:3 distributor:10 storage:10 .6.s:m", ctrl.getSystemState().toString());
timer.advanceTime(100000);
ctrl.tick();
- assertEquals("version:5 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString());
+ assertEquals("version:4 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString());
}
// Test that a node that has been down for a long time (above steady state period), actually alters cluster state to
@@ -509,7 +511,7 @@ public class StateChangeTest extends FleetControllerTest {
.setNodeStateRequestTimeoutMS(1000000)
.setMaxSlobrokDisconnectGracePeriod(1000000);
- initialize(builder.build());
+ initialize(builder);
timer.advanceTime(100000); // Node has been in steady state up
ctrl.tick();
@@ -518,40 +520,40 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
- assertEquals("version:4 distributor:10 storage:10 .6.s:m", ctrl.getSystemState().toString());
+ assertEquals("version:3 distributor:10 storage:10 .6.s:m", ctrl.getSystemState().toString());
timer.advanceTime(100000); // Node has been in steady state down
ctrl.tick();
- assertEquals("version:5 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString());
+ assertEquals("version:4 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString());
communicator.setNodeState(new Node(NodeType.STORAGE, 6), new NodeState(NodeType.STORAGE, State.INITIALIZING).setInitProgress(0.001f), "");
ctrl.tick();
- assertEquals("version:5 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString());
+ assertEquals("version:4 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString());
communicator.setNodeState(new Node(NodeType.STORAGE, 6), new NodeState(NodeType.STORAGE, State.INITIALIZING).setInitProgress(0.1f), "");
ctrl.tick();
- assertEquals("version:6 distributor:10 storage:10 .6.s:i .6.i:0.1", ctrl.getSystemState().toString());
+ assertEquals("version:5 distributor:10 storage:10 .6.s:i .6.i:0.1", ctrl.getSystemState().toString());
ctrl.tick();
- assertEquals("version:6 distributor:10 storage:10 .6.s:i .6.i:0.1", ctrl.getSystemState().toString());
+ assertEquals("version:5 distributor:10 storage:10 .6.s:i .6.i:0.1", ctrl.getSystemState().toString());
communicator.setNodeState(new Node(NodeType.STORAGE, 6), new NodeState(NodeType.STORAGE, State.UP), "");
ctrl.tick();
- assertEquals("version:7 distributor:10 storage:10", ctrl.getSystemState().toString());
+ assertEquals("version:6 distributor:10 storage:10", ctrl.getSystemState().toString());
verifyNodeEvents(new Node(NodeType.STORAGE, 6),
"""
Event: storage.6: Now reporting state U
- Event: storage.6: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'
+ Event: storage.6: Altered node state in cluster state from 'D' to 'U'
Event: storage.6: Failed to get node state: D: Connection error: Closed at other end
Event: storage.6: Altered node state in cluster state from 'U' to 'M: Connection error: Closed at other end'
Event: storage.6: Exceeded implicit maintenance mode grace period of 5000 milliseconds. Marking node down.
@@ -573,7 +575,7 @@ public class StateChangeTest extends FleetControllerTest {
.setStableStateTimePeriod(1000000)
.setMaxSlobrokDisconnectGracePeriod(10000000);
- initialize(builder.build());
+ initialize(builder);
timer.advanceTime(1000000); // Node has been in steady state up
@@ -583,26 +585,26 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
- assertEquals("version:4 distributor:10 storage:10 .6.s:m", ctrl.getSystemState().toString());
+ assertEquals("version:3 distributor:10 storage:10 .6.s:m", ctrl.getSystemState().toString());
timer.advanceTime(1000000); // Node has been in steady state down
ctrl.tick();
- assertEquals("version:5 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString());
+ assertEquals("version:4 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString());
communicator.setNodeState(new Node(NodeType.STORAGE, 6), new NodeState(NodeType.STORAGE, State.INITIALIZING).setInitProgress(0.1f), "");
ctrl.tick();
- assertEquals("version:6 distributor:10 storage:10 .6.s:i .6.i:0.1", ctrl.getSystemState().toString());
+ assertEquals("version:5 distributor:10 storage:10 .6.s:i .6.i:0.1", ctrl.getSystemState().toString());
timer.advanceTime(builder.maxInitProgressTime() + 1);
ctrl.tick();
// We should now get the node marked down.
- assertEquals("version:7 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString());
+ assertEquals("version:6 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString());
tick(1000);
@@ -621,7 +623,7 @@ public class StateChangeTest extends FleetControllerTest {
tick(1000);
// Still down since it seemingly crashed during last init.
- assertEquals("version:7 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString());
+ assertEquals("version:6 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString());
ctrl.tick();
@@ -629,12 +631,12 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
- assertEquals("version:8 distributor:10 storage:10", ctrl.getSystemState().toString());
+ assertEquals("version:7 distributor:10 storage:10", ctrl.getSystemState().toString());
verifyNodeEvents(new Node(NodeType.STORAGE, 6),
"""
Event: storage.6: Now reporting state U
- Event: storage.6: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'
+ Event: storage.6: Altered node state in cluster state from 'D' to 'U'
Event: storage.6: Failed to get node state: D: Connection error: Closed at other end
Event: storage.6: Altered node state in cluster state from 'U' to 'M: Connection error: Closed at other end'
Event: storage.6: Exceeded implicit maintenance mode grace period of 5000 milliseconds. Marking node down.
@@ -662,7 +664,7 @@ public class StateChangeTest extends FleetControllerTest {
// Set long so we don't time out RPC requests and mark nodes down due to advancing time to get in steady state
builder.setNodeStateRequestTimeoutMS((int) builder.stableStateTimePeriod() * 2);
- initialize(builder.build());
+ initialize(builder);
timer.advanceTime(1000000); // Node has been in steady state up
@@ -672,19 +674,19 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
- assertEquals("version:4 distributor:10 storage:10 .6.s:m", ctrl.getSystemState().toString());
+ assertEquals("version:3 distributor:10 storage:10 .6.s:m", ctrl.getSystemState().toString());
timer.advanceTime(1000000); // Node has been in steady state down
ctrl.tick();
- assertEquals("version:5 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString());
+ assertEquals("version:4 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString());
communicator.setNodeState(new Node(NodeType.STORAGE, 6), new NodeState(NodeType.STORAGE, State.INITIALIZING).setInitProgress(0.3f), "");
ctrl.tick();
- assertEquals("version:6 distributor:10 storage:10 .6.s:i .6.i:0.3", ctrl.getSystemState().toString());
+ assertEquals("version:5 distributor:10 storage:10 .6.s:i .6.i:0.3", ctrl.getSystemState().toString());
ctrl.tick();
@@ -692,7 +694,7 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
- assertEquals("version:7 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString());
+ assertEquals("version:6 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString());
}
@Test
@@ -705,7 +707,7 @@ public class StateChangeTest extends FleetControllerTest {
// Set very high so the advanceTime don't start sending state replies right before we disconnect.
.setNodeStateRequestTimeoutMS(365 * 24 * 60 * 1000);
- initialize(builder.build());
+ initialize(builder);
timer.advanceTime(1000000); // Node has been in steady state up
@@ -715,13 +717,13 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
- assertEquals("version:4 distributor:10 storage:10 .6.s:m", ctrl.getSystemState().toString());
+ assertEquals("version:3 distributor:10 storage:10 .6.s:m", ctrl.getSystemState().toString());
timer.advanceTime(1000000); // Node has been in steady state down
ctrl.tick();
- assertEquals("version:5 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString());
+ assertEquals("version:4 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString());
ctrl.tick();
@@ -729,7 +731,7 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
- assertEquals("version:6 distributor:10 storage:10 .6.s:i .6.i:0.3", ctrl.getSystemState().toString());
+ assertEquals("version:5 distributor:10 storage:10 .6.s:i .6.i:0.3", ctrl.getSystemState().toString());
ctrl.tick();
@@ -737,7 +739,7 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
- assertEquals("version:7 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString());
+ assertEquals("version:6 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString());
tick(1000);
@@ -745,13 +747,13 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
- assertEquals("version:7 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString());
+ assertEquals("version:6 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString());
communicator.setNodeState(new Node(NodeType.STORAGE, 6), State.UP, "");
ctrl.tick();
- assertEquals("version:8 distributor:10 storage:10", ctrl.getSystemState().toString());
+ assertEquals("version:7 distributor:10 storage:10", ctrl.getSystemState().toString());
}
@Test
@@ -764,7 +766,7 @@ public class StateChangeTest extends FleetControllerTest {
.setStableStateTimePeriod(1000000)
.setMaxSlobrokDisconnectGracePeriod(10000000);
- initialize(builder.build());
+ initialize(builder);
timer.advanceTime(1000000); // Node has been in steady state up
@@ -774,13 +776,13 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
- assertEquals("version:4 distributor:10 storage:10 .6.s:m", ctrl.getSystemState().toString());
+ assertEquals("version:3 distributor:10 storage:10 .6.s:m", ctrl.getSystemState().toString());
timer.advanceTime(1000000); // Node has been in steady state down
ctrl.tick();
- assertEquals("version:5 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString());
+ assertEquals("version:4 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString());
for (int j = 0; j <= builder.maxPrematureCrashes(); ++j) {
ctrl.tick();
@@ -804,7 +806,7 @@ public class StateChangeTest extends FleetControllerTest {
tick(1000);
}
- assertEquals("version:7 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString());
+ assertEquals("version:6 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString());
}
@Test
@@ -818,13 +820,13 @@ public class StateChangeTest extends FleetControllerTest {
.setMinRatioOfDistributorNodesUp(0.0)
.setMinRatioOfStorageNodesUp(0.0);
- initialize(builder.build());
+ initialize(builder);
timer.advanceTime(1000000); // Node has been in steady state up
ctrl.tick();
- assertEquals("version:3 distributor:10 storage:10", ctrl.getSystemState().toString());
+ assertEquals("version:2 distributor:10 storage:10", ctrl.getSystemState().toString());
communicator.setNodeState(new Node(NodeType.DISTRIBUTOR, 0), State.DOWN, "Connection error: Closed at other end");
communicator.setNodeState(new Node(NodeType.DISTRIBUTOR, 1), State.DOWN, "Connection error: Closed at other end");
@@ -836,13 +838,13 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
- assertEquals("version:4 distributor:10 .0.s:d .1.s:d .2.s:d .3.s:d storage:10 .0.s:d .1.s:d", ctrl.getSystemState().toString());
+ assertEquals("version:3 distributor:10 .0.s:d .1.s:d .2.s:d .3.s:d storage:10 .0.s:d .1.s:d", ctrl.getSystemState().toString());
communicator.setNodeState(new Node(NodeType.DISTRIBUTOR, 4), State.DOWN, "Connection error: Closed at other end");
ctrl.tick();
- assertEquals("version:5 cluster:d distributor:10 .0.s:d .1.s:d .2.s:d .3.s:d .4.s:d storage:10 .0.s:d .1.s:d", ctrl.getSystemState().toString());
+ assertEquals("version:4 cluster:d distributor:10 .0.s:d .1.s:d .2.s:d .3.s:d .4.s:d storage:10 .0.s:d .1.s:d", ctrl.getSystemState().toString());
tick(1000);
@@ -850,7 +852,7 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
- assertEquals("version:6 distributor:10 .0.s:d .1.s:d .2.s:d .3.s:d storage:10 .0.s:d .1.s:d", ctrl.getSystemState().toString());
+ assertEquals("version:5 distributor:10 .0.s:d .1.s:d .2.s:d .3.s:d storage:10 .0.s:d .1.s:d", ctrl.getSystemState().toString());
tick(1000);
@@ -858,7 +860,7 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
- assertEquals("version:7 cluster:d distributor:10 .0.s:d .1.s:d .2.s:d .3.s:d storage:10 .0.s:d .1.s:d .2.s:d", ctrl.getSystemState().toString());
+ assertEquals("version:6 cluster:d distributor:10 .0.s:d .1.s:d .2.s:d .3.s:d storage:10 .0.s:d .1.s:d .2.s:d", ctrl.getSystemState().toString());
}
@Test
@@ -872,13 +874,13 @@ public class StateChangeTest extends FleetControllerTest {
options.setMinRatioOfDistributorNodesUp(0.6);
options.setMinRatioOfStorageNodesUp(0.8);
- initialize(options.build());
+ initialize(options);
timer.advanceTime(1000000); // Node has been in steady state up
ctrl.tick();
- assertEquals("version:3 distributor:10 storage:10", ctrl.getSystemState().toString());
+ assertEquals("version:2 distributor:10 storage:10", ctrl.getSystemState().toString());
communicator.setNodeState(new Node(NodeType.DISTRIBUTOR, 0), State.DOWN, "Connection error: Closed at other end");
communicator.setNodeState(new Node(NodeType.DISTRIBUTOR, 1), State.DOWN, "Connection error: Closed at other end");
@@ -890,13 +892,13 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
- assertEquals("version:4 distributor:10 .0.s:d .1.s:d .2.s:d .3.s:d storage:10 .0.s:d .1.s:d", ctrl.getSystemState().toString());
+ assertEquals("version:3 distributor:10 .0.s:d .1.s:d .2.s:d .3.s:d storage:10 .0.s:d .1.s:d", ctrl.getSystemState().toString());
communicator.setNodeState(new Node(NodeType.DISTRIBUTOR, 4), State.DOWN, "Connection error: Closed at other end");
ctrl.tick();
- assertEquals("version:5 cluster:d distributor:10 .0.s:d .1.s:d .2.s:d .3.s:d .4.s:d storage:10 .0.s:d .1.s:d", ctrl.getSystemState().toString());
+ assertEquals("version:4 cluster:d distributor:10 .0.s:d .1.s:d .2.s:d .3.s:d .4.s:d storage:10 .0.s:d .1.s:d", ctrl.getSystemState().toString());
tick(1000);
@@ -904,7 +906,7 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
- assertEquals("version:6 distributor:10 .0.s:d .1.s:d .2.s:d .3.s:d storage:10 .0.s:d .1.s:d", ctrl.getSystemState().toString());
+ assertEquals("version:5 distributor:10 .0.s:d .1.s:d .2.s:d .3.s:d storage:10 .0.s:d .1.s:d", ctrl.getSystemState().toString());
tick(1000);
@@ -912,7 +914,7 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
- assertEquals("version:7 cluster:d distributor:10 .0.s:d .1.s:d .2.s:d .3.s:d storage:10 .0.s:d .1.s:d .2.s:d", ctrl.getSystemState().toString());
+ assertEquals("version:6 cluster:d distributor:10 .0.s:d .1.s:d .2.s:d .3.s:d storage:10 .0.s:d .1.s:d .2.s:d", ctrl.getSystemState().toString());
}
/**
@@ -1044,7 +1046,7 @@ public class StateChangeTest extends FleetControllerTest {
FleetControllerOptions.Builder options = defaultOptions();
options.setDistributionBits(17);
- initialize(options.build());
+ initialize(options);
timer.advanceTime(1000000); // Node has been in steady state up
@@ -1054,7 +1056,7 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
- assertEquals("version:4 bits:15 distributor:10 storage:10", ctrl.getSystemState().toString());
+ assertEquals("version:3 bits:15 distributor:10 storage:10", ctrl.getSystemState().toString());
tick(1000);
@@ -1062,7 +1064,7 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
- assertEquals("version:5 bits:13 distributor:10 storage:10", ctrl.getSystemState().toString());
+ assertEquals("version:4 bits:13 distributor:10 storage:10", ctrl.getSystemState().toString());
tick(1000);
setMinUsedBitsForAllNodes(16);
@@ -1070,13 +1072,13 @@ public class StateChangeTest extends FleetControllerTest {
// Don't increase dist bits until we've reached at least the wanted
// level, in order to avoid multiple full redistributions of data.
- assertEquals("version:5 bits:13 distributor:10 storage:10", ctrl.getSystemState().toString());
+ assertEquals("version:4 bits:13 distributor:10 storage:10", ctrl.getSystemState().toString());
tick(1000);
setMinUsedBitsForAllNodes(19);
ctrl.tick();
- assertEquals("version:6 bits:17 distributor:10 storage:10", ctrl.getSystemState().toString());
+ assertEquals("version:5 bits:17 distributor:10 storage:10", ctrl.getSystemState().toString());
}
private void setMinUsedBitsForAllNodes(int bits) {
@@ -1140,16 +1142,16 @@ public class StateChangeTest extends FleetControllerTest {
options.setMaxTransitionTime(NodeType.STORAGE, 0);
options.setMinStorageNodesUp(10);
options.setMinDistributorNodesUp(10);
- initialize(options.build());
+ initialize(options);
ctrl.tick();
- assertThat(ctrl.consolidatedClusterState().toString(), equalTo("version:3 distributor:10 storage:10"));
+ assertThat(ctrl.consolidatedClusterState().toString(), equalTo("version:2 distributor:10 storage:10"));
communicator.setNodeState(new Node(NodeType.STORAGE, 2), State.DOWN, "foo");
ctrl.tick();
assertThat(ctrl.consolidatedClusterState().toString(),
- equalTo("version:4 cluster:d distributor:10 storage:10 .2.s:d"));
+ equalTo("version:3 cluster:d distributor:10 storage:10 .2.s:d"));
// After this point, any further node changes while the cluster is still down won't be published.
// This is because cluster state similarity checks are short-circuited if both are Down, as no other parts
@@ -1162,7 +1164,7 @@ public class StateChangeTest extends FleetControllerTest {
// NOTE: _same_ version, different node state content. Overall cluster down-state is still the same.
assertThat(ctrl.consolidatedClusterState().toString(),
- equalTo("version:4 cluster:d distributor:10 storage:10 .2.s:d .5.s:d"));
+ equalTo("version:3 cluster:d distributor:10 storage:10 .2.s:d .5.s:d"));
}
// Related to the above test, watchTimer invocations must receive the _current_ state and not the
@@ -1174,7 +1176,7 @@ public class StateChangeTest extends FleetControllerTest {
options.setMaxTransitionTime(NodeType.STORAGE, 1000);
options.setMinStorageNodesUp(10);
options.setMinDistributorNodesUp(10);
- initialize(options.build());
+ initialize(options);
ctrl.tick();
communicator.setNodeState(new Node(NodeType.STORAGE, 2), State.DOWN, "foo");
@@ -1182,7 +1184,7 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
communicator.setNodeState(new Node(NodeType.STORAGE, 3), State.DOWN, "foo");
ctrl.tick();
- assertThat(ctrl.consolidatedClusterState().toString(), equalTo("version:4 cluster:d distributor:10 storage:10 .2.s:m .3.s:m"));
+ assertThat(ctrl.consolidatedClusterState().toString(), equalTo("version:3 cluster:d distributor:10 storage:10 .2.s:m .3.s:m"));
// Subsequent timer tick should _not_ trigger additional events. Providing published state
// only would result in "Marking node down" events for node 2 emitted per tick.
@@ -1194,7 +1196,7 @@ public class StateChangeTest extends FleetControllerTest {
verifyNodeEvents(new Node(NodeType.STORAGE, 2),
"""
Event: storage.2: Now reporting state U
- Event: storage.2: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'
+ Event: storage.2: Altered node state in cluster state from 'D' to 'U'
Event: storage.2: Failed to get node state: D: foo
Event: storage.2: Stopped or possibly crashed after 500 ms, which is before stable state time period. Premature crash count is now 1.
Event: storage.2: Altered node state in cluster state from 'U' to 'M: foo'
@@ -1208,7 +1210,7 @@ public class StateChangeTest extends FleetControllerTest {
@Test
void do_not_emit_multiple_events_when_node_state_does_not_match_versioned_state() throws Exception {
FleetControllerOptions.Builder options = defaultOptions();
- initialize(options.build());
+ initialize(options);
ctrl.tick();
communicator.setNodeState(
@@ -1240,7 +1242,7 @@ public class StateChangeTest extends FleetControllerTest {
verifyNodeEvents(new Node(NodeType.STORAGE, 2),
"""
Event: storage.2: Now reporting state U
- Event: storage.2: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'
+ Event: storage.2: Altered node state in cluster state from 'D' to 'U'
Event: storage.2: Now reporting state I, i 0.100 (read)
Event: storage.2: Altered node state in cluster state from 'U' to 'I, i 0.100 (read)'
Event: storage.2: Altered min distribution bit count from 16 to 17
@@ -1307,7 +1309,7 @@ public class StateChangeTest extends FleetControllerTest {
// TODO ideally we'd break this out so it doesn't depend on fields in the parent test instance, but
// fleet controller tests have a _lot_ of state, so risk of duplicating a lot of that...
class RemoteTaskFixture {
- RemoteTaskFixture(FleetControllerOptions options) throws Exception {
+ RemoteTaskFixture(FleetControllerOptions.Builder options) throws Exception {
initialize(options);
ctrl.tick();
}
@@ -1392,12 +1394,12 @@ public class StateChangeTest extends FleetControllerTest {
return options;
}
- private RemoteTaskFixture createFixtureWith(FleetControllerOptions options) throws Exception {
+ private RemoteTaskFixture createFixtureWith(FleetControllerOptions.Builder options) throws Exception {
return new RemoteTaskFixture(options);
}
private RemoteTaskFixture createDefaultFixture() throws Exception {
- return new RemoteTaskFixture(defaultOptions().build());
+ return new RemoteTaskFixture(defaultOptions());
}
@Test
@@ -1430,7 +1432,7 @@ public class StateChangeTest extends FleetControllerTest {
@Test
void no_op_synchronous_remote_task_can_complete_immediately_if_current_state_already_acked() throws Exception {
- RemoteTaskFixture fixture = createFixtureWith(optionsWithZeroTransitionTime().build());
+ RemoteTaskFixture fixture = createFixtureWith(optionsWithZeroTransitionTime());
fixture.markStorageNodeDown(0);
MockTask task = fixture.scheduleNoOpVersionDependentTask(); // Tries to set node 0 into Down; already in that state
@@ -1443,7 +1445,7 @@ public class StateChangeTest extends FleetControllerTest {
@Test
void no_op_synchronous_remote_task_waits_until_current_state_is_acked() throws Exception {
- RemoteTaskFixture fixture = createFixtureWith(optionsWithZeroTransitionTime().build());
+ RemoteTaskFixture fixture = createFixtureWith(optionsWithZeroTransitionTime());
communicator.setShouldDeferDistributorClusterStateAcks(true);
fixture.markStorageNodeDown(0);
@@ -1467,7 +1469,7 @@ public class StateChangeTest extends FleetControllerTest {
// the cluster down-state to have been published.
@Test
void immediately_complete_sync_remote_task_when_cluster_is_down() throws Exception {
- RemoteTaskFixture fixture = createFixtureWith(optionsAllowingZeroNodesDown().build());
+ RemoteTaskFixture fixture = createFixtureWith(optionsAllowingZeroNodesDown());
// Controller options require 10/10 nodes up, so take one down to trigger a cluster Down edge.
fixture.markStorageNodeDown(1);
MockTask task = fixture.scheduleVersionDependentTaskWithSideEffects();
@@ -1501,7 +1503,7 @@ public class StateChangeTest extends FleetControllerTest {
void synchronous_task_immediately_failed_when_leadership_lost() throws Exception {
FleetControllerOptions.Builder options = optionsWithZeroTransitionTime();
options.setCount(3);
- RemoteTaskFixture fixture = createFixtureWith(options.build());
+ RemoteTaskFixture fixture = createFixtureWith(options);
fixture.winLeadership();
markAllNodesAsUp(options.build());
@@ -1526,7 +1528,7 @@ public class StateChangeTest extends FleetControllerTest {
void cluster_state_ack_is_not_dependent_on_state_send_grace_period() throws Exception {
FleetControllerOptions.Builder options = defaultOptions();
options.setMinTimeBetweenNewSystemStates(10_000);
- RemoteTaskFixture fixture = createFixtureWith(options.build());
+ RemoteTaskFixture fixture = createFixtureWith(options);
// Have to increment timer here to be able to send state generated by the scheduled task
timer.advanceTime(10_000);
@@ -1546,11 +1548,10 @@ public class StateChangeTest extends FleetControllerTest {
void synchronous_task_immediately_answered_when_not_leader() throws Exception {
FleetControllerOptions.Builder builder = optionsWithZeroTransitionTime();
builder.setCount(3);
- var options = builder.build();
- RemoteTaskFixture fixture = createFixtureWith(options);
+ RemoteTaskFixture fixture = createFixtureWith(builder);
fixture.loseLeadership();
- markAllNodesAsUp(options);
+ markAllNodesAsUp(ctrl.getOptions());
MockTask task = fixture.scheduleVersionDependentTaskWithSideEffects();
@@ -1562,7 +1563,7 @@ public class StateChangeTest extends FleetControllerTest {
void task_not_completed_within_deadline_is_failed_with_deadline_exceeded_error() throws Exception {
FleetControllerOptions.Builder builder = defaultOptions();
builder.setMaxDeferredTaskVersionWaitTime(Duration.ofSeconds(60));
- RemoteTaskFixture fixture = createFixtureWith(builder.build());
+ RemoteTaskFixture fixture = createFixtureWith(builder);
MockTask task = fixture.scheduleVersionDependentTaskWithSideEffects();
communicator.setShouldDeferDistributorClusterStateAcks(true);
@@ -1588,7 +1589,7 @@ public class StateChangeTest extends FleetControllerTest {
options.setMaxDeferredTaskVersionWaitTime(Duration.ofSeconds(60));
options.enableTwoPhaseClusterStateActivation(deferredActivation);
options.setMaxDivergentNodesPrintedInTaskErrorMessages(10);
- RemoteTaskFixture fixture = createFixtureWith(options.build());
+ RemoteTaskFixture fixture = createFixtureWith(options);
MockTask task = fixture.scheduleVersionDependentTaskWithSideEffects();
communicator.setShouldDeferDistributorClusterStateAcks(true);
@@ -1610,14 +1611,14 @@ public class StateChangeTest extends FleetControllerTest {
@Test
void task_not_completed_within_deadline_lists_nodes_not_converged_in_error_message() throws Exception {
doTestTaskDeadlineExceeded(false, "the following nodes have not converged to " +
- "at least version 4: distributor.0, distributor.1, distributor.2, distributor.3, " +
+ "at least version 3: distributor.0, distributor.1, distributor.2, distributor.3, " +
"distributor.4, distributor.5, distributor.6, distributor.7, distributor.8, distributor.9");
}
@Test
void task_not_completed_within_deadline_with_deferred_activation_checks_activation_version() throws Exception {
doTestTaskDeadlineExceeded(true, "the following nodes have not converged to " +
- "at least version 4: distributor.0, distributor.1, distributor.2, distributor.3, " +
+ "at least version 3: distributor.0, distributor.1, distributor.2, distributor.3, " +
"distributor.4, distributor.5, distributor.6, distributor.7, distributor.8, distributor.9 " +
"(... and 10 more)");
}
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/rpc/RPCCommunicatorTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/rpc/RPCCommunicatorTest.java
index b533168e61a..1018515cbfa 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/rpc/RPCCommunicatorTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/rpc/RPCCommunicatorTest.java
@@ -76,10 +76,11 @@ public class RPCCommunicatorTest {
@Test
void testGenerateNodeStateRequestTimeoutMsWithUpdates() {
final RPCCommunicator communicator = new RPCCommunicator(RPCCommunicator.createRealSupervisor(), null /* Timer */, INDEX, 1, 1, 100, 0);
- FleetControllerOptions.Builder builder = new FleetControllerOptions.Builder(null /*clustername*/, Set.of(new ConfiguredNode(0, false)));
- builder.setNodeStateRequestTimeoutEarliestPercentage(100);
- builder.setNodeStateRequestTimeoutLatestPercentage(100);
- builder.setNodeStateRequestTimeoutMS(NODE_STATE_REQUEST_TIMEOUT_INTERVAL_MAX_MS);
+ FleetControllerOptions.Builder builder = new FleetControllerOptions.Builder(null /*clustername*/, Set.of(new ConfiguredNode(0, false)))
+ .setNodeStateRequestTimeoutEarliestPercentage(100)
+ .setNodeStateRequestTimeoutLatestPercentage(100)
+ .setNodeStateRequestTimeoutMS(NODE_STATE_REQUEST_TIMEOUT_INTERVAL_MAX_MS)
+ .setZooKeeperServerAddress("localhost:2181");
communicator.propagateOptions(builder.build());
long timeOutMs = communicator.generateNodeStateRequestTimeout().toMillis();
assertEquals(timeOutMs, NODE_STATE_REQUEST_TIMEOUT_INTERVAL_MAX_MS);
diff --git a/clustercontroller-utils/pom.xml b/clustercontroller-utils/pom.xml
index 381a4c88946..a572ec4c14e 100644
--- a/clustercontroller-utils/pom.xml
+++ b/clustercontroller-utils/pom.xml
@@ -46,6 +46,9 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
+ <configuration>
+ <suppressWarningMissingImportPackages>true</suppressWarningMissingImportPackages>
+ </configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
diff --git a/component/pom.xml b/component/pom.xml
index e8cee594066..7c2c7e68a54 100755
--- a/component/pom.xml
+++ b/component/pom.xml
@@ -67,6 +67,10 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
+ <configuration>
+ <bundleType>CORE</bundleType>
+ <suppressWarningMissingImportPackages>true</suppressWarningMissingImportPackages>
+ </configuration>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
diff --git a/config-application-package/pom.xml b/config-application-package/pom.xml
index dc2c8d91db1..ce6dc3987df 100644
--- a/config-application-package/pom.xml
+++ b/config-application-package/pom.xml
@@ -116,6 +116,9 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
+ <configuration>
+ <suppressWarningMissingImportPackages>true</suppressWarningMissingImportPackages>
+ </configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
diff --git a/config-bundle/pom.xml b/config-bundle/pom.xml
index a1770537a3e..bf0721ff7e2 100644
--- a/config-bundle/pom.xml
+++ b/config-bundle/pom.xml
@@ -70,6 +70,10 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
+ <configuration>
+ <bundleType>CORE</bundleType>
+ <suppressWarningMissingImportPackages>true</suppressWarningMissingImportPackages>
+ </configuration>
</plugin>
</plugins>
</build>
diff --git a/config-lib/pom.xml b/config-lib/pom.xml
index 66603deeb38..2a1bc734956 100644
--- a/config-lib/pom.xml
+++ b/config-lib/pom.xml
@@ -69,6 +69,10 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
+ <configuration>
+ <bundleType>CORE</bundleType>
+ <suppressWarningMissingImportPackages>true</suppressWarningMissingImportPackages>
+ </configuration>
</plugin>
<plugin>
<groupId>com.yahoo.vespa</groupId>
diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/Bcp.java b/config-model-api/src/main/java/com/yahoo/config/application/api/Bcp.java
index 7464373df9e..bfd39fb66a5 100644
--- a/config-model-api/src/main/java/com/yahoo/config/application/api/Bcp.java
+++ b/config-model-api/src/main/java/com/yahoo/config/application/api/Bcp.java
@@ -6,6 +6,7 @@ import java.time.Duration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
@@ -87,6 +88,19 @@ public class Bcp {
public static Bcp empty() { return empty; }
@Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ Bcp bcp = (Bcp) o;
+ return defaultDeadline.equals(bcp.defaultDeadline) && groups.equals(bcp.groups);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(defaultDeadline, groups);
+ }
+
+ @Override
public String toString() {
if (isEmpty()) return "empty BCP";
return "BCP of " +
@@ -117,6 +131,19 @@ public class Bcp {
public Duration deadline() { return deadline; }
@Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ Group group = (Group) o;
+ return members.equals(group.members) && memberRegions.equals(group.memberRegions) && deadline.equals(group.deadline);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(members, memberRegions, deadline);
+ }
+
+ @Override
public String toString() {
return "BCP group of " + members;
}
diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/DeploymentInstanceSpec.java b/config-model-api/src/main/java/com/yahoo/config/application/api/DeploymentInstanceSpec.java
index bd5056deec6..a4be547fe70 100644
--- a/config-model-api/src/main/java/com/yahoo/config/application/api/DeploymentInstanceSpec.java
+++ b/config-model-api/src/main/java/com/yahoo/config/application/api/DeploymentInstanceSpec.java
@@ -1,8 +1,10 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.application.api;
+import ai.vespa.validation.Validation;
import com.yahoo.config.provision.AthenzService;
import com.yahoo.config.provision.CloudAccount;
+import com.yahoo.config.provision.CloudName;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.InstanceName;
@@ -31,6 +33,7 @@ import static ai.vespa.validation.Validation.requireAtLeast;
import static ai.vespa.validation.Validation.requireInRange;
import static com.yahoo.config.application.api.DeploymentSpec.RevisionChange.whenClear;
import static com.yahoo.config.application.api.DeploymentSpec.RevisionTarget.next;
+import static com.yahoo.config.application.api.DeploymentSpec.illegal;
import static com.yahoo.config.provision.Environment.prod;
/**
@@ -57,7 +60,8 @@ public class DeploymentInstanceSpec extends DeploymentSpec.Steps {
private final List<DeploymentSpec.ChangeBlocker> changeBlockers;
private final Optional<String> globalServiceId;
private final Optional<AthenzService> athenzService;
- private final Optional<CloudAccount> cloudAccount;
+ private final Map<CloudName, CloudAccount> cloudAccounts;
+ private final Optional<Duration> hostTTL;
private final Notifications notifications;
private final List<Endpoint> endpoints;
private final Map<ClusterSpec.Id, Map<ZoneId, ZoneEndpoint>> zoneEndpoints;
@@ -74,7 +78,8 @@ public class DeploymentInstanceSpec extends DeploymentSpec.Steps {
List<DeploymentSpec.ChangeBlocker> changeBlockers,
Optional<String> globalServiceId,
Optional<AthenzService> athenzService,
- Optional<CloudAccount> cloudAccount,
+ Map<CloudName, CloudAccount> cloudAccounts,
+ Optional<Duration> hostTTL,
Notifications notifications,
List<Endpoint> endpoints,
Map<ClusterSpec.Id, Map<ZoneId, ZoneEndpoint>> zoneEndpoints,
@@ -97,7 +102,8 @@ public class DeploymentInstanceSpec extends DeploymentSpec.Steps {
this.changeBlockers = Objects.requireNonNull(changeBlockers);
this.globalServiceId = Objects.requireNonNull(globalServiceId);
this.athenzService = Objects.requireNonNull(athenzService);
- this.cloudAccount = Objects.requireNonNull(cloudAccount);
+ this.cloudAccounts = Map.copyOf(cloudAccounts);
+ this.hostTTL = Objects.requireNonNull(hostTTL);
this.notifications = Objects.requireNonNull(notifications);
this.endpoints = List.copyOf(Objects.requireNonNull(endpoints));
Map<ClusterSpec.Id, Map<ZoneId, ZoneEndpoint>> zoneEndpointsCopy = new HashMap<>();
@@ -108,6 +114,7 @@ public class DeploymentInstanceSpec extends DeploymentSpec.Steps {
validateEndpoints(globalServiceId, this.endpoints);
validateChangeBlockers(changeBlockers, now);
validateBcp(bcp);
+ hostTTL.filter(Duration::isNegative).ifPresent(ttl -> illegal("Host TTL cannot be negative"));
}
public InstanceName name() { return name; }
@@ -257,16 +264,25 @@ public class DeploymentInstanceSpec extends DeploymentSpec.Steps {
.filter(zone -> zone.concerns(environment, Optional.of(region)))
.findFirst()
.flatMap(DeploymentSpec.DeclaredZone::athenzService)
- .or(() -> this.athenzService);
+ .or(() -> athenzService);
}
- /** Returns the cloud account to use for given environment and region, if any */
- public Optional<CloudAccount> cloudAccount(Environment environment, Optional<RegionName> region) {
+ /** Returns the cloud accounts to use for given environment and region, if any */
+ public Map<CloudName, CloudAccount> cloudAccounts(Environment environment, RegionName region) {
+ return zones().stream()
+ .filter(zone -> zone.concerns(environment, Optional.of(region)))
+ .findFirst()
+ .map(DeploymentSpec.DeclaredZone::cloudAccounts)
+ .orElse(cloudAccounts);
+ }
+
+ /** Returns the host TTL to use for given environment and region, if any */
+ public Optional<Duration> hostTTL(Environment environment, Optional<RegionName> region) {
return zones().stream()
.filter(zone -> zone.concerns(environment, region))
.findFirst()
- .flatMap(DeploymentSpec.DeclaredZone::cloudAccount)
- .or(() -> cloudAccount);
+ .flatMap(DeploymentSpec.DeclaredZone::hostTTL)
+ .or(() -> hostTTL);
}
/** Returns the notification configuration of these instances */
@@ -315,22 +331,27 @@ public class DeploymentInstanceSpec extends DeploymentSpec.Steps {
steps().equals(other.steps()) &&
athenzService.equals(other.athenzService) &&
notifications.equals(other.notifications) &&
- endpoints.equals(other.endpoints);
+ endpoints.equals(other.endpoints) &&
+ zoneEndpoints.equals(other.zoneEndpoints) &&
+ bcp.equals(other.bcp) &&
+ tags.equals(other.tags);
}
@Override
public int hashCode() {
- return Objects.hash(globalServiceId, upgradePolicy, revisionTarget, upgradeRollout, changeBlockers, steps(), athenzService, notifications, endpoints);
+ return Objects.hash(globalServiceId, upgradePolicy, revisionTarget, upgradeRollout, changeBlockers, steps(), athenzService, notifications, endpoints, zoneEndpoints, bcp, tags);
}
int deployableHashCode() {
List<DeploymentSpec.DeclaredZone> zones = zones().stream().filter(zone -> zone.concerns(prod)).toList();
- Object[] toHash = new Object[zones.size() + 4];
+ Object[] toHash = new Object[zones.size() + 6];
int i = 0;
toHash[i++] = name;
toHash[i++] = endpoints;
+ toHash[i++] = zoneEndpoints;
toHash[i++] = globalServiceId;
toHash[i++] = tags;
+ toHash[i++] = bcp;
for (DeploymentSpec.DeclaredZone zone : zones)
toHash[i++] = Objects.hash(zone, zone.athenzService());
diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/DeploymentSpec.java b/config-model-api/src/main/java/com/yahoo/config/application/api/DeploymentSpec.java
index 1f44e599e11..f355a61fa8a 100644
--- a/config-model-api/src/main/java/com/yahoo/config/application/api/DeploymentSpec.java
+++ b/config-model-api/src/main/java/com/yahoo/config/application/api/DeploymentSpec.java
@@ -1,11 +1,13 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.application.api;
+import ai.vespa.validation.Validation;
import com.yahoo.collections.Comparables;
import com.yahoo.config.application.api.xml.DeploymentSpecXmlReader;
import com.yahoo.config.provision.AthenzDomain;
import com.yahoo.config.provision.AthenzService;
import com.yahoo.config.provision.CloudAccount;
+import com.yahoo.config.provision.CloudName;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.InstanceName;
@@ -44,6 +46,7 @@ public class DeploymentSpec {
Optional.empty(),
Optional.empty(),
Optional.empty(),
+ Map.of(),
Optional.empty(),
List.of(),
"<deployment version='1.0'/>",
@@ -55,7 +58,8 @@ public class DeploymentSpec {
private final Optional<Integer> majorVersion;
private final Optional<AthenzDomain> athenzDomain;
private final Optional<AthenzService> athenzService;
- private final Optional<CloudAccount> cloudAccount;
+ private final Map<CloudName, CloudAccount> cloudAccounts;
+ private final Optional<Duration> hostTTL;
private final List<Endpoint> endpoints;
private final List<DeprecatedElement> deprecatedElements;
@@ -65,7 +69,8 @@ public class DeploymentSpec {
Optional<Integer> majorVersion,
Optional<AthenzDomain> athenzDomain,
Optional<AthenzService> athenzService,
- Optional<CloudAccount> cloudAccount,
+ Map<CloudName, CloudAccount> cloudAccounts,
+ Optional<Duration> hostTTL,
List<Endpoint> endpoints,
String xmlForm,
List<DeprecatedElement> deprecatedElements) {
@@ -73,7 +78,8 @@ public class DeploymentSpec {
this.majorVersion = Objects.requireNonNull(majorVersion);
this.athenzDomain = Objects.requireNonNull(athenzDomain);
this.athenzService = Objects.requireNonNull(athenzService);
- this.cloudAccount = Objects.requireNonNull(cloudAccount);
+ this.cloudAccounts = Map.copyOf(cloudAccounts);
+ this.hostTTL = Objects.requireNonNull(hostTTL);
this.xmlForm = Objects.requireNonNull(xmlForm);
this.endpoints = List.copyOf(Objects.requireNonNull(endpoints));
this.deprecatedElements = List.copyOf(Objects.requireNonNull(deprecatedElements));
@@ -81,6 +87,7 @@ public class DeploymentSpec {
validateUpgradePoliciesOfIncreasingConservativeness(steps);
validateAthenz();
validateApplicationEndpoints();
+ hostTTL.filter(Duration::isNegative).ifPresent(ttl -> illegal("Host TTL cannot be negative"));
}
public boolean isEmpty() { return this == empty; }
@@ -180,8 +187,33 @@ public class DeploymentSpec {
// to have environment, instance or region variants on those.
public Optional<AthenzService> athenzService() { return athenzService; }
- /** Cloud account set on the deployment root; see discussion for {@link #athenzService}. */
- public Optional<CloudAccount> cloudAccount() { return cloudAccount; }
+ /** The most specific Athenz service for the given arguments. */
+ public Optional<AthenzService> athenzService(InstanceName instance, Environment environment, RegionName region) {
+ return instance(instance).flatMap(spec -> spec.athenzService(environment, region))
+ .or(this::athenzService);
+ }
+
+ /** The most specific Cloud account for the given arguments. */
+ public CloudAccount cloudAccount(CloudName cloud, InstanceName instance, ZoneId zone) {
+ return instance(instance).map(spec -> spec.cloudAccounts(zone.environment(), zone.region()))
+ .orElse(cloudAccounts)
+ .getOrDefault(cloud, CloudAccount.empty);
+ }
+
+ public Map<CloudName, CloudAccount> cloudAccounts() { return cloudAccounts; }
+
+ /**
+ * Additional host time-to-live for this application. Requires a custom cloud account to be set.
+ * This also applies only to zones with dynamic provisioning, and is then the time hosts are
+ * allowed to remain empty before being deprovisioned. This is useful for applications which frequently
+ * deploy to, e.g., test and staging zones, and want to avoid the delay of having to provision hosts.
+ */
+ public Optional<Duration> hostTTL(InstanceName instance, Environment environment, RegionName region) {
+ return instance(instance).flatMap(spec -> spec.hostTTL(environment, Optional.of(region)))
+ .or(this::hostTTL);
+ }
+
+ public Optional<Duration> hostTTL() { return hostTTL; }
/**
* Returns the most specific zone endpoint, where specificity is given, in decreasing order:
@@ -262,7 +294,7 @@ public class DeploymentSpec {
}
- private static void illegal(String message) {
+ static void illegal(String message) {
throw new IllegalArgumentException(message);
}
@@ -370,6 +402,8 @@ public class DeploymentSpec {
return true;
}
+ public Optional<Duration> hostTTL() { return Optional.empty(); }
+
}
/** A deployment step which is to wait for some time before progressing to the next step */
@@ -402,25 +436,28 @@ public class DeploymentSpec {
private final boolean active;
private final Optional<AthenzService> athenzService;
private final Optional<String> testerFlavor;
- private final Optional<CloudAccount> cloudAccount;
+ private final Map<CloudName, CloudAccount> cloudAccounts;
+ private final Optional<Duration> hostTTL;
public DeclaredZone(Environment environment) {
- this(environment, Optional.empty(), false, Optional.empty(), Optional.empty(), Optional.empty());
+ this(environment, Optional.empty(), false, Optional.empty(), Optional.empty(), Map.of(), Optional.empty());
}
public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active,
Optional<AthenzService> athenzService, Optional<String> testerFlavor,
- Optional<CloudAccount> cloudAccount) {
+ Map<CloudName, CloudAccount> cloudAccounts, Optional<Duration> hostTTL) {
if (environment != Environment.prod && region.isPresent())
illegal("Non-prod environments cannot specify a region");
if (environment == Environment.prod && region.isEmpty())
illegal("Prod environments must be specified with a region");
+ hostTTL.filter(Duration::isNegative).ifPresent(ttl -> illegal("Host TTL cannot be negative"));
this.environment = Objects.requireNonNull(environment);
this.region = Objects.requireNonNull(region);
this.active = active;
this.athenzService = Objects.requireNonNull(athenzService);
this.testerFlavor = Objects.requireNonNull(testerFlavor);
- this.cloudAccount = Objects.requireNonNull(cloudAccount);
+ this.cloudAccounts = Map.copyOf(cloudAccounts);
+ this.hostTTL = Objects.requireNonNull(hostTTL);
}
public Environment environment() { return environment; }
@@ -433,11 +470,9 @@ public class DeploymentSpec {
public Optional<String> testerFlavor() { return testerFlavor; }
- public Optional<AthenzService> athenzService() { return athenzService; }
+ Optional<AthenzService> athenzService() { return athenzService; }
- public Optional<CloudAccount> cloudAccount() {
- return cloudAccount;
- }
+ Map<CloudName, CloudAccount> cloudAccounts() { return cloudAccounts; }
@Override
public List<DeclaredZone> zones() { return List.of(this); }
@@ -472,15 +507,23 @@ public class DeploymentSpec {
return environment + (region.map(regionName -> "." + regionName).orElse(""));
}
+ @Override
+ public Optional<Duration> hostTTL() {
+ return hostTTL;
+ }
+
}
/** A declared production test */
public static class DeclaredTest extends Step {
private final RegionName region;
+ private final Optional<Duration> hostTTL;
- public DeclaredTest(RegionName region) {
+ public DeclaredTest(RegionName region, Optional<Duration> hostTTL) {
this.region = Objects.requireNonNull(region);
+ this.hostTTL = Objects.requireNonNull(hostTTL);
+ hostTTL.filter(Duration::isNegative).ifPresent(ttl -> illegal("Host TTL cannot be negative"));
}
@Override
@@ -497,6 +540,11 @@ public class DeploymentSpec {
}
@Override
+ public Optional<Duration> hostTTL() {
+ return hostTTL;
+ }
+
+ @Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
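Note on the host-TTL API introduced in this file: the `empty-host-ttl` value resolves from the most specific declaration outward, i.e. declared zone (or production test), then instance, then the deployment root. The following is a minimal, hedged sketch of that lookup using only the public methods visible in this diff; the class name and the deployment.xml literal are illustrative, not taken from the repository.

// Hypothetical sketch: resolving empty-host-ttl with DeploymentSpec.hostTTL(...) as added above.
import com.yahoo.config.application.api.DeploymentSpec;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.RegionName;

public class HostTtlSketch {

    public static void main(String[] args) {
        DeploymentSpec spec = DeploymentSpec.fromXml(
                """
                <deployment version='1.0' cloud-account='100000000000' empty-host-ttl='1h'>
                  <instance id='default'>
                    <prod>
                      <region empty-host-ttl='2m'>us-west-1</region>
                      <region>us-east-3</region>
                    </prod>
                  </instance>
                </deployment>
                """);
        InstanceName instance = InstanceName.from("default");
        // Region-level declaration wins where present: prints Optional[PT2M].
        System.out.println(spec.hostTTL(instance, Environment.prod, RegionName.from("us-west-1")));
        // With no region- or instance-level value, the deployment root applies: prints Optional[PT1H].
        System.out.println(spec.hostTTL(instance, Environment.prod, RegionName.from("us-east-3")));
    }
}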
diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/xml/DeploymentSpecXmlReader.java b/config-model-api/src/main/java/com/yahoo/config/application/api/xml/DeploymentSpecXmlReader.java
index 89373d8bca0..38562eefb03 100644
--- a/config-model-api/src/main/java/com/yahoo/config/application/api/xml/DeploymentSpecXmlReader.java
+++ b/config-model-api/src/main/java/com/yahoo/config/application/api/xml/DeploymentSpecXmlReader.java
@@ -25,6 +25,7 @@ import com.yahoo.config.application.api.TimeWindow;
import com.yahoo.config.provision.AthenzDomain;
import com.yahoo.config.provision.AthenzService;
import com.yahoo.config.provision.CloudAccount;
+import com.yahoo.config.provision.CloudName;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.InstanceName;
@@ -94,6 +95,7 @@ public class DeploymentSpecXmlReader {
private static final String majorVersionAttribute = "major-version";
private static final String globalServiceIdAttribute = "global-service-id";
private static final String cloudAccountAttribute = "cloud-account";
+ private static final String hostTTLAttribute = "empty-host-ttl";
private final boolean validate;
private final Clock clock;
@@ -164,7 +166,8 @@ public class DeploymentSpecXmlReader {
optionalIntegerAttribute(majorVersionAttribute, root),
stringAttribute(athenzDomainAttribute, root).map(AthenzDomain::from),
stringAttribute(athenzServiceAttribute, root).map(AthenzService::from),
- stringAttribute(cloudAccountAttribute, root).map(CloudAccount::from),
+ readCloudAccounts(root),
+ stringAttribute(hostTTLAttribute, root).map(s -> toDuration(s, "empty host TTL")),
applicationEndpoints,
xmlForm,
deprecatedElements);
@@ -203,7 +206,8 @@ public class DeploymentSpecXmlReader {
int maxIdleHours = getWithFallback(instanceElement, parentTag, upgradeTag, "max-idle-hours", Integer::parseInt, 8);
List<DeploymentSpec.ChangeBlocker> changeBlockers = readChangeBlockers(instanceElement, parentTag);
Optional<AthenzService> athenzService = mostSpecificAttribute(instanceElement, athenzServiceAttribute).map(AthenzService::from);
- Optional<CloudAccount> cloudAccount = mostSpecificAttribute(instanceElement, cloudAccountAttribute).map(CloudAccount::from);
+ Map<CloudName, CloudAccount> cloudAccounts = readCloudAccounts(instanceElement);
+ Optional<Duration> hostTTL = mostSpecificAttribute(instanceElement, hostTTLAttribute).map(s -> toDuration(s, "empty host TTL"));
Notifications notifications = readNotifications(instanceElement, parentTag);
// Values where there is no default
@@ -232,7 +236,8 @@ public class DeploymentSpecXmlReader {
changeBlockers,
Optional.ofNullable(prodAttributes.get(globalServiceIdAttribute)),
athenzService,
- cloudAccount,
+ cloudAccounts,
+ hostTTL,
notifications,
endpoints,
zoneEndpoints,
@@ -258,6 +263,7 @@ public class DeploymentSpecXmlReader {
}
// Consume the given tag as 0-N steps. 0 if it is not a step, >1 if it contains multiple nested steps that should be flattened
+ @SuppressWarnings("fallthrough")
private List<Step> readNonInstanceSteps(Element stepTag, Map<String, String> prodAttributes, Element parentTag, Bcp defaultBcp) {
Optional<AthenzService> athenzService = mostSpecificAttribute(stepTag, athenzServiceAttribute).map(AthenzService::from);
Optional<String> testerFlavor = mostSpecificAttribute(stepTag, testerFlavorAttribute);
@@ -272,12 +278,10 @@ public class DeploymentSpecXmlReader {
case testTag:
if (Stream.iterate(stepTag, Objects::nonNull, Node::getParentNode)
.anyMatch(node -> prodTag.equals(node.getNodeName()))) {
- // A production test
- return List.of(new DeclaredTest(RegionName.from(XML.getValue(stepTag).trim())));
+ return List.of(new DeclaredTest(RegionName.from(XML.getValue(stepTag).trim()), readHostTTL(stepTag))); // A production test
}
- return List.of(new DeclaredZone(Environment.from(stepTag.getTagName()), Optional.empty(), false, athenzService, testerFlavor, readCloudAccount(stepTag)));
- case devTag, perfTag, stagingTag:
- return List.of(new DeclaredZone(Environment.from(stepTag.getTagName()), Optional.empty(), false, athenzService, testerFlavor, readCloudAccount(stepTag)));
+ case devTag, perfTag, stagingTag: // Intentional fallthrough from test tag.
+ return List.of(new DeclaredZone(Environment.from(stepTag.getTagName()), Optional.empty(), false, athenzService, testerFlavor, readCloudAccounts(stepTag), readHostTTL(stepTag)));
case prodTag: // regions, delay and parallel may be nested within, but we can flatten them
return XML.getChildren(stepTag).stream()
.flatMap(child -> readNonInstanceSteps(child, prodAttributes, stepTag, defaultBcp).stream())
@@ -667,8 +671,13 @@ public class DeploymentSpecXmlReader {
/** Returns the given non-blank attribute of tag as a string, if any */
private static Optional<String> stringAttribute(String attributeName, Element tag) {
+ return stringAttribute(attributeName, tag, true);
+ }
+
+ /** Returns the given attribute of tag as a string, if present; blank values are included only when ignoreBlanks is false */
+ private static Optional<String> stringAttribute(String attributeName, Element tag, boolean ignoreBlanks) {
String value = tag.getAttribute(attributeName);
- return Optional.of(value).filter(s -> !s.isBlank());
+ return Optional.of(value).filter(s -> (tag.getAttributeNode(attributeName) != null && ! ignoreBlanks || ! s.isBlank()));
}
/** Returns the given non-blank attribute of tag or throw */
@@ -682,11 +691,27 @@ public class DeploymentSpecXmlReader {
Optional<String> testerFlavor, Element regionTag) {
return new DeclaredZone(environment, Optional.of(RegionName.from(XML.getValue(regionTag).trim())),
readActive(regionTag), athenzService, testerFlavor,
- readCloudAccount(regionTag));
+ readCloudAccounts(regionTag), readHostTTL(regionTag));
+ }
+
+ private Map<CloudName, CloudAccount> readCloudAccounts(Element tag) {
+ return mostSpecificAttribute(tag, cloudAccountAttribute, false)
+ .map(value -> {
+ Map<CloudName, CloudAccount> accounts = new HashMap<>();
+ for (String part : value.split(",")) {
+ CloudAccount account = CloudAccount.from(part);
+ accounts.merge(account.cloudName(), account, (o, n) -> {
+ throw illegal("both '" + o.account() + "' and '" + n.account() + "' " +
+ "are declared for cloud '" + o.cloudName() + "', in '" + value + "'");
+ });
+ }
+ return accounts;
+ })
+ .orElse(Map.of());
}
- private Optional<CloudAccount> readCloudAccount(Element tag) {
- return mostSpecificAttribute(tag, cloudAccountAttribute).map(CloudAccount::from);
+ private Optional<Duration> readHostTTL(Element tag) {
+ return mostSpecificAttribute(tag, hostTTLAttribute).map(s -> toDuration(s, "empty host TTL"));
}
private Optional<String> readGlobalServiceId(Element environmentTag) {
@@ -795,17 +820,22 @@ public class DeploymentSpecXmlReader {
}
/** Returns the given attribute from the given tag or its closest ancestor with the attribute. */
- private static Optional<String> mostSpecificAttribute(Element tag, String attributeName) {
+ private static Optional<String> mostSpecificAttribute(Element tag, String attributeName, boolean ignoreBlanks) {
return Stream.iterate(tag, Objects::nonNull, Node::getParentNode)
.filter(Element.class::isInstance)
.map(Element.class::cast)
- .flatMap(element -> stringAttribute(attributeName, element).stream())
+ .flatMap(element -> stringAttribute(attributeName, element, ignoreBlanks).stream())
.findFirst();
}
+ /** Returns the given attribute from the given tag or its closest ancestor with the attribute. */
+ private static Optional<String> mostSpecificAttribute(Element tag, String attributeName) {
+ return mostSpecificAttribute(tag, attributeName, true);
+ }
+
/**
- * Returns a string consisting of a number followed by "m" or "h" to a duration given in that unit,
- * or zero duration if null of blank.
+ * Converts a string consisting of a number followed by "m", "h" or "d" to a duration given in that unit,
+ * or returns zero duration if null or blank.
*/
private static Duration toDuration(String durationSpec, String sourceDescription) {
try {
@@ -844,7 +874,7 @@ public class DeploymentSpecXmlReader {
}
}
- private static void illegal(String message) {
+ private static IllegalArgumentException illegal(String message) {
throw new IllegalArgumentException(message);
}
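The comma-separated `cloud-account` syntax parsed by readCloudAccounts above yields at most one account per cloud; duplicates for the same cloud are rejected with an IllegalArgumentException. A minimal, hedged sketch of the resulting per-cloud lookup, using only methods and constants that appear in this diff; the class name and the deployment.xml literal are illustrative.

// Hypothetical sketch: per-cloud account resolution via DeploymentSpec.cloudAccount(...).
import com.yahoo.config.application.api.DeploymentSpec;
import com.yahoo.config.provision.CloudName;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.zone.ZoneId;

public class CloudAccountSketch {

    public static void main(String[] args) {
        DeploymentSpec spec = DeploymentSpec.fromXml(
                """
                <deployment version='1.0' cloud-account='100000000000,gcp:foobar'>
                  <instance id='default'>
                    <prod>
                      <region>us-east-1</region>
                    </prod>
                  </instance>
                </deployment>
                """);
        ZoneId zone = ZoneId.from(Environment.prod, RegionName.from("us-east-1"));
        InstanceName instance = InstanceName.from("default");
        // Each cloud resolves to the account declared for it at the root:
        System.out.println(spec.cloudAccount(CloudName.AWS, instance, zone)); // the AWS account, 100000000000
        System.out.println(spec.cloudAccount(CloudName.GCP, instance, zone)); // the GCP account, gcp:foobar
        // A cloud with no declared account falls back to CloudAccount.empty.
    }
}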
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
index 0d42df88d04..a9cbe82895f 100644
--- a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
@@ -144,6 +144,7 @@ public interface ModelContext {
/** Warning: As elsewhere in this package, do not make backwards incompatible changes that will break old config models! */
interface Properties {
+
FeatureFlags featureFlags();
boolean multitenant();
ApplicationId applicationId();
diff --git a/config-model-api/src/test/java/com/yahoo/config/application/api/DeploymentSpecTest.java b/config-model-api/src/test/java/com/yahoo/config/application/api/DeploymentSpecTest.java
index 89b7318739e..d4312a0e54e 100644
--- a/config-model-api/src/test/java/com/yahoo/config/application/api/DeploymentSpecTest.java
+++ b/config-model-api/src/test/java/com/yahoo/config/application/api/DeploymentSpecTest.java
@@ -6,6 +6,7 @@ import com.yahoo.config.application.api.Endpoint.Level;
import com.yahoo.config.application.api.Endpoint.Target;
import com.yahoo.config.application.api.xml.DeploymentSpecXmlReader;
import com.yahoo.config.provision.CloudAccount;
+import com.yahoo.config.provision.CloudName;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.InstanceName;
@@ -25,6 +26,7 @@ import java.time.ZoneId;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
+import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
@@ -32,6 +34,13 @@ import java.util.stream.Collectors;
import static com.yahoo.config.application.api.Notifications.Role.author;
import static com.yahoo.config.application.api.Notifications.When.failing;
import static com.yahoo.config.application.api.Notifications.When.failingCommit;
+import static com.yahoo.config.provision.CloudName.AWS;
+import static com.yahoo.config.provision.CloudName.GCP;
+import static com.yahoo.config.provision.Environment.dev;
+import static com.yahoo.config.provision.Environment.perf;
+import static com.yahoo.config.provision.Environment.prod;
+import static com.yahoo.config.provision.Environment.staging;
+import static com.yahoo.config.provision.Environment.test;
import static com.yahoo.config.provision.zone.ZoneId.defaultId;
import static com.yahoo.config.provision.zone.ZoneId.from;
import static org.junit.Assert.assertEquals;
@@ -60,11 +69,11 @@ public class DeploymentSpecTest {
assertEquals(specXml, spec.xmlForm());
assertEquals(1, spec.requireInstance("default").steps().size());
assertFalse(spec.majorVersion().isPresent());
- assertTrue(spec.requireInstance("default").steps().get(0).concerns(Environment.test));
- assertTrue(spec.requireInstance("default").concerns(Environment.test, Optional.empty()));
- assertTrue(spec.requireInstance("default").concerns(Environment.test, Optional.of(RegionName.from("region1")))); // test steps specify no region
- assertFalse(spec.requireInstance("default").concerns(Environment.staging, Optional.empty()));
- assertFalse(spec.requireInstance("default").concerns(Environment.prod, Optional.empty()));
+ assertTrue(spec.requireInstance("default").steps().get(0).concerns(test));
+ assertTrue(spec.requireInstance("default").concerns(test, Optional.empty()));
+ assertTrue(spec.requireInstance("default").concerns(test, Optional.of(RegionName.from("region1")))); // test steps specify no region
+ assertFalse(spec.requireInstance("default").concerns(staging, Optional.empty()));
+ assertFalse(spec.requireInstance("default").concerns(prod, Optional.empty()));
assertFalse(spec.requireInstance("default").globalServiceId().isPresent());
}
@@ -97,10 +106,10 @@ public class DeploymentSpecTest {
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals(1, spec.steps().size());
assertEquals(1, spec.requireInstance("default").steps().size());
- assertTrue(spec.requireInstance("default").steps().get(0).concerns(Environment.staging));
- assertFalse(spec.requireInstance("default").concerns(Environment.test, Optional.empty()));
- assertTrue(spec.requireInstance("default").concerns(Environment.staging, Optional.empty()));
- assertFalse(spec.requireInstance("default").concerns(Environment.prod, Optional.empty()));
+ assertTrue(spec.requireInstance("default").steps().get(0).concerns(staging));
+ assertFalse(spec.requireInstance("default").concerns(test, Optional.empty()));
+ assertTrue(spec.requireInstance("default").concerns(staging, Optional.empty()));
+ assertFalse(spec.requireInstance("default").concerns(prod, Optional.empty()));
assertFalse(spec.requireInstance("default").globalServiceId().isPresent());
}
@@ -121,17 +130,17 @@ public class DeploymentSpecTest {
assertEquals(1, spec.steps().size());
assertEquals(2, spec.requireInstance("default").steps().size());
- assertTrue(spec.requireInstance("default").steps().get(0).concerns(Environment.prod, Optional.of(RegionName.from("us-east1"))));
+ assertTrue(spec.requireInstance("default").steps().get(0).concerns(prod, Optional.of(RegionName.from("us-east1"))));
assertFalse(((DeploymentSpec.DeclaredZone)spec.requireInstance("default").steps().get(0)).active());
- assertTrue(spec.requireInstance("default").steps().get(1).concerns(Environment.prod, Optional.of(RegionName.from("us-west1"))));
+ assertTrue(spec.requireInstance("default").steps().get(1).concerns(prod, Optional.of(RegionName.from("us-west1"))));
assertTrue(((DeploymentSpec.DeclaredZone)spec.requireInstance("default").steps().get(1)).active());
- assertFalse(spec.requireInstance("default").concerns(Environment.test, Optional.empty()));
- assertFalse(spec.requireInstance("default").concerns(Environment.staging, Optional.empty()));
- assertTrue(spec.requireInstance("default").concerns(Environment.prod, Optional.of(RegionName.from("us-east1"))));
- assertTrue(spec.requireInstance("default").concerns(Environment.prod, Optional.of(RegionName.from("us-west1"))));
- assertFalse(spec.requireInstance("default").concerns(Environment.prod, Optional.of(RegionName.from("no-such-region"))));
+ assertFalse(spec.requireInstance("default").concerns(test, Optional.empty()));
+ assertFalse(spec.requireInstance("default").concerns(staging, Optional.empty()));
+ assertTrue(spec.requireInstance("default").concerns(prod, Optional.of(RegionName.from("us-east1"))));
+ assertTrue(spec.requireInstance("default").concerns(prod, Optional.of(RegionName.from("us-west1"))));
+ assertFalse(spec.requireInstance("default").concerns(prod, Optional.of(RegionName.from("no-such-region"))));
assertFalse(spec.requireInstance("default").globalServiceId().isPresent());
assertEquals(DeploymentSpec.UpgradePolicy.defaultPolicy, spec.requireInstance("default").upgradePolicy());
@@ -293,7 +302,7 @@ public class DeploymentSpecTest {
assertEquals(1, instance2.steps().size());
assertEquals(1, instance2.zones().size());
- assertTrue(instance2.steps().get(0).concerns(Environment.prod, Optional.of(RegionName.from("us-central1"))));
+ assertTrue(instance2.steps().get(0).concerns(prod, Optional.of(RegionName.from("us-central1"))));
}
@Test
@@ -322,25 +331,25 @@ public class DeploymentSpecTest {
assertEquals(5, instance.steps().size());
assertEquals(4, instance.zones().size());
- assertTrue(instance.steps().get(0).concerns(Environment.test));
+ assertTrue(instance.steps().get(0).concerns(test));
- assertTrue(instance.steps().get(1).concerns(Environment.staging));
+ assertTrue(instance.steps().get(1).concerns(staging));
- assertTrue(instance.steps().get(2).concerns(Environment.prod, Optional.of(RegionName.from("us-east1"))));
+ assertTrue(instance.steps().get(2).concerns(prod, Optional.of(RegionName.from("us-east1"))));
assertFalse(((DeploymentSpec.DeclaredZone)instance.steps().get(2)).active());
assertTrue(instance.steps().get(3) instanceof DeploymentSpec.Delay);
assertEquals(3 * 60 * 60 + 30 * 60, instance.steps().get(3).delay().getSeconds());
- assertTrue(instance.steps().get(4).concerns(Environment.prod, Optional.of(RegionName.from("us-west1"))));
+ assertTrue(instance.steps().get(4).concerns(prod, Optional.of(RegionName.from("us-west1"))));
assertTrue(((DeploymentSpec.DeclaredZone)instance.steps().get(4)).active());
- assertTrue(instance.concerns(Environment.test, Optional.empty()));
- assertTrue(instance.concerns(Environment.test, Optional.of(RegionName.from("region1")))); // test steps specify no region
- assertTrue(instance.concerns(Environment.staging, Optional.empty()));
- assertTrue(instance.concerns(Environment.prod, Optional.of(RegionName.from("us-east1"))));
- assertTrue(instance.concerns(Environment.prod, Optional.of(RegionName.from("us-west1"))));
- assertFalse(instance.concerns(Environment.prod, Optional.of(RegionName.from("no-such-region"))));
+ assertTrue(instance.concerns(test, Optional.empty()));
+ assertTrue(instance.concerns(test, Optional.of(RegionName.from("region1")))); // test steps specify no region
+ assertTrue(instance.concerns(staging, Optional.empty()));
+ assertTrue(instance.concerns(prod, Optional.of(RegionName.from("us-east1"))));
+ assertTrue(instance.concerns(prod, Optional.of(RegionName.from("us-west1"))));
+ assertFalse(instance.concerns(prod, Optional.of(RegionName.from("no-such-region"))));
assertFalse(instance.globalServiceId().isPresent());
}
@@ -563,7 +572,7 @@ public class DeploymentSpecTest {
DeploymentInstanceSpec instance = spec.instances().get(0);
assertEquals("default", instance.name().value());
- assertEquals("service", instance.athenzService(Environment.prod, RegionName.defaultName()).get().value());
+ assertEquals("service", instance.athenzService(prod, RegionName.defaultName()).get().value());
}
@Test
@@ -695,9 +704,9 @@ public class DeploymentSpecTest {
List<DeploymentSpec.Step> innerParallelSteps = secondSerialSteps.get(2).steps();
assertEquals(3, innerParallelSteps.size());
assertEquals("prod.ap-northeast-1", innerParallelSteps.get(0).toString());
- assertEquals("no-service", spec.requireInstance("instance").athenzService(Environment.prod, RegionName.from("ap-northeast-1")).get().value());
+ assertEquals("no-service", spec.requireInstance("instance").athenzService(prod, RegionName.from("ap-northeast-1")).get().value());
assertEquals("prod.ap-southeast-2", innerParallelSteps.get(1).toString());
- assertEquals("in-service", spec.requireInstance("instance").athenzService(Environment.prod, RegionName.from("ap-southeast-2")).get().value());
+ assertEquals("in-service", spec.requireInstance("instance").athenzService(prod, RegionName.from("ap-southeast-2")).get().value());
assertEquals("tests for prod.aws-us-east-1a", innerParallelSteps.get(2).toString());
}
@@ -956,7 +965,7 @@ public class DeploymentSpecTest {
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals("domain", spec.athenzDomain().get().value());
assertEquals("service", spec.athenzService().get().value());
- assertEquals("service", spec.requireInstance("instance1").athenzService(Environment.prod,
+ assertEquals("service", spec.requireInstance("instance1").athenzService(prod,
RegionName.from("us-west-1")).get().value());
}
@@ -979,11 +988,11 @@ public class DeploymentSpecTest {
assertEquals("domain", spec.athenzDomain().get().value());
assertEquals("service", spec.athenzService().get().value());
- assertEquals("prod-service", spec.requireInstance("instance1").athenzService(Environment.prod,
+ assertEquals("prod-service", spec.requireInstance("instance1").athenzService(prod,
RegionName.from("us-central-1")).get().value());
- assertEquals("prod-service", spec.requireInstance("instance1").athenzService(Environment.prod,
+ assertEquals("prod-service", spec.requireInstance("instance1").athenzService(prod,
RegionName.from("us-west-1")).get().value());
- assertEquals("prod-service", spec.requireInstance("instance1").athenzService(Environment.prod,
+ assertEquals("prod-service", spec.requireInstance("instance1").athenzService(prod,
RegionName.from("us-east-3")).get().value());
}
@@ -1014,11 +1023,11 @@ public class DeploymentSpecTest {
""";
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals("domain", spec.athenzDomain().get().value());
- assertEquals("service", spec.requireInstance("instance1").athenzService(Environment.prod,
+ assertEquals("service", spec.requireInstance("instance1").athenzService(prod,
RegionName.from("us-west-1")).get().value());
- assertEquals("service", spec.requireInstance("instance1").athenzService(Environment.prod,
+ assertEquals("service", spec.requireInstance("instance1").athenzService(prod,
RegionName.from("us-east-3")).get().value());
- assertEquals("service", spec.requireInstance("instance2").athenzService(Environment.prod,
+ assertEquals("service", spec.requireInstance("instance2").athenzService(prod,
RegionName.from("us-east-3")).get().value());
}
@@ -1036,7 +1045,7 @@ public class DeploymentSpecTest {
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals("domain", spec.athenzDomain().get().value());
assertEquals(Optional.empty(), spec.athenzService());
- assertEquals("service", spec.requireInstance("default").athenzService(Environment.prod, RegionName.from("us-west-1")).get().value());
+ assertEquals("service", spec.requireInstance("default").athenzService(prod, RegionName.from("us-west-1")).get().value());
}
@Test
@@ -1054,13 +1063,13 @@ public class DeploymentSpecTest {
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals("service",
- spec.requireInstance("default").athenzService(Environment.test,
+ spec.requireInstance("default").athenzService(test,
RegionName.from("us-east-1")).get().value());
assertEquals("staging-service",
- spec.requireInstance("default").athenzService(Environment.staging,
+ spec.requireInstance("default").athenzService(staging,
RegionName.from("us-north-1")).get().value());
assertEquals("prod-service",
- spec.requireInstance("default").athenzService(Environment.prod,
+ spec.requireInstance("default").athenzService(prod,
RegionName.from("us-west-1")).get().value());
}
@@ -1273,8 +1282,8 @@ public class DeploymentSpecTest {
assertEquals(List.of(RegionName.from("us-east")), spec.requireInstance("default").endpoints().get(0).regions());
- var zone = from(Environment.prod, RegionName.from("us-east"));
- var testZone = from(Environment.test, RegionName.from("us-east"));
+ var zone = from(prod, RegionName.from("us-east"));
+ var testZone = from(test, RegionName.from("us-east"));
assertEquals(ZoneEndpoint.defaultEndpoint,
spec.zoneEndpoint(InstanceName.from("custom"), zone, ClusterSpec.Id.from("bax")));
assertEquals(ZoneEndpoint.defaultEndpoint,
@@ -1752,18 +1761,19 @@ public class DeploymentSpecTest {
public void cloudAccount() {
String r =
"""
- <deployment version='1.0' cloud-account='100000000000'>
+ <deployment version='1.0' cloud-account='100000000000,gcp:foobar'>
<instance id='alpha'>
<prod cloud-account='800000000000'>
<region>us-east-1</region>
</prod>
</instance>
<instance id='beta' cloud-account='200000000000'>
- <staging cloud-account='600000000000'/>
+ <staging cloud-account='gcp:barbaz'/>
<perf cloud-account='700000000000'/>
<prod>
<region>us-west-1</region>
<region cloud-account='default'>us-west-2</region>
+ <region cloud-account=''>us-west-3</region>
</prod>
</instance>
<instance id='main'>
@@ -1777,22 +1787,103 @@ public class DeploymentSpecTest {
</deployment>
""";
DeploymentSpec spec = DeploymentSpec.fromXml(r);
- assertEquals(Optional.of(CloudAccount.from("100000000000")), spec.cloudAccount());
- assertCloudAccount("800000000000", spec.requireInstance("alpha"), Environment.prod, "us-east-1");
- assertCloudAccount("200000000000", spec.requireInstance("beta"), Environment.prod, "us-west-1");
- assertCloudAccount("600000000000", spec.requireInstance("beta"), Environment.staging, "");
- assertCloudAccount("700000000000", spec.requireInstance("beta"), Environment.perf, "");
- assertCloudAccount("200000000000", spec.requireInstance("beta"), Environment.dev, "");
- assertCloudAccount("300000000000", spec.requireInstance("main"), Environment.prod, "us-east-1");
- assertCloudAccount("100000000000", spec.requireInstance("main"), Environment.prod, "eu-west-1");
- assertCloudAccount("400000000000", spec.requireInstance("main"), Environment.dev, "");
- assertCloudAccount("500000000000", spec.requireInstance("main"), Environment.test, "");
- assertCloudAccount("100000000000", spec.requireInstance("main"), Environment.staging, "");
- assertCloudAccount("default", spec.requireInstance("beta"), Environment.prod, "us-west-2");
- }
-
- private void assertCloudAccount(String expected, DeploymentInstanceSpec instance, Environment environment, String region) {
- assertEquals(Optional.of(expected).map(CloudAccount::from), instance.cloudAccount(environment, Optional.of(region).filter(s -> !s.isEmpty()).map(RegionName::from)));
+ assertEquals(Map.of(AWS, CloudAccount.from("100000000000"),
+ GCP, CloudAccount.from("gcp:foobar")), spec.cloudAccounts());
+ assertCloudAccount("800000000000", spec, AWS, "alpha", prod, "us-east-1");
+ assertCloudAccount("", spec, GCP, "alpha", prod, "us-east-1");
+ assertCloudAccount("200000000000", spec, AWS, "beta", prod, "us-west-1");
+ assertCloudAccount("", spec, AWS, "beta", staging, "default");
+ assertCloudAccount("gcp:barbaz", spec, GCP, "beta", staging, "default");
+ assertCloudAccount("700000000000", spec, AWS, "beta", perf, "default");
+ assertCloudAccount("200000000000", spec, AWS, "beta", dev, "default");
+ assertCloudAccount("300000000000", spec, AWS, "main", prod, "us-east-1");
+ assertCloudAccount("100000000000", spec, AWS, "main", prod, "eu-west-1");
+ assertCloudAccount("400000000000", spec, AWS, "main", dev, "default");
+ assertCloudAccount("500000000000", spec, AWS, "main", test, "default");
+ assertCloudAccount("100000000000", spec, AWS, "main", staging, "default");
+ assertCloudAccount("default", spec, AWS, "beta", prod, "us-west-2");
+ assertCloudAccount("", spec, GCP, "beta", prod, "us-west-2");
+ assertCloudAccount("", spec, AWS, "beta", prod, "us-west-3");
+ assertCloudAccount("", spec, GCP, "beta", prod, "us-west-3");
+ }
+
+ @Test
+ public void hostTTL() {
+ String r =
+ """
+ <deployment version='1.0' cloud-account='100000000000' empty-host-ttl='1h'>
+ <instance id='alpha'>
+ <staging />
+ <prod empty-host-ttl='1m'>
+ <region>us-east</region>
+ <region empty-host-ttl='2m'>us-west</region>
+ <test>us-east</test>
+ <test empty-host-ttl='3m'>us-west</test>
+ </prod>
+ </instance>
+ <instance id='beta'>
+ <staging empty-host-ttl='3d'/>
+ <perf empty-host-ttl='4h'/>
+ <prod>
+ <region>us-east</region>
+ <region empty-host-ttl='0d'>us-west</region>
+ </prod>
+ </instance>
+ <instance id='gamma' empty-host-ttl='6h'>
+ <dev empty-host-ttl='7d'/>
+ <prod>
+ <region>us-east</region>
+ </prod>
+ </instance>
+ </deployment>
+ """;
+ DeploymentSpec spec = DeploymentSpec.fromXml(r);
+ assertEquals(Map.of(AWS, CloudAccount.from("100000000000")), spec.cloudAccounts());
+
+ assertHostTTL(Duration.ofHours(1), spec, "alpha", test, null);
+ assertHostTTL(Duration.ofHours(1), spec, "alpha", staging, null);
+ assertHostTTL(Duration.ofHours(1), spec, "alpha", dev, null);
+ assertHostTTL(Duration.ofHours(1), spec, "alpha", perf, null);
+ assertHostTTL(Duration.ofMinutes(1), spec, "alpha", prod, "us-east");
+ assertHostTTL(Duration.ofMinutes(2), spec, "alpha", prod, "us-west");
+ assertEquals(Optional.of(Duration.ofMinutes(1)), spec.requireInstance("alpha").steps().stream()
+ .filter(step -> step.concerns(prod, Optional.of(RegionName.from("us-east"))) && step.isTest())
+ .findFirst().orElseThrow()
+ .hostTTL());
+ assertEquals(Optional.of(Duration.ofMinutes(3)), spec.requireInstance("alpha").steps().stream()
+ .filter(step -> step.concerns(prod, Optional.of(RegionName.from("us-west"))) && step.isTest())
+ .findFirst().orElseThrow()
+ .hostTTL());
+
+ assertHostTTL(Duration.ofHours(1), spec, "beta", test, null);
+ assertHostTTL(Duration.ofDays(3), spec, "beta", staging, null);
+ assertHostTTL(Duration.ofHours(1), spec, "beta", dev, null);
+ assertHostTTL(Duration.ofHours(4), spec, "beta", perf, null);
+ assertHostTTL(Duration.ofHours(1), spec, "beta", prod, "us-east");
+ assertHostTTL(Duration.ZERO, spec, "beta", prod, "us-west");
+
+ assertHostTTL(Duration.ofHours(6), spec, "gamma", test, null);
+ assertHostTTL(Duration.ofHours(6), spec, "gamma", staging, null);
+ assertHostTTL(Duration.ofDays(7), spec, "gamma", dev, null);
+ assertHostTTL(Duration.ofHours(6), spec, "gamma", perf, null);
+ assertHostTTL(Duration.ofHours(6), spec, "gamma", prod, "us-east");
+ assertHostTTL(Duration.ofHours(6), spec, "gamma", prod, "us-west");
+
+ assertHostTTL(Duration.ofHours(1), spec, "nope", test, null);
+ assertHostTTL(Duration.ofHours(1), spec, "nope", staging, null);
+ assertHostTTL(Duration.ofHours(1), spec, "nope", dev, null);
+ assertHostTTL(Duration.ofHours(1), spec, "nope", perf, null);
+ assertHostTTL(Duration.ofHours(1), spec, "nope", prod, "us-east");
+ assertHostTTL(Duration.ofHours(1), spec, "nope", prod, "us-west");
+ }
+
+ private void assertCloudAccount(String expected, DeploymentSpec spec, CloudName cloud, String instance, Environment environment, String region) {
+ assertEquals(CloudAccount.from(expected),
+ spec.cloudAccount(cloud, InstanceName.from(instance), com.yahoo.config.provision.zone.ZoneId.from(environment, RegionName.from(region))));
+ }
+
+ private void assertHostTTL(Duration expected, DeploymentSpec spec, String instance, Environment environment, String region) {
+ assertEquals(Optional.of(expected), spec.hostTTL(InstanceName.from(instance), environment, region == null ? RegionName.defaultName() : RegionName.from(region)));
}
private static void assertInvalid(String deploymentSpec, String errorMessagePart) {
diff --git a/config-model-api/src/test/java/com/yahoo/config/application/api/DeploymentSpecWithoutInstanceTest.java b/config-model-api/src/test/java/com/yahoo/config/application/api/DeploymentSpecWithoutInstanceTest.java
index 38410cc5b37..e5578723612 100644
--- a/config-model-api/src/test/java/com/yahoo/config/application/api/DeploymentSpecWithoutInstanceTest.java
+++ b/config-model-api/src/test/java/com/yahoo/config/application/api/DeploymentSpecWithoutInstanceTest.java
@@ -18,6 +18,7 @@ import java.time.Instant;
import java.time.ZoneId;
import java.util.Collections;
import java.util.List;
+import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
@@ -25,6 +26,10 @@ import java.util.stream.Collectors;
import static com.yahoo.config.application.api.Notifications.Role.author;
import static com.yahoo.config.application.api.Notifications.When.failing;
import static com.yahoo.config.application.api.Notifications.When.failingCommit;
+import static com.yahoo.config.provision.CloudName.AWS;
+import static com.yahoo.config.provision.Environment.dev;
+import static com.yahoo.config.provision.Environment.prod;
+import static com.yahoo.config.provision.Environment.test;
import static com.yahoo.config.provision.zone.ZoneId.defaultId;
import static com.yahoo.config.provision.zone.ZoneId.from;
import static org.junit.Assert.assertEquals;
@@ -49,11 +54,11 @@ public class DeploymentSpecWithoutInstanceTest {
assertEquals(specXml, spec.xmlForm());
assertEquals(1, spec.steps().size());
assertFalse(spec.majorVersion().isPresent());
- assertTrue(spec.steps().get(0).concerns(Environment.test));
- assertTrue(spec.requireInstance("default").concerns(Environment.test, Optional.empty()));
- assertTrue(spec.requireInstance("default").concerns(Environment.test, Optional.of(RegionName.from("region1")))); // test steps specify no region
+ assertTrue(spec.steps().get(0).concerns(test));
+ assertTrue(spec.requireInstance("default").concerns(test, Optional.empty()));
+ assertTrue(spec.requireInstance("default").concerns(test, Optional.of(RegionName.from("region1")))); // test steps specify no region
assertFalse(spec.requireInstance("default").concerns(Environment.staging, Optional.empty()));
- assertFalse(spec.requireInstance("default").concerns(Environment.prod, Optional.empty()));
+ assertFalse(spec.requireInstance("default").concerns(prod, Optional.empty()));
assertFalse(spec.requireInstance("default").globalServiceId().isPresent());
}
@@ -83,9 +88,9 @@ public class DeploymentSpecWithoutInstanceTest {
assertEquals(1, spec.steps().size());
assertEquals(1, spec.requireInstance("default").steps().size());
assertTrue(spec.requireInstance("default").steps().get(0).concerns(Environment.staging));
- assertFalse(spec.requireInstance("default").concerns(Environment.test, Optional.empty()));
+ assertFalse(spec.requireInstance("default").concerns(test, Optional.empty()));
assertTrue(spec.requireInstance("default").concerns(Environment.staging, Optional.empty()));
- assertFalse(spec.requireInstance("default").concerns(Environment.prod, Optional.empty()));
+ assertFalse(spec.requireInstance("default").concerns(prod, Optional.empty()));
assertFalse(spec.requireInstance("default").globalServiceId().isPresent());
}
@@ -104,17 +109,17 @@ public class DeploymentSpecWithoutInstanceTest {
assertEquals(1, spec.steps().size());
assertEquals(2, spec.requireInstance("default").steps().size());
- assertTrue(spec.requireInstance("default").steps().get(0).concerns(Environment.prod, Optional.of(RegionName.from("us-east1"))));
+ assertTrue(spec.requireInstance("default").steps().get(0).concerns(prod, Optional.of(RegionName.from("us-east1"))));
assertFalse(((DeploymentSpec.DeclaredZone)spec.requireInstance("default").steps().get(0)).active());
- assertTrue(spec.requireInstance("default").steps().get(1).concerns(Environment.prod, Optional.of(RegionName.from("us-west1"))));
+ assertTrue(spec.requireInstance("default").steps().get(1).concerns(prod, Optional.of(RegionName.from("us-west1"))));
assertTrue(((DeploymentSpec.DeclaredZone)spec.requireInstance("default").steps().get(1)).active());
- assertFalse(spec.requireInstance("default").concerns(Environment.test, Optional.empty()));
+ assertFalse(spec.requireInstance("default").concerns(test, Optional.empty()));
assertFalse(spec.requireInstance("default").concerns(Environment.staging, Optional.empty()));
- assertTrue(spec.requireInstance("default").concerns(Environment.prod, Optional.of(RegionName.from("us-east1"))));
- assertTrue(spec.requireInstance("default").concerns(Environment.prod, Optional.of(RegionName.from("us-west1"))));
- assertFalse(spec.requireInstance("default").concerns(Environment.prod, Optional.of(RegionName.from("no-such-region"))));
+ assertTrue(spec.requireInstance("default").concerns(prod, Optional.of(RegionName.from("us-east1"))));
+ assertTrue(spec.requireInstance("default").concerns(prod, Optional.of(RegionName.from("us-west1"))));
+ assertFalse(spec.requireInstance("default").concerns(prod, Optional.of(RegionName.from("no-such-region"))));
assertFalse(spec.requireInstance("default").globalServiceId().isPresent());
assertEquals(DeploymentSpec.UpgradePolicy.defaultPolicy, spec.requireInstance("default").upgradePolicy());
@@ -139,25 +144,25 @@ public class DeploymentSpecWithoutInstanceTest {
assertEquals(5, spec.requireInstance("default").steps().size());
assertEquals(4, spec.requireInstance("default").zones().size());
- assertTrue(spec.requireInstance("default").steps().get(0).concerns(Environment.test));
+ assertTrue(spec.requireInstance("default").steps().get(0).concerns(test));
assertTrue(spec.requireInstance("default").steps().get(1).concerns(Environment.staging));
- assertTrue(spec.requireInstance("default").steps().get(2).concerns(Environment.prod, Optional.of(RegionName.from("us-east1"))));
+ assertTrue(spec.requireInstance("default").steps().get(2).concerns(prod, Optional.of(RegionName.from("us-east1"))));
assertFalse(((DeploymentSpec.DeclaredZone)spec.requireInstance("default").steps().get(2)).active());
assertTrue(spec.requireInstance("default").steps().get(3) instanceof DeploymentSpec.Delay);
assertEquals(3 * 60 * 60 + 30 * 60, spec.requireInstance("default").steps().get(3).delay().getSeconds());
- assertTrue(spec.requireInstance("default").steps().get(4).concerns(Environment.prod, Optional.of(RegionName.from("us-west1"))));
+ assertTrue(spec.requireInstance("default").steps().get(4).concerns(prod, Optional.of(RegionName.from("us-west1"))));
assertTrue(((DeploymentSpec.DeclaredZone)spec.requireInstance("default").steps().get(4)).active());
- assertTrue(spec.requireInstance("default").concerns(Environment.test, Optional.empty()));
- assertTrue(spec.requireInstance("default").concerns(Environment.test, Optional.of(RegionName.from("region1")))); // test steps specify no region
+ assertTrue(spec.requireInstance("default").concerns(test, Optional.empty()));
+ assertTrue(spec.requireInstance("default").concerns(test, Optional.of(RegionName.from("region1")))); // test steps specify no region
assertTrue(spec.requireInstance("default").concerns(Environment.staging, Optional.empty()));
- assertTrue(spec.requireInstance("default").concerns(Environment.prod, Optional.of(RegionName.from("us-east1"))));
- assertTrue(spec.requireInstance("default").concerns(Environment.prod, Optional.of(RegionName.from("us-west1"))));
- assertFalse(spec.requireInstance("default").concerns(Environment.prod, Optional.of(RegionName.from("no-such-region"))));
+ assertTrue(spec.requireInstance("default").concerns(prod, Optional.of(RegionName.from("us-east1"))));
+ assertTrue(spec.requireInstance("default").concerns(prod, Optional.of(RegionName.from("us-west1"))));
+ assertFalse(spec.requireInstance("default").concerns(prod, Optional.of(RegionName.from("no-such-region"))));
assertFalse(spec.requireInstance("default").globalServiceId().isPresent());
}
@@ -436,9 +441,9 @@ public class DeploymentSpecWithoutInstanceTest {
List<DeploymentSpec.Step> innerParallelSteps = secondSerialSteps.get(2).steps();
assertEquals(3, innerParallelSteps.size());
assertEquals("prod.ap-northeast-1", innerParallelSteps.get(0).toString());
- assertEquals("no-service", spec.requireInstance("default").athenzService(Environment.prod, RegionName.from("ap-northeast-1")).get().value());
+ assertEquals("no-service", spec.requireInstance("default").athenzService(prod, RegionName.from("ap-northeast-1")).get().value());
assertEquals("prod.ap-southeast-2", innerParallelSteps.get(1).toString());
- assertEquals("service", spec.requireInstance("default").athenzService(Environment.prod, RegionName.from("ap-southeast-2")).get().value());
+ assertEquals("service", spec.requireInstance("default").athenzService(prod, RegionName.from("ap-southeast-2")).get().value());
assertEquals("tests for prod.aws-us-east-1a", innerParallelSteps.get(2).toString());
}
@@ -534,7 +539,7 @@ public class DeploymentSpecWithoutInstanceTest {
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals(spec.athenzDomain().get().value(), "domain");
- assertEquals(spec.requireInstance("default").athenzService(Environment.prod, RegionName.from("us-west-1")).get().value(), "service");
+ assertEquals(spec.requireInstance("default").athenzService(prod, RegionName.from("us-west-1")).get().value(), "service");
}
@Test
@@ -553,11 +558,11 @@ public class DeploymentSpecWithoutInstanceTest {
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals("domain", spec.athenzDomain().get().value());
assertEquals("service", spec.athenzService().get().value());
- assertEquals("prod-service", spec.requireInstance("default").athenzService(Environment.prod, RegionName.from("us-central-1"))
+ assertEquals("prod-service", spec.requireInstance("default").athenzService(prod, RegionName.from("us-central-1"))
.get().value());
- assertEquals("prod-service", spec.requireInstance("default").athenzService(Environment.prod, RegionName.from("us-west-1"))
+ assertEquals("prod-service", spec.requireInstance("default").athenzService(prod, RegionName.from("us-west-1"))
.get().value());
- assertEquals("prod-service", spec.requireInstance("default").athenzService(Environment.prod, RegionName.from("us-east-3"))
+ assertEquals("prod-service", spec.requireInstance("default").athenzService(prod, RegionName.from("us-east-3"))
.get().value());
}
@@ -575,9 +580,9 @@ public class DeploymentSpecWithoutInstanceTest {
DeploymentSpec spec = DeploymentSpec.fromXml(r);
assertEquals("service", spec.athenzService().get().value());
assertEquals(spec.athenzDomain().get().value(), "domain");
- assertEquals(spec.requireInstance("default").athenzService(Environment.test, RegionName.from("us-east-1")).get().value(), "service");
+ assertEquals(spec.requireInstance("default").athenzService(test, RegionName.from("us-east-1")).get().value(), "service");
assertEquals(spec.requireInstance("default").athenzService(Environment.staging, RegionName.from("us-north-1")).get().value(), "staging-service");
- assertEquals(spec.requireInstance("default").athenzService(Environment.prod, RegionName.from("us-west-1")).get().value(), "prod-service");
+ assertEquals(spec.requireInstance("default").athenzService(prod, RegionName.from("us-west-1")).get().value(), "prod-service");
}
@Test(expected = IllegalArgumentException.class)
@@ -694,7 +699,7 @@ public class DeploymentSpecWithoutInstanceTest {
assertEquals(List.of(RegionName.from("us-east")), spec.requireInstance("default").endpoints().get(0).regions());
- var zone = from(Environment.prod, RegionName.from("us-east"));
+ var zone = from(prod, RegionName.from("us-east"));
assertEquals(ZoneEndpoint.defaultEndpoint,
spec.zoneEndpoint(InstanceName.from("custom"), zone, ClusterSpec.Id.from("bax")));
assertEquals(ZoneEndpoint.defaultEndpoint,
@@ -741,10 +746,10 @@ public class DeploymentSpecWithoutInstanceTest {
);
DeploymentSpec spec = DeploymentSpec.fromXml(r);
DeploymentInstanceSpec instance = spec.requireInstance("default");
- assertEquals(Optional.of(CloudAccount.from("012345678912")), spec.cloudAccount());
- assertEquals(Optional.of(CloudAccount.from("219876543210")), instance.cloudAccount(Environment.prod, Optional.of(RegionName.from("us-east-1"))));
- assertEquals(Optional.of(CloudAccount.from("012345678912")), instance.cloudAccount(Environment.prod, Optional.of(RegionName.from("us-west-1"))));
- assertEquals(Optional.of(CloudAccount.from("012345678912")), instance.cloudAccount(Environment.staging, Optional.empty()));
+ assertEquals(Map.of(AWS, CloudAccount.from("012345678912")), spec.cloudAccounts());
+ assertEquals(Map.of(AWS, CloudAccount.from("219876543210")), instance.cloudAccounts(prod, RegionName.from("us-east-1")));
+ assertEquals(Map.of(AWS, CloudAccount.from("012345678912")), instance.cloudAccounts(prod, RegionName.from("us-west-1")));
+ assertEquals(Map.of(AWS, CloudAccount.from("012345678912")), instance.cloudAccounts(Environment.staging, RegionName.defaultName()));
r = new StringReader(
"<deployment version='1.0'>" +
@@ -755,9 +760,42 @@ public class DeploymentSpecWithoutInstanceTest {
"</deployment>"
);
spec = DeploymentSpec.fromXml(r);
- assertEquals(Optional.empty(), spec.cloudAccount());
- assertEquals(Optional.of(CloudAccount.from("219876543210")), spec.requireInstance("default").cloudAccount(Environment.prod, Optional.of(RegionName.from("us-east-1"))));
- assertEquals(Optional.empty(), spec.requireInstance("default").cloudAccount(Environment.prod, Optional.of(RegionName.from("us-west-1"))));
+ assertEquals(Map.of(), spec.cloudAccounts());
+ assertEquals(Map.of(AWS, CloudAccount.from("219876543210")), spec.requireInstance("default").cloudAccounts(prod, RegionName.from("us-east-1")));
+ assertEquals(Map.of(), spec.requireInstance("default").cloudAccounts(prod, RegionName.from("us-west-1")));
+ }
+
+ @Test
+ public void productionSpecWithHostTTL() {
+ String r = """
+ <deployment version='1.0' cloud-account='012345678912' empty-host-ttl='1d'>
+ <prod>
+ <region empty-host-ttl='1m'>us-east-1</region>
+ <region>us-west-1</region>
+ </prod>
+ </deployment>
+ """;
+ DeploymentSpec spec = DeploymentSpec.fromXml(r);
+ assertEquals(Optional.of(Duration.ofDays(1)), spec.hostTTL());
+ DeploymentInstanceSpec instance = spec.requireInstance("default");
+ assertEquals(Optional.of(Duration.ofMinutes(1)), instance.hostTTL(prod, Optional.of(RegionName.from("us-east-1"))));
+ assertEquals(Optional.of(Duration.ofDays(1)), instance.hostTTL(prod, Optional.of(RegionName.from("us-west-1"))));
+ assertEquals(Optional.of(Duration.ofDays(1)), instance.hostTTL(test, Optional.empty()));
+
+ r = """
+ <deployment version='1.0' cloud-account='012345678912'>
+ <prod empty-host-ttl='1d'>
+ <region empty-host-ttl='1m'>us-east-1</region>
+ <region>us-west-1</region>
+ </prod>
+ </deployment>
+ """;
+ spec = DeploymentSpec.fromXml(r);
+ assertEquals(Optional.empty(), spec.hostTTL());
+ instance = spec.requireInstance("default");
+ assertEquals(Optional.of(Duration.ofMinutes(1)), instance.hostTTL(prod, Optional.of(RegionName.from("us-east-1"))));
+ assertEquals(Optional.of(Duration.ofDays(1)), instance.hostTTL(prod, Optional.of(RegionName.from("us-west-1"))));
+ assertEquals(Optional.empty(), instance.hostTTL(test, Optional.empty()));
}
private static Set<String> endpointRegions(String endpointId, DeploymentSpec spec) {
diff --git a/config-model/src/main/java/com/yahoo/config/model/ConfigModelContext.java b/config-model/src/main/java/com/yahoo/config/model/ConfigModelContext.java
index 13d87b852e4..b4b3dccd440 100644
--- a/config-model/src/main/java/com/yahoo/config/model/ConfigModelContext.java
+++ b/config-model/src/main/java/com/yahoo/config/model/ConfigModelContext.java
@@ -2,16 +2,21 @@
package com.yahoo.config.model;
import com.yahoo.config.application.api.ApplicationPackage;
+import com.yahoo.config.application.api.Bcp.Group;
import com.yahoo.config.application.api.DeployLogger;
+import com.yahoo.config.application.api.DeploymentSpec;
import com.yahoo.config.model.api.ModelContext;
import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.config.model.producer.AnyConfigProducer;
import com.yahoo.config.model.producer.TreeConfigProducer;
import com.yahoo.config.provision.ClusterInfo;
+import com.yahoo.config.provision.ClusterInfo.Builder;
+import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.model.VespaModel;
import java.time.Duration;
import java.util.Comparator;
+import java.util.Optional;
import java.util.stream.Stream;
/**
@@ -72,14 +77,22 @@ public final class ConfigModelContext {
/** Returns a cluster info builder pre-populated with info known in this context. */
public ClusterInfo.Builder clusterInfo() {
- var instance = getApplicationPackage().getDeploymentSpec().instance(properties().applicationId().instance());
- if ( ! instance.isPresent()) return new ClusterInfo.Builder();
- var maxDeadline = instance.get().bcp().groups().stream()
- .filter(group -> group.memberRegions().contains(properties().zone().region()))
- .map(group -> group.deadline())
- .min(Comparator.comparing(deadline -> deadline))
- .orElse(Duration.ofMinutes(0));
- return new ClusterInfo.Builder().bcpDeadline(maxDeadline);
+ DeploymentSpec spec = getApplicationPackage().getDeploymentSpec();
+ ClusterInfo.Builder builder = new ClusterInfo.Builder();
+ spec.hostTTL(properties().applicationId().instance(), deployState.zone().environment(), deployState.zone().region())
+ .ifPresent(ttl -> {
+ ZoneId zoneId = ZoneId.from(deployState.zone().environment(), deployState.zone().region());
+ if (spec.cloudAccount(deployState.zone().cloud().name(), properties().applicationId().instance(), zoneId).isUnspecified())
+ throw new IllegalArgumentException("deployment spec specifies host TTL for " + zoneId +
+ " but no cloud account is specified for this zone");
+ });
+ spec.instance(properties().applicationId().instance())
+ .flatMap(instance -> instance.bcp().groups().stream()
+ .filter(group -> group.memberRegions().contains(properties().zone().region()))
+ .map(Group::deadline)
+ .min(Comparator.naturalOrder()))
+ .ifPresent(builder::bcpDeadline);
+ return builder;
}
/**
diff --git a/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java b/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
index 56f999f85b4..0e39b7b5c3a 100644
--- a/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
+++ b/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
@@ -78,7 +78,7 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
private int rpc_num_targets = 2;
private int rpc_events_before_wakeup = 1;
private int mbus_network_threads = 1;
- private int heapSizePercentage = ApplicationContainerCluster.defaultHeapSizePercentageOfTotalNodeMemory;
+ private int heapSizePercentage = ApplicationContainerCluster.defaultHeapSizePercentageOfAvailableMemory;
private Architecture adminClusterNodeResourcesArchitecture = Architecture.getDefault();
private boolean useRestrictedDataPlaneBindings = false;
private Optional<CloudAccount> cloudAccount = Optional.empty();
diff --git a/config-model/src/main/java/com/yahoo/config/model/provision/InMemoryProvisioner.java b/config-model/src/main/java/com/yahoo/config/model/provision/InMemoryProvisioner.java
index 4b993f8e244..585e69d9141 100644
--- a/config-model/src/main/java/com/yahoo/config/model/provision/InMemoryProvisioner.java
+++ b/config-model/src/main/java/com/yahoo/config/model/provision/InMemoryProvisioner.java
@@ -160,7 +160,7 @@ public class InMemoryProvisioner implements HostProvisioner {
public List<HostSpec> prepare(ClusterSpec cluster, Capacity requested, ProvisionLogger logger) {
provisioned.add(cluster.id(), requested);
clusters.add(cluster);
- if (environment == Environment.dev) {
+ if (environment == Environment.dev && ! requested.isRequired()) {
requested = requested.withLimits(requested.minResources().withNodes(1),
requested.maxResources().withNodes(1));
}
@@ -233,13 +233,8 @@ public class InMemoryProvisioner implements HostProvisioner {
// Minimal capacity policies
private NodeResources decideResources(NodeResources resources) {
- if (resources.vcpuIsUnspecified())
- resources = resources.withVcpu(defaultNodeResources.vcpu());
- if (resources.memoryGbIsUnspecified())
- resources = resources.withMemoryGb(defaultNodeResources.memoryGb());
- if (resources.diskGbIsUnspecified())
- resources = resources.withDiskGb(defaultNodeResources.diskGb());
- return resources;
+ if (defaultNodeResources.isUnspecified()) return resources;
+ return resources.withUnspecifiedNumbersFrom(defaultNodeResources);
}
private List<HostSpec> allocateHostGroup(ClusterSpec clusterGroup, NodeResources requestedResourcesOrUnspecified,
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/Host.java b/config-model/src/main/java/com/yahoo/vespa/model/Host.java
index 047a6ef9bd5..581f20cbfe9 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/Host.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/Host.java
@@ -8,13 +8,17 @@ import com.yahoo.config.model.producer.TreeConfigProducer;
import java.util.Objects;
/**
- * A physical host, running a set of services.
+ * A node with an identity and dedicated compute resources, running a set of services.
* The identity of a host is its hostname. Hosts are comparable on their host name.
*
* @author gjoranv
*/
public final class Host extends TreeConfigProducer<AnyConfigProducer> implements SentinelConfig.Producer, Comparable<Host> {
+ // Memory needed for auxiliary processes always running on the node (config-proxy, metrics-proxy).
+ // Keep in sync with node-repository/ClusterModel.
+ public static final double memoryOverheadGb = 0.7;
+
private ConfigSentinel configSentinel = null;
private final String hostname;
private final boolean runsConfigServer;
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java b/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java
index 9e867a4c3bc..28ff8dff620 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java
@@ -25,6 +25,7 @@ import com.yahoo.config.model.api.ValidationParameters;
import com.yahoo.config.model.application.provider.ApplicationPackageXmlFilesValidator;
import com.yahoo.config.model.builder.xml.ConfigModelBuilder;
import com.yahoo.config.model.deploy.DeployState;
+import com.yahoo.config.provision.QuotaExceededException;
import com.yahoo.config.provision.TransientException;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.config.VespaVersion;
@@ -222,7 +223,7 @@ public class VespaModelFactory implements ModelFactory {
Exceptions.toMessageString(e));
else
rethrowUnlessIgnoreErrors(e, validationParameters.ignoreValidationErrors());
- } catch (IllegalArgumentException | TransientException e) {
+ } catch (IllegalArgumentException | TransientException | QuotaExceededException e) {
rethrowUnlessIgnoreErrors(e, validationParameters.ignoreValidationErrors());
} catch (Exception e) {
throw new RuntimeException(e);
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
index 362bc7b0964..8a2bae364a1 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
@@ -131,6 +131,35 @@ public class VespaMetricSet {
addMetric(metrics, ConfigServerMetrics.ZK_CONNECTIONS.last());
addMetric(metrics, ConfigServerMetrics.ZK_OUTSTANDING_REQUESTS.last());
+ // Node repository metrics
+ addMetric(metrics, ConfigServerMetrics.NODES_NON_ACTIVE_FRACTION.last());
+ addMetric(metrics, ConfigServerMetrics.CLUSTER_COST.last());
+ addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_CPU.last());
+ addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_MEMORY.last());
+ addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_DISK.last());
+ addMetric(metrics, ConfigServerMetrics.WANT_TO_REBOOT.max());
+ addMetric(metrics, ConfigServerMetrics.WANT_TO_RESTART.max());
+ addMetric(metrics, ConfigServerMetrics.RETIRED.max());
+ addMetric(metrics, ConfigServerMetrics.WANT_TO_CHANGE_VESPA_VERSION.max());
+ addMetric(metrics, ConfigServerMetrics.HAS_WIRE_GUARD_KEY.last());
+ addMetric(metrics, ConfigServerMetrics.WANT_TO_DEPROVISION.max());
+ addMetric(metrics, ConfigServerMetrics.SUSPENDED.max());
+ addMetric(metrics, ConfigServerMetrics.SOME_SERVICES_DOWN.max());
+ addMetric(metrics, ConfigServerMetrics.NODE_FAILER_BAD_NODE.last());
+ addMetric(metrics, ConfigServerMetrics.LOCK_ATTEMPT_LOCKED_LOAD, EnumSet.of(max,average));
+
+ addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_ALLOCATED_CAPACITY_CPU.average());
+ addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_ALLOCATED_CAPACITY_MEM.average());
+ addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_ALLOCATED_CAPACITY_DISK.average());
+ addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_FREE_CAPACITY_CPU.max());
+ addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_FREE_CAPACITY_MEM.max());
+ addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_FREE_CAPACITY_DISK.max());
+ addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_TOTAL_CAPACITY_CPU, EnumSet.of(max,average));
+ addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_TOTAL_CAPACITY_DISK, EnumSet.of(max,average));
+ addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_TOTAL_CAPACITY_MEM, EnumSet.of(max,average));
+ addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_SKEW.last());
+ addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_PENDING_REDEPLOYMENTS.last());
+
return metrics;
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/AccessControlFilterExcludeValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/AccessControlFilterExcludeValidator.java
index ef695770987..5735a632085 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/AccessControlFilterExcludeValidator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/AccessControlFilterExcludeValidator.java
@@ -34,7 +34,7 @@ public class AccessControlFilterExcludeValidator extends Validator {
private void verifyNoExclusions(String clusterId, AccessControl accessControl, DeployState deployState) {
if (!accessControl.excludedBindings().isEmpty()) {
String message = "Application cluster %s excludes paths from access control, this is not allowed and should be removed.".formatted(clusterId);
- if (deployState.zone().cloud().name() == CloudName.AWS) {
+ if (deployState.zone().cloud().name().equals(CloudName.AWS)) {
throw new IllegalArgumentException(message);
} else {
deployState.getDeployLogger().log(Level.WARNING, message);
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidator.java
index 66da43856b1..eccb6910866 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidator.java
@@ -28,11 +28,14 @@ public class ConstantTensorJsonValidator {
private static final String FIELD_CELLS = "cells";
private static final String FIELD_ADDRESS = "address";
private static final String FIELD_VALUE = "value";
+ private static final String FIELD_VALUES = "values";
private static final JsonFactory jsonFactory = new JsonFactory();
private JsonParser parser;
private Map<String, TensorType.Dimension> tensorDimensions;
+ private boolean isSingleDenseType = false;
+ private boolean isSingleMappedType = false;
public void validate(String fileName, TensorType type, Reader tensorData) {
if (fileName.endsWith(".json")) {
@@ -57,19 +60,69 @@ public class ConstantTensorJsonValidator {
.dimensions()
.stream()
.collect(Collectors.toMap(TensorType.Dimension::name, Function.identity()));
+ if (type.dimensions().size() == 1) {
+ this.isSingleMappedType = (type.indexedSubtype() == TensorType.empty);
+ this.isSingleDenseType = (type.mappedSubtype() == TensorType.empty);
+ }
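+ // Accepted top-level forms (illustrative examples): a bare array or {"values": [1.0, 2.0]} for
+ // 1-d dense tensors, {"cells": {"a": 1.0}} for 1-d mapped tensors, or the general
+ // {"cells": [{"address": {"x": "a"}, "value": 1.0}]} literal form.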
+ var top = parser.nextToken();
+ if (top == JsonToken.START_ARRAY) {
+ consumeValuesArray();
+ } else if (top == JsonToken.START_OBJECT) {
+ consumeTopObject();
+ }
+ });
+ }
- assertNextTokenIs(JsonToken.START_OBJECT);
- assertNextTokenIs(JsonToken.FIELD_NAME);
- assertFieldNameIs(FIELD_CELLS);
+ private void consumeValuesArray() throws IOException {
+ if (! isSingleDenseType) {
+ throw new InvalidConstantTensorException(parser, String.format("Field 'values' is only valid for simple vectors (1-d dense tensors)"));
+ }
+ assertCurrentTokenIs(JsonToken.START_ARRAY);
+ while (parser.nextToken() != JsonToken.END_ARRAY) {
+ validateNumeric(parser.getCurrentToken());
+ }
+ }
+ private void consumeTopObject() throws IOException {
+ assertCurrentTokenIs(JsonToken.START_OBJECT);
+ assertNextTokenIs(JsonToken.FIELD_NAME);
+ String fieldName = parser.getCurrentName();
+ if (fieldName.equals(FIELD_VALUES)) {
assertNextTokenIs(JsonToken.START_ARRAY);
+ consumeValuesArray();
+ } else if (fieldName.equals(FIELD_CELLS)) {
+ consumeCellsField();
+ } else {
+ throw new InvalidConstantTensorException(parser, String.format("Expected 'cells' or 'values', got '%s'", fieldName));
+ }
+ assertNextTokenIs(JsonToken.END_OBJECT);
+ }
- while (parser.nextToken() != JsonToken.END_ARRAY) {
- validateTensorCell();
- }
+ private void consumeCellsField() throws IOException {
+ var token = parser.nextToken();
+ if (token == JsonToken.START_ARRAY) {
+ consumeLiteralFormArray();
+ } else if (token == JsonToken.START_OBJECT) {
+ consumeSimpleMappedObject();
+ } else {
+ throw new InvalidConstantTensorException(parser, String.format("Field 'cells' must be object or array, but got %s", token.toString()));
+ }
+ }
- assertNextTokenIs(JsonToken.END_OBJECT);
- });
+ private void consumeLiteralFormArray() throws IOException {
+ while (parser.nextToken() != JsonToken.END_ARRAY) {
+ validateTensorCell();
+ }
+ }
+
+ private void consumeSimpleMappedObject() throws IOException {
+ if (! isSingleMappedType) {
+ throw new InvalidConstantTensorException(parser, String.format("Field 'cells' must be an array of address/value objects"));
+ }
+ while (parser.nextToken() != JsonToken.END_OBJECT) {
+ assertCurrentTokenIs(JsonToken.FIELD_NAME);
+ validateTensorCellValue();
+ }
}
private void validateTensorCell() {
@@ -87,7 +140,7 @@ public class ConstantTensorJsonValidator {
if (fieldName.equals(FIELD_ADDRESS)) {
validateTensorAddress();
} else if (fieldName.equals(FIELD_VALUE)) {
- validateTensorValue();
+ validateTensorCellValue();
}
} else {
throw new InvalidConstantTensorException(parser, "Only 'address' or 'value' fields are permitted within a cell object");
@@ -169,9 +222,12 @@ public class ConstantTensorJsonValidator {
throw new InvalidConstantTensorException(parser, String.format("Index '%s' for dimension '%s' is not an integer", value, dimensionName));
}
- private void validateTensorValue() throws IOException {
+ private void validateTensorCellValue() throws IOException {
JsonToken token = parser.nextToken();
+ validateNumeric(token);
+ }
+ private void validateNumeric(JsonToken token) throws IOException {
if (token != JsonToken.VALUE_NUMBER_FLOAT && token != JsonToken.VALUE_NUMBER_INT) {
throw new InvalidConstantTensorException(parser, String.format("Tensor value is not a number (%s)", token.toString()));
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/QuotaValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/QuotaValidator.java
index 4ea74147aaf..f0c29c74705 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/QuotaValidator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/QuotaValidator.java
@@ -6,7 +6,9 @@ import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.NodeResources;
+import com.yahoo.config.provision.QuotaExceededException;
import com.yahoo.config.provision.SystemName;
+import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.model.VespaModel;
import java.math.BigDecimal;
@@ -31,11 +33,10 @@ public class QuotaValidator extends Validator {
public void validate(VespaModel model, DeployState deployState) {
var quota = deployState.getProperties().quota();
quota.maxClusterSize().ifPresent(maxClusterSize -> validateMaxClusterSize(maxClusterSize, model));
- quota.budgetAsDecimal().ifPresent(budget -> validateBudget(budget, model, deployState.getProperties().zone().system()));
+ quota.budgetAsDecimal().ifPresent(budget -> validateBudget(budget, model, deployState.getProperties().zone()));
}
- private void validateBudget(BigDecimal budget, VespaModel model, SystemName systemName) {
-
+ private void validateBudget(BigDecimal budget, VespaModel model, Zone zone) {
var maxSpend = model.allClusters().stream()
.filter(id -> !adminClusterIds(model).contains(id))
.map(id -> model.provisioned().all().getOrDefault(id, zeroCapacity))
@@ -52,9 +53,10 @@ public class QuotaValidator extends Validator {
return;
}
- throwIfBudgetNegative(actualSpend, budget, systemName);
- throwIfBudgetExceeded(actualSpend, budget, systemName);
- throwIfBudgetExceeded(maxSpend, budget, systemName);
+ throwIfBudgetNegative(actualSpend, budget, zone.system());
+ throwIfBudgetExceeded(actualSpend, budget, zone.system(), true);
+ if ( ! zone.environment().isTest()) // Usage is constant after deploy in test zones
+ throwIfBudgetExceeded(maxSpend, budget, zone.system(), false);
}
private Set<ClusterSpec.Id> adminClusterIds(VespaModel model) {
@@ -80,24 +82,28 @@ public class QuotaValidator extends Validator {
if (!invalidClusters.isEmpty()) {
var clusterNames = String.join(", ", invalidClusters);
- throw new IllegalArgumentException("Clusters " + clusterNames + " exceeded max cluster size of " + maxClusterSize);
+ throw new QuotaExceededException("Clusters " + clusterNames + " exceeded max cluster size of " + maxClusterSize);
}
}
private static void throwIfBudgetNegative(double spend, BigDecimal budget, SystemName systemName) {
if (budget.doubleValue() < 0) {
- throw new IllegalArgumentException(quotaMessage("Please free up some capacity.", systemName, spend, budget));
+ throw new QuotaExceededException(quotaMessage("Please free up some capacity.", systemName, spend, budget, true));
}
}
- private static void throwIfBudgetExceeded(double spend, BigDecimal budget, SystemName systemName) {
+ private static void throwIfBudgetExceeded(double spend, BigDecimal budget, SystemName systemName, boolean actual) {
if (budget.doubleValue() < spend) {
- throw new IllegalArgumentException(quotaMessage("Contact support to upgrade your plan.", systemName, spend, budget));
+ throw new QuotaExceededException(quotaMessage("Contact support to upgrade your plan.", systemName, spend, budget, actual));
}
}
- private static String quotaMessage(String message, SystemName system, double spend, BigDecimal budget) {
- String quotaDescription = String.format(Locale.ENGLISH, "The max resources specified cost $%.2f but your quota is $%.2f", spend, budget);
+ private static String quotaMessage(String message, SystemName system, double spend, BigDecimal budget, boolean actual) {
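+ // Produces e.g. (illustrative values) "The resources used cost $12.00 but your quota is $10.00: Contact support to upgrade your plan."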
+ String quotaDescription = String.format(Locale.ENGLISH,
+ "The %s cost $%.2f but your quota is $%.2f",
+ actual ? "resources used" : "max resources specified",
+ spend,
+ budget);
return (system == SystemName.Public ? "" : system.value() + ": ") + quotaDescription + ": " + message;
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomComponentBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomComponentBuilder.java
index c57122e5bf5..d0e1ede2cfa 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomComponentBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomComponentBuilder.java
@@ -3,12 +3,14 @@ package com.yahoo.vespa.model.builder.xml.dom;
import com.yahoo.component.ComponentId;
import com.yahoo.config.model.deploy.DeployState;
-import com.yahoo.container.bundle.BundleInstantiationSpecification;
-import com.yahoo.osgi.provider.model.ComponentModel;
import com.yahoo.config.model.producer.AnyConfigProducer;
import com.yahoo.config.model.producer.TreeConfigProducer;
+import com.yahoo.osgi.provider.model.ComponentModel;
import com.yahoo.text.XML;
+import com.yahoo.vespa.model.container.component.BertEmbedder;
import com.yahoo.vespa.model.container.component.Component;
+import com.yahoo.vespa.model.container.component.HuggingFaceEmbedder;
+import com.yahoo.vespa.model.container.component.HuggingFaceTokenizer;
import com.yahoo.vespa.model.container.xml.BundleInstantiationSpecificationBuilder;
import org.w3c.dom.Element;
@@ -31,17 +33,25 @@ public class DomComponentBuilder extends VespaDomBuilder.DomConfigProducerBuilde
}
@Override
- protected Component doBuild(DeployState deployState, TreeConfigProducer<AnyConfigProducer> ancestor, Element spec) {
- Component component = buildComponent(spec);
+ protected Component<? super Component<?, ?>, ?> doBuild(DeployState deployState, TreeConfigProducer<AnyConfigProducer> ancestor, Element spec) {
+ var component = buildComponent(spec, deployState);
addChildren(deployState, ancestor, spec, component);
return component;
}
- private Component buildComponent(Element spec) {
- BundleInstantiationSpecification bundleSpec =
- BundleInstantiationSpecificationBuilder.build(spec).nestInNamespace(namespace);
-
- return new Component<Component<?, ?>, ComponentModel>(new ComponentModel(bundleSpec));
+ private Component<? super Component<?, ?>, ?> buildComponent(Element spec, DeployState state) {
+ if (spec.hasAttribute("type")) {
+ var type = spec.getAttribute("type");
+ return switch (type) {
+ case "hugging-face-embedder" -> new HuggingFaceEmbedder(spec, state);
+ case "hugging-face-tokenizer" -> new HuggingFaceTokenizer(spec, state);
+ case "bert-embedder" -> new BertEmbedder(spec, state);
+ default -> throw new IllegalArgumentException("Unknown component type '%s'".formatted(type));
+ };
+ } else {
+ var bundleSpec = BundleInstantiationSpecificationBuilder.build(spec).nestInNamespace(namespace);
+ return new Component<>(new ComponentModel(bundleSpec));
+ }
}
public static void addChildren(DeployState deployState, TreeConfigProducer<AnyConfigProducer> ancestor, Element componentNode, Component<? super Component<?, ?>, ?> component) {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomSearchTuningBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomSearchTuningBuilder.java
index d9ef5fd2123..64592e75c41 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomSearchTuningBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomSearchTuningBuilder.java
@@ -66,10 +66,22 @@ public class DomSearchTuningBuilder extends VespaDomBuilder.DomConfigProducerBui
handleFeeding(e, t.searchNode);
} else if (equals("removed-db", e)) {
handleRemovedDB(e, t.searchNode);
+ } else if (equals("lidspace", e)) {
+ handleLidSpace(e, t.searchNode);
}
}
}
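+ // Handles e.g. <lidspace><max-bloat-factor>0.2</max-bloat-factor></lidspace> (value hypothetical).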
+ private void handleLidSpace(Element spec, Tuning.SearchNode t) {
+ t.lidSpace = new Tuning.SearchNode.LidSpace();
+ for (Element e : XML.getChildren(spec)) {
+ if (equals("max-bloat-factor", e)) {
+ t.lidSpace.bloatFactor = asDouble(e);
+ }
+ }
+ }
+
private void handleRequestThreads(Element spec, Tuning.SearchNode sn) {
sn.threads = new Tuning.SearchNode.RequestThreads();
Tuning.SearchNode.RequestThreads rt = sn.threads;
@@ -180,9 +192,9 @@ public class DomSearchTuningBuilder extends VespaDomBuilder.DomConfigProducerBui
Tuning.SearchNode.Index.Warmup warmup = sn.index.warmup;
for (Element e2 : XML.getChildren(e)) {
if (equals("time", e2)) {
- warmup.time = Double.valueOf(asString(e2));
+ warmup.time = asDouble(e2);
} else if (equals("unpack", e2)) {
- warmup.unpack = Boolean.valueOf(asString(e2));
+ warmup.unpack = Boolean.parseBoolean(asString(e2));
}
}
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java
index 6977a5ca465..3c1c4867f13 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java
@@ -31,6 +31,7 @@ import com.yahoo.vespa.config.search.core.OnnxModelsConfig;
import com.yahoo.vespa.config.search.core.RankingConstantsConfig;
import com.yahoo.vespa.config.search.core.RankingExpressionsConfig;
import com.yahoo.vespa.model.AbstractService;
+import com.yahoo.vespa.model.Host;
import com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyContainer;
import com.yahoo.vespa.model.container.component.BindingPattern;
import com.yahoo.vespa.model.container.component.Component;
@@ -75,8 +76,8 @@ public final class ApplicationContainerCluster extends ContainerCluster<Applicat
private static final BindingPattern PROMETHEUS_V1_HANDLER_BINDING_1 = SystemBindingPattern.fromHttpPath(PrometheusV1Handler.V1_PATH);
private static final BindingPattern PROMETHEUS_V1_HANDLER_BINDING_2 = SystemBindingPattern.fromHttpPath(PrometheusV1Handler.V1_PATH + "/*");
- public static final int defaultHeapSizePercentageOfTotalNodeMemory = 70;
- public static final int heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster = 18;
+ public static final int defaultHeapSizePercentageOfAvailableMemory = 85;
+ public static final int heapSizePercentageOfTotalAvailableMemoryWhenCombinedCluster = 24;
private final Set<FileReference> applicationBundles = new LinkedHashSet<>();
@@ -91,7 +92,9 @@ public final class ApplicationContainerCluster extends ContainerCluster<Applicat
private int zookeeperSessionTimeoutSeconds = 30;
private final int transport_events_before_wakeup;
private final int transport_connections_per_target;
- private final int heapSizePercentageOfTotalNodeMemory;
+
+ /** The heap size % of total memory available to the JVM process. */
+ private final int heapSizePercentageOfAvailableMemory;
private Integer memoryPercentage = null;
@@ -119,9 +122,9 @@ public final class ApplicationContainerCluster extends ContainerCluster<Applicat
addTestrunnerComponentsIfTester(deployState);
transport_connections_per_target = deployState.featureFlags().mbusJavaRpcNumTargets();
transport_events_before_wakeup = deployState.featureFlags().mbusJavaEventsBeforeWakeup();
- heapSizePercentageOfTotalNodeMemory = deployState.featureFlags().heapSizePercentage() > 0
+ heapSizePercentageOfAvailableMemory = deployState.featureFlags().heapSizePercentage() > 0
? Math.min(99, deployState.featureFlags().heapSizePercentage())
- : defaultHeapSizePercentageOfTotalNodeMemory;
+ : defaultHeapSizePercentageOfAvailableMemory;
}
@Override
@@ -178,12 +181,18 @@ public final class ApplicationContainerCluster extends ContainerCluster<Applicat
@Override
public Optional<Integer> getMemoryPercentage() {
- if (memoryPercentage != null) {
- return Optional.of(memoryPercentage);
- } else if (isHostedVespa()) {
- return getHostClusterId().isPresent() ?
- Optional.of(heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster) :
- Optional.of(heapSizePercentageOfTotalNodeMemory);
+ if (memoryPercentage != null) return Optional.of(memoryPercentage);
+
+ if (isHostedVespa()) {
+ int availableMemoryPercentage = getHostClusterId().isPresent() ?
+ heapSizePercentageOfTotalAvailableMemoryWhenCombinedCluster :
+ heapSizePercentageOfAvailableMemory;
+ if (getContainers().isEmpty()) return Optional.of(availableMemoryPercentage); // Node memory is not known
+
+ // Node memory is known so convert available memory percentage to node memory percentage
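+ // E.g. a 16 GiB node (hypothetical) with the 85% default: (16 - 0.7) / 16 * 85 ≈ 81% of node memory.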
+ double totalMemory = getContainers().get(0).getHostResource().realResources().memoryGb();
+ double availableMemory = totalMemory - Host.memoryOverheadGb;
+ return Optional.of((int) (availableMemory / totalMemory * availableMemoryPercentage));
}
return Optional.empty();
}
@@ -289,9 +298,7 @@ public final class ApplicationContainerCluster extends ContainerCluster<Applicat
.compressedClassSpaceSize(0)
.minHeapsize(1536)
.heapsize(1536);
- if (getMemoryPercentage().isPresent()) {
- builder.jvm.heapSizeAsPercentageOfPhysicalMemory(getMemoryPercentage().get());
- }
+ getMemoryPercentage().ifPresent(percentage -> builder.jvm.heapSizeAsPercentageOfPhysicalMemory(percentage));
}
@Override
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerModelEvaluation.java b/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerModelEvaluation.java
index 34c565871db..c227700733e 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerModelEvaluation.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerModelEvaluation.java
@@ -26,6 +26,7 @@ public class ContainerModelEvaluation implements
OnnxModelsConfig.Producer,
RankingExpressionsConfig.Producer {
+ public final static String LINGUISTICS_BUNDLE_NAME = "linguistics-components";
public final static String EVALUATION_BUNDLE_NAME = "model-evaluation";
public final static String INTEGRATION_BUNDLE_NAME = "model-integration";
public final static String ONNXRUNTIME_BUNDLE_NAME = "container-onnxruntime.jar";
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/PlatformBundles.java b/config-model/src/main/java/com/yahoo/vespa/model/container/PlatformBundles.java
index 19df9a4064f..dbc7cd62fbd 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/PlatformBundles.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/PlatformBundles.java
@@ -12,6 +12,7 @@ import java.util.stream.Stream;
import static com.yahoo.vespa.model.container.ContainerModelEvaluation.EVALUATION_BUNDLE_NAME;
import static com.yahoo.vespa.model.container.ContainerModelEvaluation.INTEGRATION_BUNDLE_NAME;
+import static com.yahoo.vespa.model.container.ContainerModelEvaluation.LINGUISTICS_BUNDLE_NAME;
import static com.yahoo.vespa.model.container.ContainerModelEvaluation.ONNXRUNTIME_BUNDLE_NAME;
/**
@@ -57,7 +58,7 @@ public class PlatformBundles {
public static final Set<Path> SEARCH_AND_DOCPROC_BUNDLES = toBundlePaths(
SEARCH_AND_DOCPROC_BUNDLE,
"docprocs",
- "linguistics-components",
+ LINGUISTICS_BUNDLE_NAME,
EVALUATION_BUNDLE_NAME,
INTEGRATION_BUNDLE_NAME,
ONNXRUNTIME_BUNDLE_NAME
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/component/BertEmbedder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/component/BertEmbedder.java
new file mode 100644
index 00000000000..56aa974da48
--- /dev/null
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/component/BertEmbedder.java
@@ -0,0 +1,70 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.model.container.component;
+
+import com.yahoo.config.ModelReference;
+import com.yahoo.config.model.deploy.DeployState;
+import com.yahoo.embedding.BertBaseEmbedderConfig;
+import com.yahoo.vespa.model.container.xml.ModelIdResolver;
+import org.w3c.dom.Element;
+
+import static com.yahoo.config.model.builder.xml.XmlHelper.getOptionalChildValue;
+import static com.yahoo.text.XML.getChild;
+import static com.yahoo.vespa.model.container.ContainerModelEvaluation.INTEGRATION_BUNDLE_NAME;
+
+/**
+ * @author bjorncs
+ */
+public class BertEmbedder extends TypedComponent implements BertBaseEmbedderConfig.Producer {
+
+ private final ModelReference model;
+ private final ModelReference vocab;
+ private final Integer maxTokens;
+ private final String transformerInputIds;
+ private final String transformerAttentionMask;
+ private final String transformerTokenTypeIds;
+ private final String transformerOutput;
+ private final Integer transformerStartSequenceToken;
+ private final Integer transformerEndSequenceToken;
+ private final String poolingStrategy;
+ private final String onnxExecutionMode;
+ private final Integer onnxInteropThreads;
+ private final Integer onnxIntraopThreads;
+ private final Integer onnxGpuDevice;
+
+ public BertEmbedder(Element xml, DeployState state) {
+ super("ai.vespa.embedding.BertBaseEmbedder", INTEGRATION_BUNDLE_NAME, xml);
+ model = ModelIdResolver.resolveToModelReference(getChild(xml, "transformer-model"), state);
+ vocab = ModelIdResolver.resolveToModelReference(getChild(xml, "tokenizer-vocab"), state);
+ maxTokens = getOptionalChildValue(xml, "max-tokens").map(Integer::parseInt).orElse(null);
+ transformerInputIds = getOptionalChildValue(xml, "transformer-input-ids").orElse(null);
+ transformerAttentionMask = getOptionalChildValue(xml, "transformer-attention-mask").orElse(null);
+ transformerTokenTypeIds = getOptionalChildValue(xml, "transformer-token-type-ids").orElse(null);
+ transformerOutput = getOptionalChildValue(xml, "transformer-output").orElse(null);
+ transformerStartSequenceToken = getOptionalChildValue(xml, "transformer-start-sequence-token").map(Integer::parseInt).orElse(null);
+ transformerEndSequenceToken = getOptionalChildValue(xml, "transformer-end-sequence-token").map(Integer::parseInt).orElse(null);
+ poolingStrategy = getOptionalChildValue(xml, "pooling-strategy").orElse(null);
+ onnxExecutionMode = getOptionalChildValue(xml, "onnx-execution-mode").orElse(null);
+ onnxInteropThreads = getOptionalChildValue(xml, "onnx-interop-threads").map(Integer::parseInt).orElse(null);
+ onnxIntraopThreads = getOptionalChildValue(xml, "onnx-intraop-threads").map(Integer::parseInt).orElse(null);
+ onnxGpuDevice = getOptionalChildValue(xml, "onnx-gpu-device").map(Integer::parseInt).orElse(null);
+ }
+
+ @Override
+ public void getConfig(BertBaseEmbedderConfig.Builder b) {
+ b.transformerModel(model).tokenizerVocab(vocab);
+ if (maxTokens != null) b.transformerMaxTokens(maxTokens);
+ if (transformerInputIds != null) b.transformerInputIds(transformerInputIds);
+ if (transformerAttentionMask != null) b.transformerAttentionMask(transformerAttentionMask);
+ if (transformerTokenTypeIds != null) b.transformerTokenTypeIds(transformerTokenTypeIds);
+ if (transformerOutput != null) b.transformerOutput(transformerOutput);
+ if (transformerStartSequenceToken != null) b.transformerStartSequenceToken(transformerStartSequenceToken);
+ if (transformerEndSequenceToken != null) b.transformerEndSequenceToken(transformerEndSequenceToken);
+ if (poolingStrategy != null) b.poolingStrategy(BertBaseEmbedderConfig.PoolingStrategy.Enum.valueOf(poolingStrategy));
+ if (onnxExecutionMode != null) b.onnxExecutionMode(BertBaseEmbedderConfig.OnnxExecutionMode.Enum.valueOf(onnxExecutionMode));
+ if (onnxInteropThreads != null) b.onnxInterOpThreads(onnxInteropThreads);
+ if (onnxIntraopThreads != null) b.onnxIntraOpThreads(onnxIntraopThreads);
+ if (onnxGpuDevice != null) b.onnxGpuDevice(onnxGpuDevice);
+ }
+}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/component/HuggingFaceEmbedder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/component/HuggingFaceEmbedder.java
new file mode 100644
index 00000000000..6e7a1cc31dd
--- /dev/null
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/component/HuggingFaceEmbedder.java
@@ -0,0 +1,81 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.model.container.component;
+
+import com.yahoo.config.ModelReference;
+import com.yahoo.config.model.deploy.DeployState;
+import com.yahoo.embedding.huggingface.HuggingFaceEmbedderConfig;
+import com.yahoo.vespa.model.container.xml.ModelIdResolver;
+import org.w3c.dom.Element;
+
+import java.util.Optional;
+
+import static com.yahoo.config.model.builder.xml.XmlHelper.getOptionalChild;
+import static com.yahoo.config.model.builder.xml.XmlHelper.getOptionalChildValue;
+import static com.yahoo.vespa.model.container.ContainerModelEvaluation.INTEGRATION_BUNDLE_NAME;
+
+
+/**
+ * @author bjorncs
+ */
+public class HuggingFaceEmbedder extends TypedComponent implements HuggingFaceEmbedderConfig.Producer {
+ private final ModelReference model;
+ private final ModelReference vocab;
+ private final Integer maxTokens;
+ private final String transformerInputIds;
+ private final String transformerAttentionMask;
+ private final String transformerTokenTypeIds;
+ private final String transformerOutput;
+ private final Boolean normalize;
+ private final String onnxExecutionMode;
+ private final Integer onnxInteropThreads;
+ private final Integer onnxIntraopThreads;
+ private final Integer onnxGpuDevice;
+ private final String poolingStrategy;
+
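+ // Configured from XML children such as <transformer-model .../>, <tokenizer-model .../>,
+ // <max-tokens>, <normalize> and <pooling-strategy>; the transformer model is required, the rest
+ // optional (the tokenizer model falls back to '<model-id>-vocab' in hosted deployments).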
+ public HuggingFaceEmbedder(Element xml, DeployState state) {
+ super("ai.vespa.embedding.huggingface.HuggingFaceEmbedder", INTEGRATION_BUNDLE_NAME, xml);
+ var transformerModelElem = getOptionalChild(xml, "transformer-model").orElseThrow();
+ model = ModelIdResolver.resolveToModelReference(transformerModelElem, state);
+ vocab = getOptionalChild(xml, "tokenizer-model")
+ .map(elem -> ModelIdResolver.resolveToModelReference(elem, state))
+ .orElseGet(() -> resolveDefaultVocab(transformerModelElem, state));
+ maxTokens = getOptionalChildValue(xml, "max-tokens").map(Integer::parseInt).orElse(null);
+ transformerInputIds = getOptionalChildValue(xml, "transformer-input-ids").orElse(null);
+ transformerAttentionMask = getOptionalChildValue(xml, "transformer-attention-mask").orElse(null);
+ transformerTokenTypeIds = getOptionalChildValue(xml, "transformer-token-type-ids").orElse(null);
+ transformerOutput = getOptionalChildValue(xml, "transformer-output").orElse(null);
+ normalize = getOptionalChildValue(xml, "normalize").map(Boolean::parseBoolean).orElse(null);
+ onnxExecutionMode = getOptionalChildValue(xml, "onnx-execution-mode").orElse(null);
+ onnxInteropThreads = getOptionalChildValue(xml, "onnx-interop-threads").map(Integer::parseInt).orElse(null);
+ onnxIntraopThreads = getOptionalChildValue(xml, "onnx-intraop-threads").map(Integer::parseInt).orElse(null);
+ onnxGpuDevice = getOptionalChildValue(xml, "onnx-gpu-device").map(Integer::parseInt).orElse(null);
+ poolingStrategy = getOptionalChildValue(xml, "pooling-strategy").orElse(null);
+ }
+
+ private static ModelReference resolveDefaultVocab(Element model, DeployState state) {
+ if (state.isHosted() && model.hasAttribute("model-id")) {
+ var implicitVocabId = model.getAttribute("model-id") + "-vocab";
+ return ModelIdResolver.resolveToModelReference(
+ "tokenizer-model", Optional.of(implicitVocabId), Optional.empty(), Optional.empty(), state);
+ }
+ throw new IllegalArgumentException("'tokenizer-model' must be specified");
+ }
+
+ @Override
+ public void getConfig(HuggingFaceEmbedderConfig.Builder b) {
+ b.transformerModel(model).tokenizerPath(vocab);
+ if (maxTokens != null) b.transformerMaxTokens(maxTokens);
+ if (transformerInputIds != null) b.transformerInputIds(transformerInputIds);
+ if (transformerAttentionMask != null) b.transformerAttentionMask(transformerAttentionMask);
+ if (transformerTokenTypeIds != null) b.transformerTokenTypeIds(transformerTokenTypeIds);
+ if (transformerOutput != null) b.transformerOutput(transformerOutput);
+ if (normalize != null) b.normalize(normalize);
+ if (onnxExecutionMode != null) b.transformerExecutionMode(
+ HuggingFaceEmbedderConfig.TransformerExecutionMode.Enum.valueOf(onnxExecutionMode));
+ if (onnxInteropThreads != null) b.transformerInterOpThreads(onnxInteropThreads);
+ if (onnxIntraopThreads != null) b.transformerIntraOpThreads(onnxIntraopThreads);
+ if (onnxGpuDevice != null) b.transformerGpuDevice(onnxGpuDevice);
+ if (poolingStrategy != null) b.poolingStrategy(HuggingFaceEmbedderConfig.PoolingStrategy.Enum.valueOf(poolingStrategy));
+ }
+}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/component/HuggingFaceTokenizer.java b/config-model/src/main/java/com/yahoo/vespa/model/container/component/HuggingFaceTokenizer.java
new file mode 100644
index 00000000000..966dbe8260a
--- /dev/null
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/component/HuggingFaceTokenizer.java
@@ -0,0 +1,47 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.model.container.component;
+
+import com.yahoo.config.ModelReference;
+import com.yahoo.config.model.deploy.DeployState;
+import com.yahoo.language.huggingface.config.HuggingFaceTokenizerConfig;
+import com.yahoo.text.XML;
+import com.yahoo.vespa.model.container.xml.ModelIdResolver;
+import org.w3c.dom.Element;
+
+import java.util.Map;
+import java.util.TreeMap;
+
+import static com.yahoo.config.model.builder.xml.XmlHelper.getOptionalChildValue;
+import static com.yahoo.vespa.model.container.ContainerModelEvaluation.LINGUISTICS_BUNDLE_NAME;
+
+/**
+ * @author bjorncs
+ */
+public class HuggingFaceTokenizer extends TypedComponent implements HuggingFaceTokenizerConfig.Producer {
+
+ private final Map<String, ModelReference> langToModel = new TreeMap<>();
+ private final Boolean specialTokens;
+ private final Integer maxLength;
+ private final Boolean truncation;
+
+ public HuggingFaceTokenizer(Element xml, DeployState state) {
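+ // Configured from XML such as (hypothetical): <component id="tokenizer" type="hugging-face-tokenizer">
+ // <model language="en" path="models/tokenizer.json"/> </component>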
+ super("com.yahoo.language.huggingface.HuggingFaceTokenizer", LINGUISTICS_BUNDLE_NAME, xml);
+ for (Element element : XML.getChildren(xml, "model")) {
+ var lang = element.hasAttribute("language") ? element.getAttribute("language") : "unknown";
+ langToModel.put(lang, ModelIdResolver.resolveToModelReference(element, state));
+ }
+ specialTokens = getOptionalChildValue(xml, "special-tokens").map(Boolean::parseBoolean).orElse(null);
+ maxLength = getOptionalChildValue(xml, "max-length").map(Integer::parseInt).orElse(null);
+ truncation = getOptionalChildValue(xml, "truncation").map(Boolean::parseBoolean).orElse(null);
+ }
+
+ @Override
+ public void getConfig(HuggingFaceTokenizerConfig.Builder builder) {
+ langToModel.forEach((lang, vocab) -> {
+ builder.model.add(new HuggingFaceTokenizerConfig.Model.Builder().language(lang).path(vocab));
+ });
+ if (specialTokens != null) builder.addSpecialTokens(specialTokens);
+ if (maxLength != null) builder.maxLength(maxLength);
+ if (truncation != null) builder.truncation(truncation);
+ }
+}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/component/TypedComponent.java b/config-model/src/main/java/com/yahoo/vespa/model/container/component/TypedComponent.java
new file mode 100644
index 00000000000..522c78f2f25
--- /dev/null
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/component/TypedComponent.java
@@ -0,0 +1,20 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.model.container.component;
+
+import com.yahoo.osgi.provider.model.ComponentModel;
+import org.w3c.dom.Element;
+
+/**
+ * @author bjorncs
+ */
+abstract class TypedComponent extends SimpleComponent {
+
+ private final Element xml;
+
+ protected TypedComponent(String className, String bundle, Element xml) {
+ super(new ComponentModel(xml.getAttribute("id"), className, bundle));
+ this.xml = xml;
+ }
+
+}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
index 57303d6c9b3..bcebf1a9fdd 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
@@ -933,22 +933,19 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
}
private static boolean applyMemoryPercentage(ApplicationContainerCluster cluster, String memoryPercentage) {
- if (memoryPercentage == null || memoryPercentage.isEmpty()) return false;
- memoryPercentage = memoryPercentage.trim();
-
- if ( ! memoryPercentage.endsWith("%"))
- throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster +
- " must be an integer percentage ending by the '%' sign");
- memoryPercentage = memoryPercentage.substring(0, memoryPercentage.length()-1).trim();
-
try {
+ if (memoryPercentage == null || memoryPercentage.isEmpty()) return false;
+ memoryPercentage = memoryPercentage.trim();
+ if ( ! memoryPercentage.endsWith("%"))
+ throw new IllegalArgumentException("Missing % sign");
+ memoryPercentage = memoryPercentage.substring(0, memoryPercentage.length()-1).trim();
cluster.setMemoryPercentage(Integer.parseInt(memoryPercentage));
+ return true;
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster +
- " must be an integer percentage ending by the '%' sign");
+ " must be an integer percentage ending by the '%' sign", e);
}
- return true;
}
/** Allocate a container cluster without a nodes tag */
@@ -960,9 +957,11 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
int nodeCount = deployState.zone().environment().isProduction() ? 2 : 1;
deployState.getDeployLogger().logApplicationPackage(Level.INFO, "Using " + nodeCount + " nodes in " + cluster);
var nodesSpec = NodesSpecification.dedicated(nodeCount, context);
+ ClusterSpec.Id clusterId = ClusterSpec.Id.from(cluster.getName());
var hosts = nodesSpec.provision(hostSystem,
ClusterSpec.Type.container,
- ClusterSpec.Id.from(cluster.getName()),
+ clusterId,
+ zoneEndpoint(context, clusterId),
deployState.getDeployLogger(),
false,
context.clusterInfo().build());
@@ -1192,9 +1191,7 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
DeploymentSpec spec) {
spec.athenzDomain()
.ifPresent(domain -> {
- AthenzService service = spec.instance(app.getApplicationId().instance())
- .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
- .or(spec::athenzService)
+ AthenzService service = spec.athenzService(app.getApplicationId().instance(), zone.environment(), zone.region())
.orElseThrow(() -> new IllegalArgumentException("Missing Athenz service configuration in instance '" +
app.getApplicationId().instance() + "'"));
String zoneDnsSuffix = zone.environment().value() + "-" + zone.region().value() + "." + athenzDnsSuffix;
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ModelIdResolver.java b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ModelIdResolver.java
index ff261d2b83a..96f653bf793 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ModelIdResolver.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ModelIdResolver.java
@@ -1,12 +1,17 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.container.xml;
+import com.yahoo.config.ModelReference;
+import com.yahoo.config.UrlReference;
+import com.yahoo.config.model.builder.xml.XmlHelper;
+import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.text.XML;
import org.w3c.dom.Element;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
+import java.util.Optional;
import java.util.stream.Collectors;
/**
@@ -29,6 +34,19 @@ public class ModelIdResolver {
models.put("flan-t5-base-decoder", "https://data.vespa.oath.cloud/onnx_models/flan-t5-base-decoder-model.onnx");
models.put("flan-t5-large-encoder", "https://data.vespa.oath.cloud/onnx_models/flan-t5-large-encoder-model.onnx");
models.put("flan-t5-large-decoder", "https://data.vespa.oath.cloud/onnx_models/flan-t5-large-decoder-model.onnx");
+
+ models.put("multilingual-e5-base", "https://data.vespa.oath.cloud/onnx_models/multilingual-e5-base/model.onnx");
+ models.put("multilingual-e5-base-vocab", "https://data.vespa.oath.cloud/onnx_models/multilingual-e5-base/tokenizer.json");
+
+ models.put("e5-small-v2", "https://data.vespa.oath.cloud/onnx_models/e5-small-v2/model.onnx");
+ models.put("e5-small-v2-vocab", "https://data.vespa.oath.cloud/onnx_models/e5-small-v2/tokenizer.json");
+
+ models.put("e5-base-v2", "https://data.vespa.oath.cloud/onnx_models/e5-base-v2/model.onnx");
+ models.put("e5-base-v2-vocab", "https://data.vespa.oath.cloud/onnx_models/e5-base-v2/tokenizer.json");
+
+ models.put("e5-large-v2", "https://data.vespa.oath.cloud/onnx_models/e5-large-v2/model.onnx");
+ models.put("e5-large-v2-vocab", "https://data.vespa.oath.cloud/onnx_models/e5-large-v2/tokenizer.json");
+
return Collections.unmodifiableMap(models);
}
@@ -57,11 +75,36 @@ public class ModelIdResolver {
value.removeAttribute("path");
}
else if ( ! value.hasAttribute("url") && ! value.hasAttribute("path")) {
- throw new IllegalArgumentException(value.getTagName() + " is configured with only a 'model-id'. " +
- "Add a 'path' or 'url' to deploy this outside Vespa Cloud");
+ throw onlyModelIdInHostedException(value.getTagName());
}
}
+
+ public static ModelReference resolveToModelReference(Element elem, DeployState state) {
+ return resolveToModelReference(
+ elem.getTagName(), XmlHelper.getOptionalAttribute(elem, "model-id"),
+ XmlHelper.getOptionalAttribute(elem, "url"), XmlHelper.getOptionalAttribute(elem, "path"), state);
+ }
+
+ public static ModelReference resolveToModelReference(
+ String paramName, Optional<String> id, Optional<String> url, Optional<String> path, DeployState state) {
+ if (id.isEmpty()) return createModelReference(Optional.empty(), url, path, state);
+ else if (state.isHosted())
+ return createModelReference(id, Optional.of(modelIdToUrl(paramName, id.get())), Optional.empty(), state);
+ else if (url.isEmpty() && path.isEmpty()) throw onlyModelIdInHostedException(paramName);
+ else return createModelReference(id, url, path, state);
+ }
+
+ private static ModelReference createModelReference(Optional<String> id, Optional<String> url, Optional<String> path, DeployState state) {
+ var fileRef = path.map(p -> state.getFileRegistry().addFile(p));
+ return ModelReference.unresolved(id, url.map(UrlReference::valueOf), fileRef);
+ }
+
+ private static IllegalArgumentException onlyModelIdInHostedException(String paramName) {
+ return new IllegalArgumentException(paramName + " is configured with only a 'model-id'. " +
+ "Add a 'path' or 'url' to deploy this outside Vespa Cloud");
+ }
+
private static String modelIdToUrl(String valueName, String modelId) {
if ( ! providedModels.containsKey(modelId))
throw new IllegalArgumentException("Unknown model id '" + modelId + "' on '" + valueName + "'. Available models are [" +
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/Content.java b/config-model/src/main/java/com/yahoo/vespa/model/content/Content.java
index e044b97546c..43f045940c9 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/Content.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/Content.java
@@ -28,6 +28,7 @@ import com.yahoo.vespa.model.container.docproc.DocprocChain;
import com.yahoo.vespa.model.container.docproc.DocprocChains;
import com.yahoo.vespa.model.content.cluster.ContentCluster;
import com.yahoo.vespa.model.search.IndexedSearchCluster;
+import com.yahoo.vespa.model.search.IndexingDocproc;
import com.yahoo.vespa.model.search.IndexingDocprocChain;
import com.yahoo.vespa.model.search.SearchCluster;
import com.yahoo.vespa.model.search.SearchNode;
@@ -213,13 +214,17 @@ public class Content extends ConfigModel {
/** Select/creates and initializes the indexing cluster coupled to this */
private void buildIndexingClusters(Content content, ConfigModelContext modelContext,
ApplicationConfigProducerRoot root) {
- if ( ! content.getCluster().getSearch().hasIndexedCluster()) return;
-
- IndexedSearchCluster indexedSearchCluster = content.getCluster().getSearch().getIndexed();
- if (indexedSearchCluster.hasExplicitIndexingCluster()) {
- setExistingIndexingCluster(indexedSearchCluster, content.containers);
+ var search = content.getCluster().getSearch();
+ if (!search.getIndexingDocproc().isPresent()) {
+ return;
+ }
+ var indexingDocproc = search.getIndexingDocproc().get();
+ if (indexingDocproc.hasExplicitCluster()) {
+ setExistingIndexingCluster(content, indexingDocproc, content.containers);
} else {
- setContainerAsIndexingCluster(indexedSearchCluster, content, modelContext, root);
+ if (search.hasIndexedCluster()) {
+ setContainerAsIndexingCluster(search.getIndexed(), content, modelContext, root);
+ }
}
}
@@ -237,18 +242,19 @@ public class Content extends ConfigModel {
targetCluster = content.containers.iterator().next().getCluster();
addDocproc(targetCluster);
- indexedSearchCluster.setIndexingClusterName(targetCluster.getName());
- addIndexingChainsTo(targetCluster, indexedSearchCluster);
+ var indexingDocproc = indexedSearchCluster.getIndexingDocproc();
+ indexingDocproc.setClusterName(targetCluster.getName());
+ addIndexingChainsTo(targetCluster, content, indexingDocproc);
}
}
- private void setExistingIndexingCluster(IndexedSearchCluster cluster, Collection<ContainerModel> containers) {
- String indexingClusterName = cluster.getIndexingClusterName();
+ private void setExistingIndexingCluster(Content content, IndexingDocproc indexingDocproc, Collection<ContainerModel> containers) {
+ String indexingClusterName = indexingDocproc.getClusterName(content.getCluster().getName());
ContainerModel containerModel = findByName(indexingClusterName, containers);
if (containerModel == null)
- throw new IllegalArgumentException("Content cluster '" + cluster.getClusterName() + "' refers to docproc " +
+ throw new IllegalArgumentException("Content cluster '" + content.getCluster().getName() + "' refers to docproc " +
"cluster '" + indexingClusterName + "', but this cluster does not exist.");
- addIndexingChainsTo(containerModel.getCluster(), cluster);
+ addIndexingChainsTo(containerModel.getCluster(), content, indexingDocproc);
}
private ContainerModel findByName(String name, Collection<ContainerModel> containers) {
@@ -258,19 +264,19 @@ public class Content extends ConfigModel {
return null;
}
- private void addIndexingChainsTo(ContainerCluster<?> indexer, IndexedSearchCluster cluster) {
+ private void addIndexingChainsTo(ContainerCluster<?> indexer, Content content, IndexingDocproc indexingDocproc) {
addIndexingChain(indexer);
DocprocChain indexingChain;
ComponentRegistry<DocprocChain> allChains = indexer.getDocprocChains().allChains();
- if (cluster.hasExplicitIndexingChain()) {
- indexingChain = allChains.getComponent(cluster.getIndexingChainName());
+ if (indexingDocproc.hasExplicitChain() && !indexingDocproc.getChainName().equals(IndexingDocprocChain.NAME)) {
+ indexingChain = allChains.getComponent(indexingDocproc.getChainName());
if (indexingChain == null) {
- throw new IllegalArgumentException(cluster + " refers to docproc " +
- "chain '" + cluster.getIndexingChainName() +
+ throw new IllegalArgumentException(content.getCluster() + " refers to docproc " +
+ "chain '" + indexingDocproc.getChainName() +
"' for indexing, but this chain does not exist");
}
else if (indexingChain.getId().getName().equals("default")) {
- throw new IllegalArgumentException(cluster + " specifies the chain " +
+ throw new IllegalArgumentException(content.getCluster() + " specifies the chain " +
"'default' as indexing chain. As the 'default' chain is run by default, " +
"using it as the indexing chain will run it twice. " +
"Use a different name for the indexing chain.");
@@ -282,7 +288,7 @@ public class Content extends ConfigModel {
indexingChain = allChains.getComponent(IndexingDocprocChain.NAME);
}
- cluster.setIndexingChain(indexingChain);
+ indexingDocproc.setChain(indexingChain);
}
private TreeConfigProducer<AnyConfigProducer> getDocProc(ApplicationConfigProducerRoot root) {
@@ -301,7 +307,7 @@ public class Content extends ConfigModel {
Content content,
ConfigModelContext modelContext,
ApplicationConfigProducerRoot root) {
- String indexerName = cluster.getIndexingClusterName();
+ String indexerName = cluster.getIndexingDocproc().getClusterName(content.getCluster().getName());
TreeConfigProducer<AnyConfigProducer> parent = getDocProc(root);
ApplicationContainerCluster indexingCluster = new ApplicationContainerCluster(parent, "cluster." + indexerName, indexerName, modelContext.getDeployState());
ContainerModel indexingClusterModel = new ContainerModel(modelContext.withParent(parent).withId(indexingCluster.getSubId()));
@@ -334,7 +340,7 @@ public class Content extends ConfigModel {
indexingCluster.addContainers(nodes);
addIndexingChain(indexingCluster);
- cluster.setIndexingChain(indexingCluster.getDocprocChains().allChains().getComponent(IndexingDocprocChain.NAME));
+ cluster.getIndexingDocproc().setChain(indexingCluster.getDocprocChains().allChains().getComponent(IndexingDocprocChain.NAME));
}
private ContainerCluster<?> getContainerWithDocproc(Collection<ContainerModel> containers) {
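
addIndexingChainsTo above now reads the chain name from IndexingDocproc instead of from the search cluster, but the selection rules are unchanged: an explicitly named chain must exist and must not be 'default', otherwise the built-in indexing chain is used. A condensed sketch of that selection, with a plain string set standing in for the docproc chain registry:

    import java.util.Optional;
    import java.util.Set;

    class IndexingChainSelectionSketch {

        static final String BUILT_IN = "indexing";   // stands in for IndexingDocprocChain.NAME

        static String selectChain(Optional<String> explicitChain, Set<String> availableChains) {
            if (explicitChain.isPresent() && ! explicitChain.get().equals(BUILT_IN)) {
                String name = explicitChain.get();
                if ( ! availableChains.contains(name))
                    throw new IllegalArgumentException("refers to docproc chain '" + name +
                                                       "' for indexing, but this chain does not exist");
                if (name.equals("default"))
                    throw new IllegalArgumentException("the 'default' chain is run by default; " +
                                                       "using it as the indexing chain will run it twice");
                return name;
            }
            return BUILT_IN;
        }

        public static void main(String[] args) {
            System.out.println(selectChain(Optional.of("my-chain"), Set.of(BUILT_IN, "my-chain")));   // my-chain
            System.out.println(selectChain(Optional.empty(), Set.of(BUILT_IN)));                      // indexing
        }

    }
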
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java
index a0240d28a3c..ec7acaf819f 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java
@@ -16,6 +16,7 @@ import com.yahoo.vespa.model.builder.xml.dom.ModelElement;
import com.yahoo.vespa.model.builder.xml.dom.VespaDomBuilder;
import com.yahoo.vespa.model.content.cluster.ContentCluster;
import com.yahoo.vespa.model.search.IndexedSearchCluster;
+import com.yahoo.vespa.model.search.IndexingDocproc;
import com.yahoo.vespa.model.search.NodeSpec;
import com.yahoo.vespa.model.search.SchemaDefinitionXMLHandler;
import com.yahoo.vespa.model.search.SearchCluster;
@@ -57,6 +58,7 @@ public class ContentSearchCluster extends TreeConfigProducer<AnyConfigProducer>
/** The single, indexed search cluster this sets up (supporting multiple document types), or null if none */
private IndexedSearchCluster indexedCluster;
+ private Optional<IndexingDocproc> indexingDocproc;
private Redundancy redundancy;
private final String clusterName;
@@ -206,6 +208,7 @@ public class ContentSearchCluster extends TreeConfigProducer<AnyConfigProducer>
double fractionOfMemoryReserved)
{
super(parent, "search");
+ this.indexingDocproc = Optional.empty();
this.clusterName = clusterName;
this.documentDefinitions = documentDefinitions;
this.globallyDistributedDocuments = globallyDistributedDocuments;
@@ -259,6 +262,10 @@ public class ContentSearchCluster extends TreeConfigProducer<AnyConfigProducer>
throw new IllegalArgumentException("Duplicate indexed cluster '" + indexedCluster.getClusterName() + "'");
}
indexedCluster = (IndexedSearchCluster)sc;
+ if (indexingDocproc.isPresent()) {
+ throw new IllegalArgumentException("Indexing docproc has previously been setup for streaming search");
+ }
+ indexingDocproc = Optional.of(indexedCluster.getIndexingDocproc());
}
clusters.put(sc.getClusterName(), sc);
}
@@ -458,6 +465,12 @@ public class ContentSearchCluster extends TreeConfigProducer<AnyConfigProducer>
public Map<String, SearchCluster> getClusters() { return clusters; }
public IndexedSearchCluster getIndexed() { return indexedCluster; }
public boolean hasIndexedCluster() { return indexedCluster != null; }
+ public Optional<IndexingDocproc> getIndexingDocproc() { return indexingDocproc; }
+ public void setupStreamingSearchIndexingDocProc() {
+ if (indexingDocproc.isEmpty()) {
+ indexingDocproc = Optional.of(new IndexingDocproc());
+ }
+ }
public String getClusterName() { return clusterName; }
@Override
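
ContentSearchCluster above now carries the indexing docproc as an Optional: it is filled from the indexed cluster when one is added, and setupStreamingSearchIndexingDocProc() lets a streaming-only cluster opt in. A minimal sketch of that state handling, with a plain holder class in place of the Vespa config producers:

    import java.util.Optional;

    class IndexingDocprocStateSketch {

        static class IndexingDocproc { }   // stand-in for com.yahoo.vespa.model.search.IndexingDocproc

        private Optional<IndexingDocproc> indexingDocproc = Optional.empty();

        // Called when an indexed search cluster is added; at most one source may populate this.
        void addIndexedCluster(IndexingDocproc fromIndexedCluster) {
            if (indexingDocproc.isPresent())
                throw new IllegalArgumentException("Indexing docproc has previously been set up for streaming search");
            indexingDocproc = Optional.of(fromIndexedCluster);
        }

        // Called for streaming search when services.xml names an explicit docproc cluster and chain.
        void setupStreamingSearchIndexingDocProc() {
            if (indexingDocproc.isEmpty()) indexingDocproc = Optional.of(new IndexingDocproc());
        }

        Optional<IndexingDocproc> getIndexingDocproc() { return indexingDocproc; }

    }
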
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java
index 66a99e1993c..dfdfa9303a7 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java
@@ -203,19 +203,24 @@ public class ContentCluster extends TreeConfigProducer<AnyConfigProducer> implem
if (docprocCluster != null) {
docprocCluster = docprocCluster.trim();
}
- if (c.getSearch().hasIndexedCluster()) {
- if (docprocCluster != null && !docprocCluster.isEmpty()) {
- c.getSearch().getIndexed().setIndexingClusterName(docprocCluster);
- }
- }
-
String docprocChain = e.stringAttribute("chain");
if (docprocChain != null) {
docprocChain = docprocChain.trim();
}
- if (c.getSearch().hasIndexedCluster()) {
- if (docprocChain != null && !docprocChain.isEmpty()) {
- c.getSearch().getIndexed().setIndexingChainName(docprocChain);
+ if (docprocCluster != null && !docprocCluster.isEmpty()) {
+ if (!c.getSearch().hasIndexedCluster() && !c.getSearch().getIndexingDocproc().isPresent() &&
+ docprocChain != null && !docprocChain.isEmpty()) {
+ c.getSearch().setupStreamingSearchIndexingDocProc();
+ }
+ var indexingDocproc = c.getSearch().getIndexingDocproc();
+ if (indexingDocproc.isPresent()) {
+ indexingDocproc.get().setClusterName(docprocCluster);
+ }
+ }
+ if (docprocChain != null && !docprocChain.isEmpty()) {
+ var indexingDocproc = c.getSearch().getIndexingDocproc();
+ if (indexingDocproc.isPresent()) {
+ indexingDocproc.get().setChainName(docprocChain);
}
}
}
@@ -451,7 +456,7 @@ public class ContentCluster extends TreeConfigProducer<AnyConfigProducer> implem
@Override
public void getConfig(MessagetyperouteselectorpolicyConfig.Builder builder) {
- if ( ! getSearch().hasIndexedCluster()) return;
+ if ( ! getSearch().getIndexingDocproc().isPresent()) return;
DocumentProtocol.getConfig(builder, getConfigId());
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/routing/DocumentProtocol.java b/config-model/src/main/java/com/yahoo/vespa/model/routing/DocumentProtocol.java
index ad0312705ca..6623efb599d 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/routing/DocumentProtocol.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/routing/DocumentProtocol.java
@@ -110,7 +110,7 @@ public final class DocumentProtocol implements Protocol,
for (ContentCluster cluster : Content.getContentClusters(repo)) {
DocumentProtocolPoliciesConfig.Cluster.Builder clusterBuilder = new DocumentProtocolPoliciesConfig.Cluster.Builder();
addSelector(cluster.getConfigId(), cluster.getRoutingSelector(), clusterBuilder);
- if (cluster.getSearch().hasIndexedCluster())
+ if (cluster.getSearch().getIndexingDocproc().isPresent())
addRoutes(getDirectRouteName(cluster.getConfigId()), getIndexedRouteName(cluster.getConfigId()), clusterBuilder);
else
clusterBuilder.defaultRoute(cluster.getConfigId());
@@ -227,10 +227,11 @@ public final class DocumentProtocol implements Protocol,
for (ContentCluster cluster : content) {
RouteSpec spec = new RouteSpec(cluster.getConfigId());
- if (cluster.getSearch().hasIndexedCluster()) {
+ if (cluster.getSearch().getIndexingDocproc().isPresent()) {
+ var indexingDocproc = cluster.getSearch().getIndexingDocproc().get();
table.addRoute(spec.addHop("[MessageType:" + cluster.getConfigId() + "]"));
table.addRoute(new RouteSpec(getIndexedRouteName(cluster.getConfigId()))
- .addHop(cluster.getSearch().getIndexed().getIndexingServiceName())
+ .addHop(indexingDocproc.getServiceName())
.addHop("[Content:cluster=" + cluster.getName() + "]"));
table.addRoute(new RouteSpec(getDirectRouteName(cluster.getConfigId()))
.addHop("[Content:cluster=" + cluster.getName() + "]"));
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/search/IndexedSearchCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/search/IndexedSearchCluster.java
index 670460a9f9f..080a2ca43dc 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/search/IndexedSearchCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/search/IndexedSearchCluster.java
@@ -43,11 +43,7 @@ public class IndexedSearchCluster extends SearchCluster
DispatchNodesConfig.Producer,
ConfigInstance.Producer {
- private String indexingClusterName = null; // The name of the docproc cluster to run indexing, by config.
- private String indexingChainName = null;
-
- private DocprocChain indexingChain; // The actual docproc chain indexing for this.
-
+ private IndexingDocproc indexingDocproc;
private Tuning tuning;
private SearchCoverage searchCoverage;
@@ -77,6 +73,7 @@ public class IndexedSearchCluster extends SearchCluster
public IndexedSearchCluster(TreeConfigProducer<AnyConfigProducer> parent, String clusterName, int index, ModelContext.FeatureFlags featureFlags) {
super(parent, clusterName, index);
+ indexingDocproc = new IndexingDocproc();
documentDbsConfigProducer = new MultipleDocumentDatabasesConfigProducer(this, documentDbs);
rootDispatch = new DispatchGroup(this);
defaultDispatchPolicy = DispatchTuning.Builder.toDispatchPolicy(featureFlags.queryDispatchPolicy());
@@ -87,58 +84,7 @@ public class IndexedSearchCluster extends SearchCluster
@Override
protected IndexingMode getIndexingMode() { return IndexingMode.REALTIME; }
- public final boolean hasExplicitIndexingCluster() {
- return indexingClusterName != null;
- }
-
- public final boolean hasExplicitIndexingChain() {
- return indexingChainName != null;
- }
-
- /**
- * Returns the name of the docproc cluster running indexing for this search cluster. This is derived from the
- * services file on initialization, this can NOT be used at runtime to determine indexing chain. When initialization
- * is done, the {@link #getIndexingServiceName()} method holds the actual indexing docproc chain object.
- *
- * @return the name of the docproc cluster associated with this
- */
- public String getIndexingClusterName() {
- return hasExplicitIndexingCluster() ? indexingClusterName : getClusterName() + ".indexing";
- }
-
- public String getIndexingChainName() {
- return indexingChainName;
- }
-
- public void setIndexingChainName(String indexingChainName) {
- this.indexingChainName = indexingChainName;
- }
-
- /**
- * Sets the name of the docproc cluster running indexing for this search cluster. This is for initial configuration,
- * and will not reflect the actual indexing chain. See {@link #getIndexingClusterName} for more detail.
- *
- * @param name the name of the docproc cluster associated with this
- */
- public void setIndexingClusterName(String name) {
- indexingClusterName = name;
- }
-
- public String getIndexingServiceName() {
- return indexingChain.getServiceName();
- }
-
- /**
- * Sets the docproc chain that will be running indexing for this search cluster. This is set by the
- * {@link com.yahoo.vespa.model.content.Content} model during build.
- *
- * @param chain the chain that is to run indexing for this cluster
- * @return this, to allow chaining
- */
- public SearchCluster setIndexingChain(DocprocChain chain) {
- indexingChain = chain;
- return this;
- }
+ public IndexingDocproc getIndexingDocproc() { return indexingDocproc; }
public DispatchGroup getRootDispatch() { return rootDispatch; }
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/search/IndexingDocproc.java b/config-model/src/main/java/com/yahoo/vespa/model/search/IndexingDocproc.java
new file mode 100644
index 00000000000..46f3e6f459d
--- /dev/null
+++ b/config-model/src/main/java/com/yahoo/vespa/model/search/IndexingDocproc.java
@@ -0,0 +1,68 @@
+package com.yahoo.vespa.model.search;
+
+import com.yahoo.vespa.model.container.docproc.DocprocChain;
+
+/**
+ * Utility class that tracks which indexing docproc a search cluster is configured to use.
+ */
+public class IndexingDocproc {
+ private String clusterName; // The name of the docproc cluster to run indexing, by config.
+ private String chainName;
+
+ private DocprocChain chain; // The actual docproc chain indexing for this.
+
+ public boolean hasExplicitCluster() {
+ return clusterName != null;
+ }
+
+ public boolean hasExplicitChain() {
+ return chainName != null;
+ }
+
+ /**
+ * Returns the name of the docproc cluster running indexing for this search cluster. This is derived from the
+ * services file on initialization and can NOT be used at runtime to determine the indexing chain. When initialization
+ * is done, {@link #getServiceName()} returns the service name of the actual indexing docproc chain.
+ *
+ * @return the name of the docproc cluster associated with this
+ */
+ public String getClusterName(String searchClusterName) {
+ return hasExplicitCluster() ? clusterName : searchClusterName + ".indexing";
+ }
+
+ public String getChainName() {
+ return chainName;
+ }
+
+ public void setChainName(String name) {
+ chainName = name;
+ }
+
+ /**
+ * Sets the name of the docproc cluster running indexing for this search cluster. This is for initial configuration,
+ * and will not reflect the actual indexing chain. See {@link #getClusterName} for more detail.
+ *
+ * @param name the name of the docproc cluster associated with this
+ */
+ public void setClusterName(String name) {
+ clusterName = name;
+ }
+
+ public String getServiceName() {
+ return chain.getServiceName();
+ }
+
+ /**
+ * Sets the docproc chain that will be running indexing for this search cluster. This is set by the
+ * {@link com.yahoo.vespa.model.content.Content} model during build.
+ *
+ * @param chain the chain that is to run indexing for this cluster
+ */
+ public void setChain(DocprocChain chain) { this.chain = chain; }
+
+ public IndexingDocproc() {
+ clusterName = null;
+ chainName = null;
+ chain = null;
+ }
+}
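
IndexingDocproc is a straight extraction of the cluster-name/chain-name/chain triple previously held by IndexedSearchCluster; the one behavioural nuance is that the default cluster name is now derived from a caller-supplied search cluster name. A short usage sketch, assuming the class as added above is on the classpath and using 'music' only as an example cluster name:

    import com.yahoo.vespa.model.search.IndexingDocproc;

    class IndexingDocprocUsageSketch {

        public static void main(String[] args) {
            IndexingDocproc docproc = new IndexingDocproc();
            // No explicit cluster: the name is derived from the search cluster as "<name>.indexing".
            System.out.println(docproc.getClusterName("music"));   // music.indexing

            docproc.setClusterName("my-docproc");
            docproc.setChainName("my-indexing-chain");
            System.out.println(docproc.hasExplicitCluster());      // true
            System.out.println(docproc.getClusterName("music"));   // my-docproc

            // getServiceName() is only valid after the model build has called setChain(...).
        }

    }
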
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/search/NodeResourcesTuning.java b/config-model/src/main/java/com/yahoo/vespa/model/search/NodeResourcesTuning.java
index 5b747b93268..1ad99404823 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/search/NodeResourcesTuning.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/search/NodeResourcesTuning.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.model.search;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.vespa.config.search.core.ProtonConfig;
+import com.yahoo.vespa.model.Host;
import static java.lang.Long.min;
import static java.lang.Long.max;
@@ -27,9 +28,6 @@ public class NodeResourcesTuning implements ProtonConfig.Producer {
private final int threadsPerSearch;
private final double fractionOfMemoryReserved;
- // "Reserve" 0.5GB of memory for other processes running on the content node (config-proxy, metrics-proxy).
- public static final double reservedMemoryGb = 0.7;
-
public NodeResourcesTuning(NodeResources resources,
int threadsPerSearch,
double fractionOfMemoryReserved) {
@@ -128,7 +126,7 @@ public class NodeResourcesTuning implements ProtonConfig.Producer {
/** Returns the memory we can expect will be available for the content node processes */
private double usableMemoryGb() {
- double usableMemoryGb = resources.memoryGb() - reservedMemoryGb;
+ double usableMemoryGb = resources.memoryGb() - Host.memoryOverheadGb;
return usableMemoryGb * (1 - fractionOfMemoryReserved);
}
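
usableMemoryGb() above now subtracts the shared Host.memoryOverheadGb constant instead of the local reservedMemoryGb. A small sketch of the arithmetic; the 0.7 GB figure matches the removed constant and the expectations in ModelProvisioningTest, but the authoritative value lives on com.yahoo.vespa.model.Host:

    class UsableMemorySketch {

        // Illustrative value only; see com.yahoo.vespa.model.Host.memoryOverheadGb for the real constant.
        static final double MEMORY_OVERHEAD_GB = 0.7;

        // Mirrors NodeResourcesTuning.usableMemoryGb(): subtract the host overhead, then remove the
        // fraction reserved for other processes (e.g. the jvm heap in combined clusters).
        static double usableMemoryGb(double nodeMemoryGb, double fractionOfMemoryReserved) {
            return (nodeMemoryGb - MEMORY_OVERHEAD_GB) * (1 - fractionOfMemoryReserved);
        }

        public static void main(String[] args) {
            System.out.println(usableMemoryGb(3.0, 0.30));   // (3 - 0.7) * (1 - 0.30) = 1.61 GB usable
        }

    }
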
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/search/Tuning.java b/config-model/src/main/java/com/yahoo/vespa/model/search/Tuning.java
index a29b7b90b44..93e3a6e7a19 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/search/Tuning.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/search/Tuning.java
@@ -53,6 +53,16 @@ public class Tuning extends AnyConfigProducer implements ProtonConfig.Producer {
}
}
+ public static class LidSpace implements ProtonConfig.Producer {
+ public Double bloatFactor = null;
+
+ @Override
+ public void getConfig(ProtonConfig.Builder builder) {
+ if (bloatFactor != null) builder.lidspacecompaction.allowedlidbloatfactor(bloatFactor);
+ }
+
+ }
+
public static class RemovedDB implements ProtonConfig.Producer {
public static class Prune implements ProtonConfig.Producer {
@@ -371,6 +381,7 @@ public class Tuning extends AnyConfigProducer implements ProtonConfig.Producer {
}
public RequestThreads threads = null;
+ public LidSpace lidSpace = null;
public FlushStrategy strategy = null;
public Resizing resizing = null;
public Index index = null;
@@ -383,6 +394,7 @@ public class Tuning extends AnyConfigProducer implements ProtonConfig.Producer {
@Override
public void getConfig(ProtonConfig.Builder builder) {
if (threads != null) threads.getConfig(builder);
+ if (lidSpace != null) lidSpace.getConfig(builder);
if (strategy != null) strategy.getConfig(builder);
if (resizing != null) resizing.getConfig(builder);
if (index != null) index.getConfig(builder);
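
The new LidSpace block follows the same pattern as the other optional tuning producers: a nullable field whose value is only written to ProtonConfig when it has been set. A compact sketch of that pattern, with a plain consumer standing in for the generated ProtonConfig builder:

    import java.util.Optional;
    import java.util.function.Consumer;

    class LidSpaceTuningSketch {

        // Mirrors Tuning.SearchNode.LidSpace: only write the value when it was configured.
        static void applyLidSpace(Optional<Double> bloatFactor, Consumer<Double> allowedLidBloatFactor) {
            bloatFactor.ifPresent(allowedLidBloatFactor);
        }

        public static void main(String[] args) {
            // Corresponds to <lidspace><max-bloat-factor>0.5</max-bloat-factor></lidspace> under the searchnode tuning.
            applyLidSpace(Optional.of(0.5),
                          v -> System.out.println("lidspacecompaction.allowedlidbloatfactor = " + v));
            applyLidSpace(Optional.empty(),
                          v -> { throw new AssertionError("setter is not called when the tag is absent"); });
        }

    }
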
diff --git a/config-model/src/main/resources/schema/common.rnc b/config-model/src/main/resources/schema/common.rnc
index 21f3399a027..061e54740f1 100644
--- a/config-model/src/main/resources/schema/common.rnc
+++ b/config-model/src/main/resources/schema/common.rnc
@@ -53,6 +53,11 @@ GenericConfig = element config {
anyElement*
}
+ModelReference =
+ attribute model-id { xsd:string }? &
+ attribute path { xsd:string }? &
+ attribute url { xsd:string }?
+
ComponentSpec =
( attribute id { xsd:Name | JavaId } | attribute idref { xsd:Name } | attribute ident { xsd:Name } )
@@ -64,7 +69,7 @@ BundleSpec =
attribute bundle { xsd:Name }?
Component = element component {
- ComponentDefinition
+ (ComponentDefinition | TypedComponentDefinition)
}
ComponentDefinition =
@@ -72,3 +77,51 @@ ComponentDefinition =
BundleSpec &
GenericConfig* &
Component*
+
+TypedComponentDefinition =
+ attribute id { xsd:Name } &
+ (HuggingFaceEmbedder | HuggingFaceTokenizer | BertBaseEmbedder) &
+ GenericConfig* &
+ Component*
+
+HuggingFaceEmbedder =
+ attribute type { "hugging-face-embedder" } &
+ element transformer-model { ModelReference } &
+ element tokenizer-model { ModelReference }? &
+ element max-tokens { xsd:nonNegativeInteger }? &
+ element transformer-input-ids { xsd:string }? &
+ element transformer-attention-mask { xsd:string }? &
+ element transformer-token-type-ids { xsd:string }? &
+ element transformer-output { xsd:string }? &
+ element normalize { xsd:boolean }? &
+ OnnxModelExecutionParams &
+ EmbedderPoolingStrategy
+
+HuggingFaceTokenizer =
+ attribute type { "hugging-face-tokenizer" } &
+ element model { attribute language { xsd:string }? & ModelReference }+ &
+ element special-tokens { xsd:boolean }? &
+ element max-length { xsd:integer }? &
+ element truncation { xsd:boolean }?
+
+BertBaseEmbedder =
+ attribute type { "bert-embedder" } &
+ element transformer-model { ModelReference } &
+ element tokenizer-vocab { ModelReference } &
+ element max-tokens { xsd:nonNegativeInteger }? &
+ element transformer-input-ids { xsd:string }? &
+ element transformer-attention-mask { xsd:string }? &
+ element transformer-token-type-ids { xsd:string }? &
+ element transformer-output { xsd:string }? &
+ element transformer-start-sequence-token { xsd:integer }? &
+ element transformer-end-sequence-token { xsd:integer }? &
+ OnnxModelExecutionParams &
+ EmbedderPoolingStrategy
+
+OnnxModelExecutionParams =
+ element onnx-execution-mode { "parallel" | "sequential" }? &
+ element onnx-interop-threads { xsd:integer }? &
+ element onnx-intraop-threads { xsd:integer }? &
+ element onnx-gpu-device { xsd:integer }?
+
+EmbedderPoolingStrategy = element pooling-strategy { "cls" | "mean" }?
\ No newline at end of file
diff --git a/config-model/src/main/resources/schema/content.rnc b/config-model/src/main/resources/schema/content.rnc
index 6486fdacc18..5833b575a74 100644
--- a/config-model/src/main/resources/schema/content.rnc
+++ b/config-model/src/main/resources/schema/content.rnc
@@ -301,6 +301,9 @@ Tuning = element tuning {
element persearch { xsd:nonNegativeInteger }? &
element summary { xsd:nonNegativeInteger }?
}? &
+ element lidspace {
+ element max-bloat-factor { xsd:double { minInclusive = "0.0" maxInclusive = "1.0" } }?
+ }? &
element flushstrategy {
element native {
element total {
diff --git a/config-model/src/main/resources/schema/deployment.rnc b/config-model/src/main/resources/schema/deployment.rnc
index ede05ad65ef..0f2eed3f72b 100644
--- a/config-model/src/main/resources/schema/deployment.rnc
+++ b/config-model/src/main/resources/schema/deployment.rnc
@@ -8,6 +8,7 @@ start = element deployment {
attribute athenz-domain { xsd:string }? &
attribute athenz-service { xsd:string }? &
attribute cloud-account { xsd:string }? &
+ attribute empty-host-ttl { xsd:string }? &
Step
}
@@ -39,6 +40,7 @@ Instance = element instance {
attribute tags { xsd:string }? &
attribute athenz-service { xsd:string }? &
attribute cloud-account { xsd:string }? &
+ attribute empty-host-ttl { xsd:string }? &
StepExceptInstance
}
@@ -106,11 +108,13 @@ Staging = element staging {
}
Dev = element dev {
- attribute cloud-account { xsd:string }?
+ attribute cloud-account { xsd:string }? &
+ attribute empty-host-ttl { xsd:string }?
}
Perf = element perf {
- attribute cloud-account { xsd:string }?
+ attribute cloud-account { xsd:string }? &
+ attribute empty-host-ttl { xsd:string }?
}
Prod = element prod {
@@ -118,6 +122,7 @@ Prod = element prod {
attribute athenz-service { xsd:string }? &
attribute tester-flavor { xsd:string }? &
attribute cloud-account { xsd:string }? &
+ attribute empty-host-ttl { xsd:string }? &
Region* &
Delay* &
ProdTest* &
@@ -132,6 +137,7 @@ Region = element region {
attribute active { xsd:boolean }? &
attribute athenz-service { xsd:string }? &
attribute cloud-account { xsd:string }? &
+ attribute empty-host-ttl { xsd:string }? &
text
}
diff --git a/config-model/src/test/cfg/application/embed/configdefinitions/embedding.bert-base-embedder.def b/config-model/src/test/cfg/application/embed/configdefinitions/embedding.bert-base-embedder.def
deleted file mode 100644
index 144dfbd0001..00000000000
--- a/config-model/src/test/cfg/application/embed/configdefinitions/embedding.bert-base-embedder.def
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copy of this Vespa config stored here because Vespa config definitions are not
-# available in unit tests, and are needed (by DomConfigPayloadBuilder.parseLeaf)
-# Alternatively, we could make that not need it as it is not strictly necessaery.
-
-namespace=embedding
-
-# Wordpiece tokenizer
-tokenizerVocab model
-
-transformerModel model
-
-# Max length of token sequence model can handle
-transformerMaxTokens int default=384
-
-# Pooling strategy
-poolingStrategy enum { cls, mean } default=mean
-
-# Input names
-transformerInputIds string default=input_ids
-transformerAttentionMask string default=attention_mask
-transformerTokenTypeIds string default=token_type_ids
-
-# Output name
-transformerOutput string default=output_0
-
-# Settings for ONNX model evaluation
-onnxExecutionMode enum { parallel, sequential } default=sequential
-onnxInterOpThreads int default=1
-onnxIntraOpThreads int default=-4 # n=number of threads -> n<0: CPUs/(-n), n==0: CPUs, n>0: n
-
diff --git a/config-model/src/test/cfg/application/embed/configdefinitions/sentence-embedder.def b/config-model/src/test/cfg/application/embed/configdefinitions/sentence-embedder.def
new file mode 100644
index 00000000000..87b80f1051a
--- /dev/null
+++ b/config-model/src/test/cfg/application/embed/configdefinitions/sentence-embedder.def
@@ -0,0 +1,26 @@
+package=ai.vespa.example.paragraph
+
+# WordPiece tokenizer vocabulary
+vocab model
+
+model model
+
+myValue string
+
+# Max length of token sequence model can handle
+transforerMaxTokens int default=128
+
+# Pooling strategy
+poolingStrategy enum { cls, mean } default=mean
+
+# Input names
+transformerInputIds string default=input_ids
+transformerAttentionMask string default=attention_mask
+
+# Output name
+transformerOutput string default=last_hidden_state
+
+# Settings for ONNX model evaluation
+onnxExecutionMode enum { parallel, sequential } default=sequential
+onnxInterOpThreads int default=1
+onnxIntraOpThreads int default=-4
diff --git a/config-model/src/test/cfg/application/embed/services.xml b/config-model/src/test/cfg/application/embed/services.xml
index fcb1f10f32c..6823ef900ae 100644
--- a/config-model/src/test/cfg/application/embed/services.xml
+++ b/config-model/src/test/cfg/application/embed/services.xml
@@ -3,16 +3,47 @@
<services version="1.0">
<container version="1.0">
+ <component id="hf-embedder" type="hugging-face-embedder">
+ <transformer-model model-id="e5-base-v2" url="https://my/url/model.onnx"/>
+ <tokenizer-model model-id="e5-base-v2-vocab" path="app/tokenizer.json"/>
+ <max-tokens>1024</max-tokens>
+ <transformer-input-ids>my_input_ids</transformer-input-ids>
+ <transformer-attention-mask>my_attention_mask</transformer-attention-mask>
+ <transformer-token-type-ids>my_token_type_ids</transformer-token-type-ids>
+ <transformer-output>my_output</transformer-output>
+ <normalize>true</normalize>
+ <onnx-execution-mode>parallel</onnx-execution-mode>
+ <onnx-intraop-threads>10</onnx-intraop-threads>
+ <onnx-interop-threads>8</onnx-interop-threads>
+ <onnx-gpu-device>1</onnx-gpu-device>
+ <pooling-strategy>mean</pooling-strategy>
+ </component>
+
+ <component id="hf-tokenizer" type="hugging-face-tokenizer">
+ <model language="no" model-id="multilingual-e5-base-vocab" url="https://my/url/tokenizer.json"/>
+ <special-tokens>true</special-tokens>
+ <max-length>768</max-length>
+ <truncation>true</truncation>
+ </component>
+
+ <component id="bert-embedder" type="bert-embedder">
+ <!-- model specifics -->
+ <transformer-model model-id="minilm-l6-v2" url="application-url"/>
+ <tokenizer-vocab path="files/vocab.txt"/>
+ <max-tokens>512</max-tokens>
+ <transformer-input-ids>my_input_ids</transformer-input-ids>
+ <transformer-attention-mask>my_attention_mask</transformer-attention-mask>
+ <transformer-token-type-ids>my_token_type_ids</transformer-token-type-ids>
+ <transformer-output>my_output</transformer-output>
+ <transformer-start-sequence-token>101</transformer-start-sequence-token>
+ <transformer-end-sequence-token>102</transformer-end-sequence-token>
- <component id="transformer" class="ai.vespa.embedding.BertBaseEmbedder" bundle="model-integration">
- <config name="embedding.bert-base-embedder">
- <!-- model specifics -->
- <transformerModel model-id="minilm-l6-v2" url="application-url"/>
- <tokenizerVocab path="files/vocab.txt"/>
- <!-- tunable parameters: number of threads etc -->
- <onnxIntraOpThreads>4</onnxIntraOpThreads>
- </config>
+ <!-- tunable parameters: number of threads etc -->
+ <onnx-execution-mode>parallel</onnx-execution-mode>
+ <onnx-intraop-threads>4</onnx-intraop-threads>
+ <onnx-interop-threads>8</onnx-interop-threads>
+ <onnx-gpu-device>1</onnx-gpu-device>
</component>
<nodes>
diff --git a/config-model/src/test/cfg/application/embed_cloud_only/configdefinitions/embedding.bert-base-embedder.def b/config-model/src/test/cfg/application/embed_cloud_only/configdefinitions/embedding.bert-base-embedder.def
deleted file mode 100644
index 144dfbd0001..00000000000
--- a/config-model/src/test/cfg/application/embed_cloud_only/configdefinitions/embedding.bert-base-embedder.def
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copy of this Vespa config stored here because Vespa config definitions are not
-# available in unit tests, and are needed (by DomConfigPayloadBuilder.parseLeaf)
-# Alternatively, we could make that not need it as it is not strictly necessaery.
-
-namespace=embedding
-
-# Wordpiece tokenizer
-tokenizerVocab model
-
-transformerModel model
-
-# Max length of token sequence model can handle
-transformerMaxTokens int default=384
-
-# Pooling strategy
-poolingStrategy enum { cls, mean } default=mean
-
-# Input names
-transformerInputIds string default=input_ids
-transformerAttentionMask string default=attention_mask
-transformerTokenTypeIds string default=token_type_ids
-
-# Output name
-transformerOutput string default=output_0
-
-# Settings for ONNX model evaluation
-onnxExecutionMode enum { parallel, sequential } default=sequential
-onnxInterOpThreads int default=1
-onnxIntraOpThreads int default=-4 # n=number of threads -> n<0: CPUs/(-n), n==0: CPUs, n>0: n
-
diff --git a/config-model/src/test/cfg/application/embed_cloud_only/configdefinitions/sentence-embedder.def b/config-model/src/test/cfg/application/embed_cloud_only/configdefinitions/sentence-embedder.def
new file mode 100644
index 00000000000..87b80f1051a
--- /dev/null
+++ b/config-model/src/test/cfg/application/embed_cloud_only/configdefinitions/sentence-embedder.def
@@ -0,0 +1,26 @@
+package=ai.vespa.example.paragraph
+
+# WordPiece tokenizer vocabulary
+vocab model
+
+model model
+
+myValue string
+
+# Max length of token sequence model can handle
+transforerMaxTokens int default=128
+
+# Pooling strategy
+poolingStrategy enum { cls, mean } default=mean
+
+# Input names
+transformerInputIds string default=input_ids
+transformerAttentionMask string default=attention_mask
+
+# Output name
+transformerOutput string default=last_hidden_state
+
+# Settings for ONNX model evaluation
+onnxExecutionMode enum { parallel, sequential } default=sequential
+onnxInterOpThreads int default=1
+onnxIntraOpThreads int default=-4
diff --git a/config-model/src/test/cfg/application/embed_cloud_only/services.xml b/config-model/src/test/cfg/application/embed_cloud_only/services.xml
index 57db4f5bfae..e203ec56669 100644
--- a/config-model/src/test/cfg/application/embed_cloud_only/services.xml
+++ b/config-model/src/test/cfg/application/embed_cloud_only/services.xml
@@ -4,14 +4,11 @@
<container version="1.0">
- <component id="transformer" class="ai.vespa.embedding.BertBaseEmbedder" bundle="model-integration">
- <config name="embedding.bert-base-embedder">
- <!-- No fallback to url or path when deploying outside cloud -->
- <transformerModel model-id="minilm-l6-v2"/>
- <tokenizerVocab path="files/vocab.txt"/>
-
- <!-- tunable parameters: number of threads etc -->
- <onnxIntraOpThreads>4</onnxIntraOpThreads>
+ <component id="transformer" class="ai.vespa.example.paragraph.ApplicationSpecificEmbedder" bundle="app">
+ <config name='ai.vespa.example.paragraph.sentence-embedder'>
+ <model model-id="minilm-l6-v2"/>
+ <vocab path="files/vocab.txt"/>
+ <myValue>foo</myValue>
</config>
</component>
diff --git a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
index 5472ea2ca82..8b8191ebbbb 100644
--- a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
+++ b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
@@ -57,7 +57,7 @@ import static com.yahoo.config.provision.NodeResources.DiskSpeed;
import static com.yahoo.config.provision.NodeResources.StorageType;
import static com.yahoo.vespa.defaults.Defaults.getDefaults;
import static com.yahoo.vespa.model.search.NodeResourcesTuning.GB;
-import static com.yahoo.vespa.model.search.NodeResourcesTuning.reservedMemoryGb;
+import static com.yahoo.vespa.model.Host.memoryOverheadGb;
import static com.yahoo.vespa.model.test.utils.ApplicationPackageUtils.generateSchemas;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -202,6 +202,12 @@ public class ModelProvisioningTest {
" <search/>" +
" <nodes count='1'/>" +
" </container>" +
+ " <container version='1.0' id='container2'>" +
+ " <search/>" +
+ " <nodes count='1'>" +
+ " <resources vcpu='10' memory='100Gb' disk='1Tb'/>" +
+ " </nodes>" +
+ " </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
@@ -219,12 +225,14 @@ public class ModelProvisioningTest {
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(8);
+ tester.addHosts(new NodeResources(20, 200, 2000, 1.0), 1);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
assertEquals(1, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
assertEquals(2, model.getContentClusters().get("content").getRootGroup().getNodes().size(), "Nodes in cluster without ID");
- assertEquals(ApplicationContainerCluster.defaultHeapSizePercentageOfTotalNodeMemory, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size for container");
+ assertEquals(65, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size for container1");
+ assertEquals(84, physicalMemoryPercentage(model.getContainerClusters().get("container2")), "Heap size for container2");
assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Type.content, model);
assertProvisioned(1, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model);
assertProvisioned(2, ClusterSpec.Id.from("content"), ClusterSpec.Type.content, model);
@@ -277,8 +285,7 @@ public class ModelProvisioningTest {
assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
assertEquals(2, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
assertEquals(18, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size is lowered with combined clusters");
- assertEquals((long) ((3 - reservedMemoryGb) * (Math.pow(1024, 3)) * (1 - 0.18)), protonMemorySize(model.getContentClusters()
- .get("content1")), "Memory for proton is lowered to account for the jvm heap");
+ assertEquals(2025077080L, protonMemorySize(model.getContentClusters().get("content1")), "Memory for proton is lowered to account for the jvm heap");
assertProvisioned(0, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model);
assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Id.from("container1"), ClusterSpec.Type.combined, model);
assertEquals(1, logger.msgs().size());
@@ -314,8 +321,8 @@ public class ModelProvisioningTest {
assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
assertEquals(2, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
assertEquals(30, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size is lowered with combined clusters");
- assertEquals((long) ((3 - reservedMemoryGb) * (Math.pow(1024, 3)) * (1 - 0.30)), protonMemorySize(model.getContentClusters()
- .get("content1")), "Memory for proton is lowered to account for the jvm heap");
+ assertEquals((long) ((3 - memoryOverheadGb) * (Math.pow(1024, 3)) * (1 - 0.30)), protonMemorySize(model.getContentClusters()
+ .get("content1")), "Memory for proton is lowered to account for the jvm heap");
assertProvisioned(0, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model);
assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Id.from("container1"), ClusterSpec.Type.combined, model);
}
@@ -345,8 +352,8 @@ public class ModelProvisioningTest {
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
assertEquals(2, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
- assertEquals(ApplicationContainerCluster.defaultHeapSizePercentageOfTotalNodeMemory, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size is normal");
- assertEquals((long) ((3 - reservedMemoryGb) * (Math.pow(1024, 3))), protonMemorySize(model.getContentClusters().get("content1")), "Memory for proton is normal");
+ assertEquals(65, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size is normal");
+ assertEquals((long) ((3 - memoryOverheadGb) * (Math.pow(1024, 3))), protonMemorySize(model.getContentClusters().get("content1")), "Memory for proton is normal");
}
@Test
@@ -2569,7 +2576,7 @@ public class ModelProvisioningTest {
ProtonConfig cfg = getProtonConfig(model, cluster.getSearchNodes().get(0).getConfigId());
assertEquals(2000, cfg.flush().memory().maxtlssize()); // from config override
assertEquals(1000, cfg.flush().memory().maxmemory()); // from explicit tuning
- assertEquals((long) ((128 - reservedMemoryGb) * GB * 0.08), cfg.flush().memory().each().maxmemory()); // from default node flavor tuning
+ assertEquals((long) ((128 - memoryOverheadGb) * GB * 0.08), cfg.flush().memory().each().maxmemory()); // from default node flavor tuning
}
private static ProtonConfig getProtonConfig(VespaModel model, String configId) {
diff --git a/config-model/src/test/java/com/yahoo/schema/processing/NGramTestCase.java b/config-model/src/test/java/com/yahoo/schema/processing/NGramTestCase.java
index c143aa43d53..06ea202b9c3 100644
--- a/config-model/src/test/java/com/yahoo/schema/processing/NGramTestCase.java
+++ b/config-model/src/test/java/com/yahoo/schema/processing/NGramTestCase.java
@@ -52,7 +52,7 @@ public class NGramTestCase extends AbstractSchemaTestCase {
@Test
void testInvalidNGramSetting1() throws IOException, ParseException {
try {
- Schema schema = ApplicationBuilder.buildFromFile("src/test/examples/invalidngram1.sd");
+ ApplicationBuilder.buildFromFile("src/test/examples/invalidngram1.sd");
fail("Should cause an exception");
}
catch (IllegalArgumentException e) {
@@ -63,7 +63,7 @@ public class NGramTestCase extends AbstractSchemaTestCase {
@Test
void testInvalidNGramSetting2() throws IOException, ParseException {
try {
- Schema schema = ApplicationBuilder.buildFromFile("src/test/examples/invalidngram2.sd");
+ ApplicationBuilder.buildFromFile("src/test/examples/invalidngram2.sd");
fail("Should cause an exception");
}
catch (IllegalArgumentException e) {
@@ -74,7 +74,7 @@ public class NGramTestCase extends AbstractSchemaTestCase {
@Test
void testInvalidNGramSetting3() throws IOException, ParseException {
try {
- Schema schema = ApplicationBuilder.buildFromFile("src/test/examples/invalidngram3.sd");
+ ApplicationBuilder.buildFromFile("src/test/examples/invalidngram3.sd");
fail("Should cause an exception");
}
catch (IllegalArgumentException e) {
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidatorTest.java
index 80643917a58..42be1592eca 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidatorTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidatorTest.java
@@ -281,7 +281,25 @@ public class ConstantTensorJsonValidatorTest {
" }",
"}"));
});
- assertTrue(exception.getMessage().contains("Expected field name 'cells', got 'stats'"));
+ System.err.println("msg: " + exception.getMessage());
+ assertTrue(exception.getMessage().contains("Expected 'cells' or 'values', got 'stats'"));
+ }
+
+ @Test
+ void ensure_that_values_array_for_vector_works() {
+ validateTensorJson(
+ TensorType.fromSpec("tensor(x[5])"),
+ inputJsonToReader("[5,4.0,3.1,-2,-1.0]"));
+ validateTensorJson(
+ TensorType.fromSpec("tensor(x[5])"),
+ inputJsonToReader("{'values':[5,4.0,3.1,-2,-1.0]}"));
+ }
+
+ @Test
+ void ensure_that_simple_object_for_map_works() {
+ validateTensorJson(
+ TensorType.fromSpec("tensor(x{})"),
+ inputJsonToReader("{'cells':{'a':5,'b':4.0,'c':3.1,'d':-2,'e':-1.0}}"));
}
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/QuotaValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/QuotaValidatorTest.java
index 1a7b3d62cb7..a1a3b40a858 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/QuotaValidatorTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/QuotaValidatorTest.java
@@ -21,19 +21,20 @@ public class QuotaValidatorTest {
private final Zone publicZone = new Zone(SystemName.Public, Environment.prod, RegionName.from("foo"));
private final Zone publicCdZone = new Zone(SystemName.PublicCd, Environment.prod, RegionName.from("foo"));
+ private final Zone devZone = new Zone(SystemName.Public, Environment.dev, RegionName.from("foo"));
private final Quota quota = Quota.unlimited().withClusterSize(10).withBudget(BigDecimal.valueOf(1.25));
@Test
void test_deploy_under_quota() {
var tester = new ValidationTester(8, false, new TestProperties().setHostedVespa(true).setQuota(quota).setZone(publicZone));
- tester.deploy(null, getServices("testCluster", 4), Environment.prod, null);
+ tester.deploy(null, getServices(4), Environment.prod, null);
}
@Test
void test_deploy_above_quota_clustersize() {
var tester = new ValidationTester(14, false, new TestProperties().setHostedVespa(true).setQuota(quota).setZone(publicZone));
try {
- tester.deploy(null, getServices("testCluster", 11), Environment.prod, null);
+ tester.deploy(null, getServices(11), Environment.prod, null);
fail();
} catch (RuntimeException e) {
assertEquals("Clusters testCluster exceeded max cluster size of 10", e.getMessage());
@@ -44,10 +45,10 @@ public class QuotaValidatorTest {
void test_deploy_above_quota_budget() {
var tester = new ValidationTester(13, false, new TestProperties().setHostedVespa(true).setQuota(quota).setZone(publicZone));
try {
- tester.deploy(null, getServices("testCluster", 10), Environment.prod, null);
+ tester.deploy(null, getServices(10), Environment.prod, null);
fail();
} catch (RuntimeException e) {
- assertEquals("The max resources specified cost $1.63 but your quota is $1.25: Contact support to upgrade your plan.", e.getMessage());
+ assertEquals("The resources used cost $1.63 but your quota is $1.25: Contact support to upgrade your plan.", e.getMessage());
}
}
@@ -55,10 +56,10 @@ public class QuotaValidatorTest {
void test_deploy_above_quota_budget_in_publiccd() {
var tester = new ValidationTester(13, false, new TestProperties().setHostedVespa(true).setQuota(quota.withBudget(BigDecimal.ONE)).setZone(publicCdZone));
try {
- tester.deploy(null, getServices("testCluster", 10), Environment.prod, null);
+ tester.deploy(null, getServices(10), Environment.prod, null);
fail();
} catch (RuntimeException e) {
- assertEquals("publiccd: The max resources specified cost $1.63 but your quota is $1.00: Contact support to upgrade your plan.", e.getMessage());
+ assertEquals("publiccd: The resources used cost $1.63 but your quota is $1.00: Contact support to upgrade your plan.", e.getMessage());
}
}
@@ -66,11 +67,33 @@ public class QuotaValidatorTest {
void test_deploy_max_resources_above_quota() {
var tester = new ValidationTester(13, false, new TestProperties().setHostedVespa(true).setQuota(quota).setZone(publicCdZone));
try {
- tester.deploy(null, getServices("testCluster", 10), Environment.prod, null);
+ tester.deploy(null, getServices(10), Environment.prod, null);
fail();
} catch (RuntimeException e) {
- assertEquals("publiccd: The max resources specified cost $1.63 but your quota is $1.25: Contact support to upgrade your plan.", e.getMessage());
+ assertEquals("publiccd: The resources used cost $1.63 but your quota is $1.25: Contact support to upgrade your plan.", e.getMessage());
+ }
+ }
+
+
+ @Test
+ void test_deploy_above_quota_budget_in_dev() {
+ var quota = Quota.unlimited().withBudget(BigDecimal.valueOf(0.01));
+ var tester = new ValidationTester(5, false, new TestProperties().setHostedVespa(true).setQuota(quota).setZone(devZone));
+ // There is downscaling to 1 node per cluster in dev
+ try {
+ tester.deploy(null, getServices(2, false), Environment.dev, null);
+ fail();
+ } catch (RuntimeException e) {
+ assertEquals("The resources used cost $0.16 but your quota is $0.01: Contact support to upgrade your plan.", e.getMessage());
+ }
+
+ // Override so that we will get 2 nodes in content cluster
+ try {
+ tester.deploy(null, getServices(2, true), Environment.dev, null);
+ fail();
+ } catch (RuntimeException e) {
+ assertEquals("The resources used cost $0.33 but your quota is $0.01: Contact support to upgrade your plan.", e.getMessage());
}
}
@@ -79,25 +102,26 @@ public class QuotaValidatorTest {
var quota = Quota.unlimited().withBudget(BigDecimal.valueOf(-1));
var tester = new ValidationTester(13, false, new TestProperties().setHostedVespa(true).setQuota(quota).setZone(publicZone));
try {
- tester.deploy(null, getServices("testCluster", 10), Environment.prod, null);
+ tester.deploy(null, getServices(10), Environment.prod, null);
fail();
} catch (RuntimeException e) {
- assertEquals("The max resources specified cost $-.-- but your quota is $--.--: Please free up some capacity.",
+ assertEquals("The resources used cost $-.-- but your quota is $--.--: Please free up some capacity.",
ValidationTester.censorNumbers(e.getMessage()));
}
}
- private static String getServices(String contentClusterId, int nodeCount) {
- return "<services version='1.0'>" +
- " <content id='" + contentClusterId + "' version='1.0'>" +
+ private static String getServices(int nodeCount) {
+ return getServices(nodeCount, false);
+ }
+
+ private static String getServices(int nodeCount, boolean devOverride) {
+ return "<services version='1.0' xmlns:deploy='vespa' xmlns:preprocess='properties'>" +
+ " <content id='" + "testCluster" + "' version='1.0'>" +
" <redundancy>1</redundancy>" +
- " <engine>" +
- " <proton/>" +
- " </engine>" +
" <documents>" +
" <document type='music' mode='index'/>" +
" </documents>" +
- " <nodes count='" + nodeCount + "'>" +
+ " <nodes count='" + nodeCount + "' " + (devOverride ? "required='true'" : "") + " >\n" +
" <resources vcpu=\"[0.5, 2]\" memory=\"[1Gb, 6Gb]\" disk=\"[1Gb, 18Gb]\"/>\n" +
" </nodes>" +
" </content>" +
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ValidationTester.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ValidationTester.java
index 78d3838d39d..1517f7971ed 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ValidationTester.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ValidationTester.java
@@ -57,6 +57,7 @@ public class ValidationTester {
public ValidationTester(InMemoryProvisioner hostProvisioner, TestProperties testProperties) {
this.hostProvisioner = hostProvisioner;
this.properties = testProperties;
+ hostProvisioner.setEnvironment(testProperties.zone().environment());
}
/**
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/CloudAccountChangeValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/CloudAccountChangeValidatorTest.java
index a8a063cb5fb..77704817045 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/CloudAccountChangeValidatorTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/CloudAccountChangeValidatorTest.java
@@ -1,14 +1,14 @@
package com.yahoo.vespa.model.application.validation.change;
-import com.yahoo.config.provision.ClusterInfo;
-import com.yahoo.config.provision.IntRange;
import com.yahoo.config.model.api.Provisioned;
import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.config.model.deploy.TestProperties;
import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.CloudAccount;
+import com.yahoo.config.provision.ClusterInfo;
import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.IntRange;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.vespa.model.VespaModel;
import com.yahoo.vespa.model.test.utils.VespaModelCreatorWithMockPkg;
@@ -36,7 +36,7 @@ class CloudAccountChangeValidatorTest {
fail("Expected exception");
} catch (IllegalArgumentException e) {
assertEquals(e.getMessage(), "Cannot change cloud account from unspecified account to " +
- "account '000000000000'. The existing deployment must be removed before " +
+ "account '000000000000' in aws. The existing deployment must be removed before " +
"changing accounts");
}
assertEquals(List.of(), validator.validate(model0, model0, new DeployState.Builder().build()));
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/DomComponentBuilderTest.java b/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/DomComponentBuilderTest.java
index ed3073a0ef4..78c95c03b44 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/DomComponentBuilderTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/DomComponentBuilderTest.java
@@ -30,13 +30,13 @@ public class DomComponentBuilderTest extends DomBuilderTest {
@Test
@SuppressWarnings("unchecked")
void components_can_be_nested() {
- Component<Component<?, ?>, ?> parent = new DomComponentBuilder().doBuild(root.getDeployState(), root, parse(
+ Component<? super Component<?, ?>, ?> parent = new DomComponentBuilder().doBuild(root.getDeployState(), root, parse(
"<component id='parent'>",
" <component id='child' />",
"</component>"));
assertEquals(ComponentId.fromString("parent"), parent.getGlobalComponentId());
- Component<?, ?> child = first(parent.getChildren().values());
+ Component<?, ?> child = (Component<?, ?>) first(parent.getChildren().values());
assertNotNull(child);
assertEquals(ComponentId.fromString("child@parent"), child.getGlobalComponentId());
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/DomSchemaTuningBuilderTest.java b/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/DomSchemaTuningBuilderTest.java
index 41cef783b68..e3e9fc1a232 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/DomSchemaTuningBuilderTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/DomSchemaTuningBuilderTest.java
@@ -58,6 +58,16 @@ public class DomSchemaTuningBuilderTest extends DomBuilderTest {
}
@Test
+ void requireThatWeCanParseLidSpaceTag() {
+ Tuning t = createTuning(parseXml("<lidspace>",
+ "<max-bloat-factor>0.5</max-bloat-factor>",
+ "</lidspace>"));
+ assertEquals(0.5, t.searchNode.lidSpace.bloatFactor.doubleValue());
+ ProtonConfig cfg = getProtonCfg(t);
+ assertEquals(cfg.lidspacecompaction().allowedlidbloatfactor(), 0.5);
+ }
+
+ @Test
void requireThatWeCanParseFlushStrategyTag() {
Tuning t = createTuning(parseXml("<flushstrategy>", "<native>",
"<total>",
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/ContainerClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/container/ContainerClusterTest.java
index 5973ef56962..2562e1e3124 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/ContainerClusterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/ContainerClusterTest.java
@@ -129,10 +129,10 @@ public class ContainerClusterTest {
int heapSizeInFlag = 89;
boolean hosted = true;
boolean combined = true; // a cluster running on content nodes (only relevant with hosted)
- verifyHeapSizeAsPercentageOfPhysicalMemory(createRoot(hosted), !combined, null, ApplicationContainerCluster.defaultHeapSizePercentageOfTotalNodeMemory);
+ verifyHeapSizeAsPercentageOfPhysicalMemory(createRoot(hosted), !combined, null, ApplicationContainerCluster.defaultHeapSizePercentageOfAvailableMemory);
verifyHeapSizeAsPercentageOfPhysicalMemory(createRoot(hosted, heapSizeInFlag), !combined, null, heapSizeInFlag);
- verifyHeapSizeAsPercentageOfPhysicalMemory(createRoot(hosted), combined, null, 18);
- verifyHeapSizeAsPercentageOfPhysicalMemory(createRoot(hosted, heapSizeInFlag), combined, null, 18);
+ verifyHeapSizeAsPercentageOfPhysicalMemory(createRoot(hosted), combined, null, 24);
+ verifyHeapSizeAsPercentageOfPhysicalMemory(createRoot(hosted, heapSizeInFlag), combined, null, 24);
verifyHeapSizeAsPercentageOfPhysicalMemory(createRoot(!hosted), !combined, null, 0);
verifyHeapSizeAsPercentageOfPhysicalMemory(createRoot(!hosted, heapSizeInFlag), !combined, null, 0);
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/EmbedderTestCase.java b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/EmbedderTestCase.java
index 50416d50fe5..2a82daef9e3 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/EmbedderTestCase.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/EmbedderTestCase.java
@@ -2,16 +2,26 @@
package com.yahoo.vespa.model.container.xml;
import com.yahoo.component.ComponentId;
+import com.yahoo.config.InnerNode;
+import com.yahoo.config.ModelNode;
+import com.yahoo.config.ModelReference;
import com.yahoo.config.model.application.provider.FilesApplicationPackage;
import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.config.model.deploy.TestProperties;
+import com.yahoo.embedding.BertBaseEmbedderConfig;
+import com.yahoo.embedding.huggingface.HuggingFaceEmbedderConfig;
+import com.yahoo.language.huggingface.config.HuggingFaceTokenizerConfig;
import com.yahoo.path.Path;
import com.yahoo.text.XML;
import com.yahoo.vespa.config.ConfigDefinitionKey;
import com.yahoo.vespa.config.ConfigPayloadBuilder;
import com.yahoo.vespa.model.VespaModel;
import com.yahoo.vespa.model.container.ApplicationContainerCluster;
+import com.yahoo.vespa.model.container.component.BertEmbedder;
import com.yahoo.vespa.model.container.component.Component;
+import com.yahoo.vespa.model.container.component.HuggingFaceEmbedder;
+import com.yahoo.vespa.model.container.component.HuggingFaceTokenizer;
+import com.yahoo.vespa.model.test.utils.VespaModelCreatorWithFilePkg;
import com.yahoo.yolean.Exceptions;
import org.junit.jupiter.api.Test;
import org.w3c.dom.Document;
@@ -30,55 +40,18 @@ import static org.junit.jupiter.api.Assertions.fail;
public class EmbedderTestCase {
- private static final String BUNDLED_EMBEDDER_CLASS = "ai.vespa.embedding.BertBaseEmbedder";
- private static final String BUNDLED_EMBEDDER_CONFIG = "embedding.bert-base-embedder";
-
- @Test
- void testBundledEmbedder_selfhosted() throws IOException, SAXException {
- String input = "<component id='test' class='" + BUNDLED_EMBEDDER_CLASS + "' bundle='model-integration'>" +
- " <config name='" + BUNDLED_EMBEDDER_CONFIG + "'>" +
- " <transformerModel id='my_model_id' url='my-model-url' />" +
- " <tokenizerVocab id='my_vocab_id' url='my-vocab-url' />" +
- " </config>" +
- "</component>";
- String component = "<component id='test' class='" + BUNDLED_EMBEDDER_CLASS + "' bundle='model-integration'>" +
- " <config name='" + BUNDLED_EMBEDDER_CONFIG + "'>" +
- " <transformerModel id='my_model_id' url='my-model-url' />" +
- " <tokenizerVocab id='my_vocab_id' url='my-vocab-url' />" +
- " </config>" +
- "</component>";
- assertTransform(input, component, false);
- }
-
- @Test
- void testBundledEmbedder_hosted() throws IOException, SAXException {
- String input = "<component id='test' class='" + BUNDLED_EMBEDDER_CLASS + "' bundle='model-integration'>" +
- " <config name='" + BUNDLED_EMBEDDER_CONFIG + "'>" +
- " <transformerModel model-id='minilm-l6-v2' />" +
- " <tokenizerVocab model-id='bert-base-uncased' path='ignored.txt'/>" +
- " </config>" +
- "</component>";
- String component = "<component id='test' class='" + BUNDLED_EMBEDDER_CLASS + "' bundle='model-integration'>" +
- " <config name='" + BUNDLED_EMBEDDER_CONFIG + "'>" +
- " <transformerModel model-id='minilm-l6-v2' url='https://data.vespa.oath.cloud/onnx_models/sentence_all_MiniLM_L6_v2.onnx' />" +
- " <tokenizerVocab model-id='bert-base-uncased' url='https://data.vespa.oath.cloud/onnx_models/bert-base-uncased-vocab.txt' />" +
- " </config>" +
- "</component>";
- assertTransform(input, component, true);
- }
-
@Test
void testApplicationComponentWithModelReference_hosted() throws IOException, SAXException {
- String input = "<component id='test' class='ApplicationSpecificEmbedder' bundle='model-integration'>" +
- " <config name='" + BUNDLED_EMBEDDER_CONFIG + "'>" +
- " <transformerModel model-id='minilm-l6-v2' />" +
- " <tokenizerVocab model-id='bert-base-uncased' />" +
+ String input = "<component id='test' class='ai.vespa.example.paragraph.ApplicationSpecificEmbedder' bundle='app'>" +
+ " <config name='ai.vespa.example.paragraph.sentence-embedder'>" +
+ " <model model-id='minilm-l6-v2' />" +
+ " <vocab model-id='bert-base-uncased' />" +
" </config>" +
"</component>";
- String component = "<component id='test' class='ApplicationSpecificEmbedder' bundle='model-integration'>" +
- " <config name='" + BUNDLED_EMBEDDER_CONFIG + "'>" +
- " <transformerModel model-id='minilm-l6-v2' url='https://data.vespa.oath.cloud/onnx_models/sentence_all_MiniLM_L6_v2.onnx' />" +
- " <tokenizerVocab model-id='bert-base-uncased' url='https://data.vespa.oath.cloud/onnx_models/bert-base-uncased-vocab.txt' />" +
+ String component = "<component id='test' class='ai.vespa.example.paragraph.ApplicationSpecificEmbedder' bundle='app'>" +
+ " <config name='ai.vespa.example.paragraph.sentence-embedder'>" +
+ " <model model-id='minilm-l6-v2' url='https://data.vespa.oath.cloud/onnx_models/sentence_all_MiniLM_L6_v2.onnx' />" +
+ " <vocab model-id='bert-base-uncased' url='https://data.vespa.oath.cloud/onnx_models/bert-base-uncased-vocab.txt' />" +
" </config>" +
"</component>";
assertTransform(input, component, true);
@@ -86,42 +59,65 @@ public class EmbedderTestCase {
@Test
void testUnknownModelId_hosted() throws IOException, SAXException {
- String embedder = "<component id='test' class='" + BUNDLED_EMBEDDER_CLASS + "'>" +
- " <config name='" + BUNDLED_EMBEDDER_CONFIG + "'>" +
- " <transformerModel model-id='my_model_id' />" +
- " <tokenizerVocab model-id='my_vocab_id' />" +
+ String embedder = "<component id='test' class='ai.vespa.example.paragraph.ApplicationSpecificEmbedder'>" +
+ " <config name='ai.vespa.example.paragraph.sentence-embedder'>" +
+ " <model model-id='my_model_id' />" +
+ " <vocab model-id='my_vocab_id' />" +
" </config>" +
"</component>";
assertTransformThrows(embedder,
- "Unknown model id 'my_model_id' on 'transformerModel'",
+ "Unknown model id 'my_model_id' on 'model'",
true);
}
@Test
- void testApplicationPackageWithEmbedder_selfhosted() throws Exception {
- Path applicationDir = Path.fromString("src/test/cfg/application/embed/");
- VespaModel model = loadModel(applicationDir, false);
- ApplicationContainerCluster containerCluster = model.getContainerClusters().get("container");
+ void huggingfaceEmbedder_selfhosted() throws Exception {
+ var model = loadModel(Path.fromString("src/test/cfg/application/embed/"), false);
+ var cluster = model.getContainerClusters().get("container");
+ var embedderCfg = assertHuggingfaceEmbedderComponentPresent(cluster);
+ assertEquals("my_input_ids", embedderCfg.transformerInputIds());
+ assertEquals("https://my/url/model.onnx", modelReference(embedderCfg, "transformerModel").url().orElseThrow().value());
+ var tokenizerCfg = assertHuggingfaceTokenizerComponentPresent(cluster);
+ assertEquals("https://my/url/tokenizer.json", modelReference(tokenizerCfg.model().get(0), "path").url().orElseThrow().value());
+ assertEquals(768, tokenizerCfg.maxLength());
+ }
- Component<?, ?> transformer = containerCluster.getComponentsMap().get(new ComponentId("transformer"));
- ConfigPayloadBuilder config = transformer.getUserConfigs().get(new ConfigDefinitionKey("bert-base-embedder", "embedding"));
- assertEquals("minilm-l6-v2 application-url \"\"", config.getObject("transformerModel").getValue());
- assertEquals("\"\" \"\" files/vocab.txt", config.getObject("tokenizerVocab").getValue());
- assertEquals("4", config.getObject("onnxIntraOpThreads").getValue());
+ @Test
+ void huggingfaceEmbedder_hosted() throws Exception {
+ var model = loadModel(Path.fromString("src/test/cfg/application/embed/"), true);
+ var cluster = model.getContainerClusters().get("container");
+ var embedderCfg = assertHuggingfaceEmbedderComponentPresent(cluster);
+ assertEquals("my_input_ids", embedderCfg.transformerInputIds());
+ assertEquals("https://data.vespa.oath.cloud/onnx_models/e5-base-v2/model.onnx", modelReference(embedderCfg, "transformerModel").url().orElseThrow().value());
+ var tokenizerCfg = assertHuggingfaceTokenizerComponentPresent(cluster);
+ assertEquals("https://data.vespa.oath.cloud/onnx_models/multilingual-e5-base/tokenizer.json", modelReference(tokenizerCfg.model().get(0), "path").url().orElseThrow().value());
+ assertEquals(768, tokenizerCfg.maxLength());
}
+
@Test
- void testApplicationPackageWithEmbedder_hosted() throws Exception {
- Path applicationDir = Path.fromString("src/test/cfg/application/embed/");
- VespaModel model = loadModel(applicationDir, true);
- ApplicationContainerCluster containerCluster = model.getContainerClusters().get("container");
+ void bertEmbedder_selfhosted() throws Exception {
+ var model = loadModel(Path.fromString("src/test/cfg/application/embed/"), false);
+ var cluster = model.getContainerClusters().get("container");
+ var embedderCfg = assertBertEmbedderComponentPresent(cluster);
+ assertEquals("application-url", modelReference(embedderCfg, "transformerModel").url().orElseThrow().value());
+ assertEquals("files/vocab.txt", modelReference(embedderCfg, "tokenizerVocab").path().orElseThrow().value());
+ }
- Component<?, ?> transformer = containerCluster.getComponentsMap().get(new ComponentId("transformer"));
- ConfigPayloadBuilder config = transformer.getUserConfigs().get(new ConfigDefinitionKey("bert-base-embedder", "embedding"));
- assertEquals("minilm-l6-v2 https://data.vespa.oath.cloud/onnx_models/sentence_all_MiniLM_L6_v2.onnx \"\"",
- config.getObject("transformerModel").getValue());
- assertEquals("\"\" \"\" files/vocab.txt", config.getObject("tokenizerVocab").getValue());
- assertEquals("4", config.getObject("onnxIntraOpThreads").getValue());
+ @Test
+ void bertEmbedder_hosted() throws Exception {
+ var model = loadModel(Path.fromString("src/test/cfg/application/embed/"), true);
+ var cluster = model.getContainerClusters().get("container");
+ var embedderCfg = assertBertEmbedderComponentPresent(cluster);
+ assertEquals("https://data.vespa.oath.cloud/onnx_models/sentence_all_MiniLM_L6_v2.onnx",
+ modelReference(embedderCfg, "transformerModel").url().orElseThrow().value());
+ assertTrue(modelReference(embedderCfg, "tokenizerVocab").url().isEmpty());
+ assertEquals("files/vocab.txt", modelReference(embedderCfg, "tokenizerVocab").path().orElseThrow().value());
+ }
+
+ @Test
+ void passesXmlValidation() {
+ new VespaModelCreatorWithFilePkg("src/test/cfg/application/embed/").create();
}
@Test
@@ -157,7 +153,7 @@ public class EmbedderTestCase {
fail("Expected failure");
}
catch (IllegalArgumentException e) {
- assertEquals("transformerModel is configured with only a 'model-id'. Add a 'path' or 'url' to deploy this outside Vespa Cloud",
+ assertEquals("model is configured with only a 'model-id'. Add a 'path' or 'url' to deploy this outside Vespa Cloud",
Exceptions.toMessageString(e));
}
}
@@ -217,4 +213,39 @@ public class EmbedderTestCase {
return (Element) doc.getFirstChild();
}
+ private static HuggingFaceTokenizerConfig assertHuggingfaceTokenizerComponentPresent(ApplicationContainerCluster cluster) {
+ var hfTokenizer = (HuggingFaceTokenizer) cluster.getComponentsMap().get(new ComponentId("hf-tokenizer"));
+ assertEquals("com.yahoo.language.huggingface.HuggingFaceTokenizer", hfTokenizer.getClassId().getName());
+ var cfgBuilder = new HuggingFaceTokenizerConfig.Builder();
+ hfTokenizer.getConfig(cfgBuilder);
+ return cfgBuilder.build();
+ }
+
+ private static HuggingFaceEmbedderConfig assertHuggingfaceEmbedderComponentPresent(ApplicationContainerCluster cluster) {
+ var hfEmbedder = (HuggingFaceEmbedder) cluster.getComponentsMap().get(new ComponentId("hf-embedder"));
+ assertEquals("ai.vespa.embedding.huggingface.HuggingFaceEmbedder", hfEmbedder.getClassId().getName());
+ var cfgBuilder = new HuggingFaceEmbedderConfig.Builder();
+ hfEmbedder.getConfig(cfgBuilder);
+ return cfgBuilder.build();
+ }
+
+ private static BertBaseEmbedderConfig assertBertEmbedderComponentPresent(ApplicationContainerCluster cluster) {
+ var bertEmbedder = (BertEmbedder) cluster.getComponentsMap().get(new ComponentId("bert-embedder"));
+ assertEquals("ai.vespa.embedding.BertBaseEmbedder", bertEmbedder.getClassId().getName());
+ var cfgBuilder = new BertBaseEmbedderConfig.Builder();
+ bertEmbedder.getConfig(cfgBuilder);
+ return cfgBuilder.build();
+ }
+
+ // Ugly hack to read underlying model reference from config instance
+ private static ModelReference modelReference(InnerNode cfg, String name) {
+ try {
+ var f = cfg.getClass().getDeclaredField(name);
+ f.setAccessible(true);
+ return ((ModelNode) f.get(cfg)).getModelReference();
+ } catch (NoSuchFieldException | IllegalAccessException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java
index 14fe7bbcc36..f9b1edf4f35 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java
@@ -1397,4 +1397,42 @@ public class ContentClusterTest extends ContentBaseTest {
assertEquals(2, fleetControllerConfigBuilder.build().max_number_of_groups_allowed_to_be_down());
}
+ private void assertIndexingDocprocEnabled(boolean indexed, boolean force, boolean expEnabled)
+ {
+ String services = "<?xml version='1.0' encoding='UTF-8' ?>" +
+ "<services version='1.0'>" +
+ " <container id='default' version='1.0'>" +
+ " <document-processing/>" +
+ " </container>" +
+ " <content id='search' version='1.0'>" +
+ " <redundancy>1</redundancy>" +
+ " <documents>" +
+ " <document-processing cluster='default'" + (force ? " chain='indexing'" : "") + "/>" +
+ " <document type='type1' mode='" + (indexed ? "index" : "streaming") + "'/>" +
+ " </documents>" +
+ " </content>" +
+ "</services>";
+ VespaModel model = createEnd2EndOneNode(new TestProperties(), services);
+ var searchCluster = model.getContentClusters().get("search").getSearch();
+ assertEquals(expEnabled, searchCluster.getIndexingDocproc().isPresent());
+ }
+
+ @Test
+ void testIndexingDocprocEnabledWhenIndexMode()
+ {
+ assertIndexingDocprocEnabled(true, false, true);
+ }
+
+ @Test
+ void testIndexingDocprocNotEnabledWhenStreamingMode()
+ {
+ assertIndexingDocprocEnabled(false, false, false);
+ }
+
+ @Test
+ void testIndexingDocprocEnabledWhenStreamingModeAndForced()
+ {
+ assertIndexingDocprocEnabled(false, true, true);
+ }
+
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/IndexingAndDocprocRoutingTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/IndexingAndDocprocRoutingTest.java
index 4476e128196..ac9d0ad8724 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/IndexingAndDocprocRoutingTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/IndexingAndDocprocRoutingTest.java
@@ -220,7 +220,7 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
fail("Expected exception");
}
catch (IllegalArgumentException e) {
- assertTrue(e.getMessage().startsWith("Indexing cluster 'musiccluster' specifies the chain 'default' as indexing chain"));
+ assertTrue(e.getMessage().startsWith("content cluster 'musiccluster' specifies the chain 'default' as indexing chain"));
}
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/search/NodeResourcesTuningTest.java b/config-model/src/test/java/com/yahoo/vespa/model/search/NodeResourcesTuningTest.java
index 5831090c261..8e719fa90c3 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/search/NodeResourcesTuningTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/search/NodeResourcesTuningTest.java
@@ -11,7 +11,7 @@ import org.junit.jupiter.api.Test;
import java.util.Arrays;
import java.util.List;
-import static com.yahoo.vespa.model.search.NodeResourcesTuning.reservedMemoryGb;
+import static com.yahoo.vespa.model.Host.memoryOverheadGb;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static com.yahoo.vespa.model.search.NodeResourcesTuning.MB;
import static com.yahoo.vespa.model.search.NodeResourcesTuning.GB;
@@ -33,13 +33,13 @@ public class NodeResourcesTuningTest {
@Test
void require_that_hwinfo_memory_size_is_set() {
- assertEquals(24 * GB, configFromMemorySetting(24 + reservedMemoryGb, 0).hwinfo().memory().size());
- assertEquals(combinedFactor * 24 * GB, configFromMemorySetting(24 + reservedMemoryGb, ApplicationContainerCluster.heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster * 0.01).hwinfo().memory().size(), 1000);
+ assertEquals(24 * GB, configFromMemorySetting(24 + memoryOverheadGb, 0).hwinfo().memory().size());
+ assertEquals(1.9585050869E10, configFromMemorySetting(24 + memoryOverheadGb, ApplicationContainerCluster.heapSizePercentageOfTotalAvailableMemoryWhenCombinedCluster * 0.01).hwinfo().memory().size(), 1000);
}
@Test
void reserved_memory_on_content_node() {
- assertEquals(0.7, reservedMemoryGb, delta);
+ assertEquals(0.7, memoryOverheadGb, delta);
}
private ProtonConfig getProtonMemoryConfig(List<Pair<String, String>> sdAndMode, double gb) {
@@ -54,7 +54,7 @@ public class NodeResourcesTuningTest {
}
private void verify_that_initial_numdocs_is_dependent_of_mode() {
- ProtonConfig cfg = getProtonMemoryConfig(Arrays.asList(new Pair<>("a", "INDEX"), new Pair<>("b", "STREAMING"), new Pair<>("c", "STORE_ONLY")), 24 + reservedMemoryGb);
+ ProtonConfig cfg = getProtonMemoryConfig(Arrays.asList(new Pair<>("a", "INDEX"), new Pair<>("b", "STREAMING"), new Pair<>("c", "STORE_ONLY")), 24 + memoryOverheadGb);
assertEquals(3, cfg.documentdb().size());
assertEquals(1024, cfg.documentdb(0).allocation().initialnumdocs());
assertEquals("a", cfg.documentdb(0).inputdoctypename());
@@ -162,14 +162,14 @@ public class NodeResourcesTuningTest {
@Test
void require_that_summary_cache_max_bytes_is_set_based_on_memory() {
- assertEquals(1 * GB / 25, configFromMemorySetting(1 + reservedMemoryGb, 0).summary().cache().maxbytes());
- assertEquals(256 * GB / 25, configFromMemorySetting(256 + reservedMemoryGb, 0).summary().cache().maxbytes());
+ assertEquals(1 * GB / 25, configFromMemorySetting(1 + memoryOverheadGb, 0).summary().cache().maxbytes());
+ assertEquals(256 * GB / 25, configFromMemorySetting(256 + memoryOverheadGb, 0).summary().cache().maxbytes());
}
@Test
void require_that_summary_cache_memory_is_reduced_with_combined_cluster() {
- assertEquals(combinedFactor * 1 * GB / 25, configFromMemorySetting(1 + reservedMemoryGb, ApplicationContainerCluster.heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster * 0.01).summary().cache().maxbytes(), 1000);
- assertEquals(combinedFactor * 256 * GB / 25, configFromMemorySetting(256 + reservedMemoryGb, ApplicationContainerCluster.heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster * 0.01).summary().cache().maxbytes(), 1000);
+ assertEquals(3.2641751E7, configFromMemorySetting(1 + memoryOverheadGb, ApplicationContainerCluster.heapSizePercentageOfTotalAvailableMemoryWhenCombinedCluster * 0.01).summary().cache().maxbytes(), 1000);
+ assertEquals(8.356288371E9, configFromMemorySetting(256 + memoryOverheadGb, ApplicationContainerCluster.heapSizePercentageOfTotalAvailableMemoryWhenCombinedCluster * 0.01).summary().cache().maxbytes(), 1000);
}
@Test
@@ -191,12 +191,12 @@ public class NodeResourcesTuningTest {
}
private static void assertDocumentStoreMaxFileSize(long expFileSizeBytes, int wantedMemoryGb) {
- assertEquals(expFileSizeBytes, configFromMemorySetting(wantedMemoryGb + reservedMemoryGb, 0).summary().log().maxfilesize());
+ assertEquals(expFileSizeBytes, configFromMemorySetting(wantedMemoryGb + memoryOverheadGb, 0).summary().log().maxfilesize());
}
private static void assertFlushStrategyMemory(long expMemoryBytes, int wantedMemoryGb) {
- assertEquals(expMemoryBytes, configFromMemorySetting(wantedMemoryGb + reservedMemoryGb, 0).flush().memory().maxmemory());
- assertEquals(expMemoryBytes, configFromMemorySetting(wantedMemoryGb + reservedMemoryGb, 0).flush().memory().each().maxmemory());
+ assertEquals(expMemoryBytes, configFromMemorySetting(wantedMemoryGb + memoryOverheadGb, 0).flush().memory().maxmemory());
+ assertEquals(expMemoryBytes, configFromMemorySetting(wantedMemoryGb + memoryOverheadGb, 0).flush().memory().each().maxmemory());
}
private static void assertFlushStrategyTlsSize(long expTlsSizeBytes, int diskGb) {
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/Capacity.java b/config-provisioning/src/main/java/com/yahoo/config/provision/Capacity.java
index f3c214da6ec..735f4afd974 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/Capacity.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/Capacity.java
@@ -1,9 +1,9 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.provision;
+import java.time.Duration;
import java.util.Objects;
import java.util.Optional;
-import java.util.stream.Stream;
/**
* A capacity request.
@@ -35,6 +35,8 @@ public final class Capacity {
if (max.smallerThan(min))
throw new IllegalArgumentException("The max capacity must be larger than the min capacity, but got min " +
min + " and max " + max);
+ if (cloudAccount.isEmpty() && ! clusterInfo.hostTTL().isZero())
+ throw new IllegalArgumentException("Cannot set hostTTL without a custom cloud account");
this.min = min;
this.max = max;
this.groupSize = groupSize;
@@ -105,36 +107,40 @@ public final class Capacity {
}
public static Capacity from(ClusterResources resources, boolean required, boolean canFail) {
- return from(resources, required, canFail, NodeType.tenant);
+ return from(resources, required, canFail, Duration.ZERO);
}
- // TODO: Remove after March 2023
- public static Capacity from(ClusterResources min, ClusterResources max, IntRange groupSize, boolean required, boolean canFail, Optional<CloudAccount> cloudAccount) {
- return new Capacity(min, max, groupSize, required, canFail, NodeType.tenant, cloudAccount, ClusterInfo.empty());
+ public static Capacity from(ClusterResources resources, boolean required, boolean canFail, Duration hostTTL) {
+ return from(resources, required, canFail, NodeType.tenant, hostTTL);
+ }
+
+ public static Capacity from(ClusterResources min, ClusterResources max, IntRange groupSize, boolean required, boolean canFail,
+ Optional<CloudAccount> cloudAccount, ClusterInfo clusterInfo) {
+ return new Capacity(min, max, groupSize, required, canFail, NodeType.tenant, cloudAccount, clusterInfo);
}
- // TODO: Remove after March 2023
+ // TODO: remove at some point, much later than March 2023 ... ?
public static Capacity from(ClusterResources min, ClusterResources max, boolean required, boolean canFail) {
return new Capacity(min, max, IntRange.empty(), required, canFail, NodeType.tenant, Optional.empty(), ClusterInfo.empty());
}
- // TODO: Remove after March 2023
+ // TODO: remove at some point, much later than March 2023 ... ?
public static Capacity from(ClusterResources min, ClusterResources max, boolean required, boolean canFail, Optional<CloudAccount> cloudAccount) {
return new Capacity(min, max, IntRange.empty(), required, canFail, NodeType.tenant, cloudAccount, ClusterInfo.empty());
}
- public static Capacity from(ClusterResources min, ClusterResources max, IntRange groupSize, boolean required, boolean canFail,
- Optional<CloudAccount> cloudAccount, ClusterInfo clusterInfo) {
- return new Capacity(min, max, groupSize, required, canFail, NodeType.tenant, cloudAccount, clusterInfo);
+ // TODO: remove at some point, much later than March 2023 ... ?
+ public static Capacity from(ClusterResources min, ClusterResources max, IntRange groupSize, boolean required, boolean canFail, Optional<CloudAccount> cloudAccount) {
+ return new Capacity(min, max, groupSize, required, canFail, NodeType.tenant, cloudAccount, ClusterInfo.empty());
}
/** Creates this from a node type */
public static Capacity fromRequiredNodeType(NodeType type) {
- return from(new ClusterResources(0, 1, NodeResources.unspecified()), true, false, type);
+ return from(new ClusterResources(0, 1, NodeResources.unspecified()), true, false, type, Duration.ZERO);
}
- private static Capacity from(ClusterResources resources, boolean required, boolean canFail, NodeType type) {
- return new Capacity(resources, resources, IntRange.empty(), required, canFail, type, Optional.empty(), ClusterInfo.empty());
+ private static Capacity from(ClusterResources resources, boolean required, boolean canFail, NodeType type, Duration hostTTL) {
+ return new Capacity(resources, resources, IntRange.empty(), required, canFail, type, Optional.empty(), new ClusterInfo.Builder().hostTTL(hostTTL).build());
}
}
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/CloudAccount.java b/config-provisioning/src/main/java/com/yahoo/config/provision/CloudAccount.java
index 215afbca255..5e14e287a12 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/CloudAccount.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/CloudAccount.java
@@ -1,39 +1,43 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.provision;
-import ai.vespa.validation.PatternedStringWrapper;
-import ai.vespa.validation.Validation;
-
+import java.util.Map;
+import java.util.Objects;
import java.util.regex.Pattern;
+import java.util.stream.Collectors;
/**
* Identifies an account in a public cloud, such as {@link CloudName#AWS} or {@link CloudName#GCP}.
*
* @author mpolden
*/
-public class CloudAccount extends PatternedStringWrapper<CloudAccount> {
+public class CloudAccount implements Comparable<CloudAccount> {
- private static final String EMPTY = "";
- private static final String AWS_ACCOUNT_ID = "[0-9]{12}";
- private static final Pattern AWS_ACCOUNT_ID_PATTERN = Pattern.compile(AWS_ACCOUNT_ID);
- private static final String GCP_PROJECT_ID = "[a-z][a-z0-9-]{4,28}[a-z0-9]";
- private static final Pattern GCP_PROJECT_ID_PATTERN = Pattern.compile(GCP_PROJECT_ID);
+ private record CloudMeta(String accountType, Pattern pattern) {
+ private boolean matches(String account) { return pattern.matcher(account).matches(); }
+ }
+ private static final Map<String, CloudMeta> META_BY_CLOUD = Map.of(
+ "aws", new CloudMeta("Account ID", Pattern.compile("[0-9]{12}")),
+ "gcp", new CloudMeta("Project ID", Pattern.compile("[a-z][a-z0-9-]{4,28}[a-z0-9]")));
/** Empty value. When this is used, either implicitly or explicitly, the zone will use its default account */
- public static final CloudAccount empty = new CloudAccount("", EMPTY, "cloud account");
+ public static final CloudAccount empty = new CloudAccount("", CloudName.DEFAULT);
- /** Verifies accountId is a valid AWS account ID, or throw an IllegalArgumentException. */
- public static void requireAwsAccountId(String accountId) {
- Validation.requireMatch(accountId, "AWS account ID", AWS_ACCOUNT_ID_PATTERN);
- }
+ private final String account;
+ private final CloudName cloudName;
- /** Verifies accountId is a valid GCP project ID, or throw an IllegalArgumentException. */
- public static void requireGcpProjectId(String projectId) {
- Validation.requireMatch(projectId, "GCP project ID", GCP_PROJECT_ID_PATTERN);
+ private CloudAccount(String account, CloudName cloudName) {
+ this.account = account;
+ this.cloudName = cloudName;
}
- private CloudAccount(String value, String regex, String description) {
- super(value, Pattern.compile("^(" + regex + ")$"), description);
+ public String account() { return account; }
+ public CloudName cloudName() { return cloudName; }
+
+ /** Returns the serialized value of this account that can be deserialized with {@link CloudAccount#from} */
+ public final String value() {
+ if (isUnspecified()) return account;
+ return cloudName.value() + ':' + account;
}
public boolean isUnspecified() {
@@ -47,27 +51,56 @@ public class CloudAccount extends PatternedStringWrapper<CloudAccount> {
!equals(zone.cloud().account());
}
- /** Verifies this account is a valid AWS account ID, or throw an IllegalArgumentException. */
- public void requireAwsAccountId() {
- requireAwsAccountId(value());
+ @Override
+ public String toString() {
+ return isUnspecified() ? "unspecified account" : "account '" + account + "' in " + cloudName;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ CloudAccount that = (CloudAccount) o;
+ return account.equals(that.account) && cloudName.equals(that.cloudName);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(account, cloudName);
}
- /** Verifies this account is a valid GCP project ID, or throw an IllegalArgumentException. */
- public void requireGcpProjectId() {
- requireGcpProjectId(value());
+ @Override
+ public int compareTo(CloudAccount o) {
+ return this.value().compareTo(o.value());
}
+
public static CloudAccount from(String cloudAccount) {
- return switch (cloudAccount) {
+ int index = cloudAccount.indexOf(':');
+ if (index < 0) {
// Tenants are allowed to specify "default" in services.xml.
- case "", "default" -> empty;
- default -> new CloudAccount(cloudAccount, AWS_ACCOUNT_ID + "|" + GCP_PROJECT_ID, "cloud account");
- };
+ if (cloudAccount.isEmpty() || cloudAccount.equals("default"))
+ return empty;
+ if (META_BY_CLOUD.get("aws").matches(cloudAccount))
+ return new CloudAccount(cloudAccount, CloudName.AWS);
+ if (META_BY_CLOUD.get("gcp").matches(cloudAccount)) // TODO (freva): Remove July 2023
+ return new CloudAccount(cloudAccount, CloudName.GCP);
+ throw illegal(cloudAccount, "Must be on format '<cloud-name>:<account>' or 'default'");
+ }
+
+ String cloud = cloudAccount.substring(0, index);
+ String account = cloudAccount.substring(index + 1);
+ CloudMeta cloudMeta = META_BY_CLOUD.get(cloud);
+ if (cloudMeta == null)
+ throw illegal(cloudAccount, "Cloud name must be one of: " + META_BY_CLOUD.keySet().stream().sorted().collect(Collectors.joining(", ")));
+
+ if (!cloudMeta.matches(account))
+ throw illegal(cloudAccount, cloudMeta.accountType + " must match '" + cloudMeta.pattern.pattern() + "'");
+ return new CloudAccount(account, CloudName.from(cloud));
}
- @Override
- public String toString() {
- return isUnspecified() ? "unspecified account" : "account '" + value() + "'";
+ private static IllegalArgumentException illegal(String cloudAccount, String details) {
+ return new IllegalArgumentException("Invalid cloud account '" + cloudAccount + "': " + details);
}
}
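
A minimal usage sketch of the account format introduced above, based only on the parsing and serialization rules visible in this patch (the account values are the same ones exercised by the new CloudAccountTest; the local variable names are illustrative):

// Both the legacy bare form and the new '<cloud-name>:<account>' form parse to the same account,
// and value() serializes every non-empty account with the cloud-name prefix.
CloudAccount aws = CloudAccount.from("aws:123456789012");
assert aws.equals(CloudAccount.from("123456789012"));   // legacy format, inferred as aws
assert CloudName.AWS.equals(aws.cloudName());
assert "aws:123456789012".equals(aws.value());
assert CloudAccount.from("default").isUnspecified();    // "" and "default" both map to CloudAccount.empty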
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/CloudName.java b/config-provisioning/src/main/java/com/yahoo/config/provision/CloudName.java
index ba262136abe..e1d7afdc9f0 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/CloudName.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/CloudName.java
@@ -3,7 +3,6 @@ package com.yahoo.config.provision;
import ai.vespa.validation.PatternedStringWrapper;
-import java.util.Objects;
import java.util.regex.Pattern;
/**
@@ -14,17 +13,23 @@ import java.util.regex.Pattern;
public class CloudName extends PatternedStringWrapper<CloudName> {
private static final Pattern pattern = Pattern.compile("[a-z]([a-z0-9-]*[a-z0-9])*");
- public static final CloudName AWS = from("aws");
- public static final CloudName GCP = from("gcp");
- public static final CloudName DEFAULT = from("default");
- public static final CloudName YAHOO = from("yahoo");
+ public static final CloudName AWS = new CloudName("aws");
+ public static final CloudName GCP = new CloudName("gcp");
+ public static final CloudName DEFAULT = new CloudName("default");
+ public static final CloudName YAHOO = new CloudName("yahoo");
private CloudName(String cloud) {
super(cloud, pattern, "cloud name");
}
public static CloudName from(String cloud) {
- return new CloudName(cloud);
+ return switch (cloud) {
+ case "aws" -> AWS;
+ case "gcp" -> GCP;
+ case "default" -> DEFAULT;
+ case "yahoo" -> YAHOO;
+ default -> new CloudName(cloud);
+ };
}
}
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/ClusterInfo.java b/config-provisioning/src/main/java/com/yahoo/config/provision/ClusterInfo.java
index fe8acb0c3c0..d9076557ac7 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/ClusterInfo.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/ClusterInfo.java
@@ -3,6 +3,8 @@ package com.yahoo.config.provision;
import java.time.Duration;
import java.util.Objects;
+import static ai.vespa.validation.Validation.requireAtLeast;
+
/**
* Auxiliary information about a cluster, provided by the config model to the node repo during a
* capacity request.
@@ -14,13 +16,18 @@ public class ClusterInfo {
private static final ClusterInfo empty = new ClusterInfo.Builder().build();
private final Duration bcpDeadline;
+ private final Duration hostTTL;
private ClusterInfo(Builder builder) {
this.bcpDeadline = builder.bcpDeadline;
+ this.hostTTL = builder.hostTTL;
+ requireAtLeast(hostTTL, "host TTL", Duration.ZERO);
}
public Duration bcpDeadline() { return bcpDeadline; }
+ public Duration hostTTL() { return hostTTL; }
+
public static ClusterInfo empty() { return empty; }
public boolean isEmpty() { return this.equals(empty); }
@@ -30,28 +37,35 @@ public class ClusterInfo {
if (o == this) return true;
if ( ! (o instanceof ClusterInfo other)) return false;
if ( ! other.bcpDeadline.equals(this.bcpDeadline)) return false;
+ if ( ! other.hostTTL.equals(this.hostTTL)) return false;
return true;
}
@Override
public int hashCode() {
- return Objects.hash(bcpDeadline);
+ return Objects.hash(bcpDeadline, hostTTL);
}
@Override
public String toString() {
- return "cluster info: [bcp deadline: " + bcpDeadline + "]";
+ return "cluster info: [bcp deadline: " + bcpDeadline + ", host TTL: " + hostTTL + "]";
}
public static class Builder {
- private Duration bcpDeadline = Duration.ofMinutes(0);
+ private Duration bcpDeadline = Duration.ZERO;
+ private Duration hostTTL = Duration.ZERO;
public Builder bcpDeadline(Duration duration) {
this.bcpDeadline = Objects.requireNonNull(duration);
return this;
}
+ public Builder hostTTL(Duration duration) {
+ this.hostTTL = Objects.requireNonNull(duration);
+ return this;
+ }
+
public ClusterInfo build() {
return new ClusterInfo(this);
}
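
A short sketch of how the new hostTTL field travels with a capacity request; per the new check added to the Capacity constructor in this patch, it is only accepted together with an explicit cloud account (the resource numbers below are arbitrary):

var resources = new ClusterResources(4, 2, new NodeResources(1, 2, 3, 4));
var info = new ClusterInfo.Builder().hostTTL(Duration.ofHours(4)).build();

// Accepted: hostTTL together with a custom cloud account
Capacity.from(resources, resources, IntRange.empty(), false, true,
              Optional.of(CloudAccount.from("aws:123456789012")), info);

// Rejected with "Cannot set hostTTL without a custom cloud account"
Capacity.from(resources, resources, IntRange.empty(), false, true, Optional.empty(), info);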
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/NodeResources.java b/config-provisioning/src/main/java/com/yahoo/config/provision/NodeResources.java
index d1fd409fc93..a431dd61b0d 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/NodeResources.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/NodeResources.java
@@ -213,6 +213,7 @@ public class NodeResources {
public boolean vcpuIsUnspecified() { return vcpu == 0; }
public boolean memoryGbIsUnspecified() { return memoryGb == 0; }
public boolean diskGbIsUnspecified() { return diskGb == 0; }
+ public boolean bandwidthGbpsIsUnspecified() { return bandwidthGbps == 0; }
/** Returns the standard cost of these resources, in dollars per hour */
public double cost() {
@@ -267,6 +268,19 @@ public class NodeResources {
return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType, architecture, gpuResources);
}
+ public NodeResources withUnspecifiedNumbersFrom(NodeResources fullySpecified) {
+ var resources = this;
+ if (resources.vcpuIsUnspecified())
+ resources = resources.withVcpu(fullySpecified.vcpu());
+ if (resources.memoryGbIsUnspecified())
+ resources = resources.withMemoryGb(fullySpecified.memoryGb());
+ if (resources.diskGbIsUnspecified())
+ resources = resources.withDiskGb(fullySpecified.diskGb());
+ if (resources.bandwidthGbpsIsUnspecified())
+ resources = resources.withBandwidthGbps(fullySpecified.bandwidthGbps());
+ return resources;
+ }
+
/** Returns this with disk speed, storage type and architecture set to any */
public NodeResources justNumbers() {
if (isUnspecified()) return unspecified();
@@ -362,7 +376,7 @@ public class NodeResources {
appendDouble(sb, vcpu);
sb.append(", memory: ");
appendDouble(sb, memoryGb);
- sb.append(" Gb, disk ");
+ sb.append(" Gb, disk: ");
appendDouble(sb, diskGb);
sb.append(" Gb");
if (bandwidthGbps > 0) {
@@ -461,7 +475,7 @@ public class NodeResources {
throw new IllegalStateException("Cannot perform this on unspecified resources");
}
- // Returns squared euclidean distance of the relevant numerical values of two node resources
+ // Returns squared Euclidean distance of the relevant numerical values of two node resources
public double distanceTo(NodeResources other) {
if ( ! this.diskSpeed().compatibleWith(other.diskSpeed())) return Double.MAX_VALUE;
if ( ! this.storageType().compatibleWith(other.storageType())) return Double.MAX_VALUE;
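
A small sketch of the new withUnspecifiedNumbersFrom helper: any dimension left at 0 (the "unspecified" sentinel used by the *IsUnspecified accessors above) is copied from the fully specified argument. The concrete numbers are illustrative only:

NodeResources partial = new NodeResources(0, 8, 0, 0);   // vcpu, disk and bandwidth unspecified
NodeResources full    = new NodeResources(4, 16, 100, 1);
NodeResources merged  = partial.withUnspecifiedNumbersFrom(full);
// merged: vcpu 4, memory 8 Gb, disk 100 Gb, bandwidth 1 Gbps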
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/QuotaExceededException.java b/config-provisioning/src/main/java/com/yahoo/config/provision/QuotaExceededException.java
new file mode 100644
index 00000000000..12289f44c6a
--- /dev/null
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/QuotaExceededException.java
@@ -0,0 +1,17 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.config.provision;
+
+/**
+ * @author hmusum
+ */
+public class QuotaExceededException extends RuntimeException {
+
+ public QuotaExceededException(Throwable t) {
+ super(t);
+ }
+
+ public QuotaExceededException(String message) {
+ super(message);
+ }
+
+}
diff --git a/config-provisioning/src/test/java/com/yahoo/config/provision/CapacityTest.java b/config-provisioning/src/test/java/com/yahoo/config/provision/CapacityTest.java
index a7614bbc016..b3d2e0afa7d 100644
--- a/config-provisioning/src/test/java/com/yahoo/config/provision/CapacityTest.java
+++ b/config-provisioning/src/test/java/com/yahoo/config/provision/CapacityTest.java
@@ -3,9 +3,11 @@ package com.yahoo.config.provision;
import org.junit.jupiter.api.Test;
+import java.time.Duration;
import java.util.Optional;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.fail;
/**
@@ -35,20 +37,23 @@ public class CapacityTest {
new ClusterResources(4, 2, new NodeResources(1, 2, 3, 4)));
assertValidationFailure(new ClusterResources(4, 2, new NodeResources(1, 2, 3, 5)),
new ClusterResources(4, 2, new NodeResources(1, 2, 3, 4)));
- // It's enough than one dimension is smaller also when the others are larger
+ // It's enough that one dimension is smaller also when the others are larger
assertValidationFailure(new ClusterResources(4, 2, new NodeResources(1, 2, 3, 4)),
new ClusterResources(8, 4, new NodeResources(2, 1, 6, 8)));
+
+ assertEquals("Cannot set hostTTL without a custom cloud account",
+ assertThrows(IllegalArgumentException.class,
+ () -> Capacity.from(new ClusterResources(4, 2, new NodeResources(1, 2, 3, 4)),
+ new ClusterResources(4, 2, new NodeResources(1, 2, 3, 4)),
+ IntRange.empty(), false, true, Optional.empty(), new ClusterInfo.Builder().hostTTL(Duration.ofSeconds(1)).build()))
+ .getMessage());
}
private void assertValidationFailure(ClusterResources min, ClusterResources max) {
- try {
- Capacity.from(min, max, IntRange.empty(), false, true, Optional.empty(), ClusterInfo.empty());
- fail("Expected exception with min " + min + " and max " + max);
- }
- catch (IllegalArgumentException e) {
- assertEquals("The max capacity must be larger than the min capacity, but got min " + min + " and max " + max,
- e.getMessage());
- }
+ assertEquals("The max capacity must be larger than the min capacity, but got min " + min + " and max " + max,
+ assertThrows(IllegalArgumentException.class,
+ () -> Capacity.from(min, max, IntRange.empty(), false, true, Optional.empty(), ClusterInfo.empty()))
+ .getMessage());
}
}
diff --git a/config-provisioning/src/test/java/com/yahoo/config/provision/CloudAccountTest.java b/config-provisioning/src/test/java/com/yahoo/config/provision/CloudAccountTest.java
new file mode 100644
index 00000000000..2a994ac607e
--- /dev/null
+++ b/config-provisioning/src/test/java/com/yahoo/config/provision/CloudAccountTest.java
@@ -0,0 +1,75 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.config.provision;
+
+import org.junit.jupiter.api.Test;
+
+import java.util.List;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+/**
+ * @author freva
+ */
+class CloudAccountTest {
+
+ @Test
+ void aws_accounts() {
+ CloudAccount oldFormat = CloudAccount.from("123456789012");
+ CloudAccount newFormat = CloudAccount.from("aws:123456789012");
+ assertEquals(oldFormat, newFormat);
+
+ for (CloudAccount account : List.of(oldFormat, newFormat)) {
+ assertFalse(account.isUnspecified());
+ assertEquals(account, CloudAccount.from(account.value()));
+ assertEquals("123456789012", account.account());
+ assertEquals(CloudName.AWS, account.cloudName());
+ assertEquals("aws:123456789012", account.value());
+ }
+ }
+
+ @Test
+ void gcp_accounts() {
+ CloudAccount oldFormat = CloudAccount.from("my-project");
+ CloudAccount newFormat = CloudAccount.from("gcp:my-project");
+ assertEquals(oldFormat, newFormat);
+
+ for (CloudAccount account : List.of(oldFormat, newFormat)) {
+ assertFalse(account.isUnspecified());
+ assertEquals(account, CloudAccount.from(account.value()));
+ assertEquals("my-project", account.account());
+ assertEquals(CloudName.GCP, account.cloudName());
+ assertEquals("gcp:my-project", account.value());
+ }
+ }
+
+ @Test
+ void default_accounts() {
+ CloudAccount variant1 = CloudAccount.from("");
+ CloudAccount variant2 = CloudAccount.from("default");
+ assertEquals(variant1, variant2);
+
+ for (CloudAccount account : List.of(variant1, variant2)) {
+ assertTrue(account.isUnspecified());
+ assertEquals(account, CloudAccount.from(account.value()));
+ assertEquals("", account.account());
+ assertEquals(CloudName.DEFAULT, account.cloudName());
+ assertEquals("", account.value());
+ }
+ }
+
+ @Test
+ void invalid_accounts() {
+ assertInvalidAccount("aws:123", "Invalid cloud account 'aws:123': Account ID must match '[0-9]{12}'");
+ assertInvalidAccount("gcp:123", "Invalid cloud account 'gcp:123': Project ID must match '[a-z][a-z0-9-]{4,28}[a-z0-9]'");
+ assertInvalidAccount("$something", "Invalid cloud account '$something': Must be on format '<cloud-name>:<account>' or 'default'");
+ assertInvalidAccount("unknown:account", "Invalid cloud account 'unknown:account': Cloud name must be one of: aws, gcp");
+ }
+
+ private static void assertInvalidAccount(String account, String message) {
+ IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> CloudAccount.from(account));
+ assertEquals(message, exception.getMessage());
+ }
+}
diff --git a/config-provisioning/src/test/java/com/yahoo/config/provision/CloudNameTest.java b/config-provisioning/src/test/java/com/yahoo/config/provision/CloudNameTest.java
new file mode 100644
index 00000000000..b030233d459
--- /dev/null
+++ b/config-provisioning/src/test/java/com/yahoo/config/provision/CloudNameTest.java
@@ -0,0 +1,22 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.config.provision;
+
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+/**
+ * @author freva
+ */
+class CloudNameTest {
+
+ @Test
+ void returns_same_instance_for_known_clouds() {
+ assertSame(CloudName.from("aws"), CloudName.AWS);
+ assertSame(CloudName.from("gcp"), CloudName.GCP);
+ assertSame(CloudName.from("default"), CloudName.DEFAULT);
+ assertSame(CloudName.from("yahoo"), CloudName.YAHOO);
+ assertThrows(IllegalArgumentException.class, () -> CloudName.from("aWs")); // Must be lower case
+ }
+}
diff --git a/config-provisioning/src/test/java/com/yahoo/config/provision/NodeResourcesTest.java b/config-provisioning/src/test/java/com/yahoo/config/provision/NodeResourcesTest.java
index ae052c03556..230d28e04aa 100644
--- a/config-provisioning/src/test/java/com/yahoo/config/provision/NodeResourcesTest.java
+++ b/config-provisioning/src/test/java/com/yahoo/config/provision/NodeResourcesTest.java
@@ -6,6 +6,8 @@ import org.junit.jupiter.api.Test;
import java.util.function.Supplier;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
/**
@@ -20,11 +22,11 @@ public class NodeResourcesTest {
@Test
void testToString() {
- assertEquals("[vcpu: 1.0, memory: 10.0 Gb, disk 100.0 Gb, architecture: any]",
+ assertEquals("[vcpu: 1.0, memory: 10.0 Gb, disk: 100.0 Gb, architecture: any]",
new NodeResources(1., 10., 100., 0).toString());
- assertEquals("[vcpu: 0.3, memory: 3.3 Gb, disk 33.3 Gb, bandwidth: 0.3 Gbps, architecture: any]",
+ assertEquals("[vcpu: 0.3, memory: 3.3 Gb, disk: 33.3 Gb, bandwidth: 0.3 Gbps, architecture: any]",
new NodeResources(1 / 3., 10 / 3., 100 / 3., 0.3).toString());
- assertEquals("[vcpu: 0.7, memory: 9.0 Gb, disk 66.7 Gb, bandwidth: 0.7 Gbps, architecture: any]",
+ assertEquals("[vcpu: 0.7, memory: 9.0 Gb, disk: 66.7 Gb, bandwidth: 0.7 Gbps, architecture: any]",
new NodeResources(2 / 3., 8.97, 200 / 3., 0.67).toString());
}
@@ -37,6 +39,33 @@ public class NodeResourcesTest {
}
@Test
+ void testSatisfies() {
+ var hostResources = new NodeResources(1, 2, 3, 1);
+ assertTrue(hostResources.satisfies(new NodeResources(1, 2, 3, 1)));
+ assertTrue(hostResources.satisfies(new NodeResources(1, 1, 1, 1)));
+ assertFalse(hostResources.satisfies(new NodeResources(2, 2, 3, 1)));
+ assertFalse(hostResources.satisfies(new NodeResources(1, 3, 3, 1)));
+ assertFalse(hostResources.satisfies(new NodeResources(1, 2, 4, 1)));
+
+ var gpuHostResources = new NodeResources(1, 2, 3, 1,
+ NodeResources.DiskSpeed.fast,
+ NodeResources.StorageType.local,
+ NodeResources.Architecture.x86_64,
+ new NodeResources.GpuResources(1, 16));
+ assertTrue(gpuHostResources.satisfies(new NodeResources(1, 2, 3, 1,
+ NodeResources.DiskSpeed.fast,
+ NodeResources.StorageType.local,
+ NodeResources.Architecture.x86_64,
+ new NodeResources.GpuResources(1, 16))));
+ assertFalse(gpuHostResources.satisfies(new NodeResources(1, 2, 3, 1,
+ NodeResources.DiskSpeed.fast,
+ NodeResources.StorageType.local,
+ NodeResources.Architecture.x86_64,
+ new NodeResources.GpuResources(1, 32))));
+ assertFalse(hostResources.satisfies(gpuHostResources));
+ }
+
+ @Test
void benchmark() {
NodeResources [] resouces = new NodeResources[100];
for (int i = 0; i < resouces.length; i++) {
diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileReferencesAndDownloadsMaintainer.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileReferencesAndDownloadsMaintainer.java
index 17025b10568..627a15aab65 100644
--- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileReferencesAndDownloadsMaintainer.java
+++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileReferencesAndDownloadsMaintainer.java
@@ -34,7 +34,7 @@ class FileReferencesAndDownloadsMaintainer implements Runnable {
private static final Logger log = Logger.getLogger(FileReferencesAndDownloadsMaintainer.class.getName());
private static final File defaultUrlDownloadDir = UrlDownloadRpcServer.downloadDir;
private static final File defaultFileReferencesDownloadDir = FileDownloader.defaultDownloadDirectory;
- private static final Duration defaultDurationToKeepFiles = Duration.ofDays(21);
+ private static final Duration defaultDurationToKeepFiles = Duration.ofDays(30);
private static final Duration interval = Duration.ofMinutes(1);
private final ScheduledExecutorService executor =
diff --git a/config/pom.xml b/config/pom.xml
index 83a25b8631d..ae80d00923b 100755
--- a/config/pom.xml
+++ b/config/pom.xml
@@ -176,6 +176,10 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
+ <configuration>
+ <bundleType>CORE</bundleType>
+ <suppressWarningMissingImportPackages>true</suppressWarningMissingImportPackages>
+ </configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
diff --git a/configdefinitions/pom.xml b/configdefinitions/pom.xml
index 51221c4899f..e163f0292b5 100644
--- a/configdefinitions/pom.xml
+++ b/configdefinitions/pom.xml
@@ -32,6 +32,10 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
+ <configuration>
+ <bundleType>CORE</bundleType>
+ <suppressWarningMissingImportPackages>true</suppressWarningMissingImportPackages>
+ </configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
diff --git a/configdefinitions/src/main/java/com/yahoo/embedding/huggingface/package-info.java b/configdefinitions/src/main/java/com/yahoo/embedding/huggingface/package-info.java
new file mode 100644
index 00000000000..7bcc994e616
--- /dev/null
+++ b/configdefinitions/src/main/java/com/yahoo/embedding/huggingface/package-info.java
@@ -0,0 +1,9 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package com.yahoo.embedding.huggingface;
+
+import com.yahoo.osgi.annotation.ExportPackage;
\ No newline at end of file
diff --git a/configdefinitions/src/main/java/com/yahoo/language/huggingface/config/package-info.java b/configdefinitions/src/main/java/com/yahoo/language/huggingface/config/package-info.java
new file mode 100644
index 00000000000..fb9048b5fb4
--- /dev/null
+++ b/configdefinitions/src/main/java/com/yahoo/language/huggingface/config/package-info.java
@@ -0,0 +1,9 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package com.yahoo.language.huggingface.config;
+
+import com.yahoo.osgi.annotation.ExportPackage;
\ No newline at end of file
diff --git a/configdefinitions/src/vespa/CMakeLists.txt b/configdefinitions/src/vespa/CMakeLists.txt
index 6a737e6f57d..496e92916ee 100644
--- a/configdefinitions/src/vespa/CMakeLists.txt
+++ b/configdefinitions/src/vespa/CMakeLists.txt
@@ -84,4 +84,8 @@ vespa_generate_config(configdefinitions proton.def)
install_config_definition(proton.def vespa.config.search.core.proton.def)
vespa_generate_config(configdefinitions hwinfo.def)
vespa_generate_config(configdefinitions dataplane-proxy.def)
-install_config_definition(dataplane-proxy.def cloud.config.dataplane-proxy.def)
\ No newline at end of file
+install_config_definition(dataplane-proxy.def cloud.config.dataplane-proxy.def)
+install_config_definition(hugging-face-embedder.def embedding.huggingface.hugging-face-embedder.def)
+install_config_definition(hugging-face-tokenizer.def language.huggingface.config.hugging-face-tokenizer.def)
+install_config_definition(bert-base-embedder.def embedding.bert-base-embedder.def)
+
diff --git a/model-integration/src/main/resources/configdefinitions/embedding.bert-base-embedder.def b/configdefinitions/src/vespa/bert-base-embedder.def
index e37a33d3b81..2d8e840377b 100644
--- a/model-integration/src/main/resources/configdefinitions/embedding.bert-base-embedder.def
+++ b/configdefinitions/src/vespa/bert-base-embedder.def
@@ -28,4 +28,5 @@ transformerOutput string default=output_0
onnxExecutionMode enum { parallel, sequential } default=sequential
onnxInterOpThreads int default=1
onnxIntraOpThreads int default=-4 # n=number of threads -> n<0: CPUs/(-n), n==0: CPUs, n>0: n
-onnxGpuDevice int default=-1
+# GPU device id, -1 for CPU
+onnxGpuDevice int default=0
diff --git a/configdefinitions/src/vespa/fleetcontroller.def b/configdefinitions/src/vespa/fleetcontroller.def
index 93a20e4ee0d..c3e161eb038 100644
--- a/configdefinitions/src/vespa/fleetcontroller.def
+++ b/configdefinitions/src/vespa/fleetcontroller.def
@@ -29,6 +29,7 @@ master_zookeeper_cooldown_period double default=60.0
## If set to 1, only master will gather state. If set higher, others will
## also do so, prioritizing those fleetcontrollers likely to be the ones to
## take over if the master fails.
+# TODO: Deprecated, not used anymore, remove in Vespa 9
state_gather_count int default=1
## Location of ZooKeeper servers
diff --git a/model-integration/src/main/resources/configdefinitions/hugging-face-embedder.def b/configdefinitions/src/vespa/hugging-face-embedder.def
index 97515818f14..7ea4227b3cd 100644
--- a/model-integration/src/main/resources/configdefinitions/hugging-face-embedder.def
+++ b/configdefinitions/src/vespa/hugging-face-embedder.def
@@ -21,8 +21,11 @@ transformerOutput string default=last_hidden_state
# Normalize tensors from tokenizer
normalize bool default=false
+poolingStrategy enum { cls, mean } default=mean
+
# Settings for ONNX model evaluation
transformerExecutionMode enum { parallel, sequential } default=sequential
transformerInterOpThreads int default=1
transformerIntraOpThreads int default=-4
-transformerGpuDevice int default=-1
+# GPU device id, -1 for CPU
+transformerGpuDevice int default=0
diff --git a/linguistics-components/src/main/resources/configdefinitions/language.huggingface.hugging-face-tokenizer.def b/configdefinitions/src/vespa/hugging-face-tokenizer.def
index 5e58547879c..18b3631e494 100644
--- a/linguistics-components/src/main/resources/configdefinitions/language.huggingface.hugging-face-tokenizer.def
+++ b/configdefinitions/src/vespa/hugging-face-tokenizer.def
@@ -1,6 +1,6 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-namespace=language.huggingface
+namespace=language.huggingface.config
# The language a model is for, one of the language tags in com.yahoo.language.Language.
# Use "unknown" for models to be used with any language.
@@ -8,4 +8,6 @@ model[].language string
# The path to the model relative to the application package root
model[].path model
-addSpecialTokens bool default=true \ No newline at end of file
+addSpecialTokens bool default=true
+maxLength int default=-1
+truncation bool default=false \ No newline at end of file
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
index 81de2e06b6c..0d3cea59a46 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
@@ -4,8 +4,6 @@ package com.yahoo.vespa.config.server;
import ai.vespa.http.DomainName;
import ai.vespa.http.HttpURL;
import ai.vespa.http.HttpURL.Query;
-import ai.vespa.util.http.hc5.DefaultHttpClientBuilder;
-import ai.vespa.util.http.hc5.VespaHttpClientBuilder;
import com.yahoo.cloud.config.ConfigserverConfig;
import com.yahoo.component.Version;
import com.yahoo.component.annotation.Inject;
@@ -21,10 +19,8 @@ import com.yahoo.config.provision.ApplicationTransaction;
import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.EndpointsChecker;
import com.yahoo.config.provision.EndpointsChecker.Availability;
-import com.yahoo.config.provision.EndpointsChecker.HealthCheckerProvider;
-import com.yahoo.config.provision.EndpointsChecker.HealthChecker;
import com.yahoo.config.provision.EndpointsChecker.Endpoint;
-import com.yahoo.config.provision.EndpointsChecker.Status;
+import com.yahoo.config.provision.EndpointsChecker.HealthCheckerProvider;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.HostFilter;
import com.yahoo.config.provision.InfraDeployer;
@@ -95,10 +91,7 @@ import com.yahoo.vespa.defaults.Defaults;
import com.yahoo.vespa.flags.FlagSource;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.orchestrator.Orchestrator;
-import org.apache.hc.client5.http.classic.methods.HttpGet;
-import org.apache.hc.client5.http.impl.classic.CloseableHttpClient;
-import org.apache.hc.core5.http.HttpHeaders;
-import org.apache.hc.core5.http.message.BasicHeader;
+
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
@@ -124,7 +117,6 @@ import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
-import static ai.vespa.http.HttpURL.Path.parse;
import static com.yahoo.config.model.api.container.ContainerServiceType.CONTAINER;
import static com.yahoo.config.model.api.container.ContainerServiceType.LOGSERVER_CONTAINER;
import static com.yahoo.vespa.config.server.application.ConfigConvergenceChecker.ServiceListResponse;
@@ -185,7 +177,7 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye
infraDeployerProvider.getInfraDeployer(),
configConvergenceChecker,
httpProxy,
- createEndpointsChecker(configserverConfig, zone, healthCheckers.getHealthChecker()),
+ EndpointsChecker.of(healthCheckers.getHealthChecker()),
configserverConfig,
orchestrator,
new LogRetriever(),
@@ -1227,37 +1219,4 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye
}
- private static EndpointsChecker createEndpointsChecker(ConfigserverConfig config, Zone zone, HealthChecker healthChecker) {
- CloseableHttpClient client = (SystemName.from(config.system()).isPublic()
- ? DefaultHttpClientBuilder.create(() -> null, "hosted-vespa-convergence-health-checker")
- : VespaHttpClientBuilder.custom().apacheBuilder().setUserAgent("hosted-vespa-convergence-health-checker"))
- .setDefaultHeaders(List.of(new BasicHeader(HttpHeaders.CONNECTION, "close")))
- .build();
- return EndpointsChecker.of(endpoint -> {
- Availability health = healthChecker.healthy(endpoint);
- if ( health.status() != Status.available // Unhealthy targets is the root cause, so return those details.
- || endpoint.isPublic() // Controller checks /status.html on its own.
- || endpoint.account().isEnclave(zone)) // Private endpoints in enclave are not reachable by us.
- return health;
-
- int remainingFailures = 3;
- int remainingSuccesses = 10;
- while (remainingSuccesses > 0 && remainingFailures > 0) {
- try {
- if (client.execute(new HttpGet(endpoint.url().withPath(parse("/status.html")).asURI()),
- response -> response.getCode() == 200))
- remainingSuccesses--;
- else
- throw new IOException("got non-200 status code");
- }
- catch (Exception e) {
- log.log(Level.FINE, e, () -> "Failed to check " + endpoint + "status.html: " + e.getMessage());
- if (--remainingFailures == 0)
- return new Availability(Status.containersUnhealthy, "Failed to get enough healthy responses from " + endpoint.url());
- }
- }
- return Availability.ready;
- });
- }
-
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/application/ConfigConvergenceChecker.java b/configserver/src/main/java/com/yahoo/vespa/config/server/application/ConfigConvergenceChecker.java
index 040f230a40e..cb8a4fec77a 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/application/ConfigConvergenceChecker.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/application/ConfigConvergenceChecker.java
@@ -4,8 +4,8 @@ package com.yahoo.vespa.config.server.application;
import ai.vespa.util.http.hc5.VespaAsyncHttpClientBuilder;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
-import com.yahoo.component.annotation.Inject;
import com.yahoo.component.AbstractComponent;
+import com.yahoo.component.annotation.Inject;
import com.yahoo.concurrent.DaemonThreadFactory;
import com.yahoo.config.model.api.ApplicationClusterInfo;
import com.yahoo.config.model.api.HostInfo;
@@ -103,7 +103,7 @@ public class ConfigConvergenceChecker extends AbstractComponent {
.filter(serviceInfo -> shouldCheckService(hostsToCheck, application, serviceInfo))
.forEach(service -> getStatePort(service).ifPresent(port -> servicesToCheck.add(service))));
- log.log(Level.FINE, "Services to check for config convergence: " + servicesToCheck);
+ log.log(Level.FINE, () -> "Services to check for config convergence: " + servicesToCheck);
return getServiceGenerations(servicesToCheck, timeoutPerService);
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpErrorResponse.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpErrorResponse.java
index 3b5269cdf11..c87d77eaf07 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpErrorResponse.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpErrorResponse.java
@@ -53,7 +53,8 @@ public class HttpErrorResponse extends HttpResponse {
LOAD_BALANCER_NOT_READY,
CONFIG_NOT_CONVERGED,
REINDEXING_STATUS_UNAVAILABLE,
- PRECONDITION_FAILED
+ PRECONDITION_FAILED,
+ QUOTA_EXCEEDED
}
public static HttpErrorResponse notFoundError(String msg) {
@@ -120,6 +121,10 @@ public class HttpErrorResponse extends HttpResponse {
return new HttpErrorResponse(PRECONDITION_FAILED, ErrorCode.PRECONDITION_FAILED.name(), msg);
}
+ public static HttpResponse quotaExceeded(String msg) {
+ return new HttpErrorResponse(BAD_REQUEST, ErrorCode.QUOTA_EXCEEDED.name(), msg);
+ }
+
@Override
public void render(OutputStream stream) throws IOException {
new JsonFormat(true).encode(stream, slime);
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpHandler.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpHandler.java
index a0e814f32d8..58651af54f3 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpHandler.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpHandler.java
@@ -5,6 +5,7 @@ import com.yahoo.config.provision.ApplicationLockException;
import com.yahoo.config.provision.CertificateNotReadyException;
import com.yahoo.config.provision.NodeAllocationException;
import com.yahoo.config.provision.ParentHostUnavailableException;
+import com.yahoo.config.provision.QuotaExceededException;
import com.yahoo.config.provision.exception.ActivationConflictException;
import com.yahoo.config.provision.exception.LoadBalancerServiceException;
import com.yahoo.container.jdisc.HttpRequest;
@@ -73,6 +74,8 @@ public class HttpHandler extends ThreadedHttpRequestHandler {
return HttpErrorResponse.reindexingStatusUnavailable(getMessage(e, request));
} catch (PreconditionFailedException e) {
return HttpErrorResponse.preconditionFailed(getMessage(e, request));
+ } catch (QuotaExceededException e) {
+ return HttpErrorResponse.quotaExceeded(getMessage(e, request));
} catch (Exception e) {
log.log(Level.WARNING, "Unexpected exception handling a config server request", e);
return HttpErrorResponse.internalServerError(getMessage(e, request));
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationApiHandler.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationApiHandler.java
index 8891f108af9..1ce6902abd0 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationApiHandler.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationApiHandler.java
@@ -30,11 +30,13 @@ import java.time.Duration;
import java.util.List;
import java.util.Map;
import java.util.Optional;
-import java.util.logging.Level;
import static com.yahoo.vespa.config.server.application.CompressedApplicationInputStream.createFromCompressedStream;
import static com.yahoo.vespa.config.server.http.Utils.checkThatTenantExists;
import static com.yahoo.vespa.config.server.http.v2.SessionCreateHandler.validateDataAndHeader;
+import static java.util.logging.Level.FINE;
+import static java.util.logging.Level.INFO;
+import static java.util.logging.Level.WARNING;
/**
* * The implementation of the /application/v2 API.
@@ -81,18 +83,22 @@ public class ApplicationApiHandler extends SessionHandler {
.map(contentType -> contentType.getMimeType().equalsIgnoreCase(MULTIPART_FORM_DATA))
.orElse(false);
if (multipartRequest) {
+ Map<String, PartItem> parts = Map.of();
try {
- Map<String, PartItem> parts = new MultiPartFormParser(request).readParts();
+ parts = new MultiPartFormParser(request).readParts();
byte[] params;
try (InputStream part = parts.get(MULTIPART_PARAMS).data()) { params = part.readAllBytes(); }
- log.log(Level.FINE, "Deploy parameters: [{0}]", new String(params, StandardCharsets.UTF_8));
+ log.log(FINE, "Deploy parameters: [{0}]", new String(params, StandardCharsets.UTF_8));
prepareParams = PrepareParams.fromJson(params, tenantName, zookeeperBarrierTimeout);
PartItem appPackagePart = parts.get(MULTIPART_APPLICATION_PACKAGE);
compressedStream = createFromCompressedStream(appPackagePart.data(), appPackagePart.contentType(), maxApplicationPackageSize);
} catch (IOException e) {
// Multipart exception happens when controller abandons the request due to other exceptions while deploying.
- log.log(e instanceof MultiPartFormParser.MultiPartException ? Level.INFO : Level.WARNING,
+ log.log(e instanceof MultiPartFormParser.MultiPartException ? INFO : WARNING,
"Unable to parse multipart in deploy from tenant '" + tenantName.value() + "': " + Exceptions.toMessageString(e));
+
+ var message = "Deploy request from '" + tenantName.value() + "' contains invalid data: " + e.getMessage();
+ log.log(INFO, message + ", parts: " + parts, e);
throw new BadRequestException("Deploy request from '" + tenantName.value() + "' contains invalid data: " + e.getMessage());
}
} else {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java
index bd1837707d9..4faa475fa08 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java
@@ -14,6 +14,7 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ApplicationLockException;
import com.yahoo.config.provision.DockerImage;
import com.yahoo.config.provision.NodeAllocationException;
+import com.yahoo.config.provision.QuotaExceededException;
import com.yahoo.config.provision.TransientException;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.config.server.http.InternalServerException;
@@ -122,7 +123,7 @@ public abstract class ModelsBuilder<MODELRESULT extends ModelResult> {
buildLatestModelForThisMajor, majorVersion));
buildLatestModelForThisMajor = false; // We have successfully built latest model version, do it only for this major
}
- catch (NodeAllocationException | ApplicationLockException | TransientException e) {
+ catch (NodeAllocationException | ApplicationLockException | TransientException | QuotaExceededException e) {
// Don't wrap this exception, and don't try to load other model versions as this is (most likely)
// caused by the state of the system, not the model version/application combination
throw e;
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/HostedDeployNodeAllocationTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/HostedDeployNodeAllocationTest.java
index e546569b255..e9dca44ed81 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/HostedDeployNodeAllocationTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/HostedDeployNodeAllocationTest.java
@@ -4,14 +4,17 @@ package com.yahoo.vespa.config.server.deploy;
import com.yahoo.component.Version;
import com.yahoo.config.model.api.HostProvisioner;
import com.yahoo.config.model.api.ModelFactory;
+import com.yahoo.config.model.api.Quota;
import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.ClusterMembership;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.HostSpec;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.ProvisionLogger;
+import com.yahoo.config.provision.QuotaExceededException;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.config.server.MockProvisioner;
+import com.yahoo.vespa.config.server.session.PrepareParams;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
@@ -24,6 +27,7 @@ import java.util.stream.Collectors;
import static com.yahoo.vespa.config.server.deploy.DeployTester.createHostedModelFactory;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
public class HostedDeployNodeAllocationTest {
@@ -50,6 +54,26 @@ public class HostedDeployNodeAllocationTest {
assertEquals(resources(2), get("host4", hosts).advertisedResources());
}
+ @Test
+ public void testExceedsQuota() {
+ List<ModelFactory> modelFactories = List.of(createHostedModelFactory(Version.fromString("7.2")),
+ createHostedModelFactory(Version.fromString("7.3")));
+ var provisioner = new VersionProvisioner();
+ DeployTester tester = new DeployTester.Builder(temporaryFolder).modelFactories(modelFactories)
+ .provisioner(new MockProvisioner().hostProvisioner(provisioner))
+ .hostedConfigserverConfig(Zone.defaultZone())
+ .build();
+
+ try {
+ tester.deployApp("src/test/apps/hosted/", new PrepareParams.Builder()
+ .vespaVersion("7.3")
+ .quota(new Quota(Optional.of(4), Optional.of(0))));
+ fail("Expected to get a QuotaExceededException");
+ } catch (QuotaExceededException e) {
+ assertEquals("main: The resources used cost $1.02 but your quota is $0.00: Contact support to upgrade your plan.", e.getMessage());
+ }
+ }
+
private HostSpec get(String hostname, Set<HostSpec> hosts) {
return hosts.stream().filter(host -> host.hostname().equals(hostname)).findAny().orElseThrow();
}
diff --git a/container-core/pom.xml b/container-core/pom.xml
index 5fed4f02912..eec8b60077b 100644
--- a/container-core/pom.xml
+++ b/container-core/pom.xml
@@ -445,6 +445,9 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
+ <configuration>
+ <bundleType>CORE</bundleType>
+ </configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
diff --git a/container-core/src/main/java/com/yahoo/container/jdisc/state/HostLifeGatherer.java b/container-core/src/main/java/com/yahoo/container/jdisc/state/HostLifeGatherer.java
index 8c0356517e3..538397fad24 100644
--- a/container-core/src/main/java/com/yahoo/container/jdisc/state/HostLifeGatherer.java
+++ b/container-core/src/main/java/com/yahoo/container/jdisc/state/HostLifeGatherer.java
@@ -4,6 +4,7 @@ package com.yahoo.container.jdisc.state;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
+import com.yahoo.component.Vtag;
import java.io.IOException;
import java.nio.file.Path;
@@ -41,6 +42,9 @@ public class HostLifeGatherer {
metrics.put("uptime", upTime);
metrics.put("alive", 1);
jsonObject.set("metrics", metrics);
+ ObjectNode dimensions = jsonMapper.createObjectNode();
+ dimensions.put("vespaVersion", Vtag.currentVersion.toFullString());
+ jsonObject.set("dimensions", dimensions);
return jsonObject;
}
diff --git a/container-core/src/main/java/com/yahoo/container/jdisc/utils/MultiPartFormParser.java b/container-core/src/main/java/com/yahoo/container/jdisc/utils/MultiPartFormParser.java
index 57fbd5eb96c..51264f6b3a9 100644
--- a/container-core/src/main/java/com/yahoo/container/jdisc/utils/MultiPartFormParser.java
+++ b/container-core/src/main/java/com/yahoo/container/jdisc/utils/MultiPartFormParser.java
@@ -56,6 +56,7 @@ public class MultiPartFormParser {
public String name() { return name; }
public InputStream data() { return data; }
public String contentType() { return contentType; }
+ @Override public String toString() { return "PartItem{" + "name='" + name + '\'' + ", contentType='" + contentType + '\'' + '}'; }
}
public static class MultiPartException extends IOException {
diff --git a/container-core/src/main/java/com/yahoo/container/logging/ConnectionLogEntry.java b/container-core/src/main/java/com/yahoo/container/logging/ConnectionLogEntry.java
index e2eeb5d3517..2f1dc1fd96f 100644
--- a/container-core/src/main/java/com/yahoo/container/logging/ConnectionLogEntry.java
+++ b/container-core/src/main/java/com/yahoo/container/logging/ConnectionLogEntry.java
@@ -38,6 +38,8 @@ public class ConnectionLogEntry {
private final List<String> sslSubjectAlternativeNames;
private final String httpProtocol;
private final String proxyProtocolVersion;
+ private final Long sslBytesReceived;
+ private final Long sslBytesSent;
private ConnectionLogEntry(Builder builder) {
@@ -67,6 +69,8 @@ public class ConnectionLogEntry {
this.sslSubjectAlternativeNames = builder.sslSubjectAlternativeNames;
this.httpProtocol = builder.httpProtocol;
this.proxyProtocolVersion = builder.proxyProtocolVersion;
+ this.sslBytesReceived = builder.sslBytesReceived;
+ this.sslBytesSent = builder.sslBytesSent;
}
public static Builder builder(UUID id, Instant timestamp) {
@@ -99,6 +103,8 @@ public class ConnectionLogEntry {
public List<String> sslSubjectAlternativeNames() { return sslSubjectAlternativeNames == null ? List.of() : sslSubjectAlternativeNames; }
public Optional<String> httpProtocol() { return Optional.ofNullable(httpProtocol); }
public Optional<String> proxyProtocolVersion() { return Optional.ofNullable(proxyProtocolVersion); }
+ public Optional<Long> sslBytesReceived() { return Optional.ofNullable(sslBytesReceived); }
+ public Optional<Long> sslBytesSent() { return Optional.ofNullable(sslBytesSent); }
public static class SslHandshakeFailure {
private final String type;
@@ -153,6 +159,8 @@ public class ConnectionLogEntry {
private List<String> sslSubjectAlternativeNames;
private String httpProtocol;
private String proxyProtocolVersion;
+ private Long sslBytesReceived;
+ private Long sslBytesSent;
Builder(UUID id, Instant timestamp) {
@@ -257,6 +265,14 @@ public class ConnectionLogEntry {
this.proxyProtocolVersion = version;
return this;
}
+ public Builder withSslBytesReceived(long bytesReceived) {
+ this.sslBytesReceived = bytesReceived;
+ return this;
+ }
+ public Builder withSslBytesSent(long bytesSent) {
+ this.sslBytesSent = bytesSent;
+ return this;
+ }
public ConnectionLogEntry build(){
return new ConnectionLogEntry(this);
diff --git a/container-core/src/main/java/com/yahoo/container/logging/JsonConnectionLogWriter.java b/container-core/src/main/java/com/yahoo/container/logging/JsonConnectionLogWriter.java
index 6d98c247ca0..20a3e9753cc 100644
--- a/container-core/src/main/java/com/yahoo/container/logging/JsonConnectionLogWriter.java
+++ b/container-core/src/main/java/com/yahoo/container/logging/JsonConnectionLogWriter.java
@@ -70,12 +70,15 @@ class JsonConnectionLogWriter implements LogWriter<ConnectionLogEntry> {
String sslSniServerName = unwrap(record.sslSniServerName());
String sslPeerIssuerSubject = unwrap(record.sslPeerIssuerSubject());
String sslPeerFingerprint = unwrap(record.sslPeerFingerprint());
+ Long sslBytesReceived = unwrap(record.sslBytesReceived());
+ Long sslBytesSent = unwrap(record.sslBytesSent());
ConnectionLogEntry.SslHandshakeFailure sslHandshakeFailure = unwrap(record.sslHandshakeFailure());
List<String> sslSubjectAlternativeNames = record.sslSubjectAlternativeNames();
if (isAnyValuePresent(
sslProtocol, sslSessionId, sslCipherSuite, sslPeerSubject, sslPeerNotBefore, sslPeerNotAfter,
- sslSniServerName, sslHandshakeFailure, sslPeerIssuerSubject, sslPeerFingerprint)) {
+ sslSniServerName, sslHandshakeFailure, sslPeerIssuerSubject, sslPeerFingerprint,
+ sslBytesReceived, sslBytesSent)) {
generator.writeObjectFieldStart("ssl");
writeOptionalString(generator, "protocol", sslProtocol);
@@ -87,6 +90,8 @@ class JsonConnectionLogWriter implements LogWriter<ConnectionLogEntry> {
writeOptionalTimestamp(generator, "peerNotAfter", sslPeerNotAfter);
writeOptionalString(generator, "peerFingerprint", sslPeerFingerprint);
writeOptionalString(generator, "sniServerName", sslSniServerName);
+ writeOptionalLong(generator, "bytesReceived", sslBytesReceived);
+ writeOptionalLong(generator, "bytesSent", sslBytesSent);
if (sslHandshakeFailure != null) {
generator.writeObjectFieldStart("handshake-failure");
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyConnectionLogger.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyConnectionLogger.java
index 6406125dcc3..2ea3863cc5a 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyConnectionLogger.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyConnectionLogger.java
@@ -42,7 +42,6 @@ import java.util.List;
import java.util.UUID;
import java.util.logging.Level;
import java.util.logging.Logger;
-import java.util.stream.Collectors;
/**
* Jetty integration for jdisc connection log ({@link ConnectionLog}).
@@ -137,6 +136,9 @@ class JettyConnectionLogger extends AbstractLifeCycle implements Connection.List
if (connection instanceof HttpConnection) {
info.setHttpBytes(connection.getBytesIn(), connection.getBytesOut());
}
+ if (connection.getEndPoint() instanceof SslConnection.DecryptedEndPoint ssl) {
+ info.setSslBytes(ssl.getSslConnection().getBytesIn(), ssl.getSslConnection().getBytesOut());
+ }
if (!endpoint.isOpen()) {
info.setClosedAt(System.currentTimeMillis());
connectionLog.log(info.toLogEntry());
@@ -258,6 +260,8 @@ class JettyConnectionLogger extends AbstractLifeCycle implements Connection.List
private List<String> sslSubjectAlternativeNames;
private String proxyProtocolVersion;
private String httpProtocol;
+ private long sslBytesReceived = 0;
+ private long sslBytesSent = 0;
private ConnectionInfo(UUID uuid, long createdAt, InetSocketAddress localAddress, InetSocketAddress peerAddress) {
this.uuid = uuid;
@@ -330,6 +334,12 @@ class JettyConnectionLogger extends AbstractLifeCycle implements Connection.List
synchronized ConnectionInfo setProxyProtocolVersion(String version) { this.proxyProtocolVersion = version; return this; }
+ synchronized ConnectionInfo setSslBytes(long received, long sent) {
+ this.sslBytesReceived = received;
+ this.sslBytesSent = sent;
+ return this;
+ }
+
synchronized ConnectionLogEntry toLogEntry() {
ConnectionLogEntry.Builder builder = ConnectionLogEntry.builder(uuid, Instant.ofEpochMilli(createdAt));
if (closedAt > 0) {
@@ -400,6 +410,12 @@ class JettyConnectionLogger extends AbstractLifeCycle implements Connection.List
if (proxyProtocolVersion != null) {
builder.withProxyProtocolVersion(proxyProtocolVersion);
}
+ if (sslBytesReceived > 0) {
+ builder.withSslBytesReceived(sslBytesReceived);
+ }
+ if (sslBytesSent > 0) {
+ builder.withSslBytesSent(sslBytesSent);
+ }
return builder.build();
}
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java
index d56daa34f03..0a697bd8fb3 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java
@@ -706,6 +706,8 @@ public class HttpServerTest {
Assertions.assertThat(logEntry.sslSessionId()).hasValueSatisfying(sessionId -> Assertions.assertThat(sessionId).hasSize(64));
Assertions.assertThat(logEntry.sslPeerNotBefore()).hasValue(Instant.EPOCH);
Assertions.assertThat(logEntry.sslPeerNotAfter()).hasValue(Instant.EPOCH.plus(100_000, ChronoUnit.DAYS));
+ Assertions.assertThat(logEntry.sslBytesReceived()).hasValueSatisfying(value -> Assertions.assertThat(value).isGreaterThan(100000L));
+ Assertions.assertThat(logEntry.sslBytesSent()).hasValueSatisfying(value -> Assertions.assertThat(value).isGreaterThan(10000L));
}
@Test
diff --git a/container-disc/pom.xml b/container-disc/pom.xml
index 173979fbe81..16ce99220f1 100644
--- a/container-disc/pom.xml
+++ b/container-disc/pom.xml
@@ -186,6 +186,7 @@
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
<configuration>
+ <bundleType>CORE</bundleType>
<discApplicationClass>com.yahoo.container.jdisc.ConfiguredApplication</discApplicationClass>
<buildLegacyVespaPlatformBundle>true</buildLegacyVespaPlatformBundle>
<discPreInstallBundle>
diff --git a/container-documentapi/pom.xml b/container-documentapi/pom.xml
index 9a2572a9918..395c91a3df2 100644
--- a/container-documentapi/pom.xml
+++ b/container-documentapi/pom.xml
@@ -45,6 +45,10 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
+ <configuration>
+ <bundleType>CORE</bundleType>
+ <suppressWarningMissingImportPackages>true</suppressWarningMissingImportPackages>
+ </configuration>
</plugin>
</plugins>
</build>
diff --git a/container-onnxruntime/pom.xml b/container-onnxruntime/pom.xml
index 59d23e1b8c1..b4a4ace380d 100644
--- a/container-onnxruntime/pom.xml
+++ b/container-onnxruntime/pom.xml
@@ -45,6 +45,9 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
+ <configuration>
+ <bundleType>CORE</bundleType>
+ </configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
diff --git a/container-search-and-docproc/pom.xml b/container-search-and-docproc/pom.xml
index 470c1b1fa6f..71d547ecacd 100644
--- a/container-search-and-docproc/pom.xml
+++ b/container-search-and-docproc/pom.xml
@@ -229,6 +229,9 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
+ <configuration>
+ <bundleType>CORE</bundleType>
+ </configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/parser/Tokenizer.java b/container-search/src/main/java/com/yahoo/prelude/query/parser/Tokenizer.java
index c1d415b8e27..01bb606e9ee 100644
--- a/container-search/src/main/java/com/yahoo/prelude/query/parser/Tokenizer.java
+++ b/container-search/src/main/java/com/yahoo/prelude/query/parser/Tokenizer.java
@@ -107,7 +107,9 @@ public final class Tokenizer {
if (i >= source.length()) break;
int c = source.codePointAt(i);
- if (characterClasses.isLetterOrDigit(c) || (c == '\'' && acceptApostropheAsWordCharacter(currentIndex))) {
+        if (characterClasses.isSymbol(c)) { // treat each symbol as a separate word
+ addToken(WORD, Character.toString(c), i, i + 1);
+ } else if (characterClasses.isLetterOrDigit(c) || (c == '\'' && acceptApostropheAsWordCharacter(currentIndex))) {
i = consumeWordOrNumber(i, currentIndex);
} else if (Character.isWhitespace(c)) {
addToken(SPACE, " ", i, i + 1);
diff --git a/container-search/src/main/java/com/yahoo/prelude/querytransform/StemmingSearcher.java b/container-search/src/main/java/com/yahoo/prelude/querytransform/StemmingSearcher.java
index 7c4bcb38c41..9050b82fd69 100644
--- a/container-search/src/main/java/com/yahoo/prelude/querytransform/StemmingSearcher.java
+++ b/container-search/src/main/java/com/yahoo/prelude/querytransform/StemmingSearcher.java
@@ -43,10 +43,8 @@ import com.yahoo.search.Searcher;
import com.yahoo.search.searchchain.Execution;
import com.yahoo.search.searchchain.PhaseNames;
-
import static com.yahoo.prelude.querytransform.CJKSearcher.TERM_ORDER_RELAXATION;
-
/**
* Replaces query terms with their stems
*
@@ -111,9 +109,8 @@ public class StemmingSearcher extends Searcher {
private Item replaceTerms(Query q, IndexFacts.Session indexFacts) {
Language language = q.getModel().getParsingLanguage();
- if (language == Language.UNKNOWN) {
- return q.getModel().getQueryTree().getRoot();
- }
+ if (language == Language.UNKNOWN) return q.getModel().getQueryTree().getRoot();
+
StemContext context = new StemContext();
context.isCJK = language.isCjk();
context.language = language;
@@ -144,9 +141,8 @@ public class StemmingSearcher extends Searcher {
}
private Item scan(Item item, StemContext context) {
- if (item == null) {
- return null;
- }
+ if (item == null) return null;
+
boolean old = context.insidePhrase;
if (item instanceof PhraseItem || item instanceof PhraseSegmentItem) {
context.insidePhrase = true;
@@ -155,7 +151,6 @@ public class StemmingSearcher extends Searcher {
item = checkBlock((BlockItem) item, context);
} else if (item instanceof CompositeItem comp) {
ListIterator<Item> i = comp.getItemIterator();
-
while (i.hasNext()) {
Item original = i.next();
Item transformed = scan(original, context);
@@ -186,7 +181,7 @@ public class StemmingSearcher extends Searcher {
if (i instanceof TermItem) {
return ((TermItem) i).getOrigin(); // this should always be the case
} else {
- getLogger().log(Level.WARNING, "Weird, BlockItem '" + b + "' was a composite containing " +
+ getLogger().log(Level.WARNING, "BlockItem '" + b + "' was a composite containing " +
i.getClass().getName() + ", expected TermItem.");
}
}
@@ -198,24 +193,14 @@ public class StemmingSearcher extends Searcher {
Item blockAsItem = (Item)current;
CompositeItem composite;
List<StemList> segments = linguistics.getStemmer().stem(current.stringValue(), index.getStemMode(), context.language);
+ if (segments.isEmpty()) return blockAsItem;
+
String indexName = current.getIndexName();
Substring substring = getOffsets(current);
-
if (segments.size() == 1) {
- getLogger().log(Level.FINE, () -> "Stem '"+current.stringValue()+"' mode "+index.getStemMode()
- +" and language '"+context.language+"' -> '"+segments.get(0)+"'");
TaggableItem w = singleWordSegment(current, segments.get(0), index, substring, context.insidePhrase);
setMetaData(current, context.reverseConnectivity, w);
- return (Item) w;
- } else if (getLogger().isLoggable(Level.FINE)) {
- var buf = new StringBuilder();
- buf.append("Stem '").append(current.stringValue());
- buf.append("' mode ").append(index.getStemMode());
- buf.append(" and language '").append(context.language).append("' ->");
- for (StemList segment : segments) {
- buf.append(" '").append(segment).append("'");
- }
- getLogger().log(Level.FINE, buf.toString());
+ return (Item)w;
}
if (context.isCJK)
@@ -224,7 +209,6 @@ public class StemmingSearcher extends Searcher {
composite = chooseComposite(current, ((Item) current).getParent(), indexName);
for (StemList segment : segments) {
- getLogger().log(Level.FINE, () -> "Stem to multiple segments '"+segment+"'");
TaggableItem w = singleWordSegment(current, segment, index, substring, context.insidePhrase);
if (composite instanceof AndSegmentItem) {
@@ -242,7 +226,6 @@ public class StemmingSearcher extends Searcher {
setSignificance(replacement, current);
phraseSegmentConnectivity(current, context.reverseConnectivity, replacement);
}
-
return composite;
}
@@ -372,8 +355,8 @@ public class StemmingSearcher extends Searcher {
case PHRASE -> createPhraseSegment(current, indexName);
case BOOLEAN_AND -> createAndSegment(current);
default -> throw new IllegalArgumentException("Unknown segmenting rule: " + current.getSegmentingRule() +
- ". This is a bug in Vespa, as the implementation has gotten out of sync." +
- " Please create an issue.");
+ ". This is a bug in Vespa, as the implementation has gotten out of sync." +
+ " Please create an issue.");
};
}
diff --git a/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java b/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java
index 6a0015f9d3a..01420e89e10 100644
--- a/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java
+++ b/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java
@@ -59,6 +59,8 @@ import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
+import static java.io.InputStream.nullInputStream;
+
/**
* Handles search request.
*
@@ -139,6 +141,8 @@ public class SearchHandler extends LoggingRequestHandler {
this.numRequestsLeftToTrace = new AtomicLong(numQueriesToTraceOnDebugAfterStartup);
metric.set(SEARCH_CONNECTIONS, 0.0d, null);
this.zoneInfo = zoneInfo;
+
+ warmup();
}
Metric metric() { return metric; }
@@ -150,6 +154,20 @@ public class SearchHandler extends LoggingRequestHandler {
return Integer.MAX_VALUE; // assume unbound
}
+ private void warmup() {
+ try {
+ handle(HttpRequest.createTestRequest("/search/" +
+ "?timeout=2s" +
+ "&ranking.profile=unranked" +
+ "&yql=select+*+from+sources+*+where+true+limit+0;",
+ com.yahoo.jdisc.http.HttpRequest.Method.GET,
+ nullInputStream()));
+ }
+ catch (RuntimeException e) {
+ log.log(Level.INFO, "Exception warming up search handler", e);
+ }
+ }
+
@Override
public final HttpResponse handle(com.yahoo.container.jdisc.HttpRequest request) {
requestsInFlight.incrementAndGet();
@@ -280,9 +298,7 @@ public class SearchHandler extends LoggingRequestHandler {
}
private Renderer<Result> toRendererCopy(ComponentSpecification format) {
- Renderer<Result> renderer = executionFactory.rendererRegistry().getRenderer(format);
- renderer = perRenderingCopy(renderer);
- return renderer;
+ return perRenderingCopy(executionFactory.rendererRegistry().getRenderer(format));
}
private Tuple2<String, Chain<Searcher>> resolveChain(String explicitChainName) {
diff --git a/container-search/src/main/java/com/yahoo/search/logging/AbstractSpoolingLogger.java b/container-search/src/main/java/com/yahoo/search/logging/AbstractSpoolingLogger.java
index 7641b04addf..601da11ab33 100644
--- a/container-search/src/main/java/com/yahoo/search/logging/AbstractSpoolingLogger.java
+++ b/container-search/src/main/java/com/yahoo/search/logging/AbstractSpoolingLogger.java
@@ -2,6 +2,7 @@
package com.yahoo.search.logging;
import com.yahoo.concurrent.DaemonThreadFactory;
+
import java.io.IOException;
import java.time.Clock;
import java.util.concurrent.RejectedExecutionException;
@@ -30,21 +31,21 @@ public abstract class AbstractSpoolingLogger extends AbstractThreadedLogger impl
public AbstractSpoolingLogger(Spooler spooler) {
this.spooler = spooler;
this.executorService = new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory("AbstractSpoolingLogger-send-"));
- executorService.scheduleWithFixedDelay(this, 0, 10L, TimeUnit.MILLISECONDS);
+ executorService.scheduleWithFixedDelay(this, 0, 1L, TimeUnit.SECONDS);
}
public void run() {
try {
spooler.switchFileIfNeeded();
spooler.processFiles(this::transport);
- } catch (IOException e) {
- e.printStackTrace();
+ } catch (Exception e) {
+ log.log(Level.WARNING, "Exception when processing files: " + e.getMessage());
}
}
@Override
public boolean send(LoggerEntry entry) {
- log.log(Level.INFO, "Sending");
+ log.log(Level.FINE, "Sending entry " + entry + " to spooler");
try {
executor.execute(() -> spooler.write(entry));
} catch (RejectedExecutionException e) {
diff --git a/container-search/src/main/java/com/yahoo/search/logging/LoggerEntry.java b/container-search/src/main/java/com/yahoo/search/logging/LoggerEntry.java
index ffac8b89860..7dd31d8e1a2 100644
--- a/container-search/src/main/java/com/yahoo/search/logging/LoggerEntry.java
+++ b/container-search/src/main/java/com/yahoo/search/logging/LoggerEntry.java
@@ -58,17 +58,19 @@ public class LoggerEntry {
}
public String toString() {
- return serialize();
+ return serialize(false);
}
- public String serialize() {
+ public String serialize() { return serialize(true); }
+
+ public String serialize(boolean encodeBlob) {
try {
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setLong("timestamp", timestamp == null ? 0 : timestamp);
root.setString("query", queryString());
- root.setString("blob", Base64.getEncoder().encodeToString(blob.array()));
+        root.setString("blob", encodeBlob ? Base64.getEncoder().encodeToString(blob.array()) : Utf8.toString(blob.array()));
root.setString("track", track());
return Utf8.toString(SlimeUtils.toJsonBytes(slime)); // TODO
diff --git a/container-search/src/main/java/com/yahoo/search/logging/Spooler.java b/container-search/src/main/java/com/yahoo/search/logging/Spooler.java
index 46f7fbb0b3c..e6d5bfc59ff 100644
--- a/container-search/src/main/java/com/yahoo/search/logging/Spooler.java
+++ b/container-search/src/main/java/com/yahoo/search/logging/Spooler.java
@@ -5,7 +5,6 @@ import ai.vespa.validation.Validation;
import com.yahoo.vespa.defaults.Defaults;
import java.io.File;
import java.io.IOException;
-import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
@@ -16,6 +15,8 @@ import java.time.Instant;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
@@ -53,21 +54,25 @@ public class Spooler {
private final Clock clock;
private final AtomicReference<Instant> firstWriteTimestamp = new AtomicReference<>();
private final boolean keepSuccessFiles;
+ private final int maxFailures;
+ private final Map<File, Integer> failures = new ConcurrentHashMap<>();
public Spooler(Clock clock) {
- this(clock, false);
+ this(clock, false, 1000);
}
- public Spooler(Clock clock, boolean keepSuccessFiles) {
- this(defaultSpoolPath, defaultMaxEntriesPerFile, clock, keepSuccessFiles);
+ public Spooler(Clock clock, boolean keepSuccessFiles, int maxFailures) {
+ this(defaultSpoolPath, defaultMaxEntriesPerFile, clock, keepSuccessFiles, maxFailures);
}
- public Spooler(Path spoolPath, int maxEntriesPerFile, Clock clock, boolean keepSuccessFiles) {
+ // Note: Needs to be public, used in system tests
+ public Spooler(Path spoolPath, int maxEntriesPerFile, Clock clock, boolean keepSuccessFiles, int maxFailures) {
this.spoolPath = spoolPath;
this.maxEntriesPerFile = maxEntriesPerFile;
this.clock = clock;
this.fileNameBase.set(newFileNameBase(clock));
this.keepSuccessFiles = keepSuccessFiles;
+ this.maxFailures = maxFailures;
firstWriteTimestamp.set(Instant.EPOCH);
createDirs(spoolPath);
}
@@ -105,34 +110,54 @@ public class Spooler {
public void processFiles(List<File> files, Function<LoggerEntry, Boolean> transport) {
for (File f : files) {
log.log(Level.FINE, "Processing file " + f);
- boolean succcess = false;
+ boolean success = false;
try {
List<String> lines = Files.readAllLines(f.toPath());
for (String line : lines) {
LoggerEntry entry = LoggerEntry.deserialize(line);
log.log(Level.FINE, "Read entry " + entry + " from " + f);
- succcess = transport.apply(entry);
- if (! succcess) {
- log.log(Level.WARNING, "unsuccessful call to transport() for " + entry);
+ success = transport.apply(entry);
+ if (! success) {
+ throw new RuntimeException("Unable to process file " + f + ": unsuccessful call to transport() for " + entry);
}
- };
- } catch (IOException e) {
- throw new UncheckedIOException("Unable to process file " + f.toPath(), e);
- // TODO: Move to failures path
+ }
+ failures.remove(f);
+ } catch (Exception e) {
+ handleFailure(f);
} finally {
- if (succcess && keepSuccessFiles) {
- Path file = f.toPath();
- Path target = spoolPath.resolve(successesPath).resolve(f.toPath().relativize(file)).resolve(f.getName());
- try {
- Files.move(file, target);
- } catch (IOException e) {
- log.log(Level.SEVERE, "Unable to move processed file " + file + " to " + target, e);
- }
+ if (success) {
+ if (keepSuccessFiles)
+ moveProcessedFile(f, successesPath);
+ else
+ try {
+ Files.delete(f.toPath());
+ } catch (IOException e) {
+ log.log(Level.WARNING, "Unable to delete file " + f, e);
+ }
}
}
}
}
+ private void handleFailure(File file) {
+ failures.putIfAbsent(file, 0);
+ var failCount = failures.compute(file, (f, count) -> count + 1);
+ if (failCount > maxFailures) {
+ log.log(Level.WARNING, "Unable to process file " + file + " after trying " + maxFailures + " times, moving it to " + failuresPath);
+ moveProcessedFile(file, failuresPath);
+ }
+ }
+
+ private void moveProcessedFile(File f, Path path) {
+ Path file = f.toPath();
+ Path target = spoolPath.resolve(path).resolve(f.toPath().relativize(file)).resolve(f.getName());
+ try {
+ Files.move(file, target);
+ } catch (IOException e) {
+ log.log(Level.SEVERE, "Unable to move processed file " + file + " to " + target, e);
+ }
+ }
+
public Path processingPath() { return processingPath; }
public Path readyPath() { return readyPath; }
public Path successesPath() { return successesPath; }
@@ -178,6 +203,8 @@ public class Spooler {
switchFileIfNeeded(file, fileName);
}
+ Map<File, Integer> failures() { return failures; }
+
private synchronized void switchFileIfNeeded(Path file, String fileName) throws IOException {
if (file.toFile().exists()
&& (entryCounter.get() >= maxEntriesPerFile || firstWriteTimestamp.get().plus(maxDelayAfterFirstWrite).isBefore(clock.instant()))) {
diff --git a/container-search/src/test/java/com/yahoo/prelude/query/parser/test/ParseTestCase.java b/container-search/src/test/java/com/yahoo/prelude/query/parser/test/ParseTestCase.java
index 583e89bacd6..f35ffcee0c6 100644
--- a/container-search/src/test/java/com/yahoo/prelude/query/parser/test/ParseTestCase.java
+++ b/container-search/src/test/java/com/yahoo/prelude/query/parser/test/ParseTestCase.java
@@ -2580,4 +2580,16 @@ public class ParseTestCase {
void testNoGrammar4() {
tester.assertParsed("WEAKAND(100) foo bar baz one two 37", "foo -(bar baz \"one two\" 37)", Query.Type.TOKENIZE);
}
+
+ @Test
+ void testEmojis() {
+ String emoji1 = "\uD83D\uDD2A"; // 🔪
+ String emoji2 = "\uD83D\uDE00"; // 😀
+
+ tester.assertParsed(emoji1, emoji1, Query.Type.ANY);
+ tester.assertParsed(emoji2, emoji2, Query.Type.ANY);
+ tester.assertParsed("AND " + emoji1 + " " + emoji2, emoji1 + emoji2, Query.Type.ANY);
+ tester.assertParsed("AND " + emoji1 + " foo " + emoji2, emoji1 + "foo" + emoji2, Query.Type.ANY);
+ }
+
}
diff --git a/container-search/src/test/java/com/yahoo/prelude/querytransform/test/StemmingSearcherTestCase.java b/container-search/src/test/java/com/yahoo/prelude/querytransform/test/StemmingSearcherTestCase.java
index bcb243b4563..d1514267a9b 100644
--- a/container-search/src/test/java/com/yahoo/prelude/querytransform/test/StemmingSearcherTestCase.java
+++ b/container-search/src/test/java/com/yahoo/prelude/querytransform/test/StemmingSearcherTestCase.java
@@ -23,7 +23,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
- * @author <a href="mailto:mathiasm@yahoo-inc.com">Mathias M. Lidal</a>
+ * @author Mathias M. Lidal
*/
public class StemmingSearcherTestCase {
@@ -33,8 +33,8 @@ public class StemmingSearcherTestCase {
@Test
void testStemOnlySomeTerms() {
- assertStem("/search?query=Holes in CVS and Subversion nostem:Found",
- "WEAKAND(100) hole in cvs and subversion nostem:Found");
+        assertStemmed("WEAKAND(100) hole in cvs and subversion nostem:Found",
+                      "/search?query=Holes in CVS and Subversion nostem:Found");
}
@Test
@@ -78,7 +78,7 @@ public class StemmingSearcherTestCase {
@Test
void testDontStemPrefixes() {
- assertStem("/search?query=ist*&language=de", "WEAKAND(100) ist*");
+ assertStemmed("WEAKAND(100) ist*", "/search?query=ist*&language=de");
}
@Test
@@ -90,10 +90,10 @@ public class StemmingSearcherTestCase {
@Test
void testNounStemming() {
- assertStem("/search?query=noun:towers noun:tower noun:tow",
- "WEAKAND(100) noun:tower noun:tower noun:tow");
- assertStem("/search?query=notnoun:towers notnoun:tower notnoun:tow",
- "WEAKAND(100) notnoun:tower notnoun:tower notnoun:tow");
+        assertStemmed("WEAKAND(100) noun:tower noun:tower noun:tow",
+                      "/search?query=noun:towers noun:tower noun:tow");
+        assertStemmed("WEAKAND(100) notnoun:tower notnoun:tower notnoun:tow",
+                      "/search?query=notnoun:towers notnoun:tower notnoun:tow");
}
@SuppressWarnings("deprecation")
@@ -133,11 +133,19 @@ public class StemmingSearcherTestCase {
@Test
void testMultipleStemming() {
- Query q = new Query(QueryTestCase.httpEncode("/search?language=en&search=four&query=trees \"nouns girls\" flowers \"a verbs a\" girls&default-index=foobar"));
- executeStemming(q);
- assertEquals("WEAKAND(100) WORD_ALTERNATIVES foobar:[ tree(0.7) trees(1.0) ] " +
- "foobar:\"noun girl\" WORD_ALTERNATIVES foobar:[ flower(0.7) flowers(1.0) ] " +
- "foobar:\"a verb a\" WORD_ALTERNATIVES foobar:[ girl(0.7) girls(1.0) ]", q.getModel().getQueryTree().getRoot().toString());
+ assertStemmed("WEAKAND(100) WORD_ALTERNATIVES foobar:[ tree(0.7) trees(1.0) ] " +
+ "foobar:\"noun girl\" WORD_ALTERNATIVES foobar:[ flower(0.7) flowers(1.0) ] " +
+ "foobar:\"a verb a\" WORD_ALTERNATIVES foobar:[ girl(0.7) girls(1.0) ]",
+ "/search?language=en&search=four&query=trees \"nouns girls\" flowers \"a verbs a\" girls&default-index=foobar");
+ }
+
+ @Test
+ void testEmojiStemming() {
+ String emoji1 = "\uD83C\uDF49"; // 🍉
+ String emoji2 = "\uD83D\uDE00"; // 😀
+ assertStemmed("WEAKAND(100) " + emoji1, "/search?query=" + emoji1);
+ assertStemmed("WEAKAND(100) (AND " + emoji1 + " " + emoji2 + ")", "/search?query=" + emoji1 + emoji2);
+ assertStemmed("WEAKAND(100) (AND " + emoji1 + " foo " + emoji2 + ")", "/search?query=" + emoji1 + "foo" + emoji2);
}
private Execution.Context newExecutionContext() {
@@ -153,12 +161,8 @@ public class StemmingSearcherTestCase {
newExecutionContext()).search(query);
}
- private void assertStem(String queryString, String expectedQueryTree) {
- assertStemEncoded(QueryTestCase.httpEncode(queryString), expectedQueryTree);
- }
-
- private void assertStemEncoded(String encodedQueryString, String expectedQueryTree) {
- Query query = new Query(encodedQueryString);
+ private void assertStemmed(String expectedQueryTree, String queryString) {
+ Query query = new Query(QueryTestCase.httpEncode(queryString));
executeStemming(query);
assertEquals(expectedQueryTree, query.getModel().getQueryTree().getRoot().toString());
}
diff --git a/container-search/src/test/java/com/yahoo/search/logging/SpoolerTest.java b/container-search/src/test/java/com/yahoo/search/logging/SpoolerTest.java
index eb5d7f85481..b07e576241b 100644
--- a/container-search/src/test/java/com/yahoo/search/logging/SpoolerTest.java
+++ b/container-search/src/test/java/com/yahoo/search/logging/SpoolerTest.java
@@ -27,8 +27,7 @@ public class SpoolerTest {
public void testSpoolingLogger() throws IOException {
Path spoolDir = tempDir.resolve("spool");
- int maxEntriesPerFile = 1;
- Spooler spooler = new Spooler(spoolDir, maxEntriesPerFile, clock, true);
+ Spooler spooler = createSpooler(spoolDir, 1);
TestLogger logger = new TestLogger(spooler);
assertTrue(sendEntry(logger, "Yo entry"));
@@ -53,14 +52,44 @@ public class SpoolerTest {
assertReadyFiles(spooler, 0);
assertSuccessFiles(spooler, 2);
assertFailureFiles(spooler, 0);
+
+ assertTrue(spooler.failures().isEmpty(), spooler.failures().toString());
+ }
+
+ @Test
+ public void testSpoolingLoggerCleanup() throws IOException {
+ Path spoolDir = tempDir.resolve("spool");
+
+ Spooler spooler = createSpooler(spoolDir, 1, false, 5);
+
+ TestLogger logger = new TestLogger(spooler);
+ assertTrue(sendEntry(logger, "Yo entry"));
+
+ Path readyPath = spooler.readyPath();
+ Path readyFile1 = readyPath.resolve(spooler.fileNameBase.get() + "-0");
+ waitUntilFileExists(readyFile1);
+
+ // Check content after being moved to ready path
+ assertContent(readyFile1, "Yo entry");
+
+ // Process files (read, transport files)
+ logger.manualRun();
+ assertEquals(1, logger.entriesSent());
+
+ // No files in processing or ready or successes
+ assertProcessedFiles(spooler, 0);
+ assertReadyFiles(spooler, 0);
+ assertSuccessFiles(spooler, 0);
+ assertFailureFiles(spooler, 0);
+
+ assertTrue(spooler.failures().isEmpty(), spooler.failures().toString());
}
@Test
public void testSpoolingManyEntriesPerFile() throws IOException {
Path spoolDir = tempDir.resolve("spool");
- int maxEntriesPerFile = 2;
- Spooler spooler = new Spooler(spoolDir, maxEntriesPerFile, clock, true);
+ Spooler spooler = createSpooler(spoolDir, 2);
TestLogger logger = new TestLogger(spooler);
assertTrue(sendEntry(logger, "Yo entry"));
@@ -111,8 +140,8 @@ public class SpoolerTest {
@Test
public void failingToTransportIsRetried() throws IOException {
Path spoolDir = tempDir.resolve("spool");
- Spooler spooler = new Spooler(spoolDir, 1, clock, true);
- FailingToTransportSecondEntryLogger logger = new FailingToTransportSecondEntryLogger(spooler);
+ Spooler spooler = createSpooler(spoolDir, 1, true, 2);
+ FailingToTransportNthEntryLogger logger = new FailingToTransportNthEntryLogger(spooler, 2);
assertTrue(sendEntry(logger, "Yo entry"));
logger.manualRun(); // Success for first message
@@ -121,17 +150,47 @@ public class SpoolerTest {
assertTrue(sendEntry(logger, "Yo entry 2"));
logger.manualRun(); // Failure for second message, so still just 1 file in successes path
assertEquals(1, spooler.listFilesInPath(spooler.successesPath()).size());
+ assertEquals(0, spooler.listFilesInPath(spooler.failuresPath()).size());
logger.manualRun(); // Success when retrying second message, so 2 files in successes path
assertEquals(2, spooler.listFilesInPath(spooler.successesPath()).size());
}
@Test
+ public void failingToTransportGivesUpAfterNTries() throws IOException {
+ Path spoolDir = tempDir.resolve("spool");
+ Spooler spooler = createSpooler(spoolDir, 1, true, 2);
+ FailingToTransportAfterNEntriesLogger logger = new FailingToTransportAfterNEntriesLogger(spooler, 2);
+
+ assertTrue(sendEntry(logger, "Yo entry"));
+ assertEquals(1, spooler.listFilesInPath(spooler.readyPath()).size());
+ logger.manualRun(); // Success for first message
+ assertEquals(1, spooler.listFilesInPath(spooler.successesPath()).size());
+ assertEquals(0, spooler.listFilesInPath(spooler.failuresPath()).size());
+
+ assertTrue(sendEntry(logger, "Yo entry 2"));
+ assertEquals(1, spooler.listFilesInPath(spooler.readyPath()).size());
+ logger.manualRun(); // Failure for second message, so still just 1 file in successes path
+ assertEquals(1, spooler.listFilesInPath(spooler.successesPath()).size());
+ assertEquals(0, spooler.listFilesInPath(spooler.failuresPath()).size());
+
+ logger.manualRun(); // Fails again, but should be retried
+ assertEquals(1, spooler.listFilesInPath(spooler.readyPath()).size());
+ assertEquals(1, spooler.listFilesInPath(spooler.successesPath()).size());
+ assertEquals(0, spooler.listFilesInPath(spooler.failuresPath()).size());
+
+ logger.manualRun(); // Fails again, should be moved to failures path
+ assertEquals(0, spooler.listFilesInPath(spooler.readyPath()).size());
+ assertEquals(1, spooler.listFilesInPath(spooler.successesPath()).size());
+ assertEquals(1, spooler.listFilesInPath(spooler.failuresPath()).size());
+ }
+
+ @Test
public void noSuccessFiles() throws IOException {
Path spoolDir = tempDir.resolve("spool");
boolean keepSuccessFiles = false;
- Spooler spooler = new Spooler(spoolDir, 1, clock, keepSuccessFiles);
- FailingToTransportSecondEntryLogger logger = new FailingToTransportSecondEntryLogger(spooler);
+ Spooler spooler = createSpooler(spoolDir, 1, keepSuccessFiles, 2);
+ FailingToTransportNthEntryLogger logger = new FailingToTransportNthEntryLogger(spooler, 2);
assertTrue(sendEntry(logger, "Yo entry"));
logger.manualRun(); // Success for first message
@@ -163,6 +222,14 @@ public class SpoolerTest {
assertTrue(content.contains(Base64.getEncoder().encodeToString(expectedContent.getBytes())));
}
+ private static Spooler createSpooler(Path spoolDir, int maxEntriesPerFile) {
+ return new Spooler(spoolDir, maxEntriesPerFile, clock, true, 1000);
+ }
+
+ private static Spooler createSpooler(Path spoolDir, int maxEntriesPerFile, boolean keepSuccessFiles, int maxFailures) {
+ return new Spooler(spoolDir, maxEntriesPerFile, clock, keepSuccessFiles, maxFailures);
+ }
+
private static class TestLogger extends AbstractSpoolingLogger {
private final List<LoggerEntry> entriesSent = new ArrayList<>();
@@ -178,9 +245,7 @@ public class SpoolerTest {
}
@Override
- public void run() {
- // Do nothing, use manualRun
- }
+ public void run() {} // do nothing, call manualRun() to do something
@Override
public boolean send(LoggerEntry entry) {
@@ -198,12 +263,14 @@ public class SpoolerTest {
}
- private static class FailingToTransportSecondEntryLogger extends AbstractSpoolingLogger {
+ private static class FailingToTransportNthEntryLogger extends AbstractSpoolingLogger {
private int transportCount = 0;
+ private final int entriesToFail;
- public FailingToTransportSecondEntryLogger(Spooler spooler) {
+ public FailingToTransportNthEntryLogger(Spooler spooler, int entriesToFail) {
super(spooler);
+ this.entriesToFail = entriesToFail;
}
@Override
@@ -215,14 +282,43 @@ public class SpoolerTest {
@Override
public boolean transport(LoggerEntry entry) {
transportCount++;
- return transportCount != 2;
+ return transportCount != entriesToFail;
+ }
+
+ @Override
+ public void run() {} // Do nothing here; tests drive the logger via manualRun()
+
+ public void manualRun() {
+ super.run();
+ }
+
+ }
+
+ private static class FailingToTransportAfterNEntriesLogger extends AbstractSpoolingLogger {
+
+ private int transportCount = 0;
+ private final int entriesToFailAfter;
+
+ public FailingToTransportAfterNEntriesLogger(Spooler spooler, int entriesToFailAfter) {
+ super(spooler);
+ this.entriesToFailAfter = entriesToFailAfter;
}
@Override
- public void run() {
- // do nothing
+ public boolean send(LoggerEntry entry) {
+ spooler.write(entry);
+ return true;
+ }
+
+ @Override
+ public boolean transport(LoggerEntry entry) {
+ transportCount++;
+ return transportCount < entriesToFailAfter;
}
+ @Override
+ public void run() {} // Do nothing here; tests drive the logger via manualRun()
+
public void manualRun() {
super.run();
}
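
The new test exercises the spooler's retry threshold: an entry stays in the ready path while transport keeps failing, and is only moved to the failures path once the configured maximum number of failures is reached. Below is a minimal, self-contained sketch of that retry-then-give-up behaviour (plain JDK only; the counter bookkeeping is simplified and the real Spooler's threshold accounting may differ slightly):

    import java.util.function.Predicate;

    class SpoolRetrySketch {

        enum Outcome { SUCCESS, FAILED }

        // Retry a ready entry until transport succeeds, or give up after maxFailures failed attempts.
        static Outcome process(String entry, Predicate<String> transport, int maxFailures) {
            int failures = 0;
            while (failures < maxFailures) {
                if (transport.test(entry)) return Outcome.SUCCESS; // entry would go to the successes path
                failures++;
            }
            return Outcome.FAILED; // entry would go to the failures path
        }

        public static void main(String[] args) {
            System.out.println(process("Yo entry", e -> true, 2));    // SUCCESS on the first attempt
            System.out.println(process("Yo entry 2", e -> false, 2)); // FAILED after 2 failed attempts
        }
    }
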
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServerException.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServerException.java
index 2b35334e14b..94a565b2974 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServerException.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServerException.java
@@ -49,15 +49,17 @@ public class ConfigServerException extends RuntimeException {
CERTIFICATE_NOT_READY,
LOAD_BALANCER_NOT_READY,
INCOMPLETE_RESPONSE,
- CONFIG_NOT_CONVERGED
+ CONFIG_NOT_CONVERGED,
+ QUOTA_EXCEEDED
}
+ // Note: Used by code in internal repo
public static ConfigServerException readException(byte[] body, String context) {
Inspector root = SlimeUtils.jsonToSlime(body).get();
String codeName = root.field("error-code").asString();
ErrorCode code = Stream.of(ErrorCode.values())
- .filter(value -> value.name().equals(codeName))
- .findAny().orElse(ErrorCode.INCOMPLETE_RESPONSE);
+ .filter(value -> value.name().equals(codeName))
+ .findAny().orElse(ErrorCode.INCOMPLETE_RESPONSE);
String message = root.field("message").valid() ? root.field("message").asString() : new String(body, UTF_8);
return new ConfigServerException(code, message, context);
}
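
readException maps the config server's "error-code" string onto the ErrorCode enum by name and falls back to INCOMPLETE_RESPONSE for names the controller does not know yet, such as a code introduced on the config server side first. A plain-JDK sketch of that lookup-with-fallback pattern, using a stand-in enum:

    import java.util.stream.Stream;

    class ErrorCodeLookupSketch {

        enum ErrorCode { CERTIFICATE_NOT_READY, LOAD_BALANCER_NOT_READY, INCOMPLETE_RESPONSE, CONFIG_NOT_CONVERGED, QUOTA_EXCEEDED }

        static ErrorCode fromName(String codeName) {
            return Stream.of(ErrorCode.values())
                         .filter(value -> value.name().equals(codeName))
                         .findAny().orElse(ErrorCode.INCOMPLETE_RESPONSE); // unknown names degrade gracefully
        }

        public static void main(String[] args) {
            System.out.println(fromName("QUOTA_EXCEEDED")); // QUOTA_EXCEEDED
            System.out.println(fromName("SOMETHING_NEW"));  // INCOMPLETE_RESPONSE
        }
    }
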
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/zone/ZoneRegistry.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/zone/ZoneRegistry.java
index 6cdb197b307..f2cd55b88b2 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/zone/ZoneRegistry.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/zone/ZoneRegistry.java
@@ -39,8 +39,12 @@ public interface ZoneRegistry {
/** Returns whether cloudAccount in this system supports given zone */
boolean hasZone(ZoneId zoneId, CloudAccount cloudAccount);
- /** Returns whether the given cloud account is an enclave */
- boolean isEnclave(CloudAccount cloudAccount);
+ /** Returns whether the given cloud account is not one of the system accounts */
+ boolean isExternal(CloudAccount cloudAccount);
+
+ default boolean isExclave(CloudAccount cloudAccount) {
+ return system().isPublic() && isExternal(cloudAccount);
+ }
/** Returns a list containing the id of all zones in this registry */
ZoneFilter zones();
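
isExclave is now derived in a default method from system().isPublic() and the new isExternal check, so implementations only need to supply the latter. A stand-in sketch of that composition (types are simplified placeholders, not the real ZoneRegistry/CloudAccount API):

    interface ZoneRegistrySketch {

        boolean isPublicSystem();                // stands in for system().isPublic()
        boolean isExternal(String cloudAccount); // true when the account is not one of the system accounts

        // Exclave = an external account in a public system; implementers get this for free.
        default boolean isExclave(String cloudAccount) {
            return isPublicSystem() && isExternal(cloudAccount);
        }
    }
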
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java
index ac895022130..ccf79e7eca3 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java
@@ -21,6 +21,9 @@ enum PathGroup {
/** Paths exclusive to operators (including read), used for system management. */
classifiedOperator("/application/v4/notifications",
+ "/routing/v1/",
+ "/routing/v1/status/environment/{*}",
+ "/routing/v1/inactive/environment/{*}",
"/configserver/v1/{*}",
"/deployment/v1/{*}"),
@@ -34,9 +37,6 @@ enum PathGroup {
"/os/v1/{*}",
"/provision/v2/{*}",
"/zone/v2/{*}",
- "/routing/v1/",
- "/routing/v1/status/environment/{*}",
- "/routing/v1/inactive/environment/{*}",
"/state/v1/{*}",
"/changemanagement/v1/{*}"),
@@ -139,8 +139,10 @@ enum PathGroup {
"/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{ignored}/suspended",
"/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{ignored}/service/{*}",
"/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{ignored}/global-rotation/{*}",
- "/application/v4/tenant/{tenant}/application/{application}/metering",
- "/routing/v1/inactive/tenant/{tenant}/application/{application}/instance/{ignored}/environment/prod/region/{region}"),
+ "/application/v4/tenant/{tenant}/application/{application}/metering"),
+
+ applicationRouting(Matcher.tenant,
+ Matcher.application, "/routing/v1/inactive/tenant/{tenant}/application/{application}/instance/{ignored}/environment/prod/region/{region}"),
// TODO jonmv: remove
/** Path used to restart development nodes. */
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/Policy.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/Policy.java
index 9a28226c921..2f8ea368b21 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/Policy.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/Policy.java
@@ -33,10 +33,10 @@ enum Policy {
/** Full access to everything. */
supporter(Privilege.grant(Action.read)
- .on(PathGroup.allExcept(PathGroup.classifiedOperator))
+ .on(PathGroup.allExcept(PathGroup.classifiedOperator, PathGroup.applicationRouting))
.in(SystemName.all()),
Privilege.grant(Action.all())
- .on(PathGroup.classifiedOperator)
+ .on(PathGroup.classifiedOperator, PathGroup.applicationRouting)
.in(SystemName.all())),
/** Full access to user management for a tenant in select systems. */
@@ -87,12 +87,12 @@ enum Policy {
/** Read access to application information and settings. */
applicationRead(Privilege.grant(Action.read)
- .on(PathGroup.application, PathGroup.applicationInfo, PathGroup.reindexing, PathGroup.serviceDump, PathGroup.dropDocuments)
+ .on(PathGroup.application, PathGroup.applicationInfo, PathGroup.applicationRouting, PathGroup.reindexing, PathGroup.serviceDump, PathGroup.dropDocuments)
.in(SystemName.all())),
/** Update access to application information and settings. */
applicationUpdate(Privilege.grant(Action.update)
- .on(PathGroup.application, PathGroup.applicationInfo)
+ .on(PathGroup.application, PathGroup.applicationInfo, PathGroup.applicationRouting)
.in(SystemName.all())),
/** Access to delete a certain application. */
@@ -102,7 +102,7 @@ enum Policy {
/** Full access to application information and settings. */
applicationOperations(Privilege.grant(Action.write())
- .on(PathGroup.applicationInfo, PathGroup.productionRestart, PathGroup.reindexing, PathGroup.serviceDump, PathGroup.dropDocuments)
+ .on(PathGroup.applicationInfo, PathGroup.applicationRouting, PathGroup.productionRestart, PathGroup.reindexing, PathGroup.serviceDump, PathGroup.dropDocuments)
.in(SystemName.all())),
/** Access to create and delete developer and deploy keys under a tenant. */
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
index 08a8440fbe2..eedc94c729c 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
@@ -10,6 +10,7 @@ import com.yahoo.config.application.api.ValidationId;
import com.yahoo.config.application.api.ValidationOverrides;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.CloudAccount;
+import com.yahoo.config.provision.CloudName;
import com.yahoo.config.provision.DockerImage;
import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.TenantName;
@@ -694,24 +695,22 @@ public class ApplicationController {
public Optional<CloudAccount> decideCloudAccountOf(DeploymentId deployment, DeploymentSpec spec) {
ZoneId zoneId = deployment.zoneId();
- Optional<CloudAccount> requestedAccount = spec.instance(deployment.applicationId().instance())
- .flatMap(instanceSpec -> instanceSpec.cloudAccount(zoneId.environment(),
- Optional.of(zoneId.region())))
- .or(spec::cloudAccount);
- if (requestedAccount.isEmpty() || requestedAccount.get().isUnspecified()) {
+ CloudName cloud = controller.zoneRegistry().get(zoneId).getCloudName();
+ CloudAccount requestedAccount = spec.cloudAccount(cloud, deployment.applicationId().instance(), deployment.zoneId());
+ if (requestedAccount.isUnspecified())
return Optional.empty();
- }
+
TenantName tenant = deployment.applicationId().tenant();
Set<CloudAccount> tenantAccounts = accountsOf(tenant);
- if (!tenantAccounts.contains(requestedAccount.get())) {
- throw new IllegalArgumentException("Requested cloud account '" + requestedAccount.get().value() +
+ if ( ! tenantAccounts.contains(requestedAccount)) {
+ throw new IllegalArgumentException("Requested cloud account '" + requestedAccount.value() +
"' is not valid for tenant '" + tenant + "'");
}
- if ( ! controller.zoneRegistry().hasZone(zoneId, requestedAccount.get())) {
+ if ( ! controller.zoneRegistry().hasZone(zoneId, requestedAccount)) {
throw new IllegalArgumentException("Zone " + zoneId + " is not configured in requested cloud account '" +
- requestedAccount.get().value() + "'");
+ requestedAccount.value() + "'");
}
- return requestedAccount;
+ return Optional.of(requestedAccount);
}
private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) {
@@ -948,7 +947,7 @@ public class ApplicationController {
* @param applicationPackage application package
* @param deployer principal initiating the deployment, possibly empty
*/
- public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName, Optional<ZoneId> zoneId, ApplicationPackage applicationPackage, Optional<Principal> deployer) {
+ public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<DeploymentId> deployment, ApplicationPackage applicationPackage, Optional<Principal> deployer) {
Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain()
.map(domain -> new AthenzDomain(domain.value()));
if (identityDomain.isEmpty()) {
@@ -969,14 +968,12 @@ public class ApplicationController {
// Either the user is member of the domain admin role, or is given the "launch" privilege on the service.
Optional<AthenzUser> athenzUser = getUser(deployer);
if (athenzUser.isPresent()) {
- // We only need to validate the root and instance in deployment.xml. Dev/perf entries are found at the instance level as well.
- var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment"));
- var serviceToLaunch = instanceName
- .flatMap(instance -> applicationPackage.deploymentSpec().instance(instance))
- .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
- .or(() -> applicationPackage.deploymentSpec().athenzService())
- .map(service -> new AthenzService(identityDomain.get(), service.value()));
-
+ // This is a direct deployment, and we need only validate what the configserver will actually launch.
+ DeploymentId id = deployment.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment"));
+ var serviceToLaunch = applicationPackage.deploymentSpec().athenzService(id.applicationId().instance(),
+ id.zoneId().environment(),
+ id.zoneId().region())
+ .map(service -> new AthenzService(identityDomain.get(), service.value()));
if (serviceToLaunch.isPresent()) {
if (
! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get()) && // launch privilege
@@ -989,7 +986,7 @@ public class ApplicationController {
} else {
// This is a rare edge case where deployment.xml specifies athenz-service on each step, but not on the root.
// It is undefined which service should be launched, so handle this as an error.
- throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value());
+ throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + id.zoneId().value());
}
} else {
// If this is a deployment pipeline, verify that the domain in deployment.xml is the same as the tenant domain. Access control is already validated before this step.
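
The reworked decideCloudAccountOf resolves a single requested account for the deployment's cloud, instance and zone, returns empty when it is unspecified, and otherwise validates tenant ownership and zone support before returning it. A simplified sketch of that flow with hypothetical stand-in types (not the real controller API):

    import java.util.Optional;
    import java.util.Set;

    class CloudAccountDecisionSketch {

        record CloudAccount(String value) {
            boolean isUnspecified() { return value.isEmpty(); }
        }

        static Optional<CloudAccount> decide(CloudAccount requested, Set<CloudAccount> tenantAccounts, boolean zoneConfigured) {
            if (requested.isUnspecified())
                return Optional.empty(); // no custom account requested: deploy with the default account
            if ( ! tenantAccounts.contains(requested))
                throw new IllegalArgumentException("Requested cloud account '" + requested.value() + "' is not valid for tenant");
            if ( ! zoneConfigured)
                throw new IllegalArgumentException("Zone is not configured in requested cloud account '" + requested.value() + "'");
            return Optional.of(requested);
        }

        public static void main(String[] args) {
            CloudAccount account = new CloudAccount("aws:012345678912");
            System.out.println(decide(account, Set.of(account), true)); // Optional[CloudAccount[value=aws:012345678912]]
        }
    }
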
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java
index c76616c6d2c..9e0a94f3e85 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java
@@ -112,7 +112,6 @@ public class RoutingController {
Set<Endpoint> endpoints = new LinkedHashSet<>();
// To discover the cluster name for a zone-scoped endpoint, we need to read routing policies
for (var policy : routingPolicies.read(deployment)) {
- if (!policy.status().isActive()) continue;
RoutingMethod routingMethod = controller.zoneRegistry().routingMethod(policy.id().zone());
endpoints.addAll(policy.zoneEndpointsIn(controller.system(), routingMethod));
endpoints.add(policy.regionEndpointIn(controller.system(), routingMethod));
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageValidator.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageValidator.java
index bd42587576d..0d8e7745f65 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageValidator.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageValidator.java
@@ -67,11 +67,10 @@ public class ApplicationPackageValidator {
private void validateCloudAccounts(Application application, ApplicationPackage applicationPackage) {
Set<CloudAccount> tenantAccounts = new TreeSet<>(controller.applications().accountsOf(application.id().tenant()));
- Set<CloudAccount> declaredAccounts = new TreeSet<>();
- applicationPackage.deploymentSpec().cloudAccount().ifPresent(declaredAccounts::add);
+ Set<CloudAccount> declaredAccounts = new TreeSet<>(applicationPackage.deploymentSpec().cloudAccounts().values());
for (DeploymentInstanceSpec instance : applicationPackage.deploymentSpec().instances())
for (ZoneId zone : controller.zoneRegistry().zones().controllerUpgraded().ids())
- instance.cloudAccount(zone.environment(), Optional.of(zone.region())).ifPresent(declaredAccounts::add);
+ declaredAccounts.addAll(instance.cloudAccounts(zone.environment(), zone.region()).values());
declaredAccounts.removeIf(tenantAccounts::contains);
declaredAccounts.removeIf(CloudAccount::isUnspecified);
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackage.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackage.java
index afb854b2aaa..eceaae80cef 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackage.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackage.java
@@ -5,7 +5,10 @@ import com.yahoo.config.application.api.DeploymentSpec;
import com.yahoo.config.application.api.DeploymentSpec.Step;
import com.yahoo.config.provision.AthenzDomain;
import com.yahoo.config.provision.AthenzService;
+import com.yahoo.config.provision.CloudAccount;
+import com.yahoo.config.provision.CloudName;
import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.path.Path;
@@ -48,6 +51,7 @@ import java.util.jar.JarInputStream;
import java.util.jar.Manifest;
import java.util.regex.Pattern;
+import static com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterCloud.Suite.of;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterCloud.Suite.production;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterCloud.Suite.staging;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterCloud.Suite.staging_setup;
@@ -76,7 +80,7 @@ public class TestPackage {
private final ApplicationPackageStream applicationPackageStream;
private final X509Certificate certificate;
- public TestPackage(Supplier<InputStream> inZip, boolean isPublicSystem, RunId id, Testerapp testerApp,
+ public TestPackage(Supplier<InputStream> inZip, boolean isPublicSystem, CloudName cloud, RunId id, Testerapp testerApp,
DeploymentSpec spec, Instant certificateValidFrom, Duration certificateValidDuration) {
KeyPair keyPair;
if (certificateValidFrom != null) {
@@ -128,10 +132,7 @@ public class TestPackage {
testerApp)));
entries.put(deploymentFile,
- __ -> new ByteArrayInputStream(deploymentXml(id.tester(),
- spec.athenzDomain(),
- spec.requireInstance(id.application().instance())
- .athenzService(id.type().zone().environment(), id.type().zone().region()))));
+ __ -> new ByteArrayInputStream(deploymentXml(id.tester(), id.application().instance(), cloud, id.type().zone(), spec)));
if (certificate != null) {
entries.put("artifacts/key", __ -> new ByteArrayInputStream(KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8)));
@@ -297,13 +298,27 @@ public class TestPackage {
}
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */
- static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) {
+ static byte[] deploymentXml(TesterId id, InstanceName instance, CloudName cloud, ZoneId zone, DeploymentSpec original) {
+ Optional<AthenzDomain> athenzDomain = original.athenzDomain();
+ Optional<AthenzService> athenzService = original.requireInstance(instance)
+ .athenzService(zone.environment(), zone.region());
+ Optional<CloudAccount> cloudAccount = Optional.of(original.cloudAccount(cloud, instance, zone))
+ .filter(account -> ! account.isUnspecified());
+ Optional<Duration> hostTTL = (zone.environment().isProduction()
+ ? original.requireInstance(instance)
+ .steps().stream().filter(step -> step.isTest() && step.concerns(zone.environment(), Optional.of(zone.region())))
+ .findFirst().flatMap(Step::hostTTL)
+ : original.requireInstance(instance).hostTTL(zone.environment(), Optional.of(zone.region())))
+ .filter(__ -> cloudAccount.isPresent());
String deploymentSpec =
"<?xml version='1.0' encoding='UTF-8'?>\n" +
- "<deployment version=\"1.0\" " +
- athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") +
- athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + ">" +
- " <instance id=\"" + id.id().instance().value() + "\" />" +
+ "<deployment version='1.0'" +
+ athenzDomain.map(domain -> " athenz-domain='" + domain.value() + "'").orElse("") +
+ athenzService.map(service -> " athenz-service='" + service.value() + "'").orElse("") +
+ cloudAccount.map(account -> " cloud-account='" + account.value() + "'").orElse("") +
+ hostTTL.map(ttl -> " empty-host-ttl='" + ttl.getSeconds() / 60 + "m'").orElse("") +
+ ">" +
+ " <instance id='" + id.id().instance().value() + "' />" +
"</deployment>";
return deploymentSpec.getBytes(UTF_8);
}
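
With the new signature, deploymentXml also emits optional cloud-account and empty-host-ttl attributes on the tester's deployment element. A standalone sketch of the attribute assembly with hypothetical values, showing the shape of the generated document:

    import java.time.Duration;
    import java.util.Optional;

    class DeploymentXmlSketch {

        public static void main(String[] args) {
            Optional<String> athenzDomain  = Optional.of("my.domain");         // hypothetical value
            Optional<String> athenzService = Optional.of("my-service");        // hypothetical value
            Optional<String> cloudAccount  = Optional.of("aws:012345678912");  // hypothetical value
            Optional<Duration> hostTTL     = Optional.of(Duration.ofHours(2)); // hypothetical value

            String xml = "<?xml version='1.0' encoding='UTF-8'?>\n" +
                         "<deployment version='1.0'" +
                         athenzDomain.map(domain -> " athenz-domain='" + domain + "'").orElse("") +
                         athenzService.map(service -> " athenz-service='" + service + "'").orElse("") +
                         cloudAccount.map(account -> " cloud-account='" + account + "'").orElse("") +
                         hostTTL.map(ttl -> " empty-host-ttl='" + ttl.getSeconds() / 60 + "m'").orElse("") +
                         ">" +
                         " <instance id='default' />" +
                         "</deployment>";
            System.out.println(xml);
            // <deployment version='1.0' athenz-domain='my.domain' athenz-service='my-service'
            //             cloud-account='aws:012345678912' empty-host-ttl='120m'> <instance id='default' /></deployment>
        }
    }
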
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java
index 91ece6733e1..ac896338643 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java
@@ -645,10 +645,16 @@ public class DeploymentStatus {
Optional<Instant> platformReadyAt = step.dependenciesCompletedAt(change.withoutApplication(), Optional.of(job));
Optional<Instant> revisionReadyAt = step.dependenciesCompletedAt(change.withoutPlatform(), Optional.of(job));
+ boolean failingUpgradeOnlyTests = ! jobs().type(systemTest(job.type()), stagingTest(job.type()))
+ .failingHardOn(Versions.from(change.withoutApplication(), application, deploymentFor(job), () -> systemVersion))
+ .isEmpty();
+
// If neither change is ready, we guess based on the specified rollout.
if (platformReadyAt.isEmpty() && revisionReadyAt.isEmpty()) {
return switch (rollout) {
- case separate -> List.of(change.withoutApplication(), change); // Platform should stay ahead.
+ case separate -> ! failingUpgradeOnlyTests
+ ? List.of(change.withoutApplication(), change) // Platform should stay ahead ...
+ : List.of(change); // ... unless upgrade-only is failing tests.
case leading -> List.of(change); // They should eventually join.
case simultaneous -> List.of(change.withoutPlatform(), change); // Revision should get ahead.
};
@@ -663,9 +669,6 @@ public class DeploymentStatus {
// Both changes are ready for this step, and we look to the specified rollout to decide.
boolean platformReadyFirst = platformReadyAt.get().isBefore(revisionReadyAt.get());
boolean revisionReadyFirst = revisionReadyAt.get().isBefore(platformReadyAt.get());
- boolean failingUpgradeOnlyTests = ! jobs().type(systemTest(job.type()), stagingTest(job.type()))
- .failingHardOn(Versions.from(change.withoutApplication(), application, deploymentFor(job), () -> systemVersion))
- .isEmpty();
return switch (rollout) {
case separate -> // Let whichever change rolled out first, keep rolling first, unless upgrade alone is failing.
(platformReadyFirst || platformReadyAt.get().equals(Instant.EPOCH)) // Assume platform was first if no jobs have run yet.
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
index 71ab1c4d7da..59ebe0d07d3 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
@@ -71,13 +71,13 @@ import static com.yahoo.config.application.api.Notifications.When.failing;
import static com.yahoo.config.application.api.Notifications.When.failingCommit;
import static com.yahoo.vespa.hosted.controller.api.integration.configserver.Node.State.active;
import static com.yahoo.vespa.hosted.controller.api.integration.configserver.Node.State.reserved;
-import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.cancelled;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.deploymentFailed;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.error;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.installationFailed;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.invalidApplication;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.noTests;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.nodeAllocationFailure;
+import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.quotaExceeded;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.reset;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.running;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.success;
@@ -250,7 +250,7 @@ public class InternalStepRunner implements StepRunner {
case LOAD_BALANCER_NOT_READY, PARENT_HOST_NOT_READY -> {
logger.log(e.message()); // Consider splitting these messages in summary and details, on config server.
Instant someTimeAfterStart = startTime.plusSeconds(200);
- Instant inALittleWhile = controller.clock().instant().plusSeconds(90);
+ Instant inALittleWhile = controller.clock().instant().plusSeconds(60);
controller.jobController().locked(id, run -> run.sleepingUntil(someTimeAfterStart.isAfter(inALittleWhile) ? someTimeAfterStart : inALittleWhile));
return result;
}
@@ -268,6 +268,10 @@ public class InternalStepRunner implements StepRunner {
logger.log(WARNING, e.getMessage());
return Optional.of(deploymentFailed);
}
+ case QUOTA_EXCEEDED -> {
+ logger.log(WARNING, e.getMessage());
+ return Optional.of(quotaExceeded);
+ }
}
throw e;
@@ -804,7 +808,7 @@ public class InternalStepRunner implements StepRunner {
NotificationSource source = NotificationSource.from(run.id());
Consumer<String> updater = msg -> controller.notificationsDb().setNotification(source, Notification.Type.deployment, Notification.Level.error, msg);
switch (isRemoved ? success : run.status()) {
- case aborted: return; // wait and see how the next run goes.
+ case aborted, cancelled: return; // wait and see how the next run goes.
case noTests:
case running:
case success:
@@ -828,6 +832,9 @@ public class InternalStepRunner implements StepRunner {
case error:
case endpointCertificateTimeout:
break;
+ case quotaExceeded:
+ updater.accept("quota exceeded. Contact support to upgrade your plan.");
+ return;
default:
logger.log(WARNING, "Don't know what to set console notification to for run status '" + run.status() + "'");
}
@@ -905,6 +912,7 @@ public class InternalStepRunner implements StepRunner {
TestPackage testPackage = new TestPackage(() -> controller.applications().applicationStore().streamTester(id.application().tenant(),
id.application().application(), revision),
controller.system().isPublic(),
+ controller.zoneRegistry().get(id.type().zone()).getCloudName(),
id,
controller.controllerConfig().steprunner().testerapp(),
spec,
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobMetrics.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobMetrics.java
index e2b231e0946..b9bff5f777e 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobMetrics.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobMetrics.java
@@ -25,6 +25,7 @@ public class JobMetrics {
public static final String abort = "deployment.abort";
public static final String cancel = "deployment.cancel";
public static final String success = "deployment.success";
+ public static final String quotaExceeded = "deployment.quotaExceeded";
private final Metric metric;
@@ -61,6 +62,7 @@ public class JobMetrics {
case cancelled -> cancel;
case aborted -> abort;
case success -> success;
+ case quotaExceeded -> quotaExceeded;
default -> throw new IllegalArgumentException("Unexpected run status '" + status + "'");
};
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RevisionHistory.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RevisionHistory.java
index 72fff51d6b2..a8dd1c442fe 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RevisionHistory.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RevisionHistory.java
@@ -16,9 +16,11 @@ import java.util.NavigableMap;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.TreeMap;
+import java.util.function.Predicate;
import static ai.vespa.validation.Validation.require;
import static java.util.Collections.emptyNavigableMap;
+import static java.util.function.Predicate.not;
/**
* History of application revisions for an {@link com.yahoo.vespa.hosted.controller.Application}.
@@ -58,10 +60,12 @@ public class RevisionHistory {
return new RevisionHistory(production, development);
}
- /** Returns a copy of this without any production revisions older than the given. */
+ /** Returns a copy of this where any production revisions without packages, and older than the given one, are removed. */
public RevisionHistory withoutOlderThan(RevisionId id) {
if (production.headMap(id).isEmpty()) return this;
- return new RevisionHistory(production.tailMap(id, true), development);
+ NavigableMap<RevisionId, ApplicationVersion> production = new TreeMap<>(this.production);
+ production.headMap(id).values().removeIf(not(ApplicationVersion::hasPackage));
+ return new RevisionHistory(production, development);
}
/** Returns a copy of this without any development revisions older than the given. */
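
The rewritten withoutOlderThan relies on NavigableMap.headMap returning a view backed by the map, so removeIf on that view drops only the older revisions without a package while keeping everything else. A plain-JDK illustration of that write-through behaviour:

    import java.util.NavigableMap;
    import java.util.TreeMap;

    class HeadMapRemovalSketch {
        public static void main(String[] args) {
            NavigableMap<Integer, String> revisions = new TreeMap<>();
            revisions.put(1, "");          // older than 3, no package attached
            revisions.put(2, "package-2"); // older than 3, but still has its package
            revisions.put(3, "package-3"); // the given revision and anything newer are always kept

            // Removing from the headMap(...) view writes through to the backing map,
            // mirroring production.headMap(id).values().removeIf(not(ApplicationVersion::hasPackage)).
            revisions.headMap(3).values().removeIf(String::isEmpty);
            System.out.println(revisions); // {2=package-2, 3=package-3}
        }
    }
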
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RunStatus.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RunStatus.java
index b89e89e7002..5d625285a7d 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RunStatus.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RunStatus.java
@@ -45,6 +45,9 @@ public enum RunStatus {
cancelled,
/** Run should be reset to its starting state. Used for production tests. */
- reset
+ reset,
+
+ /** Deployment of the real application was rejected due to exceeding quota. */
+ quotaExceeded
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveUriUpdater.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveUriUpdater.java
index c4f3c611cc5..a8d025dbb6a 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveUriUpdater.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveUriUpdater.java
@@ -57,7 +57,7 @@ public class ArchiveUriUpdater extends ControllerMaintainer {
for (var application : applications.asList()) {
for (var instance : application.instances().values()) {
for (var deployment : instance.deployments().values()) {
- if (zoneRegistry.isEnclave(deployment.cloudAccount())) accountsByZone.get(deployment.zone()).add(deployment.cloudAccount());
+ if (zoneRegistry.isExclave(deployment.cloudAccount())) accountsByZone.get(deployment.zone()).add(deployment.cloudAccount());
else tenantsByZone.get(deployment.zone()).add(instance.id().tenant());
}
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintainer.java
index f21803283eb..ff8fdf7ace4 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintainer.java
@@ -38,7 +38,7 @@ public abstract class ControllerMaintainer extends Maintainer {
this(controller, interval, name, activeSystems, 1.0);
}
- public ControllerMaintainer(Controller controller, Duration interval, String name, Set<SystemName> activeSystems, Double successFactorBaseline) {
+ public ControllerMaintainer(Controller controller, Duration interval, String name, Set<SystemName> activeSystems, double successFactorBaseline) {
super(name, interval, controller.clock(), controller.jobControl(),
new ControllerJobMetrics(controller.metric()), controller.curator().cluster(), true, successFactorBaseline);
this.controller = controller;
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializer.java
index 47b27aac79a..bc977baf048 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializer.java
@@ -67,8 +67,7 @@ public class RoutingPolicySerializer {
policy.instanceEndpoints().forEach(endpointId -> instanceEndpointsArray.addString(endpointId.id()));
var applicationEndpointsArray = policyObject.setArray(applicationEndpointsField);
policy.applicationEndpoints().forEach(endpointId -> applicationEndpointsArray.addString(endpointId.id()));
- policyObject.setBool(loadBalancerActiveField, policy.status().isActive());
- globalRoutingToSlime(policy.status().routingStatus(), policyObject.setObject(globalRoutingField));
+ globalRoutingToSlime(policy.routingStatus(), policyObject.setObject(globalRoutingField));
if ( ! policy.isPublic()) policyObject.setBool(privateOnlyField, true);
});
return slime;
@@ -93,8 +92,7 @@ public class RoutingPolicySerializer {
SlimeUtils.optionalString(inspect.field(dnsZoneField)),
instanceEndpoints,
applicationEndpoints,
- new RoutingPolicy.Status(inspect.field(loadBalancerActiveField).asBool(),
- globalRoutingFromSlime(inspect.field(globalRoutingField))),
+ routingStatusFromSlime(inspect.field(globalRoutingField)),
isPublic));
});
return Collections.unmodifiableList(policies);
@@ -106,7 +104,7 @@ public class RoutingPolicySerializer {
object.setLong(changedAtField, routingStatus.changedAt().toEpochMilli());
}
- public RoutingStatus globalRoutingFromSlime(Inspector object) {
+ public RoutingStatus routingStatusFromSlime(Inspector object) {
var status = RoutingStatus.Value.valueOf(object.field(statusField).asString());
var agent = RoutingStatus.Agent.valueOf(object.field(agentField).asString());
var changedAt = SlimeUtils.optionalInstant(object.field(changedAtField)).orElse(Instant.EPOCH);
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java
index 4da7aa4b2bd..4547eed24c8 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java
@@ -38,6 +38,7 @@ import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.installatio
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.invalidApplication;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.noTests;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.nodeAllocationFailure;
+import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.quotaExceeded;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.reset;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.running;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.success;
@@ -345,6 +346,7 @@ class RunSerializer {
case aborted -> "aborted";
case cancelled -> "cancelled";
case reset -> "reset";
+ case quotaExceeded -> "quotaExceeded";
};
}
@@ -363,6 +365,7 @@ class RunSerializer {
case "aborted" -> aborted;
case "cancelled" -> cancelled;
case "reset" -> reset;
+ case "quotaExceeded" -> quotaExceeded;
default -> throw new IllegalArgumentException("No run status defined by '" + status + "'!");
};
}
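
The serializer keeps a symmetric pair of exhaustive switches between RunStatus values and their persisted strings, so adding quotaExceeded in only one direction fails to compile or throws at read time. A self-contained sketch of that round-trip pattern with a stand-in enum:

    class RunStatusMappingSketch {

        enum Status { SUCCESS, ABORTED, CANCELLED, RESET, QUOTA_EXCEEDED }

        static String valueOf(Status status) {
            return switch (status) {
                case SUCCESS -> "success";
                case ABORTED -> "aborted";
                case CANCELLED -> "cancelled";
                case RESET -> "reset";
                case QUOTA_EXCEEDED -> "quotaExceeded";
            };
        }

        static Status runStatusOf(String status) {
            return switch (status) {
                case "success" -> Status.SUCCESS;
                case "aborted" -> Status.ABORTED;
                case "cancelled" -> Status.CANCELLED;
                case "reset" -> Status.RESET;
                case "quotaExceeded" -> Status.QUOTA_EXCEEDED;
                default -> throw new IllegalArgumentException("No run status defined by '" + status + "'!");
            };
        }

        public static void main(String[] args) {
            for (Status status : Status.values())
                System.out.println(status + " -> " + valueOf(status) + " -> " + runStatusOf(valueOf(status)));
        }
    }
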
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ZoneRoutingPolicySerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ZoneRoutingPolicySerializer.java
index 5932c54650b..d6342bc355f 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ZoneRoutingPolicySerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ZoneRoutingPolicySerializer.java
@@ -31,7 +31,7 @@ public class ZoneRoutingPolicySerializer {
public ZoneRoutingPolicy fromSlime(ZoneId zone, Slime slime) {
var root = slime.get();
- return new ZoneRoutingPolicy(zone, routingPolicySerializer.globalRoutingFromSlime(root.field(GLOBAL_ROUTING_FIELD)));
+ return new ZoneRoutingPolicy(zone, routingPolicySerializer.routingStatusFromSlime(root.field(GLOBAL_ROUTING_FIELD)));
}
public Slime toSlime(ZoneRoutingPolicy policy) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
index f911bde9535..426c89ff20e 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
@@ -131,6 +131,7 @@ import com.yahoo.vespa.hosted.controller.tenant.TenantInfo;
import com.yahoo.vespa.hosted.controller.versions.VersionStatus;
import com.yahoo.vespa.hosted.controller.versions.VespaVersion;
import com.yahoo.yolean.Exceptions;
+
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
@@ -1426,6 +1427,14 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
};
}
+ private static String valueOf(NodeResources.Architecture architecture) {
+ return switch (architecture) {
+ case x86_64 -> "x86_64";
+ case arm64 -> "arm64";
+ case any -> "any";
+ };
+ }
+
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
ZoneId zone = requireZone(environment, region);
@@ -1882,12 +1891,11 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
- // TODO (freva): Get cloudAccount from deployment once all applications have redeployed once
- controller.applications().decideCloudAccountOf(deploymentId, application.deploymentSpec()).ifPresent(cloudAccount -> {
+ if (controller.zoneRegistry().isExternal(deployment.cloudAccount())) {
Cursor enclave = response.setObject("enclave");
- enclave.setString("cloudAccount", cloudAccount.value());
- controller.zoneRegistry().cloudAccountAthenzDomain(cloudAccount).ifPresent(domain -> enclave.setString("athensDomain", domain.value()));
- });
+ enclave.setString("cloudAccount", deployment.cloudAccount().value());
+ controller.zoneRegistry().cloudAccountAthenzDomain(deployment.cloudAccount()).ifPresent(domain -> enclave.setString("athensDomain", domain.value()));
+ }
var instance = application.instances().get(deploymentId.applicationId().instance());
if (instance != null) {
@@ -1918,7 +1926,7 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
response.setDouble("quota", deployment.quota().rate());
deployment.cost().ifPresent(cost -> response.setDouble("cost", cost));
- (controller.zoneRegistry().isEnclave(deployment.cloudAccount()) ?
+ (controller.zoneRegistry().isExclave(deployment.cloudAccount()) ?
controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deployment.cloudAccount(), false) :
controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false))
.ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString()));
@@ -2428,14 +2436,15 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
if ( ! type.environment().isManuallyDeployed() && ! (isOperator(request) || controller.system().isCd()))
throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");
+ controller.applications().verifyPlan(id.tenant());
+
Map<String, byte[]> dataParts = parseDataParts(request);
if ( ! dataParts.containsKey("applicationZip"))
throw new IllegalArgumentException("Missing required form part 'applicationZip'");
ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(APPLICATION_ZIP));
controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
- Optional.of(id.instance()),
- Optional.of(type.zone()),
+ Optional.of(new DeploymentId(id, type.zone())),
applicationPackage,
Optional.of(requireUserPrincipal(request)));
@@ -2835,6 +2844,7 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
object.setDouble("bandwidthGbps", resources.bandwidthGbps());
object.setString("diskSpeed", valueOf(resources.diskSpeed()));
object.setString("storageType", valueOf(resources.storageType()));
+ object.setString("architecture", valueOf(resources.architecture()));
}
// A tenant has different content when in a list ... antipattern, but not solvable before application/v5
@@ -3047,6 +3057,9 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
}
private HttpResponse submit(String tenant, String application, HttpRequest request) {
+ TenantName tenantName = TenantName.from(tenant);
+ controller.applications().verifyPlan(tenantName);
+
Map<String, byte[]> dataParts = parseDataParts(request);
Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
long projectId = submitOptions.field("projectId").asLong(); // Absence of this means it's not a prod app :/
@@ -3072,11 +3085,8 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
byte[] testPackage = dataParts.getOrDefault(APPLICATION_TEST_ZIP, new byte[0]);
Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, controller.clock().instant(), risk);
- TenantName tenantName = TenantName.from(tenant);
- controller.applications().verifyPlan(tenantName);
controller.applications().verifyApplicationIdentityConfiguration(tenantName,
Optional.empty(),
- Optional.empty(),
applicationPackage,
Optional.of(requireUserPrincipal(request)));
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java
index 2f93ce999cd..d1d0fb54eef 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java
@@ -27,7 +27,6 @@ import com.yahoo.vespa.hosted.controller.deployment.ConvergenceSummary;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentStatus;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentStatus.DelayCause;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentStatus.Readiness;
-import com.yahoo.vespa.hosted.controller.deployment.DeploymentStatus.StepType;
import com.yahoo.vespa.hosted.controller.deployment.JobController;
import com.yahoo.vespa.hosted.controller.deployment.JobStatus;
import com.yahoo.vespa.hosted.controller.deployment.Run;
@@ -240,6 +239,7 @@ class JobControllerApiHandlerHelper {
case installationFailed -> "installationFailed";
case invalidApplication, deploymentFailed -> "deploymentFailed";
case success -> "success";
+ case quotaExceeded -> "quotaExceeded";
};
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/deployment/DeploymentApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/deployment/DeploymentApiHandler.java
index 862fa08ab86..feb8a89b057 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/deployment/DeploymentApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/deployment/DeploymentApiHandler.java
@@ -268,6 +268,7 @@ public class DeploymentApiHandler extends ThreadedHttpRequestHandler {
case installationFailed -> "installationFailed";
case invalidApplication, deploymentFailed -> "deploymentFailed";
case success -> "success";
+ case quotaExceeded -> "quotaExceeded";
};
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java
index 59f886bdea6..2726c778218 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java
@@ -107,7 +107,7 @@ public class RoutingPolicies {
ApplicationId instance = deployment.applicationId();
List<LoadBalancer> loadBalancers = controller.serviceRegistry().configServer()
.getLoadBalancers(instance, deployment.zoneId());
- LoadBalancerAllocation allocation = new LoadBalancerAllocation(loadBalancers, deployment, deploymentSpec);
+ LoadBalancerAllocation allocation = new LoadBalancerAllocation(deployment, deploymentSpec, loadBalancers);
Set<ZoneId> inactiveZones = inactiveZones(instance, deploymentSpec);
Optional<TenantAndApplicationId> owner = ownerOf(allocation);
try (var lock = db.lockRoutingPolicies()) {
@@ -147,8 +147,7 @@ public class RoutingPolicies {
RoutingPolicyList deploymentPolicies = applicationPolicies.deployment(deployment);
Map<RoutingPolicyId, RoutingPolicy> updatedPolicies = new LinkedHashMap<>(applicationPolicies.asMap());
for (var policy : deploymentPolicies) {
- var newPolicy = policy.with(policy.status().with(RoutingStatus.create(value, agent,
- controller.clock().instant())));
+ var newPolicy = policy.with(RoutingStatus.create(value, agent, controller.clock().instant()));
updatedPolicies.put(policy.id(), newPolicy);
}
@@ -360,11 +359,11 @@ public class RoutingPolicies {
var newPolicy = new RoutingPolicy(policyId, loadBalancer.hostname(), loadBalancer.ipAddress(), dnsZone,
allocation.instanceEndpointsOf(loadBalancer),
allocation.applicationEndpointsOf(loadBalancer),
- new RoutingPolicy.Status(isActive(loadBalancer), RoutingStatus.DEFAULT),
+ RoutingStatus.DEFAULT,
loadBalancer.isPublic());
// Preserve global routing status for existing policy
if (existingPolicy != null) {
- newPolicy = newPolicy.with(newPolicy.status().with(existingPolicy.status().routingStatus()));
+ newPolicy = newPolicy.with(existingPolicy.routingStatus());
}
updateZoneDnsOf(newPolicy, loadBalancer, allocation.deployment);
policies.put(newPolicy.id(), newPolicy);
@@ -553,19 +552,10 @@ public class RoutingPolicies {
// - deployment level (RoutingPolicy)
// - application package level (deployment.xml)
return zonePolicy.routingStatus().value() == RoutingStatus.Value.out ||
- policy.status().routingStatus().value() == RoutingStatus.Value.out ||
+ policy.routingStatus().value() == RoutingStatus.Value.out ||
inactiveZones.contains(policy.id().zone());
}
- private static boolean isActive(LoadBalancer loadBalancer) {
- return switch (loadBalancer.state()) {
- // Count reserved as active as we want callers (application API) to see the endpoint as early
- // as possible
- case reserved, active -> true;
- default -> false;
- };
- }
-
/** Represents records for a region-wide endpoint */
private static class RegionEndpoint {
@@ -604,20 +594,27 @@ public class RoutingPolicies {
}
- /** Load balancers allocated to a deployment */
- private static class LoadBalancerAllocation {
+ /** Active load balancers allocated to a deployment */
+ record LoadBalancerAllocation(DeploymentId deployment,
+ DeploymentSpec deploymentSpec,
+ List<LoadBalancer> loadBalancers) {
- private final DeploymentId deployment;
- private final List<LoadBalancer> loadBalancers;
- private final DeploymentSpec deploymentSpec;
-
- private LoadBalancerAllocation(List<LoadBalancer> loadBalancers, DeploymentId deployment,
- DeploymentSpec deploymentSpec) {
+ public LoadBalancerAllocation(DeploymentId deployment,
+ DeploymentSpec deploymentSpec,
+ List<LoadBalancer> loadBalancers) {
this.deployment = deployment;
- this.loadBalancers = List.copyOf(loadBalancers);
+ this.loadBalancers = loadBalancers.stream().filter(LoadBalancerAllocation::isActive).toList();
this.deploymentSpec = deploymentSpec;
}
+ private static boolean isActive(LoadBalancer loadBalancer) {
+ return switch (loadBalancer.state()) {
+ // Count reserved as active as we want to do DNS updates as early as possible
+ case reserved, active -> true;
+ default -> false;
+ };
+ }
+
/** Returns the policy IDs of the load balancers contained in this */
private Set<RoutingPolicyId> asPolicyIds() {
return loadBalancers.stream()
@@ -637,7 +634,7 @@ public class RoutingPolicies {
return Set.of();
}
if (instanceSpec.get().globalServiceId().filter(id -> id.equals(loadBalancer.cluster().value())).isPresent()) {
- // Legacy assignment always has the default endpoint Id
+ // Legacy assignment always has the default endpoint ID
return Set.of(EndpointId.defaultId());
}
return instanceSpec.get().endpoints().stream()
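
LoadBalancerAllocation is now a record whose canonical constructor filters the given load balancers, so an allocation only ever holds the active (or reserved) ones. A minimal sketch of that normalize-in-the-constructor pattern with simplified stand-in types:

    import java.util.List;

    class AllocationSketch {

        enum State { reserved, active, inactive }

        record LoadBalancer(String hostname, State state) { }

        record Allocation(String deployment, List<LoadBalancer> loadBalancers) {
            Allocation(String deployment, List<LoadBalancer> loadBalancers) {
                this.deployment = deployment;
                // Normalize on construction: keep only load balancers we consider active.
                this.loadBalancers = loadBalancers.stream().filter(Allocation::isActive).toList();
            }
            private static boolean isActive(LoadBalancer loadBalancer) {
                return switch (loadBalancer.state()) {
                    case reserved, active -> true; // reserved counts as active so DNS updates happen early
                    default -> false;
                };
            }
        }

        public static void main(String[] args) {
            Allocation allocation = new Allocation("tenant.app in prod.us-east-3",
                                                   List.of(new LoadBalancer("lb1", State.active),
                                                           new LoadBalancer("lb2", State.inactive)));
            System.out.println(allocation.loadBalancers()); // [LoadBalancer[hostname=lb1, state=active]]
        }
    }
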
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicy.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicy.java
index 38ecff452c8..b4d83b7ded6 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicy.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicy.java
@@ -28,19 +28,19 @@ public record RoutingPolicy(RoutingPolicyId id,
Optional<String> dnsZone,
Set<EndpointId> instanceEndpoints,
Set<EndpointId> applicationEndpoints,
- Status status,
+ RoutingStatus routingStatus,
boolean isPublic) {
/** DO NOT USE. Public for serialization purposes */
public RoutingPolicy(RoutingPolicyId id, Optional<DomainName> canonicalName, Optional<String> ipAddress, Optional<String> dnsZone,
- Set<EndpointId> instanceEndpoints, Set<EndpointId> applicationEndpoints, Status status, boolean isPublic) {
+ Set<EndpointId> instanceEndpoints, Set<EndpointId> applicationEndpoints, RoutingStatus routingStatus, boolean isPublic) {
this.id = Objects.requireNonNull(id, "id must be non-null");
this.canonicalName = Objects.requireNonNull(canonicalName, "canonicalName must be non-null");
this.ipAddress = Objects.requireNonNull(ipAddress, "ipAddress must be non-null");
this.dnsZone = Objects.requireNonNull(dnsZone, "dnsZone must be non-null");
this.instanceEndpoints = ImmutableSortedSet.copyOf(Objects.requireNonNull(instanceEndpoints, "instanceEndpoints must be non-null"));
this.applicationEndpoints = ImmutableSortedSet.copyOf(Objects.requireNonNull(applicationEndpoints, "applicationEndpoints must be non-null"));
- this.status = Objects.requireNonNull(status, "status must be non-null");
+ this.routingStatus = Objects.requireNonNull(routingStatus, "routingStatus must be non-null");
this.isPublic = isPublic;
if (canonicalName.isEmpty() == ipAddress.isEmpty())
@@ -82,9 +82,9 @@ public record RoutingPolicy(RoutingPolicyId id,
return applicationEndpoints;
}
- /** Returns the status of this */
- public Status status() {
- return status;
+ /** Returns the routing status of this */
+ public RoutingStatus routingStatus() {
+ return routingStatus;
}
/** Returns whether this has a load balancer which is available from public internet. */
@@ -98,9 +98,9 @@ public record RoutingPolicy(RoutingPolicyId id,
id.zone().equals(deployment.zoneId());
}
- /** Returns a copy of this with status set to given status */
- public RoutingPolicy with(Status status) {
- return new RoutingPolicy(id, canonicalName, ipAddress, dnsZone, instanceEndpoints, applicationEndpoints, status, isPublic);
+ /** Returns a copy of this with routing status set to given status */
+ public RoutingPolicy with(RoutingStatus routingStatus) {
+ return new RoutingPolicy(id, canonicalName, ipAddress, dnsZone, instanceEndpoints, applicationEndpoints, routingStatus, isPublic);
}
/** Returns the zone endpoints of this */
@@ -140,30 +140,4 @@ public record RoutingPolicy(RoutingPolicyId id,
.on(Port.fromRoutingMethod(routingMethod))
.routingMethod(routingMethod);
}
-
- /** The status of a routing policy */
- public record Status(boolean active, RoutingStatus routingStatus) {
-
- /** DO NOT USE. Public for serialization purposes */
- public Status {
- Objects.requireNonNull(routingStatus, "routingStatus must be non-null");
- }
-
- /** Returns whether this is considered active according to the load balancer status */
- public boolean isActive() {
- return active;
- }
-
- /** Return status of routing */
- public RoutingStatus routingStatus() {
- return routingStatus;
- }
-
- /** Returns a copy of this with routing status changed */
- public Status with(RoutingStatus routingStatus) {
- return new Status(active, routingStatus);
- }
-
- }
-
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/DeploymentRoutingContext.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/DeploymentRoutingContext.java
index 0b1411d5bc3..2a7e4cb5c14 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/DeploymentRoutingContext.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/DeploymentRoutingContext.java
@@ -146,8 +146,7 @@ public abstract class DeploymentRoutingContext implements RoutingContext {
// first matching policy here
return controller.policies().read(deployment)
.first()
- .map(RoutingPolicy::status)
- .map(RoutingPolicy.Status::routingStatus)
+ .map(RoutingPolicy::routingStatus)
.orElse(RoutingStatus.DEFAULT);
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java
index 693a74f8651..9efdee28063 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java
@@ -56,6 +56,7 @@ import com.yahoo.vespa.hosted.controller.routing.rotation.RotationLock;
import com.yahoo.vespa.hosted.controller.versions.VespaVersion.Confidence;
import com.yahoo.vespa.hosted.rotation.config.RotationsConfig;
import org.junit.jupiter.api.Test;
+
import java.io.InputStream;
import java.time.Duration;
import java.time.Instant;
@@ -71,7 +72,9 @@ import java.util.stream.Collectors;
import java.util.stream.Stream;
import static com.yahoo.config.provision.SystemName.main;
+import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.devAwsUsEast2a;
import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.devUsEast1;
+import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.productionAwsUsEast1a;
import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.productionUsEast3;
import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.productionUsWest1;
import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.stagingTest;
@@ -1465,40 +1468,39 @@ public class ControllerTest {
@Test
void testCloudAccount() {
DeploymentContext context = tester.newDeploymentContext();
- ZoneId devZone = devUsEast1.zone();
- ZoneId prodZone = productionUsWest1.zone();
- String cloudAccount = "012345678912";
+ ZoneId devZone = devAwsUsEast2a.zone();
+ ZoneId prodZone = productionAwsUsEast1a.zone();
+ String cloudAccount = "aws:012345678912";
var applicationPackage = new ApplicationPackageBuilder()
.cloudAccount(cloudAccount)
.region(prodZone.region())
.build();
// Submission fails because cloud account is not declared for this tenant
- assertEquals("cloud accounts [012345678912] are not valid for tenant tenant",
+ assertEquals("cloud accounts [aws:012345678912] are not valid for tenant tenant",
assertThrows(IllegalArgumentException.class,
() -> context.submit(applicationPackage))
.getMessage());
- assertEquals("cloud accounts [012345678912] are not valid for tenant tenant",
+ assertEquals("cloud accounts [aws:012345678912] are not valid for tenant tenant",
assertThrows(IllegalArgumentException.class,
- () -> context.runJob(devUsEast1, applicationPackage))
+ () -> context.runJob(devZone, applicationPackage))
.getMessage());
// Deployment fails because zone is not configured in requested cloud account
tester.controllerTester().flagSource().withListFlag(PermanentFlags.CLOUD_ACCOUNTS.id(), List.of(cloudAccount), String.class);
- assertEquals("Zone test.us-east-1 is not configured in requested cloud account '012345678912'",
+ assertEquals("Zone prod.aws-us-east-1a is not configured in requested cloud account 'aws:012345678912'",
assertThrows(IllegalArgumentException.class,
() -> context.submit(applicationPackage))
.getMessage());
- assertEquals("Zone dev.us-east-1 is not configured in requested cloud account '012345678912'",
+
+ context.runJob(devUsEast1, applicationPackage); // OK, because no special account is used.
+ assertEquals("Zone dev.aws-us-east-2a is not configured in requested cloud account 'aws:012345678912'",
assertThrows(IllegalArgumentException.class,
- () -> context.runJob(devUsEast1, applicationPackage))
+ () -> context.runJob(devZone, applicationPackage))
.getMessage());
// Deployment to prod succeeds once all zones are configured in requested account
- tester.controllerTester().zoneRegistry().configureCloudAccount(CloudAccount.from(cloudAccount),
- systemTest.zone(),
- stagingTest.zone(),
- prodZone);
+ tester.controllerTester().zoneRegistry().configureCloudAccount(CloudAccount.from(cloudAccount), prodZone);
context.submit(applicationPackage).deploy();
// Dev zone is added as a configured zone and deployment succeeds
@@ -1506,19 +1508,24 @@ public class ControllerTest {
context.runJob(devZone, applicationPackage);
// All deployments use the custom account
- for (var zoneId : List.of(systemTest.zone(), stagingTest.zone(), devZone, prodZone)) {
+ for (var zoneId : List.of(devZone, prodZone)) {
assertEquals(cloudAccount, tester.controllerTester().configServer()
.cloudAccount(context.deploymentIdIn(zoneId))
.get().value());
}
+            // System and staging tests, however, run in the default cloud, where the default cloud account is used
+ for (var zoneId : List.of(systemTest.zone(), stagingTest.zone())) {
+ assertEquals(Optional.empty(), tester.controllerTester().configServer()
+ .cloudAccount(context.deploymentIdIn(zoneId)));
+ }
}
@Test
void testCloudAccountWithDefaultOverride() {
var context = tester.newDeploymentContext();
- var prodZone1 = productionUsEast3.zone();
+ var prodZone1 = productionAwsUsEast1a.zone();
var prodZone2 = productionUsWest1.zone();
- var cloudAccount = "012345678912";
+ var cloudAccount = "aws:012345678912";
var application = new ApplicationPackageBuilder()
.cloudAccount(cloudAccount)
.region(prodZone1.region())
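
The cloud-account updates in this test reflect that accounts are now written with an explicit cloud prefix ("aws:012345678912"). The following is a small sketch of parsing identifiers of that shape; the validation rules are assumptions inferred only from the strings appearing in this test, not the actual CloudAccount.from() implementation.

```java
import java.util.Optional;
import java.util.Set;
import java.util.regex.Pattern;

// Minimal sketch: parse "cloud:account" strings such as "aws:012345678912" or "gcp:foobar".
public class CloudAccountSketch {

    private static final Set<String> KNOWN_CLOUDS = Set.of("aws", "gcp");
    private static final Pattern AWS_ACCOUNT = Pattern.compile("\\d{12}"); // assumed 12-digit AWS account id

    record ParsedAccount(String cloud, String account) {}

    static Optional<ParsedAccount> parse(String value) {
        int colon = value.indexOf(':');
        if (colon < 0) return Optional.empty();               // require an explicit cloud prefix
        String cloud = value.substring(0, colon);
        String account = value.substring(colon + 1);
        if (!KNOWN_CLOUDS.contains(cloud)) return Optional.empty();
        if (cloud.equals("aws") && !AWS_ACCOUNT.matcher(account).matches()) return Optional.empty();
        return Optional.of(new ParsedAccount(cloud, account));
    }

    public static void main(String[] args) {
        System.out.println(parse("aws:012345678912")); // present
        System.out.println(parse("012345678912"));     // empty: no cloud prefix
    }
}
```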
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackageTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackageTest.java
index 6da8db1c259..e7109b551ed 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackageTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackageTest.java
@@ -2,10 +2,13 @@ package com.yahoo.vespa.hosted.controller.application.pkg;
import com.yahoo.config.application.api.DeploymentSpec;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.CloudName;
+import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterId;
import com.yahoo.vespa.hosted.controller.application.pkg.TestPackage.TestSummary;
import com.yahoo.vespa.hosted.controller.config.ControllerConfig;
import com.yahoo.vespa.hosted.controller.config.ControllerConfig.Steprunner.Testerapp;
@@ -22,6 +25,9 @@ import java.util.Set;
import java.util.jar.JarOutputStream;
import java.util.zip.ZipEntry;
+import static com.yahoo.config.provision.CloudName.AWS;
+import static com.yahoo.config.provision.CloudName.DEFAULT;
+import static com.yahoo.config.provision.CloudName.GCP;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterCloud.Suite.production;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterCloud.Suite.staging;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterCloud.Suite.staging_setup;
@@ -30,7 +36,6 @@ import static com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPacka
import static com.yahoo.vespa.hosted.controller.application.pkg.TestPackage.validateTests;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* @author jonmv
@@ -120,11 +125,12 @@ public class TestPackageTest {
}
@Test
- void testTestPacakgeAssembly() throws IOException {
+ void testTestPackageAssembly() throws IOException {
byte[] bundleZip = ApplicationPackage.filesZip(Map.of("components/foo-tests.jar", testsJar("SystemTest", "ProductionTest"),
"artifacts/key", new byte[0]));
TestPackage bundleTests = new TestPackage(() -> new ByteArrayInputStream(bundleZip),
false,
+ CloudName.DEFAULT,
new RunId(ApplicationId.defaultId(), JobType.dev("abc"), 123),
new Testerapp.Builder().tenantCdBundle("foo").runtimeProviderClass("bar").build(),
DeploymentSpec.fromXml("""
@@ -147,28 +153,73 @@ public class TestPackageTest {
}
@Test
+ void generates_correct_deployment_spec() {
+ DeploymentSpec spec = DeploymentSpec.fromXml("""
+ <deployment version='1.0' athenz-domain='domain' athenz-service='service' cloud-account='123123123123,gcp:foobar' empty-host-ttl='1h'>
+ <test empty-host-ttl='1d' />
+ <staging cloud-account='aws:321321321321'/>
+ <prod>
+ <region>us-east-3</region>
+ <test>us-east-3</test>
+ <region>us-west-1</region>
+ <test empty-host-ttl='0m'>us-west-1</test>
+ <region empty-host-ttl='1d'>us-central-1</region>
+ <test>us-central-1</test>
+ </prod>
+ </deployment>
+ """);
+ verifyAttributes("", 0, DEFAULT, ZoneId.from("test", "us-east-1"), spec);
+ verifyAttributes("", 0, DEFAULT, ZoneId.from("staging", "us-east-2"), spec);
+ verifyAttributes("", 0, DEFAULT, ZoneId.from("prod", "us-east-3"), spec);
+ verifyAttributes("", 0, DEFAULT, ZoneId.from("prod", "us-west-1"), spec);
+ verifyAttributes("", 0, DEFAULT, ZoneId.from("prod", "us-central-1"), spec);
+
+ verifyAttributes("aws:123123123123", 1440, AWS, ZoneId.from("test", "us-east-1"), spec);
+ verifyAttributes("aws:321321321321", 60, AWS, ZoneId.from("staging", "us-east-2"), spec);
+ verifyAttributes("aws:123123123123", 60, AWS, ZoneId.from("prod", "us-east-3"), spec);
+ verifyAttributes("aws:123123123123", 0, AWS, ZoneId.from("prod", "us-west-1"), spec);
+ verifyAttributes("aws:123123123123", 60, AWS, ZoneId.from("prod", "us-central-1"), spec);
+
+ verifyAttributes("gcp:foobar", 1440, GCP, ZoneId.from("test", "us-east-1"), spec);
+ verifyAttributes("", 0, GCP, ZoneId.from("staging", "us-east-2"), spec);
+ verifyAttributes("gcp:foobar", 60, GCP, ZoneId.from("prod", "us-east-3"), spec);
+ verifyAttributes("gcp:foobar", 0, GCP, ZoneId.from("prod", "us-west-1"), spec);
+ verifyAttributes("gcp:foobar", 60, GCP, ZoneId.from("prod", "us-central-1"), spec);
+ }
+
+ private void verifyAttributes(String expectedAccount, int expectedTTL, CloudName cloud, ZoneId zone, DeploymentSpec spec) {
+ assertEquals("<?xml version='1.0' encoding='UTF-8'?>\n" +
+ "<deployment version='1.0' athenz-domain='domain' athenz-service='service'" +
+ (expectedAccount.isEmpty() ? "" : " cloud-account='" + expectedAccount + "' empty-host-ttl='" + expectedTTL + "m'") + "> " +
+ "<instance id='default-t' /></deployment>",
+ new String(TestPackage.deploymentXml(TesterId.of(ApplicationId.defaultId()), InstanceName.defaultName(), cloud, zone, spec)));
+ }
+
+ @Test
void generates_correct_tester_flavor() {
- DeploymentSpec spec = DeploymentSpec.fromXml("<deployment version='1.0' athenz-domain='domain' athenz-service='service'>\n" +
- " <instance id='first'>\n" +
- " <test tester-flavor=\"d-6-16-100\" />\n" +
- " <prod>\n" +
- " <region active=\"true\">us-west-1</region>\n" +
- " <test>us-west-1</test>\n" +
- " </prod>\n" +
- " </instance>\n" +
- " <instance id='second'>\n" +
- " <test />\n" +
- " <staging />\n" +
- " <prod tester-flavor=\"d-6-16-100\">\n" +
- " <parallel>\n" +
- " <region active=\"true\">us-east-3</region>\n" +
- " <region active=\"true\">us-central-1</region>\n" +
- " </parallel>\n" +
- " <region active=\"true\">us-west-1</region>\n" +
- " <test>us-west-1</test>\n" +
- " </prod>\n" +
- " </instance>\n" +
- "</deployment>\n");
+ DeploymentSpec spec = DeploymentSpec.fromXml("""
+ <deployment version='1.0' athenz-domain='domain' athenz-service='service'>
+ <instance id='first'>
+ <test tester-flavor="d-6-16-100" />
+ <prod>
+ <region active="true">us-west-1</region>
+ <test>us-west-1</test>
+ </prod>
+ </instance>
+ <instance id='second'>
+ <test />
+ <staging />
+ <prod tester-flavor="d-6-16-100">
+ <parallel>
+ <region active="true">us-east-3</region>
+ <region active="true">us-central-1</region>
+ </parallel>
+ <region active="true">us-west-1</region>
+ <test>us-west-1</test>
+ </prod>
+ </instance>
+ </deployment>
+ """);
NodeResources firstResources = TestPackage.testerResourcesFor(ZoneId.from("prod", "us-west-1"), spec.requireInstance("first"));
assertEquals(TestPackage.DEFAULT_TESTER_RESOURCES, firstResources);
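
The new generates_correct_deployment_spec test asserts that the tester's deployment XML picks the cloud account matching the cloud a zone runs in, from a list such as cloud-account='123123123123,gcp:foobar'. Here is a hedged sketch of that selection only (zone-level overrides and empty-host-ttl handling are left out); the names are invented for illustration and this is not the DeploymentSpec API.

```java
import java.util.List;
import java.util.Optional;

public class CloudAccountResolutionSketch {

    /** Picks the account declared for the given cloud; unprefixed accounts are treated as AWS. */
    static Optional<String> accountFor(String cloud, List<String> declaredAccounts) {
        for (String declared : declaredAccounts) {
            String qualified = declared.contains(":") ? declared : "aws:" + declared;
            if (qualified.startsWith(cloud + ":")) return Optional.of(qualified);
        }
        return Optional.empty(); // e.g. the default cloud, which uses no custom account
    }

    public static void main(String[] args) {
        List<String> declared = List.of("123123123123", "gcp:foobar");
        System.out.println(accountFor("aws", declared));     // Optional[aws:123123123123]
        System.out.println(accountFor("gcp", declared));     // Optional[gcp:foobar]
        System.out.println(accountFor("default", declared)); // Optional.empty
    }
}
```

This matches the expectations in the test: AWS zones resolve to aws:123123123123, GCP zones to gcp:foobar, and the default cloud to no account at all.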
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java
index da982fa67a8..967a821c506 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java
@@ -15,7 +15,6 @@ import com.yahoo.security.KeyUtils;
import com.yahoo.security.SignatureAlgorithm;
import com.yahoo.security.X509CertificateBuilder;
import com.yahoo.vespa.hosted.controller.Application;
-import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
@@ -53,7 +52,6 @@ import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
-import java.util.OptionalLong;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;
@@ -270,22 +268,6 @@ public class DeploymentContext {
}
}
- /** Add a routing policy for this in given zone, with status set to inactive */
- public DeploymentContext addInactiveRoutingPolicy(ZoneId zone) {
- var clusterId = "default-inactive";
- var id = new RoutingPolicyId(instanceId, Id.from(clusterId), zone);
- var policies = new LinkedHashMap<>(tester.controller().routing().policies().read(instanceId).asMap());
- policies.put(id, new RoutingPolicy(id, Optional.of(HostName.of("lb-host")),
- Optional.empty(),
- Optional.empty(),
- Set.of(EndpointId.of("default")),
- Set.of(),
- new RoutingPolicy.Status(false, RoutingStatus.DEFAULT),
- true));
- tester.controller().curator().writeRoutingPolicies(instanceId, List.copyOf(policies.values()));
- return this;
- }
-
/** Submit given application package for deployment */
public DeploymentContext resubmit(ApplicationPackage applicationPackage) {
return submit(applicationPackage, Optional.of(defaultSourceRevision), salt.get(), 0);
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java
index 2c2cc333f9c..7783f9af5a4 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java
@@ -31,7 +31,6 @@ import org.junit.jupiter.api.Test;
import java.security.cert.X509Certificate;
import java.time.Duration;
import java.time.Instant;
-import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.Executors;
@@ -45,12 +44,12 @@ import static com.yahoo.vespa.hosted.controller.deployment.DeploymentTester.inst
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.deploymentFailed;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.installationFailed;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.noTests;
+import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.quotaExceeded;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.running;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.success;
import static com.yahoo.vespa.hosted.controller.deployment.Step.Status.failed;
import static com.yahoo.vespa.hosted.controller.deployment.Step.Status.succeeded;
import static com.yahoo.vespa.hosted.controller.deployment.Step.Status.unfinished;
-import static java.time.temporal.ChronoUnit.SECONDS;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
@@ -540,6 +539,18 @@ public class InternalStepRunnerTest {
assertEquals(RunStatus.error, tester.jobs().run(id).status());
}
+
+ @Test
+ public void quotaExceededAbortsJob() {
+ RuntimeException exception = new ConfigServerException(ConfigServerException.ErrorCode.QUOTA_EXCEEDED,
+ "Quota exceeded",
+ "deploy failure");
+ tester.configServer().throwOnNextPrepare(exception);
+ tester.jobs().deploy(app.instanceId(), DeploymentContext.devUsEast1, Optional.empty(), applicationPackage());
+ assertEquals(failed, tester.jobs().last(app.instanceId(), DeploymentContext.devUsEast1).get().stepStatuses().get(Step.deployReal));
+ assertEquals(quotaExceeded, tester.jobs().last(app.instanceId(), DeploymentContext.devUsEast1).get().status());
+ }
+
private void assertTestLogEntries(RunId id, Step step, LogEntry... entries) {
assertEquals(List.of(entries), tester.jobs().details(id).get().get(step));
}
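
The new quotaExceededAbortsJob test pins down that a QUOTA_EXCEEDED error from the config server ends the run as quotaExceeded rather than a generic error. Below is a tiny stand-alone sketch of such an error-code-to-status mapping, using local enums as stand-ins for the real controller types.

```java
public class QuotaExceededMappingSketch {

    enum ErrorCode { QUOTA_EXCEEDED, INVALID_APPLICATION_PACKAGE, INTERNAL_SERVER_ERROR }
    enum RunStatus { running, deploymentFailed, quotaExceeded, error }

    // Map a config-server error code to a terminal run status; QUOTA_EXCEEDED gets its own status.
    static RunStatus statusOf(ErrorCode code) {
        return switch (code) {
            case QUOTA_EXCEEDED -> RunStatus.quotaExceeded;
            case INVALID_APPLICATION_PACKAGE -> RunStatus.deploymentFailed;
            default -> RunStatus.error;
        };
    }

    public static void main(String[] args) {
        System.out.println(statusOf(ErrorCode.QUOTA_EXCEEDED)); // quotaExceeded
    }
}
```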
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
index d542c06b5bf..fe74e305b63 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
@@ -103,7 +103,7 @@ public class ConfigServerMock extends AbstractComponent implements ConfigServer
private final Map<DeploymentId, TestReport> testReport = new HashMap<>();
private final Map<DeploymentId, CloudAccount> cloudAccounts = new HashMap<>();
private final Map<DeploymentId, List<X509Certificate>> additionalCertificates = new HashMap<>();
- private List<SearchNodeMetrics> searchnodeMetrics;
+ private List<SearchNodeMetrics> searchNodeMetrics;
private Version lastPrepareVersion = null;
private Consumer<ApplicationId> prepareException = null;
@@ -308,7 +308,7 @@ public class ConfigServerMock extends AbstractComponent implements ConfigServer
}
public void setProtonMetrics(List<SearchNodeMetrics> searchnodeMetrics) {
- this.searchnodeMetrics = searchnodeMetrics;
+ this.searchNodeMetrics = searchnodeMetrics;
}
public void deferLoadBalancerProvisioningIn(Set<Environment> environments) {
@@ -499,7 +499,15 @@ public class ConfigServerMock extends AbstractComponent implements ConfigServer
applications.remove(deployment);
serviceStatus.remove(deployment);
- removeLoadBalancers(deployment.applicationId(), deployment.zoneId());
+
+ // This simulates what a real config server does: It deactivates the LB. Actual removal happens in the background
+ loadBalancers.computeIfPresent(deployment.zoneId(), (k, old) ->
+ old.stream().map(lb -> lb.application().equals(deployment.applicationId())
+ ? new LoadBalancer(lb.id(), lb.application(), lb.cluster(), lb.hostname(), lb.ipAddress(),
+ LoadBalancer.State.inactive, lb.dnsZone(), lb.cloudAccount(),
+ lb.service(), lb.isPublic())
+ : lb)
+ .collect(Collectors.toCollection(LinkedHashSet::new)));
}
@Override
@@ -509,7 +517,7 @@ public class ConfigServerMock extends AbstractComponent implements ConfigServer
@Override
public List<SearchNodeMetrics> getSearchNodeMetrics(DeploymentId deployment) {
- return this.searchnodeMetrics;
+ return this.searchNodeMetrics;
}
@Override
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneRegistryMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneRegistryMock.java
index 611f0bab904..e6a9014df94 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneRegistryMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneRegistryMock.java
@@ -71,7 +71,7 @@ public class ZoneRegistryMock extends AbstractComponent implements ZoneRegistry
this.zones = List.of(ZoneApiMock.fromId("test.us-east-1"),
ZoneApiMock.fromId("staging.us-east-3"),
ZoneApiMock.fromId("dev.us-east-1"),
- ZoneApiMock.fromId("dev.aws-us-east-2a"),
+ ZoneApiMock.newBuilder().withId("dev.aws-us-east-2a").withCloud("aws").build(),
ZoneApiMock.fromId("perf.us-east-3"),
ZoneApiMock.newBuilder().withId("prod.aws-us-east-1a").withCloud("aws").build(),
ZoneApiMock.newBuilder().withId("prod.aws-us-east-1b").withCloud("aws").build(),
@@ -272,7 +272,7 @@ public class ZoneRegistryMock extends AbstractComponent implements ZoneRegistry
}
@Override
- public boolean isEnclave(CloudAccount cloudAccount) {
+ public boolean isExternal(CloudAccount cloudAccount) {
return system.isPublic() && !cloudAccount.isUnspecified() && !cloudAccount.equals(systemCloudAccount);
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/EnclaveAccessMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/EnclaveAccessMaintainerTest.java
index f5188d52db6..5bfac2866ce 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/EnclaveAccessMaintainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/EnclaveAccessMaintainerTest.java
@@ -33,7 +33,7 @@ class EnclaveAccessMaintainerTest {
tester.flagSource().withListFlag(PermanentFlags.CLOUD_ACCOUNTS.id(), List.of("123123123123", "321321321321"), String.class);
assertEquals(1, sharer.maintain());
- assertEquals(Set.of(CloudAccount.from("123123123123"), CloudAccount.from("321321321321")), amis.currentAccounts());
+ assertEquals(Set.of(CloudAccount.from("aws:123123123123"), CloudAccount.from("aws:321321321321")), amis.currentAccounts());
}
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
index 96c1d7c545d..f1e8697cf41 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
@@ -170,7 +170,10 @@ public class UpgraderTest {
// --- Failing application is repaired by changing the application, causing confidence to move above 'high' threshold
// Deploy application change
default0.submit(applicationPackage("default"));
- default0.deploy();
+ default0.runJob(systemTest)
+ .jobAborted(stagingTest) // New revision causes run with failing upgrade alone to be aborted.
+ .runJob(stagingTest)
+ .deploy();
tester.controllerTester().computeVersionStatus();
assertEquals(VespaVersion.Confidence.high, tester.controller().readVersionStatus().systemVersion().get().confidence());
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializerTest.java
index 8e0b1dd1d4e..c1267ad5edf 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializerTest.java
@@ -43,7 +43,7 @@ public class RoutingPolicySerializerTest {
Optional.of("zone1"),
Set.of(),
Set.of(),
- new RoutingPolicy.Status(true, RoutingStatus.DEFAULT),
+ RoutingStatus.DEFAULT,
false),
new RoutingPolicy(id2,
Optional.of(HostName.of("long-and-ugly-name-2")),
@@ -51,10 +51,9 @@ public class RoutingPolicySerializerTest {
Optional.empty(),
instanceEndpoints,
Set.of(),
- new RoutingPolicy.Status(false,
- new RoutingStatus(RoutingStatus.Value.out,
- RoutingStatus.Agent.tenant,
- Instant.ofEpochSecond(123))),
+ new RoutingStatus(RoutingStatus.Value.out,
+ RoutingStatus.Agent.tenant,
+ Instant.ofEpochSecond(123)),
true),
new RoutingPolicy(id1,
Optional.empty(),
@@ -62,7 +61,7 @@ public class RoutingPolicySerializerTest {
Optional.of("zone2"),
instanceEndpoints,
applicationEndpoints,
- new RoutingPolicy.Status(true, RoutingStatus.DEFAULT),
+ RoutingStatus.DEFAULT,
true));
var serialized = serializer.fromSlime(owner, serializer.toSlime(policies));
assertEquals(policies.size(), serialized.size());
@@ -75,7 +74,7 @@ public class RoutingPolicySerializerTest {
assertEquals(expected.dnsZone(), actual.dnsZone());
assertEquals(expected.instanceEndpoints(), actual.instanceEndpoints());
assertEquals(expected.applicationEndpoints(), actual.applicationEndpoints());
- assertEquals(expected.status(), actual.status());
+ assertEquals(expected.routingStatus(), actual.routingStatus());
assertEquals(expected.isPublic(), actual.isPublic());
}
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
index c6d68bc5d9d..ac16aa727d5 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
@@ -1461,6 +1461,7 @@ public class ApplicationApiTest extends ControllerContainerTest {
.userIdentity(unauthorizedUser),
accessDenied,
403);
+
}
@Test
@@ -1624,7 +1625,6 @@ public class ApplicationApiTest extends ControllerContainerTest {
.region(zone.region().value())
.build();
app.submit(applicationPackage).deploy();
- app.addInactiveRoutingPolicy(zone);
// GET application
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
@@ -1777,6 +1777,21 @@ public class ApplicationApiTest extends ControllerContainerTest {
assertFalse(tester.controller().applications().getApplication(appId).isPresent());
}
+ @Test
+ void only_build_job_can_submit() {
+ createTenantAndApplication();
+ tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
+ .data(createApplicationSubmissionData(applicationPackageDefault, SCREWDRIVER_ID.value()))
+ .userIdentity(USER_ID),
+ accessDenied,
+ 403);
+ tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
+ .data(createApplicationSubmissionData(applicationPackageDefault, SCREWDRIVER_ID.value()))
+ .screwdriverIdentity(SCREWDRIVER_ID),
+ "{\"message\":\"application build 1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}",
+ 200);
+ }
+
private static String serializeInstant(Instant i) {
return DateTimeFormatter.ISO_INSTANT.format(i.truncatedTo(ChronoUnit.SECONDS));
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-clusters.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-clusters.json
index eb4c3294fe0..1040e809362 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-clusters.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-clusters.json
@@ -11,7 +11,8 @@
"diskGb": 20.0,
"bandwidthGbps": 1.0,
"diskSpeed": "slow",
- "storageType": "remote"
+ "storageType": "remote",
+ "architecture": "any"
},
"cost": 0.11
},
@@ -24,7 +25,8 @@
"diskGb": 90.0,
"bandwidthGbps": 1.0,
"diskSpeed": "slow",
- "storageType": "remote"
+ "storageType": "remote",
+ "architecture": "any"
},
"cost": 0.43
},
@@ -40,7 +42,8 @@
"diskGb": 50.0,
"bandwidthGbps": 1.0,
"diskSpeed": "slow",
- "storageType": "remote"
+ "storageType": "remote",
+ "architecture": "any"
},
"cost": 0.22
},
@@ -56,7 +59,8 @@
"diskGb": 50.0,
"bandwidthGbps": 1.0,
"diskSpeed": "slow",
- "storageType": "remote"
+ "storageType": "remote",
+ "architecture": "any"
},
"cost": 0.29
},
@@ -98,7 +102,8 @@
"diskGb": 0.0,
"bandwidthGbps": 0.0,
"diskSpeed": "fast",
- "storageType": "any"
+ "storageType": "any",
+ "architecture": "any"
},
"cost": 0.0
},
@@ -111,7 +116,8 @@
"diskGb": 50.0,
"bandwidthGbps": 1.0,
"diskSpeed": "slow",
- "storageType": "remote"
+ "storageType": "remote",
+ "architecture": "any"
},
"cost": 0.22
},
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-nodes.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-nodes.json
index 34d2f054e0f..ffcb9ab6dc8 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-nodes.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-nodes.json
@@ -12,6 +12,7 @@
"bandwidthGbps": 1.0,
"diskSpeed": "slow",
"storageType": "remote",
+ "architecture": "any",
"clusterId": "default",
"clusterType": "container",
"down": false,
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/routing/responses/recursion/environment.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/routing/responses/recursion/environment.json
index 2d4553978c6..3b085162393 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/routing/responses/recursion/environment.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/routing/responses/recursion/environment.json
@@ -25,11 +25,11 @@
"changedAt": 0
},
{
- "routingMethod": "sharedLayer4",
+ "routingMethod": "exclusive",
"environment": "dev",
"region": "aws-us-east-2a",
"status": "in",
- "agent": "operator",
+ "agent": "system",
"changedAt": 0
},
{
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
index 24d5e02240d..772877de8e3 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
@@ -309,10 +309,7 @@ public class RoutingPoliciesTest {
// Remove app2 completely
tester.controllerTester().controller().applications().requireInstance(context2.instanceId()).deployments().keySet()
- .forEach(zone -> {
- tester.controllerTester().configServer().removeLoadBalancers(context2.instanceId(), zone);
- tester.controllerTester().controller().applications().deactivate(context2.instanceId(), zone);
- });
+ .forEach(zone -> tester.controllerTester().controller().applications().deactivate(context2.instanceId(), zone));
context2.flushDnsUpdates();
expectedRecords = Set.of(
"c0.app1.tenant1.us-west-1.vespa.oath.cloud",
@@ -573,15 +570,15 @@ public class RoutingPoliciesTest {
// Status details is stored in policy
var policy1 = tester.routingPolicies().read(context.deploymentIdIn(zone1)).first().get();
- assertEquals(RoutingStatus.Value.out, policy1.status().routingStatus().value());
- assertEquals(RoutingStatus.Agent.tenant, policy1.status().routingStatus().agent());
- assertEquals(changedAt.truncatedTo(ChronoUnit.MILLIS), policy1.status().routingStatus().changedAt());
+ assertEquals(RoutingStatus.Value.out, policy1.routingStatus().value());
+ assertEquals(RoutingStatus.Agent.tenant, policy1.routingStatus().agent());
+ assertEquals(changedAt.truncatedTo(ChronoUnit.MILLIS), policy1.routingStatus().changedAt());
// Other zone remains in
var policy2 = tester.routingPolicies().read(context.deploymentIdIn(zone2)).first().get();
- assertEquals(RoutingStatus.Value.in, policy2.status().routingStatus().value());
- assertEquals(RoutingStatus.Agent.system, policy2.status().routingStatus().agent());
- assertEquals(Instant.EPOCH, policy2.status().routingStatus().changedAt());
+ assertEquals(RoutingStatus.Value.in, policy2.routingStatus().value());
+ assertEquals(RoutingStatus.Agent.system, policy2.routingStatus().agent());
+ assertEquals(Instant.EPOCH, policy2.routingStatus().changedAt());
// Next deployment does not affect status
context.submit(applicationPackage).deferLoadBalancerProvisioningIn(Environment.prod).deploy();
@@ -598,9 +595,9 @@ public class RoutingPoliciesTest {
tester.assertTargets(context.instanceId(), EndpointId.of("r1"), 0, zone1, zone2);
policy1 = tester.routingPolicies().read(context.deploymentIdIn(zone1)).first().get();
- assertEquals(RoutingStatus.Value.in, policy1.status().routingStatus().value());
- assertEquals(RoutingStatus.Agent.tenant, policy1.status().routingStatus().agent());
- assertEquals(changedAt.truncatedTo(ChronoUnit.MILLIS), policy1.status().routingStatus().changedAt());
+ assertEquals(RoutingStatus.Value.in, policy1.routingStatus().value());
+ assertEquals(RoutingStatus.Agent.tenant, policy1.routingStatus().agent());
+ assertEquals(changedAt.truncatedTo(ChronoUnit.MILLIS), policy1.routingStatus().changedAt());
// Deployment is set out through a new deployment.xml
var applicationPackage2 = applicationPackageBuilder()
@@ -652,8 +649,7 @@ public class RoutingPoliciesTest {
for (var context : contexts) {
var policies = tester.routingPolicies().read(context.instanceId());
assertTrue(policies.asList().stream()
- .map(RoutingPolicy::status)
- .map(RoutingPolicy.Status::routingStatus)
+ .map(RoutingPolicy::routingStatus)
.map(RoutingStatus::value)
.allMatch(status -> status == RoutingStatus.Value.in),
"Global routing status for policy remains " + RoutingStatus.Value.in);
@@ -763,7 +759,7 @@ public class RoutingPoliciesTest {
RoutingStatus.Agent.tenant);
context.flushDnsUpdates();
for (var policy : tester.routingPolicies().read(context.instanceId())) {
- assertSame(RoutingStatus.Value.in, policy.status().routingStatus().value());
+ assertSame(RoutingStatus.Value.in, policy.routingStatus().value());
}
tester.assertTargets(context.instanceId(), EndpointId.of("r0"), 0, zone1, zone2);
}
diff --git a/default_build_settings.cmake b/default_build_settings.cmake
index 2785a98a396..4c855f9c923 100644
--- a/default_build_settings.cmake
+++ b/default_build_settings.cmake
@@ -203,8 +203,8 @@ function(vespa_use_default_cxx_compiler)
unset(DEFAULT_CMAKE_CXX_COMPILER)
if(NOT DEFINED VESPA_COMPILER_VARIANT OR VESPA_COMPILER_VARIANT STREQUAL "gcc")
if(APPLE)
- set(DEFAULT_CMAKE_C_COMPILER "${VESPA_HOMEBREW_PREFIX}/bin/gcc-12")
- set(DEFAULT_CMAKE_CXX_COMPILER "${VESPA_HOMEBREW_PREFIX}/bin/g++-12")
+ set(DEFAULT_CMAKE_C_COMPILER "${VESPA_HOMEBREW_PREFIX}/bin/gcc-13")
+ set(DEFAULT_CMAKE_CXX_COMPILER "${VESPA_HOMEBREW_PREFIX}/bin/g++-13")
elseif(VESPA_OS_DISTRO_COMBINED STREQUAL "amzn 2")
set(DEFAULT_CMAKE_C_COMPILER "/usr/bin/gcc10-gcc")
set(DEFAULT_CMAKE_CXX_COMPILER "/usr/bin/gcc10-g++")
diff --git a/defaults/pom.xml b/defaults/pom.xml
index 4a5299e1782..e7dbc7981ff 100644
--- a/defaults/pom.xml
+++ b/defaults/pom.xml
@@ -50,17 +50,21 @@
<build>
<plugins>
<plugin>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>bundle-plugin</artifactId>
- <extensions>true</extensions>
- </plugin>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>bundle-plugin</artifactId>
+ <extensions>true</extensions>
+ <configuration>
+ <bundleType>CORE</bundleType>
+ <suppressWarningMissingImportPackages>true</suppressWarningMissingImportPackages>
+ </configuration>
+ </plugin>
<plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-deploy-plugin</artifactId>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-deploy-plugin</artifactId>
</plugin>
<plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-compiler-plugin</artifactId>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
diff --git a/document/src/vespa/document/annotation/spantree.h b/document/src/vespa/document/annotation/spantree.h
index a0010fafa74..8745fb4421d 100644
--- a/document/src/vespa/document/annotation/spantree.h
+++ b/document/src/vespa/document/annotation/spantree.h
@@ -4,7 +4,6 @@
#include "annotation.h"
#include <vector>
-#include <cassert>
namespace document {
struct SpanNode;
@@ -24,7 +23,6 @@ public:
SpanTree(vespalib::stringref name, std::unique_ptr<T> root)
: _name(name),
_root(std::move(root)) {
- assert(_root.get());
}
~SpanTree();
diff --git a/document/src/vespa/document/base/testdocman.cpp b/document/src/vespa/document/base/testdocman.cpp
index 471a2f8c196..d5b24b51f24 100644
--- a/document/src/vespa/document/base/testdocman.cpp
+++ b/document/src/vespa/document/base/testdocman.cpp
@@ -7,6 +7,7 @@
#include <vespa/document/fieldvalue/stringfieldvalue.h>
#include <vespa/vespalib/util/rand48.h>
#include <sstream>
+#include <cassert>
namespace document {
diff --git a/document/src/vespa/document/select/valuenodes.cpp b/document/src/vespa/document/select/valuenodes.cpp
index b3052cc07e2..06205e6b7d1 100644
--- a/document/src/vespa/document/select/valuenodes.cpp
+++ b/document/src/vespa/document/select/valuenodes.cpp
@@ -11,6 +11,7 @@
#include <vespa/vespalib/util/md5.h>
#include <vespa/document/util/stringutil.h>
#include <vespa/vespalib/text/lowercase.h>
+#include <cassert>
#include <iomanip>
#include <sys/time.h>
diff --git a/document/src/vespa/document/serialization/annotationdeserializer.cpp b/document/src/vespa/document/serialization/annotationdeserializer.cpp
index 41bc9ec8aaa..c449029440f 100644
--- a/document/src/vespa/document/serialization/annotationdeserializer.cpp
+++ b/document/src/vespa/document/serialization/annotationdeserializer.cpp
@@ -40,7 +40,7 @@ unique_ptr<SpanTree> AnnotationDeserializer::readSpanTree() {
deserializer.read(tree_name);
_nodes.clear();
SpanNode::UP root = readSpanNode();
- unique_ptr<SpanTree> span_tree(new SpanTree(tree_name.getValue(), std::move(root)));
+ auto span_tree = std::make_unique<SpanTree>(tree_name.getValue(), std::move(root));
uint32_t annotation_count = getInt1_2_4Bytes(_stream);
span_tree->reserveAnnotations(annotation_count);
diff --git a/eval/src/tests/eval/value_cache/dense-short1.json b/eval/src/tests/eval/value_cache/dense-short1.json
new file mode 100644
index 00000000000..4e170001c96
--- /dev/null
+++ b/eval/src/tests/eval/value_cache/dense-short1.json
@@ -0,0 +1 @@
+[ 1, 2.0, 3.5 ]
diff --git a/eval/src/tests/eval/value_cache/dense-short2.json b/eval/src/tests/eval/value_cache/dense-short2.json
new file mode 100644
index 00000000000..40121135544
--- /dev/null
+++ b/eval/src/tests/eval/value_cache/dense-short2.json
@@ -0,0 +1,3 @@
+{
+ "values": [ 1, 2.0, 3.5 ]
+}
diff --git a/eval/src/tests/eval/value_cache/sparse-short1.json b/eval/src/tests/eval/value_cache/sparse-short1.json
new file mode 100644
index 00000000000..949b7b2b8bd
--- /dev/null
+++ b/eval/src/tests/eval/value_cache/sparse-short1.json
@@ -0,0 +1,5 @@
+{
+ "foo": 1.0,
+ "bar": 2.0,
+ "three": 3.0
+}
diff --git a/eval/src/tests/eval/value_cache/sparse-short2.json b/eval/src/tests/eval/value_cache/sparse-short2.json
new file mode 100644
index 00000000000..f10b1b6f9fb
--- /dev/null
+++ b/eval/src/tests/eval/value_cache/sparse-short2.json
@@ -0,0 +1,7 @@
+{
+ "cells": {
+ "foo": 1.0,
+ "bar": 2.0,
+ "three": 3.0
+ }
+}
diff --git a/eval/src/tests/eval/value_cache/tensor_loader_test.cpp b/eval/src/tests/eval/value_cache/tensor_loader_test.cpp
index 1a77cfe847b..4b4ba3fc0d3 100644
--- a/eval/src/tests/eval/value_cache/tensor_loader_test.cpp
+++ b/eval/src/tests/eval/value_cache/tensor_loader_test.cpp
@@ -19,12 +19,26 @@ TensorSpec make_dense_tensor() {
.add({{"x", 1}, {"y", 1}}, 4.0);
}
+TensorSpec make_simple_dense_tensor() {
+ return TensorSpec("tensor(z[3])")
+ .add({{"z", 0}}, 1.0)
+ .add({{"z", 1}}, 2.0)
+ .add({{"z", 2}}, 3.5);
+}
+
TensorSpec make_sparse_tensor() {
return TensorSpec("tensor(x{},y{})")
.add({{"x", "foo"}, {"y", "bar"}}, 1.0)
.add({{"x", "bar"}, {"y", "foo"}}, 2.0);
}
+TensorSpec make_simple_sparse_tensor() {
+ return TensorSpec("tensor(mydim{})")
+ .add({{"mydim", "foo"}}, 1.0)
+ .add({{"mydim", "three"}}, 3.0)
+ .add({{"mydim", "bar"}}, 2.0);
+}
+
TensorSpec make_mixed_tensor() {
return TensorSpec("tensor(x{},y[2])")
.add({{"x", "foo"}, {"y", 0}}, 1.0)
@@ -75,6 +89,16 @@ TEST_F("require that lz4 compressed sparse tensor can be loaded", ConstantTensor
TEST_DO(verify_tensor(make_sparse_tensor(), f1.create(TEST_PATH("sparse.json.lz4"), "tensor(x{},y{})")));
}
+TEST_F("require that sparse tensor short form can be loaded", ConstantTensorLoader(factory)) {
+ TEST_DO(verify_tensor(make_simple_sparse_tensor(), f1.create(TEST_PATH("sparse-short1.json"), "tensor(mydim{})")));
+ TEST_DO(verify_tensor(make_simple_sparse_tensor(), f1.create(TEST_PATH("sparse-short2.json"), "tensor(mydim{})")));
+}
+
+TEST_F("require that dense tensor short form can be loaded", ConstantTensorLoader(factory)) {
+ TEST_DO(verify_tensor(make_simple_dense_tensor(), f1.create(TEST_PATH("dense-short1.json"), "tensor(z[3])")));
+ TEST_DO(verify_tensor(make_simple_dense_tensor(), f1.create(TEST_PATH("dense-short2.json"), "tensor(z[3])")));
+}
+
TEST_F("require that bad lz4 file fails to load creating empty result", ConstantTensorLoader(factory)) {
TEST_DO(verify_tensor(sparse_tensor_nocells(), f1.create(TEST_PATH("bad_lz4.json.lz4"), "tensor(x{},y{})")));
}
diff --git a/eval/src/tests/instruction/dense_multi_matmul_function/dense_multi_matmul_function_test.cpp b/eval/src/tests/instruction/dense_multi_matmul_function/dense_multi_matmul_function_test.cpp
index 8ee0ba90af9..0d171fa0668 100644
--- a/eval/src/tests/instruction/dense_multi_matmul_function/dense_multi_matmul_function_test.cpp
+++ b/eval/src/tests/instruction/dense_multi_matmul_function/dense_multi_matmul_function_test.cpp
@@ -42,8 +42,8 @@ void verify_optimized(const vespalib::string &expr, const FunInfo &details)
CellTypeSpace stable_types(CellTypeUtils::list_stable_types(), 2);
CellTypeSpace unstable_types(CellTypeUtils::list_unstable_types(), 2);
EvalFixture::verify<FunInfo>(expr, {details}, CellTypeSpace(stable_types).same());
- EvalFixture::verify<FunInfo>(expr, {}, CellTypeSpace(stable_types).different());
- EvalFixture::verify<FunInfo>(expr, {}, unstable_types);
+ EvalFixture::verify<FunInfo>(expr, {}, CellTypeSpace(std::move(stable_types)).different());
+ EvalFixture::verify<FunInfo>(expr, {}, std::move(unstable_types));
}
void verify_not_optimized(const vespalib::string &expr) {
diff --git a/eval/src/vespa/eval/eval/test/cell_type_space.h b/eval/src/vespa/eval/eval/test/cell_type_space.h
index e4ab26c2f91..3f8abfe5936 100644
--- a/eval/src/vespa/eval/eval/test/cell_type_space.h
+++ b/eval/src/vespa/eval/eval/test/cell_type_space.h
@@ -36,6 +36,8 @@ public:
assert(n > 0);
skip_unwanted();
}
+ CellTypeSpace(const CellTypeSpace& rhs) = default;
+ CellTypeSpace(CellTypeSpace&& rhs) noexcept = default;
~CellTypeSpace();
CellTypeSpace &same() {
_drop_different = true;
diff --git a/eval/src/vespa/eval/eval/value_cache/constant_tensor_loader.cpp b/eval/src/vespa/eval/eval/value_cache/constant_tensor_loader.cpp
index 9af473f1f94..5654a3abcbe 100644
--- a/eval/src/vespa/eval/eval/value_cache/constant_tensor_loader.cpp
+++ b/eval/src/vespa/eval/eval/value_cache/constant_tensor_loader.cpp
@@ -41,6 +41,52 @@ struct AddressExtractor : ObjectTraverser {
}
};
+struct SingleMappedExtractor : ObjectTraverser {
+ const vespalib::string &dimension;
+ TensorSpec &spec;
+ SingleMappedExtractor(const vespalib::string &dimension_in, TensorSpec &spec_in)
+ : dimension(dimension_in),
+ spec(spec_in)
+ {}
+ void field(const Memory &symbol, const Inspector &inspector) override {
+ vespalib::string label = symbol.make_string();
+ double value = inspector.asDouble();
+ TensorSpec::Address address;
+ address.emplace(dimension, label);
+ spec.add(address, value);
+ }
+};
+
+
+void decodeSingleMappedForm(const Inspector &root, const ValueType &value_type, TensorSpec &spec) {
+ auto extractor = SingleMappedExtractor(value_type.dimensions()[0].name, spec);
+ root.traverse(extractor);
+}
+
+void decodeSingleDenseForm(const Inspector &values, const ValueType &value_type, TensorSpec &spec) {
+ const auto &dimension = value_type.dimensions()[0].name;
+ for (size_t i = 0; i < values.entries(); ++i) {
+ TensorSpec::Address address;
+ address.emplace(dimension, TensorSpec::Label(i));
+ spec.add(address, values[i].asDouble());
+ }
+}
+
+void decodeLiteralForm(const Inspector &cells, const ValueType &value_type, TensorSpec &spec) {
+ std::set<vespalib::string> indexed;
+ for (const auto &dimension: value_type.dimensions()) {
+ if (dimension.is_indexed()) {
+ indexed.insert(dimension.name);
+ }
+ }
+ for (size_t i = 0; i < cells.entries(); ++i) {
+ TensorSpec::Address address;
+ AddressExtractor extractor(indexed, address);
+ cells[i]["address"].traverse(extractor);
+ spec.add(address, cells[i]["value"].asDouble());
+ }
+}
+
void decode_json(const vespalib::string &path, Input &input, Slime &slime) {
if (slime::JsonFormat::decode(input, slime) == 0) {
LOG(warning, "file contains invalid json: %s", path.c_str());
@@ -90,19 +136,26 @@ ConstantTensorLoader::create(const vespalib::string &path, const vespalib::strin
}
Slime slime;
decode_json(path, slime);
- std::set<vespalib::string> indexed;
- for (const auto &dimension: value_type.dimensions()) {
- if (dimension.is_indexed()) {
- indexed.insert(dimension.name);
- }
- }
TensorSpec spec(type);
- const Inspector &cells = slime.get()["cells"];
- for (size_t i = 0; i < cells.entries(); ++i) {
- TensorSpec::Address address;
- AddressExtractor extractor(indexed, address);
- cells[i]["address"].traverse(extractor);
- spec.add(address, cells[i]["value"].asDouble());
+ bool isSingleDenseType = value_type.is_dense() && (value_type.count_indexed_dimensions() == 1);
+ bool isSingleMappedType = value_type.is_sparse() && (value_type.count_mapped_dimensions() == 1);
+ const Inspector &root = slime.get();
+ const Inspector &cells = root["cells"];
+ const Inspector &values = root["values"];
+ if (cells.type().getId() == vespalib::slime::ARRAY::ID) {
+ decodeLiteralForm(cells, value_type, spec);
+ }
+ else if (cells.type().getId() == vespalib::slime::OBJECT::ID && isSingleMappedType) {
+ decodeSingleMappedForm(cells, value_type, spec);
+ }
+ else if (values.type().getId() == vespalib::slime::ARRAY::ID && isSingleDenseType) {
+ decodeSingleDenseForm(values, value_type, spec);
+ }
+ else if (root.type().getId() == vespalib::slime::OBJECT::ID && isSingleMappedType) {
+ decodeSingleMappedForm(root, value_type, spec);
+ }
+ else if (root.type().getId() == vespalib::slime::ARRAY::ID && isSingleDenseType) {
+ decodeSingleDenseForm(root, value_type, spec);
}
try {
return std::make_unique<SimpleConstantValue>(value_from_spec(spec, _factory));
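
The loader change above adds short-form JSON for constant tensors: a bare array for a single indexed dimension and a bare object for a single mapped dimension, alongside the existing "cells" literal form. The C++ code uses Slime; as a language-neutral illustration, here is a Java sketch of the same dispatch using Jackson for brevity, with simplified cell addressing.

```java
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

import java.util.LinkedHashMap;
import java.util.Map;

public class ShortFormTensorSketch {

    // Decode either the wrapped form ({"values": [...]} / {"cells": {...}}) or the bare short form.
    static Map<String, Double> decode(String json, String dimension, boolean indexed) throws Exception {
        JsonNode root = new ObjectMapper().readTree(json);
        JsonNode payload = indexed ? (root.has("values") ? root.get("values") : root)
                                   : (root.has("cells") ? root.get("cells") : root);
        Map<String, Double> cells = new LinkedHashMap<>();
        if (indexed && payload.isArray()) {
            for (int i = 0; i < payload.size(); i++)
                cells.put(dimension + ":" + i, payload.get(i).asDouble());          // dense: position is the label
        } else if (!indexed && payload.isObject()) {
            payload.fields().forEachRemaining(f ->
                cells.put(dimension + ":" + f.getKey(), f.getValue().asDouble()));  // sparse: key is the label
        }
        return cells;
    }

    public static void main(String[] args) throws Exception {
        System.out.println(decode("[ 1, 2.0, 3.5 ]", "z", true));                                    // dense short form
        System.out.println(decode("{\"foo\": 1.0, \"bar\": 2.0, \"three\": 3.0}", "mydim", false));  // sparse short form
    }
}
```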
diff --git a/flags/pom.xml b/flags/pom.xml
index 0c268fbd644..2c736d04ccb 100644
--- a/flags/pom.xml
+++ b/flags/pom.xml
@@ -97,6 +97,9 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
+ <configuration>
+ <suppressWarningMissingImportPackages>true</suppressWarningMissingImportPackages>
+ </configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
index d75e925f3fa..63313c6ed60 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
@@ -49,14 +49,14 @@ public class Flags {
private static volatile TreeMap<FlagId, FlagDefinition> flags = new TreeMap<>();
public static final UnboundBooleanFlag IPV6_IN_GCP = defineFeatureFlag(
- "ipv6-in-gcp", true,
+ "ipv6-in-gcp", false,
List.of("hakonhall"), "2023-05-15", "2023-06-15",
"Provision GCP hosts with external IPv6 addresses",
"Takes effect on the next host provisioning");
public static final UnboundBooleanFlag DROP_CACHES = defineFeatureFlag(
"drop-caches", false,
- List.of("hakonhall", "baldersheim"), "2023-03-06", "2023-06-05",
+ List.of("hakonhall", "baldersheim"), "2023-03-06", "2023-08-05",
"Drop caches on tenant hosts",
"Takes effect on next tick",
ZONE_ID,
@@ -265,7 +265,7 @@ public class Flags {
public static final UnboundBooleanFlag ENABLED_HORIZON_DASHBOARD = defineFeatureFlag(
"enabled-horizon-dashboard", false,
- List.of("olaa"), "2021-09-13", "2023-06-01",
+ List.of("olaa"), "2021-09-13", "2023-09-01",
"Enable Horizon dashboard",
"Takes effect immediately",
TENANT_ID, CONSOLE_USER_EMAIL
@@ -322,7 +322,7 @@ public class Flags {
public static final UnboundBooleanFlag SEPARATE_METRIC_CHECK_CONFIG = defineFeatureFlag(
"separate-metric-check-config", false,
- List.of("olaa"), "2022-07-04", "2023-06-01",
+ List.of("olaa"), "2022-07-04", "2023-09-01",
"Determines whether one metrics config check should be written per Vespa node",
"Takes effect on next tick",
HOSTNAME);
@@ -344,18 +344,11 @@ public class Flags {
public static final UnboundBooleanFlag ENABLE_OTELCOL = defineFeatureFlag(
"enable-otel-collector", false,
- List.of("olaa"), "2022-09-23", "2023-06-01",
+ List.of("olaa"), "2022-09-23", "2023-09-01",
"Whether an OpenTelemetry collector should be enabled",
"Takes effect at next tick",
APPLICATION_ID);
- public static final UnboundBooleanFlag CONSOLE_CSRF = defineFeatureFlag(
- "console-csrf", false,
- List.of("bjorncs", "tokle"), "2022-09-26", "2023-06-01",
- "Enable CSRF token in console",
- "Takes effect immediately",
- CONSOLE_USER_EMAIL);
-
public static final UnboundStringFlag CORE_ENCRYPTION_PUBLIC_KEY_ID = defineStringFlag(
"core-encryption-public-key-id", "",
List.of("vekterli"), "2022-11-03", "2023-10-01",
@@ -391,13 +384,13 @@ public class Flags {
HOSTNAME);
public static final UnboundBooleanFlag ALLOW_MORE_THAN_ONE_CONTENT_GROUP_DOWN = defineFeatureFlag(
- "allow-more-than-one-content-group-down", false, List.of("hmusum"), "2023-04-14", "2023-06-14",
+ "allow-more-than-one-content-group-down", false, List.of("hmusum"), "2023-04-14", "2023-07-01",
"Whether to enable possible configuration of letting more than one content group down",
"Takes effect at redeployment",
ZONE_ID, APPLICATION_ID);
public static final UnboundBooleanFlag NEW_IDDOC_LAYOUT = defineFeatureFlag(
- "new_iddoc_layout", false, List.of("tokle", "bjorncs", "olaa"), "2023-04-24", "2023-05-31",
+ "new_iddoc_layout", false, List.of("tokle", "bjorncs", "olaa"), "2023-04-24", "2023-06-30",
"Whether to use new identity document layout",
"Takes effect on node reboot",
HOSTNAME, APPLICATION_ID, VESPA_VERSION);
@@ -408,18 +401,18 @@ public class Flags {
"Takes effect on application deployment",
APPLICATION_ID);
- public static final UnboundBooleanFlag USE_VESPA_ALMA_LINUX_X86_64_AMI = defineFeatureFlag(
- "use-vespa-alma-linux-x86_64-ami", false, List.of("hmusum"), "2023-05-04", "2023-07-01",
- "Whether to use VESPA-ALMALINUX-8-* AMI for x86_64 architecture",
- "Takes effect when provisioning new AWS hosts",
- APPLICATION_ID);
-
public static final UnboundBooleanFlag ENABLE_THE_ONE_THAT_SHOULD_NOT_BE_NAMED = defineFeatureFlag(
- "enable-the-one-that-should-not-be-named", false, List.of("hmusum"), "2023-05-08", "2023-06-01",
+ "enable-the-one-that-should-not-be-named", false, List.of("hmusum"), "2023-05-08", "2023-07-01",
"Whether to enable the one program that should not be named",
"Takes effect at next host-admin tick",
ZONE_ID);
+ public static final UnboundListFlag<String> WEIGHTED_ENDPOINT_RECORD_TTL = defineListFlag(
+ "weighted-endpoint-record-ttl", List.of(), String.class, List.of("jonmv"), "2023-05-16", "2023-06-16",
+            "A list of endpoints and custom TTLs, of the form \"endpoint-fqdn:TTL-seconds\". " +
+ "Where specified, CNAME records are used instead of the default ALIAS records, which have a default 60s TTL.",
+ "Takes effect at redeployment from controller");
+
public static final UnboundBooleanFlag ENABLE_CONDITIONAL_PUT_REMOVE_WRITE_REPAIR = defineFeatureFlag(
"enable-conditional-put-remove-write-repair", false,
List.of("vekterli", "havardpe"), "2023-05-10", "2023-07-01",
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java b/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
index e80348261ef..c19ebc5dcd4 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
@@ -9,6 +9,7 @@ import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.util.List;
+import java.util.Set;
import java.util.function.Predicate;
import java.util.regex.Pattern;
@@ -304,8 +305,9 @@ public class PermanentFlags {
public static final UnboundStringFlag ADMIN_CLUSTER_NODE_ARCHITECTURE = defineStringFlag(
"admin-cluster-node-architecture", "x86_64",
"Architecture to use for node resources. Used when implicitly creating admin clusters " +
- "(logserver, clustercontroller). Valid values: x86_64, arm64",
+ "(logserver and clustercontroller clusters).",
"Takes effect on next redeployment",
+ value -> Set.of("any", "arm64", "x86_64").contains(value),
ZONE_ID, APPLICATION_ID);
public static final UnboundListFlag<String> CLOUD_ACCOUNTS = defineListFlag(
@@ -348,7 +350,7 @@ public class PermanentFlags {
TENANT_ID);
public static final UnboundIntFlag KEEP_FILE_REFERENCES_ON_TENANT_NODES = defineIntFlag(
- "keep-file-references-on-tenant-nodes", 21,
+ "keep-file-references-on-tenant-nodes", 30,
"How many days to keep file references on tenant nodes (based on last modification time)",
"Takes effect on restart of Docker container",
ZONE_ID, APPLICATION_ID
diff --git a/fnet/src/vespa/fnet/connection.cpp b/fnet/src/vespa/fnet/connection.cpp
index fef8a6bf01b..314fc7517e5 100644
--- a/fnet/src/vespa/fnet/connection.cpp
+++ b/fnet/src/vespa/fnet/connection.cpp
@@ -78,7 +78,7 @@ struct DoHandshakeWork : vespalib::Executor::Task {
}
-FNET_Connection::ResolveHandler::ResolveHandler(FNET_Connection *conn)
+FNET_Connection::ResolveHandler::ResolveHandler(FNET_Connection *conn) noexcept
: connection(conn),
address()
{
diff --git a/fnet/src/vespa/fnet/connection.h b/fnet/src/vespa/fnet/connection.h
index 80927fd375c..0db71db14e0 100644
--- a/fnet/src/vespa/fnet/connection.h
+++ b/fnet/src/vespa/fnet/connection.h
@@ -67,7 +67,7 @@ private:
struct ResolveHandler : public vespalib::AsyncResolver::ResultHandler {
FNET_Connection *connection;
vespalib::SocketAddress address;
- ResolveHandler(FNET_Connection *conn);
+ ResolveHandler(FNET_Connection *conn) noexcept;
void handle_result(vespalib::SocketAddress result) override;
~ResolveHandler();
};
diff --git a/fnet/src/vespa/fnet/transport.cpp b/fnet/src/vespa/fnet/transport.cpp
index 1553fc010c0..be6dd3e5e39 100644
--- a/fnet/src/vespa/fnet/transport.cpp
+++ b/fnet/src/vespa/fnet/transport.cpp
@@ -20,12 +20,16 @@ struct HashState {
const void *self;
clock::time_point now;
uint64_t key_hash;
- HashState(const void *key, size_t key_len)
- : self(this),
- now(clock::now()),
- key_hash(XXH64(key, key_len, 0)) {}
+ HashState(const void *key, size_t key_len) __attribute__((noinline));
};
+HashState::HashState(const void *key, size_t key_len)
+ : self(this),
+ now(clock::now()),
+ key_hash(XXH64(key, key_len, 0))
+{
+}
+
VESPA_THREAD_STACK_TAG(fnet_work_pool);
struct DefaultTimeTools : fnet::TimeTools {
diff --git a/fsa/pom.xml b/fsa/pom.xml
index 5d18bdb666c..374f7ac5e21 100644
--- a/fsa/pom.xml
+++ b/fsa/pom.xml
@@ -40,6 +40,10 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
+ <configuration>
+ <bundleType>CORE</bundleType>
+ <suppressWarningMissingImportPackages>true</suppressWarningMissingImportPackages>
+ </configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
diff --git a/hosted-tenant-base/pom.xml b/hosted-tenant-base/pom.xml
index 289854173c3..adde2a32720 100644
--- a/hosted-tenant-base/pom.xml
+++ b/hosted-tenant-base/pom.xml
@@ -33,6 +33,7 @@
<properties>
<vespaversion>${project.version}</vespaversion>
+ <bundle-plugin.failOnWarnings>false</bundle-plugin.failOnWarnings>
<target_jdk_version>17</target_jdk_version>
<maven-compiler-plugin.version>3.10.1</maven-compiler-plugin.version>
<maven-surefire-plugin.version>2.22.2</maven-surefire-plugin.version>
@@ -260,6 +261,7 @@
<version>${vespaversion}</version>
<extensions>true</extensions>
<configuration>
+ <failOnWarnings>${bundle-plugin.failOnWarnings}</failOnWarnings>
<!-- override default test bundle scope translation which translates 'test' to 'compile' -->
<!-- note: ordering affects how overrides are evaluated; put the most specific overrides first! -->
<testBundleScopeOverrides>
diff --git a/hosted-zone-api/pom.xml b/hosted-zone-api/pom.xml
index 4227d457c4d..bd5b759e972 100644
--- a/hosted-zone-api/pom.xml
+++ b/hosted-zone-api/pom.xml
@@ -51,6 +51,9 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
+ <configuration>
+ <bundleType>CORE</bundleType>
+ </configuration>
</plugin>
<plugin>
<groupId>com.yahoo.vespa</groupId>
diff --git a/http-client/pom.xml b/http-client/pom.xml
index 133da65631c..c8a58330f24 100644
--- a/http-client/pom.xml
+++ b/http-client/pom.xml
@@ -72,6 +72,9 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
+ <configuration>
+ <suppressWarningMissingImportPackages>true</suppressWarningMissingImportPackages>
+ </configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
@@ -81,4 +84,3 @@
</build>
</project>
-
diff --git a/jaxrs_utils/pom.xml b/jaxrs_utils/pom.xml
index c174fea1092..02d1c1b8915 100644
--- a/jaxrs_utils/pom.xml
+++ b/jaxrs_utils/pom.xml
@@ -33,6 +33,9 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
+ <configuration>
+ <suppressWarningMissingImportPackages>true</suppressWarningMissingImportPackages>
+ </configuration>
</plugin>
</plugins>
</build>
diff --git a/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/cors/CorsLogic.java b/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/cors/CorsLogic.java
index e261f420e1c..f24778d1241 100644
--- a/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/cors/CorsLogic.java
+++ b/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/cors/CorsLogic.java
@@ -2,15 +2,19 @@
package com.yahoo.jdisc.http.filter.security.cors;
import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
+import java.util.regex.Pattern;
/**
* @author bjorncs
*/
class CorsLogic {
- private CorsLogic() {}
static final String CORS_PREFLIGHT_REQUEST_CACHE_TTL = Long.toString(Duration.ofDays(7).getSeconds());
@@ -25,23 +29,49 @@ class CorsLogic {
"Vary", "*"
);
- static Map<String, String> createCorsResponseHeaders(String requestOriginHeader,
- Set<String> allowedOrigins) {
+ private final boolean allowAnyOrigin;
+ private final Set<String> allowedOrigins;
+ private final List<Pattern> allowedOriginPatterns;
+ private CorsLogic(boolean allowAnyOrigin, Set<String> allowedOrigins, List<Pattern> allowedOriginPatterns) {
+ this.allowAnyOrigin = allowAnyOrigin;
+ this.allowedOrigins = Set.copyOf(allowedOrigins);
+ this.allowedOriginPatterns = List.copyOf(allowedOriginPatterns);
+ }
+
+ boolean originMatches(String origin) {
+ if (allowAnyOrigin) return true;
+ if (allowedOrigins.contains(origin)) return true;
+ return allowedOriginPatterns.stream().anyMatch(pattern -> pattern.matcher(origin).matches());
+ }
+
+ Map<String, String> createCorsResponseHeaders(String requestOriginHeader) {
if (requestOriginHeader == null) return Map.of();
TreeMap<String, String> headers = new TreeMap<>();
- if (requestOriginMatchesAnyAllowed(requestOriginHeader, allowedOrigins))
+ if (originMatches(requestOriginHeader))
headers.put(ALLOW_ORIGIN_HEADER, requestOriginHeader);
headers.putAll(ACCESS_CONTROL_HEADERS);
return headers;
}
- static Map<String, String> createCorsPreflightResponseHeaders(String requestOriginHeader,
- Set<String> allowedOrigins) {
- return createCorsResponseHeaders(requestOriginHeader, allowedOrigins);
+ Map<String, String> preflightResponseHeaders(String requestOriginHeader) {
+ return createCorsResponseHeaders(requestOriginHeader);
}
- private static boolean requestOriginMatchesAnyAllowed(String requestOrigin, Set<String> allowedUrls) {
- return allowedUrls.stream().anyMatch(requestOrigin::equals) || allowedUrls.contains("*");
+ static CorsLogic forAllowedOrigins(Collection<String> allowedOrigins) {
+ Set<String> allowedOriginsVerbatim = new HashSet<>();
+ List<Pattern> allowedOriginPatterns = new ArrayList<>();
+ for (String allowedOrigin : allowedOrigins) {
+ if (allowedOrigin.isBlank()) continue;
+ if (allowedOrigin.length() > 0) {
+ if ("*".equals(allowedOrigin))
+ return new CorsLogic(true, Set.of(), List.of());
+ else if (allowedOrigin.contains("*"))
+ allowedOriginPatterns.add(Pattern.compile(allowedOrigin.replace(".", "\\.").replace("*", ".*")));
+ else
+ allowedOriginsVerbatim.add(allowedOrigin);
+ }
+ }
+ return new CorsLogic(false, allowedOriginsVerbatim, allowedOriginPatterns);
}
}
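
Below the CorsLogic changes, a minimal standalone sketch of how the wildcard-to-regex conversion and origin matching used by forAllowedOrigins behave; the class name and sample origins here are illustrative, not part of the patch.

import java.util.List;
import java.util.regex.Pattern;

// Illustrative sketch only: mirrors how an allowed origin containing '*' is turned
// into a regex ('.' is escaped, '*' becomes '.*') and matched against a request origin.
class OriginPatternSketch {
    static Pattern toPattern(String allowedOrigin) {
        return Pattern.compile(allowedOrigin.replace(".", "\\.").replace("*", ".*"));
    }

    public static void main(String[] args) {
        List<Pattern> patterns = List.of(toPattern("http://*.domain.origin"), toPattern("*://do.main"));
        String origin = "http://any.sub.domain.origin"; // sample request origin
        boolean allowed = patterns.stream().anyMatch(p -> p.matcher(origin).matches());
        System.out.println(origin + " allowed: " + allowed); // prints "... allowed: true"
    }
}
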
diff --git a/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/cors/CorsPreflightRequestFilter.java b/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/cors/CorsPreflightRequestFilter.java
index e2efd2d220c..935e738b5e3 100644
--- a/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/cors/CorsPreflightRequestFilter.java
+++ b/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/cors/CorsPreflightRequestFilter.java
@@ -10,8 +10,6 @@ import com.yahoo.jdisc.http.filter.DiscFilterRequest;
import com.yahoo.jdisc.http.filter.SecurityRequestFilter;
import com.yahoo.yolean.chain.Provides;
-import java.util.Set;
-
import static com.yahoo.jdisc.http.HttpRequest.Method.OPTIONS;
/**
@@ -33,11 +31,11 @@ import static com.yahoo.jdisc.http.HttpRequest.Method.OPTIONS;
*/
@Provides("CorsPreflightRequestFilter")
public class CorsPreflightRequestFilter implements SecurityRequestFilter {
- private final Set<String> allowedUrls;
+ private final CorsLogic cors;
@Inject
public CorsPreflightRequestFilter(CorsFilterConfig config) {
- this.allowedUrls = Set.copyOf(config.allowedUrls());
+ this.cors = CorsLogic.forAllowedOrigins(config.allowedUrls());
}
@Override
@@ -46,8 +44,7 @@ public class CorsPreflightRequestFilter implements SecurityRequestFilter {
return;
HttpResponse response = HttpResponse.newInstance(Response.Status.OK);
- String origin = discFilterRequest.getHeader("Origin");
- CorsLogic.createCorsPreflightResponseHeaders(origin, allowedUrls)
+ cors.preflightResponseHeaders(discFilterRequest.getHeader("Origin"))
.forEach(response.headers()::put);
ContentChannel cc = responseHandler.handleResponse(response);
diff --git a/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/cors/CorsResponseFilter.java b/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/cors/CorsResponseFilter.java
index f56965ea6a8..4b6c7211d11 100644
--- a/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/cors/CorsResponseFilter.java
+++ b/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/cors/CorsResponseFilter.java
@@ -8,9 +8,6 @@ import com.yahoo.jdisc.http.filter.RequestView;
import com.yahoo.jdisc.http.filter.SecurityResponseFilter;
import com.yahoo.yolean.chain.Provides;
-import java.util.Set;
-
-
/**
* @author gv
* @author Tony Vaagenes
@@ -19,16 +16,16 @@ import java.util.Set;
@Provides("CorsResponseFilter")
public class CorsResponseFilter extends AbstractResource implements SecurityResponseFilter {
- private final Set<String> allowedUrls;
+ private final CorsLogic cors;
@Inject
public CorsResponseFilter(CorsFilterConfig config) {
- this.allowedUrls = Set.copyOf(config.allowedUrls());
+ this.cors = CorsLogic.forAllowedOrigins(config.allowedUrls());
}
@Override
public void filter(DiscFilterResponse response, RequestView request) {
- CorsLogic.createCorsResponseHeaders(request.getFirstHeader("Origin").orElse(null), allowedUrls)
+ cors.createCorsResponseHeaders(request.getFirstHeader("Origin").orElse(null))
.forEach(response::setHeader);
}
diff --git a/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/cors/CorsLogicTest.java b/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/cors/CorsLogicTest.java
new file mode 100644
index 00000000000..60b5edde97d
--- /dev/null
+++ b/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/cors/CorsLogicTest.java
@@ -0,0 +1,40 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.jdisc.http.filter.security.cors;
+
+import org.junit.jupiter.api.Test;
+
+import java.util.List;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+/**
+ * @author freva
+ */
+class CorsLogicTest {
+
+ @Test
+ void wildcard_matches_everything() {
+ CorsLogic logic = CorsLogic.forAllowedOrigins(List.of("*"));
+ assertMatches(logic, true, "http://any.origin", "https://any.origin", "http://any.origin:8080");
+ }
+
+ @Test
+ void matches_verbatim_and_pattern() {
+ CorsLogic logic = CorsLogic.forAllowedOrigins(List.of("http://my.origin", "http://*.domain.origin", "*://do.main", "*.tld"));
+ assertMatches(logic, true,
+ "http://my.origin", // Matches verbatim
+ "http://any.domain.origin", // Matches first pattern
+ "http://any.sub.domain.origin", // Matches first pattern
+ "http://do.main", "https://do.main", // Matches second pattern
+ "https://any.thing.tld"); // Matches third pattern
+ assertMatches(logic, false,
+ "https://my.origin", // Different scheme from verbatim
+ "http://domain.origin", // Missing subdomain to match the first pattern
+ "https://sub.do.main"); // Second pattern, but with subdomain
+ }
+
+ private static void assertMatches(CorsLogic logic, boolean expected, String... origins) {
+ for (String origin : origins)
+ assertEquals(expected, logic.originMatches(origin), origin);
+ }
+}
diff --git a/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/cors/CorsResponseFilterTest.java b/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/cors/CorsResponseFilterTest.java
index 7762fde1a72..1fded811eed 100644
--- a/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/cors/CorsResponseFilterTest.java
+++ b/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/cors/CorsResponseFilterTest.java
@@ -54,6 +54,12 @@ public class CorsResponseFilterTest {
assertEquals("http://any.origin", headers.get(ALLOW_ORIGIN_HEADER));
}
+ @Test
+ void matches_subdomains() {
+ Map<String, String> headers = doFilterRequest(newResponseFilter("http://*.domain.origin"), "http://any.domain.origin");
+ assertEquals("http://any.domain.origin", headers.get(ALLOW_ORIGIN_HEADER));
+ }
+
private static Map<String, String> doFilterRequest(SecurityResponseFilter filter, String originUrl) {
TestResponse response = new TestResponse();
filter.filter(response, newRequestView(originUrl));
diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/core/ContainerWatchdog.java b/jdisc_core/src/main/java/com/yahoo/jdisc/core/ContainerWatchdog.java
index f28d5ea2b26..09e52beba59 100644
--- a/jdisc_core/src/main/java/com/yahoo/jdisc/core/ContainerWatchdog.java
+++ b/jdisc_core/src/main/java/com/yahoo/jdisc/core/ContainerWatchdog.java
@@ -160,8 +160,15 @@ class ContainerWatchdog implements ContainerWatchdogMetrics, AutoCloseable {
record ThreadDetails(Thread thread, Bundle bundle) {}
List<ThreadDetails> staleThreads = new ArrayList<>();
for (Thread t : threads) {
+ // Find threads with context classloader from an uninstalled bundle
+ Bundle b = isClassloaderForUninstalledBundle(t.getContextClassLoader()).orElse(null);
+ if (b != null) {
+ staleThreads.add(new ThreadDetails(t, b));
+ continue;
+ }
+
// Find threads which are sub-classes of java.lang.Thread from an uninstalled bundle
- Bundle b = hasClassloaderForUninstalledBundle(t).orElse(null);
+ b = hasClassloaderForUninstalledBundle(t).orElse(null);
if (b != null) {
staleThreads.add(new ThreadDetails(t, b));
continue;
@@ -201,8 +208,12 @@ class ContainerWatchdog implements ContainerWatchdogMetrics, AutoCloseable {
}
private static Optional<Bundle> hasClassloaderForUninstalledBundle(Object o) {
- if (o.getClass().getClassLoader() instanceof BundleWiringImpl.BundleClassLoader cl) {
- Bundle b = cl.getBundle();
+ return isClassloaderForUninstalledBundle(o.getClass().getClassLoader());
+ }
+
+ private static Optional<Bundle> isClassloaderForUninstalledBundle(ClassLoader cl) {
+ if (cl instanceof BundleWiringImpl.BundleClassLoader bcl) {
+ Bundle b = bcl.getBundle();
if (b.getState() == Bundle.UNINSTALLED) return Optional.of(b);
}
return Optional.empty();
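
A condensed sketch of the two-step stale-thread check the watchdog now performs, assuming the Felix-internal BundleWiringImpl import already used by ContainerWatchdog; the helper names are illustrative.

import org.apache.felix.framework.BundleWiringImpl;
import org.osgi.framework.Bundle;

import java.util.Optional;

// Illustrative sketch only: first inspect the thread's context classloader, then the
// classloader of the Thread subclass itself, returning the uninstalled bundle if any.
class StaleThreadCheckSketch {
    static Optional<Bundle> uninstalledBundleOf(ClassLoader cl) {
        if (cl instanceof BundleWiringImpl.BundleClassLoader bcl) {
            Bundle b = bcl.getBundle();
            if (b.getState() == Bundle.UNINSTALLED) return Optional.of(b);
        }
        return Optional.empty();
    }

    static Optional<Bundle> staleBundleFor(Thread t) {
        return uninstalledBundleOf(t.getContextClassLoader())
                .or(() -> uninstalledBundleOf(t.getClass().getClassLoader()));
    }
}
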
diff --git a/jrt/pom.xml b/jrt/pom.xml
index 926756da4a0..74d69c4b117 100644
--- a/jrt/pom.xml
+++ b/jrt/pom.xml
@@ -50,6 +50,10 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
+ <configuration>
+ <bundleType>CORE</bundleType>
+ <suppressWarningMissingImportPackages>true</suppressWarningMissingImportPackages>
+ </configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
diff --git a/linguistics-components/pom.xml b/linguistics-components/pom.xml
index 5031ad73556..b3bc52c5e23 100644
--- a/linguistics-components/pom.xml
+++ b/linguistics-components/pom.xml
@@ -89,6 +89,12 @@
<scope>provided</scope>
<classifier>no_aop</classifier>
</dependency>
+ <dependency>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>configdefinitions</artifactId>
+ <version>${project.version}</version>
+ <scope>compile</scope>
+ </dependency>
</dependencies>
<build>
<plugins>
diff --git a/linguistics-components/src/main/java/com/yahoo/language/huggingface/HuggingFaceTokenizer.java b/linguistics-components/src/main/java/com/yahoo/language/huggingface/HuggingFaceTokenizer.java
index b92e0678970..2c66fc18c9b 100644
--- a/linguistics-components/src/main/java/com/yahoo/language/huggingface/HuggingFaceTokenizer.java
+++ b/linguistics-components/src/main/java/com/yahoo/language/huggingface/HuggingFaceTokenizer.java
@@ -6,6 +6,7 @@ import com.yahoo.api.annotations.Beta;
import com.yahoo.component.AbstractComponent;
import com.yahoo.component.annotation.Inject;
import com.yahoo.language.Language;
+import com.yahoo.language.huggingface.config.HuggingFaceTokenizerConfig;
import com.yahoo.language.process.Embedder;
import com.yahoo.language.process.Segmenter;
import com.yahoo.language.tools.Embed;
@@ -39,10 +40,14 @@ public class HuggingFaceTokenizer extends AbstractComponent implements Embedder,
try {
b.models.forEach((language, path) -> {
models.put(language,
- uncheck(() -> ai.djl.huggingface.tokenizers.HuggingFaceTokenizer.builder()
- .optTokenizerPath(path)
- .optAddSpecialTokens(b.addSpecialTokens != null ? b.addSpecialTokens : true)
- .build()));
+ uncheck(() -> {
+ var hfb = ai.djl.huggingface.tokenizers.HuggingFaceTokenizer.builder()
+ .optTokenizerPath(path)
+ .optAddSpecialTokens(b.addSpecialTokens != null ? b.addSpecialTokens : true);
+ if (b.maxLength != null) hfb.optMaxLength(b.maxLength);
+ if (b.truncation != null) hfb.optTruncation(b.truncation);
+ return hfb.build();
+ }));
});
} finally {
Thread.currentThread().setContextClassLoader(original);
@@ -76,6 +81,7 @@ public class HuggingFaceTokenizer extends AbstractComponent implements Embedder,
public String decode(List<Long> tokens, Language language) { return resolve(language).decode(toArray(tokens)); }
@Override public void close() { models.forEach((__, model) -> model.close()); }
+ @Override public void deconstruct() { close(); }
private ai.djl.huggingface.tokenizers.HuggingFaceTokenizer resolve(Language language) {
// Disregard language if there is default model
@@ -89,17 +95,23 @@ public class HuggingFaceTokenizer extends AbstractComponent implements Embedder,
public static final class Builder {
private final Map<Language, Path> models = new EnumMap<>(Language.class);
private Boolean addSpecialTokens;
+ private Integer maxLength;
+ private Boolean truncation;
public Builder() {}
public Builder(HuggingFaceTokenizerConfig cfg) {
for (var model : cfg.model())
addModel(Language.fromLanguageTag(model.language()), model.path());
addSpecialTokens(cfg.addSpecialTokens());
+ if (cfg.maxLength() != -1) setMaxLength(cfg.maxLength());
+ if (cfg.truncation()) setTruncation(true);
}
public Builder addModel(Language lang, Path path) { models.put(lang, path); return this; }
public Builder addDefaultModel(Path path) { return addModel(Language.UNKNOWN, path); }
public Builder addSpecialTokens(boolean enabled) { addSpecialTokens = enabled; return this; }
+ public Builder setMaxLength(int length) { maxLength = length; return this; }
+ public Builder setTruncation(boolean enabled) { truncation = enabled; return this; }
public HuggingFaceTokenizer build() { return new HuggingFaceTokenizer(this); }
}
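
A short usage sketch of the extended Builder, assuming a local tokenizer.json; the file path and max length below are placeholders.

import com.yahoo.language.huggingface.HuggingFaceTokenizer;

import java.nio.file.Paths;

// Illustrative sketch only: with truncation enabled, encodings are capped at maxLength tokens.
class TokenizerBuilderSketch {
    public static void main(String[] args) {
        try (var tokenizer = new HuggingFaceTokenizer.Builder()
                .addDefaultModel(Paths.get("/path/to/tokenizer.json")) // placeholder path
                .addSpecialTokens(true)
                .setMaxLength(128)
                .setTruncation(true)
                .build()) {
            System.out.println(tokenizer.encode("placeholder input text").ids().size()); // at most 128
        }
    }
}
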
diff --git a/linguistics-components/src/test/java/com/yahoo/language/huggingface/HuggingFaceTokenizerTest.java b/linguistics-components/src/test/java/com/yahoo/language/huggingface/HuggingFaceTokenizerTest.java
index c79ecbfbfbe..6197fe214f1 100644
--- a/linguistics-components/src/test/java/com/yahoo/language/huggingface/HuggingFaceTokenizerTest.java
+++ b/linguistics-components/src/test/java/com/yahoo/language/huggingface/HuggingFaceTokenizerTest.java
@@ -15,6 +15,9 @@ import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.util.zip.GZIPInputStream;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+
/**
* @author bjorncs
*/
@@ -69,14 +72,37 @@ class HuggingFaceTokenizerTest {
}
}
+ @Test
+ void truncates_to_max_length() throws IOException {
+ int maxLength = 3;
+ var builder = new HuggingFaceTokenizer.Builder()
+ .addDefaultModel(decompressModelFile(tmp, "bert-base-uncased"))
+ .setMaxLength(maxLength)
+ .setTruncation(true);
+ String input = "what was the impact of the manhattan project";
+ try (var tokenizerWithoutSpecialTokens = builder.addSpecialTokens(false).build();
+ var tokenizerWithSpecialTokens = builder.addSpecialTokens(true).build()) {
+ assertMaxLengthRespected(maxLength, tokenizerWithoutSpecialTokens.encode(input));
+ assertMaxLengthRespected(maxLength, tokenizerWithSpecialTokens.encode(input));
+ }
+ }
+
+ private static void assertMaxLengthRespected(int maxLength, Encoding encoding) {
+ assertEquals(maxLength, encoding.ids().size());
+ assertEquals(maxLength, encoding.tokens().size());
+ assertEquals(maxLength, encoding.attentionMask().size());
+ assertEquals(maxLength, encoding.typeIds().size());
+ }
+
private static HuggingFaceTokenizer createTokenizer(Path tmp, String model) throws IOException {
return new HuggingFaceTokenizer.Builder()
.addSpecialTokens(false)
- .addDefaultModel(decompressModelFile(tmp, Paths.get("src/test/models/huggingface/%s.json.gz".formatted(model))))
+ .addDefaultModel(decompressModelFile(tmp, model))
.build();
}
- private static Path decompressModelFile(Path tmp, Path source) throws IOException {
+ private static Path decompressModelFile(Path tmp, String model) throws IOException {
+ var source = Paths.get("src/test/models/huggingface/%s.json.gz".formatted(model));
Path destination = tmp.resolve(source.getFileName().toString().replace(".gz", ""));
try (InputStream in = new GZIPInputStream(Files.newInputStream(source));
OutputStream out = Files.newOutputStream(destination, StandardOpenOption.CREATE)) {
diff --git a/linguistics/abi-spec.json b/linguistics/abi-spec.json
index f35b9036fd8..dc85a2e6f0b 100644
--- a/linguistics/abi-spec.json
+++ b/linguistics/abi-spec.json
@@ -322,6 +322,7 @@
"methods" : [
"public void <init>()",
"public boolean isLetter(int)",
+ "public boolean isSymbol(int)",
"public boolean isDigit(int)",
"public boolean isLatinDigit(int)",
"public boolean isLatin(int)",
@@ -723,6 +724,7 @@
"public static final enum com.yahoo.language.process.TokenType SYMBOL",
"public static final enum com.yahoo.language.process.TokenType ALPHABETIC",
"public static final enum com.yahoo.language.process.TokenType NUMERIC",
+ "public static final enum com.yahoo.language.process.TokenType INDEXABLE_SYMBOL",
"public static final enum com.yahoo.language.process.TokenType MARKER"
]
},
diff --git a/linguistics/pom.xml b/linguistics/pom.xml
index bfbf1beeaea..5db3302b597 100644
--- a/linguistics/pom.xml
+++ b/linguistics/pom.xml
@@ -65,6 +65,10 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
+ <configuration>
+ <bundleType>CORE</bundleType>
+ <suppressWarningMissingImportPackages>true</suppressWarningMissingImportPackages>
+ </configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
diff --git a/linguistics/src/main/java/com/yahoo/language/process/CharacterClasses.java b/linguistics/src/main/java/com/yahoo/language/process/CharacterClasses.java
index 413dce0d6c1..f6177262bf9 100644
--- a/linguistics/src/main/java/com/yahoo/language/process/CharacterClasses.java
+++ b/linguistics/src/main/java/com/yahoo/language/process/CharacterClasses.java
@@ -13,9 +13,8 @@ public class CharacterClasses {
* which are useful to view as letters even though not defined as such in unicode.
*/
public boolean isLetter(int c) {
- if (java.lang.Character.isLetter(c)) return true;
+ if (Character.isLetter(c)) return true;
if (Character.isDigit(c) && ! isLatin(c)) return true; // Not considering these digits, so treat them as letters
- // if (c == '_') return true;
// Some CJK punctuation defined as word characters
if (c == '\u3008' || c == '\u3009' || c == '\u300a' || c == '\u300b' ||
@@ -30,6 +29,13 @@ public class CharacterClasses {
}
/**
+ * Returns true if the character is in the Unicode class "other symbol", which includes emojis.
+ */
+ public boolean isSymbol(int c) {
+ return Character.getType(c) == Character.OTHER_SYMBOL;
+ }
+
+ /**
* Returns true for code points which should be considered digits - same as java.lang.Character.isDigit
*/
public boolean isDigit(int c) {
diff --git a/linguistics/src/main/java/com/yahoo/language/process/GramSplitter.java b/linguistics/src/main/java/com/yahoo/language/process/GramSplitter.java
index 83110c0021e..210d7ac94ff 100644
--- a/linguistics/src/main/java/com/yahoo/language/process/GramSplitter.java
+++ b/linguistics/src/main/java/com/yahoo/language/process/GramSplitter.java
@@ -88,46 +88,54 @@ public class GramSplitter {
}
private Gram findNext() {
- // Skip to next word character
- while (i < input.length() && !characterClasses.isLetterOrDigit(input.codePointAt(i))) {
+ // Skip to next indexable character
+ while (i < input.length() && !isIndexable(input.codePointAt(i))) {
i = input.next(i);
isFirstAfterSeparator = true;
}
- if (i >= input.length()) return null;
-
- UnicodeString gram = input.substring(i, n);
- int nonWordChar = indexOfNonWordCodepoint(gram);
- if (nonWordChar == 0) throw new RuntimeException("Programming error");
-
- if (nonWordChar > 0)
- gram = new UnicodeString(gram.toString().substring(0, nonWordChar));
+ if (i >= input.length()) return null; // no indexable characters
+ int tokenStart = i;
+ UnicodeString gram = input.substring(tokenStart, n);
+ int tokenEnd = tokenEnd(gram);
+ gram = new UnicodeString(gram.toString().substring(0, tokenEnd));
if (gram.codePointCount() == n) { // normal case: got a full length gram
Gram g = new Gram(i, gram.codePointCount());
i = input.next(i);
isFirstAfterSeparator = false;
return g;
}
- else { // gram is too short due either to a non-word separator or end of string
- if (isFirstAfterSeparator) { // make a gram anyway
+ else { // gram is too short: it is a symbol, is cut off by a non-word separator, or reaches the end of the string
+ if (isFirstAfterSeparator || (gram.codePointCount() == 1 && characterClasses.isSymbol(gram.codePointAt(0)))) { // make a gram anyway
Gram g = new Gram(i, gram.codePointCount());
i = input.next(i);
isFirstAfterSeparator = false;
return g;
} else { // skip to next
- i = input.skip(gram.codePointCount() + 1, i);
+ i = input.skip(gram.codePointCount(), i);
isFirstAfterSeparator = true;
return findNext();
}
}
}
- private int indexOfNonWordCodepoint(UnicodeString s) {
- for (int i = 0; i < s.length(); i = s.next(i)) {
+ private boolean isIndexable(int codepoint) {
+ if (characterClasses.isLetterOrDigit(codepoint)) return true;
+ if (characterClasses.isSymbol(codepoint)) return true;
+ return false;
+ }
+
+ /** Given a string s starting with an indexable character, returns the position where that token should end. */
+ private int tokenEnd(UnicodeString s) {
+ if (characterClasses.isSymbol(s.codePointAt(0)))
+ return s.next(0); // symbols have length 1
+
+ int i = 0;
+ for (; i < s.length(); i = s.next(i)) {
if ( ! characterClasses.isLetterOrDigit(s.codePointAt(i)))
return i;
}
- return -1;
+ return i;
}
@Override
diff --git a/linguistics/src/main/java/com/yahoo/language/process/TokenType.java b/linguistics/src/main/java/com/yahoo/language/process/TokenType.java
index 14c7e9bc144..6c3e0c2ab36 100644
--- a/linguistics/src/main/java/com/yahoo/language/process/TokenType.java
+++ b/linguistics/src/main/java/com/yahoo/language/process/TokenType.java
@@ -14,6 +14,7 @@ public enum TokenType {
SYMBOL(3),
ALPHABETIC(4),
NUMERIC(5),
+ INDEXABLE_SYMBOL(6),
MARKER(255);
private final int value;
@@ -34,10 +35,10 @@ public enum TokenType {
* @return whether this type of token can be indexed
*/
public boolean isIndexable() {
- switch (this) {
- case ALPHABETIC: case NUMERIC: return true;
- default: return false;
- }
+ return switch (this) {
+ case ALPHABETIC, NUMERIC, INDEXABLE_SYMBOL -> true;
+ default -> false;
+ };
}
/** Translates this from the int code representation returned from {@link #getValue} */
diff --git a/linguistics/src/main/java/com/yahoo/language/simple/SimpleTokenType.java b/linguistics/src/main/java/com/yahoo/language/simple/SimpleTokenType.java
index 5c321e4da9b..8a88ae8f005 100644
--- a/linguistics/src/main/java/com/yahoo/language/simple/SimpleTokenType.java
+++ b/linguistics/src/main/java/com/yahoo/language/simple/SimpleTokenType.java
@@ -31,8 +31,9 @@ public class SimpleTokenType {
case Character.MATH_SYMBOL:
case Character.CURRENCY_SYMBOL:
case Character.MODIFIER_SYMBOL:
- case Character.OTHER_SYMBOL:
return TokenType.SYMBOL;
+ case Character.OTHER_SYMBOL:
+ return TokenType.INDEXABLE_SYMBOL;
case Character.OTHER_NUMBER:
// "SUPERSCRIPT TWO",
diff --git a/linguistics/src/main/java/com/yahoo/language/simple/SimpleTokenizer.java b/linguistics/src/main/java/com/yahoo/language/simple/SimpleTokenizer.java
index b791c843357..d86ca30a632 100644
--- a/linguistics/src/main/java/com/yahoo/language/simple/SimpleTokenizer.java
+++ b/linguistics/src/main/java/com/yahoo/language/simple/SimpleTokenizer.java
@@ -57,7 +57,7 @@ public class SimpleTokenizer implements Tokenizer {
}
/** Tokenize the input, and apply the given transform to each token string. */
- public Iterable<Token> tokenize(String input, Function<String, String> tokenProocessor) {
+ public Iterable<Token> tokenize(String input, Function<String, String> tokenProcessor) {
if (input.isEmpty()) return List.of();
List<Token> tokens = new ArrayList<>();
@@ -67,11 +67,11 @@ public class SimpleTokenizer implements Tokenizer {
for (int prev = 0, next = Character.charCount(nextCode); next <= input.length(); ) {
nextCode = next < input.length() ? input.codePointAt(next) : SPACE_CODE;
TokenType nextType = SimpleTokenType.valueOf(nextCode);
- if (!prevType.isIndexable() || !nextType.isIndexable()) {
+ if (isAtTokenBoundary(prevType, nextType)) {
String original = input.substring(prev, next);
tokens.add(new SimpleToken(original).setOffset(prev)
.setType(tokenType)
- .setTokenString(tokenProocessor.apply(original)));
+ .setTokenString(tokenProcessor.apply(original)));
prev = next;
prevType = nextType;
tokenType = prevType;
@@ -84,6 +84,12 @@ public class SimpleTokenizer implements Tokenizer {
return tokens;
}
+ private boolean isAtTokenBoundary(TokenType prevType, TokenType nextType) {
+ // Always index each symbol as a token
+ if (prevType == TokenType.INDEXABLE_SYMBOL || nextType == TokenType.INDEXABLE_SYMBOL) return true;
+ return !prevType.isIndexable() || !nextType.isIndexable();
+ }
+
private TokenType determineType(TokenType tokenType, TokenType characterType) {
if (characterType == TokenType.ALPHABETIC) return TokenType.ALPHABETIC;
return tokenType;
diff --git a/linguistics/src/test/java/com/yahoo/language/process/GramSplitterTestCase.java b/linguistics/src/test/java/com/yahoo/language/process/GramSplitterTestCase.java
index 6cefcfbf67a..a219efce3cd 100644
--- a/linguistics/src/test/java/com/yahoo/language/process/GramSplitterTestCase.java
+++ b/linguistics/src/test/java/com/yahoo/language/process/GramSplitterTestCase.java
@@ -49,6 +49,17 @@ public class GramSplitterTestCase {
}
@Test
+ public void testEmojis() {
+ String emoji1 = "\uD83D\uDD2A"; // 🔪
+ String emoji2 = "\uD83D\uDE00"; // 😀
+ assertGramSplit(emoji1, 2, "[" + emoji1 + "]");
+ assertGramSplit(emoji1 + emoji2, 2, "[" + emoji1 + ", " + emoji2 + "]");
+ assertGramSplit(emoji1 + "." + emoji2, 2, "[" + emoji1 + ", " + emoji2 + "]");
+ assertGramSplit("." + emoji1 + "." + emoji2 + ".", 2, "[" + emoji1 + ", " + emoji2 + "]");
+ assertGramSplit("foo" + emoji1 + "bar" + emoji2 + "baz", 2, "[fo, oo, " + emoji1 + ", ba, ar, " + emoji2 + ", ba, az]");
+ }
+
+ @Test
public void testSpaceCornerCases() {
// space corner cases
assertGramSplit("e en e", 1, "[e, e, n, e]");
diff --git a/linguistics/src/test/java/com/yahoo/language/process/TokenTypeTestCase.java b/linguistics/src/test/java/com/yahoo/language/process/TokenTypeTestCase.java
index 0ce9b327533..70a97cda7e3 100644
--- a/linguistics/src/test/java/com/yahoo/language/process/TokenTypeTestCase.java
+++ b/linguistics/src/test/java/com/yahoo/language/process/TokenTypeTestCase.java
@@ -23,9 +23,9 @@ public class TokenTypeTestCase {
}
@Test
- public void requireThatOnlyAlphaNumericsAreIndexable() {
+ public void testIsIndexable() {
for (TokenType type : TokenType.values()) {
- if (type == TokenType.ALPHABETIC || type == TokenType.NUMERIC) {
+ if (type == TokenType.ALPHABETIC || type == TokenType.NUMERIC || type == TokenType.INDEXABLE_SYMBOL) {
assertTrue(type.isIndexable());
} else {
assertFalse(type.isIndexable());
diff --git a/linguistics/src/test/java/com/yahoo/language/simple/SimpleTokenizerTestCase.java b/linguistics/src/test/java/com/yahoo/language/simple/SimpleTokenizerTestCase.java
index f9ff66ee345..b4f080405bd 100644
--- a/linguistics/src/test/java/com/yahoo/language/simple/SimpleTokenizerTestCase.java
+++ b/linguistics/src/test/java/com/yahoo/language/simple/SimpleTokenizerTestCase.java
@@ -1,10 +1,18 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.language.simple;
+import com.yahoo.language.Language;
import com.yahoo.language.process.AbstractTokenizerTestCase;
import com.yahoo.language.process.StemMode;
+import com.yahoo.language.process.Token;
import org.junit.Test;
+import java.util.Iterator;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
/**
* @author Steinar Knutsen
* @author bratseth
@@ -33,4 +41,15 @@ public class SimpleTokenizerTestCase extends AbstractTokenizerTestCase {
" ", "gods", ".", "running", ")");
}
+ @Test
+ public void testTokenizeEmojis() {
+ TokenizerTester tester = new TokenizerTester().setStemMode(StemMode.ALL);
+
+ String emoji1 = "\uD83D\uDD2A"; // 🔪
+ String emoji2 = "\uD83D\uDE00"; // 😀
+ tester.assertTokens(emoji1, emoji1);
+ tester.assertTokens(emoji1 + "foo", emoji1, "foo");
+ tester.assertTokens(emoji1 + emoji2, emoji1, emoji2);
+ }
+
}
diff --git a/logd/src/logd/empty_forwarder.cpp b/logd/src/logd/empty_forwarder.cpp
index dda03c46c01..b601ea6d890 100644
--- a/logd/src/logd/empty_forwarder.cpp
+++ b/logd/src/logd/empty_forwarder.cpp
@@ -5,6 +5,7 @@
#include <vespa/log/exceptions.h>
#include <vespa/log/log_message.h>
#include <vespa/vespalib/util/size_literals.h>
+#include <cassert>
#include <vespa/log/log.h>
LOG_SETUP(".logd.empty_forwarder");
diff --git a/logforwarder/src/apps/vespa-logforwarder-start/splunk-starter.cpp b/logforwarder/src/apps/vespa-logforwarder-start/splunk-starter.cpp
index 23c2565b0af..ca872266708 100644
--- a/logforwarder/src/apps/vespa-logforwarder-start/splunk-starter.cpp
+++ b/logforwarder/src/apps/vespa-logforwarder-start/splunk-starter.cpp
@@ -36,6 +36,15 @@ cfFilePath(const vespalib::string &parent, const vespalib::string &filename) {
return path + "/" + filename;
}
+vespalib::string splunkCertPath(const vespalib::string &parent, const vespalib::string &filename) {
+ vespalib::string path = parent;
+ path = fixDir(path, "var");
+ path = fixDir(path, "lib");
+ path = fixDir(path, "sia");
+ path = fixDir(path, "certs");
+ return path + "/" + filename;
+}
+
void appendFile(FILE *target, const vespalib::string &filename) {
FILE *fp = fopen(filename.c_str(), "r");
if (fp != NULL) {
@@ -95,12 +104,12 @@ void SplunkStarter::gotConfig(const LogforwarderConfig& config) {
vespalib::string clientCert = clientCertFile();
vespalib::string clientKey = clientKeyFile();
if (!clientCert.empty() && !clientKey.empty()) {
- vespalib::string certPath = cfFilePath(config.splunkHome, "clientcert.pem");
+ vespalib::string certPath = splunkCertPath(config.splunkHome, "servercert.pem");
tmpPath = certPath + ".new";
fp = fopen(tmpPath.c_str(), "w");
appendFile(fp, clientCert);
appendFile(fp, clientKey);
- appendFile(fp, "/etc/ssl/certs/ca-bundle.crt");
+ appendFile(fp, "/opt/yahoo/share/ssl/certs/athenz_certificate_bundle.pem");
fclose(fp);
rename(tmpPath.c_str(), certPath.c_str());
@@ -113,6 +122,21 @@ void SplunkStarter::gotConfig(const LogforwarderConfig& config) {
fclose(fp);
rename(tmpPath.c_str(), path.c_str());
}
+ path = cfFilePath(config.splunkHome, "server.conf");
+ tmpPath = path + ".new";
+ fp = fopen(tmpPath.c_str(), "w");
+ if (fp != NULL) {
+ fprintf(fp, "[sslConfig]\n");
+ fprintf(fp, "enableSplunkdSSL = true\n");
+ fprintf(fp, "requireClientCert = true\n");
+ fprintf(fp, "sslRootCAPath = /opt/yahoo/share/ssl/certs/athenz_certificate_bundle.pem\n");
+ fprintf(fp, "serverCert = %s\n", certPath.c_str());
+ fprintf(fp, "\n");
+ fprintf(fp, "[httpServer]\n");
+ fprintf(fp, "disableDefaultPort = true\n");
+ fclose(fp);
+ rename(tmpPath.c_str(), path.c_str());
+ }
}
if (config.clientName.size() == 0 ||
config.deploymentServer.size() == 0)
diff --git a/maven-plugins/allowed-maven-dependencies.txt b/maven-plugins/allowed-maven-dependencies.txt
index 29c5fa69429..f2334a6ef00 100644
--- a/maven-plugins/allowed-maven-dependencies.txt
+++ b/maven-plugins/allowed-maven-dependencies.txt
@@ -15,7 +15,7 @@ commons-io:commons-io:2.11.0
javax.annotation:javax.annotation-api:1.2
javax.inject:javax.inject:1
org.apache.commons:commons-collections4:4.2
-org.apache.commons:commons-compress:1.22
+org.apache.commons:commons-compress:1.23.0
org.apache.commons:commons-lang3:3.12.0
org.apache.maven:maven-archiver:3.6.0
org.apache.maven:maven-artifact:3.8.7
diff --git a/messagebus/src/tests/routing/routing.cpp b/messagebus/src/tests/routing/routing.cpp
index 6cbcb58b24a..e33996dcfb4 100644
--- a/messagebus/src/tests/routing/routing.cpp
+++ b/messagebus/src/tests/routing/routing.cpp
@@ -84,7 +84,7 @@ private:
public:
RemoveReplyPolicyFactory(bool selectOnRetry,
std::vector<uint32_t> consumableErrors,
- uint32_t idxRemove);
+ uint32_t idxRemove) noexcept;
~RemoveReplyPolicyFactory() override;
IRoutingPolicy::UP create(const string &param) override;
};
@@ -93,10 +93,10 @@ RemoveReplyPolicyFactory::~RemoveReplyPolicyFactory() = default;
RemoveReplyPolicyFactory::RemoveReplyPolicyFactory(bool selectOnRetry,
std::vector<uint32_t> consumableErrors,
- uint32_t idxRemove) :
- _selectOnRetry(selectOnRetry),
- _consumableErrors(std::move(consumableErrors)),
- _idxRemove(idxRemove)
+ uint32_t idxRemove) noexcept
+ : _selectOnRetry(selectOnRetry),
+ _consumableErrors(std::move(consumableErrors)),
+ _idxRemove(idxRemove)
{
// empty
}
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/MetricsManager.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/MetricsManager.java
index 7ce8dd12b30..32c59aed67b 100644
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/MetricsManager.java
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/MetricsManager.java
@@ -9,7 +9,6 @@ import ai.vespa.metricsproxy.metric.model.DimensionId;
import ai.vespa.metricsproxy.metric.model.MetricsPacket;
import ai.vespa.metricsproxy.service.VespaService;
import ai.vespa.metricsproxy.service.VespaServices;
-import com.yahoo.component.Vtag;
import java.time.Duration;
import java.time.Instant;
@@ -138,7 +137,6 @@ public class MetricsManager {
private Map<DimensionId, String> getGlobalDimensions() {
Map<DimensionId, String> globalDimensions = new LinkedHashMap<>(applicationDimensions.getDimensions());
globalDimensions.putAll(nodeDimensions.getDimensions());
- globalDimensions.put(VESPA_VERSION, Vtag.currentVersion.toFullString());
return globalDimensions;
}
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/yamas/YamasHandler.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/yamas/YamasHandler.java
index 41c7542b613..eb635d8c641 100644
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/yamas/YamasHandler.java
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/yamas/YamasHandler.java
@@ -8,17 +8,20 @@ import ai.vespa.metricsproxy.metric.dimensions.ApplicationDimensions;
import ai.vespa.metricsproxy.metric.dimensions.NodeDimensions;
import ai.vespa.metricsproxy.metric.model.MetricsPacket;
import ai.vespa.metricsproxy.metric.model.json.JsonRenderingException;
-import ai.vespa.metricsproxy.metric.model.json.YamasJsonUtil;
import ai.vespa.metricsproxy.node.NodeMetricGatherer;
import ai.vespa.metricsproxy.service.VespaServices;
import com.yahoo.component.annotation.Inject;
import com.yahoo.container.handler.metrics.ErrorResponse;
import com.yahoo.container.handler.metrics.HttpHandlerBase;
-import com.yahoo.container.handler.metrics.JsonResponse;
import com.yahoo.container.jdisc.HttpResponse;
import com.yahoo.restapi.Path;
+import com.yahoo.slime.JsonFormat;
+import com.yahoo.slime.Slime;
+import java.io.IOException;
+import java.io.OutputStream;
import java.net.URI;
+import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.Executor;
@@ -33,10 +36,12 @@ import static com.yahoo.jdisc.Response.Status.OK;
public class YamasHandler extends HttpHandlerBase {
public static final String V1_PATH = "/yamas/v1";
- private static final String VALUES_PATH = V1_PATH + "/values";
+ public static final String VALUES_PATH = V1_PATH + "/values";
+ public static final String CONSUMERS_PATH = V1_PATH + "/consumers";
private final ValuesFetcher valuesFetcher;
private final NodeMetricGatherer nodeMetricGatherer;
+ private final MetricsConsumers metricsConsumers;
@Inject
public YamasHandler(Executor executor,
@@ -48,23 +53,44 @@ public class YamasHandler extends HttpHandlerBase {
super(executor);
valuesFetcher = new ValuesFetcher(metricsManager, vespaServices, metricsConsumers);
this.nodeMetricGatherer = new NodeMetricGatherer(metricsManager, vespaServices, applicationDimensions, nodeDimensions);
+ this.metricsConsumers = metricsConsumers;
}
@Override
public Optional<HttpResponse> doHandle(URI requestUri, Path apiPath, String consumer) {
- if (apiPath.matches(V1_PATH)) return Optional.of(resourceListResponse(requestUri, List.of(VALUES_PATH)));
+ if (apiPath.matches(V1_PATH)) return Optional.of(resourceListResponse(requestUri, List.of(VALUES_PATH, CONSUMERS_PATH)));
if (apiPath.matches(VALUES_PATH)) return Optional.of(valuesResponse(consumer));
+ if (apiPath.matches(CONSUMERS_PATH)) return Optional.of(consumersResponse());
return Optional.empty();
}
private HttpResponse valuesResponse(String consumer) {
try {
- List<MetricsPacket> metrics = consumer == null ? valuesFetcher.fetchAllMetrics() : valuesFetcher.fetch(consumer);
- metrics.addAll(nodeMetricGatherer.gatherMetrics()); // TODO: Currently only add these metrics in this handler. Eventually should be included in all handlers
+ List<MetricsPacket> metrics = new ArrayList<>(consumer == null ? valuesFetcher.fetchAllMetrics() : valuesFetcher.fetch(consumer));
+ if (consumer == null || "Vespa".equalsIgnoreCase(consumer)) {
+ metrics.addAll(nodeMetricGatherer.gatherMetrics()); // TODO: Currently only add these metrics in this handler. Eventually should be included in all handlers
+ }
return new YamasResponse(OK, metrics);
} catch (JsonRenderingException e) {
return new ErrorResponse(INTERNAL_SERVER_ERROR, e.getMessage());
}
}
+ private HttpResponse consumersResponse() {
+ var slime = new Slime();
+ var consumers = slime.setObject().setArray("consumers");
+ metricsConsumers.getAllConsumers().forEach(consumer -> consumers.addString(consumer.id));
+ return new HttpResponse(OK) {
+ @Override
+ public String getContentType() {
+ return "application/json";
+ }
+
+ @Override
+ public void render(OutputStream outputStream) throws IOException {
+ new JsonFormat(true).encode(outputStream, slime);
+ }
+ };
+ }
+
}
\ No newline at end of file
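
A minimal sketch of the Slime rendering the new consumers endpoint relies on, assuming only com.yahoo.slime; the consumer ids are sample values.

import com.yahoo.slime.JsonFormat;
import com.yahoo.slime.Slime;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.List;

// Illustrative sketch only: builds {"consumers":[...]} the same way consumersResponse()
// does and renders it compactly with JsonFormat(true).
class ConsumersJsonSketch {
    public static void main(String[] args) throws IOException {
        var slime = new Slime();
        var consumers = slime.setObject().setArray("consumers");
        List.of("default", "custom-consumer").forEach(consumers::addString);
        var out = new ByteArrayOutputStream();
        new JsonFormat(true).encode(out, slime);
        System.out.println(out); // {"consumers":["default","custom-consumer"]}
    }
}
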
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/node/NodeMetricGatherer.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/node/NodeMetricGatherer.java
index 1a7c1b4df4d..02785674103 100644
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/node/NodeMetricGatherer.java
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/node/NodeMetricGatherer.java
@@ -5,6 +5,7 @@ import ai.vespa.metricsproxy.core.MetricsManager;
import ai.vespa.metricsproxy.metric.dimensions.ApplicationDimensions;
import ai.vespa.metricsproxy.metric.dimensions.NodeDimensions;
import ai.vespa.metricsproxy.metric.model.ConsumerId;
+import ai.vespa.metricsproxy.metric.model.DimensionId;
import ai.vespa.metricsproxy.metric.model.MetricId;
import ai.vespa.metricsproxy.metric.model.MetricsPacket;
import ai.vespa.metricsproxy.metric.model.ServiceId;
@@ -80,6 +81,14 @@ public class NodeMetricGatherer {
builder.putMetric(MetricId.toMetricId(key), metrics.get(key).asLong());
}
}
+ if (object.has("dimensions")) {
+ JsonNode dimensions = object.get("dimensions");
+ Iterator<?> keys = dimensions.fieldNames();
+ while (keys.hasNext()) {
+ String key = (String) keys.next();
+ builder.putDimension(DimensionId.toDimensionId(key), dimensions.get(key).asText());
+ }
+ }
builder.addConsumers(Set.of(ConsumerId.toConsumerId("Vespa")));
builders.add(builder);
}
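
A small sketch of the dimension copying added to NodeMetricGatherer, assuming a Jackson JsonNode shaped like the node reports; the field names and values are placeholders.

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative sketch only: copies every entry under "dimensions" into a flat map,
// mirroring how the gatherer forwards them to the MetricsPacket builder.
class DimensionCopySketch {
    public static void main(String[] args) throws Exception {
        JsonNode object = new ObjectMapper().readTree(
                "{\"dimensions\":{\"host\":\"node1\",\"flavor\":\"d8r\"}}"); // placeholder report
        Map<String, String> dimensions = new LinkedHashMap<>();
        if (object.has("dimensions")) {
            JsonNode node = object.get("dimensions");
            Iterator<String> keys = node.fieldNames();
            while (keys.hasNext()) {
                String key = keys.next();
                dimensions.put(key, node.get(key).asText());
            }
        }
        System.out.println(dimensions); // {host=node1, flavor=d8r}
    }
}
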
diff --git a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/core/MetricsManagerTest.java b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/core/MetricsManagerTest.java
index 70f126a8514..12995adffc7 100644
--- a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/core/MetricsManagerTest.java
+++ b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/core/MetricsManagerTest.java
@@ -112,7 +112,6 @@ public class MetricsManagerTest {
}
private void assertGlobalDimensions(Map<DimensionId, String> dimensions) {
- assertTrue(dimensions.containsKey(VESPA_VERSION));
assertEquals("value", dimensions.get(toDimensionId("global")));
assertEquals("metric-dim", dimensions.get(toDimensionId("dim0")));
}
diff --git a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/prometheus/PrometheusHandlerTest.java b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/prometheus/PrometheusHandlerTest.java
index c546f76f3da..2c216272022 100644
--- a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/prometheus/PrometheusHandlerTest.java
+++ b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/prometheus/PrometheusHandlerTest.java
@@ -68,7 +68,6 @@ public class PrometheusHandlerTest extends HttpHandlerTestBase {
public void response_contains_node_metrics() {
String cpu = getLine(valuesResponse, CPU_METRIC + "{");
assertTrue(cpu.contains("} 12.345")); // metric value
- assertTrue(cpu.contains("{vespaVersion="));
}
@Test
diff --git a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/yamas/YamasHandlerTest.java b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/yamas/YamasHandlerTest.java
new file mode 100644
index 00000000000..a4e61d5965e
--- /dev/null
+++ b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/yamas/YamasHandlerTest.java
@@ -0,0 +1,55 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.metricsproxy.http.yamas;
+
+import ai.vespa.metricsproxy.http.HttpHandlerTestBase;
+import com.yahoo.container.jdisc.RequestHandlerTestDriver;
+import com.yahoo.slime.ArrayTraverser;
+import com.yahoo.slime.SlimeUtils;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Executors;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class YamasHandlerTest extends HttpHandlerTestBase {
+
+ private static final String VALUES_URI = URI_BASE + YamasHandler.VALUES_PATH;
+ private static final String CONSUMERS_URI = URI_BASE + YamasHandler.CONSUMERS_PATH;
+
+ private static String valuesResponse;
+ private static String consumerResponse;
+
+ @BeforeClass
+ public static void setup() {
+ YamasHandler handler = new YamasHandler(Executors.newSingleThreadExecutor(),
+ getMetricsManager(),
+ vespaServices,
+ getMetricsConsumers(),
+ getApplicationDimensions(),
+ getNodeDimensions());
+ testDriver = new RequestHandlerTestDriver(handler);
+ valuesResponse = testDriver.sendRequest(VALUES_URI).readAll();
+ consumerResponse = testDriver.sendRequest(CONSUMERS_URI).readAll();
+ }
+
+
+ @Test
+ public void response_contains_consumer_list() {
+ var slime = SlimeUtils.jsonToSlime(consumerResponse.getBytes());
+ var consumers = new ArrayList<>();
+ slime.get().field("consumers").traverse((ArrayTraverser) (idx, object) ->
+ consumers.add(object.asString())
+ );
+ assertEquals(List.of("default", "custom-consumer"), consumers);
+ }
+
+ @Test
+ public void value_response_contains_coredump_metric() {
+ assertTrue(valuesResponse.contains("\"application\":\"system-coredumps-processing\",\"routing\":{\"yamas\":{\"namespaces\":[\"Vespa\"]}}"));
+ }
+
+}
diff --git a/metrics/src/main/java/ai/vespa/metrics/ConfigServerMetrics.java b/metrics/src/main/java/ai/vespa/metrics/ConfigServerMetrics.java
index 013c50e77cf..1c61b65f77b 100644
--- a/metrics/src/main/java/ai/vespa/metrics/ConfigServerMetrics.java
+++ b/metrics/src/main/java/ai/vespa/metrics/ConfigServerMetrics.java
@@ -27,7 +27,6 @@ public enum ConfigServerMetrics implements VespaMetrics {
MAINTENANCE_DEPLOYMENT_TRANSIENT_FAILURE("maintenanceDeployment.transientFailure", Unit.OPERATION, "Number of maintenance deployments that failed with a transient failure"),
MAINTENANCE_DEPLOYMENT_FAILURE("maintenanceDeployment.failure", Unit.OPERATION, "Number of maintenance deployments that failed with a permanent failure"),
-
// ZooKeeper related metrics
ZK_CONNECTIONS_LOST("configserver.zkConnectionLost", Unit.CONNECTION, "Number of ZooKeeper connections lost"),
ZK_RECONNECTED("configserver.zkReconnected", Unit.CONNECTION, "Number of ZooKeeper reconnections"),
@@ -45,8 +44,71 @@ public enum ConfigServerMetrics implements VespaMetrics {
ORCHESTRATOR_LOCK_ACQUIRE_TIMEOUT("orchestrator.lock.acquire-timedout", Unit.OPERATION, "Number of times zookeeper lock couldn't be acquired within timeout"),
ORCHESTRATOR_LOCK_ACQUIRE("orchestrator.lock.acquire", Unit.OPERATION, "Number of attempts to acquire zookeeper lock"),
ORCHESTRATOR_LOCK_ACQUIRED("orchestrator.lock.acquired", Unit.OPERATION, "Number of times zookeeper lock was acquired"),
- ORCHESTRATOR_LOCK_HOLD_LATENCY("orchestrator.lock.hold-latency", Unit.SECOND, "Time zookeeper lock was held before it was released");
+ ORCHESTRATOR_LOCK_HOLD_LATENCY("orchestrator.lock.hold-latency", Unit.SECOND, "Time zookeeper lock was held before it was released"),
+
+ // Node repository metrics
+ NODES_ACTIVE("nodes.active", Unit.NODE, "The number of active nodes in a cluster"),
+ NODES_NON_ACTIVE("nodes.nonActive", Unit.NODE, "The number of non-active nodes in a cluster"),
+ NODES_NON_ACTIVE_FRACTION("nodes.nonActiveFraction", Unit.NODE, "The fraction of non-active nodes vs total nodes in a cluster"),
+ NODES_EXCLUSIVE_SWITCH_FRACTION("nodes.exclusiveSwitchFraction", Unit.FRACTION, "The fraction of nodes in a cluster on exclusive network switches"),
+
+ CLUSTER_COST("cluster.cost", Unit.DOLLAR_PER_HOUR, "The cost of the nodes allocated to a certain cluster, in $/hr"),
+ CLUSTER_LOAD_IDEAL_CPU("cluster.load.ideal.cpu", Unit.FRACTION, "The ideal cpu load of a certain cluster"),
+ CLUSTER_LOAD_IDEAL_MEMORY("cluster.load.ideal.memory", Unit.FRACTION, "The ideal memory load of a certain cluster"),
+ CLUSTER_LOAD_IDEAL_DISK("cluster.load.ideal.disk", Unit.FRACTION, "The ideal disk load of a certain cluster"),
+
+ ZONE_WORKING("zone.working", Unit.BINARY, "The value 1 if zone is considered healthy, 0 if not. This is decided by considering the number of non-active nodes vs the number of active nodes in a zone"),
+ CACHE_NODE_OBJECT_HIT_RATE("cache.nodeObject.hitRate", Unit.FRACTION, "The fraction of cache hits vs cache lookups for the node object cache"),
+ CACHE_NODE_OBJECT_EVICTION_COUNT("cache.nodeObject.evictionCount", Unit.ITEM, "The number of cache elements evicted from the node object cache"),
+ CACHE_NODE_OBJECT_SIZE("cache.nodeObject.size", Unit.ITEM, "The number of cache elements in the node object cache"),
+ CACHE_CURATOR_HIT_RATE("cache.curator.hitRate", Unit.FRACTION, "The fraction of cache hits vs cache lookups for the curator cache"),
+ CACHE_CURATOR_EVICTION_COUNT("cache.curator.evictionCount", Unit.ITEM, "The number of cache elements evicted from the curator cache"),
+ CACHE_CURATOR_SIZE("cache.curator.size", Unit.ITEM, "The number of cache elements in the curator cache"),
+ WANTED_RESTART_GENERATION("wantedRestartGeneration", Unit.GENERATION, "Wanted restart generation for tenant node"),
+ CURRENT_RESTART_GENERATION("currentRestartGeneration", Unit.GENERATION, "Current restart generation for tenant node"),
+ WANT_TO_RESTART("wantToRestart", Unit.BINARY, "One if node wants to restart, zero if not"),
+ WANTED_REBOOT_GENERATION("wantedRebootGeneration", Unit.GENERATION, "Wanted reboot generation for tenant node"),
+ CURRENT_REBOOT_GENERATION("currentRebootGeneration", Unit.GENERATION, "Current reboot generation for tenant node"),
+ WANT_TO_REBOOT("wantToReboot", Unit.BINARY, "One if node wants to reboot, zero if not"),
+ RETIRED("retired", Unit.BINARY, "One if node is retired, zero if not"),
+ WANTED_VESPA_VERSION("wantedVespaVersion", Unit.VERSION, "Wanted vespa version for the node, in the form <MINOR.PATCH>. Major version is not included here"),
+ CURRENT_VESPA_VERSION("currentVespaVersion", Unit.VERSION, "Current vespa version for the node, in the form <MINOR.PATCH>. Major version is not included here"),
+ WANT_TO_CHANGE_VESPA_VERSION("wantToChangeVespaVersion", Unit.BINARY, "One if node wants to change Vespa version, zero if not"),
+ HAS_WIRE_GUARD_KEY("hasWireguardKey", Unit.BINARY, "One if node has a WireGuard key, zero if not"),
+ WANT_TO_RETIRE("wantToRetire", Unit.BINARY, "One if node wants to retire, zero if not"),
+ WANT_TO_DEPROVISION("wantToDeprovision", Unit.BINARY, "One if node wants to be deprovisioned, zero if not"),
+ FAIL_REPORT("failReport", Unit.BINARY, "One if there is a fail report for the node, zero if not"),
+ SUSPENDED("suspended", Unit.BINARY, "One if the node is suspended, zero if not"),
+ SUSPENDED_SECONDS("suspendedSeconds", Unit.SECOND, "The number of seconds the node has been suspended"),
+ NUMBER_OF_SERVICES_UP("numberOfServicesUp", Unit.INSTANCE, "The number of services confirmed to be running on a node"),
+ NUMBER_OF_SERVICES_NOT_CHECKED("numberOfServicesNotChecked", Unit.INSTANCE, "The number of services supposed to run on a node that have not been checked"),
+ NUMBER_OF_SERVICES_DOWN("numberOfServicesDown", Unit.INSTANCE, "The number of services confirmed to not be running on a node"),
+ SOME_SERVICES_DOWN("someServicesDown", Unit.BINARY, "One if one or more services has been confirmed to not run on a node, zero if not"),
+ NUMBER_OF_SERVICES_UNKNOWN("numberOfServicesUnknown", Unit.INSTANCE, "The number of services on a node whose running state is unknown to the config server"),
+ NODE_FAILER_BAD_NODE("nodeFailerBadNode", Unit.BINARY, "One if the node is failed due to being bad, zero if not"),
+ DOWN_IN_NODE_REPO("downInNodeRepo", Unit.BINARY, "One if the node is registered as being down in the node repository, zero if not"),
+ NUMBER_OF_SERVICES("numberOfServices", Unit.INSTANCE, "Number of services supposed to run on a node"),
+ LOCK_ATTEMPT_ACQUIRE_MAX_ACTIVE_LATENCY("lockAttempt.acquireMaxActiveLatency", Unit.SECOND, "Maximum duration for keeping a lock, ending during the metrics snapshot, or still being kept at the end of this snapshot period"),
+ LOCK_ATTEMPT_ACQUIRE_HZ("lockAttempt.acquireHz", Unit.OPERATION_PER_SECOND, "Average number of locks acquired per second during the snapshot period"),
+ LOCK_ATTEMPT_ACQUIRE_LOAD("lockAttempt.acquireLoad", Unit.OPERATION, "Average number of locks held concurrently during the snapshot period"),
+ LOCK_ATTEMPT_LOCKED_LATENCY("lockAttempt.lockedLatency", Unit.SECOND, "Longest lock duration in the snapshot period"),
+ LOCK_ATTEMPT_LOCKED_LOAD("lockAttempt.lockedLoad", Unit.OPERATION, "Average number of locks held concurrently during the snapshot period"),
+ LOCK_ATTEMPT_ACQUIRE_TIMED_OUT("lockAttempt.acquireTimedOut", Unit.OPERATION, "Number of locking attempts that timed out during the snapshot period"),
+ LOCK_ATTEMPT_DEADLOCK("lockAttempt.deadlock", Unit.OPERATION, "Number of lock grab deadlocks detected during the snapshot period"),
+ LOCK_ATTEMPT_ERRORS("lockAttempt.errors", Unit.OPERATION, "Number of other lock-related errors detected during the snapshot period"),
+ HOSTED_VESPA_DOCKER_TOTAL_CAPACITY_CPU("hostedVespa.docker.totalCapacityCpu", Unit.VCPU, "Total number of VCPUs on tenant hosts managed by hosted Vespa in a zone"),
+ HOSTED_VESPA_DOCKER_TOTAL_CAPACITY_MEM("hostedVespa.docker.totalCapacityMem", Unit.GIGABYTE, "Total amount of memory on tenant hosts managed by hosted Vespa in a zone"),
+ HOSTED_VESPA_DOCKER_TOTAL_CAPACITY_DISK("hostedVespa.docker.totalCapacityDisk", Unit.GIGABYTE, "Total amount of disk space on tenant hosts managed by hosted Vespa in a zone"),
+ HOSTED_VESPA_DOCKER_FREE_CAPACITY_CPU("hostedVespa.docker.freeCapacityCpu", Unit.VCPU, "Total number of free VCPUs on tenant hosts managed by hosted Vespa in a zone"),
+ HOSTED_VESPA_DOCKER_FREE_CAPACITY_MEM("hostedVespa.docker.freeCapacityMem", Unit.GIGABYTE, "Total amount of free memory on tenant hosts managed by hosted Vespa in a zone"),
+ HOSTED_VESPA_DOCKER_FREE_CAPACITY_DISK("hostedVespa.docker.freeCapacityDisk", Unit.GIGABYTE, "Total amount of free disk space on tenant hosts managed by hosted Vespa in a zone"),
+ HOSTED_VESPA_DOCKER_ALLOCATED_CAPACITY_CPU("hostedVespa.docker.allocatedCapacityCpu", Unit.VCPU, "Total number of allocated VCPUs on tenant hosts managed by hosted Vespa in a zone"),
+ HOSTED_VESPA_DOCKER_ALLOCATED_CAPACITY_MEM("hostedVespa.docker.allocatedCapacityMem", Unit.GIGABYTE, "Total amount of allocated memory on tenant hosts managed by hosted Vespa in a zone"),
+ HOSTED_VESPA_DOCKER_ALLOCATED_CAPACITY_DISK("hostedVespa.docker.allocatedCapacityDisk", Unit.GIGABYTE, "Total amount of allocated disk space on tenant hosts managed by hosted Vespa in a zone"),
+ HOSTED_VESPA_BREAKFIXED_HOSTS("hostedVespa.breakfixedHosts", Unit.HOST, "Number of hosts managed that are breakfixed in a zone"),
+ HOSTED_VESPA_PENDING_REDEPLOYMENTS("hostedVespa.pendingRedeployments", Unit.TASK, "The number of hosted Vespa re-deployments pending"),
+ HOSTED_VESPA_DOCKER_SKEW("hostedVespa.docker.skew", Unit.FRACTION, "A number in the range 0..1 indicating how well allocated resources are balanced with availability on hosts");
private final String name;
private final Unit unit;
diff --git a/metrics/src/main/java/ai/vespa/metrics/Unit.java b/metrics/src/main/java/ai/vespa/metrics/Unit.java
index a2123d72246..d514b9e9839 100644
--- a/metrics/src/main/java/ai/vespa/metrics/Unit.java
+++ b/metrics/src/main/java/ai/vespa/metrics/Unit.java
@@ -12,11 +12,15 @@ public enum Unit {
CONNECTION(BaseUnit.CONNECTION, "A link used for communication between a client and a server"),
DOCUMENT(BaseUnit.DOCUMENT, "Vespa document, a collection of fields defined in a schema file"),
DOCUMENTID(BaseUnit.DOCUMENTID, "A unique document identifier"),
+ DOLLAR_PER_HOUR(BaseUnit.DOLLAR, BaseUnit.HOUR, "Total current cost of the cluster in $/hr"),
FAILURE(BaseUnit.FAILURE, "Failures, typically for requests, operations or nodes"),
FILE(BaseUnit.FILE, "Data file stored on the disk on a node"),
FRACTION(BaseUnit.FRACTION, "A value in the range [0..1]. Higher values can occur for some metrics, but would indicate the value is outside of the allowed range."),
+ GENERATION(BaseUnit.GENERATION, "Typically the generation of configuration or an application package"),
+ GIGABYTE(BaseUnit.GIGABYTE, "One billion bytes"),
HIT(BaseUnit.HIT, "Document that meets the filtering/restriction criteria specified by a given query"),
HIT_PER_QUERY(BaseUnit.HIT, BaseUnit.QUERY, "Number of hits per query over a period of time"),
+ HOST(BaseUnit.HOST, "Bare metal computer that contains nodes"),
INSTANCE(BaseUnit.INSTANCE, "Typically tenant or application"),
ITEM(BaseUnit.ITEM, "Object or unit maintained in e.g. a queue"),
MILLISECOND(BaseUnit.MILLISECOND, "Millisecond, 1/1000 of a second"),
@@ -37,6 +41,8 @@ public enum Unit {
SESSION(BaseUnit.SESSION, "A set of operations taking place during one connection or as part of a higher level operation"),
TASK(BaseUnit.TASK, "Piece of work executed by a server, e.g. to perform back-ground data maintenance"),
THREAD(BaseUnit.THREAD, "Computer thread for executing e.g. tasks, operations or queries"),
+ VCPU(BaseUnit.VCPU, "Virtual CPU"),
+
VERSION(BaseUnit.VERSION, "Software or config version"),
WAKEUP(BaseUnit.WAKEUP, "Computer thread wake-ups for doing some work");
@@ -80,10 +86,15 @@ public enum Unit {
CONNECTION("connection"),
DOCUMENT("document"),
DOCUMENTID("documentid"),
+ DOLLAR("dollar"),
FAILURE("failure"),
FILE("file"),
FRACTION("fraction"),
+ GENERATION("generation"),
+ GIGABYTE("gigabyte"),
HIT("hit"),
+ HOST("host"),
+ HOUR("hour"),
INSTANCE("instance"),
ITEM("item"),
MILLISECOND("millisecond", "ms"),
@@ -102,6 +113,7 @@ public enum Unit {
SESSION("session"),
TASK("task"),
THREAD("thread"),
+ VCPU("vcpu"),
VERSION("version"),
WAKEUP("wakeup");
diff --git a/model-evaluation/pom.xml b/model-evaluation/pom.xml
index 7c2ee046556..1e0ac4debbd 100644
--- a/model-evaluation/pom.xml
+++ b/model-evaluation/pom.xml
@@ -90,6 +90,9 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
+ <configuration>
+ <bundleType>CORE</bundleType>
+ </configuration>
</plugin>
<plugin>
<groupId>com.yahoo.vespa</groupId>
diff --git a/model-integration/pom.xml b/model-integration/pom.xml
index 681003fdc89..854e15298c6 100644
--- a/model-integration/pom.xml
+++ b/model-integration/pom.xml
@@ -81,6 +81,12 @@
<scope>provided</scope>
</dependency>
<dependency>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>configdefinitions</artifactId>
+ <version>${project.version}</version>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
<groupId>net.java.dev.jna</groupId>
<artifactId>jna</artifactId>
<scope>provided</scope>
@@ -128,6 +134,10 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
+ <configuration>
+ <!-- defines package 'com.yahoo.embedding' which is also defined in 'configdefinitions' -->
+ <suppressWarningOverlappingPackages>true</suppressWarningOverlappingPackages>
+ </configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
diff --git a/model-integration/src/main/java/ai/vespa/embedding/BertBaseEmbedder.java b/model-integration/src/main/java/ai/vespa/embedding/BertBaseEmbedder.java
index b172ef7beee..a12424c7d12 100644
--- a/model-integration/src/main/java/ai/vespa/embedding/BertBaseEmbedder.java
+++ b/model-integration/src/main/java/ai/vespa/embedding/BertBaseEmbedder.java
@@ -10,7 +10,6 @@ import com.yahoo.language.process.Embedder;
import com.yahoo.language.wordpiece.WordPieceEmbedder;
import com.yahoo.tensor.IndexedTensor;
import com.yahoo.tensor.Tensor;
-import com.yahoo.tensor.TensorAddress;
import com.yahoo.tensor.TensorType;
import java.util.ArrayList;
@@ -39,7 +38,7 @@ public class BertBaseEmbedder extends AbstractComponent implements Embedder {
private final String attentionMaskName;
private final String tokenTypeIdsName;
private final String outputName;
- private final String poolingStrategy;
+ private final PoolingStrategy poolingStrategy;
private final WordPieceEmbedder tokenizer;
private final OnnxEvaluator evaluator;
@@ -53,7 +52,7 @@ public class BertBaseEmbedder extends AbstractComponent implements Embedder {
attentionMaskName = config.transformerAttentionMask();
tokenTypeIdsName = config.transformerTokenTypeIds();
outputName = config.transformerOutput();
- poolingStrategy = config.poolingStrategy().toString();
+ poolingStrategy = PoolingStrategy.fromString(config.poolingStrategy().toString());
OnnxEvaluatorOptions options = new OnnxEvaluatorOptions();
options.setExecutionMode(config.onnxExecutionMode().toString());
@@ -124,20 +123,7 @@ public class BertBaseEmbedder extends AbstractComponent implements Embedder {
Tensor tokenEmbeddings = outputs.get(outputName);
- Tensor.Builder builder = Tensor.Builder.of(type);
- if (poolingStrategy.equals("mean")) { // average over tokens
- Tensor summedEmbeddings = tokenEmbeddings.sum("d1");
- Tensor summedAttentionMask = attentionMask.expand("d0").sum("d1");
- Tensor averaged = summedEmbeddings.join(summedAttentionMask, (x, y) -> x / y);
- for (int i = 0; i < type.dimensions().get(0).size().get(); i++) {
- builder.cell(averaged.get(TensorAddress.of(0,i)), i);
- }
- } else { // CLS - use first token
- for (int i = 0; i < type.dimensions().get(0).size().get(); i++) {
- builder.cell(tokenEmbeddings.get(TensorAddress.of(0,0,i)), i);
- }
- }
- return builder.build();
+ return poolingStrategy.toSentenceEmbedding(type, tokenEmbeddings, attentionMask);
}
private List<Integer> embedWithSeparatorTokens(String text, Context context, int maxLength) {
diff --git a/model-integration/src/main/java/ai/vespa/embedding/PoolingStrategy.java b/model-integration/src/main/java/ai/vespa/embedding/PoolingStrategy.java
new file mode 100644
index 00000000000..28104d8eeef
--- /dev/null
+++ b/model-integration/src/main/java/ai/vespa/embedding/PoolingStrategy.java
@@ -0,0 +1,46 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package ai.vespa.embedding;
+
+import com.yahoo.tensor.Tensor;
+import com.yahoo.tensor.TensorAddress;
+import com.yahoo.tensor.TensorType;
+
+/**
+ * @author bjorncs
+ */
+public enum PoolingStrategy {
+ MEAN {
+ @Override
+ public Tensor toSentenceEmbedding(TensorType type, Tensor tokenEmbeddings, Tensor attentionMask) {
+ var builder = Tensor.Builder.of(type);
+ var summedEmbeddings = tokenEmbeddings.sum("d1");
+ var summedAttentionMask = attentionMask.expand("d0").sum("d1");
+ var averaged = summedEmbeddings.join(summedAttentionMask, (x, y) -> x / y);
+ for (int i = 0; i < type.dimensions().get(0).size().get(); i++) {
+ builder.cell(averaged.get(TensorAddress.of(0, i)), i);
+ }
+ return builder.build();
+ }
+ },
+ CLS {
+ @Override
+ public Tensor toSentenceEmbedding(TensorType type, Tensor tokenEmbeddings, Tensor ignored) {
+ var builder = Tensor.Builder.of(type);
+ for (int i = 0; i < type.dimensions().get(0).size().get(); i++) {
+ builder.cell(tokenEmbeddings.get(TensorAddress.of(0,0,i)), i);
+ }
+ return builder.build();
+ }
+ };
+
+ public abstract Tensor toSentenceEmbedding(TensorType type, Tensor tokenEmbeddings, Tensor attentionMask);
+
+ public static PoolingStrategy fromString(String strategy) {
+ return switch (strategy.toLowerCase()) {
+ case "mean" -> MEAN;
+ case "cls" -> CLS;
+ default -> throw new IllegalArgumentException("Unknown pooling strategy '%s'".formatted(strategy));
+ };
+ }
+}
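A small, self-contained sketch of how the two strategies behave, using the same d0 (batch), d1 (tokens), d2 (hidden) layout as the embedder code; the tensor values and the 3-dimensional target type are made-up illustration data:

    import ai.vespa.embedding.PoolingStrategy;
    import com.yahoo.tensor.Tensor;
    import com.yahoo.tensor.TensorType;

    class PoolingStrategyExample {
        public static void main(String[] args) {
            // One batch entry, two tokens, hidden size three; both tokens are attended to.
            Tensor tokenEmbeddings = Tensor.from("tensor<float>(d0[1],d1[2],d2[3]):[[[1,2,3],[3,4,5]]]");
            Tensor attentionMask = Tensor.from("tensor<float>(d1[2]):[1,1]");
            TensorType targetType = TensorType.fromSpec("tensor<float>(x[3])");

            // MEAN averages over the token dimension: [2, 3, 4] for this input.
            Tensor mean = PoolingStrategy.fromString("mean").toSentenceEmbedding(targetType, tokenEmbeddings, attentionMask);

            // CLS takes the first token's embedding: [1, 2, 3]. The attention mask is ignored.
            Tensor cls = PoolingStrategy.CLS.toSentenceEmbedding(targetType, tokenEmbeddings, attentionMask);

            System.out.println(mean + "\n" + cls);
        }
    }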
diff --git a/model-integration/src/main/java/ai/vespa/embedding/huggingface/HuggingFaceEmbedder.java b/model-integration/src/main/java/ai/vespa/embedding/huggingface/HuggingFaceEmbedder.java
index 0c1cc80544e..f93b1a3c1f8 100644
--- a/model-integration/src/main/java/ai/vespa/embedding/huggingface/HuggingFaceEmbedder.java
+++ b/model-integration/src/main/java/ai/vespa/embedding/huggingface/HuggingFaceEmbedder.java
@@ -1,5 +1,6 @@
package ai.vespa.embedding.huggingface;
+import ai.vespa.embedding.PoolingStrategy;
import ai.vespa.modelintegration.evaluator.OnnxEvaluator;
import ai.vespa.modelintegration.evaluator.OnnxEvaluatorOptions;
import ai.vespa.modelintegration.evaluator.OnnxRuntime;
@@ -13,8 +14,6 @@ import com.yahoo.tensor.IndexedTensor;
import com.yahoo.tensor.Tensor;
import com.yahoo.tensor.TensorAddress;
import com.yahoo.tensor.TensorType;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import java.nio.file.Paths;
import java.util.List;
@@ -23,20 +22,17 @@ import java.util.Map;
@Beta
public class HuggingFaceEmbedder extends AbstractComponent implements Embedder {
- private static final Logger LOG = LoggerFactory.getLogger(HuggingFaceEmbedder.class.getName());
-
private final String inputIdsName;
private final String attentionMaskName;
private final String tokenTypeIdsName;
private final String outputName;
- private final int maxTokens;
private final boolean normalize;
private final HuggingFaceTokenizer tokenizer;
private final OnnxEvaluator evaluator;
+ private final PoolingStrategy poolingStrategy;
@Inject
public HuggingFaceEmbedder(OnnxRuntime onnx, HuggingFaceEmbedderConfig config) {
- maxTokens = config.transformerMaxTokens();
inputIdsName = config.transformerInputIds();
attentionMaskName = config.transformerAttentionMask();
tokenTypeIdsName = config.transformerTokenTypeIds();
@@ -45,7 +41,10 @@ public class HuggingFaceEmbedder extends AbstractComponent implements Embedder {
tokenizer = new HuggingFaceTokenizer.Builder()
.addSpecialTokens(true)
.addDefaultModel(Paths.get(config.tokenizerPath().toString()))
+ .setTruncation(true)
+ .setMaxLength(config.transformerMaxTokens())
.build();
+ poolingStrategy = PoolingStrategy.fromString(config.poolingStrategy().toString());
var onnxOpts = new OnnxEvaluatorOptions();
if (config.transformerGpuDevice() >= 0)
onnxOpts.setGpuDevice(config.transformerGpuDevice());
@@ -74,16 +73,7 @@ public class HuggingFaceEmbedder extends AbstractComponent implements Embedder {
@Override
public List<Integer> embed(String s, Context context) {
- var tokenIds = tokenizer.embed(s, context);
-
- int tokensSize = tokenIds.size();
-
- if (tokensSize > maxTokens) {
- Integer lastElement = tokenIds.get(tokensSize - 1);
- tokenIds = tokenIds.subList(0, maxTokens - 1);
- tokenIds.add(lastElement);
- }
- return tokenIds;
+ return tokenizer.embed(s, context);
}
@Override
@@ -97,11 +87,11 @@ public class HuggingFaceEmbedder extends AbstractComponent implements Embedder {
var encoding = tokenizer.encode(s, context.getLanguage());
Tensor inputSequence = createTensorRepresentation(encoding.ids(), "d1");
Tensor attentionMask = createTensorRepresentation(encoding.attentionMask(), "d1");
- Tensor tokenTypeIds = createTensorRepresentation(encoding.typeIds(), "d1");
+ Tensor tokenTypeIds = tokenTypeIdsName.isEmpty() ? null : createTensorRepresentation(encoding.typeIds(), "d1");
Map<String, Tensor> inputs;
- if (tokenTypeIds.isEmpty()) {
+ if (tokenTypeIdsName.isEmpty() || tokenTypeIds.isEmpty()) {
inputs = Map.of(inputIdsName, inputSequence.expand("d0"),
attentionMaskName, attentionMask.expand("d0"));
} else {
@@ -156,9 +146,6 @@ public class HuggingFaceEmbedder extends AbstractComponent implements Embedder {
return builder.build();
}
- private Tensor createAttentionMask(Tensor inputSequence) {
- return inputSequence.map((x) -> 1);
- }
}
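Truncation now happens inside the tokenizer instead of trimming the returned token list afterwards, which is why embed() can pass the text straight through. A sketch of the builder configuration; the import path and the tokenizer.json location are assumptions, while the builder calls are the ones used in the constructor above:

    import com.yahoo.language.huggingface.HuggingFaceTokenizer;  // package name assumed
    import java.nio.file.Paths;

    class TokenizerSetup {
        static HuggingFaceTokenizer build() {
            return new HuggingFaceTokenizer.Builder()
                    .addSpecialTokens(true)
                    .addDefaultModel(Paths.get("models/tokenizer.json"))  // placeholder path
                    .setTruncation(true)   // the tokenizer itself drops tokens past the limit
                    .setMaxLength(512)     // e.g. transformerMaxTokens from the embedder config
                    .build();
        }
    }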
diff --git a/model-integration/src/test/java/ai/vespa/embedding/huggingface/HuggingFaceEmbedderTest.java b/model-integration/src/test/java/ai/vespa/embedding/huggingface/HuggingFaceEmbedderTest.java
deleted file mode 100644
index 0ff9acc9a69..00000000000
--- a/model-integration/src/test/java/ai/vespa/embedding/huggingface/HuggingFaceEmbedderTest.java
+++ /dev/null
@@ -1,36 +0,0 @@
-package ai.vespa.embedding.huggingface;
-
-public class HuggingFaceEmbedderTest {
-/*
- @Test
- public void testEmbedder() {
-
- String modelPath = "src/test/models/hf/model.onnx";
- String tokenizerPath = "src/test/models/hf/tokenizer.json";
- assumeTrue(OnnxRuntime.isRuntimeAvailable(modelPath));
-
- HuggingFaceEmbedderConfig.Builder builder = new HuggingFaceEmbedderConfig.Builder();
- builder.tokenizerPath(ModelReference.valueOf(tokenizerPath));
- builder.transformerModel(ModelReference.valueOf(modelPath));
-
- HuggingFaceEmbedder embedder;
-
- try {
- embedder = new HuggingFaceEmbedder(builder.build());
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
-
- TensorType destType = TensorType.fromSpec("tensor<float>(x[768])");
- List<Integer> tokens = List.of(1,2,3,4,5);
- Tensor embedding = embedder.embedTokens(tokens, destType);
-
- System.out.println(embedding);
-
- Tensor expected = Tensor.from("tensor<float>(x[768]):[-0.025724048, 0.020880165, -0.011260326, -0.023737747, 0.06904736, -0.023877826, -0.020314846, 0.0032329028, -0.015538657, -0.07391539, 0.017203337, -0.011266706, 0.010958312, 0.011904508, -0.013701068, 0.027089471, -0.016722197, -0.020041, 0.021507785, 0.023721753, -0.07874908, 0.011369475, 0.046657883, -0.042779557, 0.048052263, 0.037120715, 0.0012078708, 0.019323641, 0.013024646, 0.061841156, 0.01753008, 1.2066379E-4, 0.023636049, 0.018369958, 0.036082096, 0.03932147, 0.0046853777, -0.015098697, 0.038477935, -0.01895684, -0.040239938, 2.6470664E-4, 0.03997473, 0.02041734, 0.02412652, 0.018273998, 0.017018031, 0.006871845, 0.0025124447, -0.0018908525, -0.013397233, 0.042458713, 0.007796125, 0.028542817, 0.031890307, -0.0074867285, -0.0033081016, -0.02232893, 0.039048433, 0.00957053, 0.06763975, 0.040223297, 0.0064583384, -0.014190483, 0.045714546, 0.0029999055, 0.014651245, 0.024208939, 0.020654708, -0.012122954, -0.0036424815, 0.00488385, 0.029132547, 0.067792565, 0.0075463247, -0.009096316, -0.038455218, 0.015037789, -0.01743026, -0.004400987, -1.5690622E-4, 0.016168159, 0.0020400928, 0.031062322, -0.008158351, 0.0292213, 0.008834568, -0.048937295, -0.00890528, 0.017726518, -0.0067773387, -0.046057213, -0.066518776, 0.0018978252, -0.04398522, 0.011562229, 0.031211298, 0.0103532905, -0.0037940282, 0.093772806, -0.031089822, 0.040764417, -0.053171575, -0.03156361, 0.036163535, 0.03484915, 0.04917469, -0.0045245993, 0.0058647553, 0.05267792, 0.028012566, 0.028851494, -0.022312999, -0.020575663, 0.03345691, 0.025952421, -0.052168794, -0.061676178, -0.017157838, -0.03421253, -0.035815753, -0.06571464, -0.007408596, -2.02389E-4, -0.023351457, -0.055525146, -0.04038344, -0.006495214, -0.017078917, -0.035309125, 0.041886955, -0.09497299, -0.0189574, 0.016921803, 0.017511738, 0.082098976, -0.018675305, 0.033731908, -0.028046045, -0.013675128, 0.0072140736, -0.020495338, 0.009846083, 0.013070329, -0.011773132, 0.035009257, 0.0074090296, -0.014208246, -5.310546E-4, 0.021474011, 0.014579644, -0.09338692, -0.010726686, 0.007154424, 0.057590302, -0.04826717, 0.040737577, 0.014072642, 0.04285114, -0.061159305, 0.013216943, -0.035471566, -0.03792605, 0.015285408, 0.031102464, 0.030012386, -0.023884479, -0.04371121, -0.024413597, -0.010348542, 0.017916787, 0.0042866515, 0.018110914, -0.041588936, 0.024906408, -0.031663194, 0.03195878, -0.06372821, 0.019083183, -0.01137915, -0.018030347, 0.010138715, -0.0582689, 0.031122282, 0.008210103, 0.012292584, 0.027713217, 0.028951935, 0.045635186, -0.009818348, 0.025670283, 0.03957527, -0.028106295, 0.03346287, 0.006125563, 0.013537182, 0.012909673, -0.001204659, 0.018613683, 0.0018722271, -0.019579338, 0.008905144, -0.05733141, 0.025476566, -0.0056283884, 0.017892752, 0.011068579, 0.07707967, -0.024977751, -0.024308717, 0.013858339, -0.0058020353, -0.014463086, -0.009544265, 0.040218975, -0.012510054, 0.04849776, -0.05000309, 0.025404643, 0.008990219, 0.02775138, -0.07551933, 0.008215385, 0.0053623077, -2.8556216E-4, 0.013400637, 0.017384026, -0.016238615, 0.031755704, -0.06869863, 0.0011450738, 0.04904909, 0.0032084947, -0.061084855, 0.005177811, -0.0043256404, 0.015641086, 0.01082181, -0.04075435, 0.014862946, 0.06862344, 0.008437109, -0.016099032, 0.022712294, -0.034809124, -0.03308236, -0.05667152, -0.03971709, 0.021760954, 0.042704564, -0.003670681, -0.0125031965, -0.01086691, 0.0297599, 5.219019E-4, 0.042474877, -0.010456534, 0.08990086, -0.07252977, -0.0232252, -0.032979038, 0.020222792, 
0.040868383, 0.06501842, -0.035030693, -0.0015357807, 0.018102454, 0.024944443, -0.020003196, -0.011539847, 0.011255642, 0.037775412, -0.0037286845, -0.0341213, 0.023036147, 0.02926327, -0.046673402, 0.036873233, -0.03849799, 0.05359753, 0.0020826515, 0.006461479, 0.02670649, -0.00140334, 0.033684377, 0.038561035, -0.024399279, 0.002088306, -0.060904354, -0.075068265, -0.06754775, 0.076485276, -0.017709987, 0.046117906, 0.12425809, 0.0106040435, 0.0935674, -0.038158268, 0.009669471, -0.018891279, -0.008584558, 0.062187072, 0.0446559, -0.04003452, 0.021192033, -0.027830705, 0.0030938783, 0.026238382, 0.050908126, -0.0640897, 0.0039400524, -4.0983717E-4, -0.09788098, 0.077888265, -0.008923493, 9.2718634E-4, -0.003174036, -0.0077122, 0.024076542, -0.012247094, 0.015358698, -0.002875235, -0.03378138, -0.015616789, 0.016734147, 0.0035185486, 0.015807444, 0.03484354, 0.053835943, 0.01872425, -0.018600935, 0.0060353098, -0.0033563771, 0.055035062, -0.083564155, -0.011492768, 0.003962845, -0.03442353, 0.09015563, 0.012225138, 0.031516016, 0.030751515, -0.056343056, 0.037657607, 0.08115837, -0.041137557, 0.016311243, -0.058852646, -0.07653154, 0.02130071, 0.0040857317, -0.020951144, -0.0074253944, 0.05309452, -0.026305407, 0.0056941714, -0.02359672, 0.011392254, 0.017097248, -0.021877138, -0.06543879, 0.0428062, 0.023494843, -0.039750084, 0.0198583, 0.039141204, -0.043232452, 0.05673762, -0.00572516, 0.0099977795, -0.010179716, -0.060138825, 0.031860784, 0.0018468671, -0.010174757, 0.02398504, 0.014412493, 0.079279535, -0.015402895, -0.07597795, 0.0087828515, -0.0127440635, -0.008228165, -0.0019640992, -0.028497383, 0.013919859, 0.025142275, -0.1320675, 0.0121768685, -0.046735562, 9.829229E-5, -0.009189184, 0.018436272, -0.08516998, 0.015040611, 0.035327762, -0.010171434, 0.026718847, -0.028313076, -0.013120813, -0.058203585, -0.038716007, 0.022184927, 0.07012223, -0.06264533, 0.056756523, -0.065681, 0.05986038, -0.05279611, -0.054911636, 0.076010436, 0.041015115, 0.03920821, -0.01744772, 0.0034039353, 0.0075382935, -0.01624392, 0.05378706, 0.03231586, -0.07524116, 0.06305631, 0.05991506, -5.444081E-4, 0.013409323, -0.06888001, -0.040708184, 0.03734671, 0.0052551595, 0.010684721, -0.040529408, 0.028915955, 0.029105747, -0.020185236, 0.06496445, -0.022009412, -0.0033808595, 0.024795303, 0.0026664098, 0.042996325, -0.04022965, 0.012088627, -0.0223725, -0.015508588, -0.013264377, -0.020301288, -0.0015037537, 0.007726907, -0.0022741442, -0.044956572, 0.010999487, -0.0014431779, 0.031763487, 0.019383159, -0.010809799, -0.0134113515, -0.02977723, -0.0014747303, 0.04057383, -0.015751097, -0.011753722, -0.036123946, 0.018938705, 9.906364E-4, 0.036280718, -0.09332089, -0.009991581, 0.025463797, 0.05119224, 0.07540358, 0.027900526, 0.100351, 0.030668264, -0.007963987, -0.029012676, 0.021057166, -0.009048951, 0.00842427, 0.01876811, -0.035510283, 0.034366164, -0.019845309, -0.042352304, 0.061529007, 0.033723388, -0.003314133, -0.024003353, 0.028756566, 0.059479274, -0.064037204, -0.049339823, -8.226961E-4, -0.020002557, -0.011994202, 0.015570834, 0.045298383, 0.0057346253, 0.09007624, -0.053770024, 0.007630297, 0.020868106, -0.017037094, -0.055875137, 0.04900269, 0.015741454, 0.0124805225, -0.0018614308, -0.019576045, 0.023860257, 0.017991606, 0.003367343, 0.06020378, 0.0026180628, -0.09462455, -0.0070169405, -0.029571567, -0.038119137, 0.013861453, -0.017994085, -0.045172486, -0.022872778, 0.055174, -0.008971932, -0.004308986, 0.01601522, 0.003778432, 0.031744134, 0.02868899, -0.14191957, -0.016329547, 
-0.016410846, -4.6470436E-6, -0.001020947, 0.0027826065, -0.039300438, -0.011893471, -0.02075158, -0.010576237, -0.02062336, 0.013781222, -0.008120074, -0.029703692, -0.046667382, 0.043274097, -0.021984896, -0.02135883, 0.018591158, -0.041193772, -0.0059216945, -0.0011121663, -0.02494825, 0.017716935, -0.009277854, 0.04252703, -0.025771331, -0.04950817, -0.010750714, -0.03249349, -0.051454652, 0.013961526, 0.020731043, 0.005106143, -0.00143041, 0.026762294, -0.040144447, -0.017221546, -0.024441173, 0.026409082, -0.02006987, -0.06430974, 0.03596783, 0.11877633, 0.019118857, -0.023766126, -0.07279529, 0.09964732, -0.021428458, 0.026640266, 0.022268405, 0.042921524, -0.007858052, -0.09624318, -0.022612294, -0.019523097, 0.03567699, 0.03789931, -0.006097838, 0.02569811, 0.0191861, 0.07499048, -0.071985036, 0.02195141, -0.025485674, 0.027281731, 0.028316619, -2.0592185E-4, -0.0087429015, 0.03162398, -0.007593867, -0.008025583, 0.010998485, 0.0040793577, -0.0013161482, 0.04318332, 0.021368232, 0.019170962, 0.021635167, 0.004988852, -0.013367873, -0.012466818, -0.0046749967, -0.03768797, 0.039707363, -0.044927754, -0.03654003, -0.023658205, -0.001842112, -0.010652133, 0.011228231, 9.927069E-4, -0.037655, -3.4657202E-4, -4.4477347E-6, -0.0016849868, -0.08615711, 0.048710153, -0.041956488, 0.043102454, 0.039763212, 0.013289194, -0.080720246, 0.0059994697, 0.015247406, -0.04542366, -0.05336339, -0.054322492, -0.012767407, -0.004596957, -0.025987137, -0.0020473057, -0.007264475, -0.026240809, 0.004853881, 0.010054818, -0.021872481, 0.04792254, -0.017764855, 0.01646331, 0.027268302, 0.042611707, -0.03171807, -0.040693402, -0.021686986, -0.011264477, 0.0067759645, 0.02997798, 0.008916376, 0.02419584, -0.0020763963, 0.0056151943, -0.0026876493, 0.05944909, -0.045404088, 0.018879034, 0.011747689, 0.03196524, -5.1519996E-4, 0.0013718596, 0.058480065, 0.057530686, 0.0032917305, -0.03556252, -0.03199946, 8.2321337E-4, 0.008931801, 0.086252205, 0.0013950337, 0.024274798, -0.009235701, -0.016323563, 0.0069916663, 0.015588893, 0.07079948, 0.019281829, 0.028265161, -0.028714187, 0.041323867, -0.021685174, 0.037033204, 0.040014476, 0.066936225, 0.033902515, -0.0027583768, 0.0102592, -0.025718369, -0.023265323, 0.038798634, 7.6241576E-4, -0.038741548, -0.008511498, 0.0066514956, -0.0047597503, -0.013024812, 0.020948282, 0.032426294, -0.04831275, 0.023370765, -0.026260225, 0.00937463, -0.0136523675, -0.010202122, -0.019113116, 0.02264367, 0.061504897, 0.005174083, 0.009410467, -0.048552092, 0.018883549, 0.017368691, 0.075853914, -0.044583194, -0.018234642, 0.030739859, 0.03802531, -0.0039164764, -0.0034495012, -0.02906537, 0.01969172, 0.0039183535, 0.10471505, 0.0139206285, -0.023619942, -0.062056284, -0.03643699, 0.0075695068, -0.078179, -0.0035506163, 0.055161994, 0.029487751, 0.033565205, -0.034224838, 0.033817288, -0.041085172, -0.017652633, -0.023406055, -0.040258575, -0.02416118, 0.0065094046, -0.034261346, -0.02321457, 0.050748765, 0.0348932, -0.0054060123, 0.052658632, 0.027222686, -0.011120133, 0.026567513, 0.013036436, -0.051871147, -0.062004875, 0.03265022, -0.003253459, -0.047073938, -0.0069678905, 0.04008311, 0.02167116, 0.0023027628, -0.008902592, -0.032181825]");
-
- assertEquals(embedding, expected);
-
- }
-*/
-}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/ConfigServerApiImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/ConfigServerApiImpl.java
index 6e6a6dec9d3..d66e9ea7937 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/ConfigServerApiImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/ConfigServerApiImpl.java
@@ -4,7 +4,6 @@ package com.yahoo.vespa.hosted.node.admin.configserver;
import ai.vespa.util.http.hc4.SslConnectionSocketFactory;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
-import com.yahoo.config.provision.HostName;
import com.yahoo.vespa.athenz.identity.ServiceIdentityProvider;
import com.yahoo.vespa.athenz.identity.ServiceIdentitySslSocketFactory;
import com.yahoo.vespa.hosted.node.admin.component.ConfigServerInfo;
@@ -73,14 +72,10 @@ public class ConfigServerApiImpl implements ConfigServerApi {
provider);
}
- public static ConfigServerApiImpl createFor(ConfigServerInfo info,
+ public static ConfigServerApiImpl createFor(URI uri,
ServiceIdentityProvider provider,
- HostnameVerifier hostnameVerifier,
- HostName configServerHostname) {
- return new ConfigServerApiImpl(
- List.of(info.getConfigServerUri(configServerHostname.value())),
- hostnameVerifier,
- provider);
+ HostnameVerifier hostnameVerifier) {
+ return new ConfigServerApiImpl(List.of(uri), hostnameVerifier, provider);
}
private ConfigServerApiImpl(Collection<URI> configServers,
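The factory now takes the config server URI directly instead of deriving it from ConfigServerInfo and a hostname. A sketch of a call site, assuming it lives in the same package; the URI and the permissive hostname verifier are placeholders:

    import com.yahoo.vespa.athenz.identity.ServiceIdentityProvider;
    import java.net.URI;

    class ConfigServerApiExample {
        static ConfigServerApiImpl create(ServiceIdentityProvider provider) {
            return ConfigServerApiImpl.createFor(
                    URI.create("https://cfg1.example.com:4443"),  // placeholder config server URI
                    provider,
                    (hostname, session) -> true);                 // placeholder HostnameVerifier; use a real verifier outside tests
        }
    }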
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java
index b26b9d2d0e4..043a8ae4cd5 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.hosted.node.admin.configserver.noderepository;
import com.fasterxml.jackson.databind.JsonNode;
+import com.google.common.net.InetAddresses;
import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.CloudAccount;
@@ -30,6 +31,7 @@ import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.TreeMap;
+import java.util.function.Consumer;
import java.util.function.Function;
import java.util.logging.Logger;
import java.util.stream.Collectors;
@@ -136,8 +138,18 @@ public class RealNodeRepository implements NodeRepository {
final GetNodesResponse response = configServerApi.get(path, GetNodesResponse.class);
return response.nodes.stream()
- .filter(node -> node.wireguardPubkey != null && ! node.wireguardPubkey.isEmpty())
- .map(RealNodeRepository::createTenantPeer)
+ .mapMulti((NodeRepositoryNode node, Consumer<WireguardPeer> consumer) -> {
+ if (node.wireguardPubkey == null || node.wireguardPubkey.isEmpty()) return;
+ List<VersionedIpAddress> ipAddresses = node.ipAddresses.stream()
+ .map(InetAddresses::forString)
+ .filter(address -> !address.isLoopbackAddress() && !address.isLinkLocalAddress() && !address.isSiteLocalAddress())
+ .map(VersionedIpAddress::from)
+ .toList();
+ if (ipAddresses.isEmpty()) return;
+
+ consumer.accept(new WireguardPeer(
+ HostName.of(node.hostname), ipAddresses, WireguardKey.from(node.wireguardPubkey)));
+ })
.sorted()
.toList();
}
@@ -353,16 +365,9 @@ public class RealNodeRepository implements NodeRepository {
return node;
}
- private static WireguardPeer createTenantPeer(NodeRepositoryNode node) {
- return new WireguardPeer(HostName.of(node.hostname),
- node.ipAddresses.stream().map(VersionedIpAddress::from).toList(),
- WireguardKey.from(node.wireguardPubkey));
- }
-
private static WireguardPeer createConfigserverPeer(GetWireguardResponse.Configserver configServer) {
return new WireguardPeer(HostName.of(configServer.hostname),
configServer.ipAddresses.stream().map(VersionedIpAddress::from).toList(),
WireguardKey.from(configServer.wireguardPubkey));
}
-
}
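The new mapping keeps a node only if it has at least one globally routable address, dropping loopback, link-local and site-local addresses before building the WireguardPeer. A self-contained sketch of that address filter on plain strings; the sample addresses are made up:

    import com.google.common.net.InetAddresses;
    import java.util.List;

    class PublicAddressFilter {
        public static void main(String[] args) {
            List<String> addresses = List.of("127.0.0.1", "fe80::1", "10.0.0.1", "2001:db8::2", "172.217.1.1");
            List<String> publicOnly = addresses.stream()
                    .map(InetAddresses::forString)
                    .filter(a -> !a.isLoopbackAddress() && !a.isLinkLocalAddress() && !a.isSiteLocalAddress())
                    .map(InetAddresses::toAddrString)
                    .toList();
            System.out.println(publicOnly);  // [2001:db8::2, 172.217.1.1]
        }
    }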
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java
index 5d4628b41b6..5ad51e656ce 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java
@@ -108,7 +108,7 @@ public class CoreCollector {
}
try {
- String binPath = readBinPath(context, coredumpPath);
+ String binPath = context.paths().underVespaHome("").resolve(readBinPath(context, coredumpPath)).pathInContainer();
metadata.setType(CoreDumpMetadata.Type.CORE_DUMP).setBinPath(binPath);
if (Path.of(binPath).getFileName().toString().equals("java")) {
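The binary path reported for a core file may be relative to the Vespa home directory, so it is now resolved against it before use; paths that are already absolute are unaffected because Path.resolve returns an absolute argument unchanged. The paths below are the ones exercised by the tests:

    import java.nio.file.Path;

    class BinPathResolution {
        public static void main(String[] args) {
            Path vespaHome = Path.of("/opt/vespa");
            System.out.println(vespaHome.resolve("sbin/distributord-bin"));  // /opt/vespa/sbin/distributord-bin
            System.out.println(vespaHome.resolve("/usr/bin/program"));       // /usr/bin/program (already absolute)
        }
    }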
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
index 64efeb85e63..460ef2efb58 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
@@ -428,7 +428,7 @@ public class NodeAgentImpl implements NodeAgent {
}
private boolean noCpuCap(ZoneApi zone) {
- return zone.getEnvironment() == Environment.dev;
+ return zone.getEnvironment() == Environment.dev || zone.getEnvironment().isTest();
}
private boolean downloadImageIfNeeded(NodeAgentContext context, Optional<Container> container) {
@@ -619,8 +619,10 @@ public class NodeAgentImpl implements NodeAgent {
} catch (OrchestratorException e) {
// Ensure the ACLs are up to date: The reason we're unable to suspend may be because some other
// node is unable to resume because the ACL rules of SOME Docker container is wrong...
+ // Same can happen with stale WireGuard config, so update that too
try {
aclMaintainer.ifPresent(maintainer -> maintainer.converge(context));
+ wireguardTasks.forEach(task -> getContainer(context).ifPresent(c -> task.converge(context, c.id())));
} catch (RuntimeException suppressed) {
logger.log(Level.WARNING, "Suppressing ACL update failure: " + suppressed);
e.addSuppressed(suppressed);
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepositoryTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepositoryTest.java
index 12816e1b8a3..6358fcecafb 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepositoryTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepositoryTest.java
@@ -211,7 +211,7 @@ public class RealNodeRepositoryTest {
assertEquals(1, cfgPeers.size());
assertWireguardPeer(cfgPeers.get(0), "cfg1.yahoo.com",
- "::201:1", "127.0.201.1",
+ "::201:1",
"lololololololololololololololololololololoo=");
//// Exclave nodes ////
@@ -222,15 +222,14 @@ public class RealNodeRepositoryTest {
assertEquals(1, exclavePeers.size());
assertWireguardPeer(exclavePeers.get(0), "dockerhost2.yahoo.com",
- "::101:1", "127.0.101.1",
+ "::101:1",
"000011112222333344445555666677778888999900c=");
}
- private void assertWireguardPeer(WireguardPeer peer, String hostname, String ipv6, String ipv4, String publicKey) {
+ private void assertWireguardPeer(WireguardPeer peer, String hostname, String ipv6, String publicKey) {
assertEquals(hostname, peer.hostname().value());
- assertEquals(2, peer.ipAddresses().size());
+ assertEquals(1, peer.ipAddresses().size());
assertIp(peer.ipAddresses().get(0), ipv6, 6);
- assertIp(peer.ipAddresses().get(1), ipv4, 4);
assertEquals(publicKey, peer.publicKey().value());
}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollectorTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollectorTest.java
index b4a35d6012c..6d7d31e5a6c 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollectorTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollectorTest.java
@@ -57,11 +57,6 @@ public class CoreCollectorTest {
assertEquals(TEST_BIN_PATH, coreCollector.readBinPath(context, TEST_CORE_PATH));
mockExec(cmd,
- "/tmp/core.1234: ELF 64-bit LSB core file x86-64, version 1 (SYSV), SVR4-style, from " +
- "'/usr/bin/program'");
- assertEquals(TEST_BIN_PATH, coreCollector.readBinPath(context, TEST_CORE_PATH));
-
- mockExec(cmd,
"/tmp/core.1234: ELF 64-bit LSB core file x86-64, version 1 (SYSV), SVR4-style, " +
"from 'program', real uid: 0, effective uid: 0, real gid: 0, effective gid: 0, " +
"execfn: '/usr/bin/program', platform: 'x86_64");
@@ -159,6 +154,27 @@ public class CoreCollectorTest {
}
@Test
+ void collectsDataRelativePath() {
+ mockExec(new String[]{"file", TEST_CORE_PATH.pathInContainer()},
+ "/tmp/core.1234: ELF 64-bit LSB core file x86-64, version 1 (SYSV), SVR4-style, from 'sbin/distributord-bin'");
+ String absolutePath = "/opt/vespa/sbin/distributord-bin";
+ mockExec(new String[]{GDB_PATH_RHEL8, "-n", "-ex", "set print frame-arguments none",
+ "-ex", "bt", "-batch", absolutePath, "/tmp/core.1234"},
+ String.join("\n", GDB_BACKTRACE));
+ mockExec(new String[]{GDB_PATH_RHEL8, "-n", "-ex", "set print frame-arguments none",
+ "-ex", "thread apply all bt", "-batch", absolutePath, "/tmp/core.1234"},
+ String.join("\n", GDB_BACKTRACE));
+
+ var expected = new CoreDumpMetadata()
+ .setBinPath(absolutePath)
+ .setCreated(CORE_CREATED)
+ .setType(CoreDumpMetadata.Type.CORE_DUMP)
+ .setBacktrace(GDB_BACKTRACE)
+ .setBacktraceAllThreads(GDB_BACKTRACE);
+ assertEquals(expected, coreCollector.collect(context, TEST_CORE_PATH));
+ }
+
+ @Test
void collectsPartialIfBacktraceFailsTest() {
mockExec(new String[]{"file", TEST_CORE_PATH.pathInContainer()},
"/tmp/core.1234: ELF 64-bit LSB core file x86-64, version 1 (SYSV), SVR4-style, from " +
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java
index 3b518728607..fe4f4987d34 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java
@@ -23,6 +23,7 @@ import com.yahoo.vespa.hosted.provision.node.Reports;
import com.yahoo.vespa.hosted.provision.node.Status;
import com.yahoo.vespa.hosted.provision.node.TrustStoreItem;
+import java.time.Duration;
import java.time.Instant;
import java.util.Arrays;
import java.util.EnumSet;
@@ -54,6 +55,8 @@ public final class Node implements Nodelike {
private final Optional<String> modelName;
private final Optional<TenantName> reservedTo;
private final Optional<ApplicationId> exclusiveToApplicationId;
+ private final Optional<Duration> hostTTL;
+ private final Optional<Instant> hostEmptyAt;
private final Optional<ClusterSpec.Type> exclusiveToClusterType;
private final Optional<String> switchHostname;
private final List<TrustStoreItem> trustStoreItems;
@@ -87,11 +90,11 @@ public final class Node implements Nodelike {
/** DO NOT USE: public for serialization purposes. See {@code create} helper methods. */
public Node(String id, IP.Config ipConfig, String hostname, Optional<String> parentHostname,
- Flavor flavor, Status status, State state, Optional<Allocation> allocation, History history, NodeType type,
- Reports reports, Optional<String> modelName, Optional<TenantName> reservedTo,
- Optional<ApplicationId> exclusiveToApplicationId, Optional<ClusterSpec.Type> exclusiveToClusterType,
- Optional<String> switchHostname, List<TrustStoreItem> trustStoreItems,
- CloudAccount cloudAccount, Optional<WireguardKey> wireguardPubKey) {
+ Flavor flavor, Status status, State state, Optional<Allocation> allocation, History history,
+ NodeType type, Reports reports, Optional<String> modelName, Optional<TenantName> reservedTo,
+ Optional<ApplicationId> exclusiveToApplicationId, Optional<Duration> hostTTL, Optional<Instant> hostEmptyAt,
+ Optional<ClusterSpec.Type> exclusiveToClusterType, Optional<String> switchHostname,
+ List<TrustStoreItem> trustStoreItems, CloudAccount cloudAccount, Optional<WireguardKey> wireguardPubKey) {
this.id = Objects.requireNonNull(id, "A node must have an ID");
this.hostname = requireNonEmptyString(hostname, "A node must have a hostname");
this.ipConfig = Objects.requireNonNull(ipConfig, "A node must have an IP config");
@@ -106,6 +109,8 @@ public final class Node implements Nodelike {
this.modelName = Objects.requireNonNull(modelName, "A null modelName is not permitted");
this.reservedTo = Objects.requireNonNull(reservedTo, "reservedTo cannot be null");
this.exclusiveToApplicationId = Objects.requireNonNull(exclusiveToApplicationId, "exclusiveToApplicationId cannot be null");
+ this.hostTTL = Objects.requireNonNull(hostTTL, "hostTTL cannot be null");
+ this.hostEmptyAt = Objects.requireNonNull(hostEmptyAt, "hostEmptyAt cannot be null");
this.exclusiveToClusterType = Objects.requireNonNull(exclusiveToClusterType, "exclusiveToClusterType cannot be null");
this.switchHostname = requireNonEmptyString(switchHostname, "switchHostname cannot be null");
this.trustStoreItems = Objects.requireNonNull(trustStoreItems).stream().distinct().toList();
@@ -133,6 +138,9 @@ public final class Node implements Nodelike {
if (type != NodeType.host && exclusiveToApplicationId.isPresent())
throw new IllegalArgumentException("Only tenant hosts can be exclusive to an application");
+ if (type != NodeType.host && hostTTL.isPresent())
+ throw new IllegalArgumentException("Only tenant hosts can have a TTL");
+
if (type != NodeType.host && exclusiveToClusterType.isPresent())
throw new IllegalArgumentException("Only tenant hosts can be exclusive to a cluster type");
}
@@ -212,6 +220,19 @@ public final class Node implements Nodelike {
public Optional<ApplicationId> exclusiveToApplicationId() { return exclusiveToApplicationId; }
/**
+ * Returns the additional time to live of a tenant host in a dynamically provisioned zone, after all its child
+ * nodes have been removed and before it is deprovisioned, if any.
+ * This is set during provisioning and applies for the entire lifetime of the host.
+ */
+ public Optional<Duration> hostTTL() { return hostTTL; }
+
+ /**
+ * Returns the time at which a tenant host became empty, i.e., no longer had any child nodes allocated.
+ * This is used with {@link #hostTTL} to determine when to deprovision a tenant host in a dynamically provisioned zone.
+ */
+ public Optional<Instant> hostEmptyAt() { return hostEmptyAt; }
+
+ /**
* Returns the cluster type this host is exclusive to, if any. Only tenant hosts can be exclusive to a cluster type.
* If this is set, resources on this host cannot be allocated to any other cluster type. This is set during
* provisioning and applies for the entire lifetime of the host
@@ -255,7 +276,7 @@ public final class Node implements Nodelike {
* If both given wantToRetire and wantToDeprovision are equal to the current values, the method is no-op.
*/
public Node withWantToRetire(boolean wantToRetire, boolean wantToDeprovision, Agent agent, Instant at) {
- return withWantToRetire(wantToRetire, wantToDeprovision, false, agent, at);
+ return withWantToRetire(wantToRetire, wantToDeprovision, false, false, agent, at);
}
/**
@@ -264,15 +285,16 @@ public final class Node implements Nodelike {
*
* If all given values are equal to the current ones, the method is no-op.
*/
- public Node withWantToRetire(boolean wantToRetire, boolean wantToDeprovision, boolean wantToRebuild, Agent agent, Instant at) {
+ public Node withWantToRetire(boolean wantToRetire, boolean wantToDeprovision, boolean wantToRebuild, boolean wantToUpgradeFlavor, Agent agent, Instant at) {
if (wantToRetire == status.wantToRetire() &&
wantToDeprovision == status.wantToDeprovision() &&
- wantToRebuild == status.wantToRebuild()) return this;
+ wantToRebuild == status.wantToRebuild() &&
+ wantToUpgradeFlavor == status.wantToUpgradeFlavor()) return this;
if (wantToRebuild && !wantToRetire && resources().storageType() != NodeResources.StorageType.remote) {
throw new IllegalArgumentException("Cannot rebuild " + this + " without retiring because storage is " +
resources().storageType());
}
- Node node = this.with(status.withWantToRetire(wantToRetire, wantToDeprovision, wantToRebuild));
+ Node node = this.with(status.withWantToRetire(wantToRetire, wantToDeprovision, wantToRebuild, wantToUpgradeFlavor));
if (wantToRetire)
node = node.with(history.with(new History.Event(History.Event.Type.wantToRetire, agent, at)));
return node;
@@ -330,15 +352,15 @@ public final class Node implements Nodelike {
/** Returns a node with the status assigned to the given value */
public Node with(Status status) {
return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state, allocation, history, type,
- reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname,
- trustStoreItems, cloudAccount, wireguardPubKey);
+ reports, modelName, reservedTo, exclusiveToApplicationId, hostTTL, hostEmptyAt,
+ exclusiveToClusterType, switchHostname, trustStoreItems, cloudAccount, wireguardPubKey);
}
/** Returns a node with the type assigned to the given value */
public Node with(NodeType type) {
return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state, allocation, history, type,
- reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname,
- trustStoreItems, cloudAccount, wireguardPubKey);
+ reports, modelName, reservedTo, exclusiveToApplicationId, hostTTL, hostEmptyAt,
+ exclusiveToClusterType, switchHostname, trustStoreItems, cloudAccount, wireguardPubKey);
}
/** Returns a node with the flavor assigned to the given value */
@@ -346,37 +368,36 @@ public final class Node implements Nodelike {
if (flavor.equals(this.flavor)) return this;
History updateHistory = history.with(new History.Event(History.Event.Type.resized, agent, instant));
return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state, allocation, updateHistory, type,
- reports, modelName, reservedTo, exclusiveToApplicationId, exclusiveToClusterType, switchHostname,
- trustStoreItems, cloudAccount, wireguardPubKey);
+ reports, modelName, reservedTo, exclusiveToApplicationId, hostTTL, hostEmptyAt,
+ exclusiveToClusterType, switchHostname, trustStoreItems, cloudAccount, wireguardPubKey);
}
/** Returns a copy of this with the reboot generation set to generation */
public Node withReboot(Generation generation) {
- return new Node(id, ipConfig, hostname, parentHostname, flavor, status.withReboot(generation), state,
- allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId,
+ return new Node(id, ipConfig, hostname, parentHostname, flavor, status.withReboot(generation), state, allocation,
+ history, type, reports, modelName, reservedTo, exclusiveToApplicationId, hostTTL, hostEmptyAt,
exclusiveToClusterType, switchHostname, trustStoreItems, cloudAccount, wireguardPubKey);
}
/** Returns a copy of this with given id set */
public Node withId(String id) {
- return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
- allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId,
+ return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state, allocation,
+ history, type, reports, modelName, reservedTo, exclusiveToApplicationId, hostTTL, hostEmptyAt,
exclusiveToClusterType, switchHostname, trustStoreItems, cloudAccount, wireguardPubKey);
}
/** Returns a copy of this with model name set to given value */
public Node withModelName(String modelName) {
- return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
- allocation, history, type, reports, Optional.of(modelName), reservedTo, exclusiveToApplicationId,
+ return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state, allocation, history,
+ type, reports, Optional.of(modelName), reservedTo, exclusiveToApplicationId, hostTTL, hostEmptyAt,
exclusiveToClusterType, switchHostname, trustStoreItems, cloudAccount, wireguardPubKey);
}
/** Returns a copy of this with model name cleared */
public Node withoutModelName() {
- return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
- allocation, history, type, reports, Optional.empty(), reservedTo,
- exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems,
- cloudAccount, wireguardPubKey);
+ return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state, allocation, history,
+ type, reports, Optional.empty(), reservedTo, exclusiveToApplicationId, hostTTL, hostEmptyAt,
+ exclusiveToClusterType, switchHostname, trustStoreItems, cloudAccount, wireguardPubKey);
}
/** Returns a copy of this with a history record saying it was detected to be down at this instant */
@@ -416,66 +437,75 @@ public final class Node implements Nodelike {
* Do not use this to allocate a node.
*/
public Node with(Allocation allocation) {
- return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
- Optional.of(allocation), history, type, reports, modelName, reservedTo, exclusiveToApplicationId,
+ return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state, Optional.of(allocation), history,
+ type, reports, modelName, reservedTo, exclusiveToApplicationId, hostTTL, hostEmptyAt,
exclusiveToClusterType, switchHostname, trustStoreItems, cloudAccount, wireguardPubKey);
}
/** Returns a copy of this node with IP config set to the given value. */
public Node with(IP.Config ipConfig) {
- return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
- allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId,
+ return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state, allocation, history,
+ type, reports, modelName, reservedTo, exclusiveToApplicationId, hostTTL, hostEmptyAt,
exclusiveToClusterType, switchHostname, trustStoreItems, cloudAccount, wireguardPubKey);
}
/** Returns a copy of this node with the parent hostname assigned to the given value. */
public Node withParentHostname(String parentHostname) {
- return new Node(id, ipConfig, hostname, Optional.of(parentHostname), flavor, status, state,
- allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId,
+ return new Node(id, ipConfig, hostname, Optional.of(parentHostname), flavor, status, state, allocation,
+ history, type, reports, modelName, reservedTo, exclusiveToApplicationId, hostTTL, hostEmptyAt,
exclusiveToClusterType, switchHostname, trustStoreItems, cloudAccount, wireguardPubKey);
}
public Node withReservedTo(TenantName tenant) {
if (type != NodeType.host)
throw new IllegalArgumentException("Only host nodes can be reserved, " + hostname + " has type " + type);
- return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
- allocation, history, type, reports, modelName, Optional.of(tenant), exclusiveToApplicationId,
+ return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state, allocation, history,
+ type, reports, modelName, Optional.of(tenant), exclusiveToApplicationId, hostTTL, hostEmptyAt,
exclusiveToClusterType, switchHostname, trustStoreItems, cloudAccount, wireguardPubKey);
}
/** Returns a copy of this node which is not reserved to a tenant */
public Node withoutReservedTo() {
- return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
- allocation, history, type, reports, modelName, Optional.empty(),
- exclusiveToApplicationId, exclusiveToClusterType, switchHostname, trustStoreItems,
- cloudAccount, wireguardPubKey);
+ return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state, allocation, history,
+ type, reports, modelName, Optional.empty(), exclusiveToApplicationId, hostTTL, hostEmptyAt,
+ exclusiveToClusterType, switchHostname, trustStoreItems, cloudAccount, wireguardPubKey);
}
public Node withExclusiveToApplicationId(ApplicationId exclusiveTo) {
- return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
- allocation, history, type, reports, modelName, reservedTo, Optional.ofNullable(exclusiveTo),
+ return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state, allocation, history,
+ type, reports, modelName, reservedTo, Optional.ofNullable(exclusiveTo), hostTTL, hostEmptyAt,
+ exclusiveToClusterType, switchHostname, trustStoreItems, cloudAccount, wireguardPubKey);
+ }
+
+ public Node withHostTTL(Duration hostTTL) {
+ return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state, allocation, history,
+ type, reports, modelName, reservedTo, exclusiveToApplicationId, Optional.ofNullable(hostTTL), hostEmptyAt,
+ exclusiveToClusterType, switchHostname, trustStoreItems, cloudAccount, wireguardPubKey);
+ }
+
+ public Node withHostEmptyAt(Instant hostEmptyAt) {
+ return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state, allocation, history,
+ type, reports, modelName, reservedTo, exclusiveToApplicationId, hostTTL, Optional.ofNullable(hostEmptyAt),
exclusiveToClusterType, switchHostname, trustStoreItems, cloudAccount, wireguardPubKey);
}
public Node withExclusiveToClusterType(ClusterSpec.Type exclusiveTo) {
- return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
- allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId,
+ return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state, allocation, history,
+ type, reports, modelName, reservedTo, exclusiveToApplicationId, hostTTL, hostEmptyAt,
Optional.ofNullable(exclusiveTo), switchHostname, trustStoreItems, cloudAccount, wireguardPubKey);
}
public Node withWireguardPubkey(WireguardKey wireguardPubkey) {
- return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
- allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId,
- exclusiveToClusterType, switchHostname, trustStoreItems, cloudAccount,
- Optional.ofNullable(wireguardPubkey));
+ return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state, allocation, history,
+ type, reports, modelName, reservedTo, exclusiveToApplicationId, hostTTL, hostEmptyAt,
+ exclusiveToClusterType, switchHostname, trustStoreItems, cloudAccount, Optional.ofNullable(wireguardPubkey));
}
/** Returns a copy of this node with switch hostname set to given value */
public Node withSwitchHostname(String switchHostname) {
- return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
- allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId,
- exclusiveToClusterType, Optional.ofNullable(switchHostname), trustStoreItems, cloudAccount,
- wireguardPubKey);
+ return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state, allocation, history,
+ type, reports, modelName, reservedTo, exclusiveToApplicationId, hostTTL, hostEmptyAt,
+ exclusiveToClusterType, Optional.ofNullable(switchHostname), trustStoreItems, cloudAccount, wireguardPubKey);
}
/** Returns a copy of this node with switch hostname unset */
@@ -526,20 +556,20 @@ public final class Node implements Nodelike {
/** Returns a copy of this node with the given history. */
public Node with(History history) {
- return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
- allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId,
+ return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state, allocation, history,
+ type, reports, modelName, reservedTo, exclusiveToApplicationId, hostTTL, hostEmptyAt,
exclusiveToClusterType, switchHostname, trustStoreItems, cloudAccount, wireguardPubKey);
}
public Node with(Reports reports) {
- return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
- allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId,
+ return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state, allocation, history,
+ type, reports, modelName, reservedTo, exclusiveToApplicationId, hostTTL, hostEmptyAt,
exclusiveToClusterType, switchHostname, trustStoreItems, cloudAccount, wireguardPubKey);
}
public Node with(List<TrustStoreItem> trustStoreItems) {
- return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state,
- allocation, history, type, reports, modelName, reservedTo, exclusiveToApplicationId,
+ return new Node(id, ipConfig, hostname, parentHostname, flavor, status, state, allocation, history,
+ type, reports, modelName, reservedTo, exclusiveToApplicationId, hostTTL, hostEmptyAt,
exclusiveToClusterType, switchHostname, trustStoreItems, cloudAccount, wireguardPubKey);
}
@@ -673,6 +703,8 @@ public final class Node implements Nodelike {
private String modelName;
private TenantName reservedTo;
private ApplicationId exclusiveToApplicationId;
+ private Duration hostTTL;
+ private Instant hostEmptyAt;
private ClusterSpec.Type exclusiveToClusterType;
private String switchHostname;
private Allocation allocation;
@@ -712,6 +744,16 @@ public final class Node implements Nodelike {
return this;
}
+ public Builder hostTTL(Duration hostTTL) {
+ this.hostTTL = hostTTL;
+ return this;
+ }
+
+ public Builder hostEmptyAt(Instant hostEmptyAt) {
+ this.hostEmptyAt = hostEmptyAt;
+ return this;
+ }
+
public Builder exclusiveToClusterType(ClusterSpec.Type exclusiveTo) {
this.exclusiveToClusterType = exclusiveTo;
return this;
@@ -772,9 +814,9 @@ public final class Node implements Nodelike {
flavor, Optional.ofNullable(status).orElseGet(Status::initial), state, Optional.ofNullable(allocation),
Optional.ofNullable(history).orElseGet(History::empty), type, Optional.ofNullable(reports).orElseGet(Reports::new),
Optional.ofNullable(modelName), Optional.ofNullable(reservedTo), Optional.ofNullable(exclusiveToApplicationId),
- Optional.ofNullable(exclusiveToClusterType), Optional.ofNullable(switchHostname),
- Optional.ofNullable(trustStoreItems).orElseGet(List::of),
- cloudAccount, Optional.ofNullable(wireguardPubKey));
+ Optional.ofNullable(hostTTL), Optional.ofNullable(hostEmptyAt), Optional.ofNullable(exclusiveToClusterType),
+ Optional.ofNullable(switchHostname), Optional.ofNullable(trustStoreItems).orElseGet(List::of), cloudAccount,
+ Optional.ofNullable(wireguardPubKey));
}
}
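hostTTL is fixed at provisioning time, while hostEmptyAt is recorded once the last child node is gone; together they decide when an empty tenant host in a dynamically provisioned zone may be deprovisioned. A sketch of the new withers, assuming it sits next to Node; the two-hour TTL is a made-up value:

    import java.time.Duration;
    import java.time.Instant;

    class HostTtlSketch {
        static Node markEmpty(Node host) {
            // Only tenant hosts may carry a TTL; the constructor rejects it for other node types.
            return host.withHostTTL(Duration.ofHours(2))    // set at provisioning: extra life after becoming empty
                       .withHostEmptyAt(Instant.now());     // recorded when the last child node is removed
        }
    }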
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java
index 6a81c17d362..1ca81df824b 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java
@@ -8,6 +8,7 @@ import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.vespa.hosted.provision.autoscale.Autoscaler;
import com.yahoo.vespa.hosted.provision.autoscale.Autoscaling;
+import com.yahoo.vespa.hosted.provision.autoscale.ClusterModel;
import java.time.Duration;
import java.time.Instant;
@@ -197,24 +198,14 @@ public class Cluster {
Duration totalDuration = Duration.ZERO;
for (ScalingEvent event : scalingEvents()) {
if (event.duration().isEmpty()) continue;
- completedEventCount++;
- // Assume we have missed timely recording completion if it is longer than 4 days
- totalDuration = totalDuration.plus(maximum(Duration.ofDays(4), event.duration().get()));
- }
- if (completedEventCount == 0) { // Use defaults
- if (clusterSpec.isStateful()) return Duration.ofHours(12);
- return Duration.ofMinutes(10);
- }
- else {
- Duration predictedDuration = totalDuration.dividedBy(completedEventCount);
-
- if ( clusterSpec.isStateful() ) // TODO: Remove when we have reliable completion for content clusters
- predictedDuration = minimum(Duration.ofHours(12), predictedDuration);
-
- predictedDuration = minimum(Duration.ofMinutes(5), predictedDuration);
+ // Assume we have missed timely recording of completion if the event lasted longer than 4 days, so ignore it
+ if ( ! event.duration().get().minus(Duration.ofDays(4)).isNegative()) continue;
- return predictedDuration;
+ completedEventCount++;
+ totalDuration = totalDuration.plus(event.duration().get());
}
+ if (completedEventCount == 0) return ClusterModel.minScalingDuration(clusterSpec);
+ return minimum(ClusterModel.minScalingDuration(clusterSpec), totalDuration.dividedBy(completedEventCount));
}
private static Duration minimum(Duration smallestAllowed, Duration duration) {
@@ -223,10 +214,4 @@ public class Cluster {
return duration;
}
- private static Duration maximum(Duration largestAllowed, Duration duration) {
- if ( ! duration.minus(largestAllowed).isNegative())
- return largestAllowed;
- return duration;
- }
-
}
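The prediction is now simply the average duration of completed scaling events, skipping events that ran longer than 4 days (assumed to be missed completions) and never going below ClusterModel.minScalingDuration, which is 8 hours for stateful clusters and 5 minutes otherwise. A standalone illustration of the arithmetic with made-up event durations:

    import java.time.Duration;
    import java.util.List;

    class ScalingDurationSketch {
        public static void main(String[] args) {
            List<Duration> completed = List.of(Duration.ofMinutes(12), Duration.ofMinutes(4), Duration.ofDays(6));
            Duration floor = Duration.ofMinutes(5);  // minScalingDuration for a stateless cluster

            Duration total = Duration.ZERO;
            int count = 0;
            for (Duration d : completed) {
                if ( ! d.minus(Duration.ofDays(4)).isNegative()) continue;  // probably a missed completion; skip it
                total = total.plus(d);
                count++;
            }
            Duration predicted = count == 0 ? floor : total.dividedBy(count);
            if (predicted.compareTo(floor) < 0) predicted = floor;
            System.out.println(predicted);  // PT8M: (12 + 4) / 2 minutes, above the 5 minute floor
        }
    }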
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
index 4020166a132..a7d5cc50828 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
@@ -55,7 +55,7 @@ public class Autoscaler {
}
private Autoscaling autoscale(Application application, Cluster cluster, NodeList clusterNodes, Limits limits) {
- ClusterModel clusterModel = new ClusterModel(nodeRepository.zone(),
+ ClusterModel clusterModel = new ClusterModel(nodeRepository,
application,
clusterNodes.not().retired().clusterSpec(),
cluster,
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
index 2f9ad28a072..dac9a0f3518 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.autoscale;
+import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.hosted.provision.Node;
@@ -8,6 +9,7 @@ import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Application;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
+import com.yahoo.vespa.hosted.provision.provisioning.CapacityPolicies;
import java.time.Clock;
import java.time.Duration;
@@ -42,16 +44,24 @@ public class ClusterModel {
static final double idealContainerDiskLoad = 0.95;
static final double idealContentDiskLoad = 0.6;
+ // Memory for other processes running on the node (config-proxy, metrics-proxy).
+ // Keep in sync with config-model/NodeResourcesTuning.
+ static final double nodeMemoryOverheadGb = 0.7;
+
// When a query is issued on a node the cost is the sum of a fixed cost component and a cost component
// proportional to document count. We must account for this when comparing configurations with more or fewer nodes.
// TODO: Measure this, and only take it into account with queries
private static final double fixedCpuCostFraction = 0.1;
- private final Zone zone;
+ private final NodeRepository nodeRepository;
private final Application application;
private final ClusterSpec clusterSpec;
private final Cluster cluster;
+ private final CpuModel cpu = new CpuModel();
+ private final MemoryModel memory = new MemoryModel();
+ private final DiskModel disk = new DiskModel();
+
/**
* The current active nodes of this cluster, including retired,
* or empty if this models a new cluster not yet deployed.
@@ -69,14 +79,14 @@ public class ClusterModel {
private Double maxQueryGrowthRate = null;
private OptionalDouble averageQueryRate = null;
- public ClusterModel(Zone zone,
+ public ClusterModel(NodeRepository nodeRepository,
Application application,
ClusterSpec clusterSpec,
Cluster cluster,
NodeList clusterNodes,
MetricsDb metricsDb,
Clock clock) {
- this.zone = zone;
+ this.nodeRepository = nodeRepository;
this.application = application;
this.clusterSpec = clusterSpec;
this.cluster = cluster;
@@ -88,7 +98,7 @@ public class ClusterModel {
this.at = clock.instant();
}
- ClusterModel(Zone zone,
+ ClusterModel(NodeRepository nodeRepository,
Application application,
ClusterSpec clusterSpec,
Cluster cluster,
@@ -96,7 +106,7 @@ public class ClusterModel {
Duration scalingDuration,
ClusterTimeseries clusterTimeseries,
ClusterNodesTimeseries nodeTimeseries) {
- this.zone = zone;
+ this.nodeRepository = nodeRepository;
this.application = application;
this.clusterSpec = clusterSpec;
this.cluster = cluster;
@@ -111,12 +121,29 @@ public class ClusterModel {
public Application application() { return application; }
public ClusterSpec clusterSpec() { return clusterSpec; }
- public Cluster cluster() { return cluster; }
+ private ClusterNodesTimeseries nodeTimeseries() { return nodeTimeseries; }
+ private ClusterTimeseries clusterTimeseries() { return clusterTimeseries; }
+
+ /** Returns the instant this model was created. */
+ public Instant at() { return at;}
public boolean isEmpty() {
return nodeTimeseries().isEmpty();
}
+ /** Returns the predicted duration of a rescaling of this cluster */
+ public Duration scalingDuration() { return scalingDuration; }
+
+ /** Returns the average of the peak load measurement in each dimension, from each node. */
+ public Load peakLoad() {
+ return nodeTimeseries().peakLoad();
+ }
+
+ /** Returns the relative load adjustment accounting for redundancy in this. */
+ public Load redundancyAdjustment() {
+ return loadWith(nodeCount(), groupCount());
+ }
+
/** Returns the relative load adjustment that should be made to this cluster given available measurements. */
public Load loadAdjustment() {
if (nodeTimeseries().measurementsPerNode() < 0.5) return Load.one(); // Don't change based on very little data
@@ -147,17 +174,9 @@ public class ClusterModel {
return true;
}
- /** Returns the predicted duration of a rescaling of this cluster */
- public Duration scalingDuration() { return scalingDuration; }
-
- /** Returns the average of the peak load measurement in each dimension, from each node. */
- public Load peakLoad() {
- return nodeTimeseries().peakLoad();
- }
-
- /** Returns the relative load adjustment accounting for redundancy in this. */
- public Load redundancyAdjustment() {
- return loadWith(nodeCount(), groupCount());
+ public static Duration minScalingDuration(ClusterSpec clusterSpec) {
+ if (clusterSpec.isStateful()) return Duration.ofHours(8);
+ return Duration.ofMinutes(5);
}
/**
@@ -178,8 +197,8 @@ public class ClusterModel {
double queryCpu = queryCpuPerGroup * groupCount() / groups;
double writeCpu = (double)groupSize() / groupSize;
- return new Load(queryCpuFraction() * queryCpu + (1 - queryCpuFraction()) * writeCpu,
- (double)groupSize() / groupSize,
+ return new Load(cpu.queryFraction() * queryCpu + (1 - cpu.queryFraction()) * writeCpu,
+ (1 - memory.fixedFraction()) * (double)groupSize() / groupSize + memory.fixedFraction() * 1,
(double)groupSize() / groupSize);
}
else {
@@ -192,7 +211,7 @@ public class ClusterModel {
* if one of the nodes go down.
*/
public Load idealLoad() {
- var ideal = new Load(idealCpuLoad(), idealMemoryLoad(), idealDiskLoad()).divide(redundancyAdjustment());
+ var ideal = new Load(cpu.idealLoad(), memory.idealLoad(), disk.idealLoad()).divide(redundancyAdjustment());
if ( !cluster.bcpGroupInfo().isEmpty() && cluster.bcpGroupInfo().queryRate() > 0) {
// Since we have little local information, use information about query cost in other groups
@@ -214,18 +233,7 @@ public class ClusterModel {
public Autoscaling.Metrics metrics() {
return new Autoscaling.Metrics(averageQueryRate().orElse(0),
growthRateHeadroom(),
- cpuCostPerQuery().orElse(0));
- }
-
- /** Returns the instant this model was created. */
- public Instant at() { return at;}
-
- private OptionalDouble cpuCostPerQuery() {
- if (averageQueryRate().isEmpty() || averageQueryRate().getAsDouble() == 0.0) return OptionalDouble.empty();
- // TODO: Query rate should generally be sampled at the time where we see the peak resource usage
- int fanOut = clusterSpec.type().isContainer() ? 1 : groupSize();
- return OptionalDouble.of(peakLoad().cpu() * queryCpuFraction() * fanOut * nodes.not().retired().first().get().resources().vcpu()
- / averageQueryRate().getAsDouble() / groupCount());
+ cpu.costPerQuery().orElse(0));
}
private Load adjustQueryDependentIdealLoadByBcpGroupInfo(Load ideal) {
@@ -235,7 +243,7 @@ public class ClusterModel {
: cluster.bcpGroupInfo().queryRate() )
* cluster.bcpGroupInfo().growthRateHeadroom() * trafficShiftHeadroom();
double neededTotalVcpPerGroup = cluster.bcpGroupInfo().cpuCostPerQuery() * targetQueryRateToHandle / groupCount() +
- ( 1 - queryCpuFraction()) * idealCpuLoad() *
+ ( 1 - cpu.queryFraction()) * cpu.idealLoad() *
(clusterSpec.type().isContainer() ? 1 : groupSize());
double cpuAdjustment = neededTotalVcpPerGroup / currentClusterTotalVcpuPerGroup;
@@ -249,10 +257,6 @@ public class ClusterModel {
return lastCompletion.get().isAfter(clock.instant().minus(period));
}
- private ClusterNodesTimeseries nodeTimeseries() { return nodeTimeseries; }
-
- private ClusterTimeseries clusterTimeseries() { return clusterTimeseries; }
-
/**
* Returns the predicted max query growth rate per minute as a fraction of the average traffic
* in the scaling window.
@@ -302,20 +306,9 @@ public class ClusterModel {
return nodes > 1 ? (groups == 1 ? 1 : groups - 1) : groups;
}
- /** Ideal cpu load must take the application traffic fraction into account. */
- private double idealCpuLoad() {
- double queryCpuFraction = queryCpuFraction();
-
- // Assumptions: 1) Write load is not organic so we should not grow to handle more.
- // (TODO: But allow applications to set their target write rate and size for that)
- // 2) Write load does not change in BCP scenarios.
- return queryCpuFraction * 1/growthRateHeadroom() * 1/trafficShiftHeadroom() * idealQueryCpuLoad +
- (1 - queryCpuFraction) * idealWriteCpuLoad;
- }
-
/** Returns the headroom for growth during organic traffic growth as a multiple of current resources. */
private double growthRateHeadroom() {
- if ( ! zone.environment().isProduction()) return 1;
+ if ( ! nodeRepository.zone().environment().isProduction()) return 1;
double growthRateHeadroom = 1 + maxQueryGrowthRate() * scalingDuration().toMinutes();
// Cap headroom at 10% above the historical observed peak
if (queryFractionOfMax() != 0)
@@ -329,7 +322,7 @@ public class ClusterModel {
* as a multiple of current resources.
*/
private double trafficShiftHeadroom() {
- if ( ! zone.environment().isProduction()) return 1;
+ if ( ! nodeRepository.zone().environment().isProduction()) return 1;
if (canRescaleWithinBcpDeadline()) return 1;
double trafficShiftHeadroom;
if (application.status().maxReadShare() == 0) // No traffic fraction data
@@ -350,50 +343,88 @@ public class ClusterModel {
return ( (headroom -1 ) * Math.min(1, averageQueryRate().orElse(0) / queryRateGivingFullConfidence) ) + 1;
}
- /** The estimated fraction of cpu usage which goes to processing queries vs. writes */
- private double queryCpuFraction() {
- OptionalDouble writeRate = clusterTimeseries().writeRate(scalingDuration(), clock);
- if (averageQueryRate().orElse(0) == 0 && writeRate.orElse(0) == 0) return queryCpuFraction(0.5);
- return queryCpuFraction(averageQueryRate().orElse(0) / (averageQueryRate().orElse(0) + writeRate.orElse(0)));
- }
+ private class CpuModel {
- private double queryCpuFraction(double queryRateFraction) {
- double relativeQueryCost = 9; // How much more expensive are queries than writes? TODO: Measure
- double writeFraction = 1 - queryRateFraction;
- return queryRateFraction * relativeQueryCost / (queryRateFraction * relativeQueryCost + writeFraction);
- }
+ /** Ideal cpu load must take the application traffic fraction into account. */
+ double idealLoad() {
+ double queryCpuFraction = queryFraction();
- private double idealMemoryLoad() {
- if (clusterSpec.type().isContainer()) return idealContainerMemoryLoad;
- if (clusterSpec.type() == ClusterSpec.Type.admin) return idealContainerMemoryLoad; // Not autoscaled, but ideal shown in console
- return idealContentMemoryLoad;
- }
+ // Assumptions: 1) Write load is not organic so we should not grow to handle more.
+ // (TODO: But allow applications to set their target write rate and size for that)
+ // 2) Write load does not change in BCP scenarios.
+ return queryCpuFraction * 1/growthRateHeadroom() * 1/trafficShiftHeadroom() * idealQueryCpuLoad +
+ (1 - queryCpuFraction) * idealWriteCpuLoad;
+ }
+
+ OptionalDouble costPerQuery() {
+ if (averageQueryRate().isEmpty() || averageQueryRate().getAsDouble() == 0.0) return OptionalDouble.empty();
+ // TODO: Query rate should generally be sampled at the time where we see the peak resource usage
+ int fanOut = clusterSpec.type().isContainer() ? 1 : groupSize();
+ return OptionalDouble.of(peakLoad().cpu() * cpu.queryFraction() * fanOut * nodes.not().retired().first().get().resources().vcpu()
+ / averageQueryRate().getAsDouble() / groupCount());
+ }
+
+ /** The estimated fraction of cpu usage which goes to processing queries vs. writes */
+ double queryFraction() {
+ OptionalDouble writeRate = clusterTimeseries().writeRate(scalingDuration(), clock);
+ if (averageQueryRate().orElse(0) == 0 && writeRate.orElse(0) == 0) return queryFraction(0.5);
+ return queryFraction(averageQueryRate().orElse(0) / (averageQueryRate().orElse(0) + writeRate.orElse(0)));
+ }
+
+ double queryFraction(double queryRateFraction) {
+ double relativeQueryCost = 9; // How much more expensive are queries than writes? TODO: Measure
+ double writeFraction = 1 - queryRateFraction;
+ return queryRateFraction * relativeQueryCost / (queryRateFraction * relativeQueryCost + writeFraction);
+ }
- private double idealDiskLoad() {
- // Stateless clusters are not expected to consume more disk over time -
- // if they do it is due to logs which will be rotated away right before the disk is full
- return clusterSpec.isStateful() ? idealContentDiskLoad : idealContainerDiskLoad;
}
- /**
- * Create a cluster model if possible and logs a warning and returns empty otherwise.
- * This is useful in cases where it's possible to continue without the cluster model,
- * as QuestDb is known to temporarily fail during reading of data.
- */
- public static Optional<ClusterModel> create(Zone zone,
- Application application,
- ClusterSpec clusterSpec,
- Cluster cluster,
- NodeList clusterNodes,
- MetricsDb metricsDb,
- Clock clock) {
- try {
- return Optional.of(new ClusterModel(zone, application, clusterSpec, cluster, clusterNodes, metricsDb, clock));
+ private class MemoryModel {
+
+ double idealLoad() {
+ if (clusterSpec.type().isContainer()) return idealContainerMemoryLoad;
+ if (clusterSpec.type() == ClusterSpec.Type.admin) return idealContainerMemoryLoad; // Not autoscaled, but ideal shown in console
+ return idealContentMemoryLoad;
+ }
+
+ /**
+ * Returns the fraction of memory of the current allocation which is currently consumed by
+ * fixed data structures which take the same amount of space regardless of document volume.
+ */
+ double fixedFraction() {
+ if (clusterSpec().type().isContainer()) return 1.0;
+ double fixedMemory = nodeMemoryOverheadGb +
+ (averageReal() - nodeMemoryOverheadGb) * 0.05; // TODO: Measure actual content node usage
+ return fixedMemory / averageReal();
+ }
+
+ double averageReal() {
+ if (nodes.isEmpty()) { // we're estimating
+ var initialResources = new CapacityPolicies(nodeRepository).specifyFully(cluster.minResources().nodeResources(),
+ clusterSpec,
+ application.id());
+ return nodeRepository.resourcesCalculator().requestToReal(initialResources,
+ nodeRepository.exclusiveAllocation(clusterSpec),
+ false).memoryGb();
+ }
+ else {
+ return nodes.stream()
+ .mapToDouble(node -> nodeRepository.resourcesCalculator().realResourcesOf(node, nodeRepository).memoryGb())
+ .average()
+ .getAsDouble();
+ }
}
- catch (Exception e) {
- log.log(Level.WARNING, "Failed creating a cluster model for " + application + " " + cluster, e);
- return Optional.empty();
+
+ }
+
+ private class DiskModel {
+
+ double idealLoad() {
+ // Stateless clusters are not expected to consume more disk over time -
+ // if they do it is due to logs which will be rotated away right before the disk is full
+ return clusterSpec.isStateful() ? idealContentDiskLoad : idealContainerDiskLoad;
}
+
}
}
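
The ClusterModel hunk above folds the cpu and memory heuristics into small inner models. As a minimal standalone sketch (illustrative names only; the constants are the ones visible in the diff: relativeQueryCost = 9, nodeMemoryOverheadGb = 0.7), the two formulas behind CpuModel.queryFraction and MemoryModel.fixedFraction work out as follows:

// Standalone sketch (not part of the patch): reproduces the two small formulas the
// ClusterModel refactoring moves into CpuModel and MemoryModel, with the constants
// used in the diff. Class and method names here are illustrative only.
public class ClusterModelFormulaSketch {

    // CpuModel.queryFraction(double): weight the query share of traffic by how much
    // more expensive a query is assumed to be than a write.
    static double queryCpuFraction(double queryRateFraction) {
        double relativeQueryCost = 9;
        double writeFraction = 1 - queryRateFraction;
        return queryRateFraction * relativeQueryCost / (queryRateFraction * relativeQueryCost + writeFraction);
    }

    // MemoryModel.fixedFraction(): share of real node memory taken by fixed overhead
    // (node processes plus an assumed 5% of the remainder for fixed data structures).
    static double fixedMemoryFraction(double realMemoryGb) {
        double nodeMemoryOverheadGb = 0.7;
        double fixedMemory = nodeMemoryOverheadGb + (realMemoryGb - nodeMemoryOverheadGb) * 0.05;
        return fixedMemory / realMemoryGb;
    }

    public static void main(String[] args) {
        // Equal query and write rates: queries still dominate cpu, 0.5*9 / (0.5*9 + 0.5) = 0.9.
        System.out.println(queryCpuFraction(0.5));   // 0.9
        // A 32 GB content node: (0.7 + 31.3 * 0.05) / 32 ≈ 0.071.
        System.out.println(fixedMemoryFraction(32)); // ≈ 0.0708
    }
}
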
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerInstance.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerInstance.java
index 5284511af47..e228d31384c 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerInstance.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerInstance.java
@@ -119,10 +119,6 @@ public class LoadBalancerInstance {
cloudAccount);
}
- public LoadBalancerInstance with(CloudAccount cloudAccount) {
- return new LoadBalancerInstance(hostname, ipAddress, dnsZone, ports, networks, reals, settings, serviceIds, cloudAccount);
- }
-
/** Prepends the given service IDs, possibly changing the order of those we have in this. */
public LoadBalancerInstance withServiceIds(List<PrivateServiceId> serviceIds) {
List<PrivateServiceId> ids = new ArrayList<>(serviceIds);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainer.java
index 50eee9e33b3..a1eae42da38 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainer.java
@@ -95,16 +95,21 @@ public abstract class ApplicationMaintainer extends NodeRepositoryMaintainer {
@Override
public void shutdown() {
super.shutdown();
- deploymentExecutor.shutdownNow();
+ deploymentExecutor.shutdown();
}
@Override
public void awaitShutdown() {
+ Instant deadline = clock().instant().plus(Duration.ofMinutes(1));
super.awaitShutdown();
try {
+ long remainder = Duration.between(clock().instant(), deadline).toMillis();
+ if (deploymentExecutor.isShutdown()) return;
+
// Give deployments in progress some time to complete
- if (!deploymentExecutor.awaitTermination(1, TimeUnit.MINUTES)) {
+ if (remainder < 0 || !deploymentExecutor.awaitTermination(remainder, TimeUnit.MILLISECONDS)) {
log.log(Level.WARNING, "Failed to shut down deployment executor within deadline");
+ deploymentExecutor.shutdownNow();
}
} catch (InterruptedException e) {
throw new RuntimeException(e);
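
The ApplicationMaintainer hunk replaces an unconditional shutdownNow() with an orderly shutdown bounded by a shared one-minute deadline. A minimal standalone sketch of that pattern with plain java.util.concurrent types (names here are illustrative):

// Standalone sketch (not part of the patch) of the deadline-bounded shutdown pattern
// the ApplicationMaintainer hunk switches to: shutdown() instead of shutdownNow(),
// then spend whatever remains of a fixed deadline waiting before forcing termination.
import java.time.Duration;
import java.time.Instant;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class GracefulShutdownSketch {

    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.submit(() -> { /* some in-flight deployment */ });

        Instant deadline = Instant.now().plus(Duration.ofMinutes(1));
        executor.shutdown(); // stop accepting new work, let running tasks finish

        // ... other shutdown work may consume part of the deadline here ...

        long remainderMillis = Duration.between(Instant.now(), deadline).toMillis();
        if (remainderMillis < 0 || !executor.awaitTermination(remainderMillis, TimeUnit.MILLISECONDS)) {
            System.err.println("Failed to shut down executor within deadline");
            executor.shutdownNow(); // only now interrupt whatever is still running
        }
    }
}
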
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/DeprovisionedExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/DeprovisionedExpirer.java
new file mode 100644
index 00000000000..fcfde53c0df
--- /dev/null
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/DeprovisionedExpirer.java
@@ -0,0 +1,31 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.provision.maintenance;
+
+import com.yahoo.jdisc.Metric;
+import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeRepository;
+import com.yahoo.vespa.hosted.provision.node.History;
+
+import java.time.Duration;
+import java.util.List;
+
+/**
+ * This removes hosts from {@link com.yahoo.vespa.hosted.provision.Node.State#deprovisioned} after a grace period.
+ *
+ * @author mpolden
+ */
+public class DeprovisionedExpirer extends Expirer {
+
+ DeprovisionedExpirer(NodeRepository nodeRepository, Duration expiryTime, Metric metric) {
+ super(Node.State.deprovisioned, History.Event.Type.deprovisioned, nodeRepository, expiryTime, metric);
+ }
+
+ @Override
+ protected void expire(List<Node> expired) {
+ if (!nodeRepository().zone().cloud().dynamicProvisioning()) return; // Never expire in statically provisioned zones
+ for (var node : expired) {
+ nodeRepository().nodes().forget(node);
+ }
+ }
+
+}

diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Expirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Expirer.java
index 5a77fcca85c..a8929cf9d22 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Expirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Expirer.java
@@ -12,8 +12,7 @@ import java.util.List;
import java.util.logging.Logger;
/**
- * Superclass of expiry tasks which moves nodes from some state to the dirty state.
- * These jobs runs at least every 25 minutes.
+ * Base class for maintenance of nodes that linger in a particular state too long.
*
* @author bratseth
*/
@@ -61,6 +60,6 @@ public abstract class Expirer extends NodeRepositoryMaintainer {
}
/** Implement this callback to take action to expire these nodes */
- protected abstract void expire(List<Node> node);
+ protected abstract void expire(List<Node> expired);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java
index a4bc3a1aea5..3f9e8ef4407 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java
@@ -24,25 +24,28 @@ import com.yahoo.vespa.hosted.provision.NodeMutex;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.History;
+import com.yahoo.vespa.hosted.provision.provisioning.HostProvisionRequest;
import com.yahoo.vespa.hosted.provision.provisioning.HostProvisioner;
import com.yahoo.vespa.hosted.provision.provisioning.HostProvisioner.HostSharing;
import com.yahoo.vespa.hosted.provision.provisioning.NodeCandidate;
import com.yahoo.vespa.hosted.provision.provisioning.NodePrioritizer;
import com.yahoo.vespa.hosted.provision.provisioning.NodeSpec;
-import com.yahoo.vespa.hosted.provision.provisioning.ProvisionedHost;
import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
-import java.util.Comparator;
import java.util.EnumSet;
-import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
+import java.util.Set;
import java.util.logging.Level;
import java.util.logging.Logger;
-import java.util.stream.Collectors;
+
+import static java.util.Comparator.comparing;
+import static java.util.Comparator.naturalOrder;
+import static java.util.stream.Collectors.groupingBy;
+import static java.util.stream.Collectors.toSet;
/**
* @author freva
@@ -67,10 +70,9 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
@Override
protected double maintain() {
- NodeList nodes = nodeRepository().nodes().list();
- List<Node> excessHosts;
+ List<Node> provisionedSnapshot;
try {
- excessHosts = provision(nodes);
+ provisionedSnapshot = provision(nodeRepository().nodes().list());
} catch (NodeAllocationException | IllegalStateException e) {
log.log(Level.WARNING, "Failed to allocate preprovisioned capacity and/or find excess hosts: " + e.getMessage());
return 0; // avoid removing excess hosts
@@ -79,33 +81,68 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
return 0; // avoid removing excess hosts
}
- return markForRemoval(excessHosts);
+ return markForRemoval(provisionedSnapshot);
}
- private double markForRemoval(List<Node> excessHosts) {
- if (excessHosts.isEmpty()) return 1;
+ private double markForRemoval(List<Node> provisionedSnapshot) {
+ // Group nodes by parent; no parent means it's a host.
+ Map<Optional<String>, List<Node>> nodesByParent = provisionedSnapshot.stream().collect(groupingBy(Node::parentHostname));
+
+ // Find all hosts that we once thought were empty (first clause), or whose children are now all removable (second clause).
+ List<Node> emptyHosts = nodesByParent.get(Optional.<String>empty()).stream()
+ .filter(host -> host.hostEmptyAt().isPresent()
+ || nodesByParent.getOrDefault(Optional.of(host.hostname()), List.of())
+ .stream().allMatch(HostCapacityMaintainer::canDeprovision))
+ .toList();
+
+ if (emptyHosts.isEmpty()) return 1;
int attempts = 0, success = 0;
- for (List<Node> typeExcessHosts : excessHosts.stream().collect(Collectors.groupingBy(Node::type)).values()) {
+ for (Set<Node> typeEmptyHosts : emptyHosts.stream().collect(groupingBy(Node::type, toSet())).values()) {
attempts++;
// All nodes in the list are hosts of the same type, so they use the same lock regardless of their allocation
- Optional<NodeMutex> appMutex = nodeRepository().nodes().lockAndGet(typeExcessHosts.get(0), Duration.ofSeconds(10));
+ Optional<NodeMutex> appMutex = nodeRepository().nodes().lockAndGet(typeEmptyHosts.iterator().next(), Duration.ofSeconds(10));
if (appMutex.isEmpty()) continue;
try (Mutex lock = appMutex.get();
Mutex unallocatedLock = nodeRepository().nodes().lockUnallocated()) {
// Re-read all nodes under lock and compute the candidates for removal. The actual nodes we want
- // to mark for removal is the intersection with typeExcessHosts
- List<Node> toMarkForRemoval = candidatesForRemoval(nodeRepository().nodes().list().asList()).stream()
- .filter(typeExcessHosts::contains)
- .toList();
+ // to mark for removal is the intersection with typeEmptyHosts, which excludes the preprovisioned hosts.
+ Map<Optional<String>, List<Node>> currentNodesByParent = nodeRepository().nodes().list().stream().collect(groupingBy(Node::parentHostname));
+ List<Node> candidateHosts = new ArrayList<>(currentNodesByParent.get(Optional.<String>empty()));
+ candidateHosts.retainAll(typeEmptyHosts);
- for (Node host : toMarkForRemoval) {
+ for (Node host : candidateHosts) {
attempts++;
- // Retire the host to parked if possible, otherwise move it straight to parked
- if (EnumSet.of(Node.State.reserved, Node.State.active, Node.State.inactive).contains(host.state())) {
- Node retiredHost = host.withWantToRetire(true, true, Agent.HostCapacityMaintainer, nodeRepository().clock().instant());
- nodeRepository().nodes().write(retiredHost, lock);
- } else nodeRepository().nodes().park(host.hostname(), true, Agent.HostCapacityMaintainer, "Parked for removal");
+
+ // Any hosts that are no longer empty should be marked as such, and excluded from removal.
+ if (currentNodesByParent.getOrDefault(Optional.of(host.hostname()), List.of())
+ .stream().anyMatch(n -> ! canDeprovision(n))) {
+ if (host.hostEmptyAt().isPresent()) {
+ nodeRepository().nodes().write(host.withHostEmptyAt(null), lock);
+ }
+ }
+ // If the host is still empty, we can mark it as empty now, or mark it for removal if it has already expired.
+ else {
+ Instant now = clock().instant();
+ Node emptyHost = host.hostEmptyAt().isPresent() ? host : host.withHostEmptyAt(now);
+ boolean expired = ! now.isBefore(emptyHost.hostEmptyAt().get().plus(host.hostTTL().orElse(Duration.ZERO)));
+
+ if (expired && canRemoveHost(emptyHost)) {
+ // Retire the host to parked if possible, otherwise move it straight to parked.
+ if (EnumSet.of(Node.State.reserved, Node.State.active, Node.State.inactive).contains(host.state())) {
+ emptyHost = emptyHost.withWantToRetire(true, true, Agent.HostCapacityMaintainer, now);
+ nodeRepository().nodes().write(emptyHost, lock);
+ }
+ else {
+ if (emptyHost != host) nodeRepository().nodes().write(emptyHost, lock);
+ nodeRepository().nodes().park(host.hostname(), true, Agent.HostCapacityMaintainer, "Parked for removal");
+ }
+ }
+ else {
+ if (emptyHost != host) nodeRepository().nodes().write(emptyHost, lock);
+ }
+ }
+
success++;
}
} catch (UncheckedTimeoutException e) {
@@ -116,35 +153,13 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
return asSuccessFactorDeviation(attempts, attempts - success);
}
- /**
- * Provision hosts to ensure there is room to allocate spare nodes.
- *
- * @param nodeList list of all nodes
- * @return excess hosts that can safely be deprovisioned: An excess host 1. contains no nodes allocated
- * to an application, and assuming the spare nodes have been allocated, and 2. is not parked
- * without wantToDeprovision (which means an operator is looking at the node).
- */
private List<Node> provision(NodeList nodeList) {
- var nodes = new ArrayList<>(provisionUntilNoDeficit(nodeList));
- return candidatesForRemoval(nodes).stream()
- .sorted(Comparator.comparing(node -> node.history().events().stream()
- .map(History.Event::at).min(Comparator.naturalOrder()).orElse(Instant.MIN)))
- .toList();
- }
-
- private static List<Node> candidatesForRemoval(List<Node> nodes) {
- Map<String, Node> removableHostsByHostname = new HashMap<>();
- for (var node : nodes) {
- if (canRemoveHost(node)) {
- removableHostsByHostname.put(node.hostname(), node);
- }
- }
- for (var node : nodes) {
- if (node.parentHostname().isPresent() && !canDeprovision(node)) {
- removableHostsByHostname.remove(node.parentHostname().get());
- }
- }
- return List.copyOf(removableHostsByHostname.values());
+ return provisionUntilNoDeficit(nodeList).stream()
+ .sorted(comparing(node -> node.history().events().stream()
+ .map(History.Event::at)
+ .min(naturalOrder())
+ .orElse(Instant.MIN)))
+ .toList();
}
private static boolean canRemoveHost(Node host) {
@@ -196,10 +211,12 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
Version osVersion = nodeRepository().osVersions().targetFor(NodeType.host).orElse(Version.emptyVersion);
List<Integer> provisionIndices = nodeRepository().database().readProvisionIndices(count);
List<Node> hosts = new ArrayList<>();
- hostProvisioner.provisionHosts(provisionIndices, NodeType.host, nodeResources, ApplicationId.defaultId(), osVersion,
- HostSharing.shared, Optional.empty(), Optional.empty(), nodeRepository().zone().cloud().account(),
+ HostProvisionRequest request = new HostProvisionRequest(provisionIndices, NodeType.host, nodeResources, ApplicationId.defaultId(), osVersion,
+ HostSharing.shared, Optional.empty(), Optional.empty(),
+ nodeRepository().zone().cloud().account(), false);
+ hostProvisioner.provisionHosts(request,
provisionedHosts -> {
- hosts.addAll(provisionedHosts.stream().map(ProvisionedHost::generateHost).toList());
+ hosts.addAll(provisionedHosts.stream().map(host -> host.generateHost(Duration.ZERO)).toList());
nodeRepository().nodes().addNodes(hosts, Agent.HostCapacityMaintainer);
});
return hosts;
@@ -246,7 +263,8 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
// build() requires a version, even though it is not (should not be) used
.vespaVersion(Vtag.currentVersion)
.build();
- NodeSpec nodeSpec = NodeSpec.from(clusterCapacity.count(), nodeResources, false, true, nodeRepository().zone().cloud().account());
+ NodeSpec nodeSpec = NodeSpec.from(clusterCapacity.count(), nodeResources, false, true,
+ nodeRepository().zone().cloud().account(), Duration.ZERO);
int wantedGroups = 1;
NodePrioritizer prioritizer = new NodePrioritizer(allNodes, applicationId, clusterSpec, nodeSpec, wantedGroups,
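
The markForRemoval rewrite above tracks how long a host has been empty and only deprovisions it once its TTL has run out, clearing the stamp again if children reappear. A minimal standalone sketch of that decision, using an illustrative HostState record rather than the node-repository Node type:

// Standalone sketch (not part of the patch) of the empty-host TTL decision introduced
// above: clear the "empty since" stamp when non-removable children reappear, otherwise
// stamp it on first observation and deprovision only once now >= emptyAt + hostTTL.
import java.time.Duration;
import java.time.Instant;
import java.util.Optional;

public class EmptyHostTtlSketch {

    record HostState(Optional<Instant> hostEmptyAt, Duration hostTTL) { }

    enum Decision { KEEP_AND_CLEAR_STAMP, KEEP_AND_STAMP, DEPROVISION }

    static Decision evaluate(HostState host, boolean hasNonRemovableChildren, Instant now) {
        if (hasNonRemovableChildren) return Decision.KEEP_AND_CLEAR_STAMP;  // no longer empty
        Instant emptyAt = host.hostEmptyAt().orElse(now);                   // stamp on first observation
        boolean expired = !now.isBefore(emptyAt.plus(host.hostTTL()));
        return expired ? Decision.DEPROVISION : Decision.KEEP_AND_STAMP;
    }

    public static void main(String[] args) {
        Instant now = Instant.now();
        HostState freshlyEmpty = new HostState(Optional.empty(), Duration.ofHours(2));
        HostState longEmpty = new HostState(Optional.of(now.minus(Duration.ofHours(3))), Duration.ofHours(2));
        System.out.println(evaluate(freshlyEmpty, false, now)); // KEEP_AND_STAMP
        System.out.println(evaluate(longEmpty, false, now));    // DEPROVISION
        System.out.println(evaluate(longEmpty, true, now));     // KEEP_AND_CLEAR_STAMP
    }
}
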
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgrader.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgrader.java
new file mode 100644
index 00000000000..b16f2c5c17e
--- /dev/null
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgrader.java
@@ -0,0 +1,99 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.provision.maintenance;
+
+import com.yahoo.config.provision.Deployer;
+import com.yahoo.config.provision.NodeAllocationException;
+import com.yahoo.config.provision.NodeType;
+import com.yahoo.jdisc.Metric;
+import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
+import com.yahoo.vespa.hosted.provision.NodeRepository;
+import com.yahoo.vespa.hosted.provision.node.Agent;
+import com.yahoo.vespa.hosted.provision.provisioning.HostProvisioner;
+
+import java.time.Duration;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Random;
+import java.util.logging.Level;
+
+/**
+ * This maintainer attempts to upgrade a single host running on an older flavor generation. The upgrade happens by
+ * marking the host on the old generation for retirement, and redeploying to provision a replacement host on a newer
+ * generation.
+ *
+ * If the cloud provider reports a lack of capacity for the newer generation, retirement of the host is
+ * cancelled, and the upgrade is attempted on the next host running an old flavor, if any.
+ *
+ * Once a host has been marked for upgrade, {@link HostResumeProvisioner} will complete provisioning of the replacement
+ * host.
+ *
+ * @author mpolden
+ */
+public class HostFlavorUpgrader extends NodeRepositoryMaintainer {
+
+ private final HostProvisioner hostProvisioner;
+ private final Random random;
+ private final Deployer deployer;
+ private final Metric metric;
+
+ public HostFlavorUpgrader(NodeRepository nodeRepository, Duration interval, Metric metric, Deployer deployer, HostProvisioner hostProvisioner) {
+ super(nodeRepository, interval, metric);
+ this.hostProvisioner = Objects.requireNonNull(hostProvisioner);
+ this.deployer = Objects.requireNonNull(deployer);
+ this.metric = Objects.requireNonNull(metric);
+ this.random = new Random(nodeRepository.clock().millis());
+ }
+
+ @Override
+ protected double maintain() {
+ if (!nodeRepository().zone().cloud().dynamicProvisioning()) return 1.0; // Not relevant in zones with static capacity
+ if (nodeRepository().zone().environment().isTest()) return 1.0; // Short-lived deployments
+ if (!nodeRepository().nodes().isWorking()) return 0.0;
+
+ NodeList allNodes = nodeRepository().nodes().list();
+ if (!NodeMover.zoneIsStable(allNodes)) return 1.0;
+ return upgradeHostFlavor(allNodes);
+ }
+
+ private double upgradeHostFlavor(NodeList allNodes) {
+ NodeList activeNodes = allNodes.nodeType(NodeType.tenant)
+ .state(Node.State.active)
+ .shuffle(random); // Shuffle to avoid getting stuck trying to upgrade the same host
+ for (var node : activeNodes) {
+ Optional<Node> parent = allNodes.parentOf(node);
+ if (parent.isEmpty()) continue;
+ if (!hostProvisioner.canUpgradeFlavor(parent.get(), node)) continue;
+ if (parent.get().status().wantToUpgradeFlavor()) continue; // Already upgrading
+
+ boolean redeployed = false;
+ boolean deploymentValid = false;
+ try (MaintenanceDeployment deployment = new MaintenanceDeployment(node.allocation().get().owner(), deployer, metric, nodeRepository(), true)) {
+ deploymentValid = deployment.isValid();
+ if (!deploymentValid) continue;
+
+ log.log(Level.INFO, () -> "Redeploying " + node.allocation().get().owner() + " to upgrade flavor (" +
+ parent.get().flavor().name() + ") of " + parent.get());
+ upgradeFlavor(parent.get(), true);
+ deployment.activate();
+ redeployed = true;
+ return 1.0;
+ } catch (NodeAllocationException e) {
+ // Fine, no capacity for upgrade
+ } finally {
+ if (deploymentValid && !redeployed) { // Cancel upgrade if redeploy failed
+ upgradeFlavor(parent.get(), false);
+ }
+ }
+ }
+ return 1.0;
+ }
+
+ private void upgradeFlavor(Node host, boolean upgrade) {
+ nodeRepository().nodes().upgradeFlavor(host.hostname(),
+ Agent.HostFlavorUpgrader,
+ nodeRepository().clock().instant(),
+ upgrade);
+ }
+
+}
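
The class comment above describes cancelling the upgrade and moving to the next old-flavor host when the cloud provider lacks capacity. A minimal standalone sketch of that mark, redeploy and cancel-on-failure flow, with illustrative stand-in types:

// Standalone sketch (not part of the patch) of the cancel-on-failure pattern used in
// upgradeHostFlavor(): mark a host for upgrade, try to redeploy, and if the redeploy
// does not complete (for example because the cloud has no capacity for the newer
// flavor), undo the mark in the finally block and try the next candidate.
import java.util.List;

public class CancelOnFailureSketch {

    static class NoCapacityException extends RuntimeException { }

    interface Host { void markForFlavorUpgrade(boolean upgrade); }
    interface Deployment { void activate() throws NoCapacityException; }

    static void upgradeFirstPossible(List<Host> candidates, Deployment deployment) {
        for (Host host : candidates) {
            boolean redeployed = false;
            try {
                host.markForFlavorUpgrade(true);   // request a replacement on a newer flavor
                deployment.activate();             // may fail if no capacity is available
                redeployed = true;
                return;                            // at most one upgrade per maintenance run
            } catch (NoCapacityException e) {
                // fine, try the next host on an old flavor
            } finally {
                if (!redeployed) host.markForFlavorUpgrade(false); // cancel so the host is not left retiring
            }
        }
    }
}
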
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java
index be398f6e8ad..14013007da0 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java
@@ -38,6 +38,7 @@ class MaintenanceDeployment implements Closeable {
private final Metric metric;
private final Optional<Mutex> lock;
private final Optional<Deployment> deployment;
+ private final boolean throwOnFailure;
private boolean closed = false;
@@ -45,8 +46,17 @@ class MaintenanceDeployment implements Closeable {
Deployer deployer,
Metric metric,
NodeRepository nodeRepository) {
+ this(application, deployer, metric, nodeRepository, false);
+ }
+
+ public MaintenanceDeployment(ApplicationId application,
+ Deployer deployer,
+ Metric metric,
+ NodeRepository nodeRepository,
+ boolean throwOnFailure) {
this.application = application;
this.metric = metric;
+ this.throwOnFailure = throwOnFailure;
Optional<Mutex> lock = tryLock(application, nodeRepository);
try {
@@ -93,6 +103,9 @@ class MaintenanceDeployment implements Closeable {
} catch (RuntimeException e) {
metric.add(ConfigServerMetrics.MAINTENANCE_DEPLOYMENT_FAILURE.baseName(), 1, metric.createContext(Map.of()));
log.log(Level.WARNING, "Exception on maintenance deploy of " + application, e);
+ if (throwOnFailure) {
+ throw e;
+ }
return Optional.empty();
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java
index 4f94f0fab53..f5ea5f7d20b 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.maintenance;
+import ai.vespa.metrics.ConfigServerMetrics;
import com.yahoo.collections.Pair;
import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
@@ -18,6 +19,7 @@ import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.Node.State;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
+import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.node.Allocation;
import com.yahoo.vespa.hosted.provision.node.ClusterId;
import com.yahoo.vespa.hosted.provision.persistence.CacheStats;
@@ -103,9 +105,9 @@ public class MetricsReporter extends NodeRepositoryMaintainer {
nonActiveFraction = (double) nonActiveNodes / ((double) activeNodes + (double) nonActiveNodes);
}
Metric.Context context = getContext(dimensions(clusterId.application(), clusterId.cluster()));
- metric.set("nodes.active", activeNodes, context);
- metric.set("nodes.nonActive", nonActiveNodes, context);
- metric.set("nodes.nonActiveFraction", nonActiveFraction, context);
+ metric.set(ConfigServerMetrics.NODES_ACTIVE.baseName(), activeNodes, context);
+ metric.set(ConfigServerMetrics.NODES_NON_ACTIVE.baseName(), nonActiveNodes, context);
+ metric.set(ConfigServerMetrics.NODES_NON_ACTIVE_FRACTION.baseName(), nonActiveFraction, context);
});
}
@@ -118,7 +120,7 @@ public class MetricsReporter extends NodeRepositoryMaintainer {
byCluster.forEach((clusterId, clusterNodes) -> {
Metric.Context context = getContext(dimensions(clusterId.application(), clusterId.cluster()));
updateExclusiveSwitchMetrics(clusterNodes, nodes, context);
- updateClusterCostMetrics(clusterNodes, context);
+ updateClusterCostMetrics(clusterId, clusterNodes, context);
});
}
@@ -126,32 +128,39 @@ public class MetricsReporter extends NodeRepositoryMaintainer {
NodeList clusterHosts = allNodes.parentsOf(NodeList.copyOf(clusterNodes));
long nodesOnExclusiveSwitch = NodeList.copyOf(clusterNodes).onExclusiveSwitch(clusterHosts).size();
double exclusiveSwitchRatio = nodesOnExclusiveSwitch / (double) clusterNodes.size();
- metric.set("nodes.exclusiveSwitchFraction", exclusiveSwitchRatio,context);
+ metric.set(ConfigServerMetrics.NODES_EXCLUSIVE_SWITCH_FRACTION.baseName(), exclusiveSwitchRatio, context);
}
- private void updateClusterCostMetrics(List<Node> clusterNodes, Metric.Context context) {
+ private void updateClusterCostMetrics(ClusterId clusterId,
+ List<Node> clusterNodes, Metric.Context context) {
+ var cluster = nodeRepository().applications().get(clusterId.application())
+ .flatMap(application -> application.cluster(clusterId.cluster()));
+ if (cluster.isEmpty()) return;
double cost = clusterNodes.stream().mapToDouble(node -> node.resources().cost()).sum();
- metric.set("cluster.cost", cost, context);
+ metric.set(ConfigServerMetrics.CLUSTER_COST.baseName(), cost, context);
+ metric.set(ConfigServerMetrics.CLUSTER_LOAD_IDEAL_CPU.baseName(), cluster.get().target().ideal().cpu(), context);
+ metric.set(ConfigServerMetrics.CLUSTER_LOAD_IDEAL_MEMORY.baseName(), cluster.get().target().ideal().memory(), context);
+ metric.set(ConfigServerMetrics.CLUSTER_LOAD_IDEAL_DISK.baseName(), cluster.get().target().ideal().disk(), context);
}
private void updateZoneMetrics() {
- metric.set("zone.working", nodeRepository().nodes().isWorking() ? 1 : 0, null);
+ metric.set(ConfigServerMetrics.ZONE_WORKING.baseName(), nodeRepository().nodes().isWorking() ? 1 : 0, null);
}
private void updateCacheMetrics() {
CacheStats nodeCacheStats = nodeRepository().database().nodeSerializerCacheStats();
- metric.set("cache.nodeObject.hitRate", nodeCacheStats.hitRate(), null);
- metric.set("cache.nodeObject.evictionCount", nodeCacheStats.evictionCount(), null);
- metric.set("cache.nodeObject.size", nodeCacheStats.size(), null);
+ metric.set(ConfigServerMetrics.CACHE_NODE_OBJECT_HIT_RATE.baseName(), nodeCacheStats.hitRate(), null);
+ metric.set(ConfigServerMetrics.CACHE_NODE_OBJECT_EVICTION_COUNT.baseName(), nodeCacheStats.evictionCount(), null);
+ metric.set(ConfigServerMetrics.CACHE_NODE_OBJECT_SIZE.baseName(), nodeCacheStats.size(), null);
CacheStats curatorCacheStats = nodeRepository().database().cacheStats();
- metric.set("cache.curator.hitRate", curatorCacheStats.hitRate(), null);
- metric.set("cache.curator.evictionCount", curatorCacheStats.evictionCount(), null);
- metric.set("cache.curator.size", curatorCacheStats.size(), null);
+ metric.set(ConfigServerMetrics.CACHE_CURATOR_HIT_RATE.baseName(), curatorCacheStats.hitRate(), null);
+ metric.set(ConfigServerMetrics.CACHE_CURATOR_EVICTION_COUNT.baseName(), curatorCacheStats.evictionCount(), null);
+ metric.set(ConfigServerMetrics.CACHE_CURATOR_SIZE.baseName(), curatorCacheStats.size(), null);
}
private void updateMaintenanceMetrics() {
- metric.set("hostedVespa.pendingRedeployments", pendingRedeploymentsSupplier.get(), null);
+ metric.set(ConfigServerMetrics.HOSTED_VESPA_PENDING_REDEPLOYMENTS.baseName(), pendingRedeploymentsSupplier.get(), null);
}
/**
@@ -171,24 +180,24 @@ public class MetricsReporter extends NodeRepositoryMaintainer {
context = getContext(dimensions);
long wantedRestartGeneration = allocation.get().restartGeneration().wanted();
- metric.set("wantedRestartGeneration", wantedRestartGeneration, context);
+ metric.set(ConfigServerMetrics.WANTED_RESTART_GENERATION.baseName(), wantedRestartGeneration, context);
long currentRestartGeneration = allocation.get().restartGeneration().current();
- metric.set("currentRestartGeneration", currentRestartGeneration, context);
+ metric.set(ConfigServerMetrics.CURRENT_RESTART_GENERATION.baseName(), currentRestartGeneration, context);
boolean wantToRestart = currentRestartGeneration < wantedRestartGeneration;
- metric.set("wantToRestart", wantToRestart ? 1 : 0, context);
+ metric.set(ConfigServerMetrics.WANT_TO_RESTART.baseName(), wantToRestart ? 1 : 0, context);
- metric.set("retired", allocation.get().membership().retired() ? 1 : 0, context);
+ metric.set(ConfigServerMetrics.RETIRED.baseName(), allocation.get().membership().retired() ? 1 : 0, context);
Version wantedVersion = allocation.get().membership().cluster().vespaVersion();
double wantedVersionNumber = getVersionAsNumber(wantedVersion);
- metric.set("wantedVespaVersion", wantedVersionNumber, context);
+ metric.set(ConfigServerMetrics.WANTED_VESPA_VERSION.baseName(), wantedVersionNumber, context);
Optional<Version> currentVersion = node.status().vespaVersion();
boolean converged = currentVersion.isPresent() &&
currentVersion.get().equals(wantedVersion);
- metric.set("wantToChangeVespaVersion", converged ? 0 : 1, context);
+ metric.set(ConfigServerMetrics.WANT_TO_CHANGE_VESPA_VERSION.baseName(), converged ? 0 : 1, context);
if (node.cloudAccount().isEnclave(nodeRepository().zone())) {
- metric.set("hasWireguardKey", node.wireguardPubKey().isPresent() ? 1 : 0, context);
+ metric.set(ConfigServerMetrics.HAS_WIRE_GUARD_KEY.baseName(), node.wireguardPubKey().isPresent() ? 1 : 0, context);
}
} else {
context = getContext(Map.of("state", node.state().name(),
@@ -198,19 +207,19 @@ public class MetricsReporter extends NodeRepositoryMaintainer {
Optional<Version> currentVersion = node.status().vespaVersion();
if (currentVersion.isPresent()) {
double currentVersionNumber = getVersionAsNumber(currentVersion.get());
- metric.set("currentVespaVersion", currentVersionNumber, context);
+ metric.set(ConfigServerMetrics.CURRENT_VESPA_VERSION.baseName(), currentVersionNumber, context);
}
long wantedRebootGeneration = node.status().reboot().wanted();
- metric.set("wantedRebootGeneration", wantedRebootGeneration, context);
+ metric.set(ConfigServerMetrics.WANTED_REBOOT_GENERATION.baseName(), wantedRebootGeneration, context);
long currentRebootGeneration = node.status().reboot().current();
- metric.set("currentRebootGeneration", currentRebootGeneration, context);
+ metric.set(ConfigServerMetrics.CURRENT_REBOOT_GENERATION.baseName(), currentRebootGeneration, context);
boolean wantToReboot = currentRebootGeneration < wantedRebootGeneration;
- metric.set("wantToReboot", wantToReboot ? 1 : 0, context);
+ metric.set(ConfigServerMetrics.WANT_TO_REBOOT.baseName(), wantToReboot ? 1 : 0, context);
- metric.set("wantToRetire", node.status().wantToRetire() ? 1 : 0, context);
- metric.set("wantToDeprovision", node.status().wantToDeprovision() ? 1 : 0, context);
- metric.set("failReport", NodeFailer.reasonsToFailHost(node).isEmpty() ? 0 : 1, context);
+ metric.set(ConfigServerMetrics.WANT_TO_RETIRE.baseName(), node.status().wantToRetire() ? 1 : 0, context);
+ metric.set(ConfigServerMetrics.WANT_TO_DEPROVISION.baseName(), node.status().wantToDeprovision() ? 1 : 0, context);
+ metric.set(ConfigServerMetrics.FAIL_REPORT.baseName(), NodeFailer.reasonsToFailHost(node).isEmpty() ? 0 : 1, context);
HostName hostname = new HostName(node.hostname());
@@ -219,11 +228,11 @@ public class MetricsReporter extends NodeRepositoryMaintainer {
.map(reference -> nodeRepository().orchestrator().getHostInfo(reference, hostname))
.ifPresent(info -> {
int suspended = info.status().isSuspended() ? 1 : 0;
- metric.set("suspended", suspended, context);
+ metric.set(ConfigServerMetrics.SUSPENDED.baseName(), suspended, context);
long suspendedSeconds = info.suspendedSince()
.map(suspendedSince -> Duration.between(suspendedSince, clock().instant()).getSeconds())
.orElse(0L);
- metric.set("suspendedSeconds", suspendedSeconds, context);
+ metric.set(ConfigServerMetrics.SUSPENDED_SECONDS.baseName(), suspendedSeconds, context);
});
long numberOfServices;
@@ -237,30 +246,30 @@ public class MetricsReporter extends NodeRepositoryMaintainer {
numberOfServices = servicesCount.values().stream().mapToLong(Long::longValue).sum();
metric.set(
- "numberOfServicesUp",
+ ConfigServerMetrics.NUMBER_OF_SERVICES_UP.baseName(),
servicesCount.getOrDefault(ServiceStatus.UP, 0L),
context);
metric.set(
- "numberOfServicesNotChecked",
+ ConfigServerMetrics.NUMBER_OF_SERVICES_NOT_CHECKED.baseName(),
servicesCount.getOrDefault(ServiceStatus.NOT_CHECKED, 0L),
context);
long numberOfServicesDown = servicesCount.getOrDefault(ServiceStatus.DOWN, 0L);
- metric.set("numberOfServicesDown", numberOfServicesDown, context);
+ metric.set(ConfigServerMetrics.NUMBER_OF_SERVICES_DOWN.baseName(), numberOfServicesDown, context);
- metric.set("someServicesDown", (numberOfServicesDown > 0 ? 1 : 0), context);
+ metric.set(ConfigServerMetrics.SOME_SERVICES_DOWN.baseName(), (numberOfServicesDown > 0 ? 1 : 0), context);
- metric.set("numberOfServicesUnknown", servicesCount.getOrDefault(ServiceStatus.UNKNOWN, 0L), context);
+ metric.set(ConfigServerMetrics.NUMBER_OF_SERVICES_UNKNOWN.baseName(), servicesCount.getOrDefault(ServiceStatus.UNKNOWN, 0L), context);
boolean down = NodeHealthTracker.allDown(services);
- metric.set("nodeFailerBadNode", (down ? 1 : 0), context);
+ metric.set(ConfigServerMetrics.NODE_FAILER_BAD_NODE.baseName(), (down ? 1 : 0), context);
boolean nodeDownInNodeRepo = node.isDown();
- metric.set("downInNodeRepo", (nodeDownInNodeRepo ? 1 : 0), context);
+ metric.set(ConfigServerMetrics.DOWN_IN_NODE_REPO.baseName(), (nodeDownInNodeRepo ? 1 : 0), context);
}
- metric.set("numberOfServices", numberOfServices, context);
+ metric.set(ConfigServerMetrics.NUMBER_OF_SERVICES.baseName(), numberOfServices, context);
}
private static String toApp(ApplicationId applicationId) {
@@ -304,19 +313,19 @@ public class MetricsReporter extends NodeRepositoryMaintainer {
Metric.Context context = getContext(Map.of("lockPath", lockPath));
LatencyMetrics acquireLatencyMetrics = lockMetrics.getAndResetAcquireLatencyMetrics();
- setNonZero("lockAttempt.acquireMaxActiveLatency", acquireLatencyMetrics.maxActiveLatencySeconds(), context);
- setNonZero("lockAttempt.acquireHz", acquireLatencyMetrics.startHz(), context);
- setNonZero("lockAttempt.acquireLoad", acquireLatencyMetrics.load(), context);
+ setNonZero(ConfigServerMetrics.LOCK_ATTEMPT_ACQUIRE_MAX_ACTIVE_LATENCY.baseName(), acquireLatencyMetrics.maxActiveLatencySeconds(), context);
+ setNonZero(ConfigServerMetrics.LOCK_ATTEMPT_ACQUIRE_HZ.baseName(), acquireLatencyMetrics.startHz(), context);
+ setNonZero(ConfigServerMetrics.LOCK_ATTEMPT_ACQUIRE_LOAD.baseName(), acquireLatencyMetrics.load(), context);
LatencyMetrics lockedLatencyMetrics = lockMetrics.getAndResetLockedLatencyMetrics();
- setNonZero("lockAttempt.lockedLatency", lockedLatencyMetrics.maxLatencySeconds(), context);
- setNonZero("lockAttempt.lockedLoad", lockedLatencyMetrics.load(), context);
+ setNonZero(ConfigServerMetrics.LOCK_ATTEMPT_LOCKED_LATENCY.baseName(), lockedLatencyMetrics.maxLatencySeconds(), context);
+ setNonZero(ConfigServerMetrics.LOCK_ATTEMPT_LOCKED_LOAD.baseName(), lockedLatencyMetrics.load(), context);
- setNonZero("lockAttempt.acquireTimedOut", lockMetrics.getAndResetAcquireTimedOutCount(), context);
- setNonZero("lockAttempt.deadlock", lockMetrics.getAndResetDeadlockCount(), context);
+ setNonZero(ConfigServerMetrics.LOCK_ATTEMPT_ACQUIRE_TIMED_OUT.baseName(), lockMetrics.getAndResetAcquireTimedOutCount(), context);
+ setNonZero(ConfigServerMetrics.LOCK_ATTEMPT_DEADLOCK.baseName(), lockMetrics.getAndResetDeadlockCount(), context);
// bucket for various rare errors - to reduce #metrics
- setNonZero("lockAttempt.errors",
+ setNonZero(ConfigServerMetrics.LOCK_ATTEMPT_ERRORS.baseName(),
lockMetrics.getAndResetAcquireFailedCount() +
lockMetrics.getAndResetReleaseFailedCount() +
lockMetrics.getAndResetNakedReleaseCount() +
@@ -340,14 +349,14 @@ public class MetricsReporter extends NodeRepositoryMaintainer {
private void updateContainerMetrics(NodeList nodes) {
NodeResources totalCapacity = getCapacityTotal(nodes);
- metric.set("hostedVespa.docker.totalCapacityCpu", totalCapacity.vcpu(), null);
- metric.set("hostedVespa.docker.totalCapacityMem", totalCapacity.memoryGb(), null);
- metric.set("hostedVespa.docker.totalCapacityDisk", totalCapacity.diskGb(), null);
+ metric.set(ConfigServerMetrics.HOSTED_VESPA_DOCKER_TOTAL_CAPACITY_CPU.baseName(), totalCapacity.vcpu(), null);
+ metric.set(ConfigServerMetrics.HOSTED_VESPA_DOCKER_TOTAL_CAPACITY_MEM.baseName(), totalCapacity.memoryGb(), null);
+ metric.set(ConfigServerMetrics.HOSTED_VESPA_DOCKER_TOTAL_CAPACITY_DISK.baseName(), totalCapacity.diskGb(), null);
NodeResources totalFreeCapacity = getFreeCapacityTotal(nodes);
- metric.set("hostedVespa.docker.freeCapacityCpu", totalFreeCapacity.vcpu(), null);
- metric.set("hostedVespa.docker.freeCapacityMem", totalFreeCapacity.memoryGb(), null);
- metric.set("hostedVespa.docker.freeCapacityDisk", totalFreeCapacity.diskGb(), null);
+ metric.set(ConfigServerMetrics.HOSTED_VESPA_DOCKER_FREE_CAPACITY_CPU.baseName(), totalFreeCapacity.vcpu(), null);
+ metric.set(ConfigServerMetrics.HOSTED_VESPA_DOCKER_FREE_CAPACITY_MEM.baseName(), totalFreeCapacity.memoryGb(), null);
+ metric.set(ConfigServerMetrics.HOSTED_VESPA_DOCKER_FREE_CAPACITY_DISK.baseName(), totalFreeCapacity.diskGb(), null);
}
private void updateTenantUsageMetrics(NodeList nodes) {
@@ -362,9 +371,9 @@ public class MetricsReporter extends NodeRepositoryMaintainer {
var context = getContext(dimensions(applicationId));
- metric.set("hostedVespa.docker.allocatedCapacityCpu", allocatedCapacity.vcpu(), context);
- metric.set("hostedVespa.docker.allocatedCapacityMem", allocatedCapacity.memoryGb(), context);
- metric.set("hostedVespa.docker.allocatedCapacityDisk", allocatedCapacity.diskGb(), context);
+ metric.set(ConfigServerMetrics.HOSTED_VESPA_DOCKER_ALLOCATED_CAPACITY_CPU.baseName(), allocatedCapacity.vcpu(), context);
+ metric.set(ConfigServerMetrics.HOSTED_VESPA_DOCKER_ALLOCATED_CAPACITY_MEM.baseName(), allocatedCapacity.memoryGb(), context);
+ metric.set(ConfigServerMetrics.HOSTED_VESPA_DOCKER_ALLOCATED_CAPACITY_DISK.baseName(), allocatedCapacity.diskGb(), context);
}
);
}
@@ -375,7 +384,7 @@ public class MetricsReporter extends NodeRepositoryMaintainer {
.flatMap(Optional::stream)
.map(report -> report.getInspector().field("status").asString())
.collect(Collectors.groupingBy(Function.identity(), Collectors.counting()))
- .forEach((status, number) -> metric.set("hostedVespa.breakfixedHosts", number, getContext(Map.of("status", status))));
+ .forEach((status, number) -> metric.set(ConfigServerMetrics.HOSTED_VESPA_BREAKFIXED_HOSTS.baseName(), number, getContext(Map.of("status", status))));
}
static Map<String, String> dimensions(ApplicationId application, ClusterSpec.Id cluster) {
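
The MetricsReporter hunks replace hard-coded metric name strings with constants that expose a baseName(). A hypothetical sketch of that pattern (this is not the actual ai.vespa.metrics.ConfigServerMetrics definition), using wire names that appear in the removed lines:

// Hypothetical sketch, not the real ConfigServerMetrics enum: each constant owns its
// wire name, so call sites reference a shared definition instead of string literals.
public enum MetricNameSketch {

    NODES_ACTIVE("nodes.active"),
    NODES_NON_ACTIVE("nodes.nonActive"),
    CLUSTER_COST("cluster.cost");

    private final String baseName;

    MetricNameSketch(String baseName) { this.baseName = baseName; }

    /** The name reported to the metrics backend. */
    public String baseName() { return baseName; }

    public static void main(String[] args) {
        System.out.println(NODES_ACTIVE.baseName()); // "nodes.active"
    }
}
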
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
index 722c9acfdc0..f6391a7d475 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
@@ -60,6 +60,7 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
maintainers.add(new AutoscalingMaintainer(nodeRepository, deployer, metric, defaults.autoscalingInterval));
maintainers.add(new ScalingSuggestionsMaintainer(nodeRepository, defaults.scalingSuggestionsInterval, metric));
maintainers.add(new SwitchRebalancer(nodeRepository, defaults.switchRebalancerInterval, metric, deployer));
+ maintainers.add(new DeprovisionedExpirer(nodeRepository, defaults.deprovisionedExpiry, metric));
provisionServiceProvider.getLoadBalancerService()
.map(lbService -> new LoadBalancerExpirer(nodeRepository, defaults.loadBalancerExpirerInterval, lbService, metric))
@@ -70,7 +71,9 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
new HostDeprovisioner(nodeRepository, defaults.hostDeprovisionerInterval, metric, hostProvisioner),
new HostResumeProvisioner(nodeRepository, defaults.hostResumeProvisionerInterval, metric, hostProvisioner),
new HostRetirer(nodeRepository, defaults.hostRetirerInterval, metric, hostProvisioner),
- new DiskReplacer(nodeRepository, defaults.diskReplacerInterval, metric, hostProvisioner)))
+ new DiskReplacer(nodeRepository, defaults.diskReplacerInterval, metric, hostProvisioner),
+ new HostFlavorUpgrader(nodeRepository, defaults.hostFlavorUpgraderInterval, metric, deployer, hostProvisioner))
+ )
.ifPresent(maintainers::addAll);
// The DuperModel is filled with infrastructure applications by the infrastructure provisioner, so explicitly run that now
infrastructureProvisioner.maintainButThrowOnException();
@@ -118,10 +121,14 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
private final Duration scalingSuggestionsInterval;
private final Duration switchRebalancerInterval;
private final Duration hostRetirerInterval;
+ private final Duration hostFlavorUpgraderInterval;
+ private final Duration deprovisionedExpiry;
private final NodeFailer.ThrottlePolicy throttlePolicy;
DefaultTimes(Zone zone, Deployer deployer) {
+ boolean isCdZone = zone.system().isCd();
+
autoscalingInterval = Duration.ofMinutes(5);
dynamicProvisionerInterval = Duration.ofMinutes(3);
hostDeprovisionerInterval = Duration.ofMinutes(3);
@@ -137,7 +144,7 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
nodeMetricsCollectionInterval = Duration.ofMinutes(1);
expeditedChangeRedeployInterval = Duration.ofMinutes(3);
// Vespa upgrade frequency is higher in CD so (de)activate OS upgrades more frequently as well
- osUpgradeActivatorInterval = zone.system().isCd() ? Duration.ofSeconds(30) : Duration.ofMinutes(5);
+ osUpgradeActivatorInterval = isCdZone ? Duration.ofSeconds(30) : Duration.ofMinutes(5);
periodicRedeployInterval = Duration.ofMinutes(60);
provisionedExpiry = zone.cloud().dynamicProvisioning() ? Duration.ofMinutes(40) : Duration.ofHours(4);
rebalancerInterval = Duration.ofMinutes(120);
@@ -149,8 +156,11 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
switchRebalancerInterval = Duration.ofHours(1);
throttlePolicy = NodeFailer.ThrottlePolicy.hosted;
hostRetirerInterval = Duration.ofMinutes(30);
+ hostFlavorUpgraderInterval = Duration.ofMinutes(30);
+ // CD (de)provisions hosts frequently. Expire deprovisioned ones earlier
+ deprovisionedExpiry = isCdZone ? Duration.ofDays(1) : Duration.ofDays(30);
- if (zone.environment().isProduction() && ! zone.system().isCd()) {
+ if (zone.environment().isProduction() && ! isCdZone) {
inactiveExpiry = Duration.ofHours(4); // enough time for the application owner to discover and redeploy
retiredInterval = Duration.ofMinutes(15);
dirtyExpiry = Duration.ofHours(2); // enough time to clean the node
@@ -159,8 +169,10 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
// long enough that nodes aren't reused immediately and delete can happen on all config servers
// with time enough to clean up even with ZK connection issues on config servers
inactiveExpiry = Duration.ofMinutes(1);
- retiredInterval = Duration.ofMinutes(1);
dirtyExpiry = Duration.ofMinutes(30);
+ // Longer time in non-CD since we might end up with many deployments in a short time
+ // when retiring many hosts, e.g. when doing OS upgrades
+ retiredInterval = isCdZone ? Duration.ofMinutes(1) : Duration.ofMinutes(5);
retiredExpiry = Duration.ofDays(1);
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Rebalancer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Rebalancer.java
index 3649f921480..8ef0f107eb0 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Rebalancer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Rebalancer.java
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.maintenance;
+import ai.vespa.metrics.ConfigServerMetrics;
import com.yahoo.config.provision.Deployer;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeType;
@@ -72,7 +73,7 @@ public class Rebalancer extends NodeMover<Rebalancer.Move> {
hostCount++;
totalSkew += Node.skew(host.flavor().resources(), capacity.unusedCapacityOf(host));
}
- metric.set("hostedVespa.docker.skew", totalSkew/hostCount, null);
+ metric.set(ConfigServerMetrics.HOSTED_VESPA_DOCKER_SKEW.baseName(), totalSkew/hostCount, null);
}
private double skewReductionByRemoving(Node node, Node fromHost, HostCapacity capacity) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Agent.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Agent.java
index 0b2bdb4620a..e03b77b91b8 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Agent.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Agent.java
@@ -29,5 +29,6 @@ public enum Agent {
SpareCapacityMaintainer,
SwitchRebalancer,
HostEncrypter,
+ HostFlavorUpgrader,
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
index fd6b15609d6..cc49265506c 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
@@ -7,6 +7,7 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ApplicationTransaction;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Flavor;
+import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeType;
import com.yahoo.config.provision.Zone;
import com.yahoo.transaction.Mutex;
@@ -97,8 +98,24 @@ public class Nodes {
* @param inState the states to return nodes from. If no states are given, all nodes are returned
*/
public NodeList list(Node.State... inState) {
- NodeList nodes = NodeList.copyOf(db.readNodes());
- return inState.length == 0 ? nodes : nodes.state(Set.of(inState));
+ NodeList allNodes = NodeList.copyOf(db.readNodes());
+ NodeList nodes = inState.length == 0 ? allNodes : allNodes.state(Set.of(inState));
+ nodes = NodeList.copyOf(nodes.stream().map(node -> specifyFully(node, allNodes)).toList());
+ return nodes;
+ }
+
+ // Repair underspecified node resources. TODO: Remove this after June 2023
+ private Node specifyFully(Node node, NodeList allNodes) {
+ if (node.resources().isUnspecified()) return node;
+
+ if (node.resources().bandwidthGbpsIsUnspecified())
+ node = node.with(new Flavor(node.resources().withBandwidthGbps(0.3)), Agent.system, clock.instant());
+ if ( node.resources().architecture() == NodeResources.Architecture.any) {
+ Optional<Node> parent = allNodes.parentOf(node);
+ if (parent.isPresent())
+ node = node.with(new Flavor(node.resources().with(parent.get().resources().architecture())), Agent.system, clock.instant());
+ }
+ return node;
}
/** Returns a locked list of all nodes in this repository */
@@ -167,7 +184,8 @@ public class Nodes {
if (rebuilding) {
node = node.with(node.status().withWantToRetire(existing.get().status().wantToRetire(),
false,
- rebuilding));
+ rebuilding,
+ existing.get().status().wantToUpgradeFlavor()));
}
nodesToRemove.add(existing.get());
}
@@ -190,7 +208,7 @@ public class Nodes {
if (node.status().wantToDeprovision() || node.status().wantToRebuild())
return park(node.hostname(), false, agent, reason);
- node = node.withWantToRetire(false, false, false, agent, clock.instant());
+ node = node.withWantToRetire(false, false, false, false, agent, clock.instant());
return db.writeTo(Node.State.ready, node, agent, Optional.of(reason));
}
@@ -502,11 +520,7 @@ public class Nodes {
db.removeNodes(removed, transaction);
} else {
removed = removeChildren(node, force, transaction);
- if (zone.cloud().dynamicProvisioning()) {
- db.removeNodes(List.of(node), transaction);
- } else {
- move(node.hostname(), Node.State.deprovisioned, Agent.system, false, Optional.empty(), transaction);
- }
+ move(node.hostname(), Node.State.deprovisioned, Agent.system, false, Optional.empty(), transaction);
removed.add(node);
}
transaction.commit();
@@ -638,6 +652,11 @@ public class Nodes {
return decommission(hostname, soft ? HostOperation.softRebuild : HostOperation.rebuild, agent, instant);
}
+ /** Upgrade flavor for given host */
+ public List<Node> upgradeFlavor(String hostname, Agent agent, Instant instant, boolean upgrade) {
+ return decommission(hostname, upgrade ? HostOperation.upgradeFlavor : HostOperation.cancel, agent, instant);
+ }
+
private List<Node> decommission(String hostname, HostOperation op, Agent agent, Instant instant) {
Optional<NodeMutex> nodeMutex = lockAndGet(hostname);
if (nodeMutex.isEmpty()) return List.of();
@@ -645,20 +664,20 @@ public class Nodes {
boolean wantToDeprovision = op == HostOperation.deprovision;
boolean wantToRebuild = op == HostOperation.rebuild || op == HostOperation.softRebuild;
boolean wantToRetire = op.needsRetirement();
+ boolean wantToUpgradeFlavor = op == HostOperation.upgradeFlavor;
Node host = nodeMutex.get().node();
try (NodeMutex lock = nodeMutex.get()) {
if ( ! host.type().isHost()) throw new IllegalArgumentException("Cannot " + op + " non-host " + host);
try (Mutex allocationLock = lockUnallocated()) {
// Modify parent with wantToRetire while holding the allocationLock to prevent
// any further allocation of nodes on this host
- Node newHost = lock.node().withWantToRetire(wantToRetire, wantToDeprovision, wantToRebuild, agent, instant);
+ Node newHost = lock.node().withWantToRetire(wantToRetire, wantToDeprovision, wantToRebuild, wantToUpgradeFlavor, agent, instant);
result.add(write(newHost, lock));
}
}
-
- if (wantToRetire) { // Apply recursively if we're retiring
+ if (wantToRetire || op == HostOperation.cancel) { // Apply recursively if we're retiring, or cancelling
List<Node> updatedNodes = performOn(list().childrenOf(host), (node, nodeLock) -> {
- Node newNode = node.withWantToRetire(wantToRetire, wantToDeprovision, false, agent, instant);
+ Node newNode = node.withWantToRetire(wantToRetire, wantToDeprovision, false, false, agent, instant);
return write(newNode, nodeLock);
});
result.addAll(updatedNodes);
@@ -755,7 +774,6 @@ public class Nodes {
if ( ! host.type().canRun(NodeType.tenant)) return false;
if (host.status().wantToRetire()) return false;
if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false;
- if (suspended(host)) return false;
if (dynamicProvisioning)
return EnumSet.of(Node.State.active, Node.State.ready, Node.State.provisioned).contains(host.state());
@@ -889,7 +907,13 @@ public class Nodes {
rebuild(true),
/** Host is stopped and re-bootstrapped, data is preserved */
- softRebuild(false);
+ softRebuild(false),
+
+ /** Host flavor should be upgraded, data is destroyed */
+ upgradeFlavor(true),
+
+ /** Attempt to cancel any ongoing operations. If the current operation has progressed too far, cancelling won't have any effect */
+ cancel(false);
private final boolean needsRetirement;
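To make the new operations easier to follow, here is a hypothetical helper spelling out which status flags each HostOperation implies in decommission(); names and the mapping for deprovision are assumptions inferred from the hunk above:

// Hypothetical mapping from a host operation to the retire-related flags decommission() sets.
record DecommissionFlags(boolean wantToRetire, boolean wantToDeprovision,
                         boolean wantToRebuild, boolean wantToUpgradeFlavor) {

    static DecommissionFlags of(String op) {
        return switch (op) {
            case "deprovision"   -> new DecommissionFlags(true,  true,  false, false);
            case "rebuild"       -> new DecommissionFlags(true,  false, true,  false);
            case "softRebuild"   -> new DecommissionFlags(false, false, true,  false);
            case "upgradeFlavor" -> new DecommissionFlags(true,  false, false, true);
            case "cancel"        -> new DecommissionFlags(false, false, false, false);
            default              -> throw new IllegalArgumentException("Unknown operation: " + op);
        };
    }

}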
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Status.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Status.java
index ef0f899ca3e..6e5c5a07fc2 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Status.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Status.java
@@ -24,6 +24,7 @@ public class Status {
private final boolean wantToRebuild;
private final boolean preferToRetire;
private final boolean wantToFail;
+ private final boolean wantToUpgradeFlavor;
private final OsVersion osVersion;
private final Optional<Instant> firmwareVerifiedAt;
@@ -36,18 +37,23 @@ public class Status {
boolean wantToRebuild,
boolean preferToRetire,
boolean wantToFail,
+ boolean wantToUpgradeFlavor,
OsVersion osVersion,
Optional<Instant> firmwareVerifiedAt) {
this.reboot = Objects.requireNonNull(generation, "Generation must be non-null");
this.vespaVersion = Objects.requireNonNull(vespaVersion, "Vespa version must be non-null").filter(v -> !Version.emptyVersion.equals(v));
this.containerImage = Objects.requireNonNull(containerImage, "Container image must be non-null").filter(d -> !DockerImage.EMPTY.equals(d));
this.failCount = failCount;
+ this.wantToUpgradeFlavor = wantToUpgradeFlavor;
if (wantToDeprovision && wantToRebuild) {
throw new IllegalArgumentException("Node cannot be marked both wantToDeprovision and wantToRebuild");
}
if (wantToDeprovision && !wantToRetire) {
throw new IllegalArgumentException("Node cannot be marked wantToDeprovision unless it's also marked wantToRetire");
}
+ if (wantToUpgradeFlavor && !wantToRetire) {
+ throw new IllegalArgumentException("Node cannot be marked wantToUpgradeFlavor unless it's also marked wantToRetire");
+ }
this.wantToRetire = wantToRetire;
this.wantToDeprovision = wantToDeprovision;
this.wantToRebuild = wantToRebuild;
@@ -58,35 +64,35 @@ public class Status {
}
/** Returns a copy of this with the reboot generation changed */
- public Status withReboot(Generation reboot) { return new Status(reboot, vespaVersion, containerImage, failCount, wantToRetire, wantToDeprovision, wantToRebuild, preferToRetire, wantToFail, osVersion, firmwareVerifiedAt); }
+ public Status withReboot(Generation reboot) { return new Status(reboot, vespaVersion, containerImage, failCount, wantToRetire, wantToDeprovision, wantToRebuild, preferToRetire, wantToFail, wantToUpgradeFlavor, osVersion, firmwareVerifiedAt); }
/** Returns the reboot generation of this node */
public Generation reboot() { return reboot; }
/** Returns a copy of this with the vespa version changed */
- public Status withVespaVersion(Version version) { return new Status(reboot, Optional.of(version), containerImage, failCount, wantToRetire, wantToDeprovision, wantToRebuild, preferToRetire, wantToFail, osVersion, firmwareVerifiedAt); }
+ public Status withVespaVersion(Version version) { return new Status(reboot, Optional.of(version), containerImage, failCount, wantToRetire, wantToDeprovision, wantToRebuild, preferToRetire, wantToFail, wantToUpgradeFlavor, osVersion, firmwareVerifiedAt); }
/** Returns the Vespa version installed on the node, if known */
public Optional<Version> vespaVersion() { return vespaVersion; }
/** Returns a copy of this with the container image changed */
- public Status withContainerImage(DockerImage containerImage) { return new Status(reboot, vespaVersion, Optional.of(containerImage), failCount, wantToRetire, wantToDeprovision, wantToRebuild, preferToRetire, wantToFail, osVersion, firmwareVerifiedAt); }
+ public Status withContainerImage(DockerImage containerImage) { return new Status(reboot, vespaVersion, Optional.of(containerImage), failCount, wantToRetire, wantToDeprovision, wantToRebuild, preferToRetire, wantToFail, wantToUpgradeFlavor, osVersion, firmwareVerifiedAt); }
/** Returns the container image the node is running, if any */
public Optional<DockerImage> containerImage() { return containerImage; }
- public Status withIncreasedFailCount() { return new Status(reboot, vespaVersion, containerImage, failCount + 1, wantToRetire, wantToDeprovision, wantToRebuild, preferToRetire, wantToFail, osVersion, firmwareVerifiedAt); }
+ public Status withIncreasedFailCount() { return new Status(reboot, vespaVersion, containerImage, failCount + 1, wantToRetire, wantToDeprovision, wantToRebuild, preferToRetire, wantToFail, wantToUpgradeFlavor, osVersion, firmwareVerifiedAt); }
- public Status withDecreasedFailCount() { return new Status(reboot, vespaVersion, containerImage, failCount - 1, wantToRetire, wantToDeprovision, wantToRebuild, preferToRetire, wantToFail, osVersion, firmwareVerifiedAt); }
+ public Status withDecreasedFailCount() { return new Status(reboot, vespaVersion, containerImage, failCount - 1, wantToRetire, wantToDeprovision, wantToRebuild, preferToRetire, wantToFail, wantToUpgradeFlavor, osVersion, firmwareVerifiedAt); }
- public Status withFailCount(int value) { return new Status(reboot, vespaVersion, containerImage, value, wantToRetire, wantToDeprovision, wantToRebuild, preferToRetire, wantToFail, osVersion, firmwareVerifiedAt); }
+ public Status withFailCount(int value) { return new Status(reboot, vespaVersion, containerImage, value, wantToRetire, wantToDeprovision, wantToRebuild, preferToRetire, wantToFail, wantToUpgradeFlavor, osVersion, firmwareVerifiedAt); }
/** Returns how many times this node has been moved to the failed state. */
public int failCount() { return failCount; }
/** Returns a copy of this with the want to retire/deprovision/rebuild flags changed */
- public Status withWantToRetire(boolean wantToRetire, boolean wantToDeprovision, boolean wantToRebuild) {
- return new Status(reboot, vespaVersion, containerImage, failCount, wantToRetire, wantToDeprovision, wantToRebuild, preferToRetire, wantToFail, osVersion, firmwareVerifiedAt);
+ public Status withWantToRetire(boolean wantToRetire, boolean wantToDeprovision, boolean wantToRebuild, boolean wantToUpgradeFlavor) {
+ return new Status(reboot, vespaVersion, containerImage, failCount, wantToRetire, wantToDeprovision, wantToRebuild, preferToRetire, wantToFail, wantToUpgradeFlavor, osVersion, firmwareVerifiedAt);
}
/**
@@ -109,9 +115,14 @@ public class Status {
*/
public boolean preferToRetire() { return preferToRetire; }
+ /** Returns whether the flavor of this node is required to be of the latest generation */
+ public boolean wantToUpgradeFlavor() {
+ return wantToUpgradeFlavor;
+ }
+
/** Returns a copy of this with want to fail set to the given value */
public Status withWantToFail(boolean wantToFail) {
- return new Status(reboot, vespaVersion, containerImage, failCount, wantToRetire, wantToDeprovision, wantToRebuild, preferToRetire, wantToFail, osVersion, firmwareVerifiedAt);
+ return new Status(reboot, vespaVersion, containerImage, failCount, wantToRetire, wantToDeprovision, wantToRebuild, preferToRetire, wantToFail, wantToUpgradeFlavor, osVersion, firmwareVerifiedAt);
}
/** Returns whether this node should be failed */
@@ -119,12 +130,17 @@ public class Status {
/** Returns a copy of this with prefer-to-retire set to given value */
public Status withPreferToRetire(boolean preferToRetire) {
- return new Status(reboot, vespaVersion, containerImage, failCount, wantToRetire, wantToDeprovision, wantToRebuild, preferToRetire, wantToFail, osVersion, firmwareVerifiedAt);
+ return new Status(reboot, vespaVersion, containerImage, failCount, wantToRetire, wantToDeprovision, wantToRebuild, preferToRetire, wantToFail, wantToUpgradeFlavor, osVersion, firmwareVerifiedAt);
+ }
+
+ /** Returns a copy of this with wantToUpgradeFlavor set to given value */
+ public Status withWantToUpgradeFlavor(boolean wantToUpgradeFlavor) {
+ return new Status(reboot, vespaVersion, containerImage, failCount, wantToRetire, wantToDeprovision, wantToRebuild, preferToRetire, wantToFail, wantToUpgradeFlavor, osVersion, firmwareVerifiedAt);
}
/** Returns a copy of this with the OS version set to given version */
public Status withOsVersion(OsVersion version) {
- return new Status(reboot, vespaVersion, containerImage, failCount, wantToRetire, wantToDeprovision, wantToRebuild, preferToRetire, wantToFail, version, firmwareVerifiedAt);
+ return new Status(reboot, vespaVersion, containerImage, failCount, wantToRetire, wantToDeprovision, wantToRebuild, preferToRetire, wantToFail, wantToUpgradeFlavor, version, firmwareVerifiedAt);
}
/** Returns the OS version of this node */
@@ -134,7 +150,7 @@ public class Status {
/** Returns a copy of this with the firmwareVerifiedAt set to the given instant. */
public Status withFirmwareVerifiedAt(Instant instant) {
- return new Status(reboot, vespaVersion, containerImage, failCount, wantToRetire, wantToDeprovision, wantToRebuild, preferToRetire, wantToFail, osVersion, Optional.of(instant));
+ return new Status(reboot, vespaVersion, containerImage, failCount, wantToRetire, wantToDeprovision, wantToRebuild, preferToRetire, wantToFail, wantToUpgradeFlavor, osVersion, Optional.of(instant));
}
/** Returns the last time this node had firmware that was verified to be up to date. */
@@ -145,7 +161,7 @@ public class Status {
/** Returns the initial status of a newly provisioned node */
public static Status initial() {
return new Status(Generation.initial(), Optional.empty(), Optional.empty(), 0, false,
- false, false, false, false, OsVersion.EMPTY, Optional.empty());
+ false, false, false, false, false, OsVersion.EMPTY, Optional.empty());
}
}
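The constructor above validates flag combinations once, so every with-* copy stays consistent. A minimal, generic sketch of that pattern (not the actual Status class):

// Generic illustration: invariants enforced in the constructor of an immutable value with with-ers.
record RetirementFlags(boolean wantToRetire, boolean wantToDeprovision, boolean wantToUpgradeFlavor) {

    RetirementFlags {
        if (wantToDeprovision && !wantToRetire)
            throw new IllegalArgumentException("wantToDeprovision requires wantToRetire");
        if (wantToUpgradeFlavor && !wantToRetire)
            throw new IllegalArgumentException("wantToUpgradeFlavor requires wantToRetire");
    }

    // Each with-er returns a new instance, so the compact constructor re-checks the invariants
    RetirementFlags withWantToUpgradeFlavor(boolean value) {
        return new RetirementFlags(wantToRetire, wantToDeprovision, value);
    }

}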
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsUpgrader.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsUpgrader.java
index 32fe9ba9f7b..5def863113c 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsUpgrader.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsUpgrader.java
@@ -50,7 +50,7 @@ public abstract class OsUpgrader {
/** The duration this leaves new nodes alone before scheduling any upgrade */
private Duration gracePeriod() {
- return Duration.ofDays(1);
+ return nodeRepository.zone().system().isCd() ? Duration.ofHours(4) : Duration.ofDays(1);
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializer.java
index 1e378c80f90..f62e019e408 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializer.java
@@ -58,6 +58,7 @@ public class ApplicationSerializer {
private static final String suggestedKey = "suggested";
private static final String clusterInfoKey = "clusterInfo";
private static final String bcpDeadlineKey = "bcpDeadline";
+ private static final String hostTTLKey = "hostTTL";
private static final String bcpGroupInfoKey = "bcpGroupInfo";
private static final String queryRateKey = "queryRateKey";
private static final String growthRateHeadroomKey = "growthRateHeadroomKey";
@@ -234,12 +235,14 @@ public class ApplicationSerializer {
private static void toSlime(ClusterInfo clusterInfo, Cursor clusterInfoObject) {
clusterInfoObject.setLong(bcpDeadlineKey, clusterInfo.bcpDeadline().toMinutes());
+ if ( ! clusterInfo.hostTTL().isZero()) clusterInfoObject.setLong(hostTTLKey, clusterInfo.hostTTL().toMillis());
}
private static ClusterInfo clusterInfoFromSlime(Inspector clusterInfoObject) {
if ( ! clusterInfoObject.valid()) return ClusterInfo.empty();
ClusterInfo.Builder builder = new ClusterInfo.Builder();
builder.bcpDeadline(Duration.ofMinutes(clusterInfoObject.field(bcpDeadlineKey).asLong()));
+ builder.hostTTL(Duration.ofMillis(clusterInfoObject.field(hostTTLKey).asLong()));
return builder.build();
}
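The hostTTL round-trip above relies on a zero duration meaning "unset": the writer skips zero values, and a missing field reads back as 0 ms. A hedged sketch of that convention using a plain map instead of the Slime API:

import java.time.Duration;
import java.util.Map;

// Illustrative codec: a zero duration is treated as 'unset' and therefore never written.
class ClusterInfoCodec {

    static void write(Duration hostTTL, Map<String, Long> out) {
        if (!hostTTL.isZero()) out.put("hostTTL", hostTTL.toMillis());
    }

    static Duration read(Map<String, Long> in) {
        // A missing field reads back as 0 ms, the same 'unset' value that was skipped on write
        return Duration.ofMillis(in.getOrDefault("hostTTL", 0L));
    }

}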
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java
index cec413cf4e3..fc008b7b9dc 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java
@@ -206,8 +206,8 @@ public class CuratorDb {
toState.isAllocated() ? node.allocation() : Optional.empty(),
node.history().recordStateTransition(node.state(), toState, agent, clock.instant()),
node.type(), node.reports(), node.modelName(), node.reservedTo(),
- node.exclusiveToApplicationId(), node.exclusiveToClusterType(), node.switchHostname(),
- node.trustedCertificates(), node.cloudAccount(), node.wireguardPubKey());
+ node.exclusiveToApplicationId(), node.hostTTL(), node.hostEmptyAt(), node.exclusiveToClusterType(),
+ node.switchHostname(), node.trustedCertificates(), node.cloudAccount(), node.wireguardPubKey());
curatorTransaction.add(createOrSet(nodePath(newNode), nodeSerializer.toJson(newNode)));
writtenNodes.add(newNode);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java
index 23ea14da4cc..514689d3d4e 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java
@@ -88,7 +88,8 @@ public class NodeSerializer {
private static final String wantToDeprovisionKey = "wantToDeprovision";
private static final String wantToRebuildKey = "wantToRebuild";
private static final String preferToRetireKey = "preferToRetire";
- private static final String wantToFailKey = "wantToFailKey";
+ private static final String wantToFailKey = "wantToFailKey"; // TODO: This should be changed to 'wantToFail'
+ private static final String wantToUpgradeFlavorKey = "wantToUpgradeFlavor";
private static final String osVersionKey = "osVersion";
private static final String wantedOsVersionKey = "wantedOsVersion";
private static final String firmwareCheckKey = "firmwareCheck";
@@ -96,6 +97,8 @@ public class NodeSerializer {
private static final String modelNameKey = "modelName";
private static final String reservedToKey = "reservedTo";
private static final String exclusiveToApplicationIdKey = "exclusiveTo";
+ private static final String hostTTLKey = "hostTTL";
+ private static final String hostEmptyAtKey = "hostEmptyAt";
private static final String exclusiveToClusterTypeKey = "exclusiveToClusterType";
private static final String switchHostnameKey = "switchHostname";
private static final String trustedCertificatesKey = "trustedCertificates";
@@ -182,6 +185,7 @@ public class NodeSerializer {
object.setBool(wantToDeprovisionKey, node.status().wantToDeprovision());
object.setBool(wantToFailKey, node.status().wantToFail());
object.setBool(wantToRebuildKey, node.status().wantToRebuild());
+ object.setBool(wantToUpgradeFlavorKey, node.status().wantToUpgradeFlavor());
node.allocation().ifPresent(allocation -> toSlime(allocation, object.setObject(instanceKey)));
toSlime(node.history().events(), object.setArray(historyKey));
toSlime(node.history().log(), object.setArray(logKey));
@@ -194,6 +198,8 @@ public class NodeSerializer {
node.modelName().ifPresent(modelName -> object.setString(modelNameKey, modelName));
node.reservedTo().ifPresent(tenant -> object.setString(reservedToKey, tenant.value()));
node.exclusiveToApplicationId().ifPresent(applicationId -> object.setString(exclusiveToApplicationIdKey, applicationId.serializedForm()));
+ node.hostTTL().ifPresent(hostTTL -> object.setLong(hostTTLKey, hostTTL.toMillis()));
+ node.hostEmptyAt().ifPresent(emptyAt -> object.setLong(hostEmptyAtKey, emptyAt.toEpochMilli()));
node.exclusiveToClusterType().ifPresent(clusterType -> object.setString(exclusiveToClusterTypeKey, clusterType.name()));
trustedCertificatesToSlime(node.trustedCertificates(), object.setArray(trustedCertificatesKey));
if (!node.cloudAccount().isUnspecified()) {
@@ -292,6 +298,8 @@ public class NodeSerializer {
SlimeUtils.optionalString(object.field(modelNameKey)),
SlimeUtils.optionalString(object.field(reservedToKey)).map(TenantName::from),
SlimeUtils.optionalString(object.field(exclusiveToApplicationIdKey)).map(ApplicationId::fromSerializedForm),
+ SlimeUtils.optionalDuration(object.field(hostTTLKey)),
+ SlimeUtils.optionalInstant(object.field(hostEmptyAtKey)),
SlimeUtils.optionalString(object.field(exclusiveToClusterTypeKey)).map(ClusterSpec.Type::from),
SlimeUtils.optionalString(object.field(switchHostnameKey)),
trustedCertificatesFromSlime(object),
@@ -309,8 +317,9 @@ public class NodeSerializer {
object.field(wantToRebuildKey).asBool(),
object.field(preferToRetireKey).asBool(),
object.field(wantToFailKey).asBool(),
+ object.field(wantToUpgradeFlavorKey).asBool(),
new OsVersion(versionFromSlime(object.field(osVersionKey)),
- versionFromSlime(object.field(wantedOsVersionKey))),
+ versionFromSlime(object.field(wantedOsVersionKey))),
SlimeUtils.optionalInstant(object.field(firmwareCheckKey)));
}
@@ -482,6 +491,7 @@ public class NodeSerializer {
case "SwitchRebalancer" -> Agent.SwitchRebalancer;
case "HostEncrypter" -> Agent.HostEncrypter;
case "ParkedExpirer" -> Agent.ParkedExpirer;
+ case "HostFlavorUpgrader" -> Agent.HostFlavorUpgrader;
default -> throw new IllegalArgumentException("Unknown node event agent '" + eventAgentField.asString() + "'");
};
}
@@ -506,6 +516,7 @@ public class NodeSerializer {
case SwitchRebalancer -> "SwitchRebalancer";
case HostEncrypter -> "HostEncrypter";
case ParkedExpirer -> "ParkedExpirer";
+ case HostFlavorUpgrader -> "HostFlavorUpgrader";
};
}
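Adding HostFlavorUpgrader requires updating both directions of the agent/string mapping above. A compact illustration of keeping such a mapping symmetric (a simplified stand-in, not the serializer itself):

// Simplified stand-in for the symmetric enum<->string mapping in NodeSerializer.
enum ToyAgent { HostEncrypter, ParkedExpirer, HostFlavorUpgrader }

final class ToyAgentCodec {

    static String toString(ToyAgent agent) {
        return switch (agent) {
            case HostEncrypter      -> "HostEncrypter";
            case ParkedExpirer      -> "ParkedExpirer";
            case HostFlavorUpgrader -> "HostFlavorUpgrader";
        };
    }

    static ToyAgent fromString(String name) {
        return switch (name) {
            case "HostEncrypter"      -> ToyAgent.HostEncrypter;
            case "ParkedExpirer"      -> ToyAgent.ParkedExpirer;
            case "HostFlavorUpgrader" -> ToyAgent.HostFlavorUpgrader;
            default -> throw new IllegalArgumentException("Unknown agent '" + name + "'");
        };
    }

}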
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
index 8cff57e3005..ee7650da8c3 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
@@ -9,6 +9,7 @@ import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.NodeResources;
+import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.flags.StringFlag;
@@ -89,33 +90,38 @@ public class CapacityPolicies {
}
public NodeResources specifyFully(NodeResources resources, ClusterSpec clusterSpec, ApplicationId applicationId) {
- if (resources.vcpuIsUnspecified())
- resources = resources.withVcpu(defaultResources(clusterSpec, applicationId).vcpu());
- if (resources.memoryGbIsUnspecified())
- resources = resources.withMemoryGb(defaultResources(clusterSpec, applicationId).memoryGb());
- if (resources.diskGbIsUnspecified())
- resources = resources.withDiskGb(defaultResources(clusterSpec, applicationId).diskGb());
- return resources;
+ return resources.withUnspecifiedNumbersFrom(defaultResources(clusterSpec, applicationId));
}
private NodeResources defaultResources(ClusterSpec clusterSpec, ApplicationId applicationId) {
if (clusterSpec.type() == ClusterSpec.Type.admin) {
Architecture architecture = adminClusterArchitecture(applicationId);
+ if (nodeRepository.exclusiveAllocation(clusterSpec)) {
+ return versioned(clusterSpec, Map.of(new Version(0), smallestExclusiveResources()));
+ }
+
if (clusterSpec.id().value().equals("cluster-controllers")) {
return clusterControllerResources(clusterSpec, architecture).with(architecture);
}
+ if (clusterSpec.id().value().equals("logserver")) {
+ return logserverResources(architecture).with(architecture);
+ }
+
return (nodeRepository.exclusiveAllocation(clusterSpec)
? versioned(clusterSpec, Map.of(new Version(0), smallestExclusiveResources()))
: versioned(clusterSpec, Map.of(new Version(0), smallestSharedResources())))
.with(architecture);
}
+ if (zone.environment() == Environment.dev && zone.system() == SystemName.cd) {
+ return versioned(clusterSpec, Map.of(new Version(0), new NodeResources(1.5, 4, 50, 0.3)));
+ }
+
if (clusterSpec.type() == ClusterSpec.Type.content) {
return zone.cloud().dynamicProvisioning()
- ? versioned(clusterSpec, Map.of(new Version(0), new NodeResources(2.0, 8, 50, 0.3),
- new Version(8, 75), new NodeResources(2, 16, 300, 0.3)))
+ ? versioned(clusterSpec, Map.of(new Version(0), new NodeResources(2, 16, 300, 0.3)))
: versioned(clusterSpec, Map.of(new Version(0), new NodeResources(1.5, 8, 50, 0.3)));
}
else {
@@ -126,15 +132,25 @@ public class CapacityPolicies {
}
private NodeResources clusterControllerResources(ClusterSpec clusterSpec, Architecture architecture) {
- if (nodeRepository.exclusiveAllocation(clusterSpec)) {
- return versioned(clusterSpec, Map.of(new Version(0), smallestExclusiveResources()));
- }
-
// 1.32 fits floor(8/1.32) = 6 cluster controllers on each 8Gb host, and each will have
// 1.32-(0.7+0.6)*(1.32/8) = 1.1 Gb real memory given current taxes.
- return versioned(clusterSpec, Map.of(new Version(0), new NodeResources(0.25, 1.14, 10, 0.3),
- new Version(8, 127, 11), new NodeResources(0.25, 1.5, 10, 0.3),
- new Version(8, 129, 4), new NodeResources(0.25, 1.32, 10, 0.3)));
+ if (architecture == Architecture.x86_64)
+ return versioned(clusterSpec, Map.of(new Version(0), new NodeResources(0.25, 1.14, 10, 0.3),
+ new Version(8, 129, 4), new NodeResources(0.25, 1.32, 10, 0.3)));
+ else
+ // arm64 nodes need more memory
+ return versioned(clusterSpec, Map.of(new Version(0), new NodeResources(0.25, 1.50, 10, 0.3),
+ new Version(8, 129, 4), new NodeResources(0.25, 1.32, 10, 0.3),
+ new Version(8, 173, 5), new NodeResources(0.25, 1.50, 10, 0.3)));
+ }
+
+ private NodeResources logserverResources(Architecture architecture) {
+ if (zone.cloud().name().equals(CloudName.GCP))
+ return new NodeResources(1, 4, 50, 0.3);
+
+ return architecture == Architecture.arm64
+ ? new NodeResources(0.5, 2.5, 50, 0.3)
+ : new NodeResources(0.5, 2, 50, 0.3);
}
private Architecture adminClusterArchitecture(ApplicationId instance) {
@@ -148,14 +164,14 @@ public class CapacityPolicies {
.getValue();
}
- // The lowest amount resources that can be exclusive allocated (i.e. a matching host flavor for this exists)
+ // The lowest amount of resources that can be exclusive allocated (i.e. a matching host flavor for this exists)
private NodeResources smallestExclusiveResources() {
return (zone.cloud().name().equals(CloudName.GCP))
? new NodeResources(1, 4, 50, 0.3)
: new NodeResources(0.5, 4, 50, 0.3);
}
- // The lowest amount resources that can be shared (i.e. a matching host flavor for this exists)
+ // The lowest amount of resources that can be shared (i.e. a matching host flavor for this exists)
private NodeResources smallestSharedResources() {
return (zone.cloud().name().equals(CloudName.GCP))
? new NodeResources(1, 4, 50, 0.3)
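The versioned(...) calls above associate default resources with Vespa versions; the assumption is that the entry with the greatest version at or below the deployed version wins. A standalone sketch of that lookup under this assumption (not the actual helper):

import java.util.Map;
import java.util.TreeMap;

// Assumed behaviour of a versioned-defaults lookup: greatest key at or below the wanted version wins.
final class VersionedDefaults<T> {

    private final TreeMap<Integer, T> byVersion = new TreeMap<>();

    VersionedDefaults(Map<Integer, T> entries) { byVersion.putAll(entries); }

    T at(int wantedVersion) {
        var entry = byVersion.floorEntry(wantedVersion);
        if (entry == null) throw new IllegalArgumentException("No default at or below version " + wantedVersion);
        return entry.getValue();
    }

}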
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
index 06c1916dd4f..07e645025c0 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
@@ -99,23 +99,29 @@ public class GroupPreparer {
Version osVersion = nodeRepository.osVersions().targetFor(hostType).orElse(Version.emptyVersion);
NodeAllocation.HostDeficit deficit = allocation.hostDeficit().get();
List<Node> hosts = new ArrayList<>();
- Consumer<List<ProvisionedHost>> provisionedHostsConsumer = provisionedHosts -> {
- hosts.addAll(provisionedHosts.stream().map(ProvisionedHost::generateHost).toList());
+ Consumer<List<ProvisionedHost>> whenProvisioned = provisionedHosts -> {
+ hosts.addAll(provisionedHosts.stream().map(host -> host.generateHost(requestedNodes.hostTTL())).toList());
nodeRepository.nodes().addNodes(hosts, Agent.application);
// Offer the nodes on the newly provisioned hosts, this should be enough to cover the deficit
List<NodeCandidate> candidates = provisionedHosts.stream()
.map(host -> NodeCandidate.createNewExclusiveChild(host.generateNode(),
- host.generateHost()))
+ host.generateHost(requestedNodes.hostTTL())))
.toList();
allocation.offer(candidates);
};
-
try {
- hostProvisioner.get().provisionHosts(
- allocation.provisionIndices(deficit.count()), hostType, deficit.resources(), application,
- osVersion, sharing, Optional.of(cluster.type()), Optional.of(cluster.id()), requestedNodes.cloudAccount(),
- provisionedHostsConsumer);
+ HostProvisionRequest request = new HostProvisionRequest(allocation.provisionIndices(deficit.count()),
+ hostType,
+ deficit.resources(),
+ application,
+ osVersion,
+ sharing,
+ Optional.of(cluster.type()),
+ Optional.of(cluster.id()),
+ requestedNodes.cloudAccount(),
+ deficit.dueToFlavorUpgrade());
+ hostProvisioner.get().provisionHosts(request, whenProvisioned);
} catch (NodeAllocationException e) {
// Mark the nodes that were written to ZK in the consumer for deprovisioning. While these hosts do
// not exist, we cannot remove them from ZK here because other nodes may already have been
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisionRequest.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisionRequest.java
new file mode 100644
index 00000000000..f7b9c9016b1
--- /dev/null
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisionRequest.java
@@ -0,0 +1,63 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.provision.provisioning;
+
+import com.yahoo.component.Version;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.CloudAccount;
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.NodeResources;
+import com.yahoo.config.provision.NodeType;
+
+import java.util.List;
+import java.util.Objects;
+import java.util.Optional;
+
+/**
+ * A host provisioning request. This contains the details required to provision a host.
+ *
+ * @param indices List of unique provision indices which will be used to generate the node hostnames
+ * of the form <code>[prefix][index].[domain]</code>.
+ * @param type The host type to provision.
+ * @param resources The resources needed per node - the provisioned host may be significantly larger.
+ * @param owner ID of the application that will own the provisioned host.
+ * @param osVersion The OS version to use. If this version does not exist, implementations may choose a suitable
+ * fallback version.
+ * @param sharing Puts requirements on sharing or exclusivity of the host to be provisioned.
+ * @param clusterType The cluster we are provisioning for, or empty if we are provisioning hosts
+ * to be shared by multiple cluster nodes.
+ * @param clusterId The ID of the cluster we are provisioning for, or empty if we are provisioning hosts
+ * to be shared by multiple cluster nodes.
+ * @param cloudAccount The cloud account to use.
+ * @param requireLatestGeneration Whether to require the latest generation when choosing a flavor. Latest generation will
+ * always be preferred, but setting this to true disallows falling back to an older
+ * generation.
+ * @author mpolden
+ */
+public record HostProvisionRequest(List<Integer> indices,
+ NodeType type,
+ NodeResources resources,
+ ApplicationId owner,
+ Version osVersion,
+ HostProvisioner.HostSharing sharing,
+ Optional<ClusterSpec.Type> clusterType,
+ Optional<ClusterSpec.Id> clusterId,
+ CloudAccount cloudAccount,
+ boolean requireLatestGeneration) {
+
+ public HostProvisionRequest(List<Integer> indices, NodeType type, NodeResources resources,
+ ApplicationId owner, Version osVersion, HostProvisioner.HostSharing sharing,
+ Optional<ClusterSpec.Type> clusterType, Optional<ClusterSpec.Id> clusterId,
+ CloudAccount cloudAccount, boolean requireLatestGeneration) {
+ this.indices = List.copyOf(Objects.requireNonNull(indices));
+ this.type = Objects.requireNonNull(type);
+ this.resources = Objects.requireNonNull(resources);
+ this.owner = Objects.requireNonNull(owner);
+ this.osVersion = Objects.requireNonNull(osVersion);
+ this.sharing = Objects.requireNonNull(sharing);
+ this.clusterType = Objects.requireNonNull(clusterType);
+ this.clusterId = Objects.requireNonNull(clusterId);
+ this.cloudAccount = Objects.requireNonNull(cloudAccount);
+ this.requireLatestGeneration = requireLatestGeneration;
+ }
+
+}
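The canonical constructor above null-checks every component and snapshots the index list, so a request cannot be changed after construction. The same pattern in a minimal, generic form:

import java.util.List;
import java.util.Objects;

// Generic illustration of the defensive-copy pattern used by the request record above.
record ToyRequest(List<Integer> indices, String owner, boolean requireLatestGeneration) {

    ToyRequest {
        indices = List.copyOf(Objects.requireNonNull(indices)); // immutable snapshot of the caller's list
        owner = Objects.requireNonNull(owner);
    }

}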
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java
index ce48c5adab8..397eb4d7af9 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java
@@ -1,18 +1,12 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.provisioning;
-import com.yahoo.component.Version;
-import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.CloudAccount;
-import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.HostEvent;
import com.yahoo.config.provision.NodeAllocationException;
-import com.yahoo.config.provision.NodeResources;
-import com.yahoo.config.provision.NodeType;
import com.yahoo.vespa.hosted.provision.Node;
import java.util.List;
-import java.util.Optional;
import java.util.Set;
import java.util.function.Consumer;
@@ -39,35 +33,14 @@ public interface HostProvisioner {
/**
* Schedule provisioning of a given number of hosts.
*
- * @param provisionIndices list of unique provision indices which will be used to generate the node hostnames
- * on the form of <code>[prefix][index].[domain]</code>
- * @param hostType the host type to provision
- * @param resources the resources needed per node - the provisioned host may be significantly larger
- * @param applicationId id of the application that will own the provisioned host
- * @param osVersion the OS version to use. If this version does not exist, implementations may choose a suitable
- * fallback version.
- * @param sharing puts requirements on sharing or exclusivity of the host to be provisioned.
- * @param clusterType the cluster we are provisioning for, or empty if we are provisioning hosts
- * to be shared by multiple cluster nodes
- * @param clusterId the id of the cluster we are provisioning for, or empty if we are provisioning hosts
- * to be shared by multiple cluster nodes
- * @param cloudAccount the cloud account to use
- * @param provisionedHostConsumer consumer of {@link ProvisionedHost}s describing the provisioned nodes,
- * the {@link Node} returned from {@link ProvisionedHost#generateHost()} must be
- * written to ZK immediately in case the config server goes down while waiting
- * for the provisioning to finish.
+ * @param request details of the host provision request.
+ * @param whenProvisioned consumer of {@link ProvisionedHost}s describing the provisioned nodes;
+ * the {@link Node} returned from {@link ProvisionedHost#generateHost} must be
+ * written to ZK immediately in case the config server goes down while waiting
+ * for the provisioning to finish.
* @throws NodeAllocationException if the cloud provider cannot satisfy the request
*/
- void provisionHosts(List<Integer> provisionIndices,
- NodeType hostType,
- NodeResources resources,
- ApplicationId applicationId,
- Version osVersion,
- HostSharing sharing,
- Optional<ClusterSpec.Type> clusterType,
- Optional<ClusterSpec.Id> clusterId,
- CloudAccount cloudAccount,
- Consumer<List<ProvisionedHost>> provisionedHostConsumer) throws NodeAllocationException;
+ void provisionHosts(HostProvisionRequest request, Consumer<List<ProvisionedHost>> whenProvisioned) throws NodeAllocationException;
/**
* Continue provisioning of given list of Nodes.
@@ -103,4 +76,7 @@ public interface HostProvisioner {
*/
List<HostEvent> hostEventsIn(List<CloudAccount> cloudAccounts);
+ /** Returns whether flavor for given host can be upgraded to a newer generation */
+ boolean canUpgradeFlavor(Node host, Node child);
+
}
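The interface now takes a single request plus a completion callback instead of ten positional parameters. A toy, self-contained version of that shape to make the control flow concrete (not the real HostProvisioner types):

import java.util.List;
import java.util.function.Consumer;

// Toy illustration of the request + callback provisioning signature.
record ToyProvisionRequest(List<Integer> indices, boolean requireLatestGeneration) { }

interface ToyHostProvisioner {
    void provisionHosts(ToyProvisionRequest request, Consumer<List<String>> whenProvisioned);
}

class InMemoryProvisioner implements ToyHostProvisioner {

    @Override
    public void provisionHosts(ToyProvisionRequest request, Consumer<List<String>> whenProvisioned) {
        // Resolve each index to a hostname, then hand the result to the caller's callback so it can be
        // persisted before provisioning completes (mirroring the 'written to ZK immediately' requirement)
        List<String> hostnames = request.indices().stream().map(i -> "host" + i + ".example.com").toList();
        whenProvisioned.accept(hostnames);
    }

}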
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
index fe40b2c5001..ae1edab7fad 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
@@ -70,16 +70,10 @@ public class LoadBalancerProvisioner {
this.deactivateRouting = PermanentFlags.DEACTIVATE_ROUTING.bindTo(nodeRepository.flagSource());
// Read and write all load balancers to make sure they are stored in the latest version of the serialization format
- CloudAccount zoneAccount = nodeRepository.zone().cloud().account();
for (var id : db.readLoadBalancerIds()) {
try (var lock = db.lock(id.application())) {
var loadBalancer = db.readLoadBalancer(id);
- loadBalancer.ifPresent(lb -> {
- // TODO (freva): Remove after 8.166
- if (!zoneAccount.isUnspecified() && lb.instance().isPresent() && lb.instance().get().cloudAccount().isUnspecified())
- lb = lb.with(Optional.of(lb.instance().get().with(zoneAccount)));
- db.writeLoadBalancer(lb, lb.state());
- });
+ loadBalancer.ifPresent(lb -> db.writeLoadBalancer(lb, lb.state()));
}
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java
index 7f0d201b3e4..b202a10c4d3 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java
@@ -31,6 +31,7 @@ import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.logging.Logger;
import java.util.stream.Collectors;
+import java.util.stream.Stream;
/**
* Used to manage a list of nodes during the node reservation process to fulfill the nodespec.
@@ -59,8 +60,11 @@ class NodeAllocation {
/** The number of already allocated nodes accepted and not retired */
private int accepted = 0;
- /** The number of already allocated nodes accepted and not retired and not needing resize */
- private int acceptedWithoutResizingRetired = 0;
+ /** The number of already allocated nodes of compatible size */
+ private int acceptedAndCompatible = 0;
+
+ /** The number of already allocated nodes which can be made compatible */
+ private int acceptedAndCompatibleOrResizable = 0;
/** The number of nodes rejected because of clashing parentHostname */
private int rejectedDueToClashingParentHost = 0;
@@ -73,7 +77,10 @@ class NodeAllocation {
/** The number of nodes that just now was changed to retired */
private int wasRetiredJustNow = 0;
- /** The node indexes to verify uniqueness of each members index */
+ /** The number of nodes that just now was changed to retired to upgrade its host flavor */
+ private int wasRetiredDueToFlavorUpgrade = 0;
+
+ /** The node indexes to verify uniqueness of each member's index */
private final Set<Integer> indexes = new HashSet<>();
/** The next membership index to assign to a new node */
@@ -93,11 +100,11 @@ class NodeAllocation {
this.nodeRepository = nodeRepository;
this.nodeResourceLimits = new NodeResourceLimits(nodeRepository);
this.requiredHostFlavor = Optional.of(PermanentFlags.HOST_FLAVOR.bindTo(nodeRepository.flagSource())
- .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm())
- .with(FetchVector.Dimension.CLUSTER_TYPE, cluster.type().name())
- .with(FetchVector.Dimension.CLUSTER_ID, cluster.id().value())
- .value())
- .filter(s -> !s.isBlank());
+ .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm())
+ .with(FetchVector.Dimension.CLUSTER_TYPE, cluster.type().name())
+ .with(FetchVector.Dimension.CLUSTER_ID, cluster.id().value())
+ .value())
+ .filter(s -> !s.isBlank());
}
/**
@@ -123,9 +130,8 @@ class NodeAllocation {
if (nodeRepository.zone().cloud().allowEnclave() && candidate.parent.isPresent() && ! candidate.parent.get().cloudAccount().equals(requestedNodes.cloudAccount())) continue; // wrong account
boolean resizeable = requestedNodes.considerRetiring() && candidate.isResizable;
- boolean acceptToRetire = acceptToRetire(candidate);
- if ((! saturated() && hasCompatibleResources(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) {
+ if ((! saturated() && hasCompatibleResources(candidate) && requestedNodes.acceptable(candidate)) || acceptIncompatible(candidate)) {
candidate = candidate.withNode();
if (candidate.isValid())
acceptNode(candidate, shouldRetire(candidate, candidates), resizeable);
@@ -167,6 +173,7 @@ class NodeAllocation {
if ( ! nodeResourceLimits.isWithinRealLimits(candidate, application, cluster)) return Retirement.outsideRealLimits;
if (violatesParentHostPolicy(candidate)) return Retirement.violatesParentHostPolicy;
if ( ! hasCompatibleResources(candidate)) return Retirement.incompatibleResources;
+ if (candidate.parent.map(node -> node.status().wantToUpgradeFlavor()).orElse(false)) return Retirement.violatesHostFlavorGeneration;
if (candidate.wantToRetire()) return Retirement.hardRequest;
if (candidate.preferToRetire() && candidate.replaceableBy(candidates)) return Retirement.softRequest;
if (violatesExclusivity(candidate)) return Retirement.violatesExclusivity;
@@ -220,23 +227,27 @@ class NodeAllocation {
/**
* Returns whether this node should be accepted into the cluster even if it is not currently desired
* (already enough nodes, or wrong resources, etc.).
- * Such nodes will be marked retired during finalization of the list of accepted nodes.
+ * Such nodes will be marked retired during finalization of the list of accepted nodes when allowed.
* The conditions for this are:
*
- * This is a stateful node. These must always be retired before being removed to allow the cluster to
+ * - We are forced to accept since we cannot remove gracefully (bootstrap).
+ *
+ * - This is a stateful node. These must always be retired before being removed to allow the cluster to
* migrate away data.
*
- * This is a container node and it is not desired due to having the wrong flavor. In this case this
+ * - This is a container node and it is not desired due to having the wrong flavor. In this case this
* will (normally) obtain for all the current nodes in the cluster and so retiring before removing must
* be used to avoid removing all the current nodes at once, before the newly allocated replacements are
* initialized. (In the other case, where a container node is not desired because we have enough nodes we
* do want to remove it immediately to get immediate feedback on how the size reduction works out.)
*/
- private boolean acceptToRetire(NodeCandidate candidate) {
+ private boolean acceptIncompatible(NodeCandidate candidate) {
if (candidate.state() != Node.State.active) return false;
if (! candidate.allocation().get().membership().cluster().group().equals(cluster.group())) return false;
if (candidate.allocation().get().membership().retired()) return true; // don't second-guess if already retired
- if (! requestedNodes.considerRetiring()) return false;
+
+ if ( ! requestedNodes.considerRetiring()) // the node is active and we are not allowed to remove gracefully, so keep
+ return true;
return cluster.isStateful() ||
(cluster.type() == ClusterSpec.Type.container && !hasCompatibleResources(candidate));
@@ -258,18 +269,24 @@ class NodeAllocation {
// We want to allocate new nodes rather than unretiring with resize, so count without those
// for the purpose of deciding when to stop accepting nodes (saturation)
if (node.allocation().isEmpty()
- || ! ( requestedNodes.needsResize(node) && node.allocation().get().membership().retired()))
- acceptedWithoutResizingRetired++;
+ || ! ( requestedNodes.needsResize(node) &&
+ (node.allocation().get().membership().retired() || ! requestedNodes.considerRetiring()))) {
+ acceptedAndCompatible++;
+ }
+ if (hasCompatibleResources(candidate))
+ acceptedAndCompatibleOrResizable++;
- if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired())) {
+ if (resizeable && ! ( node.allocation().isPresent() && node.allocation().get().membership().retired()))
node = resize(node);
- }
if (node.state() != Node.State.active) // reactivated node - wipe state that deactivated it
node = node.unretire().removable(false);
} else if (retirement != Retirement.alreadyRetired) {
LOG.info("Retiring " + node + " because " + retirement.description());
++wasRetiredJustNow;
+ if (retirement == Retirement.violatesHostFlavorGeneration) {
+ ++wasRetiredDueToFlavorUpgrade;
+ }
node = node.retire(nodeRepository.clock().instant());
}
if ( ! node.allocation().get().membership().cluster().equals(cluster)) {
@@ -297,13 +314,13 @@ class NodeAllocation {
}
/** Returns true if no more nodes are needed in this list */
- private boolean saturated() {
- return requestedNodes.saturatedBy(acceptedWithoutResizingRetired);
+ public boolean saturated() {
+ return requestedNodes.saturatedBy(acceptedAndCompatible);
}
/** Returns true if the content of this list is sufficient to meet the request */
boolean fulfilled() {
- return requestedNodes.fulfilledBy(accepted());
+ return requestedNodes.fulfilledBy(acceptedAndCompatibleOrResizable());
}
/** Returns true if this allocation was already fulfilled and resulted in no new changes */
@@ -319,15 +336,19 @@ class NodeAllocation {
/**
* Returns {@link HostDeficit} describing the host deficit for the given {@link NodeSpec}.
*
- * @return empty if the requested spec is already fulfilled. Otherwise returns {@link HostDeficit} containing the
+ * @return empty if the requested spec is already fulfilled. Otherwise, returns {@link HostDeficit} containing the
* flavor and host count required to cover the deficit.
*/
Optional<HostDeficit> hostDeficit() {
if (nodeType().isHost()) {
return Optional.empty(); // Hosts are provisioned as required by the child application
}
+ int deficit = requestedNodes.fulfilledDeficitCount(acceptedAndCompatibleOrResizable());
+ // We can only require flavor upgrade if the entire deficit is caused by upgrades
+ boolean dueToFlavorUpgrade = deficit == wasRetiredDueToFlavorUpgrade;
return Optional.of(new HostDeficit(requestedNodes.resources().orElseGet(NodeResources::unspecified),
- requestedNodes.fulfilledDeficitCount(accepted())))
+ deficit,
+ dueToFlavorUpgrade))
.filter(hostDeficit -> hostDeficit.count() > 0);
}
@@ -341,6 +362,7 @@ class NodeAllocation {
// Infrastructure hosts have fixed indices, starting at 1
Set<Integer> currentIndices = allNodes.nodeType(hostType)
+ .not().state(Node.State.deprovisioned)
.hostnames()
.stream()
// TODO(mpolden): Use cluster index instead of parsing hostname, once all
@@ -354,7 +376,7 @@ class NodeAllocation {
indices.add(i);
}
}
- // Ignore our own index as we should never try to provision ourself. This can happen in the following scenario:
+ // Ignore our own index as we should never try to provision ourselves. This can happen in the following scenario:
// - cfg1 has been deprovisioned
// - cfg2 has triggered provisioning of a new cfg1
// - cfg1 is starting and redeploys its infrastructure application during bootstrap. A deficit is detected
@@ -379,8 +401,8 @@ class NodeAllocation {
* @return the final list of nodes
*/
List<Node> finalNodes() {
- int wantToRetireCount = (int) nodes.values().stream().filter(NodeCandidate::wantToRetire).count();
- int currentRetiredCount = (int) nodes.values().stream().filter(node -> node.allocation().get().membership().retired()).count();
+ int wantToRetireCount = (int) matching(NodeCandidate::wantToRetire).count();
+ int currentRetiredCount = (int) matching(node -> node.allocation().get().membership().retired()).count();
int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), wantToRetireCount, currentRetiredCount);
if (deltaRetiredCount > 0) { // retire until deltaRetiredCount is 0
@@ -415,29 +437,26 @@ class NodeAllocation {
nodes.put(candidate.toNode().hostname(), candidate);
}
- return nodes.values().stream().map(n -> n.toNode()).toList();
+ return nodes.values().stream().map(NodeCandidate::toNode).toList();
}
List<Node> reservableNodes() {
// Include already reserved nodes to extend reservation period and to potentially update their cluster spec.
EnumSet<Node.State> reservableStates = EnumSet.of(Node.State.inactive, Node.State.ready, Node.State.reserved);
- return nodesFilter(n -> ! n.isNew && reservableStates.contains(n.state()));
+ return matching(n -> ! n.isNew && reservableStates.contains(n.state())).toList();
}
List<Node> newNodes() {
- return nodesFilter(n -> n.isNew);
+ return matching(node -> node.isNew).toList();
}
- private List<Node> nodesFilter(Predicate<NodeCandidate> predicate) {
- return nodes.values().stream()
- .filter(predicate)
- .map(n -> n.toNode())
- .toList();
+ private Stream<Node> matching(Predicate<NodeCandidate> predicate) {
+ return nodes.values().stream().filter(predicate).map(NodeCandidate::toNode);
}
/** Returns the number of nodes accepted this far */
- private int accepted() {
- if (nodeType() == NodeType.tenant) return accepted;
+ private int acceptedAndCompatibleOrResizable() {
+ if (nodeType() == NodeType.tenant) return acceptedAndCompatibleOrResizable;
// Infrastructure nodes are always allocated by type. Count all nodes as accepted so that we never exceed
// the wanted number of nodes for the type.
return allNodes.nodeType(nodeType()).size();
@@ -487,10 +506,11 @@ class NodeAllocation {
outsideRealLimits("node real resources is outside limits"),
violatesParentHostPolicy("node violates parent host policy"),
incompatibleResources("node resources are incompatible"),
- hardRequest("node is requested to retire"),
- softRequest("node is requested to retire (soft)"),
+ hardRequest("node is requested and required to retire"),
+ softRequest("node is requested to retire"),
violatesExclusivity("node violates host exclusivity"),
violatesHostFlavor("node violates host flavor"),
+ violatesHostFlavorGeneration("node violates host flavor generation"),
none("");
private final String description;
@@ -499,7 +519,7 @@ class NodeAllocation {
this.description = description;
}
- /** Human readable description of this cause */
+ /** Human-readable description of this cause */
public String description() {
return description;
}
@@ -507,27 +527,11 @@ class NodeAllocation {
}
/** A host deficit, the number of missing hosts, for a deployment */
- static class HostDeficit {
-
- private final NodeResources resources;
- private final int count;
-
- private HostDeficit(NodeResources resources, int count) {
- this.resources = resources;
- this.count = count;
- }
-
- NodeResources resources() {
- return resources;
- }
-
- int count() {
- return count;
- }
+ record HostDeficit(NodeResources resources, int count, boolean dueToFlavorUpgrade) {
@Override
public String toString() {
- return "deficit of " + count + " nodes with " + resources;
+ return "deficit of " + count + " nodes with " + resources + (dueToFlavorUpgrade ? ", due to flavor upgrade" : "");
}
}
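The dueToFlavorUpgrade flag is set only when the entire deficit is explained by flavor-upgrade retirements. A small sketch of that decision with hypothetical names:

// Hypothetical helper mirroring the deficit computation in hostDeficit() above.
final class DeficitPolicy {

    record Deficit(int count, boolean dueToFlavorUpgrade) { }

    static Deficit deficitOf(int wantedNodes, int acceptedOrResizable, int retiredForFlavorUpgrade) {
        int missing = Math.max(0, wantedNodes - acceptedOrResizable);
        // Only require latest-generation hosts when every missing node is missing because of a flavor upgrade
        return new Deficit(missing, missing > 0 && missing == retiredForFlavorUpgrade);
    }

}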
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java
index b194730727f..8462e23fbfd 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java
@@ -25,6 +25,7 @@ import java.util.logging.Logger;
* A node candidate containing the details required to prioritize it for allocation. This is immutable.
*
* @author smorgrav
+ * @author bratseth
*/
public abstract class NodeCandidate implements Nodelike, Comparable<NodeCandidate> {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
index 9dcc564190b..4f21c8dcd50 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
@@ -32,14 +32,13 @@ public class NodePrioritizer {
private final List<NodeCandidate> candidates = new ArrayList<>();
private final LockedNodeList allNodes;
private final HostCapacity capacity;
+ private final HostResourcesCalculator calculator;
private final NodeSpec requestedNodes;
private final ApplicationId application;
private final ClusterSpec clusterSpec;
private final NameResolver nameResolver;
private final Nodes nodes;
private final boolean dynamicProvisioning;
- /** Whether node specification allows new nodes to be allocated. */
- private final boolean canAllocateNew;
private final boolean canAllocateToSpareHosts;
private final boolean topologyChange;
private final int currentClusterSize;
@@ -50,6 +49,7 @@ public class NodePrioritizer {
int wantedGroups, boolean dynamicProvisioning, NameResolver nameResolver, Nodes nodes,
HostResourcesCalculator hostResourcesCalculator, int spareCount, boolean enclave) {
this.allNodes = allNodes;
+ this.calculator = hostResourcesCalculator;
this.capacity = new HostCapacity(this.allNodes, hostResourcesCalculator);
this.requestedNodes = nodeSpec;
this.clusterSpec = clusterSpec;
@@ -81,9 +81,6 @@ public class NodePrioritizer {
// NodeCandidate::compareTo will ensure that they will not be used until there is no room elsewhere.
// In non-dynamically provisioned zones, we only allow allocating to spare hosts to replace failed nodes.
this.canAllocateToSpareHosts = dynamicProvisioning || isReplacement(nodesInCluster, clusterSpec.group());
- // Do not allocate new nodes for exclusive deployments in dynamically provisioned zones: provision new host instead.
- this.canAllocateNew = requestedNodes instanceof NodeSpec.CountNodeSpec
- && (!dynamicProvisioning || !requestedNodes.isExclusive());
}
/** Collects all node candidates for this application and returns them in the most-to-least preferred order */
@@ -136,13 +133,14 @@ public class NodePrioritizer {
/** Add a node on each host with enough capacity for the requested flavor */
private void addCandidatesOnExistingHosts() {
- if ( !canAllocateNew) return;
+ if (requestedNodes.resources().isEmpty()) return;
for (Node host : allNodes) {
if ( ! nodes.canAllocateTenantNodeTo(host, dynamicProvisioning)) continue;
+ if (nodes.suspended(host)) continue; // Hosts that are suspended may be down for some time, e.g. for OS upgrade
if (host.reservedTo().isPresent() && !host.reservedTo().get().equals(application.tenant())) continue;
if (host.reservedTo().isPresent() && application.instance().isTester()) continue;
- if (host.exclusiveToApplicationId().isPresent()) continue; // Never allocate new nodes to exclusive hosts
+ if (host.exclusiveToApplicationId().isPresent() && ! fitsPerfectly(host)) continue;
if ( ! host.exclusiveToClusterType().map(clusterSpec.type()::equals).orElse(true)) continue;
if (spareHosts.contains(host) && !canAllocateToSpareHosts) continue;
if ( ! capacity.hasCapacity(host, requestedNodes.resources().get())) continue;
@@ -158,6 +156,10 @@ public class NodePrioritizer {
}
}
+ private boolean fitsPerfectly(Node host) {
+ return calculator.advertisedResourcesOf(host.flavor()).compatibleWith(requestedNodes.resources().get());
+ }
+
/** Add existing nodes allocated to the application */
private void addApplicationNodes() {
EnumSet<Node.State> legalStates = EnumSet.of(Node.State.active, Node.State.inactive, Node.State.reserved);
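
The change above replaces the blanket "never allocate new nodes to exclusive hosts" rule with fitsPerfectly: an exclusive host remains a candidate when the requested node resources match its flavor's advertised resources. A simplified, self-contained sketch of that filter, with Resources and Host as stand-ins for the Vespa types (the other candidate checks are omitted):

    import java.util.List;

    // Resources and Host are illustrative stand-ins for NodeResources and Node/Flavor.
    record Resources(double vcpu, double memoryGb) {
        boolean compatibleWith(Resources other) { return vcpu == other.vcpu && memoryGb == other.memoryGb; }
    }
    record Host(String hostname, boolean exclusiveToApplication, Resources advertisedFlavorResources) {}

    class FitsPerfectlyDemo {
        // Mirrors the intent of the new check: a host exclusive to an application is only reused
        // when the requested resources match its flavor's advertised resources exactly.
        static boolean isCandidate(Host host, Resources requested) {
            if (host.exclusiveToApplication() && ! host.advertisedFlavorResources().compatibleWith(requested)) return false;
            return true; // suspension, reservation and remaining-capacity checks omitted here
        }

        public static void main(String[] args) {
            Resources requested = new Resources(8, 32);
            List<Host> hosts = List.of(new Host("host1", true,  new Resources(8, 32)),
                                       new Host("host2", true,  new Resources(16, 64)),
                                       new Host("host3", false, new Resources(16, 64)));
            hosts.forEach(h -> System.out.println(h.hostname() + " is a candidate: " + isCandidate(h, requested)));
        }
    }
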
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
index 237a6657ccc..ffd2805bcff 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
@@ -38,7 +38,6 @@ import java.util.List;
import java.util.Optional;
import java.util.logging.Level;
import java.util.logging.Logger;
-import java.util.stream.Stream;
/**
* Implementation of the host provisioner API for hosted Vespa, using the node repository to allocate nodes.
@@ -101,7 +100,8 @@ public class NodeRepositoryProvisioner implements Provisioner {
groups = target.groups();
resources = getNodeResources(cluster, target.nodeResources(), application);
nodeSpec = NodeSpec.from(target.nodes(), resources, cluster.isExclusive(), actual.canFail(),
- requested.cloudAccount().orElse(nodeRepository.zone().cloud().account()));
+ requested.cloudAccount().orElse(nodeRepository.zone().cloud().account()),
+ requested.clusterInfo().hostTTL());
}
else {
groups = 1; // type request with multiple groups is not supported
@@ -177,7 +177,7 @@ public class NodeRepositoryProvisioner implements Provisioner {
firstDeployment // start at min, preserve current resources otherwise
? new AllocatableClusterResources(initialResourcesFrom(requested, clusterSpec, application.id()), clusterSpec, nodeRepository)
: new AllocatableClusterResources(nodes, nodeRepository);
- var clusterModel = new ClusterModel(zone, application, clusterSpec, cluster, nodes, nodeRepository.metricsDb(), nodeRepository.clock());
+ var clusterModel = new ClusterModel(nodeRepository, application, clusterSpec, cluster, nodes, nodeRepository.metricsDb(), nodeRepository.clock());
return within(Limits.of(requested), currentResources, firstDeployment, clusterModel);
}
@@ -269,34 +269,32 @@ public class NodeRepositoryProvisioner implements Provisioner {
private IllegalArgumentException newNoAllocationPossible(ClusterSpec spec, Limits limits) {
StringBuilder message = new StringBuilder("No allocation possible within ").append(limits);
- if (nodeRepository.exclusiveAllocation(spec))
- message.append(". Nearest allowed node resources: ").append(findNearestNodeResources(limits));
+ if (nodeRepository.exclusiveAllocation(spec) && findNearestNodeResources(limits).isPresent())
+ message.append(". Nearest allowed node resources: ").append(findNearestNodeResources(limits).get());
return new IllegalArgumentException(message.toString());
}
- private NodeResources findNearestNodeResources(Limits limits) {
- NodeResources nearestMin = nearestFlavorResources(limits.min().nodeResources());
- NodeResources nearestMax = nearestFlavorResources(limits.max().nodeResources());
- if (limits.min().nodeResources().distanceTo(nearestMin) < limits.max().nodeResources().distanceTo(nearestMax))
+ private Optional<NodeResources> findNearestNodeResources(Limits limits) {
+ Optional<NodeResources> nearestMin = nearestFlavorResources(limits.min().nodeResources());
+ Optional<NodeResources> nearestMax = nearestFlavorResources(limits.max().nodeResources());
+ if (nearestMin.isEmpty()) return nearestMax;
+ if (nearestMax.isEmpty()) return nearestMin;
+ if (limits.min().nodeResources().distanceTo(nearestMin.get()) < limits.max().nodeResources().distanceTo(nearestMax.get()))
return nearestMin;
else
return nearestMax;
}
/** Returns the advertised flavor resources which are nearest to the given resources */
- private NodeResources nearestFlavorResources(NodeResources requestedResources) {
- NodeResources nearestHostResources = nodeRepository.flavors().getFlavors().stream()
- .map(flavor -> nodeRepository.resourcesCalculator().advertisedResourcesOf(flavor))
- .filter(resources -> resources.diskSpeed().compatibleWith(requestedResources.diskSpeed()))
- .filter(resources -> resources.storageType().compatibleWith(requestedResources.storageType()))
- .filter(resources -> resources.architecture().compatibleWith(requestedResources.architecture()))
- .min(Comparator.comparingDouble(resources -> resources.distanceTo(requestedResources)))
- .orElseThrow()
- .withBandwidthGbps(requestedResources.bandwidthGbps());
- if ( nearestHostResources.storageType() == NodeResources.StorageType.remote)
- nearestHostResources = nearestHostResources.withDiskGb(requestedResources.diskGb());
- return nearestHostResources;
+ private Optional<NodeResources> nearestFlavorResources(NodeResources requestedResources) {
+ return nodeRepository.flavors().getFlavors().stream()
+ .map(flavor -> nodeRepository.resourcesCalculator().advertisedResourcesOf(flavor))
+ .filter(resources -> resources.satisfies(requestedResources))
+ .min(Comparator.comparingDouble(resources -> resources.distanceTo(requestedResources)))
+ .map(resources -> resources.withBandwidthGbps(requestedResources.bandwidthGbps()))
+ .map(resources -> resources.storageType() == NodeResources.StorageType.remote ?
+ resources.withDiskGb(requestedResources.diskGb()) : resources);
}
}
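
nearestFlavorResources above now returns an Optional and relies on a satisfies filter, so an empty flavor list or an unsatisfiable request yields Optional.empty() rather than a thrown exception. A standalone sketch of that shape, where FlavorResources, distanceTo and satisfies are simplified stand-ins for the real types and calculations:

    import java.util.Comparator;
    import java.util.List;
    import java.util.Optional;

    // Illustrative stand-in for NodeResources; distanceTo and satisfies only mimic the real calculations.
    record FlavorResources(double vcpu, double memoryGb) {
        double distanceTo(FlavorResources other) {
            return Math.abs(vcpu - other.vcpu) + Math.abs(memoryGb - other.memoryGb);
        }
        boolean satisfies(FlavorResources requested) {
            return vcpu >= requested.vcpu && memoryGb >= requested.memoryGb;
        }
    }

    class NearestFlavorDemo {
        // Mirrors the Optional-returning method above: no satisfying flavor -> Optional.empty().
        static Optional<FlavorResources> nearest(List<FlavorResources> advertised, FlavorResources requested) {
            return advertised.stream()
                             .filter(r -> r.satisfies(requested))
                             .min(Comparator.comparingDouble(r -> r.distanceTo(requested)));
        }

        public static void main(String[] args) {
            List<FlavorResources> flavors = List.of(new FlavorResources(4, 16),
                                                    new FlavorResources(8, 32),
                                                    new FlavorResources(16, 64));
            System.out.println(nearest(flavors, new FlavorResources(6, 24)));   // nearest satisfying flavor: 8/32
            System.out.println(nearest(flavors, new FlavorResources(32, 256))); // Optional.empty
        }
    }
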
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
index 4d33e1c7bad..9ded1a2735c 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
@@ -27,13 +27,11 @@ public class NodeResourceLimits {
/** Validates the resources applications ask for (which are in "advertised" resource space) */
public void ensureWithinAdvertisedLimits(String type, NodeResources requested, ApplicationId applicationId, ClusterSpec cluster) {
- if (requested.isUnspecified()) return;
-
- if (requested.vcpu() < minAdvertisedVcpu(applicationId, cluster))
+ if (! requested.vcpuIsUnspecified() && requested.vcpu() < minAdvertisedVcpu(applicationId, cluster))
illegal(type, "vcpu", "", cluster, requested.vcpu(), minAdvertisedVcpu(applicationId, cluster));
- if (requested.memoryGb() < minAdvertisedMemoryGb(cluster))
+ if (! requested.memoryGbIsUnspecified() && requested.memoryGb() < minAdvertisedMemoryGb(cluster))
illegal(type, "memoryGb", "Gb", cluster, requested.memoryGb(), minAdvertisedMemoryGb(cluster));
- if (requested.diskGb() < minAdvertisedDiskGb(requested, cluster.isExclusive()))
+ if (! requested.diskGbIsUnspecified() && requested.diskGb() < minAdvertisedDiskGb(requested, cluster.isExclusive()))
illegal(type, "diskGb", "Gb", cluster, requested.diskGb(), minAdvertisedDiskGb(requested, cluster.isExclusive()));
}
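
The validation above now checks each dimension only when it is specified, so a partially specified request is still validated for the dimensions it does give. A hedged sketch of that per-dimension pattern; the minimums and the OptionalDouble-based request type are invented for illustration (the real minimums depend on application, cluster and exclusivity):

    import java.util.OptionalDouble;

    // Illustrative request type: an empty OptionalDouble plays the role of "unspecified".
    record RequestedResources(OptionalDouble vcpu, OptionalDouble memoryGb, OptionalDouble diskGb) {}

    class AdvertisedLimitsDemo {
        // Example minimums only; not the production values.
        static final double MIN_VCPU = 0.5, MIN_MEMORY_GB = 4, MIN_DISK_GB = 10;

        static void ensureWithinAdvertisedLimits(RequestedResources requested) {
            requested.vcpu().ifPresent(v -> require(v >= MIN_VCPU, "vcpu", v, MIN_VCPU));
            requested.memoryGb().ifPresent(m -> require(m >= MIN_MEMORY_GB, "memoryGb", m, MIN_MEMORY_GB));
            requested.diskGb().ifPresent(d -> require(d >= MIN_DISK_GB, "diskGb", d, MIN_DISK_GB));
        }

        static void require(boolean ok, String field, double requested, double min) {
            if ( ! ok) throw new IllegalArgumentException(field + " " + requested + " is below the minimum " + min);
        }

        public static void main(String[] args) {
            ensureWithinAdvertisedLimits(new RequestedResources(OptionalDouble.of(2), OptionalDouble.empty(), OptionalDouble.empty()));   // passes: unspecified dimensions are skipped
            ensureWithinAdvertisedLimits(new RequestedResources(OptionalDouble.of(0.1), OptionalDouble.empty(), OptionalDouble.empty())); // throws: vcpu below minimum
        }
    }
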
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java
index 28d1e7c1c68..bfe6f4211cb 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java
@@ -7,6 +7,7 @@ import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeType;
import com.yahoo.vespa.hosted.provision.Node;
+import java.time.Duration;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
@@ -67,6 +68,9 @@ public interface NodeSpec {
/** Returns the cloud account to use when fulfilling this spec */
CloudAccount cloudAccount();
+ /** Returns the host TTL to use for any hosts provisioned as a result of fulfilling this spec. */
+ default Duration hostTTL() { return Duration.ZERO; }
+
/**
* Returns true if a node with given current resources and current spare host resources can be resized
* in-place to resources in this spec.
@@ -76,8 +80,9 @@ public interface NodeSpec {
return false;
}
- static NodeSpec from(int nodeCount, NodeResources resources, boolean exclusive, boolean canFail, CloudAccount cloudAccount) {
- return new CountNodeSpec(nodeCount, resources, exclusive, canFail, canFail, cloudAccount);
+ static NodeSpec from(int nodeCount, NodeResources resources, boolean exclusive, boolean canFail,
+ CloudAccount cloudAccount, Duration hostTTL) {
+ return new CountNodeSpec(nodeCount, resources, exclusive, canFail, canFail, cloudAccount, hostTTL);
}
static NodeSpec from(NodeType type, CloudAccount cloudAccount) {
@@ -93,14 +98,17 @@ public interface NodeSpec {
private final boolean canFail;
private final boolean considerRetiring;
private final CloudAccount cloudAccount;
+ private final Duration hostTTL;
- private CountNodeSpec(int count, NodeResources resources, boolean exclusive, boolean canFail, boolean considerRetiring, CloudAccount cloudAccount) {
+ private CountNodeSpec(int count, NodeResources resources, boolean exclusive, boolean canFail,
+ boolean considerRetiring, CloudAccount cloudAccount, Duration hostTTL) {
this.count = count;
this.requestedNodeResources = Objects.requireNonNull(resources, "Resources must be specified");
this.exclusive = exclusive;
this.canFail = canFail;
this.considerRetiring = considerRetiring;
this.cloudAccount = Objects.requireNonNull(cloudAccount);
+ this.hostTTL = Objects.requireNonNull(hostTTL);
if (!canFail && considerRetiring)
throw new IllegalArgumentException("Cannot consider retiring nodes if we cannot fail");
@@ -145,11 +153,11 @@ public interface NodeSpec {
@Override
public NodeSpec fraction(int divisor) {
- return new CountNodeSpec(count/divisor, requestedNodeResources, exclusive, canFail, considerRetiring, cloudAccount);
+ return new CountNodeSpec(count/divisor, requestedNodeResources, exclusive, canFail, considerRetiring, cloudAccount, hostTTL);
}
public NodeSpec withoutRetiring() {
- return new CountNodeSpec(count, requestedNodeResources, exclusive, canFail, false, cloudAccount);
+ return new CountNodeSpec(count, requestedNodeResources, exclusive, canFail, false, cloudAccount, hostTTL);
}
@Override
@@ -187,6 +195,9 @@ public interface NodeSpec {
}
@Override
+ public Duration hostTTL() { return hostTTL; }
+
+ @Override
public String toString() { return "request for " + count + " nodes with " + requestedNodeResources; }
}
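
The hostTTL value added above is threaded from the request through CountNodeSpec, with a Duration.ZERO default on the interface meaning "no TTL". A small sketch of that interface-default pattern, using placeholder types:

    import java.time.Duration;
    import java.util.Objects;

    // Sketch only: the interface defaults to ZERO ("no TTL"); the count-based spec carries a real value.
    interface Spec {
        default Duration hostTTL() { return Duration.ZERO; }
    }

    record CountSpec(int count, Duration hostTTL) implements Spec {
        CountSpec {
            Objects.requireNonNull(hostTTL, "hostTTL must be set (use Duration.ZERO for none)");
        }
    }

    class HostTtlDemo {
        public static void main(String[] args) {
            Spec typeSpec  = new Spec() { };                        // uses the default: ZERO
            Spec countSpec = new CountSpec(4, Duration.ofDays(14));
            System.out.println(typeSpec.hostTTL() + " / " + countSpec.hostTTL()); // PT0S / PT336H
        }
    }
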
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisionedHost.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisionedHost.java
index 98afc6e7482..c9fd1d08759 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisionedHost.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisionedHost.java
@@ -14,6 +14,7 @@ import com.yahoo.vespa.hosted.provision.node.IP;
import com.yahoo.vespa.hosted.provision.node.OsVersion;
import com.yahoo.vespa.hosted.provision.node.Status;
+import java.time.Duration;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
@@ -39,7 +40,9 @@ public class ProvisionedHost {
public ProvisionedHost(String id, String hostHostname, Flavor hostFlavor, NodeType hostType,
Optional<ApplicationId> exclusiveToApplicationId, Optional<ClusterSpec.Type> exclusiveToClusterType,
- List<HostName> nodeHostnames, NodeResources nodeResources, Version osVersion, CloudAccount cloudAccount) {
+ List<HostName> nodeHostnames, NodeResources nodeResources,
+ Version osVersion, CloudAccount cloudAccount) {
+ if (!hostType.isHost()) throw new IllegalArgumentException(hostType + " is not a host");
this.id = Objects.requireNonNull(id, "Host id must be set");
this.hostHostname = Objects.requireNonNull(hostHostname, "Host hostname must be set");
this.hostFlavor = Objects.requireNonNull(hostFlavor, "Host flavor must be set");
@@ -50,7 +53,6 @@ public class ProvisionedHost {
this.nodeResources = Objects.requireNonNull(nodeResources, "Node resources must be set");
this.osVersion = Objects.requireNonNull(osVersion, "OS version must be set");
this.cloudAccount = Objects.requireNonNull(cloudAccount, "Cloud account must be set");
- if (!hostType.isHost()) throw new IllegalArgumentException(hostType + " is not a host");
}
private static List<HostName> validateNodeAddresses(List<HostName> nodeHostnames) {
@@ -62,13 +64,13 @@ public class ProvisionedHost {
}
/** Generate {@link Node} instance representing the provisioned physical host */
- public Node generateHost() {
- Node.Builder builder = Node.create(id, IP.Config.of(Set.of(), Set.of(), nodeHostnames), hostHostname, hostFlavor,
- hostType)
+ public Node generateHost(Duration hostTTL) {
+ Node.Builder builder = Node.create(id, IP.Config.of(Set.of(), Set.of(), nodeHostnames), hostHostname, hostFlavor, hostType)
.status(Status.initial().withOsVersion(OsVersion.EMPTY.withCurrent(Optional.of(osVersion))))
.cloudAccount(cloudAccount);
exclusiveToApplicationId.ifPresent(builder::exclusiveToApplicationId);
exclusiveToClusterType.ifPresent(builder::exclusiveToClusterType);
+ if ( ! hostTTL.isZero()) builder.hostTTL(hostTTL);
return builder.build();
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ArchiveResponse.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ArchiveResponse.java
index 84c82d314c9..6370b01af23 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ArchiveResponse.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ArchiveResponse.java
@@ -27,7 +27,7 @@ public class ArchiveResponse extends SlimeJsonResponse {
archiveObject.setString("uri", entry.getValue());
});
archiveUris.accountArchiveUris().entrySet().stream()
- .sorted(Map.Entry.comparingByKey())
+ .sorted()
.forEach(entry -> {
Cursor archiveObject = archivesArray.addObject();
archiveObject.setString("account", entry.getKey().value());
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodePatcher.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodePatcher.java
index 4dc48459ec9..407961dc054 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodePatcher.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodePatcher.java
@@ -63,6 +63,7 @@ public class NodePatcher {
private static final String WANT_TO_RETIRE = "wantToRetire";
private static final String WANT_TO_DEPROVISION = "wantToDeprovision";
private static final String WANT_TO_REBUILD = "wantToRebuild";
+ private static final String WANT_TO_UPGRADE_FLAVOR = "wantToUpgradeFlavor";
private static final String REPORTS = "reports";
private static final Set<String> RECURSIVE_FIELDS = Set.of(WANT_TO_RETIRE, WANT_TO_DEPROVISION);
private static final Set<String> IP_CONFIG_FIELDS = Set.of("ipAddresses",
@@ -223,12 +224,16 @@ public class NodePatcher {
case WANT_TO_DEPROVISION:
case WANT_TO_REBUILD:
// These need to be handled as one, because certain combinations are not allowed.
- return node.withWantToRetire(asOptionalBoolean(root.field(WANT_TO_RETIRE)).orElseGet(node.status()::wantToRetire),
+ return node.withWantToRetire(asOptionalBoolean(root.field(WANT_TO_RETIRE))
+ .orElseGet(node.status()::wantToRetire),
asOptionalBoolean(root.field(WANT_TO_DEPROVISION))
.orElseGet(node.status()::wantToDeprovision),
asOptionalBoolean(root.field(WANT_TO_REBUILD))
.filter(want -> !applyingAsChild)
.orElseGet(node.status()::wantToRebuild),
+ asOptionalBoolean(root.field(WANT_TO_UPGRADE_FLAVOR))
+ .filter(want -> !applyingAsChild)
+ .orElseGet(node.status()::wantToUpgradeFlavor),
Agent.operator,
clock.instant());
case REPORTS:
@@ -256,6 +261,10 @@ public class NodePatcher {
case "exclusiveTo":
case "exclusiveToApplicationId":
return node.withExclusiveToApplicationId(SlimeUtils.optionalString(value).map(ApplicationId::fromSerializedForm).orElse(null));
+ case "hostTTL":
+ return node.withHostTTL(SlimeUtils.optionalDuration(value).orElse(null));
+ case "hostEmptyAt":
+ return node.withHostEmptyAt(SlimeUtils.optionalInstant(value).orElse(null));
case "exclusiveToClusterType":
return node.withExclusiveToClusterType(SlimeUtils.optionalString(value).map(ClusterSpec.Type::valueOf).orElse(null));
case "switchHostname":
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java
index 0e33e3461e7..ff814af7390 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java
@@ -133,6 +133,8 @@ class NodesResponse extends SlimeJsonResponse {
object.setString("flavor", node.flavor().name());
node.reservedTo().ifPresent(reservedTo -> object.setString("reservedTo", reservedTo.value()));
node.exclusiveToApplicationId().ifPresent(applicationId -> object.setString("exclusiveTo", applicationId.serializedForm()));
+ node.hostTTL().ifPresent(ttl -> object.setLong("hostTTL", ttl.toMillis()));
+ node.hostEmptyAt().ifPresent(emptyAt -> object.setLong("hostEmptyAt", emptyAt.toEpochMilli()));
node.exclusiveToClusterType().ifPresent(clusterType -> object.setString("exclusiveToClusterType", clusterType.name()));
if (node.flavor().isConfigured())
object.setDouble("cpuCores", node.flavor().resources().vcpu());
@@ -244,7 +246,7 @@ class NodesResponse extends SlimeJsonResponse {
return Optional.empty();
}
- static void ipAddressesToSlime(Set<String> ipAddresses, Cursor array) {
+ static void ipAddressesToSlime(Collection<String> ipAddresses, Cursor array) {
ipAddresses.forEach(array::addString);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/WireguardResponse.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/WireguardResponse.java
index 11be80de990..76c709da97f 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/WireguardResponse.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/WireguardResponse.java
@@ -7,8 +7,10 @@ import com.yahoo.slime.Cursor;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
+import com.yahoo.vespa.hosted.provision.node.IP;
-import java.util.Set;
+import java.net.InetAddress;
+import java.util.List;
/**
* A response containing the wireguard peer config for each configserver that has a public key.
@@ -25,18 +27,25 @@ public class WireguardResponse extends SlimeJsonResponse {
.list(Node.State.active)
.nodeType(NodeType.config);
- configservers.stream()
- .filter(node -> node.wireguardPubKey().isPresent())
- .forEach(configserver -> addConfigserver(cfgArray.addObject(),
- configserver.hostname(),
- configserver.wireguardPubKey().get(),
- configserver.ipConfig().primary()));
+ for (Node cfg : configservers) {
+ if (cfg.wireguardPubKey().isEmpty()) continue;
+ List<String> ipAddresses = cfg.ipConfig().primary().stream()
+ .filter(WireguardResponse::isPublicIp)
+ .toList();
+ if (ipAddresses.isEmpty()) continue;
+
+ addConfigserver(cfgArray.addObject(), cfg.hostname(), cfg.wireguardPubKey().get(), ipAddresses);
+ }
}
- private void addConfigserver(Cursor cfgEntry, String hostname, WireguardKey key, Set<String> ipAddresses) {
+ private void addConfigserver(Cursor cfgEntry, String hostname, WireguardKey key, List<String> ipAddresses) {
cfgEntry.setString("hostname", hostname);
cfgEntry.setString("wireguardPubkey", key.value());
NodesResponse.ipAddressesToSlime(ipAddresses, cfgEntry.setArray("ipAddresses"));
}
+ private static boolean isPublicIp(String ipAddress) {
+ InetAddress address = IP.parse(ipAddress);
+ return !address.isLoopbackAddress() && !address.isLinkLocalAddress() && !address.isSiteLocalAddress();
+ }
}
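
isPublicIp above keeps only addresses that are neither loopback, link-local nor site-local. A standalone sketch of the same predicate; it uses InetAddress.getByName in place of Vespa's IP.parse, which only parses (no DNS lookup) when given an address literal:

    import java.net.InetAddress;
    import java.net.UnknownHostException;
    import java.util.List;

    class PublicIpFilterDemo {
        static boolean isPublicIp(String ipAddress) {
            try {
                InetAddress address = InetAddress.getByName(ipAddress); // literal IPs are parsed, not resolved
                return ! address.isLoopbackAddress() && ! address.isLinkLocalAddress() && ! address.isSiteLocalAddress();
            } catch (UnknownHostException e) {
                throw new IllegalArgumentException("Not an IP address literal: " + ipAddress, e);
            }
        }

        public static void main(String[] args) {
            List<String> addresses = List.of("127.0.0.1", "10.1.2.3", "fe80::1", "2001:db8::1", "203.0.113.7");
            addresses.forEach(a -> System.out.println(a + " public: " + isPublicIp(a)));
        }
    }
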
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java
index 24ea9361823..3d5987cd04d 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java
@@ -1,8 +1,6 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.testutils;
-import com.yahoo.component.Version;
-import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.CloudAccount;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Flavor;
@@ -16,6 +14,7 @@ import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.IP;
import com.yahoo.vespa.hosted.provision.provisioning.FatalProvisioningException;
import com.yahoo.vespa.hosted.provision.provisioning.HostIpConfig;
+import com.yahoo.vespa.hosted.provision.provisioning.HostProvisionRequest;
import com.yahoo.vespa.hosted.provision.provisioning.HostProvisioner;
import com.yahoo.vespa.hosted.provision.provisioning.HostResourcesCalculator;
import com.yahoo.vespa.hosted.provision.provisioning.ProvisionedHost;
@@ -23,7 +22,6 @@ import com.yahoo.vespa.hosted.provision.provisioning.ProvisionedHost;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
-import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@@ -44,10 +42,11 @@ public class MockHostProvisioner implements HostProvisioner {
private final MockNameResolver nameResolver;
private final int memoryTaxGb;
private final Set<String> rebuildsCompleted = new HashSet<>();
+ private final Map<ClusterSpec.Type, Flavor> hostFlavors = new HashMap<>();
+ private final Set<String> upgradableFlavors = new HashSet<>();
+ private final Map<Behaviour, Integer> behaviours = new HashMap<>();
private int deprovisionedHosts = 0;
- private EnumSet<Behaviour> behaviours = EnumSet.noneOf(Behaviour.class);
- private Map<ClusterSpec.Type, Flavor> hostFlavors = new HashMap<>();
public MockHostProvisioner(List<Flavor> flavors, MockNameResolver nameResolver, int memoryTaxGb) {
this.flavors = List.copyOf(flavors);
@@ -63,40 +62,43 @@ public class MockHostProvisioner implements HostProvisioner {
this(flavors, new MockNameResolver().mockAnyLookup(), memoryTaxGb);
}
+ /** Returns whether the given behaviour is active for this invocation, consuming one activation if it is */
+ private boolean behaviour(Behaviour behaviour) {
+ return behaviours.computeIfPresent(behaviour, (k, old) -> old == 0 ? null : --old) != null;
+ }
+
@Override
- public void provisionHosts(List<Integer> provisionIndices, NodeType hostType, NodeResources resources,
- ApplicationId applicationId, Version osVersion, HostSharing sharing,
- Optional<ClusterSpec.Type> clusterType, Optional<ClusterSpec.Id> clusterId,
- CloudAccount cloudAccount, Consumer<List<ProvisionedHost>> provisionedHostsConsumer) {
- Flavor hostFlavor = hostFlavors.get(clusterType.orElse(ClusterSpec.Type.content));
+ public void provisionHosts(HostProvisionRequest request, Consumer<List<ProvisionedHost>> whenProvisioned) {
+ if (behaviour(Behaviour.failProvisionRequest)) throw new NodeAllocationException("No capacity for provision request", true);
+ Flavor hostFlavor = hostFlavors.get(request.clusterType().orElse(ClusterSpec.Type.content));
if (hostFlavor == null)
hostFlavor = flavors.stream()
- .filter(f -> sharing == HostSharing.exclusive ? compatible(f, resources)
- : f.resources().satisfies(resources))
+ .filter(f -> request.sharing() == HostSharing.exclusive ? compatible(f, request.resources())
+ : f.resources().satisfies(request.resources()))
.findFirst()
- .orElseThrow(() -> new NodeAllocationException("No host flavor matches " + resources, true));
+ .orElseThrow(() -> new NodeAllocationException("No host flavor matches " + request.resources(), true));
List<ProvisionedHost> hosts = new ArrayList<>();
- for (int index : provisionIndices) {
- String hostHostname = hostType == NodeType.host ? "host" + index : hostType.name() + index;
- hosts.add(new ProvisionedHost("id-of-" + hostType.name() + index,
+ for (int index : request.indices()) {
+ String hostHostname = request.type() == NodeType.host ? "host" + index : request.type().name() + index;
+ hosts.add(new ProvisionedHost("id-of-" + request.type().name() + index,
hostHostname,
hostFlavor,
- hostType,
- sharing == HostSharing.exclusive ? Optional.of(applicationId) : Optional.empty(),
+ request.type(),
+ request.sharing() == HostSharing.exclusive ? Optional.of(request.owner()) : Optional.empty(),
Optional.empty(),
- createHostnames(hostType, hostFlavor, index),
- resources,
- osVersion,
- cloudAccount));
+ createHostnames(request.type(), hostFlavor, index),
+ request.resources(),
+ request.osVersion(),
+ request.cloudAccount()));
}
provisionedHosts.addAll(hosts);
- provisionedHostsConsumer.accept(hosts);
+ whenProvisioned.accept(hosts);
}
@Override
public HostIpConfig provision(Node host, Set<Node> children) throws FatalProvisioningException {
- if (behaviours.contains(Behaviour.failProvisioning)) throw new FatalProvisioningException("Failed to provision node(s)");
+ if (behaviour(Behaviour.failProvisioning)) throw new FatalProvisioningException("Failed to provision node(s)");
if (host.state() != Node.State.provisioned) throw new IllegalStateException("Host to provision must be in " + Node.State.provisioned);
Map<String, IP.Config> result = new HashMap<>();
result.put(host.hostname(), createIpConfig(host));
@@ -109,7 +111,7 @@ public class MockHostProvisioner implements HostProvisioner {
@Override
public void deprovision(Node host) {
- if (behaviours.contains(Behaviour.failDeprovisioning)) throw new FatalProvisioningException("Failed to deprovision node");
+ if (behaviour(Behaviour.failDeprovisioning)) throw new FatalProvisioningException("Failed to deprovision node");
provisionedHosts.removeIf(provisionedHost -> provisionedHost.hostHostname().equals(host.hostname()));
deprovisionedHosts++;
}
@@ -119,7 +121,7 @@ public class MockHostProvisioner implements HostProvisioner {
if (!host.type().isHost()) throw new IllegalArgumentException(host + " is not a host");
if (rebuildsCompleted.remove(host.hostname())) {
return host.withWantToRetire(host.status().wantToRetire(), host.status().wantToDeprovision(),
- false, Agent.system, Instant.ofEpochMilli(123));
+ false, false, Agent.system, Instant.ofEpochMilli(123));
}
return host;
}
@@ -129,6 +131,11 @@ public class MockHostProvisioner implements HostProvisioner {
return Collections.unmodifiableList(hostEvents);
}
+ @Override
+ public boolean canUpgradeFlavor(Node host, Node child) {
+ return upgradableFlavors.contains(host.flavor().name());
+ }
+
/** Returns the hosts that have been provisioned by this */
public List<ProvisionedHost> provisionedHosts() {
return Collections.unmodifiableList(provisionedHosts);
@@ -140,14 +147,23 @@ public class MockHostProvisioner implements HostProvisioner {
}
public MockHostProvisioner with(Behaviour first, Behaviour... rest) {
- this.behaviours = EnumSet.of(first, rest);
+ behaviours.put(first, Integer.MAX_VALUE);
+ for (var b : rest) {
+ behaviours.put(b, Integer.MAX_VALUE);
+ }
+ return this;
+ }
+
+ public MockHostProvisioner with(Behaviour behaviour, int count) {
+ behaviours.put(behaviour, count);
return this;
}
public MockHostProvisioner without(Behaviour first, Behaviour... rest) {
- Set<Behaviour> behaviours = new HashSet<>(this.behaviours);
- behaviours.removeAll(EnumSet.of(first, rest));
- this.behaviours = behaviours.isEmpty() ? EnumSet.noneOf(Behaviour.class) : EnumSet.copyOf(behaviours);
+ behaviours.remove(first);
+ for (var b : rest) {
+ behaviours.remove(b);
+ }
return this;
}
@@ -167,6 +183,11 @@ public class MockHostProvisioner implements HostProvisioner {
return this;
}
+ public MockHostProvisioner addUpgradableFlavor(String name) {
+ upgradableFlavors.add(name);
+ return this;
+ }
+
/** Sets the host flavor to use to the flavor matching these resources exactly, if any. */
public MockHostProvisioner setHostFlavorIfAvailable(NodeResources flavorAdvertisedResources, HostResourcesCalculator calculator, ClusterSpec.Type ... types) {
Optional<Flavor> hostFlavor = flavors.stream().filter(f -> calculator.advertisedResourcesOf(f).compatibleWith(flavorAdvertisedResources))
@@ -215,7 +236,7 @@ public class MockHostProvisioner implements HostProvisioner {
int hostIndex = Integer.parseInt(node.hostname().replaceAll("^[a-z]+|-\\d+$", ""));
Set<String> addresses = Set.of("::" + hostIndex + ":0");
Set<String> ipAddressPool = new HashSet<>();
- if (!behaviours.contains(Behaviour.failDnsUpdate)) {
+ if (!behaviour(Behaviour.failDnsUpdate)) {
nameResolver.addRecord(node.hostname(), addresses.iterator().next());
for (int i = 1; i <= 2; i++) {
String ip = "::" + hostIndex + ":" + i;
@@ -229,10 +250,13 @@ public class MockHostProvisioner implements HostProvisioner {
public enum Behaviour {
- /** Fail all calls to {@link MockHostProvisioner#provision(com.yahoo.vespa.hosted.provision.Node, java.util.Set)} */
+ /** Fail call to {@link MockHostProvisioner#provision(com.yahoo.vespa.hosted.provision.Node, java.util.Set)} */
failProvisioning,
- /** Fail all calls to {@link MockHostProvisioner#deprovision(com.yahoo.vespa.hosted.provision.Node)} */
+ /** Fail call to {@link MockHostProvisioner#provisionHosts(HostProvisionRequest, Consumer)} */
+ failProvisionRequest,
+
+ /** Fail call to {@link MockHostProvisioner#deprovision(com.yahoo.vespa.hosted.provision.Node)} */
failDeprovisioning,
/** Fail DNS updates of provisioned hosts */
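
The behaviours map above replaces the old EnumSet so that a behaviour can be armed for a limited number of invocations: behaviour() consumes one charge per call and drops the entry once the count reaches zero, while with(Behaviour...) stores Integer.MAX_VALUE to mean "always". A standalone sketch of that computeIfPresent pattern:

    import java.util.HashMap;
    import java.util.Map;

    class CountedBehaviourDemo {
        enum Behaviour { failProvisioning, failDnsUpdate }

        private final Map<Behaviour, Integer> behaviours = new HashMap<>();

        // Arms a behaviour for the next 'count' invocations (the real mock uses Integer.MAX_VALUE for "always").
        CountedBehaviourDemo with(Behaviour behaviour, int count) {
            behaviours.put(behaviour, count);
            return this;
        }

        // Returns whether the behaviour is active for this invocation, consuming one charge.
        // computeIfPresent maps the entry to null (removing it) once the stored count reaches zero.
        boolean behaviour(Behaviour behaviour) {
            return behaviours.computeIfPresent(behaviour, (k, old) -> old == 0 ? null : --old) != null;
        }

        public static void main(String[] args) {
            var demo = new CountedBehaviourDemo().with(Behaviour.failProvisioning, 2);
            for (int i = 1; i <= 4; i++)
                System.out.println("call " + i + " fails: " + demo.behaviour(Behaviour.failProvisioning));
            // prints true, true, false, false: two charges are consumed, then the entry is removed
        }
    }
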
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java
index 0a614cc9b2b..b7d6e0a9dd9 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java
@@ -148,7 +148,7 @@ public class MockNodeRepository extends NodeRepository {
nodes.add(node10);
Node node55 = Node.create("node55", ipConfig(55), "host55.yahoo.com", resources(2, 8, 50, 1, fast, local), NodeType.tenant)
- .status(Status.initial().withWantToRetire(true, true, false))
+ .status(Status.initial().withWantToRetire(true, true, false, false))
.cloudAccount(defaultCloudAccount).build();
nodes.add(node55);
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java
index 7d9a48f6773..05e01c65798 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java
@@ -199,7 +199,7 @@ public class NodeRepositoryTest {
// Set host 2 properties and deprovision it
try (var lock = tester.nodeRepository().nodes().lockAndGetRequired("host2")) {
- Node host2 = lock.node().withWantToRetire(true, false, true, Agent.system, tester.nodeRepository().clock().instant());
+ Node host2 = lock.node().withWantToRetire(true, false, true, false, Agent.system, tester.nodeRepository().clock().instant());
tester.nodeRepository().nodes().write(host2, lock);
}
tester.nodeRepository().nodes().removeRecursively("host2");
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java
index dc1e1320ff2..29ebf1789c0 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java
@@ -8,9 +8,11 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ApplicationTransaction;
import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.Cloud;
+import com.yahoo.config.provision.CloudName;
import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.Flavor;
import com.yahoo.config.provision.HostSpec;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.ProvisionLock;
@@ -24,9 +26,12 @@ import com.yahoo.vespa.config.ConfigPayload;
import com.yahoo.vespa.hosted.provision.maintenance.SwitchRebalancer;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.persistence.DnsNameResolver;
+import com.yahoo.vespa.hosted.provision.persistence.NameResolver;
import com.yahoo.vespa.hosted.provision.persistence.NodeSerializer;
import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
import com.yahoo.vespa.hosted.provision.testutils.MockDeployer;
+import com.yahoo.vespa.hosted.provision.testutils.MockHostProvisioner;
+import com.yahoo.vespa.hosted.provision.testutils.MockNameResolver;
import com.yahoo.vespa.model.builder.xml.dom.DomConfigPayloadBuilder;
import org.junit.Ignore;
import org.junit.Test;
@@ -37,20 +42,17 @@ import java.io.InputStreamReader;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
-import java.nio.file.Paths;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
-import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;
import java.util.logging.Logger;
-import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.IntStream;
-import static com.yahoo.config.provision.NodeResources.DiskSpeed.any;
import static com.yahoo.config.provision.NodeResources.DiskSpeed.fast;
import static com.yahoo.config.provision.NodeResources.StorageType.local;
import static com.yahoo.config.provision.NodeResources.StorageType.remote;
@@ -75,21 +77,15 @@ public class RealDataScenarioTest {
@Ignore
@Test
public void test() {
- ProvisioningTester tester = new ProvisioningTester.Builder()
- .zone(new Zone(Cloud.builder().dynamicProvisioning(true).build(), SystemName.defaultSystem(), Environment.prod, RegionName.defaultName()))
- .flavorsConfig(parseFlavors(Paths.get("/tmp/node-flavors.xml")))
- .nameResolver(new DnsNameResolver())
- .spareCount(1)
- .build();
- initFromZk(tester.nodeRepository(), Paths.get("/tmp/snapshot"));
+ ProvisioningTester tester = tester(SystemName.Public, CloudName.AWS, Environment.prod, parseFlavors(Path.of("/tmp/node-flavors.xml")));
+ initFromZk(tester.nodeRepository(), Path.of("/tmp/snapshot"));
ApplicationId app = ApplicationId.from("tenant", "app", "default");
- Version version = Version.fromString("7.123.4");
+ Version version = Version.fromString("8.123.4");
Capacity[] capacities = new Capacity[]{
Capacity.from(new ClusterResources(1, 1, NodeResources.unspecified())),
- /** TODO: Change to NodeResources.unspecified() when {@link (com.yahoo.vespa.flags.Flags).DEDICATED_CLUSTER_CONTROLLER_FLAVOR} is gone */
- Capacity.from(new ClusterResources(3, 1, new NodeResources(0.25, 1.0, 10.0, 0.3, any))),
+ Capacity.from(new ClusterResources(3, 1, NodeResources.unspecified())),
Capacity.from(new ClusterResources(4, 1, new NodeResources(8, 16, 100, 0.3, fast, remote))),
Capacity.from(new ClusterResources(2, 1, new NodeResources(4, 8, 100, 0.3, fast, local)))
};
@@ -125,10 +121,13 @@ public class RealDataScenarioTest {
transaction.commit();
}
- private static FlavorsConfig parseFlavors(Path path) {
+ private static List<Flavor> parseFlavors(Path path) {
try {
var element = XmlHelper.getDocumentBuilder().parse(path.toFile()).getDocumentElement();
- return ConfigPayload.fromBuilder(new DomConfigPayloadBuilder(null).build(element)).toInstance(FlavorsConfig.class, "");
+ return ConfigPayload.fromBuilder(new DomConfigPayloadBuilder(null).build(element)).toInstance(FlavorsConfig.class, "")
+ .flavor().stream()
+ .map(Flavor::new)
+ .toList();
} catch (Exception e) {
throw new RuntimeException(e);
}
@@ -136,21 +135,18 @@ public class RealDataScenarioTest {
private static void initFromZk(NodeRepository nodeRepository, Path pathToZkSnapshot) {
NodeSerializer nodeSerializer = new NodeSerializer(nodeRepository.flavors(), 1000);
- AtomicReference<Node.State> state = new AtomicReference<>();
- Pattern zkNodePathPattern = Pattern.compile(".?/provision/v1/([a-z]+)/[a-z0-9.-]+\\.(com|cloud).?");
+ AtomicBoolean nodeNext = new AtomicBoolean(false);
+ Pattern zkNodePathPattern = Pattern.compile(".?/provision/v1/nodes/[a-z0-9.-]+\\.(com|cloud).?");
Consumer<String> consumer = input -> {
- if (state.get() != null) {
+ if (nodeNext.get()) {
String json = input.substring(input.indexOf("{\""), input.lastIndexOf('}') + 1);
Node node = nodeSerializer.fromJson(json.getBytes(UTF_8));
- nodeRepository.database().addNodesInState(List.of(node), state.get(), Agent.system);
- state.set(null);
+ nodeRepository.database().addNodesInState(List.of(node), node.state(), Agent.system);
+ nodeNext.set(false);
} else {
- Matcher matcher = zkNodePathPattern.matcher(input);
- if (!matcher.matches()) return;
- String stateStr = matcher.group(1);
- Node.State s = "deallocated".equals(stateStr) ? Node.State.inactive :
- "allocated".equals(stateStr) ? Node.State.active : Node.State.valueOf(stateStr);
- state.set(s);
+ if (!zkNodePathPattern.matcher(input).matches()) return;
+ if (nodeNext.getAndSet(true))
+ throw new IllegalStateException("Expected to find node JSON, but found another node path: " + input);
}
};
@@ -170,4 +166,18 @@ public class RealDataScenarioTest {
}
}
+ private static ProvisioningTester tester(SystemName systemName, CloudName cloudName, Environment environment, List<Flavor> flavors) {
+ Cloud cloud = Cloud.builder().name(cloudName).dynamicProvisioning(cloudName != CloudName.YAHOO).build();
+ NameResolver nameResolver = cloudName == CloudName.YAHOO ? new DnsNameResolver() : new MockNameResolver().mockAnyLookup();
+ ProvisioningTester.Builder builder = new ProvisioningTester.Builder()
+ .zone(new Zone(cloud, systemName, environment, RegionName.defaultName()))
+ .flavors(flavors)
+ .nameResolver(nameResolver)
+ .spareCount(environment.isProduction() && !cloud.dynamicProvisioning() && !systemName.isCd() ? 1 : 0);
+ if (cloud.dynamicProvisioning())
+ builder.hostProvisioner(new MockHostProvisioner(flavors, (MockNameResolver) nameResolver, 0));
+
+ return builder.build();
+ }
+
}
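
initFromZk above now recognizes node paths under /provision/v1/nodes/ and treats the following line as that node's JSON, taking the state from the serialized node instead of deriving it from the path. A standalone sketch of the pairing logic; the toy input format below is an assumption made purely for illustration:

    import java.util.List;
    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.function.Consumer;
    import java.util.regex.Pattern;

    class ZkSnapshotPairingDemo {
        public static void main(String[] args) {
            // Toy input: each node path line is followed by that node's JSON line.
            List<String> lines = List.of(
                    "/provision/v1/nodes/host1.example.com",
                    "{\"hostname\":\"host1.example.com\",\"state\":\"active\"}",
                    "/provision/v1/applications/tenant:app:default",
                    "/provision/v1/nodes/host2.example.com",
                    "{\"hostname\":\"host2.example.com\",\"state\":\"dirty\"}");

            Pattern nodePath = Pattern.compile(".?/provision/v1/nodes/[a-z0-9.-]+\\.(com|cloud).?");
            AtomicBoolean nodeNext = new AtomicBoolean(false);
            Consumer<String> consumer = input -> {
                if (nodeNext.get()) {
                    System.out.println("would deserialize and add: " + input); // NodeSerializer.fromJson(...) in the real test
                    nodeNext.set(false);
                } else if (nodePath.matcher(input).matches()) {
                    nodeNext.set(true); // the next line is expected to be this node's JSON
                }
            };
            lines.forEach(consumer);
        }
    }
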
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
index 36d0e464b3d..3d51c50f681 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
@@ -61,7 +61,7 @@ public class AutoscalingTest {
fixture.loader().applyCpuLoad(0.7f, 10);
var scaledResources = fixture.tester().assertResources("Scaling up since resource usage is too high",
- 9, 1, 3.6, 7.7, 31.7,
+ 8, 1, 4.0, 9.3, 36.2,
fixture.autoscale());
fixture.deploy(Capacity.from(scaledResources));
@@ -83,7 +83,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(7));
fixture.loader().applyCpuLoad(0.1f, 10);
fixture.tester().assertResources("Scaling cpu down since usage has gone down significantly",
- 8, 1, 1.0, 7.3, 22.1,
+ 7, 1, 1.1, 8.7, 25.4,
fixture.autoscale());
}
@@ -107,7 +107,7 @@ public class AutoscalingTest {
fixture.loader().applyLoad(new Load(0.1, 0.1, 0.1), 3);
fixture.loader().applyLoad(new Load(1.0, 1.0, 1.0), 1);
fixture.tester().assertResources("Scaling up since resource usage is too high",
- 9, 1, 4.7, 14.8, 66.0,
+ 8, 1, 5.3, 17.5, 75.4,
fixture.autoscale());
}
@@ -167,7 +167,7 @@ public class AutoscalingTest {
var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).build();
fixture.loader().applyLoad(new Load(1.0, 0.1, 1.0), 10);
fixture.tester().assertResources("Scaling up (only) since resource usage is too high",
- 8, 1, 7.1, 8.8, 75.4,
+ 8, 1, 7.1, 9.3, 75.4,
fixture.autoscale());
}
@@ -199,7 +199,7 @@ public class AutoscalingTest {
fixture.loader().applyCpuLoad(0.70, 1);
fixture.loader().applyCpuLoad(0.01, 100);
fixture.tester().assertResources("Scaling up since peak resource usage is too high",
- 9, 1, 3.8, 7.7, 31.7,
+ 8, 1, 4.3, 9.3, 36.2,
fixture.autoscale());
}
@@ -210,7 +210,7 @@ public class AutoscalingTest {
fixture.loader().applyCpuLoad(0.70, 1);
fixture.loader().applyCpuLoad(0.01, 100);
fixture.tester().assertResources("Scaling up since peak resource usage is too high",
- 10, 1, 4, 8.0, 22.7,
+ 9, 1, 4, 16.0, 25.5,
fixture.autoscale());
}
@@ -221,7 +221,7 @@ public class AutoscalingTest {
fixture.loader().applyCpuLoad(0.70, 1);
fixture.loader().applyCpuLoad(0.01, 100);
fixture.tester().assertResources("Scaling up since peak resource usage is too high",
- 9, 1, 3.8, 8.0, 37.5,
+ 8, 1, 4.3, 9.7, 42.9,
fixture.autoscale());
}
@@ -283,7 +283,7 @@ public class AutoscalingTest {
new NodeResources(100, 1000, 1000, 1, DiskSpeed.any));
var capacity = Capacity.from(min, max);
ClusterResources scaledResources = fixture.tester().assertResources("Scaling up",
- 13, 1, 1.5, 26.7, 26.7,
+ 13, 1, 1.5, 29.1, 26.7,
fixture.autoscale(capacity));
assertEquals("Disk speed from new capacity is used",
DiskSpeed.any, scaledResources.nodeResources().diskSpeed());
@@ -401,7 +401,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(0.4, 240);
fixture.tester().assertResources("Scaling cpu up",
- 8, 4, 4.6, 4.0, 10.0,
+ 8, 4, 4.6, 4.2, 10.0,
fixture.autoscale());
}
@@ -446,7 +446,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(1.0, 120);
fixture.tester().assertResources("Suggesting above capacity limit",
- 8, 1, 6.2, 7.0, 29.0,
+ 8, 1, 6.2, 7.4, 29.0,
fixture.tester().suggest(fixture.applicationId, fixture.clusterSpec.id(), min, min));
}
@@ -492,7 +492,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(0.9, 120);
fixture.tester().assertResources("Scaling up to 2 nodes, scaling memory and disk down at the same time",
- 10, 5, 7.7, 39.3, 38.5,
+ 10, 5, 7.7, 41.5, 38.5,
fixture.autoscale());
}
@@ -528,7 +528,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(timePassed.negated());
fixture.loader().addLoadMeasurements(10, t -> t == 0 ? 200.0 : 100.0, t -> 10.0);
fixture.tester().assertResources("Scaling up cpu, others down, changing to 1 group is cheaper",
- 9, 1, 2.5, 30.7, 30.1,
+ 7, 1, 3.2, 43.3, 40.1,
fixture.autoscale());
}
@@ -548,7 +548,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(timePassed.negated());
fixture.loader().addLoadMeasurements(10, t -> t == 0 ? 20.0 : 10.0, t -> 100.0);
fixture.tester().assertResources("Scaling down since resource usage is too high, changing to 1 group is cheaper",
- 6, 1, 1.0, 49.1, 48.1,
+ 5, 1, 1.0, 62.6, 60.1,
fixture.autoscale());
}
@@ -565,7 +565,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(1));
fixture.loader().applyMemLoad(1.0, 1000);
fixture.tester().assertResources("Increase group size to reduce memory load",
- 8, 2, 13.9, 94.5, 60.1,
+ 8, 2, 13.9, 96.3, 60.1,
fixture.autoscale());
}
@@ -594,7 +594,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofHours(12 * 3 + 1));
fixture.loader().applyCpuLoad(0.02, 120);
fixture.tester().assertResources("Scaling down since enough time has passed",
- 3, 1, 1.0, 24.6, 101.4,
+ 3, 1, 1.0, 23.6, 101.4,
fixture.autoscale());
}
@@ -637,8 +637,8 @@ public class AutoscalingTest {
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.25, 200);
- fixture.tester().assertResources("Scale up since we assume we need 2x cpu for growth when no data scaling time data",
- 10, 1, 1.2, 5.5, 22.5,
+ fixture.tester().assertResources("Scale up since we assume we need 2x cpu for growth when no scaling time data",
+ 8, 1, 1.6, 7.4, 29.0,
fixture.autoscale());
fixture.setScalingDuration(Duration.ofMinutes(5));
@@ -647,7 +647,7 @@ public class AutoscalingTest {
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.25, 200);
fixture.tester().assertResources("Scale down since observed growth is slower than scaling time",
- 10, 1, 1.0, 5.5, 22.5,
+ 8, 1, 1.2, 7.4, 29.0,
fixture.autoscale());
fixture.setScalingDuration(Duration.ofMinutes(60));
@@ -658,7 +658,7 @@ public class AutoscalingTest {
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.25, 200);
fixture.tester().assertResources("Scale up since observed growth is faster than scaling time",
- 9, 1, 1.4, 6.1, 25.3,
+ 8, 1, 1.5, 7.4, 29.0,
fixture.autoscale());
}
@@ -670,12 +670,12 @@ public class AutoscalingTest {
fixture.setScalingDuration(Duration.ofMinutes(60));
fixture.tester().clock().advance(Duration.ofDays(2));
Duration timeAdded = fixture.loader().addLoadMeasurements(100,
- t -> scalingFactor * (100.0 + (t < 50 ? t * t * t : 125000 - (t - 49) * (t - 49) * (t - 49))),
+ t -> scalingFactor * (100.0 + (t < 50 ? t * t * t : 155000 - (t - 49) * (t - 49) * (t - 49))),
t -> 0.0);
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.7, 200);
fixture.tester().assertResources("Scale up slightly since observed growth is faster than scaling time, but we are not confident",
- 10, 1, 1.0, 5.5, 22.5,
+ 8, 1, 1.3, 7.4, 29.0,
fixture.autoscale());
}
@@ -693,7 +693,7 @@ public class AutoscalingTest {
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.4, 200);
fixture.tester.assertResources("Query and write load is equal -> scale up somewhat",
- 10, 1, 1.4, 5.5, 22.5,
+ 8, 1, 1.8, 7.4, 29.0,
fixture.autoscale());
fixture.tester().clock().advance(Duration.ofDays(2));
@@ -702,7 +702,7 @@ public class AutoscalingTest {
fixture.loader().addCpuMeasurements(0.4, 200);
// TODO: Ackhually, we scale down here - why?
fixture.tester().assertResources("Query load is 4x write load -> scale up more",
- 10, 1, 1.3, 5.5, 22.5,
+ 8, 1, 1.4, 7.4, 29.0,
fixture.autoscale());
fixture.tester().clock().advance(Duration.ofDays(2));
@@ -710,7 +710,7 @@ public class AutoscalingTest {
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.4, 200);
fixture.tester().assertResources("Write load is 10x query load -> scale down",
- 6, 1, 1.1, 9.8, 40.5,
+ 6, 1, 1.1, 10.0, 40.5,
fixture.autoscale());
fixture.tester().clock().advance(Duration.ofDays(2));
@@ -718,7 +718,7 @@ public class AutoscalingTest {
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.4, 200);
fixture.tester().assertResources("Query only -> largest possible",
- 9, 1, 2.7, 6.1, 25.3,
+ 8, 1, 2.5, 7.4, 29.0,
fixture.autoscale());
fixture.tester().clock().advance(Duration.ofDays(2));
@@ -726,7 +726,7 @@ public class AutoscalingTest {
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.4, 200);
fixture.tester().assertResources("Write only -> smallest possible",
- 4, 1, 1.1, 16.4, 67.6,
+ 4, 1, 1.1, 16.1, 67.6,
fixture.autoscale());
}
@@ -781,7 +781,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(1.0, 1.0, 1.0), 200);
fixture.tester().assertResources("We scale even in dev because resources are 'required'",
- 3, 1, 1.0, 12.3, 62.5,
+ 3, 1, 1.0, 13.4, 62.5,
fixture.autoscale());
}
@@ -851,7 +851,7 @@ public class AutoscalingTest {
fixture.loader().applyLoad(new Load(0.06, 0.52, 0.27), 100);
var autoscaling = fixture.autoscale();
fixture.tester().assertResources("Scaling down",
- 7, 1, 2, 14.7, 384.0,
+ 7, 1, 2, 14.5, 384.0,
autoscaling);
fixture.deploy(Capacity.from(autoscaling.resources().get()));
assertEquals("Initial nodes are kept", initialNodes, fixture.nodes().asList());
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingUsingBcpGroupInfoTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingUsingBcpGroupInfoTest.java
index 704491ed44f..ae40795d783 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingUsingBcpGroupInfoTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingUsingBcpGroupInfoTest.java
@@ -32,7 +32,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.1, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 9, 1, 3.6, 6.1, 25.3,
+ 8, 1, 4.0, 7.4, 29.0,
fixture.autoscale());
// Higher query rate
@@ -40,7 +40,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(200, 1.1, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 9, 1, 7.1, 6.1, 25.3,
+ 8, 1, 8.0, 7.4, 29.0,
fixture.autoscale());
// Higher headroom
@@ -48,7 +48,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.3, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 9, 1, 4.2, 6.1, 25.3,
+ 8, 1, 4.8, 7.4, 29.0,
fixture.autoscale());
// Higher per query cost
@@ -56,7 +56,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.1, 0.45));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 9, 1, 5.4, 6.1, 25.3,
+ 8, 1, 6.0, 7.4, 29.0,
fixture.autoscale());
// Bcp elsewhere is 0 - use local only
@@ -64,7 +64,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(0, 1.1, 0.45));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling using local info",
- 8, 1, 1, 7.0, 29.0,
+ 8, 1, 1, 7.4, 29.0,
fixture.autoscale());
}
@@ -85,7 +85,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.1, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 3, 3, 10.5, 41.0, 168.9,
+ 3, 3, 10.5, 38.4, 168.9,
fixture.autoscale());
// Higher query rate
@@ -93,7 +93,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(200, 1.1, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 3, 3, 20.9, 41.0, 168.9,
+ 3, 3, 20.9, 38.4, 168.9,
fixture.autoscale());
// Higher headroom
@@ -101,7 +101,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.3, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 3, 3, 12.4, 41.0, 168.9,
+ 3, 3, 12.4, 38.4, 168.9,
fixture.autoscale());
// Higher per query cost
@@ -109,7 +109,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.1, 0.45));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 3, 3, 15.7, 41.0, 168.9,
+ 3, 3, 15.7, 38.4, 168.9,
fixture.autoscale());
}
@@ -186,7 +186,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(200, 1.3, 0.45));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 8, 1, 14.2, 7.0, 29.0,
+ 8, 1, 14.2, 7.4, 29.0,
fixture.autoscale());
// Some local traffic
@@ -196,7 +196,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.tester().clock().advance(duration1.negated());
fixture.loader().addQueryRateMeasurements(10, __ -> 10.0);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 8, 1, 6.9, 7.0, 29.0,
+ 8, 1, 6.9, 7.4, 29.0,
fixture.autoscale());
// Enough local traffic to get half the votes
@@ -206,7 +206,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.tester().clock().advance(duration2.negated());
fixture.loader().addQueryRateMeasurements(10, __ -> 50.0);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 9, 1, 2.7, 6.1, 25.3,
+ 8, 1, 3.0, 7.4, 29.0,
fixture.autoscale());
// Mostly local
@@ -216,7 +216,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.tester().clock().advance(duration3.negated());
fixture.loader().addQueryRateMeasurements(10, __ -> 90.0);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 9, 1, 2.1, 6.1, 25.3,
+ 8, 1, 2.3, 7.4, 29.0,
fixture.autoscale());
// Local only
@@ -226,7 +226,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.tester().clock().advance(duration4.negated());
fixture.loader().addQueryRateMeasurements(10, __ -> 100.0);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 9, 1, 2.0, 6.1, 25.3,
+ 8, 1, 2.2, 7.4, 29.0,
fixture.autoscale());
// No group info, should be the same as the above
@@ -236,7 +236,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.tester().clock().advance(duration5.negated());
fixture.loader().addQueryRateMeasurements(10, __ -> 100.0);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 9, 1, 2.0, 6.1, 25.3,
+ 8, 1, 2.2, 7.4, 29.0,
fixture.autoscale());
// 40 query rate, no group info (for reference to the below)
@@ -246,7 +246,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.tester().clock().advance(duration6.negated());
fixture.loader().addQueryRateMeasurements(10, __ -> 40.0);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 9, 1, 1.4, 6.1, 25.3,
+ 8, 1, 1.6, 7.4, 29.0,
fixture.autoscale());
// Local query rate is too low but global is even lower so disregard it, giving the same as above
@@ -256,7 +256,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.tester().clock().advance(duration7.negated());
fixture.loader().addQueryRateMeasurements(10, __ -> 40.0);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 9, 1, 1.4, 6.1, 25.3,
+ 8, 1, 1.6, 7.4, 29.0,
fixture.autoscale());
// Local query rate is too low to be fully confident, and so is global but as it is slightly larger, incorporate it slightly
@@ -266,7 +266,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.tester().clock().advance(duration8.negated());
fixture.loader().addQueryRateMeasurements(10, __ -> 40.0);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 9, 1, 1.8, 6.1, 25.3,
+ 8, 1, 1.9, 7.4, 29.0,
fixture.autoscale());
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
index ed00134af55..ec084014a6a 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
@@ -5,12 +5,17 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.NodeFlavors;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.Zone;
import com.yahoo.test.ManualClock;
+import com.yahoo.vespa.curator.mock.MockCurator;
+import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Application;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.applications.Status;
+import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
+import com.yahoo.vespa.hosted.provision.testutils.MockNodeRepository;
import org.junit.Test;
import java.time.Duration;
@@ -84,12 +89,11 @@ public class ClusterModelTest {
private ClusterModel clusterModel(Status status, IntFunction<Double> queryRate, IntFunction<Double> writeRate) {
ManualClock clock = new ManualClock();
- Zone zone = Zone.defaultZone();
Application application = Application.empty(ApplicationId.from("t1", "a1", "i1"));
ClusterSpec clusterSpec = clusterSpec();
Cluster cluster = cluster(resources());
application = application.with(cluster);
- return new ClusterModel(zone,
+ return new ClusterModel(new ProvisioningTester.Builder().build().nodeRepository(),
application.with(status),
clusterSpec, cluster, clock, Duration.ofMinutes(10),
timeseries(cluster,100, queryRate, writeRate, clock),
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java
index 5d1fd58489b..b150b372fe8 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java
@@ -10,10 +10,12 @@ import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.Flavor;
+import com.yahoo.config.provision.NodeFlavors;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.Zone;
+import com.yahoo.vespa.curator.mock.MockCurator;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.flags.custom.HostResources;
@@ -27,6 +29,8 @@ import com.yahoo.vespa.hosted.provision.autoscale.awsnodes.AwsHostResourcesCalcu
import com.yahoo.vespa.hosted.provision.autoscale.awsnodes.AwsNodeTypes;
import com.yahoo.vespa.hosted.provision.provisioning.DynamicProvisioningTester;
import com.yahoo.vespa.hosted.provision.provisioning.HostResourcesCalculator;
+import com.yahoo.vespa.hosted.provision.testutils.MockNodeRepository;
+
import java.time.Duration;
import java.util.Arrays;
import java.util.List;
@@ -40,14 +44,12 @@ import java.util.Optional;
public class Fixture {
final DynamicProvisioningTester tester;
- final Zone zone;
final ApplicationId applicationId;
final ClusterSpec clusterSpec;
final Capacity capacity;
final Loader loader;
public Fixture(Fixture.Builder builder, Optional<ClusterResources> initialResources, int hostCount) {
- zone = builder.zone;
applicationId = builder.application;
clusterSpec = builder.cluster;
capacity = builder.capacity;
@@ -80,7 +82,7 @@ public class Fixture {
public Capacity capacity() { return capacity; }
public ClusterModel clusterModel() {
- return new ClusterModel(zone,
+ return new ClusterModel(tester.nodeRepository(),
application(),
clusterSpec,
cluster(),
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java
index d75f51680d7..3c459871490 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java
@@ -154,10 +154,10 @@ public class AutoscalingMaintainerTest {
@Test
public void test_toString() {
- assertEquals("4 nodes with [vcpu: 1.0, memory: 2.0 Gb, disk 4.0 Gb, bandwidth: 1.0 Gbps, architecture: any] (total: [vcpu: 4.0, memory: 8.0 Gb, disk 16.0 Gb, bandwidth: 4.0 Gbps, architecture: any])",
+ assertEquals("4 nodes with [vcpu: 1.0, memory: 2.0 Gb, disk: 4.0 Gb, bandwidth: 1.0 Gbps, architecture: any] (total: [vcpu: 4.0, memory: 8.0 Gb, disk: 16.0 Gb, bandwidth: 4.0 Gbps, architecture: any])",
AutoscalingMaintainer.toString(new ClusterResources(4, 1, new NodeResources(1, 2, 4, 1))));
- assertEquals("4 nodes (in 2 groups) with [vcpu: 1.0, memory: 2.0 Gb, disk 4.0 Gb, bandwidth: 1.0 Gbps, architecture: any] (total: [vcpu: 4.0, memory: 8.0 Gb, disk 16.0 Gb, bandwidth: 4.0 Gbps, architecture: any])",
+ assertEquals("4 nodes (in 2 groups) with [vcpu: 1.0, memory: 2.0 Gb, disk: 4.0 Gb, bandwidth: 1.0 Gbps, architecture: any] (total: [vcpu: 4.0, memory: 8.0 Gb, disk: 16.0 Gb, bandwidth: 4.0 Gbps, architecture: any])",
AutoscalingMaintainer.toString(new ClusterResources(4, 2, new NodeResources(1, 2, 4, 1))));
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityCheckerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityCheckerTest.java
index 51522304350..51b3bb99f4c 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityCheckerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityCheckerTest.java
@@ -29,7 +29,7 @@ public class CapacityCheckerTest {
var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure();
assertTrue(failurePath.isPresent());
assertTrue(tester.nodeRepository.nodes().list().nodeType(NodeType.host).asList().containsAll(failurePath.get().hostsCausingFailure));
- assertEquals(5, failurePath.get().hostsCausingFailure.size());
+ assertEquals(4, failurePath.get().hostsCausingFailure.size());
}
@Test
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DeprovisionedExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DeprovisionedExpirerTest.java
new file mode 100644
index 00000000000..4100fe39eca
--- /dev/null
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DeprovisionedExpirerTest.java
@@ -0,0 +1,56 @@
+package com.yahoo.vespa.hosted.provision.maintenance;
+
+import com.yahoo.config.provision.NodeFlavors;
+import com.yahoo.config.provision.NodeResources;
+import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
+import com.yahoo.vespa.hosted.provision.node.Agent;
+import com.yahoo.vespa.hosted.provision.provisioning.FlavorConfigBuilder;
+import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
+import com.yahoo.vespa.hosted.provision.testutils.MockHostProvisioner;
+import org.junit.jupiter.api.Test;
+
+import java.time.Duration;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertSame;
+
+/**
+ * @author mpolden
+ */
+class DeprovisionedExpirerTest {
+
+ private final NodeFlavors flavors = FlavorConfigBuilder.createDummies("host");
+ private final ProvisioningTester tester = new ProvisioningTester.Builder().dynamicProvisioning()
+ .flavors(flavors.getFlavors())
+ .hostProvisioner(new MockHostProvisioner(flavors.getFlavors()))
+ .build();
+ private final DeprovisionedExpirer expirer = new DeprovisionedExpirer(tester.nodeRepository(), Duration.ofDays(30),
+ new TestMetric());
+
+ @Test
+ public void maintain() {
+ tester.makeReadyHosts(1, new NodeResources(2,4,8,1))
+ .activateTenantHosts();
+ NodeList hosts = tester.nodeRepository().nodes().list().state(Node.State.active);
+ assertEquals(1, hosts.size());
+
+ // Remove host
+ String hostname = hosts.first().get().hostname();
+ tester.nodeRepository().nodes().park(hostname, false, Agent.system, getClass().getSimpleName());
+ tester.nodeRepository().nodes().removeRecursively(hostname);
+ assertSame(Node.State.deprovisioned, tester.node(hostname).state());
+
+ // Host is not removed until expiry passes
+ assertExpiredAfter(Duration.ZERO, false, hostname);
+ assertExpiredAfter(Duration.ofDays(15), false, hostname);
+ assertExpiredAfter(Duration.ofDays(15), true, hostname);
+ }
+
+ private void assertExpiredAfter(Duration duration, boolean expired, String hostname) {
+ tester.clock().advance(duration);
+ expirer.maintain();
+ assertEquals(expired, tester.nodeRepository().nodes().node(hostname).isEmpty());
+ }
+
+}
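
The new DeprovisionedExpirerTest exercises a simple retention rule: a host moved to the deprovisioned state is kept in the node repository until the configured expiry (30 days in the test) has passed, and only then forgotten. A minimal standalone sketch of that rule, assuming the decision is just a comparison against the time the host entered the deprovisioned state:

    import java.time.Duration;
    import java.time.Instant;

    // Minimal sketch of the retention rule exercised above; names are illustrative and not
    // the actual DeprovisionedExpirer implementation.
    public class DeprovisionedRetention {

        private final Duration expiry;

        DeprovisionedRetention(Duration expiry) { this.expiry = expiry; }

        /** Whether a host deprovisioned at the given instant should now be removed. */
        boolean shouldRemove(Instant deprovisionedAt, Instant now) {
            return !now.isBefore(deprovisionedAt.plus(expiry));
        }

        public static void main(String[] args) {
            var retention = new DeprovisionedRetention(Duration.ofDays(30));
            Instant t0 = Instant.parse("2023-01-01T00:00:00Z");
            System.out.println(retention.shouldRemove(t0, t0.plus(Duration.ofDays(15)))); // false
            System.out.println(retention.shouldRemove(t0, t0.plus(Duration.ofDays(30)))); // true
        }
    }
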
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DirtyExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DirtyExpirerTest.java
index ac20b9164f8..ddd7413567a 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DirtyExpirerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DirtyExpirerTest.java
@@ -2,15 +2,10 @@
package com.yahoo.vespa.hosted.provision.maintenance;
import com.yahoo.component.Version;
-import com.yahoo.config.provision.Cloud;
import com.yahoo.config.provision.ClusterMembership;
-import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.Flavor;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeType;
-import com.yahoo.config.provision.RegionName;
-import com.yahoo.config.provision.SystemName;
-import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.Allocation;
@@ -37,8 +32,8 @@ public class DirtyExpirerTest {
}
private void assertAllocationAfterExpiry(boolean dynamicProvisioning) {
- Zone zone = new Zone(Cloud.builder().dynamicProvisioning(dynamicProvisioning).build(), SystemName.main, Environment.prod, RegionName.from("us-east"));
- ProvisioningTester tester = new ProvisioningTester.Builder().zone(zone)
+ ProvisioningTester tester = new ProvisioningTester.Builder()
+ .dynamicProvisioning(dynamicProvisioning, true)
.hostProvisioner(dynamicProvisioning ? new MockHostProvisioner(List.of()) : null)
.build();
@@ -65,4 +60,4 @@ public class DirtyExpirerTest {
assertEquals(dynamicProvisioning, tester.nodeRepository().nodes().list().first().get().status().wantToDeprovision());
}
-}
\ No newline at end of file
+}
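
DirtyExpirerTest is one of several tests in this diff that drop a hand-built Zone/Cloud in favour of the ProvisioningTester.Builder.dynamicProvisioning helpers added near the end of this change. A typical setup now looks like the sketch below, assembled only from calls that appear elsewhere in this diff (dynamicProvisioning() allows host sharing, dynamicProvisioning(true, false) does not):

    import com.yahoo.config.provision.NodeFlavors;
    import com.yahoo.vespa.hosted.provision.provisioning.FlavorConfigBuilder;
    import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
    import com.yahoo.vespa.hosted.provision.testutils.MockHostProvisioner;

    // Sketch of the new builder-based setup; all calls are taken from tests in this diff.
    public class DynamicProvisioningSetup {

        public static void main(String[] args) {
            NodeFlavors flavors = FlavorConfigBuilder.createDummies("host");
            ProvisioningTester tester = new ProvisioningTester.Builder()
                    .dynamicProvisioning(true, false)  // dynamic provisioning on, host sharing off
                    .flavors(flavors.getFlavors())
                    .hostProvisioner(new MockHostProvisioner(flavors.getFlavors()))
                    .build();
            System.out.println(tester.nodeRepository().nodes().list().size()); // starts empty
        }
    }
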
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainerTest.java
index 7f5bb79b20c..ead94663807 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainerTest.java
@@ -21,11 +21,14 @@ import com.yahoo.config.provision.NodeType;
import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.Zone;
+import com.yahoo.docproc.jdisc.metric.NullMetric;
import com.yahoo.net.HostName;
+import com.yahoo.test.ManualClock;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.flags.custom.ClusterCapacity;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.Node.State;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Agent;
@@ -37,6 +40,7 @@ import com.yahoo.vespa.hosted.provision.provisioning.FlavorConfigBuilder;
import com.yahoo.vespa.hosted.provision.provisioning.InfraDeployerImpl;
import com.yahoo.vespa.hosted.provision.provisioning.ProvisionedHost;
import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
+import com.yahoo.vespa.hosted.provision.testutils.MockDeployer;
import com.yahoo.vespa.hosted.provision.testutils.MockDuperModel;
import com.yahoo.vespa.hosted.provision.testutils.MockHostProvisioner;
import com.yahoo.vespa.hosted.provision.testutils.MockNameResolver;
@@ -50,6 +54,7 @@ import org.junit.Test;
import java.time.Duration;
import java.time.Instant;
+import java.time.temporal.ChronoUnit;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
@@ -60,6 +65,7 @@ import java.util.stream.Stream;
import static com.yahoo.vespa.hosted.provision.testutils.MockHostProvisioner.Behaviour;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@@ -77,7 +83,7 @@ public class HostCapacityMaintainerTest {
assertTrue(tester.nodeRepository.nodes().node("host3").isPresent());
tester.maintain();
- assertTrue(tester.nodeRepository.nodes().node("host2").isEmpty());
+ assertSame(State.deprovisioned, tester.nodeRepository.nodes().node("host2").get().state());
}
@Test
@@ -88,7 +94,7 @@ public class HostCapacityMaintainerTest {
assertTrue(failedHost.isPresent());
tester.maintain();
- assertTrue("Failed host is deprovisioned", tester.nodeRepository.nodes().node(failedHost.get().hostname()).isEmpty());
+ assertSame("Failed host is deprovisioned", State.deprovisioned, tester.nodeRepository.nodes().node(failedHost.get().hostname()).get().state());
assertEquals(1, tester.hostProvisioner.deprovisionedHosts());
}
@@ -112,9 +118,9 @@ public class HostCapacityMaintainerTest {
assertEquals(2, tester.hostProvisioner.provisionedHosts().size());
assertEquals(2, tester.provisionedHostsMatching(new NodeResources(48, 128, 1000, 10)));
- NodeList nodesAfter = tester.nodeRepository.nodes().list();
+ NodeList nodesAfter = tester.nodeRepository.nodes().list().not().state(State.deprovisioned);
assertEquals(9, nodesAfter.size()); // 2 removed, 2 added
- assertTrue("Failed host 'host2' is deprovisioned", tester.nodeRepository.nodes().node("host2").isEmpty());
+ assertSame("Failed host 'host2' is deprovisioned", State.deprovisioned, tester.nodeRepository.nodes().node("host2").get().state());
assertTrue("Node on deprovisioned host removed", tester.nodeRepository.nodes().node("host2-1").isEmpty());
assertTrue("Host satisfying 16-24-100-1 is kept", tester.nodeRepository.nodes().node("host3").isPresent());
assertTrue("New 48-128-1000-10 host added", tester.nodeRepository.nodes().node("host100").isPresent());
@@ -171,8 +177,8 @@ public class HostCapacityMaintainerTest {
assertEquals(2, tester.hostProvisioner.provisionedHosts().size());
assertEquals(2, tester.provisionedHostsMatching(new NodeResources(48, 128, 1000, 10)));
- assertEquals(8, tester.nodeRepository.nodes().list().size()); // 3 removed, 2 added
- assertTrue("preprovision capacity is prefered on shared hosts", tester.nodeRepository.nodes().node("host3").isEmpty());
+ assertEquals(8, tester.nodeRepository.nodes().list().not().state(State.deprovisioned).size()); // 3 removed, 2 added
+        assertSame("preprovision capacity is preferred on shared hosts", State.deprovisioned, tester.nodeRepository.nodes().node("host3").get().state());
assertTrue(tester.nodeRepository.nodes().node("host100").isPresent());
assertTrue(tester.nodeRepository.nodes().node("host101").isPresent());
@@ -187,13 +193,13 @@ public class HostCapacityMaintainerTest {
assertEquals("one provisioned host has been deprovisioned, so there are 2 -> 1 provisioned hosts",
1, tester.hostProvisioner.provisionedHosts().size());
assertEquals(1, tester.provisionedHostsMatching(new NodeResources(48, 128, 1000, 10)));
- assertEquals(7, tester.nodeRepository.nodes().list().size()); // 4 removed, 2 added
+ assertEquals(7, tester.nodeRepository.nodes().list().not().state(State.deprovisioned).size()); // 4 removed, 2 added
if (tester.nodeRepository.nodes().node("host100").isPresent()) {
- assertTrue("host101 is superfluous and should have been deprovisioned",
- tester.nodeRepository.nodes().node("host101").isEmpty());
+ assertSame("host101 is superfluous and should have been deprovisioned", State.deprovisioned,
+ tester.nodeRepository.nodes().node("host101").get().state());
} else {
assertTrue("host101 is required for preprovision capacity",
- tester.nodeRepository.nodes().node("host101").isPresent());
+ tester.nodeRepository.nodes().node("host101").isPresent());
}
}
@@ -201,8 +207,8 @@ public class HostCapacityMaintainerTest {
private void verifyFirstMaintain(DynamicProvisioningTester tester) {
assertEquals(1, tester.hostProvisioner.provisionedHosts().size());
assertEquals(1, tester.provisionedHostsMatching(new NodeResources(48, 128, 1000, 10)));
- assertEquals(8, tester.nodeRepository.nodes().list().size()); // 2 removed, 1 added
- assertTrue("Failed host 'host2' is deprovisioned", tester.nodeRepository.nodes().node("host2").isEmpty());
+ assertEquals(8, tester.nodeRepository.nodes().list().not().state(State.deprovisioned).size()); // 2 removed, 1 added
+ assertSame("Failed host 'host2' is deprovisioned", State.deprovisioned, tester.nodeRepository.nodes().node("host2").get().state());
assertTrue("Node on deprovisioned host removed", tester.nodeRepository.nodes().node("host2-1").isEmpty());
assertTrue("One 1-30-20-3 node fits on host3", tester.nodeRepository.nodes().node("host3").isPresent());
assertTrue("New 48-128-1000-10 host added", tester.nodeRepository.nodes().node("host100").isPresent());
@@ -384,7 +390,7 @@ public class HostCapacityMaintainerTest {
tester.prepareAndActivateInfraApplication(configSrvApp, hostType.childNodeType());
// Expected number of hosts and children are provisioned
- NodeList allNodes = tester.nodeRepository().nodes().list();
+ NodeList allNodes = tester.nodeRepository().nodes().list().not().state(State.deprovisioned);
NodeList configHosts = allNodes.nodeType(hostType);
NodeList configNodes = allNodes.nodeType(hostType.childNodeType());
assertEquals(3, configHosts.size());
@@ -421,7 +427,7 @@ public class HostCapacityMaintainerTest {
// Host and child is removed
dynamicProvisioningTester.maintain();
- allNodes = tester.nodeRepository().nodes().list();
+ allNodes = tester.nodeRepository().nodes().list().not().state(State.deprovisioned);
assertEquals(2, allNodes.nodeType(hostType).size());
assertEquals(2, allNodes.nodeType(hostType.childNodeType()).size());
@@ -497,15 +503,101 @@ public class HostCapacityMaintainerTest {
}
@Test
+ public void deprovision_node_when_no_allocation_and_past_TTL() {
+ var tester = new DynamicProvisioningTester();
+ ManualClock clock = (ManualClock) tester.nodeRepository.clock();
+ tester.hostProvisioner.with(Behaviour.failProvisioning);
+ tester.provisioningTester.makeReadyHosts(2, new NodeResources(1, 1, 1, 1)).activateTenantHosts();
+ List<Node> hosts = tester.nodeRepository.nodes().list(Node.State.active).asList();
+ Node host1 = hosts.get(0);
+ Node host2 = hosts.get(1);
+ tester.nodeRepository.nodes().write(host1.withHostTTL(Duration.ofDays(1)), () -> { });
+ tester.nodeRepository.nodes().write(host2.withHostTTL(Duration.ofHours(1)), () -> { });
+ Node host11 = tester.addNode("host1-1", Optional.of(host1.hostname()), NodeType.tenant, State.active, DynamicProvisioningTester.tenantApp);
+
+ // Host is not marked for deprovisioning by maintainer, because child is present
+ tester.maintain();
+ assertFalse(tester.nodeRepository.nodes().node(host1.hostname()).get().status().wantToDeprovision());
+ assertEquals(Optional.empty(), tester.nodeRepository.nodes().node(host1.hostname()).get().hostEmptyAt());
+
+ // Child is set to deprovision, but turns active
+ tester.nodeRepository.nodes().park(host11.hostname(), true, Agent.operator, "not good");
+ tester.nodeRepository.nodes().reactivate(host11.hostname(), Agent.operator, "all good");
+ assertTrue(tester.nodeRepository.nodes().node(host11.hostname()).get().status().wantToDeprovision());
+ assertEquals(State.active, tester.nodeRepository.nodes().node(host11.hostname()).get().state());
+ tester.maintain();
+ assertFalse(tester.nodeRepository.nodes().node(host1.hostname()).get().status().wantToDeprovision());
+ assertEquals(Optional.empty(), tester.nodeRepository.nodes().node(host1.hostname()).get().hostEmptyAt());
+
+ // Child is parked, to make the host effectively empty
+ tester.nodeRepository.nodes().park(host11.hostname(), true, Agent.operator, "not good");
+ tester.maintain();
+ assertFalse(tester.nodeRepository.nodes().node(host1.hostname()).get().status().wantToDeprovision());
+ assertEquals(Optional.of(clock.instant().truncatedTo(ChronoUnit.MILLIS)),
+ tester.nodeRepository.nodes().node(host1.hostname()).get().hostEmptyAt());
+
+ // Some time passes, but not enough for host1 to be deprovisioned
+ clock.advance(Duration.ofDays(1).minusSeconds(1));
+ tester.maintain();
+ assertFalse(tester.nodeRepository.nodes().node(host1.hostname()).get().status().wantToDeprovision());
+ assertEquals(Optional.of(clock.instant().minus(Duration.ofDays(1).minusSeconds(1)).truncatedTo(ChronoUnit.MILLIS)),
+ tester.nodeRepository.nodes().node(host1.hostname()).get().hostEmptyAt());
+ assertTrue(tester.nodeRepository.nodes().node(host2.hostname()).get().status().wantToDeprovision());
+ assertTrue(tester.nodeRepository.nodes().node(host2.hostname()).get().status().wantToRetire());
+ assertEquals(State.active, tester.nodeRepository.nodes().node(host2.hostname()).get().state());
+ assertEquals(Optional.of(clock.instant().minus(Duration.ofDays(1).minusSeconds(1)).truncatedTo(ChronoUnit.MILLIS)),
+ tester.nodeRepository.nodes().node(host2.hostname()).get().hostEmptyAt());
+
+ // Some more time passes, but child is reactivated on host1, rendering the host non-empty again
+ clock.advance(Duration.ofDays(1));
+ tester.nodeRepository.nodes().reactivate(host11.hostname(), Agent.operator, "all good");
+ tester.maintain();
+ assertFalse(tester.nodeRepository.nodes().node(host1.hostname()).get().status().wantToDeprovision());
+ assertEquals(Optional.empty(), tester.nodeRepository.nodes().node(host1.hostname()).get().hostEmptyAt());
+
+ // Child is removed, and host is marked as empty
+ tester.nodeRepository.database().writeTo(State.deprovisioned, host11, Agent.operator, Optional.empty());
+ tester.nodeRepository.nodes().forget(tester.nodeRepository.nodes().node(host11.hostname()).get());
+ assertEquals(Optional.empty(), tester.nodeRepository.nodes().node(host11.hostname()));
+ tester.maintain();
+ assertFalse(tester.nodeRepository.nodes().node(host1.hostname()).get().status().wantToDeprovision());
+ assertEquals(Optional.of(clock.instant().truncatedTo(ChronoUnit.MILLIS)),
+ tester.nodeRepository.nodes().node(host1.hostname()).get().hostEmptyAt());
+
+ // Enough time passes for the host to be deprovisioned
+ clock.advance(Duration.ofDays(1));
+ tester.maintain();
+ assertTrue(tester.nodeRepository.nodes().node(host1.hostname()).get().status().wantToDeprovision());
+ assertTrue(tester.nodeRepository.nodes().node(host1.hostname()).get().status().wantToRetire());
+ assertEquals(State.active, tester.nodeRepository.nodes().node(host1.hostname()).get().state());
+ assertEquals(Optional.of(clock.instant().minus(Duration.ofDays(1)).truncatedTo(ChronoUnit.MILLIS)),
+ tester.nodeRepository.nodes().node(host1.hostname()).get().hostEmptyAt());
+
+ // Let tenant host app redeploy, retiring the obsolete host.
+ tester.provisioningTester.activateTenantHosts();
+ clock.advance(Duration.ofHours(1));
+ new RetiredExpirer(tester.nodeRepository,
+ new MockDeployer(tester.nodeRepository),
+ new NullMetric(),
+ Duration.ofHours(1),
+ Duration.ofHours(1)).maintain();
+
+ // Host and children can now be removed.
+ tester.provisioningTester.activateTenantHosts();
+ tester.maintain();
+ assertEquals(List.of(), tester.nodeRepository.nodes().list().not().state(State.deprovisioned).asList());
+ }
+
+ @Test
public void deprovision_parked_node_with_allocation() {
var tester = new DynamicProvisioningTester();
tester.hostProvisioner.with(Behaviour.failProvisioning);
- Node host4 = tester.addNode("host4", Optional.empty(), NodeType.host, Node.State.parked);
+ Node host4 = tester.addNode("host4", Optional.empty(), NodeType.host, Node.State.parked, null, Duration.ofDays(1));
Node host41 = tester.addNode("host4-1", Optional.of("host4"), NodeType.tenant, Node.State.parked, DynamicProvisioningTester.tenantApp);
Node host42 = tester.addNode("host4-2", Optional.of("host4"), NodeType.tenant, Node.State.active, DynamicProvisioningTester.tenantApp);
Node host43 = tester.addNode("host4-3", Optional.of("host4"), NodeType.tenant, Node.State.failed, DynamicProvisioningTester.tenantApp);
- // Host and children are marked for deprovisioning
+ // Host and children are marked for deprovisioning, bypassing host TTL.
tester.nodeRepository.nodes().deprovision("host4", Agent.operator, Instant.now());
for (var node : List.of(host4, host41, host42, host43)) {
assertTrue(tester.nodeRepository.nodes().node(node.hostname()).map(n -> n.status().wantToDeprovision()).get());
@@ -522,10 +614,14 @@ public class HostCapacityMaintainerTest {
// Last child is parked
tester.nodeRepository.nodes().park(host42.hostname(), false, Agent.system, getClass().getSimpleName());
- // Host and children can now be removed
+ // Host and children can now be removed.
tester.maintain();
for (var node : List.of(host4, host41, host42, host43)) {
- assertTrue(node.hostname() + " removed", tester.nodeRepository.nodes().node(node.hostname()).isEmpty());
+ if (node.type().isHost()) {
+ assertSame(node.hostname() + " moved to deprovisioned", State.deprovisioned, tester.nodeRepository.nodes().node(node.hostname()).get().state());
+ } else {
+ assertTrue(node.hostname() + " removed", tester.nodeRepository.nodes().node(node.hostname()).isEmpty());
+ }
}
}
@@ -559,7 +655,7 @@ public class HostCapacityMaintainerTest {
private void assertCfghost3IsDeprovisioned(DynamicProvisioningTester tester) {
assertEquals(4, tester.nodeRepository.nodes().list(Node.State.active).size());
assertEquals(2, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.confighost).size());
- assertTrue(tester.nodeRepository.nodes().node("cfghost3").isEmpty());
+ assertSame(State.deprovisioned, tester.nodeRepository.nodes().node("cfghost3").get().state());
}
private static class DynamicProvisioningTester {
@@ -633,17 +729,22 @@ public class HostCapacityMaintainerTest {
return cfghost;
}
- private Node addNode(String hostname, Optional<String> parentHostname, NodeType nodeType, Node.State state) {
- return addNode(hostname, parentHostname, nodeType, state, null);
+ private Node addNode(String hostname, Optional<String> parentHostname, NodeType nodeType, Node.State state, ApplicationId application) {
+ return addNode(hostname, parentHostname, nodeType, state, application, null);
}
- private Node addNode(String hostname, Optional<String> parentHostname, NodeType nodeType, Node.State state, ApplicationId application) {
- Node node = createNode(hostname, parentHostname, nodeType, state, application);
+ private Node addNode(String hostname, Optional<String> parentHostname, NodeType nodeType, Node.State state, ApplicationId application, Duration hostTTL) {
+ Node node = createNode(hostname, parentHostname, nodeType, state, application, hostTTL);
return nodeRepository.database().addNodesInState(List.of(node), node.state(), Agent.system).get(0);
}
private Node createNode(String hostname, Optional<String> parentHostname, NodeType nodeType,
Node.State state, ApplicationId application, String... additionalHostnames) {
+ return createNode(hostname, parentHostname, nodeType, state, application, null, additionalHostnames);
+ }
+
+ private Node createNode(String hostname, Optional<String> parentHostname, NodeType nodeType,
+ Node.State state, ApplicationId application, Duration hostTTL, String... additionalHostnames) {
Flavor flavor = nodeRepository.flavors().getFlavor(parentHostname.isPresent() ? "docker" : "host3").orElseThrow();
Optional<Allocation> allocation = Optional.ofNullable(application)
.map(app -> new Allocation(
@@ -654,17 +755,18 @@ public class HostCapacityMaintainerTest {
false));
List<com.yahoo.config.provision.HostName> hostnames = Stream.of(additionalHostnames).map(com.yahoo.config.provision.HostName::of).toList();
Node.Builder builder = Node.create("fake-id-" + hostname, hostname, flavor, state, nodeType)
- .ipConfig(IP.Config.of(state == Node.State.active ? Set.of("::1") : Set.of(), Set.of(), hostnames));
+ .ipConfig(IP.Config.of(state == Node.State.active ? Set.of("::1") : Set.of(), Set.of(), hostnames))
+ .hostTTL(hostTTL);
parentHostname.ifPresent(builder::parentHostname);
allocation.ifPresent(builder::allocation);
if (hostname.equals("host2-1"))
- builder.status(Status.initial().withWantToRetire(true, true, false));
+ builder.status(Status.initial().withWantToRetire(true, true, false, false));
return builder.build();
}
private long provisionedHostsMatching(NodeResources resources) {
return hostProvisioner.provisionedHosts().stream()
- .filter(host -> host.generateHost().resources().compatibleWith(resources))
+ .filter(host -> host.generateHost(Duration.ZERO).resources().compatibleWith(resources))
.count();
}
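
The new deprovision_node_when_no_allocation_and_past_TTL test above walks through the host-TTL lifecycle: hostEmptyAt is recorded the first time the maintainer sees a host with a TTL and no active children, it is cleared again if a child reappears, and the host is only marked for retirement and deprovisioning once it has stayed empty longer than its TTL. A standalone sketch of that decision, assuming those three pieces of state are all that matter:

    import java.time.Duration;
    import java.time.Instant;
    import java.util.Optional;

    // Sketch of the host-TTL decision exercised above; field and method names are assumptions,
    // not the maintainer's real code.
    public class HostTtlDecision {

        private Optional<Instant> hostEmptyAt = Optional.empty();

        /** Updates hostEmptyAt and reports whether the host should now be marked for deprovisioning. */
        boolean maintain(boolean hostIsEmpty, Duration hostTtl, Instant now) {
            if (!hostIsEmpty) {                // a child is present, or became active again
                hostEmptyAt = Optional.empty();
                return false;
            }
            if (hostEmptyAt.isEmpty())         // first time the host is observed empty
                hostEmptyAt = Optional.of(now);
            return !now.isBefore(hostEmptyAt.get().plus(hostTtl));
        }

        public static void main(String[] args) {
            var decision = new HostTtlDecision();
            Instant t0 = Instant.parse("2023-01-01T00:00:00Z");
            Duration ttl = Duration.ofDays(1);
            System.out.println(decision.maintain(true, ttl, t0));                            // false: just became empty
            System.out.println(decision.maintain(true, ttl, t0.plus(Duration.ofHours(23)))); // false: still within TTL
            System.out.println(decision.maintain(false, ttl, t0.plus(Duration.ofHours(23)))); // false: child is back, timer reset
            System.out.println(decision.maintain(true, ttl, t0.plus(Duration.ofHours(24)))); // false: timer restarted
            System.out.println(decision.maintain(true, ttl, t0.plus(Duration.ofHours(49)))); // true: empty past TTL
        }
    }
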
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgraderTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgraderTest.java
new file mode 100644
index 00000000000..6224143aabf
--- /dev/null
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgraderTest.java
@@ -0,0 +1,85 @@
+package com.yahoo.vespa.hosted.provision.maintenance;
+
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.Capacity;
+import com.yahoo.config.provision.ClusterResources;
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.NodeFlavors;
+import com.yahoo.config.provision.NodeResources;
+import com.yahoo.config.provision.NodeType;
+import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
+import com.yahoo.vespa.hosted.provision.provisioning.FlavorConfigBuilder;
+import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
+import com.yahoo.vespa.hosted.provision.testutils.MockDeployer;
+import com.yahoo.vespa.hosted.provision.testutils.MockHostProvisioner;
+import com.yahoo.vespa.hosted.provision.testutils.MockHostProvisioner.Behaviour;
+import org.junit.jupiter.api.Test;
+
+import java.time.Duration;
+import java.util.Map;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+/**
+ * @author mpolden
+ */
+class HostFlavorUpgraderTest {
+
+ @Test
+ public void maintain() {
+ String flavor0 = "host";
+ String flavor1 = "host2";
+ NodeFlavors flavors = FlavorConfigBuilder.createDummies(flavor0, flavor1);
+ MockHostProvisioner hostProvisioner = new MockHostProvisioner(flavors.getFlavors());
+ ProvisioningTester tester = new ProvisioningTester.Builder().dynamicProvisioning()
+ .flavors(flavors.getFlavors())
+ .hostProvisioner(hostProvisioner)
+ .build();
+ ApplicationId app = ProvisioningTester.applicationId();
+ NodeResources resources = new NodeResources(4, 8, 100, 1,
+ NodeResources.DiskSpeed.fast, NodeResources.StorageType.remote);
+ ClusterSpec spec = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("c1")).vespaVersion("1").build();
+ Capacity capacity = Capacity.from(new ClusterResources(2, 1, resources));
+ Map<ApplicationId, MockDeployer.ApplicationContext> applications = Map.of(app, new MockDeployer.ApplicationContext(app, spec, capacity));
+ MockDeployer deployer = new MockDeployer(tester.provisioner(), tester.clock(), applications);
+ HostFlavorUpgrader upgrader = new HostFlavorUpgrader(tester.nodeRepository(), Duration.ofDays(1),
+ new TestMetric(), deployer, hostProvisioner);
+
+ // Provision hosts and deploy application
+ tester.makeReadyNodes(2, flavor0, NodeType.host);
+ tester.activateTenantHosts();
+ tester.deploy(app, spec, capacity);
+ Node host = tester.nodeRepository().nodes().list().hosts().first().get();
+ assertEquals(flavor0, host.flavor().name());
+
+ // Nothing to upgrade initially
+ assertEquals(1, upgrader.maintain());
+ assertEquals(NodeList.of(), tester.nodeRepository().nodes().list()
+ .matching(h -> h.status().wantToUpgradeFlavor()));
+
+ // Mark flavor as upgradable, but fail all provisioning requests
+ hostProvisioner.addUpgradableFlavor(flavor0)
+ .with(Behaviour.failProvisionRequest);
+ assertEquals(1, upgrader.maintain());
+ assertEquals(NodeList.of(),
+ tester.nodeRepository().nodes().list()
+ .matching(node -> node.status().wantToUpgradeFlavor() || node.status().wantToRetire()),
+ "No hosts marked for upgrade or retirement");
+
+ // First provision request fails, but second succeeds and a replacement host starts provisioning
+ hostProvisioner.with(Behaviour.failProvisionRequest, 1);
+ assertEquals(1, upgrader.maintain());
+ NodeList nodes = tester.nodeRepository().nodes().list();
+ NodeList upgradingFlavor = nodes.matching(node -> node.status().wantToRetire() &&
+ node.status().wantToUpgradeFlavor());
+ assertEquals(1, upgradingFlavor.size());
+ assertEquals(1, nodes.state(Node.State.provisioned).size());
+
+ // No more upgrades are started while host is retiring
+ assertEquals(1, upgrader.maintain());
+ assertEquals(upgradingFlavor, tester.nodeRepository().nodes().list()
+ .matching(node -> node.status().wantToUpgradeFlavor()));
+ }
+
+}
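
HostFlavorUpgraderTest pins down the upgrade flow: a host on an upgradable flavor is only marked wantToRetire and wantToUpgradeFlavor after a replacement host was provisioned successfully, and no new upgrade is started while another host is still retiring. A sketch of that flow, with assumed names rather than the real HostFlavorUpgrader:

    import java.util.List;
    import java.util.Optional;

    // Illustrative sketch of the flow exercised above; Host, Provisioner and upgradeOne are
    // assumptions, not the real HostFlavorUpgrader.
    public class FlavorUpgradeFlow {

        record Host(String hostname, String flavor, boolean retiring) { }

        interface Provisioner { boolean provisionReplacementFor(Host host); }

        /** Returns the hostname marked for retirement and flavor upgrade, if any. */
        static Optional<String> upgradeOne(List<Host> hosts, List<String> upgradableFlavors, Provisioner provisioner) {
            if (hosts.stream().anyMatch(Host::retiring))
                return Optional.empty();                        // an upgrade is already in progress
            for (Host host : hosts) {
                if (!upgradableFlavors.contains(host.flavor())) continue;
                if (provisioner.provisionReplacementFor(host))  // replacement must provision first
                    return Optional.of(host.hostname());
            }
            return Optional.empty();
        }

        public static void main(String[] args) {
            List<Host> hosts = List.of(new Host("host1", "host", false), new Host("host2", "host", false));
            System.out.println(upgradeOne(hosts, List.of("host"), h -> false)); // Optional.empty: provisioning failed
            System.out.println(upgradeOne(hosts, List.of("host"), h -> true));  // Optional[host1]
        }
    }
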
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/HostRetirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/HostRetirerTest.java
index 387a2cf5a4b..253c150f9da 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/HostRetirerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/HostRetirerTest.java
@@ -1,14 +1,9 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.maintenance;
-import com.yahoo.config.provision.Cloud;
-import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.HostEvent;
import com.yahoo.config.provision.NodeFlavors;
import com.yahoo.config.provision.NodeResources;
-import com.yahoo.config.provision.RegionName;
-import com.yahoo.config.provision.SystemName;
-import com.yahoo.config.provision.Zone;
import com.yahoo.jdisc.test.MockMetric;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
@@ -30,15 +25,10 @@ public class HostRetirerTest {
@Test
public void retire_hosts() {
NodeFlavors flavors = FlavorConfigBuilder.createDummies("default");
- Zone zone = new Zone(Cloud.builder()
- .dynamicProvisioning(true)
- .build(), SystemName.defaultSystem(),
- Environment.defaultEnvironment(),
- RegionName.defaultName());
MockHostProvisioner hostProvisioner = new MockHostProvisioner(flavors.getFlavors());
ProvisioningTester tester = new ProvisioningTester.Builder().hostProvisioner(hostProvisioner)
.flavors(flavors.getFlavors())
- .zone(zone)
+ .dynamicProvisioning()
.build();
HostRetirer retirer = new HostRetirer(tester.nodeRepository(), Duration.ofDays(1), new MockMetric(), hostProvisioner);
tester.makeReadyHosts(3, new NodeResources(24, 48, 1000, 10))
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java
index 487355a0b75..de2c060a0eb 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java
@@ -19,8 +19,11 @@ import com.yahoo.vespa.curator.stats.LockStats;
import com.yahoo.vespa.hosted.provision.LockedNodeList;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeRepository;
+import com.yahoo.vespa.hosted.provision.autoscale.Autoscaling;
+import com.yahoo.vespa.hosted.provision.autoscale.Load;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.Allocation;
+import com.yahoo.vespa.hosted.provision.node.ClusterId;
import com.yahoo.vespa.hosted.provision.node.Generation;
import com.yahoo.vespa.hosted.provision.node.IP;
import com.yahoo.vespa.hosted.provision.provisioning.FlavorConfigBuilder;
@@ -240,7 +243,7 @@ public class MetricsReporterTest {
}
@Test
- public void non_active_metric() {
+ public void node_and_cluster_metrics() {
ProvisioningTester tester = new ProvisioningTester.Builder().build();
tester.makeReadyHosts(5, new NodeResources(64, 256, 2000, 10));
tester.activateTenantHosts();
@@ -248,18 +251,36 @@ public class MetricsReporterTest {
MetricsReporter metricsReporter = metricsReporter(metric, tester);
// Application is deployed
- ApplicationId application = ApplicationId.from("t1", "a1", "default");
- Map<String, String> dimensions = Map.of("applicationId", application.toFullString());
+ ApplicationId applicationId = ApplicationId.from("t1", "a1", "default");
+ ClusterSpec clusterSpec = ProvisioningTester.contentClusterSpec();
NodeResources resources = new NodeResources(2, 8, 100, 1);
- List<Node> activeNodes = tester.deploy(application, ProvisioningTester.contentClusterSpec(), Capacity.from(new ClusterResources(4, 1, resources)));
+ Capacity capacity = Capacity.from(new ClusterResources(4, 1, resources));
+
+ List<Node> activeNodes = tester.deploy(applicationId, clusterSpec, capacity);
+ var application = tester.nodeRepository().applications().require(applicationId);
+ application = application.withCluster(clusterSpec.id(), false, capacity);
+ var cluster = application.cluster(clusterSpec.id()).get().withTarget(new Autoscaling(Autoscaling.Status.ideal,
+ "test",
+ Optional.empty(),
+ tester.clock().instant(),
+ Load.zero(),
+ new Load(0.1, 0.2, 0.3),
+ Autoscaling.Metrics.zero()));
+ tester.nodeRepository().applications().put(application.with(cluster), tester.nodeRepository().applications().lock(applicationId));
+
metricsReporter.maintain();
+ Map<String, String> dimensions = Map.of("applicationId", applicationId.toFullString());
assertEquals(0D, getMetric("nodes.nonActiveFraction", metric, dimensions));
assertEquals(4, getMetric("nodes.active", metric, dimensions));
assertEquals(0, getMetric("nodes.nonActive", metric, dimensions));
- Map<String, String> clusterDimensions = Map.of("applicationId", application.toFullString(),
- "clusterid", ProvisioningTester.contentClusterSpec().id().value());
+
+ Map<String, String> clusterDimensions = Map.of("applicationId", applicationId.toFullString(),
+ "clusterid", clusterSpec.id().value());
assertEquals(1.392, getMetric("cluster.cost", metric, clusterDimensions));
+ assertEquals(0.1, getMetric("cluster.load.ideal.cpu", metric, clusterDimensions));
+ assertEquals(0.2, getMetric("cluster.load.ideal.memory", metric, clusterDimensions));
+ assertEquals(0.3, getMetric("cluster.load.ideal.disk", metric, clusterDimensions));
// One node fails
tester.fail(activeNodes.get(0).hostname());
@@ -269,7 +290,7 @@ public class MetricsReporterTest {
assertEquals(1, getMetric("nodes.nonActive", metric, dimensions));
// Cluster is removed
- tester.deactivate(application);
+ tester.deactivate(applicationId);
metricsReporter.maintain();
assertEquals(1D, getMetric("nodes.nonActiveFraction", metric, dimensions).doubleValue(), Double.MIN_VALUE);
assertEquals(0, getMetric("nodes.active", metric, dimensions));
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
index f73d6f2ce01..1b677224295 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
@@ -73,9 +73,9 @@ public class ScalingSuggestionsMaintainerTest {
new TestMetric());
maintainer.maintain();
- assertEquals("8 nodes with [vcpu: 3.2, memory: 4.5 Gb, disk 10.0 Gb, bandwidth: 0.1 Gbps, architecture: any]",
+ assertEquals("8 nodes with [vcpu: 3.2, memory: 4.5 Gb, disk: 10.0 Gb, bandwidth: 0.1 Gbps, architecture: any]",
suggestionOf(app1, cluster1, tester).resources().get().toString());
- assertEquals("8 nodes with [vcpu: 3.6, memory: 4.4 Gb, disk 11.8 Gb, bandwidth: 0.1 Gbps, architecture: any]",
+ assertEquals("8 nodes with [vcpu: 3.6, memory: 4.7 Gb, disk: 11.8 Gb, bandwidth: 0.1 Gbps, architecture: any]",
suggestionOf(app2, cluster2, tester).resources().get().toString());
// Utilization goes way down
@@ -83,14 +83,14 @@ public class ScalingSuggestionsMaintainerTest {
addMeasurements(0.10f, 0.10f, 0.10f, 0, 500, app1, tester.nodeRepository());
maintainer.maintain();
assertEquals("Suggestion stays at the peak value observed",
- "8 nodes with [vcpu: 3.2, memory: 4.5 Gb, disk 10.0 Gb, bandwidth: 0.1 Gbps, architecture: any]",
+ "8 nodes with [vcpu: 3.2, memory: 4.5 Gb, disk: 10.0 Gb, bandwidth: 0.1 Gbps, architecture: any]",
suggestionOf(app1, cluster1, tester).resources().get().toString());
// Utilization is still way down and a week has passed
tester.clock().advance(Duration.ofDays(7));
addMeasurements(0.10f, 0.10f, 0.10f, 0, 500, app1, tester.nodeRepository());
maintainer.maintain();
assertEquals("Peak suggestion has been outdated",
- "3 nodes with [vcpu: 1.2, memory: 4.0 Gb, disk 10.0 Gb, bandwidth: 0.1 Gbps, architecture: any]",
+ "3 nodes with [vcpu: 1.2, memory: 4.0 Gb, disk: 10.0 Gb, bandwidth: 0.1 Gbps, architecture: any]",
suggestionOf(app1, cluster1, tester).resources().get().toString());
assertTrue(shouldSuggest(app1, cluster1, tester));
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
index 40b035968bd..ada96b3f793 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
@@ -622,7 +622,7 @@ public class OsVersionsTest {
Optional<Version> wantedOsVersion = node.status().osVersion().wanted();
assertFalse(node + " is not retiring", node.status().wantToRetire());
assertTrue(node + " is rebuilding", node.status().wantToRebuild());
- node = node.withWantToRetire(false, false, false, Agent.system,
+ node = node.withWantToRetire(false, false, false, false, Agent.system,
tester.clock().instant());
return node.with(node.status().withOsVersion(node.status().osVersion().withCurrent(wantedOsVersion)));
});
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializerTest.java
index c429f88cfa1..c997da543ea 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializerTest.java
@@ -68,7 +68,7 @@ public class ApplicationSerializerTest {
Load.zero(),
Load.one(),
Autoscaling.Metrics.zero()),
- new ClusterInfo.Builder().bcpDeadline(Duration.ofMinutes(33)).build(),
+ new ClusterInfo.Builder().bcpDeadline(Duration.ofMinutes(33)).hostTTL(Duration.ofSeconds(321)).build(),
new BcpGroupInfo(0.1, 0.2, 0.3),
List.of(new ScalingEvent(new ClusterResources(10, 5, minResources),
new ClusterResources(12, 6, minResources),
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializerTest.java
index 1086f2026a8..1a0827fc487 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializerTest.java
@@ -326,14 +326,15 @@ public class NodeSerializerTest {
}
@Test
- public void want_to_rebuild() {
+ public void want_to_rebuild_and_upgrade_flavor() {
Node node = nodeSerializer.fromJson(nodeSerializer.toJson(createNode()));
assertFalse(node.status().wantToRebuild());
- node = node.with(node.status().withWantToRetire(true, false, true));
+ node = node.with(node.status().withWantToRetire(true, false, true, true));
node = nodeSerializer.fromJson(nodeSerializer.toJson(node));
assertTrue(node.status().wantToRetire());
assertFalse(node.status().wantToDeprovision());
assertTrue(node.status().wantToRebuild());
+ assertTrue(node.status().wantToUpgradeFlavor());
}
@Test
@@ -476,13 +477,19 @@ public class NodeSerializerTest {
nodeFlavors.getFlavorOrThrow("default"), NodeType.host);
Node node = nodeSerializer.fromJson(nodeSerializer.toJson(builder.build()));
assertFalse(node.exclusiveToApplicationId().isPresent());
+ assertFalse(node.hostTTL().isPresent());
assertFalse(node.exclusiveToClusterType().isPresent());
ApplicationId exclusiveToApp = ApplicationId.from("tenant1", "app1", "instance1");
ClusterSpec.Type exclusiveToCluster = ClusterSpec.Type.admin;
- node = builder.exclusiveToApplicationId(exclusiveToApp).exclusiveToClusterType(exclusiveToCluster).build();
+ node = builder.exclusiveToApplicationId(exclusiveToApp)
+ .hostTTL(Duration.ofDays(1))
+ .hostEmptyAt(clock.instant().minus(Duration.ofDays(1)).truncatedTo(MILLIS))
+ .exclusiveToClusterType(exclusiveToCluster).build();
node = nodeSerializer.fromJson(nodeSerializer.toJson(node));
assertEquals(exclusiveToApp, node.exclusiveToApplicationId().get());
+ assertEquals(Duration.ofDays(1), node.hostTTL().get());
+ assertEquals(clock.instant().minus(Duration.ofDays(1)).truncatedTo(MILLIS), node.hostEmptyAt().get());
assertEquals(exclusiveToCluster, node.exclusiveToClusterType().get());
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java
index 382d2840377..ced29b28d41 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java
@@ -3,10 +3,8 @@ package com.yahoo.vespa.hosted.provision.provisioning;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Capacity;
-import com.yahoo.config.provision.Cloud;
import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
-import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.Flavor;
import com.yahoo.config.provision.HostSpec;
import com.yahoo.config.provision.NodeFlavors;
@@ -15,12 +13,10 @@ import com.yahoo.config.provision.NodeResources.Architecture;
import com.yahoo.config.provision.NodeResources.DiskSpeed;
import com.yahoo.config.provision.NodeResources.StorageType;
import com.yahoo.config.provision.NodeType;
-import com.yahoo.config.provision.RegionName;
-import com.yahoo.config.provision.SystemName;
-import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.Node.State;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Agent;
@@ -31,6 +27,7 @@ import org.junit.Test;
import java.time.Instant;
import java.util.Collection;
import java.util.List;
+import java.util.Optional;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
@@ -116,6 +113,55 @@ public class DynamicProvisioningTest {
}
@Test
+ public void empty_exclusive_to_hosts_reused_iff_new_allocation_fits_perfectly() {
+ var tester = tester(true);
+
+ NodeResources highResources = new NodeResources(4, 80, 100, 1);
+ NodeResources lowResources = new NodeResources(2, 20, 50, 1);
+
+ ApplicationId application = ProvisioningTester.applicationId();
+ prepareAndActivate(application, clusterSpec("mycluster", true), 2, 1, highResources, tester);
+
+ // Total of 4 nodes should now be in node-repo, 2 active hosts and 2 active nodes.
+ assertEquals(4, tester.nodeRepository().nodes().list().size());
+ assertEquals(2, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
+
+ // Redeploying the application causes no changes at all.
+ prepareAndActivate(application, clusterSpec("mycluster", true), 2, 1, highResources, tester);
+ assertEquals(4, tester.nodeRepository().nodes().list().size());
+ assertEquals(2, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
+
+ // Deploying with a smaller node flavour causes new, smaller hosts to be provisioned.
+ prepareAndActivate(application, clusterSpec("mycluster", true), 2, 1, lowResources, tester);
+
+ // Total of 8 nodes should now be in node-repo, 4 active hosts and 4 active nodes, of which 2 are retired.
+ NodeList nodes = tester.nodeRepository().nodes().list();
+ assertEquals(8, nodes.size());
+ assertEquals(4, nodes.nodeType(NodeType.host).state(Node.State.active).size());
+ assertEquals(4, nodes.nodeType(NodeType.tenant).state(Node.State.active).size());
+ assertEquals(2, nodes.retired().size());
+
+ // Remove the child nodes, and redeploy with the original flavour. This should reuse the existing hosts.
+ tester.nodeRepository().database().writeTo(State.deprovisioned, nodes.retired().asList(), Agent.operator, Optional.empty());
+ tester.nodeRepository().nodes().list().state(State.deprovisioned).forEach(tester.nodeRepository().nodes()::forget);
+
+ // Total of 6 nodes should now be in node-repo, 4 active hosts and 2 active nodes.
+ nodes = tester.nodeRepository().nodes().list();
+ assertEquals(6, nodes.size());
+ assertEquals(4, nodes.nodeType(NodeType.host).state(Node.State.active).size());
+ assertEquals(2, nodes.nodeType(NodeType.tenant).state(Node.State.active).size());
+ assertEquals(0, nodes.retired().size());
+
+ // Deploy again with high resources.
+ prepareAndActivate(application, clusterSpec("mycluster", true), 2, 1, highResources, tester);
+ // Total of 8 nodes should now be in node-repo, 4 active hosts and 4 active nodes.
+ nodes = tester.nodeRepository().nodes().list();
+ assertEquals(8, nodes.size());
+ assertEquals(4, nodes.nodeType(NodeType.host).state(Node.State.active).size());
+ assertEquals(4, nodes.nodeType(NodeType.tenant).state(Node.State.active).size());
+ }
+
+ @Test
public void avoids_allocating_to_empty_hosts() {
var tester = tester(false);
tester.makeReadyHosts(6, new NodeResources(12, 12, 200, 12));
@@ -199,7 +245,7 @@ public class DynamicProvisioningTest {
List<Flavor> flavors = List.of(new Flavor("2x",
new NodeResources(2, 17, 200, 10, fast, remote)));
- ProvisioningTester tester = new ProvisioningTester.Builder().zone(zone(false))
+ ProvisioningTester tester = new ProvisioningTester.Builder().dynamicProvisioning(true, false)
.flavors(flavors)
.hostProvisioner(new MockHostProvisioner(flavors, memoryTax))
.nameResolver(nameResolver)
@@ -244,7 +290,8 @@ public class DynamicProvisioningTest {
List<Flavor> flavors = List.of(new Flavor("x86", new NodeResources(2, 4, 50, 0.1, fast, local, Architecture.x86_64)),
new Flavor("arm", new NodeResources(2, 4, 50, 0.1, fast, local, Architecture.arm64)));
MockHostProvisioner hostProvisioner = new MockHostProvisioner(flavors);
- ProvisioningTester tester = new ProvisioningTester.Builder().zone(zone(false))
+ ProvisioningTester tester = new ProvisioningTester.Builder()
+ .dynamicProvisioning(true, false)
.flavors(flavors)
.hostProvisioner(hostProvisioner)
.resourcesCalculator(0, 0)
@@ -287,7 +334,7 @@ public class DynamicProvisioningTest {
new Flavor("2x", new NodeResources(2, 20 - memoryTax, 200, 0.1, fast, remote)),
new Flavor("4x", new NodeResources(4, 40 - memoryTax, 400, 0.1, fast, remote)));
- ProvisioningTester tester = new ProvisioningTester.Builder().zone(zone(false))
+ ProvisioningTester tester = new ProvisioningTester.Builder().dynamicProvisioning(true, false)
.flavors(flavors)
.hostProvisioner(new MockHostProvisioner(flavors, memoryTax))
.nameResolver(nameResolver)
@@ -362,7 +409,7 @@ public class DynamicProvisioningTest {
new Flavor("4x", new NodeResources(4, 40 - memoryTax, 400, 0.1, fast, remote)),
new Flavor("4xl", new NodeResources(4, 40 - memoryTax, 400, 0.1, fast, local)));
- ProvisioningTester tester = new ProvisioningTester.Builder().zone(zone(false))
+ ProvisioningTester tester = new ProvisioningTester.Builder().dynamicProvisioning(true, false)
.flavors(flavors)
.hostProvisioner(new MockHostProvisioner(flavors, memoryTax))
.nameResolver(nameResolver)
@@ -397,7 +444,7 @@ public class DynamicProvisioningTest {
new Flavor("2xl", new NodeResources(2, 20 - memoryTax, 200, 0.1, fast, remote)),
new Flavor("4xl", new NodeResources(4, 40 - memoryTax, 400, 0.1, fast, remote)));
- ProvisioningTester tester = new ProvisioningTester.Builder().zone(zone(false))
+ ProvisioningTester tester = new ProvisioningTester.Builder().dynamicProvisioning(true, false)
.flavors(flavors)
.hostProvisioner(new MockHostProvisioner(flavors, memoryTax))
.nameResolver(nameResolver)
@@ -420,7 +467,7 @@ public class DynamicProvisioningTest {
public void gpu_host() {
List<Flavor> flavors = List.of(new Flavor("gpu", new NodeResources(4, 16, 125, 10, fast, local,
Architecture.x86_64, new NodeResources.GpuResources(1, 16))));
- ProvisioningTester tester = new ProvisioningTester.Builder().zone(zone(false))
+ ProvisioningTester tester = new ProvisioningTester.Builder().dynamicProvisioning(true, false)
.flavors(flavors)
.hostProvisioner(new MockHostProvisioner(flavors))
.nameResolver(nameResolver)
@@ -433,17 +480,9 @@ public class DynamicProvisioningTest {
2, 1, resources);
}
- private Zone zone(boolean sharing) {
- return new Zone(
- Cloud.builder().dynamicProvisioning(true).allowHostSharing(sharing).build(),
- SystemName.main,
- Environment.prod,
- RegionName.from("us-east"));
- }
-
private ProvisioningTester tester(boolean sharing) {
var hostProvisioner = new MockHostProvisioner(new NodeFlavors(ProvisioningTester.createConfig()).getFlavors(), nameResolver, 0);
- return new ProvisioningTester.Builder().zone(zone(sharing)).hostProvisioner(hostProvisioner).nameResolver(nameResolver).build();
+ return new ProvisioningTester.Builder().dynamicProvisioning(true, sharing).hostProvisioner(hostProvisioner).nameResolver(nameResolver).build();
}
private void prepareAndActivate(ApplicationId application, ClusterSpec clusterSpec, int nodes, int groups, NodeResources resources,
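
The new empty_exclusive_to_hosts_reused_iff_new_allocation_fits_perfectly test encodes its rule in the name: an empty host that was provisioned exclusively for an application is reused only when the newly requested node resources fill it exactly; otherwise new, right-sized hosts are provisioned. A minimal illustration of that predicate, with assumed types taken from the test rather than the allocator's actual code:

    // Illustrative only: the exact-fit reuse rule named by the test above.
    public class ExclusiveHostReuse {

        record Resources(double vcpu, double memoryGb, double diskGb) { }

        static boolean reusable(Resources emptyExclusiveHost, Resources requestedNode) {
            return emptyExclusiveHost.equals(requestedNode); // reuse only on a perfect fit
        }

        public static void main(String[] args) {
            Resources host = new Resources(4, 80, 100);
            System.out.println(reusable(host, new Resources(4, 80, 100))); // true: host is reused
            System.out.println(reusable(host, new Resources(2, 20, 50)));  // false: smaller hosts are provisioned
        }
    }
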
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
index 28cd3067155..67760d8cdf3 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
@@ -86,7 +86,7 @@ public class ProvisioningTest {
// deploy another application
SystemState state1App2 = prepare(application2, 2, 2, 3, 3, defaultResources, tester);
- assertFalse("Hosts to different apps are disjunct", state1App2.allHosts.removeAll(state1.allHosts));
+ assertFalse("Hosts to different apps are disjoint", state1App2.allHosts.removeAll(state1.allHosts));
tester.activate(application2, state1App2.allHosts);
// prepare twice
@@ -525,14 +525,14 @@ public class ProvisioningTest {
tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 2, 10, 20),
resources(6, 3, 3, 15, 25)));
tester.assertNodes("Allocation preserving resources within new limits",
- 6, 2, 3, 8.0/4*21 / (6.0/2), 25,
+ 6, 2, 3, 14.57, 25,
app1, cluster1);
// Widening window does not change allocation
tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 1, 5, 15),
resources(8, 4, 4, 21, 30)));
tester.assertNodes("Same allocation",
- 6, 2, 3, 8.0/4*21 / (6.0/2), 25,
+ 6, 2, 3, 14.57, 25,
app1, cluster1);
// Changing limits in opposite directions cause a mixture of min and max
@@ -682,6 +682,22 @@ public class ProvisioningTest {
}
@Test
+ public void non_matching_resources_but_cannot_fail() {
+ ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
+ tester.makeReadyHosts(4, defaultResources).activateTenantHosts();
+ ApplicationId application = ProvisioningTester.applicationId();
+ var cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("4.5.6").build();
+ var hosts1 = tester.prepare(application, cluster, Capacity.from(new ClusterResources(4, 1, defaultResources), false, true));
+ tester.activate(application, hosts1);
+
+ var nonMatchingResources = defaultResources.withVcpu(defaultResources.vcpu() * 2);
+ var hosts2 = tester.prepare(application, cluster, Capacity.from(new ClusterResources(4, 1, nonMatchingResources), false, false));
+ assertEquals(hosts1, hosts2);
+ for (var host : hosts2)
+ assertFalse(host.membership().get().retired());
+ }
+
+ @Test
public void out_of_capacity_all_nodes_want_to_retire() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
index 76dcc6cf8a8..2acbeb00f5f 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
@@ -7,9 +7,11 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ApplicationName;
import com.yahoo.config.provision.ApplicationTransaction;
import com.yahoo.config.provision.Capacity;
+import com.yahoo.config.provision.Cloud;
import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.DockerImage;
+import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.Flavor;
import com.yahoo.config.provision.HostFilter;
import com.yahoo.config.provision.HostSpec;
@@ -21,6 +23,8 @@ import com.yahoo.config.provision.NodeResources.StorageType;
import com.yahoo.config.provision.NodeType;
import com.yahoo.config.provision.ProvisionLock;
import com.yahoo.config.provision.ProvisionLogger;
+import com.yahoo.config.provision.RegionName;
+import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.Zone;
import com.yahoo.config.provisioning.FlavorsConfig;
@@ -673,6 +677,20 @@ public class ProvisioningTester {
return this;
}
+ public Builder dynamicProvisioning() {
+ return dynamicProvisioning(true, true);
+ }
+
+ public Builder dynamicProvisioning(boolean enabled, boolean allowHostSharing) {
+ return zone(new Zone(Cloud.builder()
+ .dynamicProvisioning(enabled)
+ .allowHostSharing(allowHostSharing)
+ .build(),
+ SystemName.defaultSystem(),
+ Environment.defaultEnvironment(),
+ RegionName.defaultName()));
+ }
+
public Builder nameResolver(NameResolver nameResolver) {
this.nameResolver = nameResolver;
return this;
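
Note on the ProvisioningTester.Builder hunk above: the per-test zone(boolean) helper removed from DynamicProvisioningTest is replaced by the builder's new dynamicProvisioning(enabled, allowHostSharing) method, which assembles the Zone itself. A minimal sketch of the two styles, assuming the com.yahoo.config.provision types imported above and a caller in the same package as ProvisioningTester (illustrative only, not code from this change):

    import com.yahoo.config.provision.Cloud;
    import com.yahoo.config.provision.Environment;
    import com.yahoo.config.provision.RegionName;
    import com.yahoo.config.provision.SystemName;
    import com.yahoo.config.provision.Zone;

    class DynamicProvisioningSetupSketch {

        // Old style: each test built a Zone with dynamic provisioning enabled by hand.
        ProvisioningTester oldStyle(boolean sharing) {
            Zone zone = new Zone(Cloud.builder().dynamicProvisioning(true).allowHostSharing(sharing).build(),
                                 SystemName.main, Environment.prod, RegionName.from("us-east"));
            return new ProvisioningTester.Builder().zone(zone).build();
        }

        // New style: a single builder call. Note the builder uses the default
        // system/environment/region instead of main/prod/us-east.
        ProvisioningTester newStyle(boolean sharing) {
            return new ProvisioningTester.Builder().dynamicProvisioning(true, sharing).build();
        }
    }
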
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java
index f40c8037f41..0b4d345b8a5 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java
@@ -440,7 +440,7 @@ public class VirtualNodeProvisioningTest {
catch (Exception e) {
assertEquals("No room for 3 nodes as 2 of 4 hosts are exclusive",
"Could not satisfy request for 3 nodes with " +
- "[vcpu: 2.0, memory: 4.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, architecture: any] " +
+ "[vcpu: 2.0, memory: 4.0 Gb, disk: 100.0 Gb, bandwidth: 1.0 Gbps, architecture: any] " +
"in tenant2.app2 container cluster 'my-container' 6.39: " +
"Node allocation failure on group 0: " +
"Not enough suitable nodes available due to host exclusivity constraints",
@@ -467,7 +467,7 @@ public class VirtualNodeProvisioningTest {
}
catch (NodeAllocationException e) {
assertEquals("Could not satisfy request for 2 nodes with " +
- "[vcpu: 1.0, memory: 4.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, storage type: remote, architecture: any] " +
+ "[vcpu: 1.0, memory: 4.0 Gb, disk: 100.0 Gb, bandwidth: 1.0 Gbps, storage type: remote, architecture: any] " +
"in tenant.app1 content cluster 'my-content'" +
" 6.42: Node allocation failure on group 0",
e.getMessage());
@@ -549,8 +549,8 @@ public class VirtualNodeProvisioningTest {
}
catch (IllegalArgumentException e) {
assertEquals("No allocation possible within limits: " +
- "from 2 nodes with [vcpu: 1.0, memory: 5.0 Gb, disk 10.0 Gb, bandwidth: 1.0 Gbps, architecture: any] " +
- "to 4 nodes with [vcpu: 1.0, memory: 5.0 Gb, disk 10.0 Gb, bandwidth: 1.0 Gbps, architecture: any]",
+ "from 2 nodes with [vcpu: 1.0, memory: 5.0 Gb, disk: 10.0 Gb, bandwidth: 1.0 Gbps, architecture: any] " +
+ "to 4 nodes with [vcpu: 1.0, memory: 5.0 Gb, disk: 10.0 Gb, bandwidth: 1.0 Gbps, architecture: any]",
e.getMessage());
}
}
@@ -573,9 +573,9 @@ public class VirtualNodeProvisioningTest {
}
catch (IllegalArgumentException e) {
assertEquals("No allocation possible within limits: " +
- "from 2 nodes with [vcpu: 20.0, memory: 37.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, architecture: any] " +
- "to 4 nodes with [vcpu: 20.0, memory: 37.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, architecture: any]. " +
- "Nearest allowed node resources: [vcpu: 20.0, memory: 40.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, storage type: remote, architecture: any]",
+ "from 2 nodes with [vcpu: 20.0, memory: 37.0 Gb, disk: 100.0 Gb, bandwidth: 1.0 Gbps, architecture: any] " +
+ "to 4 nodes with [vcpu: 20.0, memory: 37.0 Gb, disk: 100.0 Gb, bandwidth: 1.0 Gbps, architecture: any]. " +
+ "Nearest allowed node resources: [vcpu: 20.0, memory: 40.0 Gb, disk: 100.0 Gb, bandwidth: 1.0 Gbps, storage type: remote, architecture: any]",
e.getMessage());
}
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java
index 022822fd3ec..40895e25f2f 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java
@@ -1016,15 +1016,25 @@ public class NodesV2ApiTest {
assertResponse(new Request(url, Utf8.toBytes("{\"exclusiveToApplicationId\": \"t1:a1:i1\"}"), Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
- tester.assertPartialResponse(new Request(url), "exclusiveTo\":\"t1:a1:i1\",", true);
+ tester.assertPartialResponse(new Request(url), "\"exclusiveTo\":\"t1:a1:i1\",", true);
+
+ assertResponse(new Request(url, Utf8.toBytes("{\"hostTTL\": 86400000}"), Request.Method.PATCH),
+ "{\"message\":\"Updated dockerhost1.yahoo.com\"}");
+ tester.assertPartialResponse(new Request(url), "\"hostTTL\":86400000", true);
+
+ assertResponse(new Request(url, Utf8.toBytes("{\"hostEmptyAt\": 789}"), Request.Method.PATCH),
+ "{\"message\":\"Updated dockerhost1.yahoo.com\"}");
+ tester.assertPartialResponse(new Request(url), "\"hostEmptyAt\":789", true);
assertResponse(new Request(url, Utf8.toBytes("{\"exclusiveToClusterType\": \"admin\"}"), Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
- tester.assertPartialResponse(new Request(url), "exclusiveTo\":\"t1:a1:i1\",\"exclusiveToClusterType\":\"admin\",", true);
+ tester.assertPartialResponse(new Request(url), "\"exclusiveToClusterType\":\"admin\",", true);
- assertResponse(new Request(url, Utf8.toBytes("{\"exclusiveTo\": null, \"exclusiveToClusterType\": null}"), Request.Method.PATCH),
+ assertResponse(new Request(url, Utf8.toBytes("{\"exclusiveTo\": null, \"hostTTL\":null, \"hostEmptyAt\":null, \"exclusiveToClusterType\": null}"), Request.Method.PATCH),
"{\"message\":\"Updated dockerhost1.yahoo.com\"}");
- tester.assertPartialResponse(new Request(url), "exclusiveTo", false);
+ tester.assertPartialResponse(new Request(url), "\"exclusiveTo", false);
+ tester.assertPartialResponse(new Request(url), "\"hostTTL\"", false);
+ tester.assertPartialResponse(new Request(url), "\"hostEmptyAt\"", false);
}
@Test
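
The NodesV2ApiTest hunk above adds coverage for two new patchable host fields, hostTTL and hostEmptyAt, alongside quoting the partial-response match strings. For readability, the PATCH payloads it uses are spelled out below; the unit interpretation is an assumption based on the numbers, not something this diff states:

    class HostTtlPatchPayloads {
        static final String SET_TTL      = "{\"hostTTL\": 86400000}";   // 86 400 000 ms = 24 hours
        static final String SET_EMPTY_AT = "{\"hostEmptyAt\": 789}";    // an arbitrary instant in this fixture
        static final String CLEAR_BOTH   = "{\"hostTTL\": null, \"hostEmptyAt\": null}"; // PATCHing null removes the fields again
    }
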
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application1.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application1.json
index 92e5425e84e..28bde7bd966 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application1.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application1.json
@@ -140,7 +140,7 @@
"at" : 123
}
],
- "scalingDuration": 600000
+ "scalingDuration": 300000
}
}
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application2.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application2.json
index cba56e1c51e..2a8f436b30c 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application2.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application2.json
@@ -94,7 +94,7 @@
"at" : 123
}
],
- "scalingDuration": 43200000
+ "scalingDuration": 28800000
}
}
}
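
The two scalingDuration fixture changes above are easier to read as durations; a small reference sketch of the arithmetic (a reading aid, not code from this change):

    import java.time.Duration;

    class ScalingDurationFixtureValues {
        // application1.json: 600 000 ms -> 300 000 ms, i.e. 10 minutes -> 5 minutes
        static final Duration APP1_OLD = Duration.ofMillis(600_000);
        static final Duration APP1_NEW = Duration.ofMillis(300_000);
        // application2.json: 43 200 000 ms -> 28 800 000 ms, i.e. 12 hours -> 8 hours
        static final Duration APP2_OLD = Duration.ofMillis(43_200_000);
        static final Duration APP2_NEW = Duration.ofMillis(28_800_000);
    }
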
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/archives.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/archives.json
index 738d8ee1bb3..51fd7201295 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/archives.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/archives.json
@@ -9,7 +9,7 @@
"uri": "ftp://host/dir/"
},
{
- "account": "777888999000",
+ "account": "aws:777888999000",
"uri": "s3://acc-bucket/"
}
]
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/capacity-zone.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/capacity-zone.json
index 0e14dd8b36f..87b823fbb33 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/capacity-zone.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/capacity-zone.json
@@ -3,7 +3,7 @@
"couldLoseHosts": 4,
"failedTenantParent": "dockerhost1.yahoo.com",
"failedTenant": "host4.yahoo.com",
- "failedTenantResources": "[vcpu: 1.0, memory: 4.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
+ "failedTenantResources": "[vcpu: 1.0, memory: 4.0 Gb, disk: 100.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
"failedTenantAllocation": "allocated to tenant3.application3.instance3 as 'content/id3/0/0/stateful'",
"hostCandidateRejectionReasons": {
"singularReasonFailures": {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/controller1.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/controller1.json
index 42ca28a09d5..a314afebd06 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/controller1.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/controller1.json
@@ -6,8 +6,24 @@
"hostname": "controller1.yahoo.com",
"flavor": "default",
"cpuCores": 2.0,
- "resources":{"vcpu":2.0,"memoryGb":16.0,"diskGb":400.0,"bandwidthGbps":10.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
- "realResources":{"vcpu":2.0,"memoryGb":16.0,"diskGb":400.0,"bandwidthGbps":10.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
+ "resources": {
+ "vcpu": 2.0,
+ "memoryGb": 16.0,
+ "diskGb": 400.0,
+ "bandwidthGbps": 10.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
+ "realResources": {
+ "vcpu": 2.0,
+ "memoryGb": 16.0,
+ "diskGb": 400.0,
+ "bandwidthGbps": 10.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
"environment": "BARE_METAL",
"rebootGeneration": 0,
"currentRebootGeneration": 0,
@@ -31,9 +47,7 @@
"agent": "operator"
}
],
- "ipAddresses": [
- "127.0.0.1"
- ],
+ "ipAddresses": ["127.0.0.1"],
"additionalIpAddresses": [],
- "cloudAccount":"111222333444"
+ "cloudAccount": "aws:111222333444"
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-container1.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-container1.json
index 8ef88eae97d..a1d38ed0e22 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-container1.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-container1.json
@@ -5,9 +5,25 @@
"type": "tenant",
"hostname": "test-node-pool-102-2",
"parentHostname": "dockerhost3.yahoo.com",
- "flavor": "[vcpu: 1.0, memory: 4.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, storage type: remote, architecture: x86_64]",
- "resources":{"vcpu":1.0,"memoryGb":4.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
- "realResources":{"vcpu":1.0,"memoryGb":4.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
+ "flavor": "[vcpu: 1.0, memory: 4.0 Gb, disk: 100.0 Gb, bandwidth: 1.0 Gbps, storage type: remote, architecture: x86_64]",
+ "resources": {
+ "vcpu": 1.0,
+ "memoryGb": 4.0,
+ "diskGb": 100.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
+ "realResources": {
+ "vcpu": 1.0,
+ "memoryGb": 4.0,
+ "diskGb": 100.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
"environment": "DOCKER_CONTAINER",
"owner": {
"tenant": "tenant3",
@@ -25,7 +41,15 @@
"currentRestartGeneration": 0,
"wantedDockerImage": "docker-registry.domain.tld:8080/dist/vespa:6.42.0",
"wantedVespaVersion": "6.42.0",
- "requestedResources": { "vcpu":1.0, "memoryGb":4.0, "diskGb":100.0, "bandwidthGbps":1.0,"diskSpeed":"fast", "storageType":"any","architecture":"any" },
+ "requestedResources": {
+ "vcpu": 1.0,
+ "memoryGb": 4.0,
+ "diskGb": 100.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "any",
+ "architecture": "any"
+ },
"rebootGeneration": 0,
"currentRebootGeneration": 0,
"failCount": 0,
@@ -63,9 +87,7 @@
"agent": "application"
}
],
- "ipAddresses": [
- "::102:2"
- ],
+ "ipAddresses": ["::102:2"],
"additionalIpAddresses": [],
- "cloudAccount": "111222333444"
+ "cloudAccount": "aws:111222333444"
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-os-upgrade-complete.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-os-upgrade-complete.json
index b5e8a040c30..cc38ae425b1 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-os-upgrade-complete.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-os-upgrade-complete.json
@@ -6,8 +6,24 @@
"hostname": "dockerhost1.yahoo.com",
"flavor": "large",
"cpuCores": 4.0,
- "resources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
- "realResources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
+ "resources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
+ "realResources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
"environment": "BARE_METAL",
"owner": {
"tenant": "zoneapp",
@@ -25,7 +41,15 @@
"currentRestartGeneration": 0,
"wantedDockerImage": "docker-registry.domain.tld:8080/dist/vespa:6.42.0",
"wantedVespaVersion": "6.42.0",
- "requestedResources": { "vcpu":4.0, "memoryGb":32.0, "diskGb":1600.0, "bandwidthGbps":20.0, "diskSpeed": "fast", "storageType":"remote","architecture":"x86_64" },
+ "requestedResources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
"rebootGeneration": 0,
"currentRebootGeneration": 0,
"currentOsVersion": "7.5.2",
@@ -90,14 +114,7 @@
"agent": "application"
}
],
- "ipAddresses": [
- "127.0.100.1",
- "::100:1"
- ],
- "additionalIpAddresses": [
- "::100:2",
- "::100:3",
- "::100:4"
- ],
- "cloudAccount": "111222333444"
+ "ipAddresses": ["127.0.100.1", "::100:1"],
+ "additionalIpAddresses": ["::100:2", "::100:3", "::100:4"],
+ "cloudAccount": "aws:111222333444"
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports-2.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports-2.json
index afed3b4e17e..9094844126c 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports-2.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports-2.json
@@ -6,8 +6,24 @@
"hostname": "dockerhost1.yahoo.com",
"flavor": "large",
"cpuCores": 4.0,
- "resources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
- "realResources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
+ "resources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
+ "realResources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
"environment": "BARE_METAL",
"owner": {
"tenant": "zoneapp",
@@ -32,7 +48,7 @@
"bandwidthGbps": 20.0,
"diskSpeed": "fast",
"storageType": "remote",
- "architecture":"x86_64"
+ "architecture": "x86_64"
},
"rebootGeneration": 0,
"currentRebootGeneration": 0,
@@ -107,15 +123,8 @@
"agent": "system"
}
],
- "ipAddresses": [
- "127.0.100.1",
- "::100:1"
- ],
- "additionalIpAddresses": [
- "::100:2",
- "::100:3",
- "::100:4"
- ],
+ "ipAddresses": ["127.0.100.1", "::100:1"],
+ "additionalIpAddresses": ["::100:2", "::100:3", "::100:4"],
"reports": {
"actualCpuCores": {
"createdMillis": 3
@@ -126,12 +135,9 @@
"type": "HARD_FAIL",
"details": {
"inGib": 3,
- "disks": [
- "/dev/sda1",
- "/dev/sdb3"
- ]
+ "disks": ["/dev/sda1", "/dev/sdb3"]
}
}
},
- "cloudAccount": "111222333444"
+ "cloudAccount": "aws:111222333444"
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports-3.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports-3.json
index 1c366d634cc..158c5388c72 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports-3.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports-3.json
@@ -6,8 +6,24 @@
"hostname": "dockerhost1.yahoo.com",
"flavor": "large",
"cpuCores": 4.0,
- "resources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
- "realResources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
+ "resources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
+ "realResources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
"environment": "BARE_METAL",
"owner": {
"tenant": "zoneapp",
@@ -32,7 +48,7 @@
"bandwidthGbps": 20.0,
"diskSpeed": "fast",
"storageType": "remote",
- "architecture":"x86_64"
+ "architecture": "x86_64"
},
"rebootGeneration": 0,
"currentRebootGeneration": 0,
@@ -107,19 +123,12 @@
"agent": "system"
}
],
- "ipAddresses": [
- "127.0.100.1",
- "::100:1"
- ],
- "additionalIpAddresses": [
- "::100:2",
- "::100:3",
- "::100:4"
- ],
+ "ipAddresses": ["127.0.100.1", "::100:1"],
+ "additionalIpAddresses": ["::100:2", "::100:3", "::100:4"],
"reports": {
"actualCpuCores": {
"createdMillis": 3
}
},
- "cloudAccount": "111222333444"
+ "cloudAccount": "aws:111222333444"
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports-4.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports-4.json
index 98e3920b910..0984a08ad9f 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports-4.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports-4.json
@@ -6,8 +6,24 @@
"hostname": "dockerhost1.yahoo.com",
"flavor": "large",
"cpuCores": 4.0,
- "resources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
- "realResources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
+ "resources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
+ "realResources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
"environment": "BARE_METAL",
"owner": {
"tenant": "zoneapp",
@@ -32,7 +48,7 @@
"bandwidthGbps": 20.0,
"diskSpeed": "fast",
"storageType": "remote",
- "architecture":"x86_64"
+ "architecture": "x86_64"
},
"rebootGeneration": 0,
"currentRebootGeneration": 0,
@@ -107,14 +123,7 @@
"agent": "system"
}
],
- "ipAddresses": [
- "127.0.100.1",
- "::100:1"
- ],
- "additionalIpAddresses": [
- "::100:2",
- "::100:3",
- "::100:4"
- ],
- "cloudAccount": "111222333444"
+ "ipAddresses": ["127.0.100.1", "::100:1"],
+ "additionalIpAddresses": ["::100:2", "::100:3", "::100:4"],
+ "cloudAccount": "aws:111222333444"
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports.json
index dbe0222a848..5a6e8f943cd 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1-reports.json
@@ -6,8 +6,24 @@
"hostname": "dockerhost1.yahoo.com",
"flavor": "large",
"cpuCores": 4.0,
- "resources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
- "realResources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
+ "resources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
+ "realResources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
"environment": "BARE_METAL",
"owner": {
"tenant": "zoneapp",
@@ -32,7 +48,7 @@
"bandwidthGbps": 20.0,
"diskSpeed": "fast",
"storageType": "remote",
- "architecture":"x86_64"
+ "architecture": "x86_64"
},
"rebootGeneration": 0,
"currentRebootGeneration": 0,
@@ -107,15 +123,8 @@
"agent": "system"
}
],
- "ipAddresses": [
- "127.0.100.1",
- "::100:1"
- ],
- "additionalIpAddresses": [
- "::100:2",
- "::100:3",
- "::100:4"
- ],
+ "ipAddresses": ["127.0.100.1", "::100:1"],
+ "additionalIpAddresses": ["::100:2", "::100:3", "::100:4"],
"reports": {
"actualCpuCores": {
"createdMillis": 1,
@@ -129,12 +138,9 @@
"type": "HARD_FAIL",
"details": {
"inGib": 3,
- "disks": [
- "/dev/sda1",
- "/dev/sdb3"
- ]
+ "disks": ["/dev/sda1", "/dev/sdb3"]
}
}
},
- "cloudAccount": "111222333444"
+ "cloudAccount": "aws:111222333444"
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1.json
index b5c61780c51..7327d003329 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node1.json
@@ -6,8 +6,24 @@
"hostname": "dockerhost1.yahoo.com",
"flavor": "large",
"cpuCores": 4.0,
- "resources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
- "realResources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
+ "resources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
+ "realResources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
"environment": "BARE_METAL",
"owner": {
"tenant": "zoneapp",
@@ -25,7 +41,15 @@
"currentRestartGeneration": 0,
"wantedDockerImage": "docker-registry.domain.tld:8080/dist/vespa:6.42.0",
"wantedVespaVersion": "6.42.0",
- "requestedResources": { "vcpu":4.0, "memoryGb":32.0, "diskGb":1600.0, "bandwidthGbps":20.0, "diskSpeed":"fast", "storageType":"remote","architecture":"x86_64" },
+ "requestedResources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
"rebootGeneration": 0,
"currentRebootGeneration": 0,
"deferOsUpgrade": true,
@@ -89,14 +113,7 @@
"agent": "application"
}
],
- "ipAddresses": [
- "127.0.100.1",
- "::100:1"
- ],
- "additionalIpAddresses": [
- "::100:2",
- "::100:3",
- "::100:4"
- ],
- "cloudAccount": "111222333444"
+ "ipAddresses": ["127.0.100.1", "::100:1"],
+ "additionalIpAddresses": ["::100:2", "::100:3", "::100:4"],
+ "cloudAccount": "aws:111222333444"
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node2.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node2.json
index f7e02261065..61b664aeba4 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node2.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node2.json
@@ -6,8 +6,24 @@
"hostname": "dockerhost2.yahoo.com",
"flavor": "large",
"cpuCores": 4.0,
- "resources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
- "realResources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
+ "resources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
+ "realResources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
"environment": "BARE_METAL",
"owner": {
"tenant": "zoneapp",
@@ -25,7 +41,15 @@
"currentRestartGeneration": 0,
"wantedDockerImage": "docker-registry.domain.tld:8080/dist/vespa:6.42.0",
"wantedVespaVersion": "6.42.0",
- "requestedResources": { "vcpu":4.0, "memoryGb":32.0, "diskGb":1600.0, "bandwidthGbps":20.0, "diskSpeed":"fast", "storageType":"remote","architecture":"x86_64" },
+ "requestedResources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
"rebootGeneration": 0,
"currentRebootGeneration": 0,
"deferOsUpgrade": true,
@@ -89,15 +113,8 @@
"agent": "application"
}
],
- "ipAddresses": [
- "127.0.101.1",
- "::101:1"
- ],
- "additionalIpAddresses": [
- "::101:2",
- "::101:3",
- "::101:4"
- ],
- "cloudAccount": "777888999000",
- "wireguardPubkey":"000011112222333344445555666677778888999900c="
+ "ipAddresses": ["127.0.101.1", "::101:1"],
+ "additionalIpAddresses": ["::101:2", "::101:3", "::101:4"],
+ "cloudAccount": "aws:777888999000",
+ "wireguardPubkey": "000011112222333344445555666677778888999900c="
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node3.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node3.json
index 480e8f7f910..af523551bfa 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node3.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node3.json
@@ -6,8 +6,24 @@
"hostname": "dockerhost3.yahoo.com",
"flavor": "large",
"cpuCores": 4.0,
- "resources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
- "realResources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
+ "resources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
+ "realResources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
"environment": "BARE_METAL",
"owner": {
"tenant": "zoneapp",
@@ -25,7 +41,15 @@
"currentRestartGeneration": 0,
"wantedDockerImage": "docker-registry.domain.tld:8080/dist/vespa:6.42.0",
"wantedVespaVersion": "6.42.0",
- "requestedResources": { "vcpu":4.0, "memoryGb":32.0, "diskGb":1600.0, "bandwidthGbps":20.0, "diskSpeed":"fast", "storageType":"remote","architecture":"x86_64" },
+ "requestedResources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
"rebootGeneration": 0,
"currentRebootGeneration": 0,
"deferOsUpgrade": true,
@@ -89,14 +113,7 @@
"agent": "application"
}
],
- "ipAddresses": [
- "127.0.102.1",
- "::102:1"
- ],
- "additionalIpAddresses": [
- "::102:2",
- "::102:3",
- "::102:4"
- ],
- "cloudAccount": "111222333444"
+ "ipAddresses": ["127.0.102.1", "::102:1"],
+ "additionalIpAddresses": ["::102:2", "::102:3", "::102:4"],
+ "cloudAccount": "aws:111222333444"
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node4.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node4.json
index 163a3d7c244..826ee793b22 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node4.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node4.json
@@ -6,8 +6,24 @@
"hostname": "dockerhost4.yahoo.com",
"flavor": "large",
"cpuCores": 4.0,
- "resources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
- "realResources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
+ "resources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
+ "realResources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
"environment": "BARE_METAL",
"owner": {
"tenant": "zoneapp",
@@ -25,7 +41,15 @@
"currentRestartGeneration": 0,
"wantedDockerImage": "docker-registry.domain.tld:8080/dist/vespa:6.42.0",
"wantedVespaVersion": "6.42.0",
- "requestedResources": { "vcpu":4.0, "memoryGb":32.0, "diskGb":1600.0, "bandwidthGbps":20.0, "diskSpeed":"fast", "storageType":"remote","architecture":"x86_64" },
+ "requestedResources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
"rebootGeneration": 0,
"currentRebootGeneration": 0,
"deferOsUpgrade": true,
@@ -89,14 +113,7 @@
"agent": "application"
}
],
- "ipAddresses": [
- "127.0.103.1",
- "::103:1"
- ],
- "additionalIpAddresses": [
- "::103:2",
- "::103:3",
- "::103:4"
- ],
- "cloudAccount": "111222333444"
+ "ipAddresses": ["127.0.103.1", "::103:1"],
+ "additionalIpAddresses": ["::103:2", "::103:3", "::103:4"],
+ "cloudAccount": "aws:111222333444"
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node5.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node5.json
index c160c5dcdfe..459d51ce5bc 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node5.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/docker-node5.json
@@ -6,8 +6,24 @@
"hostname": "dockerhost5.yahoo.com",
"flavor": "large",
"cpuCores": 4.0,
- "resources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
- "realResources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
+ "resources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
+ "realResources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
"environment": "BARE_METAL",
"owner": {
"tenant": "zoneapp",
@@ -25,7 +41,15 @@
"currentRestartGeneration": 0,
"wantedDockerImage": "docker-registry.domain.tld:8080/dist/vespa:6.42.0",
"wantedVespaVersion": "6.42.0",
- "requestedResources": { "vcpu":4.0, "memoryGb":32.0, "diskGb":1600.0, "bandwidthGbps":20.0, "diskSpeed":"fast", "storageType":"remote","architecture":"x86_64" },
+ "requestedResources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
"rebootGeneration": 0,
"currentRebootGeneration": 0,
"deferOsUpgrade": true,
@@ -89,14 +113,7 @@
"agent": "application"
}
],
- "ipAddresses": [
- "127.0.104.1",
- "::104:1"
- ],
- "additionalIpAddresses": [
- "::104:2",
- "::104:3",
- "::104:4"
- ],
- "cloudAccount": "111222333444"
+ "ipAddresses": ["127.0.104.1", "::104:1"],
+ "additionalIpAddresses": ["::104:2", "::104:3", "::104:4"],
+ "cloudAccount": "aws:111222333444"
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/dockerhost1-with-firmware-data.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/dockerhost1-with-firmware-data.json
index 6d62c31ce56..8bdcd8c44c5 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/dockerhost1-with-firmware-data.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/dockerhost1-with-firmware-data.json
@@ -6,8 +6,24 @@
"hostname": "dockerhost1.yahoo.com",
"flavor": "large",
"cpuCores": 4.0,
- "resources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
- "realResources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
+ "resources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
+ "realResources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
"environment": "BARE_METAL",
"owner": {
"tenant": "zoneapp",
@@ -25,7 +41,15 @@
"currentRestartGeneration": 0,
"wantedDockerImage": "docker-registry.domain.tld:8080/dist/vespa:6.42.0",
"wantedVespaVersion": "6.42.0",
- "requestedResources": { "vcpu":4.0, "memoryGb":32.0, "diskGb":1600.0, "bandwidthGbps":20.0, "diskSpeed":"fast", "storageType":"remote","architecture":"x86_64" },
+ "requestedResources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
"rebootGeneration": 0,
"currentRebootGeneration": 0,
"deferOsUpgrade": true,
@@ -101,14 +125,7 @@
"agent": "application"
}
],
- "ipAddresses": [
- "127.0.100.1",
- "::100:1"
- ],
- "additionalIpAddresses": [
- "::100:2",
- "::100:3",
- "::100:4"
- ],
- "cloudAccount": "111222333444"
+ "ipAddresses": ["127.0.100.1", "::100:1"],
+ "additionalIpAddresses": ["::100:2", "::100:3", "::100:4"],
+ "cloudAccount": "aws:111222333444"
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/dockerhost6.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/dockerhost6.json
index add2fcf87a8..b4be3fe418c 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/dockerhost6.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/dockerhost6.json
@@ -6,8 +6,24 @@
"hostname": "dockerhost6.yahoo.com",
"flavor": "arm64",
"cpuCores": 4.0,
- "resources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote","architecture":"arm64"},
- "realResources":{"vcpu":4.0,"memoryGb":32.0,"diskGb":1600.0,"bandwidthGbps":20.0,"diskSpeed":"fast","storageType":"remote","architecture":"arm64"},
+ "resources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "arm64"
+ },
+ "realResources": {
+ "vcpu": 4.0,
+ "memoryGb": 32.0,
+ "diskGb": 1600.0,
+ "bandwidthGbps": 20.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "arm64"
+ },
"environment": "BARE_METAL",
"rebootGeneration": 0,
"currentRebootGeneration": 0,
@@ -69,5 +85,5 @@
],
"ipAddresses": [],
"additionalIpAddresses": [],
- "cloudAccount": "111222333444"
+ "cloudAccount": "aws:111222333444"
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/maintenance.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/maintenance.json
index 4c8c5d80018..f39a086e97a 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/maintenance.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/maintenance.json
@@ -4,6 +4,9 @@
"name": "AutoscalingMaintainer"
},
{
+ "name": "DeprovisionedExpirer"
+ },
+ {
"name": "DirtyExpirer"
},
{
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node1.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node1.json
index d90ed692f1c..cddf865361a 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node1.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node1.json
@@ -4,9 +4,25 @@
"state": "active",
"type": "tenant",
"hostname": "host1.yahoo.com",
- "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
- "resources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
- "realResources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
+ "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk: 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
+ "resources": {
+ "vcpu": 2.0,
+ "memoryGb": 8.0,
+ "diskGb": 50.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "local",
+ "architecture": "x86_64"
+ },
+ "realResources": {
+ "vcpu": 2.0,
+ "memoryGb": 8.0,
+ "diskGb": 50.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "local",
+ "architecture": "x86_64"
+ },
"environment": "DOCKER_CONTAINER",
"owner": {
"tenant": "tenant1",
@@ -24,7 +40,15 @@
"currentRestartGeneration": 0,
"wantedDockerImage": "docker-registry.domain.tld:8080/dist/vespa:6.42.0",
"wantedVespaVersion": "6.42.0",
- "requestedResources": { "vcpu":2.0, "memoryGb":8.0, "diskGb":50.0, "bandwidthGbps":1.0, "diskSpeed":"fast", "storageType":"any","architecture":"any" },
+ "requestedResources": {
+ "vcpu": 2.0,
+ "memoryGb": 8.0,
+ "diskGb": 50.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "any",
+ "architecture": "any"
+ },
"rebootGeneration": 0,
"currentRebootGeneration": 0,
"failCount": 0,
@@ -52,7 +76,8 @@
{
"event": "activated",
"at": 123,
- "agent": "application"}
+ "agent": "application"
+ }
],
"log": [
{
@@ -86,10 +111,7 @@
"agent": "application"
}
],
- "ipAddresses": [
- "127.0.1.1",
- "::1:1"
- ],
+ "ipAddresses": ["127.0.1.1", "::1:1"],
"additionalIpAddresses": [],
- "cloudAccount": "111222333444"
+ "cloudAccount": "aws:111222333444"
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node10.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node10.json
index bec194ea325..7ccc0660548 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node10.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node10.json
@@ -5,9 +5,25 @@
"type": "tenant",
"hostname": "host10.yahoo.com",
"parentHostname": "parent1.yahoo.com",
- "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
- "resources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
- "realResources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
+ "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk: 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
+ "resources": {
+ "vcpu": 2.0,
+ "memoryGb": 8.0,
+ "diskGb": 50.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "local",
+ "architecture": "x86_64"
+ },
+ "realResources": {
+ "vcpu": 2.0,
+ "memoryGb": 8.0,
+ "diskGb": 50.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "local",
+ "architecture": "x86_64"
+ },
"environment": "DOCKER_CONTAINER",
"owner": {
"tenant": "tenant1",
@@ -25,7 +41,15 @@
"currentRestartGeneration": 0,
"wantedDockerImage": "docker-registry.domain.tld:8080/dist/vespa:6.42.0",
"wantedVespaVersion": "6.42.0",
- "requestedResources": { "vcpu":2.0, "memoryGb":8.0, "diskGb":50.0, "bandwidthGbps":1.0, "diskSpeed":"fast", "storageType":"any","architecture":"any" },
+ "requestedResources": {
+ "vcpu": 2.0,
+ "memoryGb": 8.0,
+ "diskGb": 50.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "any",
+ "architecture": "any"
+ },
"rebootGeneration": 0,
"currentRebootGeneration": 0,
"vespaVersion": "5.104.142",
@@ -90,10 +114,7 @@
"agent": "application"
}
],
- "ipAddresses": [
- "127.0.10.1",
- "::10:1"
- ],
+ "ipAddresses": ["127.0.10.1", "::10:1"],
"additionalIpAddresses": [],
- "cloudAccount": "111222333444"
+ "cloudAccount": "aws:111222333444"
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node11.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node11.json
index d7e07f02f3a..60c55713b03 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node11.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node11.json
@@ -5,9 +5,25 @@
"type": "tenant",
"hostname": "host11.yahoo.com",
"parentHostname": "parent.host.yahoo.com",
- "flavor": "[vcpu: 1.0, memory: 1.0 Gb, disk 100.0 Gb, bandwidth: 0.3 Gbps, architecture: any]",
- "resources":{"vcpu":1.0,"memoryGb":1.0,"diskGb":100.0,"bandwidthGbps":0.3,"diskSpeed":"fast","storageType":"any","architecture":"any"},
- "realResources":{"vcpu":1.0,"memoryGb":1.0,"diskGb":100.0,"bandwidthGbps":0.3,"diskSpeed":"fast","storageType":"any","architecture":"any"},
+ "flavor": "[vcpu: 1.0, memory: 1.0 Gb, disk: 100.0 Gb, bandwidth: 0.3 Gbps, architecture: any]",
+ "resources": {
+ "vcpu": 1.0,
+ "memoryGb": 1.0,
+ "diskGb": 100.0,
+ "bandwidthGbps": 0.3,
+ "diskSpeed": "fast",
+ "storageType": "any",
+ "architecture": "any"
+ },
+ "realResources": {
+ "vcpu": 1.0,
+ "memoryGb": 1.0,
+ "diskGb": 100.0,
+ "bandwidthGbps": 0.3,
+ "diskSpeed": "fast",
+ "storageType": "any",
+ "architecture": "any"
+ },
"environment": "DOCKER_CONTAINER",
"rebootGeneration": 0,
"currentRebootGeneration": 0,
@@ -31,10 +47,7 @@
"agent": "operator"
}
],
- "ipAddresses": [
- "::11"
- ],
- "additionalIpAddresses": [
- ],
- "cloudAccount":"111222333444"
+ "ipAddresses": ["::11"],
+ "additionalIpAddresses": [],
+ "cloudAccount": "aws:111222333444"
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node13.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node13.json
index 73c34a7fa9e..f5152efd7cb 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node13.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node13.json
@@ -4,7 +4,7 @@
"state": "active",
"type": "tenant",
"hostname": "host13.yahoo.com",
- "flavor": "[vcpu: 10.0, memory: 48.0 Gb, disk 500.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
+ "flavor": "[vcpu: 10.0, memory: 48.0 Gb, disk: 500.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
"resources":{"vcpu":10.0,"memoryGb":48.0,"diskGb":500.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
"realResources":{"vcpu":10.0,"memoryGb":48.0,"diskGb":500.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
"environment": "DOCKER_CONTAINER",
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node14.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node14.json
index abb0ba57e49..f48e52b18bf 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node14.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node14.json
@@ -4,7 +4,7 @@
"state": "active",
"type": "tenant",
"hostname": "host14.yahoo.com",
- "flavor": "[vcpu: 10.0, memory: 48.0 Gb, disk 500.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
+ "flavor": "[vcpu: 10.0, memory: 48.0 Gb, disk: 500.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
"resources":{"vcpu":10.0,"memoryGb":48.0,"diskGb":500.0,"bandwidthGbps":1.0, "diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
"realResources":{"vcpu":10.0,"memoryGb":48.0,"diskGb":500.0,"bandwidthGbps":1.0, "diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
"environment": "DOCKER_CONTAINER",
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node2.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node2.json
index 9cd675163f0..7266343069e 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node2.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node2.json
@@ -4,9 +4,25 @@
"state": "active",
"type": "tenant",
"hostname": "host2.yahoo.com",
- "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
- "resources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
- "realResources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
+ "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk: 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
+ "resources": {
+ "vcpu": 2.0,
+ "memoryGb": 8.0,
+ "diskGb": 50.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "local",
+ "architecture": "x86_64"
+ },
+ "realResources": {
+ "vcpu": 2.0,
+ "memoryGb": 8.0,
+ "diskGb": 50.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "local",
+ "architecture": "x86_64"
+ },
"environment": "DOCKER_CONTAINER",
"owner": {
"tenant": "tenant2",
@@ -24,7 +40,15 @@
"currentRestartGeneration": 0,
"wantedDockerImage": "docker-registry.domain.tld:8080/dist/vespa:6.42.0",
"wantedVespaVersion": "6.42.0",
- "requestedResources": { "vcpu":2.0, "memoryGb":8.0, "diskGb":50.0, "bandwidthGbps":1.0, "diskSpeed":"fast", "storageType":"any","architecture":"any" },
+ "requestedResources": {
+ "vcpu": 2.0,
+ "memoryGb": 8.0,
+ "diskGb": 50.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "any",
+ "architecture": "any"
+ },
"rebootGeneration": 0,
"currentRebootGeneration": 0,
"failCount": 0,
@@ -87,10 +111,7 @@
"agent": "application"
}
],
- "ipAddresses": [
- "127.0.2.1",
- "::2:1"
- ],
+ "ipAddresses": ["127.0.2.1", "::2:1"],
"additionalIpAddresses": [],
- "cloudAccount": "111222333444"
+ "cloudAccount": "aws:111222333444"
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node3.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node3.json
index 1c560c2f95b..1c7162f64d4 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node3.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node3.json
@@ -4,9 +4,25 @@
"state": "ready",
"type": "tenant",
"hostname": "host3.yahoo.com",
- "flavor": "[vcpu: 0.5, memory: 48.0 Gb, disk 500.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
- "resources":{"vcpu":0.5,"memoryGb":48.0,"diskGb":500.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
- "realResources":{"vcpu":0.5,"memoryGb":48.0,"diskGb":500.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
+ "flavor": "[vcpu: 0.5, memory: 48.0 Gb, disk: 500.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
+ "resources": {
+ "vcpu": 0.5,
+ "memoryGb": 48.0,
+ "diskGb": 500.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "local",
+ "architecture": "x86_64"
+ },
+ "realResources": {
+ "vcpu": 0.5,
+ "memoryGb": 48.0,
+ "diskGb": 500.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "local",
+ "architecture": "x86_64"
+ },
"environment": "DOCKER_CONTAINER",
"rebootGeneration": 0,
"currentRebootGeneration": 0,
@@ -45,10 +61,7 @@
"agent": "system"
}
],
- "ipAddresses": [
- "127.0.3.1",
- "::3:1"
- ],
+ "ipAddresses": ["127.0.3.1", "::3:1"],
"additionalIpAddresses": [],
- "cloudAccount": "777888999000"
+ "cloudAccount": "aws:777888999000"
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-after-changes.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-after-changes.json
index 03621c40f67..950a69958d0 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-after-changes.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-after-changes.json
@@ -7,8 +7,24 @@
"parentHostname": "parent.yahoo.com",
"flavor": "d-2-8-100",
"cpuCores": 2.0,
- "resources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
- "realResources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
+ "resources": {
+ "vcpu": 2.0,
+ "memoryGb": 8.0,
+ "diskGb": 100.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
+ "realResources": {
+ "vcpu": 2.0,
+ "memoryGb": 8.0,
+ "diskGb": 100.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
"environment": "DOCKER_CONTAINER",
"owner": {
"tenant": "tenant3",
@@ -26,7 +42,15 @@
"currentRestartGeneration": 1,
"wantedDockerImage": "docker-registry.domain.tld:8080/dist/vespa:6.42.0",
"wantedVespaVersion": "6.42.0",
- "requestedResources": { "vcpu":1.0, "memoryGb":4.0, "diskGb":100.0, "bandwidthGbps":1.0, "diskSpeed":"fast", "storageType":"any","architecture":"any" },
+ "requestedResources": {
+ "vcpu": 1.0,
+ "memoryGb": 4.0,
+ "diskGb": 100.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "any",
+ "architecture": "any"
+ },
"orchestratorStatus": "ALLOWED_TO_BE_DOWN",
"suspendedSinceMillis": 0,
"rebootGeneration": 2,
@@ -133,10 +157,7 @@
"agent": "operator"
}
],
- "ipAddresses": [
- "127.0.0.1",
- "::1"
- ],
+ "ipAddresses": ["127.0.0.1", "::1"],
"additionalIpAddresses": [],
- "cloudAccount": "111222333444"
+ "cloudAccount": "aws:111222333444"
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-wg.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-wg.json
index a1883ba4b25..7961fb08719 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-wg.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-wg.json
@@ -5,9 +5,25 @@
"type": "tenant",
"hostname": "host4.yahoo.com",
"parentHostname": "dockerhost1.yahoo.com",
- "flavor": "[vcpu: 1.0, memory: 4.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
- "resources":{"vcpu":1.0,"memoryGb":4.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
- "realResources":{"vcpu":1.0,"memoryGb":4.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
+ "flavor": "[vcpu: 1.0, memory: 4.0 Gb, disk: 100.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
+ "resources": {
+ "vcpu": 1.0,
+ "memoryGb": 4.0,
+ "diskGb": 100.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "local",
+ "architecture": "x86_64"
+ },
+ "realResources": {
+ "vcpu": 1.0,
+ "memoryGb": 4.0,
+ "diskGb": 100.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "local",
+ "architecture": "x86_64"
+ },
"environment": "DOCKER_CONTAINER",
"owner": {
"tenant": "tenant3",
@@ -25,7 +41,15 @@
"currentRestartGeneration": 0,
"wantedDockerImage": "docker-registry.domain.tld:8080/dist/vespa:6.42.0",
"wantedVespaVersion": "6.42.0",
- "requestedResources": { "vcpu":1.0, "memoryGb":4.0, "diskGb":100.0, "bandwidthGbps":1.0, "diskSpeed":"fast", "storageType":"any","architecture":"any" },
+ "requestedResources": {
+ "vcpu": 1.0,
+ "memoryGb": 4.0,
+ "diskGb": 100.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "any",
+ "architecture": "any"
+ },
"rebootGeneration": 0,
"currentRebootGeneration": 0,
"vespaVersion": "6.41.0",
@@ -90,11 +114,8 @@
"agent": "application"
}
],
- "ipAddresses": [
- "127.0.4.1",
- "::4:1"
- ],
+ "ipAddresses": ["127.0.4.1", "::4:1"],
"additionalIpAddresses": [],
- "cloudAccount": "111222333444",
+ "cloudAccount": "aws:111222333444",
"wireguardPubkey": "lololololololololololololololololololololoo="
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-with-hostnames.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-with-hostnames.json
index 50007fd6610..ca9cf44df80 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-with-hostnames.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-with-hostnames.json
@@ -5,9 +5,25 @@
"type": "tenant",
"hostname": "host4.yahoo.com",
"parentHostname": "dockerhost1.yahoo.com",
- "flavor": "[vcpu: 1.0, memory: 4.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
- "resources":{"vcpu":1.0,"memoryGb":4.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
- "realResources":{"vcpu":1.0,"memoryGb":4.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
+ "flavor": "[vcpu: 1.0, memory: 4.0 Gb, disk: 100.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
+ "resources": {
+ "vcpu": 1.0,
+ "memoryGb": 4.0,
+ "diskGb": 100.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "local",
+ "architecture": "x86_64"
+ },
+ "realResources": {
+ "vcpu": 1.0,
+ "memoryGb": 4.0,
+ "diskGb": 100.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "local",
+ "architecture": "x86_64"
+ },
"environment": "DOCKER_CONTAINER",
"owner": {
"tenant": "tenant3",
@@ -25,7 +41,15 @@
"currentRestartGeneration": 0,
"wantedDockerImage": "docker-registry.domain.tld:8080/dist/vespa:6.42.0",
"wantedVespaVersion": "6.42.0",
- "requestedResources": { "vcpu":1.0, "memoryGb":4.0, "diskGb":100.0, "bandwidthGbps":1.0, "diskSpeed":"fast", "storageType":"any","architecture":"any" },
+ "requestedResources": {
+ "vcpu": 1.0,
+ "memoryGb": 4.0,
+ "diskGb": 100.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "any",
+ "architecture": "any"
+ },
"rebootGeneration": 0,
"currentRebootGeneration": 0,
"vespaVersion": "6.41.0",
@@ -90,11 +114,8 @@
"agent": "application"
}
],
- "ipAddresses": [
- "127.0.4.1",
- "::4:1"
- ],
+ "ipAddresses": ["127.0.4.1", "::4:1"],
"additionalIpAddresses": [],
- "additionalHostnames": ["a","b"],
- "cloudAccount": "111222333444"
+ "additionalHostnames": ["a", "b"],
+ "cloudAccount": "aws:111222333444"
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4.json
index f206adf4366..626765239e6 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4.json
@@ -5,9 +5,25 @@
"type": "tenant",
"hostname": "host4.yahoo.com",
"parentHostname": "dockerhost1.yahoo.com",
- "flavor": "[vcpu: 1.0, memory: 4.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
- "resources":{"vcpu":1.0,"memoryGb":4.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
- "realResources":{"vcpu":1.0,"memoryGb":4.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
+ "flavor": "[vcpu: 1.0, memory: 4.0 Gb, disk: 100.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
+ "resources": {
+ "vcpu": 1.0,
+ "memoryGb": 4.0,
+ "diskGb": 100.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "local",
+ "architecture": "x86_64"
+ },
+ "realResources": {
+ "vcpu": 1.0,
+ "memoryGb": 4.0,
+ "diskGb": 100.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "local",
+ "architecture": "x86_64"
+ },
"environment": "DOCKER_CONTAINER",
"owner": {
"tenant": "tenant3",
@@ -25,7 +41,15 @@
"currentRestartGeneration": 0,
"wantedDockerImage": "docker-registry.domain.tld:8080/dist/vespa:6.42.0",
"wantedVespaVersion": "6.42.0",
- "requestedResources": { "vcpu":1.0, "memoryGb":4.0, "diskGb":100.0, "bandwidthGbps":1.0, "diskSpeed":"fast", "storageType":"any","architecture":"any" },
+ "requestedResources": {
+ "vcpu": 1.0,
+ "memoryGb": 4.0,
+ "diskGb": 100.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "any",
+ "architecture": "any"
+ },
"rebootGeneration": 0,
"currentRebootGeneration": 0,
"vespaVersion": "6.41.0",
@@ -90,10 +114,7 @@
"agent": "application"
}
],
- "ipAddresses": [
- "127.0.4.1",
- "::4:1"
- ],
+ "ipAddresses": ["127.0.4.1", "::4:1"],
"additionalIpAddresses": [],
- "cloudAccount": "111222333444"
+ "cloudAccount": "aws:111222333444"
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5-after-changes.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5-after-changes.json
index 77dd81c736a..bf2f37d7c50 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5-after-changes.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5-after-changes.json
@@ -5,9 +5,25 @@
"type": "tenant",
"hostname": "host5.yahoo.com",
"parentHostname": "dockerhost2.yahoo.com",
- "flavor": "[vcpu: 1.0, memory: 8.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, disk speed: slow, storage type: remote, architecture: x86_64]",
- "resources":{"vcpu":1.0,"memoryGb":8.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"slow","storageType":"remote","architecture":"x86_64"},
- "realResources":{"vcpu":1.0,"memoryGb":8.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"slow","storageType":"remote","architecture":"x86_64"},
+ "flavor": "[vcpu: 1.0, memory: 8.0 Gb, disk: 100.0 Gb, bandwidth: 1.0 Gbps, disk speed: slow, storage type: remote, architecture: x86_64]",
+ "resources": {
+ "vcpu": 1.0,
+ "memoryGb": 8.0,
+ "diskGb": 100.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "slow",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
+ "realResources": {
+ "vcpu": 1.0,
+ "memoryGb": 8.0,
+ "diskGb": 100.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "slow",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
"environment": "DOCKER_CONTAINER",
"rebootGeneration": 0,
"currentRebootGeneration": 0,
@@ -56,10 +72,7 @@
"agent": "system"
}
],
- "ipAddresses": [
- "127.0.5.1",
- "::5:1"
- ],
+ "ipAddresses": ["127.0.5.1", "::5:1"],
"additionalIpAddresses": [],
- "cloudAccount": "111222333444"
+ "cloudAccount": "aws:111222333444"
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5.json
index 8a397fe5faa..2d74768e53c 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5.json
@@ -5,9 +5,25 @@
"type": "tenant",
"hostname": "host5.yahoo.com",
"parentHostname": "dockerhost2.yahoo.com",
- "flavor": "[vcpu: 1.0, memory: 8.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, disk speed: slow, storage type: remote, architecture: x86_64]",
- "resources":{"vcpu":1.0,"memoryGb":8.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"slow","storageType":"remote","architecture":"x86_64"},
- "realResources":{"vcpu":1.0,"memoryGb":8.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"slow","storageType":"remote","architecture":"x86_64"},
+ "flavor": "[vcpu: 1.0, memory: 8.0 Gb, disk: 100.0 Gb, bandwidth: 1.0 Gbps, disk speed: slow, storage type: remote, architecture: x86_64]",
+ "resources": {
+ "vcpu": 1.0,
+ "memoryGb": 8.0,
+ "diskGb": 100.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "slow",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
+ "realResources": {
+ "vcpu": 1.0,
+ "memoryGb": 8.0,
+ "diskGb": 100.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "slow",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
"environment": "DOCKER_CONTAINER",
"rebootGeneration": 0,
"currentRebootGeneration": 0,
@@ -58,10 +74,7 @@
"agent": "system"
}
],
- "ipAddresses": [
- "127.0.5.1",
- "::5:1"
- ],
+ "ipAddresses": ["127.0.5.1", "::5:1"],
"additionalIpAddresses": [],
- "cloudAccount": "111222333444"
+ "cloudAccount": "aws:111222333444"
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node55.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node55.json
index 2bbe4a3024e..2961434af8f 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node55.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node55.json
@@ -4,9 +4,25 @@
"state": "dirty",
"type": "tenant",
"hostname": "host55.yahoo.com",
- "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
- "resources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
- "realResources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
+ "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk: 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
+ "resources": {
+ "vcpu": 2.0,
+ "memoryGb": 8.0,
+ "diskGb": 50.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "local",
+ "architecture": "x86_64"
+ },
+ "realResources": {
+ "vcpu": 2.0,
+ "memoryGb": 8.0,
+ "diskGb": 50.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "local",
+ "architecture": "x86_64"
+ },
"environment": "DOCKER_CONTAINER",
"rebootGeneration": 0,
"currentRebootGeneration": 0,
@@ -40,10 +56,7 @@
"agent": "system"
}
],
- "ipAddresses": [
- "127.0.55.1",
- "::55:1"
- ],
+ "ipAddresses": ["127.0.55.1", "::55:1"],
"additionalIpAddresses": [],
- "cloudAccount": "111222333444"
+ "cloudAccount": "aws:111222333444"
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node6.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node6.json
index 69316b1ca7f..a304de951ed 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node6.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node6.json
@@ -4,9 +4,25 @@
"state": "active",
"type": "tenant",
"hostname": "host6.yahoo.com",
- "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
- "resources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
- "realResources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
+ "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk: 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
+ "resources": {
+ "vcpu": 2.0,
+ "memoryGb": 8.0,
+ "diskGb": 50.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "local",
+ "architecture": "x86_64"
+ },
+ "realResources": {
+ "vcpu": 2.0,
+ "memoryGb": 8.0,
+ "diskGb": 50.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "local",
+ "architecture": "x86_64"
+ },
"environment": "DOCKER_CONTAINER",
"owner": {
"tenant": "tenant2",
@@ -24,7 +40,15 @@
"currentRestartGeneration": 0,
"wantedDockerImage": "docker-registry.domain.tld:8080/dist/vespa:6.42.0",
"wantedVespaVersion": "6.42.0",
- "requestedResources": { "vcpu":2.0, "memoryGb":8.0, "diskGb":50.0, "bandwidthGbps":1.0, "diskSpeed":"fast", "storageType":"any","architecture":"any" },
+ "requestedResources": {
+ "vcpu": 2.0,
+ "memoryGb": 8.0,
+ "diskGb": 50.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "any",
+ "architecture": "any"
+ },
"rebootGeneration": 0,
"currentRebootGeneration": 0,
"failCount": 0,
@@ -87,10 +111,7 @@
"agent": "application"
}
],
- "ipAddresses": [
- "127.0.6.1",
- "::6:1"
- ],
+ "ipAddresses": ["127.0.6.1", "::6:1"],
"additionalIpAddresses": [],
- "cloudAccount": "111222333444"
+ "cloudAccount": "aws:111222333444"
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node7.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node7.json
index 19fa81b82e0..9c77c7778e0 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node7.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node7.json
@@ -4,9 +4,25 @@
"state": "provisioned",
"type": "tenant",
"hostname": "host7.yahoo.com",
- "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
- "resources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
- "realResources":{"vcpu":2.0,"memoryGb":8.0,"diskGb":50.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local","architecture":"x86_64"},
+ "flavor": "[vcpu: 2.0, memory: 8.0 Gb, disk: 50.0 Gb, bandwidth: 1.0 Gbps, storage type: local, architecture: x86_64]",
+ "resources": {
+ "vcpu": 2.0,
+ "memoryGb": 8.0,
+ "diskGb": 50.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "local",
+ "architecture": "x86_64"
+ },
+ "realResources": {
+ "vcpu": 2.0,
+ "memoryGb": 8.0,
+ "diskGb": 50.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "local",
+ "architecture": "x86_64"
+ },
"environment": "DOCKER_CONTAINER",
"rebootGeneration": 0,
"currentRebootGeneration": 0,
@@ -30,10 +46,7 @@
"agent": "system"
}
],
- "ipAddresses": [
- "127.0.7.1",
- "::7:1"
- ],
+ "ipAddresses": ["127.0.7.1", "::7:1"],
"additionalIpAddresses": [],
- "cloudAccount": "111222333444"
+ "cloudAccount": "aws:111222333444"
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node8.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node8.json
index bbef88ca1b0..08c5c0a13c6 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node8.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node8.json
@@ -50,5 +50,5 @@
],
"ipAddresses": ["127.0.8.1"],
"additionalIpAddresses": ["127.0.8.2"],
- "cloudAccount":"111222333444"
+ "cloudAccount": "aws:111222333444"
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node9.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node9.json
index e9199905b21..0285975f9af 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node9.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node9.json
@@ -6,8 +6,24 @@
"hostname": "host9.yahoo.com",
"flavor": "large-variant",
"cpuCores": 64.0,
- "resources":{"vcpu":64.0,"memoryGb":128.0,"diskGb":2000.0,"bandwidthGbps":15.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
- "realResources":{"vcpu":64.0,"memoryGb":128.0,"diskGb":2000.0,"bandwidthGbps":15.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
+ "resources": {
+ "vcpu": 64.0,
+ "memoryGb": 128.0,
+ "diskGb": 2000.0,
+ "bandwidthGbps": 15.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
+ "realResources": {
+ "vcpu": 64.0,
+ "memoryGb": 128.0,
+ "diskGb": 2000.0,
+ "bandwidthGbps": 15.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
"environment": "BARE_METAL",
"rebootGeneration": 0,
"currentRebootGeneration": 0,
@@ -32,11 +48,8 @@
"agent": "operator"
}
],
- "ipAddresses": [
- "127.0.9.1",
- "::9:1"
- ],
+ "ipAddresses": ["127.0.9.1", "::9:1"],
"additionalIpAddresses": [],
"additionalHostnames": ["node9-1.yahoo.com"],
- "cloudAccount":"111222333444"
+ "cloudAccount": "aws:111222333444"
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/parent2.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/parent2.json
index cfcc59a3454..a8759423ecd 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/parent2.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/parent2.json
@@ -8,8 +8,24 @@
"reservedTo": "myTenant",
"exclusiveTo": "tenant1:app1:instance1",
"cpuCores": 64.0,
- "resources": {"vcpu":64.0,"memoryGb":128.0,"diskGb":2000.0,"bandwidthGbps":15.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
- "realResources": {"vcpu":64.0,"memoryGb":128.0,"diskGb":2000.0,"bandwidthGbps":15.0,"diskSpeed":"fast","storageType":"remote","architecture":"x86_64"},
+ "resources": {
+ "vcpu": 64.0,
+ "memoryGb": 128.0,
+ "diskGb": 2000.0,
+ "bandwidthGbps": 15.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
+ "realResources": {
+ "vcpu": 64.0,
+ "memoryGb": 128.0,
+ "diskGb": 2000.0,
+ "bandwidthGbps": 15.0,
+ "diskSpeed": "fast",
+ "storageType": "remote",
+ "architecture": "x86_64"
+ },
"environment": "BARE_METAL",
"rebootGeneration": 0,
"currentRebootGeneration": 0,
@@ -34,10 +50,7 @@
"agent": "operator"
}
],
- "ipAddresses": [
- "127.0.127.1",
- "::127:1"
- ],
+ "ipAddresses": ["127.0.127.1", "::127:1"],
"additionalIpAddresses": [],
- "cloudAccount":"111222333444"
+ "cloudAccount": "aws:111222333444"
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/wireguard.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/wireguard.json
index 660b92d92ba..5369229bd75 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/wireguard.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/wireguard.json
@@ -1,10 +1,9 @@
{
- "configservers":
- [
+ "configservers": [
{
- "hostname":"cfg1.yahoo.com",
- "wireguardPubkey":"lololololololololololololololololololololoo=",
- "ipAddresses":["127.0.201.1","::201:1"]
+ "hostname": "cfg1.yahoo.com",
+ "wireguardPubkey": "lololololololololololololololololololololoo=",
+ "ipAddresses": ["::201:1"]
}
]
}
diff --git a/openai-client/pom.xml b/openai-client/pom.xml
index da2252ee1e6..71a31a7b859 100644
--- a/openai-client/pom.xml
+++ b/openai-client/pom.xml
@@ -48,6 +48,9 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
+ <configuration>
+ <suppressWarningMissingImportPackages>true</suppressWarningMissingImportPackages>
+ </configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
@@ -69,4 +72,4 @@
</plugins>
</build>
-</project>
\ No newline at end of file
+</project>
diff --git a/opennlp-linguistics/pom.xml b/opennlp-linguistics/pom.xml
index a7907ba212f..afd06665cf8 100644
--- a/opennlp-linguistics/pom.xml
+++ b/opennlp-linguistics/pom.xml
@@ -63,6 +63,10 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
+ <configuration>
+ <bundleType>CORE</bundleType>
+ <suppressWarningMissingImportPackages>true</suppressWarningMissingImportPackages>
+ </configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
diff --git a/opennlp-linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpTokenizer.java b/opennlp-linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpTokenizer.java
index 8080dc92729..5452da71775 100644
--- a/opennlp-linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpTokenizer.java
+++ b/opennlp-linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpTokenizer.java
@@ -25,7 +25,6 @@ import java.util.List;
*/
public class OpenNlpTokenizer implements Tokenizer {
- private final static int SPACE_CODE = 32;
private final Normalizer normalizer;
private final Transformer transformer;
private final SimpleTokenizer simpleTokenizer;
@@ -74,26 +73,26 @@ public class OpenNlpTokenizer implements Tokenizer {
}
private SnowballStemmer.ALGORITHM algorithmFor(Language language) {
- switch (language) {
- case DANISH: return SnowballStemmer.ALGORITHM.DANISH;
- case DUTCH: return SnowballStemmer.ALGORITHM.DUTCH;
- case FINNISH: return SnowballStemmer.ALGORITHM.FINNISH;
- case FRENCH: return SnowballStemmer.ALGORITHM.FRENCH;
- case GERMAN: return SnowballStemmer.ALGORITHM.GERMAN;
- case HUNGARIAN: return SnowballStemmer.ALGORITHM.HUNGARIAN;
- case IRISH: return SnowballStemmer.ALGORITHM.IRISH;
- case ITALIAN: return SnowballStemmer.ALGORITHM.ITALIAN;
- case NORWEGIAN_BOKMAL: return SnowballStemmer.ALGORITHM.NORWEGIAN;
- case NORWEGIAN_NYNORSK: return SnowballStemmer.ALGORITHM.NORWEGIAN;
- case PORTUGUESE: return SnowballStemmer.ALGORITHM.PORTUGUESE;
- case ROMANIAN: return SnowballStemmer.ALGORITHM.ROMANIAN;
- case RUSSIAN: return SnowballStemmer.ALGORITHM.RUSSIAN;
- case SPANISH: return SnowballStemmer.ALGORITHM.SPANISH;
- case SWEDISH: return SnowballStemmer.ALGORITHM.SWEDISH;
- case TURKISH: return SnowballStemmer.ALGORITHM.TURKISH;
- case ENGLISH: return SnowballStemmer.ALGORITHM.ENGLISH;
- default: return null;
- }
+ return switch (language) {
+ case DANISH -> SnowballStemmer.ALGORITHM.DANISH;
+ case DUTCH -> SnowballStemmer.ALGORITHM.DUTCH;
+ case FINNISH -> SnowballStemmer.ALGORITHM.FINNISH;
+ case FRENCH -> SnowballStemmer.ALGORITHM.FRENCH;
+ case GERMAN -> SnowballStemmer.ALGORITHM.GERMAN;
+ case HUNGARIAN -> SnowballStemmer.ALGORITHM.HUNGARIAN;
+ case IRISH -> SnowballStemmer.ALGORITHM.IRISH;
+ case ITALIAN -> SnowballStemmer.ALGORITHM.ITALIAN;
+ case NORWEGIAN_BOKMAL -> SnowballStemmer.ALGORITHM.NORWEGIAN;
+ case NORWEGIAN_NYNORSK -> SnowballStemmer.ALGORITHM.NORWEGIAN;
+ case PORTUGUESE -> SnowballStemmer.ALGORITHM.PORTUGUESE;
+ case ROMANIAN -> SnowballStemmer.ALGORITHM.ROMANIAN;
+ case RUSSIAN -> SnowballStemmer.ALGORITHM.RUSSIAN;
+ case SPANISH -> SnowballStemmer.ALGORITHM.SPANISH;
+ case SWEDISH -> SnowballStemmer.ALGORITHM.SWEDISH;
+ case TURKISH -> SnowballStemmer.ALGORITHM.TURKISH;
+ case ENGLISH -> SnowballStemmer.ALGORITHM.ENGLISH;
+ default -> null;
+ };
}
}
diff --git a/opennlp-linguistics/src/test/java/com/yahoo/language/opennlp/OpenNlpTokenizationTestCase.java b/opennlp-linguistics/src/test/java/com/yahoo/language/opennlp/OpenNlpTokenizationTestCase.java
index a5daf7f0531..33e820fbb9a 100644
--- a/opennlp-linguistics/src/test/java/com/yahoo/language/opennlp/OpenNlpTokenizationTestCase.java
+++ b/opennlp-linguistics/src/test/java/com/yahoo/language/opennlp/OpenNlpTokenizationTestCase.java
@@ -2,6 +2,7 @@
package com.yahoo.language.opennlp;
import com.yahoo.language.Language;
+import com.yahoo.language.process.StemList;
import com.yahoo.language.process.StemMode;
import com.yahoo.language.process.Token;
import com.yahoo.language.process.TokenType;
@@ -150,8 +151,7 @@ public class OpenNlpTokenizationTestCase {
@Test
public void testIndexability() {
String input = "tafsirnya\u0648\u0643\u064F\u0646\u0652";
- for (StemMode stemMode : new StemMode[] { StemMode.NONE,
- StemMode.SHORTEST }) {
+ for (StemMode stemMode : new StemMode[] { StemMode.NONE, StemMode.SHORTEST }) {
for (Language l : List.of(Language.INDONESIAN, Language.ENGLISH, Language.ARABIC)) {
for (boolean accentDrop : new boolean[] { true, false }) {
for (Token token : tokenizer.tokenize(input, l, stemMode, accentDrop)) {
@@ -165,6 +165,33 @@ public class OpenNlpTokenizationTestCase {
}
@Test
+ public void testTokenizeEmojis() {
+ String emoji1 = "\uD83D\uDD2A"; // 🔪
+ Iterator<Token> tokens1 = tokenizer.tokenize(emoji1, Language.ENGLISH, StemMode.ALL, true).iterator();
+ assertTrue(tokens1.hasNext());
+ assertEquals(emoji1, tokens1.next().getTokenString());
+ assertFalse(tokens1.hasNext());
+
+ String emoji2 = "\uD83D\uDE00"; // 😀
+ Iterator<Token> tokens2 = tokenizer.tokenize(emoji1 + emoji2, Language.ENGLISH, StemMode.ALL, true).iterator();
+ assertTrue(tokens2.hasNext());
+ assertEquals(emoji1, tokens2.next().getTokenString());
+ assertEquals(emoji2, tokens2.next().getTokenString());
+ assertFalse(tokens2.hasNext());
+ }
+
+ @Test
+ public void testStemEmojis() {
+ var stemmer = new OpenNlpLinguistics().getStemmer();
+ String emoji = "\uD83D\uDD2A"; // 🔪
+ List<StemList> stems = stemmer.stem(emoji, StemMode.ALL, Language.ENGLISH);
+ assertEquals(1, stems.size());
+ var stemList = stems.get(0);
+ assertEquals(1, stemList.size());
+ assertEquals(emoji, stemList.get(0));
+ }
+
+ @Test
public void testTokenTypes() {
testTokenTypes(Language.ENGLISH);
testTokenTypes(Language.SPANISH);
diff --git a/orchestrator-restapi/pom.xml b/orchestrator-restapi/pom.xml
index 156e9367760..3b86309f66a 100644
--- a/orchestrator-restapi/pom.xml
+++ b/orchestrator-restapi/pom.xml
@@ -40,6 +40,9 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
+ <configuration>
+ <suppressWarningMissingImportPackages>true</suppressWarningMissingImportPackages>
+ </configuration>
</plugin>
</plugins>
</build>
diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ClusterApiImpl.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ClusterApiImpl.java
index 1fb1ad4b2ff..024a3bc58db 100644
--- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ClusterApiImpl.java
+++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ClusterApiImpl.java
@@ -136,7 +136,8 @@ class ClusterApiImpl implements ClusterApi {
continue;
}
- if (service.serviceStatus() == ServiceStatus.DOWN) {
+ // Disallow suspending a 2nd and downed config server to avoid losing ZK quorum.
+ if (service.serviceStatus() == ServiceStatus.DOWN && !isConfigServerLike()) {
Optional<Instant> since = service.serviceStatusInfo().since();
if (since.isEmpty()) {
reasons.mergeWith(SuspensionReasons.isDown(service));
diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicy.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicy.java
index 8193974c35a..5d553c86c50 100644
--- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicy.java
+++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicy.java
@@ -44,12 +44,9 @@ public class HostedVespaClusterPolicy implements ClusterPolicy {
// Be a bit more cautious when removing nodes permanently
if (!permanent) {
- // Disallow suspending a 2nd and downed config server to avoid losing ZK quorum.
- if (!clusterApi.isConfigServerLike()) {
- Optional<SuspensionReasons> suspensionReasons = clusterApi.allServicesDown();
- if (suspensionReasons.isPresent()) {
- return suspensionReasons.get();
- }
+ Optional<SuspensionReasons> suspensionReasons = clusterApi.allServicesDown();
+ if (suspensionReasons.isPresent()) {
+ return suspensionReasons.get();
}
}
diff --git a/parent/pom.xml b/parent/pom.xml
index 71e0c35eb6c..f68b2d0c068 100644
--- a/parent/pom.xml
+++ b/parent/pom.xml
@@ -246,6 +246,7 @@
<configuration>
<configGenVersion>${project.version}</configGenVersion>
<useCommonAssemblyIds>true</useCommonAssemblyIds>
+ <failOnWarnings>true</failOnWarnings>
</configuration>
</plugin>
<plugin>
@@ -716,7 +717,7 @@
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-compress</artifactId>
- <version>1.22</version>
+ <version>1.23.0</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
diff --git a/screwdriver.yaml b/screwdriver.yaml
index 76ec7f2aa71..81fe8d75c22 100644
--- a/screwdriver.yaml
+++ b/screwdriver.yaml
@@ -366,6 +366,7 @@ jobs:
echo "Must have valid Vespa version to continue (got VESPA_VERSION=$VESPA_VERSION)."
return 1
fi
+ VESPA_MAJOR=$(echo $VESPA_VERSION | cut -d. -f1)
- install-dependencies: |
dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
dnf -y install docker-ce docker-ce-cli containerd.io
@@ -380,21 +381,16 @@ jobs:
docker context use vespa-context
docker buildx create --name vespa-builder --driver docker-container --use
docker buildx inspect --bootstrap
- docker login --username aressem --password "$DOCKER_HUB_DEPLOY_KEY"
docker buildx build \
--progress plain \
--load \
- --platform linux/amd64,linux/arm64 \
+ --platform linux/amd64 \
--build-arg VESPA_BASE_IMAGE=el9 \
--build-arg VESPA_VERSION=$VESPA_VERSION \
--file Dockerfile \
- --tag docker.io/vespaengine/$IMAGE_NAME:$VESPA_VERSION \
- --tag docker.io/vespaengine/$IMAGE_NAME:$VESPA_MAJOR \
- --tag docker.io/vespaengine/$IMAGE_NAME:latest \
+ --tag vespaengine/$IMAGE_NAME:latest \
.
- verify-container-image: |
- # Trick to be able to use the documentation testing to verify the image built locally
- buildah tag $IMAGE_NAME:$VESPA_VERSION vespaengine/vespa:latest
# Run quick start guide
$SD_SOURCE_DIR/screwdriver/test-quick-start-guide.sh
- publish-image: |
@@ -447,8 +443,8 @@ jobs:
now_epoch=`date "+%s"`
echo "Now epoch: " $now_epoch
- calculate-current-release-age: |
- current_release_date=`curl -s 'https://repo1.maven.org/maven2/com/yahoo/vespa/parent/' | \
- grep '^<a href="[0-9]' | awk '{print $4}' | sort | tail -1`
+ current_release_date=$(curl -sLf https://repo1.maven.org/maven2/com/yahoo/vespa/cloud-tenant-base/maven-metadata.xml | \
+ grep -oP "<lastUpdated>\K\w+" | cut -c 1-8)
echo "Current release date: " $current_release_date
current_release_epoch=`date -d "$current_release_date" "+%s"`
echo "Current release epoch: " $current_release_epoch
@@ -460,8 +456,7 @@ jobs:
exit 1
fi
- calculate-docker-image-age: |
- image_date=`curl https://hub.docker.com/v2/repositories/vespaengine/vespa/ | \
- python -m json.tool| grep last_updated | awk '{print $2}' | tr -d '",'`
+ image_date=$(curl -sLf https://hub.docker.com/v2/repositories/vespaengine/vespa/ | jq -re '.last_updated')
echo "Docker image last_updated: " $image_date
image_epoch=`date -d "$image_date" "+%s"`
echo "Docker image epoch: " $image_epoch
diff --git a/screwdriver/release-container-image-docker.sh b/screwdriver/release-container-image-docker.sh
index 6d8babe3dcc..46786bf8dc9 100755
--- a/screwdriver/release-container-image-docker.sh
+++ b/screwdriver/release-container-image-docker.sh
@@ -61,6 +61,14 @@ for data in "Dockerfile vespa"; do
if curl -fsSL https://index.docker.io/v1/repositories/vespaengine/$IMAGE_NAME/tags/$VESPA_VERSION &> /dev/null; then
echo "Container image docker.io/vespaengine/$IMAGE_NAME:$VESPA_VERSION aldready exists."
else
+    # Build only for x86_64 first for testing, as BuildKit does not support loading multi-arch images into the Docker daemon.
+ docker buildx build --progress plain --load --platform linux/amd64 --build-arg VESPA_VERSION=$VESPA_VERSION \
+ --file $DOCKER_FILE --tag vespaengine/$IMAGE_NAME:latest .
+
+ # Test
+ $SD_SOURCE_DIR/screwdriver/test-quick-start-guide.sh
+
+    # Build for amd64 and arm64, and publish
docker login --username aressem --password "$DOCKER_HUB_DEPLOY_KEY"
docker buildx build --progress plain --push --platform linux/amd64,linux/arm64 --build-arg VESPA_VERSION=$VESPA_VERSION \
--file $DOCKER_FILE --tag docker.io/vespaengine/$IMAGE_NAME:$VESPA_VERSION \
diff --git a/screwdriver/release-java-artifacts.sh b/screwdriver/release-java-artifacts.sh
index e8e98015d39..8cf61dfc839 100755
--- a/screwdriver/release-java-artifacts.sh
+++ b/screwdriver/release-java-artifacts.sh
@@ -16,7 +16,7 @@ fi
readonly VESPA_RELEASE="$1"
readonly VESPA_REF="$2"
-QUERY_VERSION_HTTP_CODE=$(curl --write-out %{http_code} --silent --location --output /dev/null https://oss.sonatype.org/content/repositories/releases/com/yahoo/vespa/parent/${VESPA_RELEASE}/)
+QUERY_VERSION_HTTP_CODE=$(curl --write-out %{http_code} --silent --location --output /dev/null https://oss.sonatype.org/content/repositories/releases/com/yahoo/vespa/parent/${VESPA_RELEASE}/parent-${VESPA_RELEASE}.pom)
if [[ "200" == $QUERY_VERSION_HTTP_CODE ]]; then
echo "Vespa version $VESPA_RELEASE is already promoted, exiting"
exit 0
diff --git a/searchcore/src/apps/vespa-gen-testdocs/vespa-gen-testdocs.cpp b/searchcore/src/apps/vespa-gen-testdocs/vespa-gen-testdocs.cpp
index 180f4618de6..b7f5731ddf4 100644
--- a/searchcore/src/apps/vespa-gen-testdocs/vespa-gen-testdocs.cpp
+++ b/searchcore/src/apps/vespa-gen-testdocs/vespa-gen-testdocs.cpp
@@ -252,12 +252,12 @@ class PrefixTextFieldGenerator : public FieldGenerator
uint32_t _mod;
uint32_t _div;
public:
- PrefixTextFieldGenerator(std::vector<string> argv);
+ PrefixTextFieldGenerator(std::vector<string> argv) noexcept;
virtual ~PrefixTextFieldGenerator() override;
virtual void generateValue(vespalib::asciistream &doc, uint32_t id) override;
};
-PrefixTextFieldGenerator::PrefixTextFieldGenerator(std::vector<string> argv)
+PrefixTextFieldGenerator::PrefixTextFieldGenerator(std::vector<string> argv) noexcept
: FieldGenerator(argv[0]),
_prefix(),
_mod(std::numeric_limits<uint32_t>::max()),
diff --git a/searchcore/src/tests/grouping/grouping.cpp b/searchcore/src/tests/grouping/grouping.cpp
index eabbaf3d50f..6afaf06b244 100644
--- a/searchcore/src/tests/grouping/grouping.cpp
+++ b/searchcore/src/tests/grouping/grouping.cpp
@@ -4,6 +4,7 @@
#include <vespa/searchlib/aggregation/sumaggregationresult.h>
#include <vespa/searchcommon/attribute/iattributevector.h>
#include <vespa/searchlib/expression/attributenode.h>
+#include <vespa/searchlib/expression/integerresultnode.h>
#include <vespa/searchlib/attribute/extendableattributes.h>
#include <vespa/searchcore/grouping/groupingcontext.h>
#include <vespa/searchcore/grouping/groupingmanager.h>
diff --git a/searchcore/src/tests/proton/attribute/attribute_test.cpp b/searchcore/src/tests/proton/attribute/attribute_test.cpp
index 4458cd66ad6..bc398866252 100644
--- a/searchcore/src/tests/proton/attribute/attribute_test.cpp
+++ b/searchcore/src/tests/proton/attribute/attribute_test.cpp
@@ -984,13 +984,8 @@ TEST_F(TwoPhasePutTest, handles_assign_update_as_two_phase_put_when_specified_fo
ImportedAttributeVector::SP
createImportedAttribute(const vespalib::string &name)
{
- auto result = ImportedAttributeVectorFactory::create(name,
- std::shared_ptr<ReferenceAttribute>(),
- std::shared_ptr<search::IDocumentMetaStoreContext>(),
- AttributeVector::SP(),
- std::shared_ptr<const search::IDocumentMetaStoreContext>(),
- true);
- result->getSearchCache()->insert("foo", BitVectorSearchCache::Entry::SP());
+ auto result = ImportedAttributeVectorFactory::create(name, {}, {}, {}, {}, true);
+ result->getSearchCache()->insert("foo", {});
return result;
}
diff --git a/searchcore/src/tests/proton/matching/matching_test.cpp b/searchcore/src/tests/proton/matching/matching_test.cpp
index 130fb29c289..b59384f1493 100644
--- a/searchcore/src/tests/proton/matching/matching_test.cpp
+++ b/searchcore/src/tests/proton/matching/matching_test.cpp
@@ -394,10 +394,9 @@ struct MyWorld {
SearchReply::UP performSearch(const SearchRequest & req, size_t threads) {
Matcher::SP matcher = createMatcher();
- SearchSession::OwnershipBundle owned_objects;
- owned_objects.search_handler = std::make_shared<MySearchHandler>(matcher);
- owned_objects.context = std::make_unique<MatchContext>(std::make_unique<MockAttributeContext>(),
- std::make_unique<FakeSearchContext>());
+ SearchSession::OwnershipBundle owned_objects({std::make_unique<MockAttributeContext>(),
+ std::make_unique<FakeSearchContext>()},
+ std::make_shared<MySearchHandler>(matcher));
vespalib::SimpleThreadBundle threadBundle(threads);
SearchReply::UP reply = matcher->match(req, threadBundle, searchContext, attributeContext,
*sessionManager, metaStore, metaStore.getBucketDB(),
diff --git a/searchcore/src/tests/proton/matching/querynodes_test.cpp b/searchcore/src/tests/proton/matching/querynodes_test.cpp
index 15fcc8a3fd7..3c9220bcdb8 100644
--- a/searchcore/src/tests/proton/matching/querynodes_test.cpp
+++ b/searchcore/src/tests/proton/matching/querynodes_test.cpp
@@ -520,9 +520,9 @@ TEST("requireThatSimpleIntermediatesGetProperBlending") {
TEST("control query nodes size") {
EXPECT_EQUAL(160u, sizeof(search::query::NumberTerm));
- EXPECT_EQUAL(192u, sizeof(ProtonNodeTypes::NumberTerm));
+ EXPECT_EQUAL(280u, sizeof(ProtonNodeTypes::NumberTerm));
EXPECT_EQUAL(160u, sizeof(search::query::StringTerm));
- EXPECT_EQUAL(192u, sizeof(ProtonNodeTypes::StringTerm));
+ EXPECT_EQUAL(280u, sizeof(ProtonNodeTypes::StringTerm));
}
} // namespace
diff --git a/searchcore/src/vespa/searchcore/proton/attribute/attributemanager.cpp b/searchcore/src/vespa/searchcore/proton/attribute/attributemanager.cpp
index ee04f17d378..96d99008309 100644
--- a/searchcore/src/vespa/searchcore/proton/attribute/attributemanager.cpp
+++ b/searchcore/src/vespa/searchcore/proton/attribute/attributemanager.cpp
@@ -451,7 +451,7 @@ namespace {
class CombinedAttributeContext : public IAttributeContext {
private:
using IAttributeFunctor = search::attribute::IAttributeFunctor;
- AttributeContext _ctx;
+ AttributeContext _ctx;
ImportedAttributesContext _importedCtx;
public:
@@ -483,6 +483,10 @@ public:
_ctx.releaseEnumGuards();
_importedCtx.releaseEnumGuards();
}
+ void enableMultiThreadSafe() override {
+ _ctx.enableMultiThreadSafe();
+ _importedCtx.enableMultiThreadSafe();
+ }
void asyncForAttribute(const vespalib::string &name, std::unique_ptr<IAttributeFunctor> func) const override {
_ctx.asyncForAttribute(name, std::move(func));
}
diff --git a/searchcore/src/vespa/searchcore/proton/attribute/document_field_extractor.cpp b/searchcore/src/vespa/searchcore/proton/attribute/document_field_extractor.cpp
index c0c6f729509..9364be4570e 100644
--- a/searchcore/src/vespa/searchcore/proton/attribute/document_field_extractor.cpp
+++ b/searchcore/src/vespa/searchcore/proton/attribute/document_field_extractor.cpp
@@ -16,6 +16,7 @@
#include <vespa/searchcommon/common/undefinedvalues.h>
#include <vespa/vespalib/stllike/hash_map.hpp>
#include <vespa/vespalib/util/exceptions.h>
+#include <cassert>
using document::FieldValue;
using document::BoolFieldValue;
diff --git a/searchcore/src/vespa/searchcore/proton/attribute/imported_attributes_context.cpp b/searchcore/src/vespa/searchcore/proton/attribute/imported_attributes_context.cpp
index 376c84c12d6..ede86051efb 100644
--- a/searchcore/src/vespa/searchcore/proton/attribute/imported_attributes_context.cpp
+++ b/searchcore/src/vespa/searchcore/proton/attribute/imported_attributes_context.cpp
@@ -14,8 +14,7 @@ using LockGuard = std::lock_guard<std::mutex>;
namespace proton {
const IAttributeVector *
-ImportedAttributesContext::getOrCacheAttribute(const vespalib::string &name, AttributeCache &attributes,
- bool stableEnumGuard, const LockGuard &) const
+ImportedAttributesContext::getOrCacheAttribute(const vespalib::string &name, AttributeCache &attributes, bool stableEnumGuard) const
{
auto itr = attributes.find(name);
if (itr != attributes.end()) {
@@ -31,15 +30,22 @@ ImportedAttributesContext::getOrCacheAttribute(const vespalib::string &name, Att
} else {
metaGuard = metaItr->second;
}
- auto insRes = attributes.emplace(name, result->makeReadGuard(std::move(metaGuard), stableEnumGuard));
+ auto insRes = attributes.insert(std::make_pair(name, result->makeReadGuard(std::move(metaGuard), stableEnumGuard)));
return insRes.first->second->attribute();
} else {
return nullptr;
}
}
+const IAttributeVector *
+ImportedAttributesContext::getOrCacheAttributeMtSafe(const vespalib::string &name, AttributeCache &attributes, bool stableEnumGuard) const {
+ LockGuard guard(_cacheMutex);
+ return getOrCacheAttribute(name, attributes, stableEnumGuard);
+}
+
ImportedAttributesContext::ImportedAttributesContext(const ImportedAttributesRepo &repo)
: _repo(repo),
+ _mtSafe(false),
_guardedAttributes(),
_enumGuardedAttributes(),
_metaStores(),
@@ -52,15 +58,17 @@ ImportedAttributesContext::~ImportedAttributesContext() = default;
const IAttributeVector *
ImportedAttributesContext::getAttribute(const vespalib::string &name) const
{
- LockGuard guard(_cacheMutex);
- return getOrCacheAttribute(name, _guardedAttributes, false, guard);
+ return _mtSafe
+ ? getOrCacheAttributeMtSafe(name, _guardedAttributes, false)
+ : getOrCacheAttribute(name, _guardedAttributes, false);
}
const IAttributeVector *
ImportedAttributesContext::getAttributeStableEnum(const vespalib::string &name) const
{
- LockGuard guard(_cacheMutex);
- return getOrCacheAttribute(name, _enumGuardedAttributes, true, guard);
+ return _mtSafe
+ ? getOrCacheAttributeMtSafe(name, _enumGuardedAttributes, true)
+ : getOrCacheAttribute(name, _enumGuardedAttributes, true);
}
void
@@ -76,8 +84,12 @@ ImportedAttributesContext::getAttributeList(std::vector<const IAttributeVector *
void
ImportedAttributesContext::releaseEnumGuards()
{
- LockGuard guard(_cacheMutex);
- _enumGuardedAttributes.clear();
+ if (_mtSafe) {
+ LockGuard guard(_cacheMutex);
+ _enumGuardedAttributes.clear();
+ } else {
+ _enumGuardedAttributes.clear();
+ }
}
void
diff --git a/searchcore/src/vespa/searchcore/proton/attribute/imported_attributes_context.h b/searchcore/src/vespa/searchcore/proton/attribute/imported_attributes_context.h
index 69f566b70a6..aefffd959de 100644
--- a/searchcore/src/vespa/searchcore/proton/attribute/imported_attributes_context.h
+++ b/searchcore/src/vespa/searchcore/proton/attribute/imported_attributes_context.h
@@ -33,18 +33,19 @@ private:
using IAttributeFunctor = search::attribute::IAttributeFunctor;
using MetaStoreReadGuard = search::IDocumentMetaStoreContext::IReadGuard;
- using AttributeCache = std::unordered_map<vespalib::string, std::unique_ptr<AttributeReadGuard>, vespalib::hash<vespalib::string>>;
+ using AttributeCache = vespalib::hash_map<vespalib::string, std::unique_ptr<AttributeReadGuard>>;
using MetaStoreCache = std::unordered_map<const void *, std::shared_ptr<MetaStoreReadGuard>>;
using LockGuard = std::lock_guard<std::mutex>;
const ImportedAttributesRepo &_repo;
- mutable AttributeCache _guardedAttributes;
- mutable AttributeCache _enumGuardedAttributes;
- mutable MetaStoreCache _metaStores;
- mutable std::mutex _cacheMutex;
+ bool _mtSafe;
+ mutable AttributeCache _guardedAttributes;
+ mutable AttributeCache _enumGuardedAttributes;
+ mutable MetaStoreCache _metaStores;
+ mutable std::mutex _cacheMutex;
- const IAttributeVector *getOrCacheAttribute(const vespalib::string &name, AttributeCache &attributes,
- bool stableEnumGuard, const LockGuard &) const;
+ const IAttributeVector *getOrCacheAttribute(const vespalib::string &name, AttributeCache &attributes, bool stableEnumGuard) const;
+ const IAttributeVector *getOrCacheAttributeMtSafe(const vespalib::string &name, AttributeCache &attributes, bool stableEnumGuard) const;
public:
ImportedAttributesContext(const ImportedAttributesRepo &repo);
@@ -55,6 +56,7 @@ public:
const IAttributeVector *getAttributeStableEnum(const vespalib::string &name) const override;
void getAttributeList(std::vector<const IAttributeVector *> &list) const override;
void releaseEnumGuards() override;
+ void enableMultiThreadSafe() override { _mtSafe = true; }
void asyncForAttribute(const vespalib::string &name, std::unique_ptr<IAttributeFunctor> func) const override;
};
diff --git a/searchcore/src/vespa/searchcore/proton/common/scheduled_forward_executor.cpp b/searchcore/src/vespa/searchcore/proton/common/scheduled_forward_executor.cpp
index f19ff36dbfb..230ff922e80 100644
--- a/searchcore/src/vespa/searchcore/proton/common/scheduled_forward_executor.cpp
+++ b/searchcore/src/vespa/searchcore/proton/common/scheduled_forward_executor.cpp
@@ -95,11 +95,15 @@ ScheduledForwardExecutor::~ScheduledForwardExecutor() {
bool
ScheduledForwardExecutor::cancel(uint64_t key)
{
- std::lock_guard guard(_lock);
- auto found = _taskList.find(key);
- if (found == _taskList.end()) return false;
- found->second->cancel();
- _taskList.erase(found);
+ std::unique_ptr<State> state;
+ {
+ std::lock_guard guard(_lock);
+ auto found = _taskList.find(key);
+ if (found == _taskList.end()) return false;
+ state = std::move(found->second);
+ _taskList.erase(found);
+ }
+ state->cancel();
return true;
}
diff --git a/searchcore/src/vespa/searchcore/proton/matching/CMakeLists.txt b/searchcore/src/vespa/searchcore/proton/matching/CMakeLists.txt
index 7960d1d51b5..92c8ec8f441 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/CMakeLists.txt
+++ b/searchcore/src/vespa/searchcore/proton/matching/CMakeLists.txt
@@ -12,6 +12,7 @@ vespa_add_library(searchcore_matching STATIC
i_match_loop_communicator.cpp
indexenvironment.cpp
match_loop_communicator.cpp
+ match_context.cpp
match_master.cpp
match_params.cpp
match_phase_limit_calculator.cpp
diff --git a/searchcore/src/vespa/searchcore/proton/matching/blueprintbuilder.cpp b/searchcore/src/vespa/searchcore/proton/matching/blueprintbuilder.cpp
index eee2b7a7203..68845cf7f7f 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/blueprintbuilder.cpp
+++ b/searchcore/src/vespa/searchcore/proton/matching/blueprintbuilder.cpp
@@ -23,28 +23,28 @@ struct Mixer {
Mixer() : attributes() {}
void addAttribute(Blueprint::UP attr) {
- if (attributes.get() == 0) {
+ if ( ! attributes) {
attributes = std::make_unique<OrBlueprint>();
}
attributes->addChild(std::move(attr));
}
Blueprint::UP mix(Blueprint::UP indexes) {
- if (attributes.get() == 0) {
- if (indexes.get() == 0) {
+ if ( ! attributes) {
+ if ( ! indexes) {
return std::make_unique<EmptyBlueprint>();
}
- return Blueprint::UP(std::move(indexes));
+ return indexes;
}
- if (indexes.get() == 0) {
+ if ( ! indexes) {
if (attributes->childCnt() == 1) {
return attributes->removeChild(0);
} else {
- return Blueprint::UP(std::move(attributes));
+ return std::move(attributes);
}
}
- attributes->addChild(Blueprint::UP(std::move(indexes)));
- return Blueprint::UP(std::move(attributes));
+ attributes->addChild(std::move(indexes));
+ return std::move(attributes);
}
};
@@ -62,6 +62,7 @@ private:
void buildChildren(IntermediateBlueprint &parent,
const std::vector<search::query::Node *> &children)
{
+ parent.reserve(children.size());
for (size_t i = 0; i < children.size(); ++i) {
parent.addChild(BlueprintBuilder::build(_requestContext, *children[i], _context));
}
@@ -88,6 +89,7 @@ private:
void buildEquiv(ProtonEquiv &n) {
double eqw = n.getWeight().percent();
FieldSpecBaseList specs;
+ specs.reserve(n.numFields());
for (size_t i = 0; i < n.numFields(); ++i) {
specs.add(n.field(i).fieldSpec());
}
@@ -123,9 +125,7 @@ private:
assert(field.getFieldId() != search::fef::IllegalFieldId);
assert(field.getHandle() != search::fef::IllegalHandle);
if (field.attribute_field) {
- FieldSpecList attrField;
- attrField.add(field.fieldSpec());
- mixer.addAttribute(_context.getAttributes().createBlueprint(_requestContext, attrField, n));
+ mixer.addAttribute(_context.getAttributes().createBlueprint(_requestContext, field.fieldSpec(), n));
} else {
indexFields.add(field.fieldSpec());
}
diff --git a/searchcore/src/vespa/searchcore/proton/matching/blueprintbuilder.h b/searchcore/src/vespa/searchcore/proton/matching/blueprintbuilder.h
index 6c9203ef52c..f1db05b814f 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/blueprintbuilder.h
+++ b/searchcore/src/vespa/searchcore/proton/matching/blueprintbuilder.h
@@ -6,6 +6,8 @@
#include <vespa/searchlib/query/tree/node.h>
#include <vespa/searchlib/queryeval/blueprint.h>
+namespace search::queryeval { class IRequestContext; }
+
namespace proton::matching {
struct BlueprintBuilder {
@@ -14,9 +16,7 @@ struct BlueprintBuilder {
* blueprint meta-data back into corresponding query tree nodes.
*/
static search::queryeval::Blueprint::UP
- build(const search::queryeval::IRequestContext & requestContext,
- search::query::Node &node,
- ISearchContext &context);
+ build(const search::queryeval::IRequestContext & requestContext, search::query::Node &node, ISearchContext &context);
};
}
diff --git a/searchcore/src/vespa/searchcore/proton/matching/isearchcontext.h b/searchcore/src/vespa/searchcore/proton/matching/isearchcontext.h
index 51fca51d398..d3f17b10aac 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/isearchcontext.h
+++ b/searchcore/src/vespa/searchcore/proton/matching/isearchcontext.h
@@ -2,10 +2,9 @@
#pragma once
-#include <vespa/searchlib/queryeval/searchable.h>
-
#include <memory>
+namespace search::queryeval { class Searchable; }
namespace searchcorespi { class IndexSearchable; }
namespace proton::matching {
@@ -25,10 +24,6 @@ class ISearchContext
protected:
ISearchContext() = default;
public:
- /**
- * Convenience typedef for an auto pointer to this interface.
- **/
- using UP = std::unique_ptr<ISearchContext>;
ISearchContext(const ISearchContext &) = delete;
ISearchContext & operator = (const ISearchContext &) = delete;
diff --git a/searchcore/src/vespa/searchcore/proton/matching/match_context.cpp b/searchcore/src/vespa/searchcore/proton/matching/match_context.cpp
new file mode 100644
index 00000000000..2d8be596380
--- /dev/null
+++ b/searchcore/src/vespa/searchcore/proton/matching/match_context.cpp
@@ -0,0 +1,19 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "match_context.h"
+#include <cassert>
+
+namespace proton::matching {
+
+MatchContext::MatchContext(std::unique_ptr<IAttributeContext> attrCtx, std::unique_ptr<ISearchContext> searchCtx) noexcept
+ : _attrCtx(std::move(attrCtx)),
+ _searchCtx(std::move(searchCtx))
+{
+ assert(_attrCtx);
+ assert(_searchCtx);
+}
+
+MatchContext::MatchContext() noexcept = default;
+MatchContext::~MatchContext() = default;
+
+}
diff --git a/searchcore/src/vespa/searchcore/proton/matching/match_context.h b/searchcore/src/vespa/searchcore/proton/matching/match_context.h
index d3e5a87d34c..c12c1c5731d 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/match_context.h
+++ b/searchcore/src/vespa/searchcore/proton/matching/match_context.h
@@ -4,25 +4,20 @@
#include "isearchcontext.h"
#include <vespa/searchcommon/attribute/iattributecontext.h>
-#include <memory>
namespace proton::matching {
class MatchContext {
using IAttributeContext = search::attribute::IAttributeContext;
- IAttributeContext::UP _attrCtx;
- ISearchContext::UP _searchCtx;
-
+ std::unique_ptr<IAttributeContext> _attrCtx;
+ std::unique_ptr<ISearchContext> _searchCtx;
public:
using UP = std::unique_ptr<MatchContext>;
- MatchContext(IAttributeContext::UP attrCtx, ISearchContext::UP searchCtx)
- : _attrCtx(std::move(attrCtx)),
- _searchCtx(std::move(searchCtx))
- {
- assert(_attrCtx);
- assert(_searchCtx);
- }
+ MatchContext() noexcept;
+ MatchContext(IAttributeContext::UP attrCtx, std::unique_ptr<ISearchContext> searchCtx) noexcept;
+ MatchContext(MatchContext &&) noexcept = default;
+ ~MatchContext();
IAttributeContext &getAttributeContext() const { return *_attrCtx; }
ISearchContext &getSearchContext() const { return *_searchCtx; }
diff --git a/searchcore/src/vespa/searchcore/proton/matching/matcher.cpp b/searchcore/src/vespa/searchcore/proton/matching/matcher.cpp
index e2c5f4ef559..d0c1f99af11 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/matcher.cpp
+++ b/searchcore/src/vespa/searchcore/proton/matching/matcher.cpp
@@ -63,7 +63,8 @@ struct StupidMetaStore : search::IDocumentMetaStore {
void foreach(const search::IGidToLidMapperVisitor &) const override { }
};
-size_t numThreads(size_t hits, size_t minHits) {
+size_t
+numThreads(size_t hits, size_t minHits) {
return static_cast<size_t>(std::ceil(double(hits) / double(minHits)));
}
@@ -74,16 +75,17 @@ public:
_threadBundle(threadBundle),
_maxThreads(std::min(maxThreads, static_cast<uint32_t>(threadBundle.size())))
{ }
-private:
size_t size() const override { return _maxThreads; }
void run(vespalib::Runnable* const* targets, size_t cnt) override {
_threadBundle.run(targets, cnt);
}
+private:
vespalib::ThreadBundle &_threadBundle;
const uint32_t _maxThreads;
};
-bool willNotNeedRanking(const SearchRequest & request, const GroupingContext & groupingContext) {
+bool
+willNotNeedRanking(const SearchRequest & request, const GroupingContext & groupingContext) {
return (!groupingContext.needRanking() && (request.maxhits == 0))
|| (!request.sortSpec.empty() && (request.sortSpec.find("[rank]") == vespalib::string::npos));
}
@@ -221,6 +223,7 @@ Matcher::match(const SearchRequest &request, vespalib::ThreadBundle &threadBundl
}
const Properties *feature_overrides = &request.propertiesMap.featureOverrides();
if (shouldCacheSearchSession) {
+ // These should have been moved instead.
owned_objects.feature_overrides = std::make_unique<Properties>(*feature_overrides);
feature_overrides = owned_objects.feature_overrides.get();
}
@@ -248,6 +251,9 @@ Matcher::match(const SearchRequest &request, vespalib::ThreadBundle &threadBundl
LimitedThreadBundleWrapper limitedThreadBundle(threadBundle, numThreadsPerSearch);
MatchMaster master;
uint32_t numParts = NumSearchPartitions::lookup(rankProperties, _rankSetup->getNumSearchPartitions());
+ if (limitedThreadBundle.size() > 1) {
+ attrContext.enableMultiThreadSafe();
+ }
ResultProcessor::Result::UP result = master.match(request.trace(), params, limitedThreadBundle, *mtf, rp,
_distributionKey, numParts);
my_stats = MatchMaster::getStats(std::move(master));
diff --git a/searchcore/src/vespa/searchcore/proton/matching/partial_result.h b/searchcore/src/vespa/searchcore/proton/matching/partial_result.h
index f4dc2e31d4d..314fefa3cc0 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/partial_result.h
+++ b/searchcore/src/vespa/searchcore/proton/matching/partial_result.h
@@ -48,7 +48,7 @@ public:
_sortData.push_back(sd);
_sortDataSize += sd.second;
}
- virtual void merge(Source &rhs) override;
+ void merge(Source &rhs) override;
};
}
diff --git a/searchcore/src/vespa/searchcore/proton/matching/querynodes.h b/searchcore/src/vespa/searchcore/proton/matching/querynodes.h
index 03b274b7233..0e01884d504 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/querynodes.h
+++ b/searchcore/src/vespa/searchcore/proton/matching/querynodes.h
@@ -44,7 +44,7 @@ public:
};
private:
- std::vector<FieldEntry> _fields;
+ vespalib::SmallVector<FieldEntry, 1u> _fields;
void propagate_document_frequency(uint32_t matching_count_doc, uint32_t total_doc_count);
diff --git a/searchcore/src/vespa/searchcore/proton/matching/same_element_builder.h b/searchcore/src/vespa/searchcore/proton/matching/same_element_builder.h
index dd89e952a34..9928c9a6ae8 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/same_element_builder.h
+++ b/searchcore/src/vespa/searchcore/proton/matching/same_element_builder.h
@@ -7,21 +7,24 @@
#include <vespa/searchlib/queryeval/blueprint.h>
#include <vespa/searchlib/queryeval/same_element_blueprint.h>
-namespace search::queryeval { class FieldSpec; }
+namespace search::queryeval {
+ class IRequestContext;
+ class FieldSpec;
+}
namespace proton::matching {
class SameElementBuilder
{
-private:
- const search::queryeval::IRequestContext &_requestContext;
- ISearchContext &_context;
- std::unique_ptr<search::queryeval::SameElementBlueprint> _result;
public:
SameElementBuilder(const search::queryeval::IRequestContext &requestContext, ISearchContext &context,
const search::queryeval::FieldSpec &field, bool expensive);
void add_child(search::query::Node &node);
search::queryeval::Blueprint::UP build();
+private:
+ const search::queryeval::IRequestContext &_requestContext;
+ ISearchContext &_context;
+ std::unique_ptr<search::queryeval::SameElementBlueprint> _result;
};
}
diff --git a/searchcore/src/vespa/searchcore/proton/matching/search_session.cpp b/searchcore/src/vespa/searchcore/proton/matching/search_session.cpp
index c9b6ddb897f..c69887af1ec 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/search_session.cpp
+++ b/searchcore/src/vespa/searchcore/proton/matching/search_session.cpp
@@ -18,12 +18,20 @@ SearchSession::SearchSession(const SessionId &id, vespalib::steady_time create_t
void
SearchSession::releaseEnumGuards() {
- _owned_objects.context->releaseEnumGuards();
+ _owned_objects.context.releaseEnumGuards();
}
SearchSession::~SearchSession() = default;
-SearchSession::OwnershipBundle::OwnershipBundle() = default;
+SearchSession::OwnershipBundle::OwnershipBundle(MatchContext && match_context,
+ std::shared_ptr<const ISearchHandler> searchHandler) noexcept
+ : search_handler(std::move(searchHandler)),
+ context(std::move(match_context)),
+ feature_overrides(),
+ readGuard()
+{}
+
+SearchSession::OwnershipBundle::OwnershipBundle() noexcept = default;
SearchSession::OwnershipBundle::~OwnershipBundle() = default;
}
diff --git a/searchcore/src/vespa/searchcore/proton/matching/search_session.h b/searchcore/src/vespa/searchcore/proton/matching/search_session.h
index 2cc37a07564..3775759126f 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/search_session.h
+++ b/searchcore/src/vespa/searchcore/proton/matching/search_session.h
@@ -2,6 +2,7 @@
#pragma once
+#include "match_context.h"
#include <vespa/searchcore/proton/documentmetastore/i_document_meta_store_context.h>
#include <vespa/searchcore/proton/summaryengine/isearchhandler.h>
#include <vespa/vespalib/stllike/string.h>
@@ -22,13 +23,15 @@ class MatchContext;
class SearchSession {
public:
struct OwnershipBundle {
- OwnershipBundle();
- OwnershipBundle(OwnershipBundle &&) = default;
- OwnershipBundle & operator = (OwnershipBundle &&) = default;
+ OwnershipBundle() noexcept;
+ OwnershipBundle(MatchContext && matchContext, std::shared_ptr<const ISearchHandler> searchHandler) noexcept;
+ OwnershipBundle(OwnershipBundle &&) noexcept = default;
+ OwnershipBundle & operator = (OwnershipBundle &&) noexcept = delete;
~OwnershipBundle();
+ // Note that the search handler must be declared above the other members due to lifetime guarantees.
std::shared_ptr<const ISearchHandler> search_handler;
+ MatchContext context;
std::unique_ptr<search::fef::Properties> feature_overrides;
- std::unique_ptr<MatchContext> context;
IDocumentMetaStoreContext::IReadGuard::SP readGuard;
};
private:
@@ -44,8 +47,7 @@ public:
using SP = std::shared_ptr<SearchSession>;
SearchSession(const SessionId &id, vespalib::steady_time create_time, vespalib::steady_time time_of_doom,
- std::unique_ptr<MatchToolsFactory> match_tools_factory,
- OwnershipBundle &&owned_objects);
+ std::unique_ptr<MatchToolsFactory> match_tools_factory, OwnershipBundle &&owned_objects);
~SearchSession();
const SessionId &getSessionId() const { return _session_id; }
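
The comment about member ordering in OwnershipBundle leans on a core C++ guarantee: non-static data members are destroyed in reverse declaration order, so declaring search_handler first means it is destroyed last, after the context and guards that may depend on objects it keeps alive. A minimal standalone sketch of that rule (illustrative only, not Vespa code):

    #include <iostream>
    #include <memory>

    struct Handler {
        ~Handler() { std::cout << "handler destroyed\n"; }
    };

    struct Context {
        std::shared_ptr<Handler> owner;   // keeps the handler alive while the context is in use
        ~Context() { std::cout << "context destroyed\n"; }
    };

    struct Bundle {
        // Declared first => destroyed last, mirroring OwnershipBundle::search_handler.
        std::shared_ptr<Handler> handler = std::make_shared<Handler>();
        Context context{handler};
    };

    int main() {
        Bundle b;
        // On scope exit: "context destroyed" prints before "handler destroyed",
        // so the context never outlives the handler it depends on.
    }
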
diff --git a/searchcore/src/vespa/searchcore/proton/server/matchview.cpp b/searchcore/src/vespa/searchcore/proton/server/matchview.cpp
index f865d533d85..d7a95ae1102 100644
--- a/searchcore/src/vespa/searchcore/proton/server/matchview.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/matchview.cpp
@@ -50,17 +50,16 @@ MatchView::MatchView(Matchers::SP matchers,
MatchView::~MatchView() = default;
-Matcher::SP
+std::shared_ptr<Matcher>
MatchView::getMatcher(const vespalib::string & rankProfile) const
{
return _matchers->lookup(rankProfile);
}
-MatchContext::UP
+MatchContext
MatchView::createContext() const {
- IAttributeContext::UP attrCtx = _attrMgr->createContext();
auto searchCtx = std::make_unique<SearchContext>(_indexSearchable, _docIdLimit.get());
- return std::make_unique<MatchContext>(std::move(attrCtx), std::move(searchCtx));
+ return {_attrMgr->createContext(), std::move(searchCtx)};
}
std::unique_ptr<SearchReply>
@@ -68,14 +67,13 @@ MatchView::match(std::shared_ptr<const ISearchHandler> searchHandler, const Sear
vespalib::ThreadBundle &threadBundle) const
{
Matcher::SP matcher = getMatcher(req.ranking);
- SearchSession::OwnershipBundle owned_objects;
- owned_objects.search_handler = std::move(searchHandler);
+ SearchSession::OwnershipBundle owned_objects(createContext(), std::move(searchHandler));
owned_objects.readGuard = _metaStore->getReadGuard();
- owned_objects.context = createContext();
- MatchContext *ctx = owned_objects.context.get();
+ ISearchContext & search_ctx = owned_objects.context.getSearchContext();
+ IAttributeContext & attribute_ctx = owned_objects.context.getAttributeContext();
const search::IDocumentMetaStore & dms = owned_objects.readGuard->get();
const bucketdb::BucketDBOwner & bucketDB = _metaStore->get().getBucketDB();
- return matcher->match(req, threadBundle, ctx->getSearchContext(), ctx->getAttributeContext(),
+ return matcher->match(req, threadBundle, search_ctx, attribute_ctx,
_sessionMgr, dms, bucketDB, std::move(owned_objects));
}
diff --git a/searchcore/src/vespa/searchcore/proton/server/matchview.h b/searchcore/src/vespa/searchcore/proton/server/matchview.h
index 20db5665832..f01442b9bc3 100644
--- a/searchcore/src/vespa/searchcore/proton/server/matchview.h
+++ b/searchcore/src/vespa/searchcore/proton/server/matchview.h
@@ -10,26 +10,23 @@
namespace searchcorespi { class IndexSearchable; }
-namespace proton {
-
-namespace matching {
-
-class MatchContext;
-class Matcher;
-class SessionManager;
-
+namespace proton::matching {
+ class MatchContext;
+ class Matcher;
+ class SessionManager;
}
+namespace proton {
struct IAttributeManager;
class MatchView {
using SessionManager = matching::SessionManager;
- std::shared_ptr<Matchers> _matchers;
- std::shared_ptr<searchcorespi::IndexSearchable> _indexSearchable;
- std::shared_ptr<IAttributeManager> _attrMgr;
- SessionManager & _sessionMgr;
- std::shared_ptr<IDocumentMetaStoreContext> _metaStore;
- DocIdLimit &_docIdLimit;
+ std::shared_ptr<Matchers> _matchers;
+ std::shared_ptr<searchcorespi::IndexSearchable> _indexSearchable;
+ std::shared_ptr<IAttributeManager> _attrMgr;
+ SessionManager & _sessionMgr;
+ std::shared_ptr<IDocumentMetaStoreContext> _metaStore;
+ DocIdLimit &_docIdLimit;
size_t getNumDocs() const {
return _metaStore->get().getNumActiveLids();
@@ -63,7 +60,7 @@ public:
return _matchers->getStats(rankProfile);
}
- std::unique_ptr<matching::MatchContext> createContext() const;
+ matching::MatchContext createContext() const;
std::unique_ptr<search::engine::SearchReply>
match(std::shared_ptr<const ISearchHandler> searchHandler,
diff --git a/searchcore/src/vespa/searchcore/proton/server/searchview.cpp b/searchcore/src/vespa/searchcore/proton/server/searchview.cpp
index 0c96b43a727..cace2478245 100644
--- a/searchcore/src/vespa/searchcore/proton/server/searchview.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/searchview.cpp
@@ -127,7 +127,7 @@ SearchView::getDocsumsInternal(const DocsumRequest & req)
auto store(_summarySetup->createDocsumStore());
auto mctx = _matchView->createContext();
auto ctx = std::make_unique<DocsumContext>(req, _summarySetup->getDocsumWriter(), *store, _matchView->getMatcher(req.ranking),
- mctx->getSearchContext(), mctx->getAttributeContext(),
+ mctx.getSearchContext(), mctx.getAttributeContext(),
*_summarySetup->getAttributeManager(), getSessionManager());
SearchView::InternalDocsumReply reply(ctx->getDocsums(), true);
uint64_t endGeneration = readGuard->get().getCurrentGeneration();
diff --git a/searchcore/src/vespa/searchcorespi/index/iindexmaintaineroperations.h b/searchcore/src/vespa/searchcorespi/index/iindexmaintaineroperations.h
index 9025b56dc27..0b089daff7c 100644
--- a/searchcore/src/vespa/searchcorespi/index/iindexmaintaineroperations.h
+++ b/searchcore/src/vespa/searchcorespi/index/iindexmaintaineroperations.h
@@ -19,7 +19,7 @@ struct IIndexMaintainerOperations {
using IFieldLengthInspector = search::index::IFieldLengthInspector;
using Schema = search::index::Schema;
using SelectorArray = search::diskindex::SelectorArray;
- virtual ~IIndexMaintainerOperations() {}
+ virtual ~IIndexMaintainerOperations() = default;
/**
* Creates a new memory index using the given schema.
diff --git a/searchlib/pom.xml b/searchlib/pom.xml
index 5555c83adde..bf556772a7c 100644
--- a/searchlib/pom.xml
+++ b/searchlib/pom.xml
@@ -79,6 +79,10 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
+ <configuration>
+ <bundleType>CORE</bundleType>
+ <suppressWarningMissingImportPackages>true</suppressWarningMissingImportPackages>
+ </configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
diff --git a/searchlib/src/apps/uniform/uniform.cpp b/searchlib/src/apps/uniform/uniform.cpp
index 807b8d61a9e..95d2bb1a7d1 100644
--- a/searchlib/src/apps/uniform/uniform.cpp
+++ b/searchlib/src/apps/uniform/uniform.cpp
@@ -3,6 +3,7 @@
#include <vespa/vespalib/util/signalhandler.h>
#include <vespa/searchlib/bitcompression/compression.h>
#include <cinttypes>
+#include <cassert>
static uint64_t
maxExpGolombVal(uint64_t kValue, uint64_t maxBits)
diff --git a/searchlib/src/tests/attribute/bitvector_search_cache/bitvector_search_cache_test.cpp b/searchlib/src/tests/attribute/bitvector_search_cache/bitvector_search_cache_test.cpp
index b9c82892a97..bb65beed68b 100644
--- a/searchlib/src/tests/attribute/bitvector_search_cache/bitvector_search_cache_test.cpp
+++ b/searchlib/src/tests/attribute/bitvector_search_cache/bitvector_search_cache_test.cpp
@@ -9,8 +9,9 @@ using namespace search::attribute;
using BitVectorSP = BitVectorSearchCache::BitVectorSP;
using Entry = BitVectorSearchCache::Entry;
+using EntrySP = std::shared_ptr<Entry>;
-Entry::SP
+EntrySP
makeEntry()
{
return std::make_shared<Entry>(IDocumentMetaStoreContext::IReadGuard::SP(), BitVector::create(5), 10);
@@ -18,8 +19,8 @@ makeEntry()
struct Fixture {
BitVectorSearchCache cache;
- Entry::SP entry1;
- Entry::SP entry2;
+ EntrySP entry1;
+ EntrySP entry2;
Fixture()
: cache(),
entry1(makeEntry()),
diff --git a/searchlib/src/tests/attribute/imported_search_context/imported_search_context_test.cpp b/searchlib/src/tests/attribute/imported_search_context/imported_search_context_test.cpp
index 311d3ef71e7..61e66d384e1 100644
--- a/searchlib/src/tests/attribute/imported_search_context/imported_search_context_test.cpp
+++ b/searchlib/src/tests/attribute/imported_search_context/imported_search_context_test.cpp
@@ -467,7 +467,7 @@ struct SearchCacheFixture : Fixture {
SearchCacheFixture::~SearchCacheFixture() = default;
-BitVectorSearchCache::Entry::SP
+std::shared_ptr<BitVectorSearchCache::Entry>
makeSearchCacheEntry(const std::vector<uint32_t> docIds, uint32_t docIdLimit)
{
std::shared_ptr<BitVector> bitVector = BitVector::create(docIdLimit);
diff --git a/searchlib/src/tests/attribute/stringattribute/stringattribute_test.cpp b/searchlib/src/tests/attribute/stringattribute/stringattribute_test.cpp
index 5ba90d2b077..6e5971ea81d 100644
--- a/searchlib/src/tests/attribute/stringattribute/stringattribute_test.cpp
+++ b/searchlib/src/tests/attribute/stringattribute/stringattribute_test.cpp
@@ -389,8 +389,8 @@ testSingleValue(Attribute & svsa, Config &cfg)
TEST("testSingleValue")
{
EXPECT_EQUAL(24u, sizeof(SearchContext));
- EXPECT_EQUAL(32u, sizeof(StringSearchHelper));
- EXPECT_EQUAL(88u, sizeof(attribute::SingleStringEnumSearchContext));
+ EXPECT_EQUAL(40u, sizeof(StringSearchHelper));
+ EXPECT_EQUAL(96u, sizeof(attribute::SingleStringEnumSearchContext));
{
Config cfg(BasicType::STRING, CollectionType::SINGLE);
SingleValueStringAttribute svsa("svsa", cfg);
diff --git a/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp b/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp
index e3c9e05073e..6ca7d298ee2 100644
--- a/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp
+++ b/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp
@@ -9,6 +9,7 @@
#include <vespa/searchlib/tensor/doc_vector_access.h>
#include <vespa/searchlib/tensor/distance_functions.h>
#include <vespa/searchlib/tensor/hnsw_index.h>
+#include <vespa/searchlib/tensor/mips_distance_transform.h>
#include <vespa/searchlib/tensor/nearest_neighbor_index.h>
#include <vespa/searchlib/tensor/nearest_neighbor_index_factory.h>
#include <vespa/searchlib/tensor/nearest_neighbor_index_loader.h>
@@ -24,6 +25,7 @@
#include <vespa/vespalib/testkit/test_kit.h>
#include <vespa/vespalib/util/mmap_file_allocator_factory.h>
#include <vespa/searchlib/util/bufferwriter.h>
+#include <vespa/vespalib/util/fake_doom.h>
#include <vespa/vespalib/util/threadstackexecutor.h>
#include <vespa/document/base/exceptions.h>
#include <vespa/eval/eval/fast_value.h>
@@ -54,6 +56,7 @@ using search::tensor::DocVectorAccess;
using search::tensor::HnswIndex;
using search::tensor::HnswIndexType;
using search::tensor::HnswTestNode;
+using search::tensor::MipsDistanceFunctionFactoryBase;
using search::tensor::NearestNeighborIndex;
using search::tensor::NearestNeighborIndexFactory;
using search::tensor::NearestNeighborIndexLoader;
@@ -285,35 +288,41 @@ public:
void populate_address_space_usage(AddressSpaceUsage&) const override {}
void get_state(const vespalib::slime::Inserter&) const override {}
void shrink_lid_space(uint32_t) override { }
- std::unique_ptr<NearestNeighborIndexSaver> make_saver() const override {
+ std::unique_ptr<NearestNeighborIndexSaver> make_saver(vespalib::GenericHeader& header) const override {
+ (void) header;
if (_index_value != 0) {
return std::make_unique<MockIndexSaver>(_index_value);
}
return std::unique_ptr<NearestNeighborIndexSaver>();
}
- std::unique_ptr<NearestNeighborIndexLoader> make_loader(FastOS_FileInterface& file) override {
+ std::unique_ptr<NearestNeighborIndexLoader> make_loader(FastOS_FileInterface& file, const vespalib::GenericHeader& header) override {
+ (void) header;
return std::make_unique<MockIndexLoader>(_index_value, file);
}
std::vector<Neighbor> find_top_k(uint32_t k,
const search::tensor::BoundDistanceFunction &df,
uint32_t explore_k,
+ const vespalib::Doom& doom,
double distance_threshold) const override
{
(void) k;
(void) df;
(void) explore_k;
+ (void) doom;
(void) distance_threshold;
return std::vector<Neighbor>();
}
std::vector<Neighbor> find_top_k_with_filter(uint32_t k,
const search::tensor::BoundDistanceFunction &df,
const GlobalFilter& filter, uint32_t explore_k,
+ const vespalib::Doom& doom,
double distance_threshold) const override
{
(void) k;
(void) df;
(void) explore_k;
(void) filter;
+ (void) doom;
(void) distance_threshold;
return std::vector<Neighbor>();
}
@@ -342,12 +351,15 @@ class MockNearestNeighborIndexFactory : public NearestNeighborIndexFactory {
const vespalib::string test_dir = "test_data/";
const vespalib::string attr_name = test_dir + "my_attr";
+const vespalib::string hnsw_max_squared_norm = "hnsw.max_squared_norm";
+
struct FixtureTraits {
bool use_dense_tensor_attribute = false;
bool use_direct_tensor_attribute = false;
bool enable_hnsw_index = false;
bool use_mock_index = false;
bool use_mmap_file_allocator = false;
+ bool use_mips_distance = false;
FixtureTraits dense() && {
use_dense_tensor_attribute = true;
@@ -381,6 +393,14 @@ struct FixtureTraits {
return *this;
}
+ FixtureTraits mips_hnsw() && {
+ use_dense_tensor_attribute = true;
+ enable_hnsw_index = true;
+ use_mock_index = false;
+ use_mips_distance = true;
+ return *this;
+ }
+
FixtureTraits direct() && {
use_dense_tensor_attribute = false;
use_direct_tensor_attribute = true;
@@ -606,8 +626,9 @@ Fixture::Fixture(const vespalib::string &typeSpec, FixtureTraits traits)
_mmap_allocator_base_dir("mmap-file-allocator-factory-dir")
{
if (traits.enable_hnsw_index) {
- _cfg.set_distance_metric(DistanceMetric::Euclidean);
- _cfg.set_hnsw_index_params(HnswIndexParams(4, 20, DistanceMetric::Euclidean));
+ auto dm = traits.use_mips_distance ? DistanceMetric::Dotproduct : DistanceMetric::Euclidean;
+ _cfg.set_distance_metric(dm);
+ _cfg.set_hnsw_index_params(HnswIndexParams(4, 20, dm));
}
vespalib::alloc::MmapFileAllocatorFactory::instance().setup(_mmap_allocator_base_dir);
setup();
@@ -1254,14 +1275,33 @@ TEST_F("Nearest neighbor index type is added to attribute file header", DenseTen
EXPECT_EQUAL("hnsw", header.getTag("nearest_neighbor_index").asString());
}
+class DenseTensorAttributeMipsIndex : public Fixture {
+public:
+ DenseTensorAttributeMipsIndex() : Fixture(vec_2d_spec, FixtureTraits().mips_hnsw()) {}
+};
+
+TEST_F("Nearest neighbor index with mips distance metrics stores square of max distance", DenseTensorAttributeMipsIndex)
+{
+ f.set_example_tensors();
+ f.save();
+ auto header = f.get_file_header();
+ EXPECT_TRUE(header.hasTag(hnsw_max_squared_norm));
+ EXPECT_EQUAL(130.0, header.getTag(hnsw_max_squared_norm).asFloat());
+ f.load();
+ auto& norm_store = dynamic_cast<MipsDistanceFunctionFactoryBase&>(f.hnsw_index().distance_function_factory()).get_max_squared_norm_store();
+ EXPECT_EQUAL(130.0, norm_store.get_max());
+}
+
template <typename ParentT>
class NearestNeighborBlueprintFixtureBase : public ParentT {
private:
std::unique_ptr<Value> _query_tensor;
+ vespalib::FakeDoom _no_doom;
public:
NearestNeighborBlueprintFixtureBase()
- : _query_tensor()
+ : _query_tensor(),
+ _no_doom()
{
this->set_tensor(1, vec_2d(1, 1));
this->set_tensor(2, vec_2d(2, 2));
@@ -1288,7 +1328,7 @@ public:
create_query_tensor(vec_2d(17, 42))),
3, approximate, 5,
100100.25,
- global_filter_lower_limit, 1.0);
+ global_filter_lower_limit, 1.0, _no_doom.get_doom());
EXPECT_EQUAL(11u, bp->getState().estimate().estHits);
EXPECT_EQUAL(100100.25 * 100100.25, bp->get_distance_threshold());
return bp;
diff --git a/searchlib/src/tests/features/nns_closeness/nns_closeness_test.cpp b/searchlib/src/tests/features/nns_closeness/nns_closeness_test.cpp
index 8cb060c08e4..703f03918d8 100644
--- a/searchlib/src/tests/features/nns_closeness/nns_closeness_test.cpp
+++ b/searchlib/src/tests/features/nns_closeness/nns_closeness_test.cpp
@@ -16,6 +16,7 @@ using namespace search::features;
using namespace search::fef::test;
using namespace search::fef;
+using search::attribute::DistanceMetric;
using vespalib::eval::TensorSpec;
const vespalib::string labelFeatureName("closeness(label,nns)");
@@ -146,4 +147,23 @@ TEST(NnsClosenessTest, raw_score_is_calculated_on_the_fly_using_label_setup)
expect_raw_score_calculated_on_the_fly(f2);
}
+TEST(NnsClosenessTest, can_return_negative_values_with_dotproduct_distance_metric)
+{
+ NoLabel f1;
+ RankFixture f2(0, 2, f1, fieldFeatureName, "tensor(x[2]):[2,3]", DistanceMetric::Dotproduct);
+ ASSERT_FALSE(f2.failed());
+
+ f2.set_bar_rawscore(0, 7, 5.0);
+ f2.set_bar_rawscore(1, 8, -5.0);
+ f2.set_attribute_tensor(9, TensorSpec::from_expr("tensor(x[2]):[4,5]"));
+ f2.set_attribute_tensor(10, TensorSpec::from_expr("tensor(x[2]):[-4,-5]"));
+
+ // For docids 9 and 10 the raw score is calculated on the fly
+ // using a distance calculator over the attribute and query tensors.
+ EXPECT_EQ(5.0, f2.getScore(7));
+ EXPECT_EQ(-5.0, f2.getScore(8));
+ EXPECT_EQ(23.0, f2.getScore(9));
+ EXPECT_EQ(-23.0, f2.getScore(10));
+}
+
GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/searchlib/src/tests/fef/termfieldmodel/termfieldmodel_test.cpp b/searchlib/src/tests/fef/termfieldmodel/termfieldmodel_test.cpp
index 54a7d6ea286..4ac4c92f658 100644
--- a/searchlib/src/tests/fef/termfieldmodel/termfieldmodel_test.cpp
+++ b/searchlib/src/tests/fef/termfieldmodel/termfieldmodel_test.cpp
@@ -27,6 +27,26 @@ struct State {
State::State() : term(), md(), f3(nullptr), f5(nullptr), f7(nullptr), array() {}
State::~State() = default;
+/**
+ * convenience adapter for easy iteration
+ **/
+class SimpleTermFieldRangeAdapter
+{
+ SimpleTermData& _ref;
+ size_t _idx;
+ size_t _lim;
+public:
+ explicit SimpleTermFieldRangeAdapter(SimpleTermData& ref)
+ : _ref(ref), _idx(0), _lim(ref.numFields())
+ {}
+
+ [[nodiscard]] bool valid() const { return (_idx < _lim); }
+
+ [[nodiscard]] SimpleTermFieldData& get() const { return _ref.field(_idx); }
+
+ void next() { assert(valid()); ++_idx; }
+};
+
void testInvalidId() {
const TermFieldMatchData empty;
using search::queryeval::SearchIterator;
@@ -44,7 +64,7 @@ void testSetup(State &state) {
state.term.addField(5); // docfreq = 3
using FRA = search::fef::ITermFieldRangeAdapter;
- using SFR = search::fef::SimpleTermFieldRangeAdapter;
+ using SFR = SimpleTermFieldRangeAdapter;
// lookup terms
{
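
For reference, the local SimpleTermFieldRangeAdapter added above is used the same way as the search::fef iterator it replaces; a minimal usage sketch (assuming the SimpleTermData / SimpleTermFieldData types from search::fef that this test already exercises):

    // Iterate over all fields registered on a SimpleTermData instance.
    void touch_all_fields(search::fef::SimpleTermData& term) {
        for (SimpleTermFieldRangeAdapter range(term); range.valid(); range.next()) {
            search::fef::SimpleTermFieldData& field = range.get();
            (void) field;   // e.g. assign match data handles or inspect the field id here
        }
    }
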
diff --git a/searchlib/src/tests/tensor/distance_functions/distance_functions_test.cpp b/searchlib/src/tests/tensor/distance_functions/distance_functions_test.cpp
index 363193da110..de265394918 100644
--- a/searchlib/src/tests/tensor/distance_functions/distance_functions_test.cpp
+++ b/searchlib/src/tests/tensor/distance_functions/distance_functions_test.cpp
@@ -6,6 +6,7 @@
#include <vespa/searchlib/tensor/distance_function_factory.h>
#include <vespa/searchlib/tensor/mips_distance_transform.h>
#include <vespa/vespalib/gtest/gtest.h>
+#include <numbers>
#include <vector>
#include <vespa/log/log.h>
@@ -258,6 +259,21 @@ TEST(DistanceFunctionsTest, angular_gives_expected_score)
EXPECT_DOUBLE_EQ(a66, computeAngularChecked(t(iv6), t(iv6)));
}
+TEST(DistanceFunctionsTest, conversion_to_internal_distance_threshold_is_capped)
+{
+ AngularDistanceFunctionFactory<double> dff;
+ std::vector<double> p0{0.0, 0.0};
+ auto angular = dff.for_query_vector(t(p0));
+ // threshold < 0.0 is treated as threshold == 0.0
+ EXPECT_DOUBLE_EQ(0.0, angular->convert_threshold(-0.1));
+ EXPECT_DOUBLE_EQ(0.0, angular->convert_threshold(0.0));
+ EXPECT_LT(0.0, angular->convert_threshold(0.1));
+ // threshold > pi is treated as threshold == pi
+ EXPECT_GT(2.0, angular->convert_threshold(std::numbers::pi - 0.1));
+ EXPECT_DOUBLE_EQ(2.0, angular->convert_threshold(std::numbers::pi));
+ EXPECT_DOUBLE_EQ(2.0, angular->convert_threshold(4.0));
+}
+
double computePrenormalizedAngularChecked(TypedCells a, TypedCells b) {
static PrenormalizedAngularDistanceFunctionFactory<float> flt_dff;
static PrenormalizedAngularDistanceFunctionFactory<double> dbl_dff;
@@ -509,6 +525,22 @@ TEST(GeoDegreesTest, gives_expected_score)
verify_geo_miles(g9_jfk, g9_jfk, 0);
}
+TEST(GeoDegreesTest, conversion_to_internal_distance_threshold_is_capped)
+{
+ GeoDistanceFunctionFactory dff;
+ std::vector<double> p0{0.0, 0.0};
+ auto geo = dff.for_query_vector(t(p0));
+ // threshold < 0.0 is treated as threshold == 0.0
+ EXPECT_DOUBLE_EQ(0.0, geo->convert_threshold(-0.1));
+ EXPECT_DOUBLE_EQ(0.0, geo->convert_threshold(0.0));
+ EXPECT_LT(0.0, geo->convert_threshold(10.0));
+ // threshold > approx 20000 km is treated as threshold == approx 20000 km
+ auto halfway = search::common::GeoGcd(-90.0, 0.0).km_great_circle_distance(90.0, 0.0);
+ EXPECT_GT(1.0, geo->convert_threshold(halfway - 10.0));
+ EXPECT_DOUBLE_EQ(1.0, geo->convert_threshold(halfway));
+ EXPECT_DOUBLE_EQ(1.0, geo->convert_threshold(halfway + 10.0));
+ EXPECT_DOUBLE_EQ(1.0, geo->convert_threshold(30000.0));
+}
double computeTransformedMipsChecked(TypedCells a, TypedCells b, bool check_insert = true) {
MipsDistanceFunctionFactory<float> flt_dff;
diff --git a/searchlib/src/tests/tensor/hnsw_index/hnsw_index_test.cpp b/searchlib/src/tests/tensor/hnsw_index/hnsw_index_test.cpp
index 4d759132114..f59c16c76f9 100644
--- a/searchlib/src/tests/tensor/hnsw_index/hnsw_index_test.cpp
+++ b/searchlib/src/tests/tensor/hnsw_index/hnsw_index_test.cpp
@@ -17,6 +17,7 @@
#include <vespa/vespalib/datastore/compaction_spec.h>
#include <vespa/vespalib/datastore/compaction_strategy.h>
#include <vespa/vespalib/gtest/gtest.h>
+#include <vespa/vespalib/util/fake_doom.h>
#include <vespa/vespalib/util/generationhandler.h>
#include <vespa/vespalib/data/slime/slime.h>
#include <type_traits>
@@ -90,13 +91,15 @@ public:
LevelGenerator* level_generator;
GenerationHandler gen_handler;
std::unique_ptr<IndexType> index;
+ std::unique_ptr<vespalib::FakeDoom> _doom;
HnswIndexTest()
: vectors(),
global_filter(GlobalFilter::create()),
level_generator(),
gen_handler(),
- index()
+ index(),
+ _doom(std::make_unique<vespalib::FakeDoom>())
{
vectors.set(1, {2, 2}).set(2, {3, 2}).set(3, {2, 3})
.set(4, {1, 2}).set(5, {8, 3}).set(6, {7, 2})
@@ -173,8 +176,8 @@ public:
vespalib::eval::TypedCells qv_cells(qv_ref);
auto df = index->distance_function_factory().for_query_vector(qv_cells);
auto got_by_docid = (global_filter->is_active()) ?
- index->find_top_k_with_filter(k, *df, *global_filter, explore_k, 10000.0) :
- index->find_top_k(k, *df, explore_k, 10000.0);
+ index->find_top_k_with_filter(k, *df, *global_filter, explore_k, _doom->get_doom(), 10000.0) :
+ index->find_top_k(k, *df, explore_k, _doom->get_doom(), 10000.0);
std::vector<uint32_t> act;
act.reserve(got_by_docid.size());
for (auto& hit : got_by_docid) {
@@ -186,7 +189,7 @@ public:
uint32_t k = 3;
auto qv = vectors.get_vector(docid, 0);
auto df = index->distance_function_factory().for_query_vector(qv);
- auto rv = index->top_k_candidates(*df, k, global_filter->ptr_if_active()).peek();
+ auto rv = index->top_k_candidates(*df, k, global_filter->ptr_if_active(), _doom->get_doom()).peek();
std::sort(rv.begin(), rv.end(), LesserDistance());
size_t idx = 0;
for (const auto & hit : rv) {
@@ -197,25 +200,27 @@ public:
if (exp_hits.size() == k) {
std::vector<uint32_t> expected_by_docid = exp_hits;
std::sort(expected_by_docid.begin(), expected_by_docid.end());
- auto got_by_docid = index->find_top_k(k, *df, k, 100100.25);
+ auto got_by_docid = index->find_top_k(k, *df, k, _doom->get_doom(), 100100.25);
for (idx = 0; idx < k; ++idx) {
EXPECT_EQ(expected_by_docid[idx], got_by_docid[idx].docid);
}
}
- check_with_distance_threshold(docid);
+ if (!exp_hits.empty()) {
+ check_with_distance_threshold(docid);
+ }
}
void check_with_distance_threshold(uint32_t docid) {
auto qv = vectors.get_vector(docid, 0);
auto df = index->distance_function_factory().for_query_vector(qv);
uint32_t k = 3;
- auto rv = index->top_k_candidates(*df, k, global_filter->ptr_if_active()).peek();
+ auto rv = index->top_k_candidates(*df, k, global_filter->ptr_if_active(), _doom->get_doom()).peek();
std::sort(rv.begin(), rv.end(), LesserDistance());
EXPECT_EQ(rv.size(), 3);
EXPECT_LE(rv[0].distance, rv[1].distance);
double thr = (rv[0].distance + rv[1].distance) * 0.5;
auto got_by_docid = (global_filter->is_active())
- ? index->find_top_k_with_filter(k, *df, *global_filter, k, thr)
- : index->find_top_k(k, *df, k, thr);
+ ? index->find_top_k_with_filter(k, *df, *global_filter, k, _doom->get_doom(), thr)
+ : index->find_top_k(k, *df, k, _doom->get_doom(), thr);
EXPECT_EQ(got_by_docid.size(), 1);
EXPECT_EQ(got_by_docid[0].docid, index->get_docid(rv[0].nodeid));
for (const auto & hit : got_by_docid) {
@@ -262,6 +267,12 @@ public:
HnswIndexLoader<VectorBufferReader, IndexType::index_type> loader(graph, id_mapping, std::make_unique<VectorBufferReader>(data));
while (loader.load_next()) {}
}
+ void reset_doom() {
+ _doom = std::make_unique<vespalib::FakeDoom>();
+ }
+ void reset_doom(vespalib::steady_time::duration time_to_doom) {
+ _doom = std::make_unique<vespalib::FakeDoom>(time_to_doom);
+ }
static constexpr bool is_single = std::is_same_v<IndexType, HnswIndex<HnswIndexType::SINGLE>>;
};
@@ -334,6 +345,8 @@ TYPED_TEST(HnswIndexTest, 2d_vectors_inserted_in_level_0_graph_with_simple_selec
this->expect_top_3(7, {3, 2});
this->expect_top_3(8, {4, 3});
this->expect_top_3(9, {3, 2});
+ this->reset_doom(-1s);
+ this->expect_top_3(2, {});
}
TYPED_TEST(HnswIndexTest, 2d_vectors_inserted_and_removed)
@@ -824,6 +837,10 @@ TEST_F(HnswMultiIndexTest, duplicate_docid_is_removed)
this->expect_top_3_by_docid("{2, 0}", {2, 0}, {1, 2, 4});
this->expect_top_3_by_docid("{2, 1}", {2, 1}, {2, 3, 4});
this->expect_top_3_by_docid("{2, 2}", {2, 2}, {1, 3, 4});
+ this->reset_doom(-1s); // 1s beyond doom => no hits
+ this->expect_top_3_by_docid("{2, 2}", {2, 2}, {});
+ this->reset_doom();
+ this->expect_top_3_by_docid("{2, 2}", {2, 2}, {1, 3, 4});
auto filter = std::make_shared<MyGlobalFilter>(GlobalFilter::create({1, 2}, 3));
global_filter = filter;
this->expect_top_3_by_docid("{2,2}", {2, 2}, {1, 2});
diff --git a/searchlib/src/vespa/searchcommon/attribute/config.cpp b/searchlib/src/vespa/searchcommon/attribute/config.cpp
index 70c2377289f..91495025dee 100644
--- a/searchlib/src/vespa/searchcommon/attribute/config.cpp
+++ b/searchlib/src/vespa/searchcommon/attribute/config.cpp
@@ -65,4 +65,11 @@ Config::operator==(const Config &b) const
_hnsw_index_params == b._hnsw_index_params;
}
+Config&
+Config::set_hnsw_index_params(const HnswIndexParams& params) {
+ assert(_distance_metric == params.distance_metric());
+ _hnsw_index_params = params;
+ return *this;
+}
+
}
diff --git a/searchlib/src/vespa/searchcommon/attribute/config.h b/searchlib/src/vespa/searchcommon/attribute/config.h
index 0102f362532..32cac7ec9d6 100644
--- a/searchlib/src/vespa/searchcommon/attribute/config.h
+++ b/searchlib/src/vespa/searchcommon/attribute/config.h
@@ -10,7 +10,6 @@
#include <vespa/searchcommon/common/dictionary_config.h>
#include <vespa/eval/eval/value_type.h>
#include <vespa/vespalib/datastore/compaction_strategy.h>
-#include <cassert>
#include <optional>
namespace search::attribute {
@@ -72,11 +71,7 @@ public:
_distance_metric = value;
return *this;
}
- Config& set_hnsw_index_params(const HnswIndexParams& params) {
- assert(_distance_metric == params.distance_metric());
- _hnsw_index_params = params;
- return *this;
- }
+ Config& set_hnsw_index_params(const HnswIndexParams& params);
Config& clear_hnsw_index_params() {
_hnsw_index_params.reset();
return *this;
diff --git a/searchlib/src/vespa/searchcommon/attribute/iattributecontext.h b/searchlib/src/vespa/searchcommon/attribute/iattributecontext.h
index 9c89b6a0f8b..cf7b1d2f959 100644
--- a/searchlib/src/vespa/searchcommon/attribute/iattributecontext.h
+++ b/searchlib/src/vespa/searchcommon/attribute/iattributecontext.h
@@ -46,6 +46,11 @@ public:
virtual void releaseEnumGuards() {}
/**
+ * Must be called before the context is accessed by multiple threads.
+ */
+ virtual void enableMultiThreadSafe() {}
+
+ /**
* Virtual destructor to allow safe subclassing.
**/
virtual ~IAttributeContext() = default;
diff --git a/searchlib/src/vespa/searchlib/aggregation/groupinglevel.cpp b/searchlib/src/vespa/searchlib/aggregation/groupinglevel.cpp
index 73938437f3e..770834ba32f 100644
--- a/searchlib/src/vespa/searchlib/aggregation/groupinglevel.cpp
+++ b/searchlib/src/vespa/searchlib/aggregation/groupinglevel.cpp
@@ -19,7 +19,7 @@ GroupingLevel::GroupingLevel() :
_frozen(false),
_classify(),
_collect(),
- _grouper(NULL)
+ _grouper(nullptr)
{ }
GroupingLevel::~GroupingLevel() = default;
@@ -63,11 +63,6 @@ GroupingLevel::Grouper::Grouper(const Grouping * grouping, uint32_t level) :
{
}
-bool GroupingLevel::Grouper::isFrosen(size_t level) const
-{
- return level < _grouping->getFirstLevel();
-}
-
bool GroupingLevel::Grouper::hasNext(size_t level) const
{
return level < _grouping->getLevels().size();
@@ -77,7 +72,7 @@ template<typename Doc>
void GroupingLevel::SingleValueGrouper::groupDoc(Group & g, const ResultNode & result, const Doc & doc, HitRank rank) const
{
Group * next = g.groupSingle(result, rank, _grouping->getLevels()[_level]);
- if ((next != NULL) && doNext()) { // do next level ?
+ if ((next != nullptr) && doNext()) { // do next level ?
next->aggregate(*_grouping, _level + 1, doc, rank);
}
}
diff --git a/searchlib/src/vespa/searchlib/aggregation/groupinglevel.h b/searchlib/src/vespa/searchlib/aggregation/groupinglevel.h
index 3ef9610f560..f1e85b50ea4 100644
--- a/searchlib/src/vespa/searchlib/aggregation/groupinglevel.h
+++ b/searchlib/src/vespa/searchlib/aggregation/groupinglevel.h
@@ -28,10 +28,8 @@ private:
virtual Grouper * clone() const = 0;
protected:
Grouper(const Grouping * grouping, uint32_t level);
- bool isFrozen() const { return _frozen; }
bool hasNext() const { return _hasNext; }
bool doNext() const { return _doNext; }
- bool isFrosen(size_t level) const;
bool hasNext(size_t level) const;
const Grouping * _grouping;
uint32_t _level;
diff --git a/searchlib/src/vespa/searchlib/aggregation/perdocexpression.h b/searchlib/src/vespa/searchlib/aggregation/perdocexpression.h
index 7f98263197d..6128aa234ee 100644
--- a/searchlib/src/vespa/searchlib/aggregation/perdocexpression.h
+++ b/searchlib/src/vespa/searchlib/aggregation/perdocexpression.h
@@ -29,7 +29,6 @@
#include <vespa/searchlib/expression/catfunctionnode.h>
#include <vespa/searchlib/expression/xorbitfunctionnode.h>
#include <vespa/searchlib/expression/md5bitfunctionnode.h>
-#include <vespa/searchlib/expression/resultvector.h>
#include <vespa/searchlib/expression/fixedwidthbucketfunctionnode.h>
#include <vespa/searchlib/expression/rangebucketpredef.h>
#include <vespa/searchlib/expression/timestamp.h>
diff --git a/searchlib/src/vespa/searchlib/attribute/CMakeLists.txt b/searchlib/src/vespa/searchlib/attribute/CMakeLists.txt
index 350beca930e..6c1f4871161 100644
--- a/searchlib/src/vespa/searchlib/attribute/CMakeLists.txt
+++ b/searchlib/src/vespa/searchlib/attribute/CMakeLists.txt
@@ -36,6 +36,7 @@ vespa_add_library(searchlib_attribute OBJECT
createsinglefastsearch.cpp
createsinglestd.cpp
defines.cpp
+ distance_metric_utils.cpp
diversity.cpp
dociditerator.cpp
document_weight_or_filter_search.cpp
diff --git a/searchlib/src/vespa/searchlib/attribute/attribute_blueprint_factory.cpp b/searchlib/src/vespa/searchlib/attribute/attribute_blueprint_factory.cpp
index 9d1ec1b37a8..62f76b5cee0 100644
--- a/searchlib/src/vespa/searchlib/attribute/attribute_blueprint_factory.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/attribute_blueprint_factory.cpp
@@ -835,7 +835,8 @@ public:
n.get_explore_additional_hits(),
n.get_distance_threshold(),
getRequestContext().get_attribute_blueprint_params().global_filter_lower_limit,
- getRequestContext().get_attribute_blueprint_params().global_filter_upper_limit));
+ getRequestContext().get_attribute_blueprint_params().global_filter_upper_limit,
+ getRequestContext().getDoom()));
} catch (const vespalib::IllegalArgumentException& ex) {
return fail_nearest_neighbor_term(n, ex.getMessage());
@@ -860,6 +861,7 @@ void
CreateBlueprintVisitor::createShallowWeightedSet(WS *bp, MultiTerm &n, const FieldSpec &fs, bool isInteger) {
Blueprint::UP result(bp);
SearchContextParams scParams = createContextParams();
+ bp->reserve(n.getNumTerms());
for (uint32_t i(0); i < n.getNumTerms(); i++) {
FieldSpec childfs = bp->getNextChildField(fs);
auto term = n.getAsString(i);
diff --git a/searchlib/src/vespa/searchlib/attribute/attribute_header.cpp b/searchlib/src/vespa/searchlib/attribute/attribute_header.cpp
index 122c2c0c55e..f7736ffed0a 100644
--- a/searchlib/src/vespa/searchlib/attribute/attribute_header.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/attribute_header.cpp
@@ -1,9 +1,11 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "attribute_header.h"
+#include "distance_metric_utils.h"
#include <vespa/vespalib/data/fileheader.h>
#include <vespa/vespalib/data/databuffer.h>
-#include <vespa/vespalib/util/exceptions.h>
+
+using vespalib::GenericHeader;
namespace search::attribute {
@@ -24,13 +26,6 @@ const vespalib::string hnsw_index_value = "hnsw";
const vespalib::string hnsw_max_links_tag = "hnsw.max_links_per_node";
const vespalib::string hnsw_neighbors_to_explore_tag = "hnsw.neighbors_to_explore_at_insert";
const vespalib::string hnsw_distance_metric = "hnsw.distance_metric";
-const vespalib::string euclidean = "euclidean";
-const vespalib::string angular = "angular";
-const vespalib::string geodegrees = "geodegrees";
-const vespalib::string innerproduct = "innerproduct";
-const vespalib::string prenormalized_angular = "prenormalized_angular";
-const vespalib::string dotproduct = "dotproduct";
-const vespalib::string hamming = "hamming";
const vespalib::string doc_id_limit_tag = "docIdLimit";
const vespalib::string enumerated_tag = "enumerated";
const vespalib::string unique_value_count_tag = "uniqueValueCount";
@@ -57,7 +52,8 @@ AttributeHeader::AttributeHeader(const vespalib::string &fileName)
_uniqueValueCount(0),
_totalValueCount(0),
_createSerialNum(0u),
- _version(0)
+ _version(0),
+ _extra_tags()
{
}
@@ -92,47 +88,6 @@ AttributeHeader::AttributeHeader(const vespalib::string &fileName,
AttributeHeader::~AttributeHeader() = default;
-namespace {
-
-vespalib::string
-to_string(DistanceMetric metric)
-{
- switch (metric) {
- case DistanceMetric::Euclidean: return euclidean;
- case DistanceMetric::Angular: return angular;
- case DistanceMetric::GeoDegrees: return geodegrees;
- case DistanceMetric::InnerProduct: return innerproduct;
- case DistanceMetric::Hamming: return hamming;
- case DistanceMetric::PrenormalizedAngular: return prenormalized_angular;
- case DistanceMetric::Dotproduct: return dotproduct;
- }
- throw vespalib::IllegalArgumentException("Unknown distance metric " + std::to_string(static_cast<int>(metric)));
-}
-
-DistanceMetric
-to_distance_metric(const vespalib::string& metric)
-{
- if (metric == euclidean) {
- return DistanceMetric::Euclidean;
- } else if (metric == angular) {
- return DistanceMetric::Angular;
- } else if (metric == geodegrees) {
- return DistanceMetric::GeoDegrees;
- } else if (metric == innerproduct) {
- return DistanceMetric::InnerProduct;
- } else if (metric == prenormalized_angular) {
- return DistanceMetric::PrenormalizedAngular;
- } else if (metric == dotproduct) {
- return DistanceMetric::Dotproduct;
- } else if (metric == hamming) {
- return DistanceMetric::Hamming;
- } else {
- throw vespalib::IllegalStateException("Unknown distance metric '" + metric + "'");
- }
-}
-
-}
-
void
AttributeHeader::internalExtractTags(const vespalib::GenericHeader &header)
{
@@ -164,7 +119,7 @@ AttributeHeader::internalExtractTags(const vespalib::GenericHeader &header)
uint32_t max_links = header.getTag(hnsw_max_links_tag).asInteger();
uint32_t neighbors_to_explore = header.getTag(hnsw_neighbors_to_explore_tag).asInteger();
- DistanceMetric distance_metric = to_distance_metric(header.getTag(hnsw_distance_metric).asString());
+ DistanceMetric distance_metric = DistanceMetricUtils::to_distance_metric(header.getTag(hnsw_distance_metric).asString());
_hnsw_index_params.emplace(max_links, neighbors_to_explore, distance_metric);
}
}
@@ -235,7 +190,7 @@ AttributeHeader::addTags(vespalib::GenericHeader &header) const
const auto& params = *_hnsw_index_params;
header.putTag(Tag(hnsw_max_links_tag, params.max_links_per_node()));
header.putTag(Tag(hnsw_neighbors_to_explore_tag, params.neighbors_to_explore_at_insert()));
- header.putTag(Tag(hnsw_distance_metric, to_string(params.distance_metric())));
+ header.putTag(Tag(hnsw_distance_metric, DistanceMetricUtils::to_string(params.distance_metric())));
}
}
if (_basicType.type() == attribute::BasicType::Type::PREDICATE) {
@@ -244,6 +199,10 @@ AttributeHeader::addTags(vespalib::GenericHeader &header) const
header.putTag(Tag(predicateLowerBoundTag, params.lower_bound()));
header.putTag(Tag(predicateUpperBoundTag, params.upper_bound()));
}
+ for (uint32_t i = 0; i < _extra_tags.getNumTags(); ++i) {
+ auto& tag = _extra_tags.getTag(i);
+ header.putTag(tag);
+ }
}
bool
diff --git a/searchlib/src/vespa/searchlib/attribute/attribute_header.h b/searchlib/src/vespa/searchlib/attribute/attribute_header.h
index 7c0b8f3084b..8c5a0edc6a6 100644
--- a/searchlib/src/vespa/searchlib/attribute/attribute_header.h
+++ b/searchlib/src/vespa/searchlib/attribute/attribute_header.h
@@ -2,16 +2,15 @@
#pragma once
-#include <vespa/vespalib/stllike/string.h>
+#include <vespa/eval/eval/value_type.h>
#include <vespa/searchcommon/attribute/basictype.h>
#include <vespa/searchcommon/attribute/collectiontype.h>
#include <vespa/searchcommon/attribute/hnsw_index_params.h>
#include <vespa/searchcommon/attribute/predicate_params.h>
-#include <vespa/eval/eval/value_type.h>
+#include <vespa/vespalib/data/fileheader.h>
+#include <vespa/vespalib/stllike/string.h>
#include <optional>
-namespace vespalib { class GenericHeader; }
-
namespace search::attribute {
/**
@@ -34,6 +33,7 @@ private:
uint64_t _totalValueCount;
uint64_t _createSerialNum;
uint32_t _version;
+ vespalib::GenericHeader _extra_tags;
void internalExtractTags(const vespalib::GenericHeader &header);
public:
@@ -71,6 +71,7 @@ public:
const std::optional<HnswIndexParams>& get_hnsw_index_params() const { return _hnsw_index_params; }
static AttributeHeader extractTags(const vespalib::GenericHeader &header, const vespalib::string &file_name);
void addTags(vespalib::GenericHeader &header) const;
+ vespalib::GenericHeader& get_extra_tags() noexcept { return _extra_tags; }
};
}
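
The new _extra_tags member lets callers (for example an index saver) attach additional tags that addTags() then copies into the attribute file header, such as the hnsw.max_squared_norm tag checked in the MIPS test earlier in this change. A hedged sketch, assuming the vespalib::GenericHeader::Tag constructors used elsewhere in attribute_header.cpp also accept a floating point value:

    using Tag = vespalib::GenericHeader::Tag;

    // Illustrative: attach an extra tag before the header is written out.
    void add_max_squared_norm(search::attribute::AttributeHeader& header, double max_sq_norm) {
        header.get_extra_tags().putTag(Tag("hnsw.max_squared_norm", max_sq_norm));
    }
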
diff --git a/searchlib/src/vespa/searchlib/attribute/attributecontext.cpp b/searchlib/src/vespa/searchlib/attribute/attributecontext.cpp
index 598fdc7ac40..97a7dc8bcb1 100644
--- a/searchlib/src/vespa/searchlib/attribute/attributecontext.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/attributecontext.cpp
@@ -30,11 +30,18 @@ AttributeContext::getAttribute(AttributeMap & map, const string & name, bool sta
}
}
-AttributeContext::AttributeContext(const IAttributeManager & manager) :
- _manager(manager),
- _attributes(),
- _enumAttributes(),
- _cacheLock()
+const IAttributeVector *
+AttributeContext::getAttributeMtSafe(AttributeMap &map, const string &name, bool stableEnum) const {
+ std::lock_guard<std::mutex> guard(_cacheLock);
+ return getAttribute(map, name, stableEnum);
+}
+
+AttributeContext::AttributeContext(const IAttributeManager & manager)
+ : _manager(manager),
+ _mtSafe(false),
+ _attributes(),
+ _enumAttributes(),
+ _cacheLock()
{ }
AttributeContext::~AttributeContext() = default;
@@ -42,20 +49,26 @@ AttributeContext::~AttributeContext() = default;
const IAttributeVector *
AttributeContext::getAttribute(const string & name) const
{
- std::lock_guard<std::mutex> guard(_cacheLock);
- return getAttribute(_attributes, name, false);
+ return _mtSafe
+ ? getAttributeMtSafe(_attributes, name, false)
+ : getAttribute(_attributes, name, false);
}
const IAttributeVector *
AttributeContext::getAttributeStableEnum(const string & name) const
{
- std::lock_guard<std::mutex> guard(_cacheLock);
- return getAttribute(_enumAttributes, name, true);
+ return _mtSafe
+ ? getAttributeMtSafe(_enumAttributes, name, true)
+ : getAttribute(_enumAttributes, name, true);
}
void AttributeContext::releaseEnumGuards() {
- std::lock_guard<std::mutex> guard(_cacheLock);
- _enumAttributes.clear();
+ if (_mtSafe) {
+ std::lock_guard<std::mutex> guard(_cacheLock);
+ _enumAttributes.clear();
+ } else {
+ _enumAttributes.clear();
+ }
}
void
diff --git a/searchlib/src/vespa/searchlib/attribute/attributecontext.h b/searchlib/src/vespa/searchlib/attribute/attributecontext.h
index 4ba3d07ef74..28b05a76f65 100644
--- a/searchlib/src/vespa/searchlib/attribute/attributecontext.h
+++ b/searchlib/src/vespa/searchlib/attribute/attributecontext.h
@@ -21,12 +21,13 @@ private:
using IAttributeFunctor = attribute::IAttributeFunctor;
const IAttributeManager & _manager;
- mutable AttributeMap _attributes;
- mutable AttributeMap _enumAttributes;
- mutable std::mutex _cacheLock;
+ bool _mtSafe;
+ mutable AttributeMap _attributes;
+ mutable AttributeMap _enumAttributes;
+ mutable std::mutex _cacheLock;
const IAttributeVector *getAttribute(AttributeMap & map, const string & name, bool stableEnum) const;
-
+ const IAttributeVector *getAttributeMtSafe(AttributeMap & map, const string & name, bool stableEnum) const;
public:
AttributeContext(const IAttributeManager & manager);
~AttributeContext() override;
@@ -37,6 +38,7 @@ public:
const attribute::IAttributeVector * getAttributeStableEnum(const string & name) const override;
void getAttributeList(std::vector<const IAttributeVector *> & list) const override;
void releaseEnumGuards() override;
+ void enableMultiThreadSafe() override { _mtSafe = true; }
// Give access to the underlying manager
const IAttributeManager & getManager() const { return _manager; }
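
A hedged sketch of the intended call pattern for the new flag, assuming the search::attribute::IAttributeContext interface shown earlier in this change (setup happens single-threaded, and the context is only flipped to lock-guarded lookups when more than one matching thread will touch it, mirroring the Matcher::match change above):

    void prepare_for_matching(search::attribute::IAttributeContext& ctx, size_t num_threads) {
        if (num_threads > 1) {
            ctx.enableMultiThreadSafe();   // subsequent attribute lookups take the cache lock
        }
        // ... hand ctx to the matching threads ...
    }
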
diff --git a/searchlib/src/vespa/searchlib/attribute/attributemanager.cpp b/searchlib/src/vespa/searchlib/attribute/attributemanager.cpp
index 2db35d1fd1e..8c1b453c354 100644
--- a/searchlib/src/vespa/searchlib/attribute/attributemanager.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/attributemanager.cpp
@@ -151,12 +151,11 @@ AttributeManager::getAttributeRef(const string & name) const
AttributeGuard::UP
AttributeManager::getAttribute(const string & name) const
{
- AttributeGuard::UP attrGuard(new AttributeGuard(VectorHolder()));
const VectorHolder * vh = findAndLoadAttribute(name);
if ( vh != nullptr ) {
- attrGuard.reset(new AttributeGuard(*vh));
+ return std::make_unique<AttributeGuard>(*vh);
}
- return attrGuard;
+ return std::make_unique<AttributeGuard>();
}
std::unique_ptr<attribute::AttributeReadGuard>
diff --git a/searchlib/src/vespa/searchlib/attribute/attributevector.h b/searchlib/src/vespa/searchlib/attribute/attributevector.h
index 6bab0f278ca..e3a7fdeb2c3 100644
--- a/searchlib/src/vespa/searchlib/attribute/attributevector.h
+++ b/searchlib/src/vespa/searchlib/attribute/attributevector.h
@@ -440,7 +440,13 @@ private:
vespalib::steady_time _nextStatUpdateTime;
std::shared_ptr<vespalib::alloc::MemoryAllocator> _memory_allocator;
-////// Locking strategy interface. only available from the Guards.
+ /// Clean up [0, firstUsed>
+ virtual void reclaim_memory(generation_t oldest_used_gen);
+ virtual void before_inc_generation(generation_t current_gen);
+ virtual void onUpdateStat() = 0;
+ friend class AttributeTest;
+public:
+ ////// Locking strategy interface.
/**
* Used to guard that a value you reference will always reference
* a value. It might not be the same value, but at least it will
@@ -448,22 +454,6 @@ private:
* the guard is alive.
*/
GenerationHandler::Guard takeGenerationGuard() { return _genHandler.takeGuard(); }
-
- /// Clean up [0, firstUsed>
- virtual void reclaim_memory(generation_t oldest_used_gen);
- virtual void before_inc_generation(generation_t current_gen);
- virtual void onUpdateStat() = 0;
- /**
- * Used to regulate access to critical resources. Apply the
- * reader/writer guards.
- */
- std::shared_mutex & getEnumLock() { return _enumLock; }
-
- friend class ComponentGuard<AttributeVector>;
- friend class AttributeValueGuard;
- friend class AttributeTest;
- friend class AttributeManagerTest;
-public:
bool headerTypeOK(const vespalib::GenericHeader &header) const;
bool hasMultiValue() const override final;
bool hasWeightedSetType() const override final;
diff --git a/searchlib/src/vespa/searchlib/attribute/bitvector_search_cache.cpp b/searchlib/src/vespa/searchlib/attribute/bitvector_search_cache.cpp
index 75e0a82ff1d..70d34eef2ca 100644
--- a/searchlib/src/vespa/searchlib/attribute/bitvector_search_cache.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/bitvector_search_cache.cpp
@@ -3,51 +3,45 @@
#include "bitvector_search_cache.h"
#include <vespa/searchlib/common/bitvector.h>
#include <vespa/vespalib/stllike/hash_map.hpp>
+#include <mutex>
namespace search::attribute {
-using BitVectorSP = BitVectorSearchCache::BitVectorSP;
-
BitVectorSearchCache::BitVectorSearchCache()
: _mutex(),
+ _size(0),
_cache()
-{
-}
+{}
-BitVectorSearchCache::~BitVectorSearchCache()
-{
-}
+BitVectorSearchCache::~BitVectorSearchCache() = default;
void
-BitVectorSearchCache::insert(const vespalib::string &term, Entry::SP entry)
+BitVectorSearchCache::insert(const vespalib::string &term, std::shared_ptr<Entry> entry)
{
- LockGuard guard(_mutex);
+ std::unique_lock guard(_mutex);
_cache.insert(std::make_pair(term, std::move(entry)));
+ _size.store(_cache.size());
}
-BitVectorSearchCache::Entry::SP
+std::shared_ptr<BitVectorSearchCache::Entry>
BitVectorSearchCache::find(const vespalib::string &term) const
{
- LockGuard guard(_mutex);
- auto itr = _cache.find(term);
- if (itr != _cache.end()) {
- return itr->second;
+ if (size() > 0ul) {
+ std::shared_lock guard(_mutex);
+ auto itr = _cache.find(term);
+ if (itr != _cache.end()) {
+ return itr->second;
+ }
}
- return Entry::SP();
-}
-
-size_t
-BitVectorSearchCache::size() const
-{
- LockGuard guard(_mutex);
- return _cache.size();
+ return {};
}
void
BitVectorSearchCache::clear()
{
- LockGuard guard(_mutex);
+ std::unique_lock guard(_mutex);
_cache.clear();
+ _size.store(0ul, std::memory_order_relaxed);
}
}
diff --git a/searchlib/src/vespa/searchlib/attribute/bitvector_search_cache.h b/searchlib/src/vespa/searchlib/attribute/bitvector_search_cache.h
index 3936fb2ee67..455c27459cd 100644
--- a/searchlib/src/vespa/searchlib/attribute/bitvector_search_cache.h
+++ b/searchlib/src/vespa/searchlib/attribute/bitvector_search_cache.h
@@ -6,7 +6,8 @@
#include <vespa/vespalib/stllike/hash_map.h>
#include <vespa/vespalib/stllike/string.h>
#include <memory>
-#include <mutex>
+#include <shared_mutex>
+#include <atomic>
namespace search { class BitVector; }
namespace search::attribute {
@@ -22,7 +23,6 @@ public:
using ReadGuardSP = IDocumentMetaStoreContext::IReadGuard::SP;
struct Entry {
- using SP = std::shared_ptr<Entry>;
// We need to keep a document meta store read guard to ensure that no lids that are cached
// in the bit vector are re-used until the guard is released.
ReadGuardSP dmsReadGuard;
@@ -33,18 +33,18 @@ public:
};
private:
- using LockGuard = std::lock_guard<std::mutex>;
- using Cache = vespalib::hash_map<vespalib::string, Entry::SP>;
+ using Cache = vespalib::hash_map<vespalib::string, std::shared_ptr<Entry>>;
- mutable std::mutex _mutex;
+ mutable std::shared_mutex _mutex;
+ std::atomic<uint64_t> _size;
Cache _cache;
public:
BitVectorSearchCache();
~BitVectorSearchCache();
- void insert(const vespalib::string &term, Entry::SP entry);
- Entry::SP find(const vespalib::string &term) const;
- size_t size() const;
+ void insert(const vespalib::string &term, std::shared_ptr<Entry> entry);
+ std::shared_ptr<Entry> find(const vespalib::string &term) const;
+ size_t size() const { return _size.load(std::memory_order_relaxed); }
void clear();
};
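
The rewritten cache avoids taking any lock on the common empty-cache path: size() is a relaxed atomic read, and only a non-zero size leads to a shared_lock around the hash lookup, while writers take the exclusive lock and refresh the counter. A standalone sketch of the same pattern (illustrative, not the Vespa class itself):

    #include <atomic>
    #include <memory>
    #include <mutex>
    #include <shared_mutex>
    #include <string>
    #include <unordered_map>

    class TinyCache {
        mutable std::shared_mutex _mutex;
        std::atomic<uint64_t> _size{0};
        std::unordered_map<std::string, std::shared_ptr<int>> _cache;
    public:
        void insert(const std::string& key, std::shared_ptr<int> value) {
            std::unique_lock guard(_mutex);                 // exclusive: writers are rare
            _cache.emplace(key, std::move(value));
            _size.store(_cache.size(), std::memory_order_relaxed);
        }
        std::shared_ptr<int> find(const std::string& key) const {
            if (_size.load(std::memory_order_relaxed) == 0) {
                return {};                                  // fast path: no lock when empty
            }
            std::shared_lock guard(_mutex);                 // shared: readers do not block each other
            auto itr = _cache.find(key);
            return (itr != _cache.end()) ? itr->second : nullptr;
        }
    };
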
diff --git a/searchlib/src/vespa/searchlib/attribute/createsinglestd.cpp b/searchlib/src/vespa/searchlib/attribute/createsinglestd.cpp
index 5bfa44a2ff8..b3f45165f73 100644
--- a/searchlib/src/vespa/searchlib/attribute/createsinglestd.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/createsinglestd.cpp
@@ -61,9 +61,4 @@ AttributeFactory::createSingleStd(stringref name, const Config & info)
return AttributeVector::SP();
}
-template class SingleValueNumericAttribute<IntegerAttributeTemplate<int8_t>>;
-template class SingleValueNumericAttribute<IntegerAttributeTemplate<int16_t>>;
-template class SingleValueNumericAttribute<IntegerAttributeTemplate<int32_t>>;
-template class SingleValueNumericAttribute<IntegerAttributeTemplate<int64_t>>;
-
-} // namespace search
+}
diff --git a/searchlib/src/vespa/searchlib/attribute/distance_metric_utils.cpp b/searchlib/src/vespa/searchlib/attribute/distance_metric_utils.cpp
new file mode 100644
index 00000000000..8044f6aee3f
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/attribute/distance_metric_utils.cpp
@@ -0,0 +1,57 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "distance_metric_utils.h"
+#include <vespa/vespalib/util/exceptions.h>
+
+namespace search::attribute {
+
+namespace {
+
+const vespalib::string euclidean = "euclidean";
+const vespalib::string angular = "angular";
+const vespalib::string geodegrees = "geodegrees";
+const vespalib::string innerproduct = "innerproduct";
+const vespalib::string prenormalized_angular = "prenormalized_angular";
+const vespalib::string dotproduct = "dotproduct";
+const vespalib::string hamming = "hamming";
+
+}
+
+vespalib::string
+DistanceMetricUtils::to_string(DistanceMetric metric)
+{
+ switch (metric) {
+ case DistanceMetric::Euclidean: return euclidean;
+ case DistanceMetric::Angular: return angular;
+ case DistanceMetric::GeoDegrees: return geodegrees;
+ case DistanceMetric::InnerProduct: return innerproduct;
+ case DistanceMetric::Hamming: return hamming;
+ case DistanceMetric::PrenormalizedAngular: return prenormalized_angular;
+ case DistanceMetric::Dotproduct: return dotproduct;
+ }
+ throw vespalib::IllegalArgumentException("Unknown distance metric " + std::to_string(static_cast<int>(metric)));
+}
+
+DistanceMetric
+DistanceMetricUtils::to_distance_metric(const vespalib::string& metric)
+{
+ if (metric == euclidean) {
+ return DistanceMetric::Euclidean;
+ } else if (metric == angular) {
+ return DistanceMetric::Angular;
+ } else if (metric == geodegrees) {
+ return DistanceMetric::GeoDegrees;
+ } else if (metric == innerproduct) {
+ return DistanceMetric::InnerProduct;
+ } else if (metric == prenormalized_angular) {
+ return DistanceMetric::PrenormalizedAngular;
+ } else if (metric == dotproduct) {
+ return DistanceMetric::Dotproduct;
+ } else if (metric == hamming) {
+ return DistanceMetric::Hamming;
+ } else {
+ throw vespalib::IllegalStateException("Unknown distance metric '" + metric + "'");
+ }
+}
+
+}
diff --git a/searchlib/src/vespa/searchlib/attribute/distance_metric_utils.h b/searchlib/src/vespa/searchlib/attribute/distance_metric_utils.h
new file mode 100644
index 00000000000..68ba5aa6c23
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/attribute/distance_metric_utils.h
@@ -0,0 +1,16 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/searchcommon/attribute/distance_metric.h>
+#include <vespa/vespalib/stllike/string.h>
+
+namespace search::attribute {
+
+class DistanceMetricUtils {
+public:
+ static vespalib::string to_string(DistanceMetric metric);
+ static DistanceMetric to_distance_metric(const vespalib::string& metric);
+};
+
+}
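
A short usage sketch of the relocated helpers (the strings and exceptions match the implementation above; illustrative only):

    #include <vespa/searchlib/attribute/distance_metric_utils.h>

    using search::attribute::DistanceMetric;
    using search::attribute::DistanceMetricUtils;

    void metric_round_trip() {
        // Round-trip between the enum and the string stored in the attribute header.
        vespalib::string name = DistanceMetricUtils::to_string(DistanceMetric::Dotproduct); // "dotproduct"
        DistanceMetric metric = DistanceMetricUtils::to_distance_metric(name);
        (void) metric; // DistanceMetric::Dotproduct; unknown strings throw IllegalStateException
    }
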
diff --git a/searchlib/src/vespa/searchlib/attribute/enumcomparator.cpp b/searchlib/src/vespa/searchlib/attribute/enumcomparator.cpp
index 0f52b64ede0..5a5702f50e8 100644
--- a/searchlib/src/vespa/searchlib/attribute/enumcomparator.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/enumcomparator.cpp
@@ -6,18 +6,6 @@
namespace search {
template <typename EntryT>
-EnumStoreComparator<EntryT>::EnumStoreComparator(const DataStoreType& data_store, const EntryT& fallback_value)
- : ParentType(data_store, fallback_value)
-{
-}
-
-template <typename EntryT>
-EnumStoreComparator<EntryT>::EnumStoreComparator(const DataStoreType& data_store)
- : ParentType(data_store)
-{
-}
-
-template <typename EntryT>
bool
EnumStoreComparator<EntryT>::equal_helper(const EntryT& lhs, const EntryT& rhs)
{
diff --git a/searchlib/src/vespa/searchlib/attribute/enumcomparator.h b/searchlib/src/vespa/searchlib/attribute/enumcomparator.h
index 5df85cd1c57..546ac82e389 100644
--- a/searchlib/src/vespa/searchlib/attribute/enumcomparator.h
+++ b/searchlib/src/vespa/searchlib/attribute/enumcomparator.h
@@ -18,8 +18,12 @@ public:
using ParentType = vespalib::datastore::UniqueStoreComparator<EntryT, IEnumStore::InternalIndex>;
using DataStoreType = typename ParentType::DataStoreType;
- EnumStoreComparator(const DataStoreType& data_store, const EntryT& fallback_value);
- EnumStoreComparator(const DataStoreType& data_store);
+ EnumStoreComparator(const DataStoreType& data_store, const EntryT& fallback_value)
+ : ParentType(data_store, fallback_value)
+ {}
+ EnumStoreComparator(const DataStoreType& data_store)
+ : ParentType(data_store)
+ {}
static bool equal_helper(const EntryT& lhs, const EntryT& rhs);
};
diff --git a/searchlib/src/vespa/searchlib/attribute/imported_attribute_vector_read_guard.cpp b/searchlib/src/vespa/searchlib/attribute/imported_attribute_vector_read_guard.cpp
index b50a3720ff8..0a5e2e91446 100644
--- a/searchlib/src/vespa/searchlib/attribute/imported_attribute_vector_read_guard.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/imported_attribute_vector_read_guard.cpp
@@ -18,7 +18,7 @@ ImportedAttributeVectorReadGuard::ImportedAttributeVectorReadGuard(std::shared_p
_imported_attribute(imported_attribute),
_targetLids(),
_target_docid_limit(0u),
- _reference_attribute_guard(imported_attribute.getReferenceAttribute()),
+ _reference_attribute_guard(imported_attribute.getReferenceAttribute()->takeGenerationGuard()),
_target_attribute_guard(imported_attribute.getTargetAttribute()->makeReadGuard(stableEnumGuard)),
_reference_attribute(*imported_attribute.getReferenceAttribute()),
_target_attribute(*_target_attribute_guard->attribute())
diff --git a/searchlib/src/vespa/searchlib/attribute/imported_attribute_vector_read_guard.h b/searchlib/src/vespa/searchlib/attribute/imported_attribute_vector_read_guard.h
index 5889934fa23..c984725c6e4 100644
--- a/searchlib/src/vespa/searchlib/attribute/imported_attribute_vector_read_guard.h
+++ b/searchlib/src/vespa/searchlib/attribute/imported_attribute_vector_read_guard.h
@@ -96,7 +96,7 @@ private:
const ImportedAttributeVector &_imported_attribute;
TargetLids _targetLids;
uint32_t _target_docid_limit;
- AttributeGuard _reference_attribute_guard;
+ vespalib::GenerationHandler::Guard _reference_attribute_guard;
std::unique_ptr<attribute::AttributeReadGuard> _target_attribute_guard;
const ReferenceAttribute &_reference_attribute;
protected:
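
Note: the two hunks above replace the full AttributeGuard on the reference attribute with just its generation guard, presumably because the enclosing imported attribute already keeps the reference attribute alive and the read path only needs its memory generations pinned. A minimal sketch of the idiom, with a hypothetical SomeReader class standing in for the real read guard:

    // Illustrative only: holding the guard keeps the attribute's current memory
    // generation alive for the lifetime of the reader.
    class SomeReader {
        vespalib::GenerationHandler::Guard _reference_attribute_guard;
    public:
        explicit SomeReader(const ReferenceAttribute &ref)
            : _reference_attribute_guard(ref.takeGenerationGuard())   // as in the hunk above
        {}
        // data observed while the guard is held is not reclaimed by concurrent writers
    };
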
diff --git a/searchlib/src/vespa/searchlib/attribute/imported_search_context.h b/searchlib/src/vespa/searchlib/attribute/imported_search_context.h
index 77d09c55a41..3cbc9a3d97e 100644
--- a/searchlib/src/vespa/searchlib/attribute/imported_search_context.h
+++ b/searchlib/src/vespa/searchlib/attribute/imported_search_context.h
@@ -32,7 +32,7 @@ class ImportedSearchContext : public ISearchContext {
const ImportedAttributeVector& _imported_attribute;
vespalib::string _queryTerm;
bool _useSearchCache;
- BitVectorSearchCache::Entry::SP _searchCacheLookup;
+ std::shared_ptr<BitVectorSearchCache::Entry> _searchCacheLookup;
IDocumentMetaStoreContext::IReadGuard::SP _dmsReadGuardFallback;
const ReferenceAttribute& _reference_attribute;
const IAttributeVector &_target_attribute;
diff --git a/searchlib/src/vespa/searchlib/attribute/load_utils.hpp b/searchlib/src/vespa/searchlib/attribute/load_utils.hpp
index 463a62ab01a..614e327942a 100644
--- a/searchlib/src/vespa/searchlib/attribute/load_utils.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/load_utils.hpp
@@ -5,6 +5,7 @@
#include "load_utils.h"
#include "attributevector.h"
#include <vespa/searchcommon/attribute/multivalue.h>
+#include <cassert>
namespace search::attribute {
diff --git a/searchlib/src/vespa/searchlib/attribute/postinglistsearchcontext.cpp b/searchlib/src/vespa/searchlib/attribute/postinglistsearchcontext.cpp
index 0c34ae6e330..12c887eb407 100644
--- a/searchlib/src/vespa/searchlib/attribute/postinglistsearchcontext.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/postinglistsearchcontext.cpp
@@ -10,28 +10,24 @@ namespace search::attribute {
using vespalib::btree::BTreeNode;
PostingListSearchContext::
-PostingListSearchContext(const IEnumStoreDictionary& dictionary,
- uint32_t docIdLimit,
- uint64_t numValues,
- bool hasWeight,
- bool useBitVector,
- const ISearchContext &baseSearchCtx)
+PostingListSearchContext(const IEnumStoreDictionary& dictionary, bool has_btree_dictionary, uint32_t docIdLimit,
+ uint64_t numValues, bool hasWeight, bool useBitVector, const ISearchContext &baseSearchCtx)
: _dictionary(dictionary),
- _frozenDictionary(_dictionary.get_has_btree_dictionary() ? _dictionary.get_posting_dictionary().getFrozenView() : FrozenDictionary()),
- _lowerDictItr(_dictionary.get_has_btree_dictionary() ? DictionaryConstIterator(BTreeNode::Ref(), _frozenDictionary.getAllocator()) : DictionaryConstIterator()),
- _upperDictItr(_dictionary.get_has_btree_dictionary() ? DictionaryConstIterator(BTreeNode::Ref(), _frozenDictionary.getAllocator()) : DictionaryConstIterator()),
+ _baseSearchCtx(baseSearchCtx),
+ _bv(nullptr),
+ _frozenDictionary(has_btree_dictionary ? _dictionary.get_posting_dictionary().getFrozenView() : FrozenDictionary()),
+ _lowerDictItr(has_btree_dictionary ? DictionaryConstIterator(BTreeNode::Ref(), _frozenDictionary.getAllocator()) : DictionaryConstIterator()),
+ _upperDictItr(has_btree_dictionary ? DictionaryConstIterator(BTreeNode::Ref(), _frozenDictionary.getAllocator()) : DictionaryConstIterator()),
+ _numValues(numValues),
_uniqueValues(0u),
_docIdLimit(docIdLimit),
_dictSize(_frozenDictionary.size()),
- _numValues(numValues),
- _hasWeight(hasWeight),
- _useBitVector(useBitVector),
_pidx(),
_frozenRoot(),
_FSTC(0.0),
_PLSTC(0.0),
- _bv(nullptr),
- _baseSearchCtx(baseSearchCtx)
+ _hasWeight(hasWeight),
+ _useBitVector(useBitVector)
{
}
diff --git a/searchlib/src/vespa/searchlib/attribute/postinglistsearchcontext.h b/searchlib/src/vespa/searchlib/attribute/postinglistsearchcontext.h
index d0a8958f615..107abd24069 100644
--- a/searchlib/src/vespa/searchlib/attribute/postinglistsearchcontext.h
+++ b/searchlib/src/vespa/searchlib/attribute/postinglistsearchcontext.h
@@ -32,26 +32,25 @@ protected:
using FrozenDictionary = Dictionary::FrozenView;
using EnumIndex = IEnumStore::Index;
- const IEnumStoreDictionary& _dictionary;
- const FrozenDictionary _frozenDictionary;
+ const IEnumStoreDictionary & _dictionary;
+ const ISearchContext &_baseSearchCtx;
+ const BitVector *_bv; // bitvector if _useBitVector has been set
+ const FrozenDictionary _frozenDictionary;
DictionaryConstIterator _lowerDictItr;
DictionaryConstIterator _upperDictItr;
+ uint64_t _numValues; // attr.getStatus().getNumValues();
uint32_t _uniqueValues;
uint32_t _docIdLimit;
uint32_t _dictSize;
- uint64_t _numValues; // attr.getStatus().getNumValues();
- bool _hasWeight;
- bool _useBitVector;
vespalib::datastore::EntryRef _pidx;
vespalib::datastore::EntryRef _frozenRoot; // Posting list in tree form
float _FSTC; // Filtering Search Time Constant
float _PLSTC; // Posting List Search Time Constant
- const BitVector *_bv; // bitvector if _useBitVector has been set
- const ISearchContext &_baseSearchCtx;
-
+ bool _hasWeight;
+ bool _useBitVector;
- PostingListSearchContext(const IEnumStoreDictionary& dictionary, uint32_t docIdLimit, uint64_t numValues, bool hasWeight,
- bool useBitVector, const ISearchContext &baseSearchCtx);
+ PostingListSearchContext(const IEnumStoreDictionary& dictionary, bool has_btree_dictionary, uint32_t docIdLimit,
+ uint64_t numValues, bool hasWeight, bool useBitVector, const ISearchContext &baseSearchCtx);
~PostingListSearchContext() override;
diff --git a/searchlib/src/vespa/searchlib/attribute/postinglistsearchcontext.hpp b/searchlib/src/vespa/searchlib/attribute/postinglistsearchcontext.hpp
index 98f89f9080f..d32d8cde7ea 100644
--- a/searchlib/src/vespa/searchlib/attribute/postinglistsearchcontext.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/postinglistsearchcontext.hpp
@@ -22,9 +22,8 @@ namespace search::attribute {
template <typename DataT>
PostingListSearchContextT<DataT>::
PostingListSearchContextT(const IEnumStoreDictionary& dictionary, uint32_t docIdLimit, uint64_t numValues, bool hasWeight,
- const PostingList &postingList,
- bool useBitVector, const ISearchContext &searchContext)
- : PostingListSearchContext(dictionary, docIdLimit, numValues, hasWeight, useBitVector, searchContext),
+ const PostingList &postingList, bool useBitVector, const ISearchContext &searchContext)
+ : PostingListSearchContext(dictionary, dictionary.get_has_btree_dictionary(), docIdLimit, numValues, hasWeight, useBitVector, searchContext),
_postingList(postingList),
_merger(docIdLimit)
{
diff --git a/searchlib/src/vespa/searchlib/attribute/readerbase.cpp b/searchlib/src/vespa/searchlib/attribute/readerbase.cpp
index e4bc2c02ad6..382d9ccb110 100644
--- a/searchlib/src/vespa/searchlib/attribute/readerbase.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/readerbase.cpp
@@ -6,6 +6,7 @@
#include <vespa/fastlib/io/bufferedfile.h>
#include <vespa/searchlib/util/filesizecalculator.h>
#include <vespa/vespalib/util/size_literals.h>
+#include <cassert>
#include <vespa/log/log.h>
LOG_SETUP(".search.attribute.readerbase");
@@ -73,6 +74,13 @@ ReaderBase::ReaderBase(AttributeVector &attr)
ReaderBase::~ReaderBase() = default;
+size_t
+ReaderBase::getEnumCount() const {
+ size_t dataSize = _datFile.data_size();
+ assert((dataSize % sizeof(uint32_t)) == 0);
+ return dataSize / sizeof(uint32_t);
+}
+
bool
ReaderBase::hasWeight() const {
return _weightFile.valid();
diff --git a/searchlib/src/vespa/searchlib/attribute/readerbase.h b/searchlib/src/vespa/searchlib/attribute/readerbase.h
index 070dc1f99fb..ff400acc824 100644
--- a/searchlib/src/vespa/searchlib/attribute/readerbase.h
+++ b/searchlib/src/vespa/searchlib/attribute/readerbase.h
@@ -4,7 +4,6 @@
#include <vespa/searchlib/util/file_with_header.h>
#include <vespa/searchlib/util/fileutil.h>
-#include <cassert>
namespace search {
@@ -25,11 +24,7 @@ public:
return (_idxFile.data_size()) /sizeof(uint32_t);
}
- size_t getEnumCount() const {
- size_t dataSize = _datFile.data_size();
- assert((dataSize % sizeof(uint32_t)) == 0);
- return dataSize / sizeof(uint32_t);
- }
+ size_t getEnumCount() const;
size_t getNumValues();
int32_t getNextWeight() { return _weightReader.readHostOrder(); }
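
Note: this is one instance of a pattern repeated throughout this change set: small assert()-carrying function bodies are moved from headers into the corresponding .cpp files so that <cassert> can be dropped from widely included headers (see also compression.h and posocc_fields_params.h below). Condensed from the hunks above, the before/after looks like:

    // readerbase.h -- declaration only, no <cassert> include needed any more
    size_t getEnumCount() const;

    // readerbase.cpp
    #include <cassert>
    size_t ReaderBase::getEnumCount() const {
        size_t dataSize = _datFile.data_size();
        assert((dataSize % sizeof(uint32_t)) == 0);   // enum data is a whole number of 32-bit entries
        return dataSize / sizeof(uint32_t);
    }
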
diff --git a/searchlib/src/vespa/searchlib/attribute/search_context.cpp b/searchlib/src/vespa/searchlib/attribute/search_context.cpp
index a0345ddce70..a0208ab787e 100644
--- a/searchlib/src/vespa/searchlib/attribute/search_context.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/search_context.cpp
@@ -10,14 +10,6 @@ using search::queryeval::SearchIterator;
namespace search::attribute {
-SearchContext::SearchContext(const AttributeVector &attr) noexcept
- : _attr(attr),
- _plsc(nullptr)
-{
-}
-
-SearchContext::~SearchContext() = default;
-
unsigned int
SearchContext::approximateHits() const
{
diff --git a/searchlib/src/vespa/searchlib/attribute/search_context.h b/searchlib/src/vespa/searchlib/attribute/search_context.h
index 025b0fdf113..cc55beee216 100644
--- a/searchlib/src/vespa/searchlib/attribute/search_context.h
+++ b/searchlib/src/vespa/searchlib/attribute/search_context.h
@@ -30,7 +30,7 @@ public:
SearchContext(SearchContext&&) noexcept = default;
SearchContext& operator=(const SearchContext&) = delete;
SearchContext& operator=(SearchContext&&) noexcept = delete;
- ~SearchContext() override;
+ ~SearchContext() override = default;
unsigned int approximateHits() const override;
std::unique_ptr<queryeval::SearchIterator> createIterator(fef::TermFieldMatchData* matchData, bool strict) override;
@@ -47,7 +47,10 @@ public:
const AttributeVector& attribute() const { return _attr; }
protected:
- SearchContext(const AttributeVector& attr) noexcept;
+ SearchContext(const AttributeVector& attr) noexcept
+ : _attr(attr),
+ _plsc(nullptr)
+ {}
const AttributeVector& _attr;
attribute::IPostingListSearchContext* _plsc;
diff --git a/searchlib/src/vespa/searchlib/attribute/singlenumericattribute.cpp b/searchlib/src/vespa/searchlib/attribute/singlenumericattribute.cpp
index 43e400e14e3..608aeab1a8d 100644
--- a/searchlib/src/vespa/searchlib/attribute/singlenumericattribute.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/singlenumericattribute.cpp
@@ -11,5 +11,4 @@ template class SingleValueNumericAttribute<IntegerAttributeTemplate<int64_t>>;
template class SingleValueNumericAttribute<FloatingPointAttributeTemplate<float>>;
template class SingleValueNumericAttribute<FloatingPointAttributeTemplate<double>>;
-} // namespace search
-
+}
diff --git a/searchlib/src/vespa/searchlib/attribute/string_search_helper.cpp b/searchlib/src/vespa/searchlib/attribute/string_search_helper.cpp
index 206c2bcbd69..17a0e6256d4 100644
--- a/searchlib/src/vespa/searchlib/attribute/string_search_helper.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/string_search_helper.cpp
@@ -29,10 +29,10 @@ StringSearchHelper::StringSearchHelper(QueryTermUCS4 & term, bool cased)
term.getFuzzyPrefixLength(),
isCased());
} else if (isCased()) {
- _term._char = term.getTerm();
+ _term = term.getTerm();
_termLen = term.getTermLen();
} else {
- term.term(_term._ucs4);
+ _ucs4 = term.asUcs4();
}
}
@@ -49,7 +49,7 @@ StringSearchHelper::isMatch(const char *src) const {
return getFuzzyMatcher().isMatch(src);
}
if (__builtin_expect(isCased(), false)) {
- int res = strncmp(_term._char, src, _termLen);
+ int res = strncmp(_term, src, _termLen);
return (res == 0) && (src[_termLen] == 0 || isPrefix());
}
vespalib::Utf8ReaderForZTS u8reader(src);
@@ -58,11 +58,11 @@ StringSearchHelper::isMatch(const char *src) const {
for (;; ++j) {
val = u8reader.getChar();
val = vespalib::LowerCase::convert(val);
- if (_term._ucs4[j] == 0 || _term._ucs4[j] != val) {
+ if (_ucs4[j] == 0 || _ucs4[j] != val) {
break;
}
}
- return (_term._ucs4[j] == 0 && (val == 0 || isPrefix()));
+ return (_ucs4[j] == 0 && (val == 0 || isPrefix()));
}
}
diff --git a/searchlib/src/vespa/searchlib/attribute/string_search_helper.h b/searchlib/src/vespa/searchlib/attribute/string_search_helper.h
index 4d69b61449e..7bfcf0e4292 100644
--- a/searchlib/src/vespa/searchlib/attribute/string_search_helper.h
+++ b/searchlib/src/vespa/searchlib/attribute/string_search_helper.h
@@ -16,6 +16,7 @@ namespace search::attribute {
*/
class StringSearchHelper {
public:
+ using FuzzyMatcher = vespalib::FuzzyMatcher;
StringSearchHelper(QueryTermUCS4 & qTerm, bool cased);
StringSearchHelper(StringSearchHelper&&) noexcept;
StringSearchHelper(const StringSearchHelper &) = delete;
@@ -27,14 +28,12 @@ public:
bool isCased() const noexcept { return _isCased; }
bool isFuzzy() const noexcept { return _isFuzzy; }
const vespalib::Regex & getRegex() const noexcept { return _regex; }
- const vespalib::FuzzyMatcher & getFuzzyMatcher() const noexcept { return *_fuzzyMatcher; }
+ const FuzzyMatcher & getFuzzyMatcher() const noexcept { return *_fuzzyMatcher; }
private:
vespalib::Regex _regex;
- std::unique_ptr<vespalib::FuzzyMatcher> _fuzzyMatcher;
- union {
- const ucs4_t *_ucs4;
- const char *_char;
- } _term;
+ std::unique_ptr<FuzzyMatcher> _fuzzyMatcher;
+ std::unique_ptr<ucs4_t[]> _ucs4;
+ const char * _term;
uint32_t _termLen;
bool _isPrefix;
bool _isRegex;
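
Note: the anonymous union over _ucs4 and _char is split into two plain members: an owning std::unique_ptr<ucs4_t[]> holding the lower-cased UCS-4 copy of the term, and a raw const char * borrowing the cased term. Condensed from the isMatch() hunks above (not a complete function), the match path now selects between them explicitly:

    if (__builtin_expect(isCased(), false)) {
        int res = strncmp(_term, src, _termLen);   // _term: borrowed cased C string
        return (res == 0) && (src[_termLen] == 0 || isPrefix());
    }
    // otherwise lower-case src one character at a time and compare it
    // against the owned lower-cased UCS-4 buffer in _ucs4
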
diff --git a/searchlib/src/vespa/searchlib/bitcompression/compression.h b/searchlib/src/vespa/searchlib/bitcompression/compression.h
index a77d82d9e8f..2d6b8083d43 100644
--- a/searchlib/src/vespa/searchlib/bitcompression/compression.h
+++ b/searchlib/src/vespa/searchlib/bitcompression/compression.h
@@ -4,7 +4,6 @@
#include <vespa/searchlib/util/comprfile.h>
#include <vespa/vespalib/stllike/string.h>
-#include <cassert>
namespace vespalib {
@@ -1400,7 +1399,6 @@ public:
const uint8_t *
getByteCompr() const
{
- assert((_preRead & 7) == 0);
return reinterpret_cast<const uint8_t *>(getCompr()) +
(getBitOffset() >> 3);
}
diff --git a/searchlib/src/vespa/searchlib/bitcompression/countcompression.cpp b/searchlib/src/vespa/searchlib/bitcompression/countcompression.cpp
index cdae8058d76..7c38931df77 100644
--- a/searchlib/src/vespa/searchlib/bitcompression/countcompression.cpp
+++ b/searchlib/src/vespa/searchlib/bitcompression/countcompression.cpp
@@ -2,6 +2,7 @@
#include "countcompression.h"
#include <vespa/searchlib/index/postinglistcounts.h>
+#include <cassert>
namespace search::bitcompression {
diff --git a/searchlib/src/vespa/searchlib/bitcompression/pagedict4.h b/searchlib/src/vespa/searchlib/bitcompression/pagedict4.h
index 9208a5be3b8..b162bdc3f2b 100644
--- a/searchlib/src/vespa/searchlib/bitcompression/pagedict4.h
+++ b/searchlib/src/vespa/searchlib/bitcompression/pagedict4.h
@@ -3,8 +3,8 @@
#pragma once
#include "countcompression.h"
-#include <limits>
#include <vespa/vespalib/stllike/string.h>
+#include <cassert>
namespace search::bitcompression {
diff --git a/searchlib/src/vespa/searchlib/bitcompression/posocc_fields_params.cpp b/searchlib/src/vespa/searchlib/bitcompression/posocc_fields_params.cpp
index d2269787068..9d6258ce26f 100644
--- a/searchlib/src/vespa/searchlib/bitcompression/posocc_fields_params.cpp
+++ b/searchlib/src/vespa/searchlib/bitcompression/posocc_fields_params.cpp
@@ -5,6 +5,7 @@
#include <vespa/searchlib/index/schemautil.h>
#include <vespa/vespalib/data/fileheader.h>
#include <vespa/vespalib/stllike/asciistream.h>
+#include <cassert>
#include <vespa/log/log.h>
LOG_SETUP(".posocc_fields_params");
@@ -38,6 +39,12 @@ PosOccFieldsParams::operator=(const PosOccFieldsParams &rhs)
return *this;
}
+void
+PosOccFieldsParams::assertCachedParamsRef() const {
+ assert(_numFields == _params.size());
+ assert(_fieldParams == (_params.empty() ? nullptr : &_params[0]));
+}
+
bool
PosOccFieldsParams::operator==(const PosOccFieldsParams &rhs) const
diff --git a/searchlib/src/vespa/searchlib/bitcompression/posocc_fields_params.h b/searchlib/src/vespa/searchlib/bitcompression/posocc_fields_params.h
index 963a80f06dc..8748557e5a7 100644
--- a/searchlib/src/vespa/searchlib/bitcompression/posocc_fields_params.h
+++ b/searchlib/src/vespa/searchlib/bitcompression/posocc_fields_params.h
@@ -4,7 +4,6 @@
#include "posocc_field_params.h"
#include <vector>
-#include <cassert>
namespace search::bitcompression {
@@ -32,10 +31,7 @@ public:
_fieldParams = _params.empty() ? nullptr : &_params[0];
}
- void assertCachedParamsRef() const {
- assert(_numFields == _params.size());
- assert(_fieldParams == (_params.empty() ? nullptr : &_params[0]));
- }
+ void assertCachedParamsRef() const;
uint32_t getNumFields() const { return _numFields; }
const PosOccFieldParams *getFieldParams() const { return _fieldParams; }
diff --git a/searchlib/src/vespa/searchlib/bitcompression/posocccompression.cpp b/searchlib/src/vespa/searchlib/bitcompression/posocccompression.cpp
index fd6c723e901..8e1bfd2875c 100644
--- a/searchlib/src/vespa/searchlib/bitcompression/posocccompression.cpp
+++ b/searchlib/src/vespa/searchlib/bitcompression/posocccompression.cpp
@@ -7,6 +7,7 @@
#include <vespa/searchlib/index/postinglistparams.h>
#include <vespa/vespalib/stllike/asciistream.h>
#include <vespa/vespalib/data/fileheader.h>
+#include <cassert>
#include <vespa/log/log.h>
LOG_SETUP(".posocccompression");
diff --git a/searchlib/src/vespa/searchlib/common/bitvectorcache.cpp b/searchlib/src/vespa/searchlib/common/bitvectorcache.cpp
index 8bb24bcbbec..ea7fd5ee76c 100644
--- a/searchlib/src/vespa/searchlib/common/bitvectorcache.cpp
+++ b/searchlib/src/vespa/searchlib/common/bitvectorcache.cpp
@@ -4,19 +4,20 @@
#include <algorithm>
#include <cassert>
#include <cinttypes>
+#include <mutex>
#include <vespa/log/log.h>
LOG_SETUP(".searchlib.common.bitvectorcache");
namespace search {
-BitVectorCache::BitVectorCache(GenerationHolder &genHolder) :
- _lookupCount(0),
- _needPopulation(false),
- _lock(),
- _keys(),
- _chunks(),
- _genHolder(genHolder)
+BitVectorCache::BitVectorCache(GenerationHolder &genHolder)
+ : _lookupCount(0),
+ _needPopulation(false),
+ _mutex(),
+ _keys(),
+ _chunks(),
+ _genHolder(genHolder)
{
}
@@ -29,7 +30,7 @@ BitVectorCache::computeCountVector(KeySet & keys, CountVector & v) const
std::vector<CondensedBitVector::KeySet> keySets;
ChunkV chunks;
{
- std::lock_guard<std::mutex> guard(_lock);
+ std::shared_lock guard(_mutex);
keySets.resize(_chunks.size());
Key2Index::const_iterator end(_keys.end());
for (Key k : keys) {
@@ -61,13 +62,13 @@ BitVectorCache::KeySet
BitVectorCache::lookupCachedSet(const KeyAndCountSet & keys)
{
KeySet cached(keys.size()*3);
- std::lock_guard<std::mutex> guard(_lock);
- _lookupCount++;
- if (_lookupCount == 2000) {
- _needPopulation = true;
- } else if ((_lookupCount & 0x1fffff) == 0x100000) {
- if (hasCostChanged(guard)) {
- _needPopulation = true;
+ std::shared_lock shared_guard(_mutex);
+ uint64_t lookupCount = _lookupCount++;
+ if (lookupCount == 2000) {
+ requirePopulation();
+ } else if ((lookupCount & 0x1fffff) == 0x100000) {
+ if (hasCostChanged(shared_guard)) {
+ requirePopulation();
}
}
for (const auto & e : keys) {
@@ -79,7 +80,12 @@ BitVectorCache::lookupCachedSet(const KeyAndCountSet & keys)
cached.insert(e.first);
}
} else {
- _keys[e.first] = KeyMeta().lookup().bitCount(e.second);
+ shared_guard.unlock();
+ {
+ std::unique_lock unique_guard(_mutex);
+ _keys[e.first] = KeyMeta().lookup().bitCount(e.second);
+ }
+ shared_guard.lock();
}
}
return cached;
@@ -101,7 +107,7 @@ BitVectorCache::getSorted(Key2Index & keys)
}
bool
-BitVectorCache::hasCostChanged(const std::lock_guard<std::mutex> & guard)
+BitVectorCache::hasCostChanged(const std::shared_lock<std::shared_mutex> & guard)
{
(void) guard;
if ( ! _chunks.empty()) {
@@ -168,10 +174,8 @@ BitVectorCache::populate(Key2Index & newKeys, CondensedBitVector & chunk, const
void
BitVectorCache::populate(uint32_t sz, const PopulateInterface & lookup)
{
- std::unique_lock<std::mutex> guard(_lock);
- if (! _needPopulation) {
- return;
- }
+ if (!needPopulation()) return;
+ std::unique_lock guard(_mutex);
Key2Index newKeys(_keys);
guard.unlock();
@@ -187,7 +191,7 @@ BitVectorCache::populate(uint32_t sz, const PopulateInterface & lookup)
void
BitVectorCache::set(Key key, uint32_t index, bool v)
{
- std::lock_guard<std::mutex> guard(_lock);
+ std::shared_lock guard(_mutex);
auto found = _keys.find(key);
if (found != _keys.end()) {
const KeyMeta & m(found->second);
@@ -207,7 +211,7 @@ BitVectorCache::get(Key key, uint32_t index) const
void
BitVectorCache::removeIndex(uint32_t index)
{
- std::lock_guard<std::mutex> guard(_lock);
+ std::unique_lock guard(_mutex);
for (auto & chunk : _chunks) {
chunk->clearIndex(index);
}
diff --git a/searchlib/src/vespa/searchlib/common/bitvectorcache.h b/searchlib/src/vespa/searchlib/common/bitvectorcache.h
index 6fac1352d94..bb8f019c128 100644
--- a/searchlib/src/vespa/searchlib/common/bitvectorcache.h
+++ b/searchlib/src/vespa/searchlib/common/bitvectorcache.h
@@ -4,7 +4,7 @@
#include "condensedbitvectors.h"
#include <vespa/vespalib/stllike/hash_set.h>
#include <vespa/vespalib/stllike/hash_map.h>
-#include <mutex>
+#include <shared_mutex>
namespace search {
@@ -39,7 +39,7 @@ public:
void removeIndex(uint32_t index);
void adjustDocIdLimit(uint32_t docId);
void populate(uint32_t count, const PopulateInterface &);
- bool needPopulation() const { return _needPopulation; }
+ bool needPopulation() const { return _needPopulation.load(std::memory_order_relaxed); }
void requirePopulation() { _needPopulation = true; }
private:
class KeyMeta {
@@ -75,14 +75,14 @@ private:
VESPA_DLL_LOCAL static SortedKeyMeta getSorted(Key2Index & keys);
VESPA_DLL_LOCAL static void populate(Key2Index & newKeys, CondensedBitVector & chunk, const PopulateInterface & lookup);
- VESPA_DLL_LOCAL bool hasCostChanged(const std::lock_guard<std::mutex> &);
+ VESPA_DLL_LOCAL bool hasCostChanged(const std::shared_lock<std::shared_mutex> &);
- uint64_t _lookupCount;
- bool _needPopulation;
- mutable std::mutex _lock;
- Key2Index _keys;
- ChunkV _chunks;
- GenerationHolder &_genHolder;
+ std::atomic<uint64_t> _lookupCount;
+ std::atomic<bool> _needPopulation;
+ mutable std::shared_mutex _mutex;
+ Key2Index _keys;
+ ChunkV _chunks;
+ GenerationHolder &_genHolder;
};
}
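
Note: bitvectorcache switches from a plain std::mutex to a std::shared_mutex, with _lookupCount and _needPopulation turned into atomics, so the hot lookup path can run under a shared (read) lock and only takes the exclusive lock when a previously unseen key must be inserted. Condensed from lookupCachedSet() above, the upgrade idiom is as follows; known(), key and count are illustrative stand-ins for the _keys.find() test and the loop variables in the real code:

    std::shared_lock shared_guard(_mutex);            // readers share this lock
    if (!known(key)) {
        shared_guard.unlock();                        // std::shared_mutex has no in-place upgrade...
        {
            std::unique_lock unique_guard(_mutex);    // ...so take the writer lock separately
            _keys[key] = KeyMeta().lookup().bitCount(count);
        }
        shared_guard.lock();                          // back to shared mode; other writers may have run in between
    }
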
diff --git a/searchlib/src/vespa/searchlib/diskindex/fieldreader.h b/searchlib/src/vespa/searchlib/diskindex/fieldreader.h
index 73b56559115..e97d5deb95c 100644
--- a/searchlib/src/vespa/searchlib/diskindex/fieldreader.h
+++ b/searchlib/src/vespa/searchlib/diskindex/fieldreader.h
@@ -1,14 +1,14 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
+#include "wordnummapper.h"
+#include "docidmapper.h"
+#include "fieldwriter.h"
#include <vespa/searchlib/index/postinglistcounts.h>
#include <vespa/searchlib/index/dictionaryfile.h>
#include <vespa/searchlib/index/docidandfeatures.h>
#include <vespa/searchlib/index/postinglistfile.h>
#include <vespa/searchlib/index/schemautil.h>
-#include "wordnummapper.h"
-#include "docidmapper.h"
-#include "fieldwriter.h"
namespace search::diskindex {
@@ -40,7 +40,7 @@ public:
using PostingListCounts = index::PostingListCounts;
using PostingListParams = index::PostingListParams;
- uint64_t _wordNum;
+ uint64_t _wordNum;
DocIdAndFeatures _docIdAndFeatures;
protected:
std::unique_ptr<DictionaryFileSeqRead> _dictFile;
diff --git a/searchlib/src/vespa/searchlib/diskindex/fileheader.cpp b/searchlib/src/vespa/searchlib/diskindex/fileheader.cpp
index 5399d70fbe7..432651278e0 100644
--- a/searchlib/src/vespa/searchlib/diskindex/fileheader.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/fileheader.cpp
@@ -6,6 +6,7 @@
#include <vespa/vespalib/data/fileheader.h>
#include <vespa/fastos/file.h>
#include <cinttypes>
+#include <cassert>
#include <arpa/inet.h>
#include <vespa/log/log.h>
diff --git a/searchlib/src/vespa/searchlib/diskindex/fusion.cpp b/searchlib/src/vespa/searchlib/diskindex/fusion.cpp
index 4fd9d116244..d12081ee89c 100644
--- a/searchlib/src/vespa/searchlib/diskindex/fusion.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/fusion.cpp
@@ -8,11 +8,9 @@
#include <vespa/searchlib/common/documentsummary.h>
#include <vespa/searchlib/common/i_flush_token.h>
#include <vespa/searchlib/index/schemautil.h>
-#include <vespa/vespalib/io/fileutil.h>
#include <vespa/vespalib/util/error.h>
#include <vespa/vespalib/util/exceptions.h>
#include <vespa/vespalib/util/lambdatask.h>
-#include <vespa/document/util/queue.h>
#include <filesystem>
#include <system_error>
diff --git a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader.cpp b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader.cpp
index 2170777dbd3..460fac36acc 100644
--- a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader.cpp
@@ -3,6 +3,7 @@
#include "zc4_posting_reader.h"
#include "zc4_posting_header.h"
#include <vespa/searchlib/index/docidandfeatures.h>
+#include <cassert>
namespace search::diskindex {
@@ -19,9 +20,7 @@ Zc4PostingReader<bigEndian>::Zc4PostingReader(bool dynamic_k)
}
template <bool bigEndian>
-Zc4PostingReader<bigEndian>::~Zc4PostingReader()
-{
-}
+Zc4PostingReader<bigEndian>::~Zc4PostingReader() = default;
template <bool bigEndian>
void
diff --git a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader_base.cpp b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader_base.cpp
index b50835a648d..c71404d449b 100644
--- a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader_base.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader_base.cpp
@@ -3,7 +3,7 @@
#include "zc4_posting_reader_base.h"
#include "zc4_posting_header.h"
#include <vespa/searchlib/index/docidandfeatures.h>
-
+#include <cassert>
namespace search::diskindex {
using index::PostingListCounts;
diff --git a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer.cpp b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer.cpp
index 202ed5a23cd..3a1b7928c93 100644
--- a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer.cpp
@@ -3,6 +3,7 @@
#include "zc4_posting_writer.h"
#include <vespa/searchlib/index/docidandfeatures.h>
#include <vespa/searchlib/index/postinglistcounts.h>
+#include <cassert>
using search::index::DocIdAndFeatures;
using search::index::PostingListCounts;
diff --git a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer_base.cpp b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer_base.cpp
index 3f44b56706a..8a84ccc5731 100644
--- a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer_base.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer_base.cpp
@@ -3,6 +3,7 @@
#include "zc4_posting_writer_base.h"
#include <vespa/searchlib/index/postinglistcounts.h>
#include <vespa/searchlib/index/postinglistparams.h>
+#include <cassert>
using search::index::PostingListCounts;
using search::index::PostingListParams;
diff --git a/searchlib/src/vespa/searchlib/diskindex/zcposocciterators.cpp b/searchlib/src/vespa/searchlib/diskindex/zcposocciterators.cpp
index b67a8409581..df33091a4e8 100644
--- a/searchlib/src/vespa/searchlib/diskindex/zcposocciterators.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/zcposocciterators.cpp
@@ -1,9 +1,10 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "zcposocciterators.h"
+#include "zc4_posting_params.h"
#include <vespa/searchlib/bitcompression/posocc_fields_params.h>
#include <vespa/searchlib/fef/termfieldmatchdata.h>
-#include "zc4_posting_params.h"
+#include <cassert>
namespace search::diskindex {
diff --git a/searchlib/src/vespa/searchlib/diskindex/zcposting.cpp b/searchlib/src/vespa/searchlib/diskindex/zcposting.cpp
index ef540365208..c9a1563a8e3 100644
--- a/searchlib/src/vespa/searchlib/diskindex/zcposting.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/zcposting.cpp
@@ -8,6 +8,7 @@
#include <vespa/searchlib/index/postinglistparams.h>
#include <vespa/searchlib/common/fileheadercontext.h>
#include <vespa/vespalib/data/fileheader.h>
+#include <cassert>
#include <vespa/log/log.h>
LOG_SETUP(".diskindex.zcposting");
diff --git a/searchlib/src/vespa/searchlib/diskindex/zcpostingiterators.cpp b/searchlib/src/vespa/searchlib/diskindex/zcpostingiterators.cpp
index 66404c7a0ff..83a4ae20db5 100644
--- a/searchlib/src/vespa/searchlib/diskindex/zcpostingiterators.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/zcpostingiterators.cpp
@@ -4,6 +4,7 @@
#include <vespa/searchlib/fef/termfieldmatchdata.h>
#include <vespa/searchlib/fef/termfieldmatchdataarray.h>
#include <vespa/searchlib/bitcompression/posocccompression.h>
+#include <cassert>
namespace search::diskindex {
diff --git a/searchlib/src/vespa/searchlib/expression/arrayatlookupfunctionnode.cpp b/searchlib/src/vespa/searchlib/expression/arrayatlookupfunctionnode.cpp
index 3663d76ad51..d94e8c19981 100644
--- a/searchlib/src/vespa/searchlib/expression/arrayatlookupfunctionnode.cpp
+++ b/searchlib/src/vespa/searchlib/expression/arrayatlookupfunctionnode.cpp
@@ -6,21 +6,22 @@
#include <vespa/searchcommon/attribute/iattributecontext.h>
#include <vespa/vespalib/util/stringfmt.h>
-namespace search {
-namespace expression {
+namespace search::expression {
using vespalib::Serializer;
using vespalib::Deserializer;
IMPLEMENT_EXPRESSIONNODE(ArrayAtLookup, UnaryFunctionNode);
-ArrayAtLookup::ArrayAtLookup()
+ArrayAtLookup::ArrayAtLookup() noexcept
+ : _attributeName(),
+ _attribute(nullptr),
+ _docId(0),
+ _basicAttributeType(BAT_STRING)
{
}
-ArrayAtLookup::~ArrayAtLookup()
-{
-}
+ArrayAtLookup::~ArrayAtLookup() = default;
ArrayAtLookup::ArrayAtLookup(const vespalib::string &attribute, ExpressionNode::UP arg)
: UnaryFunctionNode(std::move(arg)),
@@ -41,11 +42,9 @@ ArrayAtLookup::ArrayAtLookup(const ArrayAtLookup &rhs) :
UnaryFunctionNode(rhs),
_attributeName(rhs._attributeName),
_attribute(rhs._attribute),
- _docId(rhs._docId),
+ _docId(0),
_basicAttributeType(rhs._basicAttributeType)
{
- // why?
- _docId = 0;
}
ArrayAtLookup & ArrayAtLookup::operator= (const ArrayAtLookup &rhs)
@@ -54,7 +53,6 @@ ArrayAtLookup & ArrayAtLookup::operator= (const ArrayAtLookup &rhs)
UnaryFunctionNode::operator =(rhs);
_attributeName = rhs._attributeName;
_attribute = rhs._attribute;
- // _docId = rhs._docId;
_docId = 0;
_basicAttributeType = rhs._basicAttributeType;
}
@@ -65,13 +63,13 @@ void ArrayAtLookup::onPrepareResult()
{
if (_attribute->isIntegerType()) {
_basicAttributeType = BAT_INT;
- setResultType(std::unique_ptr<ResultNode>(new Int64ResultNode()));
+ setResultType(std::make_unique<Int64ResultNode>());
} else if (_attribute->isFloatingPointType()) {
_basicAttributeType = BAT_FLOAT;
- setResultType(std::unique_ptr<ResultNode>(new FloatResultNode()));
+ setResultType(std::make_unique<FloatResultNode>());
} else {
_basicAttributeType = BAT_STRING;
- setResultType(std::unique_ptr<ResultNode>(new StringResultNode()));
+ setResultType(std::make_unique<StringResultNode>());
}
}
@@ -137,7 +135,7 @@ bool ArrayAtLookup::onExecute() const
void ArrayAtLookup::wireAttributes(const search::attribute::IAttributeContext & attrCtx)
{
_attribute = attrCtx.getAttribute(_attributeName);
- if (_attribute == NULL) {
+ if (_attribute == nullptr) {
throw std::runtime_error(vespalib::make_string("Failed locating attribute vector '%s'", _attributeName.c_str()));
}
}
@@ -156,5 +154,4 @@ Deserializer & ArrayAtLookup::onDeserialize(Deserializer & is)
return is;
}
-} // namespace expression
-} // namespace search
+}
diff --git a/searchlib/src/vespa/searchlib/expression/arrayatlookupfunctionnode.h b/searchlib/src/vespa/searchlib/expression/arrayatlookupfunctionnode.h
index be3c6ac2b4a..9404ec09b04 100644
--- a/searchlib/src/vespa/searchlib/expression/arrayatlookupfunctionnode.h
+++ b/searchlib/src/vespa/searchlib/expression/arrayatlookupfunctionnode.h
@@ -3,12 +3,12 @@
#include "unaryfunctionnode.h"
-namespace search {
- namespace attribute {
- class IAttributeVector;
- class IAttributeContext;
- }
-namespace expression {
+namespace search::attribute {
+ class IAttributeVector;
+ class IAttributeContext;
+}
+
+namespace search::expression {
class ArrayAtLookup : public UnaryFunctionNode
{
@@ -16,8 +16,8 @@ public:
DECLARE_EXPRESSIONNODE(ArrayAtLookup);
DECLARE_NBO_SERIALIZE;
- ArrayAtLookup();
- ~ArrayAtLookup();
+ ArrayAtLookup() noexcept;
+ ~ArrayAtLookup() override;
ArrayAtLookup(const vespalib::string &attribute, ExpressionNode::UP arg);
ArrayAtLookup(const search::attribute::IAttributeVector &attr, ExpressionNode::UP indexArg);
ArrayAtLookup(const ArrayAtLookup &rhs);
@@ -32,12 +32,10 @@ private:
BAT_INT, BAT_FLOAT, BAT_STRING
};
- vespalib::string _attributeName = vespalib::string();
- const search::attribute::IAttributeVector * _attribute = 0;
- DocId _docId = 0;
- BasicAttributeType _basicAttributeType = BAT_STRING;
+ vespalib::string _attributeName;
+ const search::attribute::IAttributeVector * _attribute;
+ DocId _docId;
+ BasicAttributeType _basicAttributeType;
};
}
-}
-
diff --git a/searchlib/src/vespa/searchlib/expression/attribute_map_lookup_node.cpp b/searchlib/src/vespa/searchlib/expression/attribute_map_lookup_node.cpp
index 52ee467fac8..c1f61afdfd5 100644
--- a/searchlib/src/vespa/searchlib/expression/attribute_map_lookup_node.cpp
+++ b/searchlib/src/vespa/searchlib/expression/attribute_map_lookup_node.cpp
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "attribute_map_lookup_node.h"
+#include "resultvector.h"
#include <vespa/searchlib/attribute/stringbase.h>
#include <vespa/searchcommon/attribute/attributecontent.h>
#include <vespa/searchcommon/attribute/iattributecontext.h>
diff --git a/searchlib/src/vespa/searchlib/expression/attributenode.cpp b/searchlib/src/vespa/searchlib/expression/attributenode.cpp
index 413cb50ca49..7e46de934f0 100644
--- a/searchlib/src/vespa/searchlib/expression/attributenode.cpp
+++ b/searchlib/src/vespa/searchlib/expression/attributenode.cpp
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "attributenode.h"
+#include "resultvector.h"
#include "enumattributeresult.h"
#include <vespa/searchcommon/attribute/iattributecontext.h>
@@ -17,7 +18,7 @@ template <typename V>
class AttributeNode::IntegerHandler : public AttributeNode::Handler
{
public:
- IntegerHandler(ResultNode & result) :
+ IntegerHandler(ResultNode & result) noexcept :
Handler(),
_vector(((V &)result).getVector()),
_wVector()
@@ -31,7 +32,7 @@ private:
class AttributeNode::FloatHandler : public AttributeNode::Handler
{
public:
- FloatHandler(ResultNode & result) :
+ FloatHandler(ResultNode & result) noexcept :
Handler(),
_vector(((FloatResultNodeVector &)result).getVector()),
_wVector()
@@ -45,7 +46,7 @@ private:
class AttributeNode::StringHandler : public AttributeNode::Handler
{
public:
- StringHandler(ResultNode & result) :
+ StringHandler(ResultNode & result) noexcept :
Handler(),
_vector(((StringResultNodeVector &)result).getVector()),
_wVector()
@@ -59,7 +60,7 @@ private:
class AttributeNode::EnumHandler : public AttributeNode::Handler
{
public:
- EnumHandler(ResultNode & result) :
+ EnumHandler(ResultNode & result) noexcept :
Handler(),
_vector(((EnumResultNodeVector &)result).getVector()),
_wVector()
diff --git a/searchlib/src/vespa/searchlib/expression/attributenode.h b/searchlib/src/vespa/searchlib/expression/attributenode.h
index d668bd3f662..67ec6a3302f 100644
--- a/searchlib/src/vespa/searchlib/expression/attributenode.h
+++ b/searchlib/src/vespa/searchlib/expression/attributenode.h
@@ -2,7 +2,6 @@
#pragma once
#include "functionnode.h"
-#include "resultvector.h"
#include "attributeresult.h"
#include <vespa/vespalib/objects/objectoperation.h>
#include <vespa/vespalib/objects/objectpredicate.h>
diff --git a/searchlib/src/vespa/searchlib/expression/debugwaitfunctionnode.h b/searchlib/src/vespa/searchlib/expression/debugwaitfunctionnode.h
index 2371482825d..4a3ca0cf64d 100644
--- a/searchlib/src/vespa/searchlib/expression/debugwaitfunctionnode.h
+++ b/searchlib/src/vespa/searchlib/expression/debugwaitfunctionnode.h
@@ -2,9 +2,6 @@
#pragma once
#include "unaryfunctionnode.h"
-#include "stringresultnode.h"
-#include "resultvector.h"
-#include <vespa/searchlib/common/sortspec.h>
namespace search::expression {
diff --git a/searchlib/src/vespa/searchlib/expression/documentfieldnode.cpp b/searchlib/src/vespa/searchlib/expression/documentfieldnode.cpp
index bd13c032a03..16f5ee04be4 100644
--- a/searchlib/src/vespa/searchlib/expression/documentfieldnode.cpp
+++ b/searchlib/src/vespa/searchlib/expression/documentfieldnode.cpp
@@ -6,6 +6,7 @@
#include <vespa/document/datatype/documenttype.h>
#include <vespa/vespalib/encoding/base64.h>
#include <vespa/vespalib/locale/c.h>
+#include <cassert>
#include <vespa/log/log.h>
LOG_SETUP(".searchlib.documentfieldnode");
diff --git a/searchlib/src/vespa/searchlib/expression/expressiontree.cpp b/searchlib/src/vespa/searchlib/expression/expressiontree.cpp
index e47ba4ebeb0..5592f2f863b 100644
--- a/searchlib/src/vespa/searchlib/expression/expressiontree.cpp
+++ b/searchlib/src/vespa/searchlib/expression/expressiontree.cpp
@@ -79,7 +79,7 @@ void
ExpressionTree::onPrepare(bool preserveAccurateTypes)
{
(void) preserveAccurateTypes;
- if (_root.get() != NULL) {
+ if (_root) {
gather(_attributeNodes).from(*_root);
gather(_documentAccessorNodes).from(*_root);
gather(_relevanceNodes).from(*_root);
@@ -141,18 +141,16 @@ ExpressionTree::swap(ExpressionTree & e)
_arrayAtLookupNodes.swap(_arrayAtLookupNodes);
}
-ExpressionTree::~ExpressionTree()
-{
-}
+ExpressionTree::~ExpressionTree() = default;
bool
ExpressionTree::execute(const document::Document & doc, HitRank rank) const
{
- for(DocumentAccessorNodeList::const_iterator it(_documentAccessorNodes.begin()), mt(_documentAccessorNodes.end()); it != mt; it++) {
- (*it)->setDoc(doc);
+ for(auto * node : _documentAccessorNodes) {
+ node->setDoc(doc);
}
- for(RelevanceNodeList::const_iterator it(_relevanceNodes.begin()), mt(_relevanceNodes.end()); it != mt; it++) {
- (*it)->setRelevance(rank);
+ for(auto * node : _relevanceNodes) {
+ node->setRelevance(rank);
}
return _root->execute();
}
diff --git a/searchlib/src/vespa/searchlib/expression/fixedwidthbucketfunctionnode.cpp b/searchlib/src/vespa/searchlib/expression/fixedwidthbucketfunctionnode.cpp
index f7c8941c3e2..1aafd523832 100644
--- a/searchlib/src/vespa/searchlib/expression/fixedwidthbucketfunctionnode.cpp
+++ b/searchlib/src/vespa/searchlib/expression/fixedwidthbucketfunctionnode.cpp
@@ -4,14 +4,13 @@
#include "floatresultnode.h"
#include "integerbucketresultnode.h"
#include "floatbucketresultnode.h"
+#include "resultvector.h"
#include <vespa/vespalib/util/stringfmt.h>
#include <stdexcept>
#include <cmath>
#include <limits>
-
-namespace search {
-namespace expression {
+namespace search::expression {
IMPLEMENT_EXPRESSIONNODE(FixedWidthBucketFunctionNode, UnaryFunctionNode);
@@ -79,7 +78,7 @@ FixedWidthBucketFunctionNode::FloatBucketHandler::update(ResultNode &result, con
bucket.setRange(from, to);
}
-FixedWidthBucketFunctionNode::~FixedWidthBucketFunctionNode() {}
+FixedWidthBucketFunctionNode::~FixedWidthBucketFunctionNode() = default;
void
FixedWidthBucketFunctionNode::onPrepareResult()
@@ -87,20 +86,16 @@ FixedWidthBucketFunctionNode::onPrepareResult()
const ExpressionNode &child = getArg();
const ResultNode &input = *child.getResult();
if (input.getClass().inherits(IntegerResultNode::classId)) {
- ResultNode::UP res(new IntegerBucketResultNode());
- setResultType(std::move(res));
+ setResultType(std::make_unique<IntegerBucketResultNode>());
_bucketHandler.reset(new IntegerBucketHandler(_width->getInteger()));
} else if (input.getClass().inherits(FloatResultNode::classId)) {
- ResultNode::UP res(new FloatBucketResultNode());
- setResultType(std::move(res));
+ setResultType(std::make_unique<FloatBucketResultNode>());
_bucketHandler.reset(new FloatBucketHandler(_width->getFloat()));
} else if (input.getClass().inherits(IntegerResultNodeVector::classId)) {
- ResultNode::UP res(new IntegerBucketResultNodeVector());
- setResultType(std::move(res));
+ setResultType(std::make_unique<IntegerBucketResultNodeVector>());
_bucketHandler.reset(new IntegerVectorBucketHandler(_width->getInteger()));
} else if (input.getClass().inherits(FloatResultNodeVector::classId)) {
- ResultNode::UP res(new FloatBucketResultNodeVector());
- setResultType(std::move(res));
+ setResultType(std::make_unique<FloatBucketResultNodeVector>());
_bucketHandler.reset(new FloatVectorBucketHandler(_width->getFloat()));
} else {
throw std::runtime_error(vespalib::make_string("cannot create appropriate bucket for type '%s'", input.getClass().name()));
@@ -130,7 +125,6 @@ FixedWidthBucketFunctionNode::onDeserialize(vespalib::Deserializer &is)
}
}
-}
// this function was added by ../../forcelink.sh
void forcelink_file_searchlib_expression_fixedwidthbucketfunctionnode() {}
diff --git a/searchlib/src/vespa/searchlib/expression/fixedwidthbucketfunctionnode.h b/searchlib/src/vespa/searchlib/expression/fixedwidthbucketfunctionnode.h
index 000bdf733c5..fcc4ba0ad5d 100644
--- a/searchlib/src/vespa/searchlib/expression/fixedwidthbucketfunctionnode.h
+++ b/searchlib/src/vespa/searchlib/expression/fixedwidthbucketfunctionnode.h
@@ -5,11 +5,9 @@
#include "numericresultnode.h"
#include "integerbucketresultnode.h"
#include "floatbucketresultnode.h"
-#include "resultvector.h"
#include <memory>
-namespace search {
-namespace expression {
+namespace search::expression {
class FixedWidthBucketFunctionNode : public UnaryFunctionNode
{
@@ -19,7 +17,7 @@ public:
using CP = vespalib::CloneablePtr<BucketHandler>;
virtual void update(ResultNode &result, const ResultNode &value) const = 0;
virtual BucketHandler *clone() const = 0;
- virtual ~BucketHandler() {}
+ virtual ~BucketHandler() = default;
};
// update integer result bucket based on integer value
@@ -60,7 +58,7 @@ public:
DECLARE_NBO_SERIALIZE;
FixedWidthBucketFunctionNode() : UnaryFunctionNode(), _width(), _bucketHandler() {}
FixedWidthBucketFunctionNode(ExpressionNode::UP arg) : UnaryFunctionNode(std::move(arg)), _width(), _bucketHandler() {}
- ~FixedWidthBucketFunctionNode();
+ ~FixedWidthBucketFunctionNode() override;
FixedWidthBucketFunctionNode &setWidth(const NumericResultNode::CP &width) {
_width = width;
return *this;
@@ -68,4 +66,3 @@ public:
};
}
-}
diff --git a/searchlib/src/vespa/searchlib/expression/functionnode.h b/searchlib/src/vespa/searchlib/expression/functionnode.h
index 4e23a2f3c60..c2b7dff6a7a 100644
--- a/searchlib/src/vespa/searchlib/expression/functionnode.h
+++ b/searchlib/src/vespa/searchlib/expression/functionnode.h
@@ -4,8 +4,7 @@
#include "expressionnode.h"
#include "resultnode.h"
-namespace search {
-namespace expression {
+namespace search::expression {
class FunctionNode : public ExpressionNode
{
@@ -15,9 +14,9 @@ public:
DECLARE_ABSTRACT_EXPRESSIONNODE(FunctionNode);
const ResultNode * getResult() const override { return _tmpResult.get(); }
ResultNode & updateResult() const { return *_tmpResult; }
- virtual void reset() { _tmpResult.reset(NULL); }
+ virtual void reset() { _tmpResult.reset(nullptr); }
- FunctionNode &setResult(const ResultNode::CP res) { _tmpResult = res; return *this; }
+ FunctionNode &setResult(const ResultNode::CP res) { _tmpResult = std::move(res); return *this; }
protected:
void setResultType(ResultNode::UP res) { _tmpResult = std::move(res); }
void selectMembers(const vespalib::ObjectPredicate & predicate, vespalib::ObjectOperation & operation) override;
@@ -26,5 +25,3 @@ private:
};
}
-}
-
diff --git a/searchlib/src/vespa/searchlib/expression/functionnodes.cpp b/searchlib/src/vespa/searchlib/expression/functionnodes.cpp
index cc6a8c137a7..109b4a59a05 100644
--- a/searchlib/src/vespa/searchlib/expression/functionnodes.cpp
+++ b/searchlib/src/vespa/searchlib/expression/functionnodes.cpp
@@ -83,19 +83,22 @@ IMPLEMENT_EXPRESSIONNODE(ToRawFunctionNode, UnaryFunctionNode);
IMPLEMENT_EXPRESSIONNODE(XorBitFunctionNode, UnaryBitFunctionNode);
IMPLEMENT_EXPRESSIONNODE(MD5BitFunctionNode, UnaryBitFunctionNode);
-void ExpressionNode::onArgument(const ResultNode & arg, ResultNode & result) const
+void
+ExpressionNode::onArgument(const ResultNode & arg, ResultNode & result) const
{
(void) arg;
(void) result;
throw std::runtime_error(make_string("Class %s does not implement onArgument(const ResultNode & arg, ResultNode & result). Probably an indication that it tries to take a multivalued argument, which it can not.", getClass().name()));
}
-void ExpressionNode::executeIterative(const ResultNode & arg, ResultNode & result) const
+void
+ExpressionNode::executeIterative(const ResultNode & arg, ResultNode & result) const
{
onArgument(arg, result);
}
-void ExpressionNode::wireAttributes(const search::attribute::IAttributeContext &)
+void
+ExpressionNode::wireAttributes(const search::attribute::IAttributeContext &)
{
}
@@ -141,7 +144,8 @@ private:
std::map<size_t, std::map<size_t, size_t> > _typeConversion;
};
-ResultNode::UP ArithmeticTypeConversion::getType(const ResultNode & arg1, const ResultNode & arg2)
+ResultNode::UP
+ArithmeticTypeConversion::getType(const ResultNode & arg1, const ResultNode & arg2)
{
size_t baseTypeId = getType(getBaseType2(arg1), getBaseType2(arg2));
size_t dimension = std::max(getDimension(arg1), getDimension(arg2));
@@ -162,13 +166,15 @@ ResultNode::UP ArithmeticTypeConversion::getType(const ResultNode & arg1, const
return result;
}
-ResultNode::UP ArithmeticTypeConversion::getType(const ResultNode & arg)
+ResultNode::UP
+ArithmeticTypeConversion::getType(const ResultNode & arg)
{
size_t baseTypeId = getBaseType(arg);
return ResultNode::UP(static_cast<ResultNode *>(Identifiable::classFromId(baseTypeId)->create()));
}
-size_t ArithmeticTypeConversion::getBaseType(const ResultNode & r)
+size_t
+ArithmeticTypeConversion::getBaseType(const ResultNode & r)
{
if (r.getClass().inherits(ResultNodeVector::classId)) {
return getBaseType(* r.createBaseType());
@@ -177,7 +183,8 @@ size_t ArithmeticTypeConversion::getBaseType(const ResultNode & r)
}
}
-size_t ArithmeticTypeConversion::getBaseType2(const ResultNode & r)
+size_t
+ArithmeticTypeConversion::getBaseType2(const ResultNode & r)
{
if (r.getClass().inherits(ResultNodeVector::classId)) {
return getBaseType2(* r.createBaseType());
@@ -193,7 +200,8 @@ namespace {
}
-void MultiArgFunctionNode::onPrepare(bool preserveAccurateTypes)
+void
+MultiArgFunctionNode::onPrepare(bool preserveAccurateTypes)
{
for(size_t i(0), m(_args.size()); i < m; i++) {
_args[i]->prepare(preserveAccurateTypes);
@@ -201,7 +209,8 @@ void MultiArgFunctionNode::onPrepare(bool preserveAccurateTypes)
prepareResult();
}
-void MultiArgFunctionNode::onPrepareResult()
+void
+MultiArgFunctionNode::onPrepareResult()
{
if (_args.size() == 1) {
setResultType(ArithmeticTypeConversion::getType(*_args[0]->getResult()));
@@ -215,7 +224,8 @@ void MultiArgFunctionNode::onPrepareResult()
}
}
-bool MultiArgFunctionNode::onExecute() const
+bool
+MultiArgFunctionNode::onExecute() const
{
for(size_t i(0), m(_args.size()); i < m; i++) {
_args[i]->execute();
@@ -223,7 +233,8 @@ bool MultiArgFunctionNode::onExecute() const
return calculate(_args, updateResult());
}
-bool MultiArgFunctionNode::onCalculate(const ExpressionNodeVector & args, ResultNode & result) const
+bool
+MultiArgFunctionNode::onCalculate(const ExpressionNodeVector & args, ResultNode & result) const
{
result.set(*args[0]->getResult());
for (size_t i(1), m(args.size()); i < m; i++) {
@@ -232,28 +243,33 @@ bool MultiArgFunctionNode::onCalculate(const ExpressionNodeVector & args, Result
return true;
}
-void BitFunctionNode::onPrepareResult()
+void
+BitFunctionNode::onPrepareResult()
{
- setResultType(std::unique_ptr<ResultNode>(new Int64ResultNode(0)));
+ setResultType(std::make_unique<Int64ResultNode>(0));
}
-void StrCatFunctionNode::onPrepareResult()
+void
+StrCatFunctionNode::onPrepareResult()
{
- setResultType(std::unique_ptr<ResultNode>(new StringResultNode()));
+ setResultType(std::make_unique<StringResultNode>());
}
-void CatFunctionNode::onPrepareResult()
+void
+CatFunctionNode::onPrepareResult()
{
- setResultType(std::unique_ptr<ResultNode>(new RawResultNode()));
+ setResultType(std::make_unique<RawResultNode>());
}
-void CatFunctionNode::onPrepare(bool preserveAccurateTypes)
+void
+CatFunctionNode::onPrepare(bool preserveAccurateTypes)
{
(void) preserveAccurateTypes;
MultiArgFunctionNode::onPrepare(true);
}
-void BitFunctionNode::onArgument(const ResultNode & arg, ResultNode & result) const
+void
+BitFunctionNode::onArgument(const ResultNode & arg, ResultNode & result) const
{
onArgument(arg, static_cast<Int64ResultNode &>(result));
}
@@ -268,7 +284,8 @@ void AndFunctionNode::onArgument(const ResultNode & arg, Int64ResultNode & resul
void OrFunctionNode::onArgument(const ResultNode & arg, Int64ResultNode & result) const { result.orOp(arg); }
void XorFunctionNode::onArgument(const ResultNode & arg, Int64ResultNode & result) const { result.xorOp(arg); }
-ResultNode::CP MaxFunctionNode::getInitialValue() const
+ResultNode::CP
+MaxFunctionNode::getInitialValue() const
{
ResultNode::CP initial;
const ResultNode & arg(*getArg(0).getResult());
@@ -282,7 +299,8 @@ ResultNode::CP MaxFunctionNode::getInitialValue() const
return initial;
}
-ResultNode::CP MinFunctionNode::getInitialValue() const
+ResultNode::CP
+MinFunctionNode::getInitialValue() const
{
ResultNode::CP initial;
const ResultNode & arg(*getArg(0).getResult());
@@ -296,98 +314,115 @@ ResultNode::CP MinFunctionNode::getInitialValue() const
return initial;
}
-ResultNode & ModuloFunctionNode::flatten(const ResultNodeVector &, ResultNode &) const
+ResultNode &
+ModuloFunctionNode::flatten(const ResultNodeVector &, ResultNode &) const
{
throw std::runtime_error("ModuloFunctionNode::flatten() const not implemented since it shall never be used.");
}
-ResultNode & DivideFunctionNode::flatten(const ResultNodeVector &, ResultNode &) const
+ResultNode &
+DivideFunctionNode::flatten(const ResultNodeVector &, ResultNode &) const
{
throw std::runtime_error("DivideFunctionNode::flatten() const not implemented since it shall never be used.");
}
-ResultNode::CP ModuloFunctionNode::getInitialValue() const
+ResultNode::CP
+ModuloFunctionNode::getInitialValue() const
{
throw std::runtime_error("ModuloFunctionNode::getInitialValue() const not implemented since it shall never be used.");
}
-ResultNode::CP DivideFunctionNode::getInitialValue() const
+ResultNode::CP
+DivideFunctionNode::getInitialValue() const
{
throw std::runtime_error("DivideFunctionNode::getInitialValue() const not implemented since it shall never be used.");
}
UnaryBitFunctionNode::~UnaryBitFunctionNode() = default;
-void UnaryBitFunctionNode::onPrepareResult()
+void
+UnaryBitFunctionNode::onPrepareResult()
{
- setResultType(std::unique_ptr<ResultNode>(new RawResultNode()));
+ setResultType(std::make_unique<RawResultNode>());
}
-void UnaryBitFunctionNode::onPrepare(bool preserveAccurateTypes)
+void
+UnaryBitFunctionNode::onPrepare(bool preserveAccurateTypes)
{
(void) preserveAccurateTypes;
UnaryFunctionNode::onPrepare(true);
}
-void UnaryFunctionNode::onPrepareResult()
+void
+UnaryFunctionNode::onPrepareResult()
{
setResultType(std::unique_ptr<ResultNode>(getArg().getResult()->clone()));
}
-void ToStringFunctionNode::onPrepareResult()
+void
+ToStringFunctionNode::onPrepareResult()
{
- setResultType(std::unique_ptr<ResultNode>(new StringResultNode()));
+ setResultType(std::make_unique<StringResultNode>());
}
-bool ToStringFunctionNode::onExecute() const
+bool
+ToStringFunctionNode::onExecute() const
{
getArg().execute();
updateResult().set(*getArg().getResult());
return true;
}
-void ToRawFunctionNode::onPrepareResult()
+void
+ToRawFunctionNode::onPrepareResult()
{
- setResultType(std::unique_ptr<ResultNode>(new RawResultNode()));
+ setResultType(std::make_unique<RawResultNode>());
}
-bool ToRawFunctionNode::onExecute() const
+bool
+ToRawFunctionNode::onExecute() const
{
getArg().execute();
updateResult().set(*getArg().getResult());
return true;
}
-void ToIntFunctionNode::onPrepareResult()
+void
+ToIntFunctionNode::onPrepareResult()
{
- setResultType(std::unique_ptr<ResultNode>(new Int64ResultNode()));
+ setResultType(std::make_unique<Int64ResultNode>());
}
-bool ToIntFunctionNode::onExecute() const
+bool
+ToIntFunctionNode::onExecute() const
{
getArg().execute();
updateResult().set(*getArg().getResult());
return true;
}
-void ToFloatFunctionNode::onPrepareResult()
+void
+ToFloatFunctionNode::onPrepareResult()
{
- setResultType(std::unique_ptr<ResultNode>(new FloatResultNode()));
+ setResultType(std::make_unique<FloatResultNode>());
}
-bool ToFloatFunctionNode::onExecute() const
+bool
+ToFloatFunctionNode::onExecute() const
{
getArg().execute();
updateResult().set(*getArg().getResult());
return true;
}
-void StrLenFunctionNode::onPrepareResult()
+void
+StrLenFunctionNode::onPrepareResult()
{
- setResultType(std::unique_ptr<ResultNode>(new Int64ResultNode()));
+ setResultType(std::make_unique<Int64ResultNode>());
}
-bool StrLenFunctionNode::onExecute() const
+bool
+StrLenFunctionNode::onExecute() const
{
getArg().execute();
char buf[32];
@@ -395,12 +430,14 @@ bool StrLenFunctionNode::onExecute() const
return true;
}
-void NormalizeSubjectFunctionNode::onPrepareResult()
+void
+NormalizeSubjectFunctionNode::onPrepareResult()
{
- setResultType(std::unique_ptr<ResultNode>(new StringResultNode()));
+ setResultType(std::make_unique<StringResultNode>());
}
-bool NormalizeSubjectFunctionNode::onExecute() const
+bool
+NormalizeSubjectFunctionNode::onExecute() const
{
getArg().execute();
char buf[32];
@@ -422,12 +459,14 @@ bool NormalizeSubjectFunctionNode::onExecute() const
return true;
}
-void NumElemFunctionNode::onPrepareResult()
+void
+NumElemFunctionNode::onPrepareResult()
{
- setResultType(std::unique_ptr<ResultNode>(new Int64ResultNode(1)));
+ setResultType(std::make_unique<Int64ResultNode>(1));
}
-bool NumElemFunctionNode::onExecute() const
+bool
+NumElemFunctionNode::onExecute() const
{
getArg().execute();
if (getArg().getResult()->inherits(ResultNodeVector::classId)) {
@@ -436,7 +475,8 @@ bool NumElemFunctionNode::onExecute() const
return true;
}
-bool NegateFunctionNode::onExecute() const
+bool
+NegateFunctionNode::onExecute() const
{
getArg().execute();
updateResult().assign(*getArg().getResult());
@@ -444,7 +484,8 @@ bool NegateFunctionNode::onExecute() const
return true;
}
-bool SortFunctionNode::onExecute() const
+bool
+SortFunctionNode::onExecute() const
{
getArg().execute();
updateResult().assign(*getArg().getResult());
@@ -452,7 +493,8 @@ bool SortFunctionNode::onExecute() const
return true;
}
-bool ReverseFunctionNode::onExecute() const
+bool
+ReverseFunctionNode::onExecute() const
{
getArg().execute();
updateResult().assign(*getArg().getResult());
@@ -460,7 +502,8 @@ bool ReverseFunctionNode::onExecute() const
return true;
}
-bool StrCatFunctionNode::onExecute() const
+bool
+StrCatFunctionNode::onExecute() const
{
asciistream os;
StrCatSerializer nos(os);
@@ -472,7 +515,8 @@ bool StrCatFunctionNode::onExecute() const
return true;
}
-bool CatFunctionNode::onExecute() const
+bool
+CatFunctionNode::onExecute() const
{
nbostream os;
CatSerializer nos(os);
@@ -484,15 +528,16 @@ bool CatFunctionNode::onExecute() const
return true;
}
-XorBitFunctionNode::XorBitFunctionNode() {}
-XorBitFunctionNode::~XorBitFunctionNode() {}
+XorBitFunctionNode::XorBitFunctionNode() = default;
+XorBitFunctionNode::~XorBitFunctionNode() = default;
XorBitFunctionNode::XorBitFunctionNode(ExpressionNode::UP arg, unsigned numBits) :
UnaryBitFunctionNode(std::move(arg), numBits),
_tmpXor(getNumBytes(), 0)
{}
-bool UnaryBitFunctionNode::onExecute() const
+bool
+UnaryBitFunctionNode::onExecute() const
{
_tmpOs.clear();
getArg().execute();
@@ -501,13 +546,15 @@ bool UnaryBitFunctionNode::onExecute() const
return internalExecute(_tmpOs);
}
-void XorBitFunctionNode::onPrepareResult()
+void
+XorBitFunctionNode::onPrepareResult()
{
UnaryBitFunctionNode::onPrepareResult();
_tmpXor.resize(getNumBytes());
}
-bool XorBitFunctionNode::internalExecute(const nbostream & os) const
+bool
+XorBitFunctionNode::internalExecute(const nbostream & os) const
{
const size_t numBytes(_tmpXor.size());
memset(&_tmpXor[0], 0, numBytes);
@@ -524,7 +571,8 @@ bool XorBitFunctionNode::internalExecute(const nbostream & os) const
return true;
}
-bool MD5BitFunctionNode::internalExecute(const nbostream & os) const
+bool
+MD5BitFunctionNode::internalExecute(const nbostream & os) const
{
const unsigned int MD5_DIGEST_LENGTH = 16;
unsigned char md5ScratchPad[MD5_DIGEST_LENGTH];
@@ -533,11 +581,13 @@ bool MD5BitFunctionNode::internalExecute(const nbostream & os) const
return true;
}
-Serializer & FunctionNode::onSerialize(Serializer & os) const
+Serializer &
+FunctionNode::onSerialize(Serializer & os) const
{
return os << _tmpResult;
}
-Deserializer & FunctionNode::onDeserialize(Deserializer & is)
+Deserializer &
+FunctionNode::onDeserialize(Deserializer & is)
{
return is >> _tmpResult;
}
@@ -548,11 +598,13 @@ ConstantNode::visitMembers(vespalib::ObjectVisitor &visitor) const
visit(visitor, "Value", _result);
}
-Serializer & ConstantNode::onSerialize(Serializer & os) const
+Serializer &
+ConstantNode::onSerialize(Serializer & os) const
{
return os << _result;
}
-Deserializer & ConstantNode::onDeserialize(Deserializer & is)
+Deserializer &
+ConstantNode::onDeserialize(Deserializer & is)
{
return is >> _result;
}
@@ -565,14 +617,16 @@ FunctionNode::visitMembers(vespalib::ObjectVisitor & visitor) const
visit(visitor, "tmpResult", _tmpResult);
}
-void FunctionNode::selectMembers(const vespalib::ObjectPredicate & predicate, vespalib::ObjectOperation & operation)
+void
+FunctionNode::selectMembers(const vespalib::ObjectPredicate & predicate, vespalib::ObjectOperation & operation)
{
if (_tmpResult.get()) {
_tmpResult->select(predicate, operation);
}
}
-void MultiArgFunctionNode::selectMembers(const vespalib::ObjectPredicate & predicate, vespalib::ObjectOperation & operation)
+void
+MultiArgFunctionNode::selectMembers(const vespalib::ObjectPredicate & predicate, vespalib::ObjectOperation & operation)
{
FunctionNode::selectMembers(predicate, operation);
for(size_t i(0), m(_args.size()); i < m; i++) {
@@ -580,23 +634,26 @@ void MultiArgFunctionNode::selectMembers(const vespalib::ObjectPredicate & predi
}
}
-Serializer & MultiArgFunctionNode::onSerialize(Serializer & os) const
+Serializer &
+MultiArgFunctionNode::onSerialize(Serializer & os) const
{
FunctionNode::onSerialize(os);
os << _args;
return os;
}
-Deserializer & MultiArgFunctionNode::onDeserialize(Deserializer & is)
+
+Deserializer &
+MultiArgFunctionNode::onDeserialize(Deserializer & is)
{
FunctionNode::onDeserialize(is);
return is >> _args;
}
-MultiArgFunctionNode::MultiArgFunctionNode() : FunctionNode() { }
+MultiArgFunctionNode::MultiArgFunctionNode() noexcept : FunctionNode() { }
MultiArgFunctionNode::MultiArgFunctionNode(const MultiArgFunctionNode &) = default;
MultiArgFunctionNode & MultiArgFunctionNode::operator = (const MultiArgFunctionNode &) = default;
-MultiArgFunctionNode::~MultiArgFunctionNode() {}
+MultiArgFunctionNode::~MultiArgFunctionNode() = default;
void
MultiArgFunctionNode::visitMembers(vespalib::ObjectVisitor &visitor) const
@@ -605,12 +662,14 @@ MultiArgFunctionNode::visitMembers(vespalib::ObjectVisitor &visitor) const
visit(visitor, "args", _args);
}
-Serializer & UnaryBitFunctionNode::onSerialize(Serializer & os) const
+Serializer &
+UnaryBitFunctionNode::onSerialize(Serializer & os) const
{
UnaryFunctionNode::onSerialize(os);
return os << _numBits;
}
-Deserializer & UnaryBitFunctionNode::onDeserialize(Deserializer & is)
+Deserializer &
+UnaryBitFunctionNode::onDeserialize(Deserializer & is)
{
UnaryFunctionNode::onDeserialize(is);
return is >> _numBits;
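The hunks above are mostly mechanical modernization of the expression nodes: `std::unique_ptr<ResultNode>(new X())` becomes `std::make_unique<X>()`, empty constructor/destructor bodies become `= default`, and return types move to their own line. A minimal standalone sketch of the first two idioms, using placeholder types rather than the real Vespa classes (illustration only, not part of the patch):

    #include <memory>

    // Placeholder stand-ins for the real ResultNode hierarchy; illustration only.
    struct ResultNode { virtual ~ResultNode() = default; };
    struct StringResultNode : ResultNode {};

    struct FunctionNodeSketch {
        std::unique_ptr<ResultNode> _result;

        // Old style: explicit new wrapped in a unique_ptr.
        void prepareOld() { _result = std::unique_ptr<ResultNode>(new StringResultNode()); }

        // New style: make_unique is shorter and exception-safe in larger expressions.
        void prepareNew() { _result = std::make_unique<StringResultNode>(); }

        // '= default' replaces an empty user-provided body '{}'; it is equivalent here
        // but states intent and lets the compiler treat the member as defaulted.
        ~FunctionNodeSketch() = default;
    };

    int main() {
        FunctionNodeSketch node;
        node.prepareNew();
        return 0;
    }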
diff --git a/searchlib/src/vespa/searchlib/expression/interpolatedlookupfunctionnode.cpp b/searchlib/src/vespa/searchlib/expression/interpolatedlookupfunctionnode.cpp
index f220b1d455d..705fba1f75e 100644
--- a/searchlib/src/vespa/searchlib/expression/interpolatedlookupfunctionnode.cpp
+++ b/searchlib/src/vespa/searchlib/expression/interpolatedlookupfunctionnode.cpp
@@ -5,23 +5,20 @@
#include <vespa/searchlib/common/converters.h>
#include <vespa/vespalib/util/stringfmt.h>
-namespace search {
-namespace expression {
+namespace search::expression {
using vespalib::Serializer;
using vespalib::Deserializer;
IMPLEMENT_EXPRESSIONNODE(InterpolatedLookup, UnaryFunctionNode);
-InterpolatedLookup::InterpolatedLookup()
- : _attribute(0),
+InterpolatedLookup::InterpolatedLookup() noexcept
+ : _attribute(nullptr),
_docId(0)
{
}
-InterpolatedLookup::~InterpolatedLookup()
-{
-}
+InterpolatedLookup::~InterpolatedLookup() = default;
InterpolatedLookup::InterpolatedLookup(const vespalib::string &attribute, ExpressionNode::UP arg)
: UnaryFunctionNode(std::move(arg)),
@@ -44,10 +41,8 @@ InterpolatedLookup::InterpolatedLookup(const InterpolatedLookup &rhs) :
UnaryFunctionNode(rhs),
_attributeName(rhs._attributeName),
_attribute(rhs._attribute),
- _docId(rhs._docId)
+ _docId(0)
{
- // why?
- _docId = 0;
}
InterpolatedLookup &
@@ -57,26 +52,27 @@ InterpolatedLookup::operator= (const InterpolatedLookup &rhs)
UnaryFunctionNode::operator =(rhs);
_attributeName = rhs._attributeName;
_attribute = rhs._attribute;
- // _docId = rhs._docId;
_docId = 0;
}
return *this;
}
-void InterpolatedLookup::onPrepareResult()
+void
+InterpolatedLookup::onPrepareResult()
{
- setResultType(std::unique_ptr<ResultNode>(new FloatResultNode()));
+ setResultType(std::make_unique<FloatResultNode>());
}
-static double
-simpleInterpolate(size_t sz, std::vector<double> v, double lookup)
-{
+namespace {
+
+double
+simpleInterpolate(size_t sz, std::vector<double> v, double lookup) {
if (sz == 0 || lookup < v[0])
return 0;
for (size_t i = 1; i < sz; ++i) {
if (lookup < v[i]) {
- double total = v[i] - v[i-1];
- double above = lookup - v[i-1];
+ double total = v[i] - v[i - 1];
+ double above = lookup - v[i - 1];
double result = i - 1;
result += (above / total);
return result;
@@ -85,7 +81,10 @@ simpleInterpolate(size_t sz, std::vector<double> v, double lookup)
return sz - 1;
}
-bool InterpolatedLookup::onExecute() const
+}
+
+bool
+InterpolatedLookup::onExecute() const
{
getArg().execute();
double lookup = getArg().getResult()->getFloat();
@@ -99,27 +98,29 @@ bool InterpolatedLookup::onExecute() const
return true;
}
-void InterpolatedLookup::wireAttributes(const search::attribute::IAttributeContext & attrCtx)
+void
+InterpolatedLookup::wireAttributes(const search::attribute::IAttributeContext & attrCtx)
{
_attribute = attrCtx.getAttribute(_attributeName);
- if (_attribute == NULL) {
+ if (_attribute == nullptr) {
throw std::runtime_error(vespalib::make_string("Failed locating attribute vector '%s'", _attributeName.c_str()));
}
}
-Serializer & InterpolatedLookup::onSerialize(Serializer & os) const
+Serializer &
+InterpolatedLookup::onSerialize(Serializer & os) const
{
UnaryFunctionNode::onSerialize(os);
os << _attributeName;
return os;
}
-Deserializer & InterpolatedLookup::onDeserialize(Deserializer & is)
+Deserializer &
+InterpolatedLookup::onDeserialize(Deserializer & is)
{
UnaryFunctionNode::onDeserialize(is);
is >> _attributeName;
return is;
}
-} // namespace expression
-} // namespace search
+}
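The `simpleInterpolate` helper moved into an anonymous namespace above maps a lookup value to a fractional index in a sorted array: values below the first element map to 0, values at or beyond the last map to size-1, and values in between get index i-1 plus the fractional position inside the bracketing interval. A standalone sketch of the same rule, not tied to the Vespa attribute machinery:

    #include <cstdio>
    #include <vector>

    // Same interpolation rule as simpleInterpolate() above: returns a fractional
    // index into a sorted vector, clamped to [0, v.size() - 1].
    double interpolate(const std::vector<double>& v, double lookup) {
        if (v.empty() || lookup < v[0]) return 0.0;
        for (size_t i = 1; i < v.size(); ++i) {
            if (lookup < v[i]) {
                double total = v[i] - v[i - 1];   // width of the bracketing interval
                double above = lookup - v[i - 1]; // distance into that interval
                return (i - 1) + above / total;
            }
        }
        return v.size() - 1.0; // at or beyond the last element
    }

    int main() {
        std::vector<double> v{1.0, 2.0, 4.0};
        std::printf("%.2f\n", interpolate(v, 3.0)); // 1.50: halfway between v[1]=2 and v[2]=4
        std::printf("%.2f\n", interpolate(v, 0.5)); // 0.00: below the first element
        std::printf("%.2f\n", interpolate(v, 9.0)); // 2.00: clamped to the last index
        return 0;
    }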
diff --git a/searchlib/src/vespa/searchlib/expression/interpolatedlookupfunctionnode.h b/searchlib/src/vespa/searchlib/expression/interpolatedlookupfunctionnode.h
index 794adf9106d..de2ba5c362c 100644
--- a/searchlib/src/vespa/searchlib/expression/interpolatedlookupfunctionnode.h
+++ b/searchlib/src/vespa/searchlib/expression/interpolatedlookupfunctionnode.h
@@ -3,10 +3,9 @@
#include "unaryfunctionnode.h"
-namespace search {
- namespace attribute { class IAttributeVector; }
+namespace search::attribute { class IAttributeVector; }
-namespace expression {
+namespace search::expression {
class InterpolatedLookup : public UnaryFunctionNode
{
@@ -14,8 +13,8 @@ public:
DECLARE_EXPRESSIONNODE(InterpolatedLookup);
DECLARE_NBO_SERIALIZE;
- InterpolatedLookup();
- ~InterpolatedLookup();
+ InterpolatedLookup() noexcept;
+ ~InterpolatedLookup() override;
InterpolatedLookup(const vespalib::string &attribute, ExpressionNode::UP arg);
InterpolatedLookup(const search::attribute::IAttributeVector &attr, ExpressionNode::UP lookupArg);
InterpolatedLookup(const InterpolatedLookup &rhs);
@@ -31,5 +30,3 @@ private:
};
}
-}
-
diff --git a/searchlib/src/vespa/searchlib/expression/multiargfunctionnode.h b/searchlib/src/vespa/searchlib/expression/multiargfunctionnode.h
index 6702fa37215..d38e2f9a6d5 100644
--- a/searchlib/src/vespa/searchlib/expression/multiargfunctionnode.h
+++ b/searchlib/src/vespa/searchlib/expression/multiargfunctionnode.h
@@ -3,8 +3,7 @@
#include "functionnode.h"
-namespace search {
-namespace expression {
+namespace search::expression {
class MultiArgFunctionNode : public FunctionNode
{
@@ -13,11 +12,11 @@ public:
DECLARE_NBO_SERIALIZE;
void visitMembers(vespalib::ObjectVisitor & visitor) const override;
DECLARE_ABSTRACT_EXPRESSIONNODE(MultiArgFunctionNode);
- MultiArgFunctionNode();
+ MultiArgFunctionNode() noexcept;
MultiArgFunctionNode(const MultiArgFunctionNode &);
MultiArgFunctionNode & operator = (const MultiArgFunctionNode &);
- MultiArgFunctionNode(MultiArgFunctionNode &&) = default;
- MultiArgFunctionNode & operator = (MultiArgFunctionNode &&) = default;
+ MultiArgFunctionNode(MultiArgFunctionNode &&) noexcept = default;
+ MultiArgFunctionNode & operator = (MultiArgFunctionNode &&) noexcept = default;
~MultiArgFunctionNode();
MultiArgFunctionNode & appendArg(ExpressionNode::UP arg) { return addArg(std::move(arg)); }
MultiArgFunctionNode & addArg(ExpressionNode::UP arg) {
@@ -42,5 +41,3 @@ private:
};
}
-}
-
diff --git a/searchlib/src/vespa/searchlib/expression/orfunctionnode.h b/searchlib/src/vespa/searchlib/expression/orfunctionnode.h
index a657d896eb2..e2522eedc4f 100644
--- a/searchlib/src/vespa/searchlib/expression/orfunctionnode.h
+++ b/searchlib/src/vespa/searchlib/expression/orfunctionnode.h
@@ -3,14 +3,13 @@
#include "bitfunctionnode.h"
-namespace search {
-namespace expression {
+namespace search::expression {
class OrFunctionNode : public BitFunctionNode
{
public:
DECLARE_EXPRESSIONNODE(OrFunctionNode);
- OrFunctionNode() { }
+ OrFunctionNode() noexcept = default;
private:
ResultNode::CP getInitialValue() const override { return ResultNode::CP(new Int64ResultNode(0)); }
ResultNode & flatten(const ResultNodeVector & v, ResultNode & result) const override { return v.flattenOr(result); }
@@ -18,4 +17,3 @@ private:
};
}
-}
diff --git a/searchlib/src/vespa/searchlib/expression/rangebucketpredef.h b/searchlib/src/vespa/searchlib/expression/rangebucketpredef.h
index 313ef62404d..fb892702444 100644
--- a/searchlib/src/vespa/searchlib/expression/rangebucketpredef.h
+++ b/searchlib/src/vespa/searchlib/expression/rangebucketpredef.h
@@ -7,8 +7,7 @@
#include "floatresultnode.h"
#include "stringresultnode.h"
-namespace search {
-namespace expression {
+namespace search::expression {
class RangeBucketPreDefFunctionNode : public UnaryFunctionNode
{
@@ -57,8 +56,8 @@ private:
public:
DECLARE_EXPRESSIONNODE(RangeBucketPreDefFunctionNode);
DECLARE_NBO_SERIALIZE;
- RangeBucketPreDefFunctionNode() : UnaryFunctionNode(), _predef(), _result(NULL), _nullResult(NULL) {}
- RangeBucketPreDefFunctionNode(ExpressionNode::UP arg) : UnaryFunctionNode(std::move(arg)), _predef(), _result(NULL), _nullResult(NULL) {}
+ RangeBucketPreDefFunctionNode() : UnaryFunctionNode(), _predef(), _result(nullptr), _nullResult(nullptr) {}
+ RangeBucketPreDefFunctionNode(ExpressionNode::UP arg) : UnaryFunctionNode(std::move(arg)), _predef(), _result(nullptr), _nullResult(nullptr) {}
RangeBucketPreDefFunctionNode(const RangeBucketPreDefFunctionNode & rhs);
RangeBucketPreDefFunctionNode & operator = (const RangeBucketPreDefFunctionNode & rhs);
~RangeBucketPreDefFunctionNode();
@@ -72,4 +71,3 @@ public:
};
}
-}
diff --git a/searchlib/src/vespa/searchlib/expression/relevancenode.h b/searchlib/src/vespa/searchlib/expression/relevancenode.h
index 05ac3fe6cc4..675ce996ec3 100644
--- a/searchlib/src/vespa/searchlib/expression/relevancenode.h
+++ b/searchlib/src/vespa/searchlib/expression/relevancenode.h
@@ -3,8 +3,7 @@
#include "floatresultnode.h"
-namespace search {
-namespace expression {
+namespace search::expression {
class RelevanceNode : public ExpressionNode
{
@@ -22,5 +21,3 @@ private:
};
}
-}
-
diff --git a/searchlib/src/vespa/searchlib/expression/resultvector.h b/searchlib/src/vespa/searchlib/expression/resultvector.h
index fa0e2d9a71f..22fac0b214b 100644
--- a/searchlib/src/vespa/searchlib/expression/resultvector.h
+++ b/searchlib/src/vespa/searchlib/expression/resultvector.h
@@ -83,8 +83,8 @@ public:
using Vector = std::vector<B>;
using BaseType = B;
~ResultNodeVectorT() override;
- const Vector & getVector() const { return _result; }
- Vector & getVector() { return _result; }
+ const Vector & getVector() const noexcept { return _result; }
+ Vector & getVector() noexcept { return _result; }
const ResultNode * find(const ResultNode & key) const override;
void sort() override;
void reverse() override;
@@ -113,14 +113,16 @@ template <typename B, typename C, typename G>
ResultNodeVectorT<B, C, G>::~ResultNodeVectorT() = default;
template <typename B, typename C, typename G>
-ResultNodeVector & ResultNodeVectorT<B, C, G>::set(size_t index, const ResultNode & node)
+ResultNodeVector &
+ResultNodeVectorT<B, C, G>::set(size_t index, const ResultNode & node)
{
_result[index].set(node);
return *this;
}
template <typename B, typename C, typename G>
-ResultNodeVector & ResultNodeVectorT<B, C, G>::push_back_safe(const ResultNode & node)
+ResultNodeVector &
+ResultNodeVectorT<B, C, G>::push_back_safe(const ResultNode & node)
{
if (node.inherits(B::classId)) {
_result.push_back(static_cast<const B &>(node));
@@ -133,14 +135,16 @@ ResultNodeVector & ResultNodeVectorT<B, C, G>::push_back_safe(const ResultNode &
}
template <typename B, typename C, typename G>
-ResultNodeVector & ResultNodeVectorT<B, C, G>::push_back(const ResultNode & node)
+ResultNodeVector &
+ResultNodeVectorT<B, C, G>::push_back(const ResultNode & node)
{
_result.push_back(static_cast<const B &>(node));
return *this;
}
template <typename B, typename C, typename G>
-int ResultNodeVectorT<B, C, G>::onCmp(const Identifiable & rhs) const
+int
+ResultNodeVectorT<B, C, G>::onCmp(const Identifiable & rhs) const
{
const ResultNodeVectorT & b(static_cast<const ResultNodeVectorT &>(rhs));
int diff = _result.size() - b._result.size();
@@ -151,20 +155,23 @@ int ResultNodeVectorT<B, C, G>::onCmp(const Identifiable & rhs) const
}
template <typename B, typename C, typename G>
-void ResultNodeVectorT<B, C, G>::sort()
+void
+ResultNodeVectorT<B, C, G>::sort()
{
using LC = cmpT<B>;
std::sort(_result.begin(), _result.end(), typename LC::less());
}
template <typename B, typename C, typename G>
-void ResultNodeVectorT<B, C, G>::reverse()
+void
+ResultNodeVectorT<B, C, G>::reverse()
{
std::reverse(_result.begin(), _result.end());
}
template <typename B, typename C, typename G>
-size_t ResultNodeVectorT<B, C, G>::hash() const
+size_t
+ResultNodeVectorT<B, C, G>::hash() const
{
size_t h(0);
for(typename Vector::const_iterator it(_result.begin()), mt(_result.end()); it != mt; it++) {
@@ -174,7 +181,8 @@ size_t ResultNodeVectorT<B, C, G>::hash() const
}
template <typename B, typename C, typename G>
-void ResultNodeVectorT<B, C, G>::negate()
+void
+ResultNodeVectorT<B, C, G>::negate()
{
for(typename Vector::iterator it(_result.begin()), mt(_result.end()); it != mt; it++) {
it->negate();
@@ -182,25 +190,28 @@ void ResultNodeVectorT<B, C, G>::negate()
}
template <typename B, typename C, typename G>
-const ResultNode * ResultNodeVectorT<B, C, G>::find(const ResultNode & key) const
+const ResultNode *
+ResultNodeVectorT<B, C, G>::find(const ResultNode & key) const
{
G getter;
typename Vector::const_iterator found = std::lower_bound(_result.begin(), _result.end(), getter(key), typename C::less() );
if (found != _result.end()) {
typename C::equal equal;
- return equal(*found, getter(key)) ? &(*found) : NULL;
+ return equal(*found, getter(key)) ? &(*found) : nullptr;
}
- return NULL;
+ return nullptr;
}
template <typename B, typename C, typename G>
-vespalib::Serializer & ResultNodeVectorT<B, C, G>::onSerialize(vespalib::Serializer & os) const
+vespalib::Serializer &
+ResultNodeVectorT<B, C, G>::onSerialize(vespalib::Serializer & os) const
{
return serialize(_result, os);
}
template <typename B, typename C, typename G>
-vespalib::Deserializer & ResultNodeVectorT<B, C, G>::onDeserialize(vespalib::Deserializer & is)
+vespalib::Deserializer &
+ResultNodeVectorT<B, C, G>::onDeserialize(vespalib::Deserializer & is)
{
return deserialize(_result, is);
}
@@ -226,8 +237,8 @@ public:
B v;
v.set(r);
const std::vector<B> & vec(this->getVector());
- for (size_t i(0), m(vec.size()); i < m; i++) {
- v.multiply(vec[i]);
+ for (const B & item : vec) {
+ v.multiply(item);
}
r.set(v);
return r;
@@ -236,8 +247,8 @@ public:
Int64ResultNode v;
v.set(r);
const std::vector<B> & vec(this->getVector());
- for (size_t i(0), m(vec.size()); i < m; i++) {
- v.andOp(vec[i]);
+ for (const B & item : vec) {
+ v.andOp(item);
}
r.set(v);
return r;
@@ -246,8 +257,8 @@ public:
Int64ResultNode v;
v.set(r);
const std::vector<B> & vec(this->getVector());
- for (size_t i(0), m(vec.size()); i < m; i++) {
- v.orOp(vec[i]);
+ for (const B & item : vec) {
+ v.orOp(item);
}
r.set(v);
return r;
@@ -256,8 +267,8 @@ public:
Int64ResultNode v;
v.set(r);
const std::vector<B> & vec(this->getVector());
- for (size_t i(0), m(vec.size()); i < m; i++) {
- v.xorOp(vec[i]);
+ for (const B & item : vec) {
+ v.xorOp(item);
}
r.set(v);
return r;
@@ -266,8 +277,8 @@ public:
B v;
v.set(r);
const std::vector<B> & vec(this->getVector());
- for (size_t i(0), m(vec.size()); i < m; i++) {
- v.add(vec[i]);
+ for (const B & item : vec) {
+ v.add(item);
}
r.set(v);
return r;
@@ -276,8 +287,8 @@ public:
B v;
v.set(r);
const std::vector<B> & vec(this->getVector());
- for (size_t i(0), m(vec.size()); i < m; i++) {
- v.max(vec[i]);
+ for (const B & item : vec) {
+ v.max(item);
}
r.set(v);
return r;
@@ -286,8 +297,8 @@ public:
B v;
v.set(r);
const std::vector<B> & vec(this->getVector());
- for (size_t i(0), m(vec.size()); i < m; i++) {
- v.min(vec[i]);
+ for (const B & item : vec) {
+ v.min(item);
}
r.set(v);
return r;
@@ -296,10 +307,10 @@ public:
B v;
v.set(r);
const std::vector<B> & vec(this->getVector());
- for (size_t i(0), m(vec.size()); i < m; i++) {
+ for (const B & item : vec) {
B squared;
- squared.set(vec[i]);
- squared.multiply(vec[i]);
+ squared.set(item);
+ squared.multiply(item);
v.add(squared);
}
r.set(v);
@@ -311,7 +322,7 @@ public:
class BoolResultNodeVector : public NumericResultNodeVectorT<BoolResultNode>
{
public:
- BoolResultNodeVector() { }
+ BoolResultNodeVector() noexcept = default;
DECLARE_RESULTNODE(BoolResultNodeVector);
const IntegerBucketResultNode& getNullBucket() const override { return IntegerBucketResultNode::getNull(); }
@@ -320,7 +331,7 @@ public:
class Int8ResultNodeVector : public NumericResultNodeVectorT<Int8ResultNode>
{
public:
- Int8ResultNodeVector() { }
+ Int8ResultNodeVector() noexcept = default;
DECLARE_RESULTNODE(Int8ResultNodeVector);
const IntegerBucketResultNode& getNullBucket() const override { return IntegerBucketResultNode::getNull(); }
@@ -329,7 +340,7 @@ public:
class Int16ResultNodeVector : public NumericResultNodeVectorT<Int16ResultNode>
{
public:
- Int16ResultNodeVector() { }
+ Int16ResultNodeVector() = default;
DECLARE_RESULTNODE(Int16ResultNodeVector);
const IntegerBucketResultNode& getNullBucket() const override { return IntegerBucketResultNode::getNull(); }
@@ -338,7 +349,7 @@ public:
class Int32ResultNodeVector : public NumericResultNodeVectorT<Int32ResultNode>
{
public:
- Int32ResultNodeVector() { }
+ Int32ResultNodeVector() = default;
DECLARE_RESULTNODE(Int32ResultNodeVector);
const IntegerBucketResultNode& getNullBucket() const override { return IntegerBucketResultNode::getNull(); }
@@ -347,7 +358,7 @@ public:
class Int64ResultNodeVector : public NumericResultNodeVectorT<Int64ResultNode>
{
public:
- Int64ResultNodeVector() { }
+ Int64ResultNodeVector() = default;
DECLARE_RESULTNODE(Int64ResultNodeVector);
const IntegerBucketResultNode& getNullBucket() const override { return IntegerBucketResultNode::getNull(); }
@@ -358,14 +369,14 @@ using IntegerResultNodeVector = Int64ResultNodeVector;
class EnumResultNodeVector : public NumericResultNodeVectorT<EnumResultNode>
{
public:
- EnumResultNodeVector() {}
+ EnumResultNodeVector() = default;
DECLARE_RESULTNODE(EnumResultNodeVector);
};
class FloatResultNodeVector : public NumericResultNodeVectorT<FloatResultNode>
{
public:
- FloatResultNodeVector() { }
+ FloatResultNodeVector() = default;
DECLARE_RESULTNODE(FloatResultNodeVector);
const FloatBucketResultNode& getNullBucket() const override { return FloatBucketResultNode::getNull(); }
@@ -374,7 +385,7 @@ public:
class StringResultNodeVector : public ResultNodeVectorT<StringResultNode, cmpT<ResultNode>, vespalib::Identity>
{
public:
- StringResultNodeVector() { }
+ StringResultNodeVector() = default;
DECLARE_RESULTNODE(StringResultNodeVector);
const StringBucketResultNode& getNullBucket() const override { return StringBucketResultNode::getNull(); }
@@ -383,7 +394,7 @@ public:
class RawResultNodeVector : public ResultNodeVectorT<RawResultNode, cmpT<ResultNode>, vespalib::Identity>
{
public:
- RawResultNodeVector() { }
+ RawResultNodeVector() = default;
DECLARE_RESULTNODE(RawResultNodeVector);
const RawBucketResultNode& getNullBucket() const override { return RawBucketResultNode::getNull(); }
@@ -392,28 +403,28 @@ public:
class IntegerBucketResultNodeVector : public ResultNodeVectorT<IntegerBucketResultNode, contains<IntegerBucketResultNode, int64_t>, GetInteger >
{
public:
- IntegerBucketResultNodeVector() { }
+ IntegerBucketResultNodeVector() = default;
DECLARE_RESULTNODE(IntegerBucketResultNodeVector);
};
class FloatBucketResultNodeVector : public ResultNodeVectorT<FloatBucketResultNode, contains<FloatBucketResultNode, double>, GetFloat >
{
public:
- FloatBucketResultNodeVector() { }
+ FloatBucketResultNodeVector() = default;
DECLARE_RESULTNODE(FloatBucketResultNodeVector);
};
class StringBucketResultNodeVector : public ResultNodeVectorT<StringBucketResultNode, contains<StringBucketResultNode, ResultNode::ConstBufferRef>, GetString >
{
public:
- StringBucketResultNodeVector() { }
+ StringBucketResultNodeVector() = default;
DECLARE_RESULTNODE(StringBucketResultNodeVector);
};
class RawBucketResultNodeVector : public ResultNodeVectorT<RawBucketResultNode, contains<RawBucketResultNode, ResultNode::ConstBufferRef>, GetString >
{
public:
- RawBucketResultNodeVector() { }
+ RawBucketResultNodeVector() = default;
DECLARE_RESULTNODE(RawBucketResultNodeVector);
};
diff --git a/searchlib/src/vespa/searchlib/expression/unaryfunctionnode.h b/searchlib/src/vespa/searchlib/expression/unaryfunctionnode.h
index 6283204e88b..96e54d78ec9 100644
--- a/searchlib/src/vespa/searchlib/expression/unaryfunctionnode.h
+++ b/searchlib/src/vespa/searchlib/expression/unaryfunctionnode.h
@@ -3,14 +3,13 @@
#include "multiargfunctionnode.h"
-namespace search {
-namespace expression {
+namespace search::expression {
class UnaryFunctionNode : public MultiArgFunctionNode
{
public:
DECLARE_ABSTRACT_EXPRESSIONNODE(UnaryFunctionNode);
- UnaryFunctionNode() { }
+ UnaryFunctionNode() noexcept = default;
UnaryFunctionNode(ExpressionNode::UP arg) :
MultiArgFunctionNode()
{
@@ -23,5 +22,3 @@ private:
};
}
-}
-
diff --git a/searchlib/src/vespa/searchlib/features/closenessfeature.cpp b/searchlib/src/vespa/searchlib/features/closenessfeature.cpp
index 048a507b3fd..05579ad4fc1 100644
--- a/searchlib/src/vespa/searchlib/features/closenessfeature.cpp
+++ b/searchlib/src/vespa/searchlib/features/closenessfeature.cpp
@@ -44,7 +44,7 @@ ConvertRawScoreToCloseness::ConvertRawScoreToCloseness(const fef::IQueryEnvironm
void
ConvertRawScoreToCloseness::execute(uint32_t docId)
{
- feature_t max_closeness = 0.0;
+ feature_t max_closeness = _bundle.min_rawscore();
assert(_md);
for (const auto& elem : _bundle.elements()) {
const TermFieldMatchData *tfmd = _md->resolveTermField(elem.handle);
diff --git a/searchlib/src/vespa/searchlib/features/distance_calculator_bundle.cpp b/searchlib/src/vespa/searchlib/features/distance_calculator_bundle.cpp
index fad4c649165..22afaa3ca84 100644
--- a/searchlib/src/vespa/searchlib/features/distance_calculator_bundle.cpp
+++ b/searchlib/src/vespa/searchlib/features/distance_calculator_bundle.cpp
@@ -97,7 +97,8 @@ DistanceCalculatorBundle::DistanceCalculatorBundle(const fef::IQueryEnvironment&
uint32_t field_id,
const vespalib::string& feature_name)
- : _elems()
+ : _elems(),
+ _min_rawscore(0.0)
{
_elems.reserve(env.getNumTerms());
const auto* attr = resolve_attribute_for_field(env, field_id, feature_name);
@@ -107,6 +108,7 @@ DistanceCalculatorBundle::DistanceCalculatorBundle(const fef::IQueryEnvironment&
const auto* term = env.getTerm(i);
if (term->query_tensor_name().has_value() && (attr != nullptr)) {
_elems.emplace_back(handle, make_distance_calculator(env, *attr, term->query_tensor_name().value(), feature_name));
+ _min_rawscore = _elems.back().calc->function().min_rawscore();
} else {
_elems.emplace_back(handle);
}
@@ -118,7 +120,8 @@ DistanceCalculatorBundle::DistanceCalculatorBundle(const fef::IQueryEnvironment&
std::optional<uint32_t> field_id,
const vespalib::string& label,
const vespalib::string& feature_name)
- : _elems()
+ : _elems(),
+ _min_rawscore(0.0)
{
const ITermData* term = util::getTermByLabel(env, label);
if (term != nullptr) {
@@ -135,6 +138,7 @@ DistanceCalculatorBundle::DistanceCalculatorBundle(const fef::IQueryEnvironment&
const auto* attr = resolve_attribute_for_field(env, term_field.getFieldId(), feature_name);
if (attr != nullptr) {
calc = make_distance_calculator(env, *attr, term->query_tensor_name().value(), feature_name);
+ _min_rawscore = calc->function().min_rawscore();
}
}
_elems.emplace_back(handle, std::move(calc));
diff --git a/searchlib/src/vespa/searchlib/features/distance_calculator_bundle.h b/searchlib/src/vespa/searchlib/features/distance_calculator_bundle.h
index e3be52aecc5..cb85985cc09 100644
--- a/searchlib/src/vespa/searchlib/features/distance_calculator_bundle.h
+++ b/searchlib/src/vespa/searchlib/features/distance_calculator_bundle.h
@@ -34,6 +34,7 @@ public:
};
private:
std::vector<Element> _elems;
+ double _min_rawscore;
public:
DistanceCalculatorBundle(const fef::IQueryEnvironment& env,
@@ -47,6 +48,8 @@ public:
const std::vector<Element>& elements() const { return _elems; }
+ double min_rawscore() const { return _min_rawscore; }
+
static void prepare_shared_state(const fef::IQueryEnvironment& env,
fef::IObjectStore& store,
uint32_t field_id,
diff --git a/searchlib/src/vespa/searchlib/fef/itermdata.h b/searchlib/src/vespa/searchlib/fef/itermdata.h
index 306c91f7ab2..9a063cf93ee 100644
--- a/searchlib/src/vespa/searchlib/fef/itermdata.h
+++ b/searchlib/src/vespa/searchlib/fef/itermdata.h
@@ -16,7 +16,7 @@ namespace search::fef {
class ITermData
{
protected:
- virtual ~ITermData() {}
+ virtual ~ITermData() = default;
public:
/**
diff --git a/searchlib/src/vespa/searchlib/fef/itermfielddata.h b/searchlib/src/vespa/searchlib/fef/itermfielddata.h
index 057a5794fa9..88fa8c5f781 100644
--- a/searchlib/src/vespa/searchlib/fef/itermfielddata.h
+++ b/searchlib/src/vespa/searchlib/fef/itermfielddata.h
@@ -76,7 +76,7 @@ public:
**/
virtual TermFieldHandle getHandle(MatchDataDetails requested_details) const = 0;
protected:
- virtual ~ITermFieldData() {}
+ virtual ~ITermFieldData() = default;
private:
uint32_t _fieldId;
uint32_t _matching_doc_count;
diff --git a/searchlib/src/vespa/searchlib/fef/matchdatalayout.cpp b/searchlib/src/vespa/searchlib/fef/matchdatalayout.cpp
index 99326b2c1e6..632bd422581 100644
--- a/searchlib/src/vespa/searchlib/fef/matchdatalayout.cpp
+++ b/searchlib/src/vespa/searchlib/fef/matchdatalayout.cpp
@@ -6,8 +6,7 @@
namespace search::fef {
MatchDataLayout::MatchDataLayout()
- : _numTermFields(0),
- _fieldIds()
+ : _fieldIds()
{
}
@@ -17,9 +16,8 @@ MatchDataLayout::~MatchDataLayout() = default;
MatchData::UP
MatchDataLayout::createMatchData() const
{
- assert(_numTermFields == _fieldIds.size());
- auto md = std::make_unique<MatchData>(MatchData::params().numTermFields(_numTermFields));
- for (size_t i = 0; i < _numTermFields; ++i) {
+ auto md = std::make_unique<MatchData>(MatchData::params().numTermFields(_fieldIds.size()));
+ for (size_t i = 0; i < _fieldIds.size(); ++i) {
md->resolveTermField(i)->setFieldId(_fieldIds[i]);
}
return md;
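The change above drops the redundant `_numTermFields` counter: the number of term fields is now simply `_fieldIds.size()`, `allocTermField` returns the index of the entry it just pushed, and a `reserve()` hook is added in the header. A minimal sketch of that bookkeeping in isolation (simplified types, not the real MatchData classes):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Simplified stand-in for MatchDataLayout: a term field handle is just its
    // position in _fieldIds, so no separate counter is needed.
    class LayoutSketch {
        std::vector<uint32_t> _fieldIds;
    public:
        uint32_t allocTermField(uint32_t fieldId) {
            _fieldIds.push_back(fieldId);
            return static_cast<uint32_t>(_fieldIds.size() - 1); // handle == index of the new entry
        }
        size_t numTermFields() const { return _fieldIds.size(); }
    };

    int main() {
        LayoutSketch layout;
        uint32_t h0 = layout.allocTermField(7);
        uint32_t h1 = layout.allocTermField(7);
        assert(h0 == 0 && h1 == 1);
        assert(layout.numTermFields() == 2);
        return 0;
    }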
diff --git a/searchlib/src/vespa/searchlib/fef/matchdatalayout.h b/searchlib/src/vespa/searchlib/fef/matchdatalayout.h
index 05d25a322db..8f7717ce7ac 100644
--- a/searchlib/src/vespa/searchlib/fef/matchdatalayout.h
+++ b/searchlib/src/vespa/searchlib/fef/matchdatalayout.h
@@ -14,14 +14,16 @@ namespace search::fef {
class MatchDataLayout
{
private:
- uint32_t _numTermFields;
std::vector<uint32_t> _fieldIds;
-
public:
/**
* Create an empty object.
**/
MatchDataLayout();
+ MatchDataLayout(MatchDataLayout &&) noexcept = default;
+ MatchDataLayout & operator=(MatchDataLayout &&) noexcept = default;
+ MatchDataLayout(const MatchDataLayout &) = default;
+ MatchDataLayout & operator=(const MatchDataLayout &) = delete;
~MatchDataLayout();
/**
@@ -32,8 +34,9 @@ public:
**/
TermFieldHandle allocTermField(uint32_t fieldId) {
_fieldIds.push_back(fieldId);
- return _numTermFields++;
+ return _fieldIds.size() - 1;
}
+ void reserve(size_t sz) { _fieldIds.reserve(sz); }
/**
* Create a match data object with the layout described by this
diff --git a/searchlib/src/vespa/searchlib/fef/objectstore.h b/searchlib/src/vespa/searchlib/fef/objectstore.h
index 06575c61eb5..7ba08284111 100644
--- a/searchlib/src/vespa/searchlib/fef/objectstore.h
+++ b/searchlib/src/vespa/searchlib/fef/objectstore.h
@@ -2,7 +2,6 @@
#pragma once
#include <vespa/vespalib/stllike/hash_map.h>
-#include <cassert>
namespace search::fef {
@@ -66,7 +65,6 @@ const T &
as_value(const Anything &val) {
using WrapperType = AnyWrapper<T>;
const auto *wrapper = dynamic_cast<const WrapperType *>(&val);
- assert(wrapper != nullptr);
return wrapper->getValue();
}
diff --git a/searchlib/src/vespa/searchlib/fef/phrase_splitter_query_env.cpp b/searchlib/src/vespa/searchlib/fef/phrase_splitter_query_env.cpp
index dfa1d9886f7..bbbdbd69c67 100644
--- a/searchlib/src/vespa/searchlib/fef/phrase_splitter_query_env.cpp
+++ b/searchlib/src/vespa/searchlib/fef/phrase_splitter_query_env.cpp
@@ -43,7 +43,6 @@ PhraseSplitterQueryEnv::PhraseSplitterQueryEnv(const IQueryEnvironment & queryEn
TermFieldHandle numHandles = 0; // how many handles existed in underlying data
for (uint32_t i = 0; i < queryEnv.getNumTerms(); ++i) {
const ITermData *td = queryEnv.getTerm(i);
- assert(td != nullptr);
considerTerm(i, *td, fieldId);
numHandles += td->numFields();
}
diff --git a/searchlib/src/vespa/searchlib/fef/phrasesplitter.cpp b/searchlib/src/vespa/searchlib/fef/phrasesplitter.cpp
index 90a058eda00..b74f12bdb97 100644
--- a/searchlib/src/vespa/searchlib/fef/phrasesplitter.cpp
+++ b/searchlib/src/vespa/searchlib/fef/phrasesplitter.cpp
@@ -42,7 +42,6 @@ PhraseSplitter::update()
for (const auto &copy_info : _phrase_splitter_query_env.get_copy_info()) {
const TermFieldMatchData *src = _matchData->resolveTermField(copy_info.orig_handle);
TermFieldMatchData *dst = resolveSplittedTermField(copy_info.split_handle);
- assert(src != nullptr && dst != nullptr);
copyTermFieldMatchData(*dst, *src, copy_info.offsetInPhrase);
}
diff --git a/searchlib/src/vespa/searchlib/fef/simpletermdata.h b/searchlib/src/vespa/searchlib/fef/simpletermdata.h
index d501d0848e8..391a00e4c8a 100644
--- a/searchlib/src/vespa/searchlib/fef/simpletermdata.h
+++ b/searchlib/src/vespa/searchlib/fef/simpletermdata.h
@@ -7,7 +7,6 @@
#include "simpletermfielddata.h"
#include <vespa/searchlib/query/weight.h>
#include <vector>
-#include <cassert>
namespace search::fef {
@@ -128,24 +127,4 @@ public:
}
};
-/**
- * convenience adapter for easy iteration
- **/
-class SimpleTermFieldRangeAdapter
-{
- SimpleTermData& _ref;
- size_t _idx;
- size_t _lim;
-public:
- explicit SimpleTermFieldRangeAdapter(SimpleTermData& ref)
- : _ref(ref), _idx(0), _lim(ref.numFields())
- {}
-
- [[nodiscard]] bool valid() const { return (_idx < _lim); }
-
- [[nodiscard]] SimpleTermFieldData& get() const { return _ref.field(_idx); }
-
- void next() { assert(valid()); ++_idx; }
-};
-
}
diff --git a/searchlib/src/vespa/searchlib/fef/termfieldmatchdataarray.h b/searchlib/src/vespa/searchlib/fef/termfieldmatchdataarray.h
index 3c1b76ad40e..46d370ee8fe 100644
--- a/searchlib/src/vespa/searchlib/fef/termfieldmatchdataarray.h
+++ b/searchlib/src/vespa/searchlib/fef/termfieldmatchdataarray.h
@@ -3,7 +3,6 @@
#pragma once
#include <vector>
-#include <cassert>
#include <cstddef>
namespace search::fef {
@@ -43,7 +42,6 @@ public:
* @param value the pointer to be added
**/
TermFieldMatchDataArray &add(TermFieldMatchData *value) {
- assert(value != nullptr);
_array.push_back(value);
return *this;
}
diff --git a/searchlib/src/vespa/searchlib/memoryindex/field_index.cpp b/searchlib/src/vespa/searchlib/memoryindex/field_index.cpp
index 093052608c5..199e9a4b8a0 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/field_index.cpp
+++ b/searchlib/src/vespa/searchlib/memoryindex/field_index.cpp
@@ -97,7 +97,7 @@ FieldIndex<interleaved_features>::findFrozen(const vespalib::stringref word) con
if (itr.valid()) {
return _postingListStore.beginFrozen(itr.getData().load_acquire());
}
- return typename PostingList::Iterator();
+ return typename PostingList::ConstIterator();
}
template <bool interleaved_features>
diff --git a/searchlib/src/vespa/searchlib/memoryindex/url_field_inverter.cpp b/searchlib/src/vespa/searchlib/memoryindex/url_field_inverter.cpp
index c79d856676d..c1537c6b290 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/url_field_inverter.cpp
+++ b/searchlib/src/vespa/searchlib/memoryindex/url_field_inverter.cpp
@@ -10,6 +10,7 @@
#include <vespa/vespalib/text/utf8.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <stdexcept>
+#include <cassert>
#include <vespa/log/log.h>
LOG_SETUP(".memoryindex.url_field_inverter");
diff --git a/searchlib/src/vespa/searchlib/parsequery/stackdumpiterator.cpp b/searchlib/src/vespa/searchlib/parsequery/stackdumpiterator.cpp
index ae1d2c16960..f7c38b54227 100644
--- a/searchlib/src/vespa/searchlib/parsequery/stackdumpiterator.cpp
+++ b/searchlib/src/vespa/searchlib/parsequery/stackdumpiterator.cpp
@@ -195,6 +195,7 @@ bool SimpleQueryStackDumpIterator::readNext() {
case ParseItem::ITEM_TRUE:
case ParseItem::ITEM_FALSE:
// no content
+ _currArity = 0;
break;
default:
// Unknown item, so report that no more are available
diff --git a/searchlib/src/vespa/searchlib/query/query_term_ucs4.cpp b/searchlib/src/vespa/searchlib/query/query_term_ucs4.cpp
index e68685bd78c..8c3c2514877 100644
--- a/searchlib/src/vespa/searchlib/query/query_term_ucs4.cpp
+++ b/searchlib/src/vespa/searchlib/query/query_term_ucs4.cpp
@@ -38,17 +38,26 @@ QueryTermUCS4::fillUCS4() {
* This is a 'dirty' optimisation, but this is done to avoid writing a lot of data and blow the cpu caches with something
* you do not really need most of the time. That matters when qps is very high and query is wide, and hits are few.
*/
- std::lock_guard guard(_globalMutex);
- ucs4_t * ucs4 = _termUCS4.load(std::memory_order_relaxed);
- if (ucs4 != nullptr) return ucs4;
- ucs4 = new ucs4_t[_cachedTermLen + 1];
+ std::unique_ptr<ucs4_t[]> ucs4 = asUcs4();
+ ucs4_t * next = ucs4.get();
+ {
+ std::lock_guard guard(_globalMutex);
+ ucs4_t *prev = _termUCS4.load(std::memory_order_relaxed);
+ if (prev != nullptr) return prev;
+ _termUCS4.store(ucs4.release(), std::memory_order_relaxed);
+ }
+ return next;
+}
+
+std::unique_ptr<ucs4_t[]>
+QueryTermUCS4::asUcs4() const {
+ auto ucs4 = std::make_unique<ucs4_t[]>(_cachedTermLen + 1);
vespalib::Utf8Reader r(getTermString());
uint32_t i(0);
while (r.hasMore()) {
ucs4[i++] = r.getChar();
}
ucs4[_cachedTermLen] = 0;
- _termUCS4.store(ucs4);
return ucs4;
}
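The rewrite above moves the UTF-8 to UCS-4 conversion out of the critical section: the buffer is built first via `asUcs4()`, the global mutex is taken only to publish it, and a buffer built by a losing thread is simply discarded. A standalone sketch of that publish-once pattern with a generic payload (QueryTermUCS4 does the same with `_termUCS4`/`_globalMutex`, but deliberately uses relaxed ordering as its comment explains; acquire/release is the conservative choice for a generic sketch):

    #include <atomic>
    #include <memory>
    #include <mutex>
    #include <vector>

    // Build a value lazily, outside the lock, and publish it exactly once.
    template <typename T>
    class LazyPublished {
        std::atomic<T*> _ptr{nullptr};
        std::mutex      _mutex;
    public:
        ~LazyPublished() { delete _ptr.load(std::memory_order_acquire); }

        template <typename Builder>
        T* get_or_build(Builder build) {
            if (T* cached = _ptr.load(std::memory_order_acquire)) return cached;
            std::unique_ptr<T> fresh = build();        // expensive conversion, no lock held
            T* mine = fresh.get();
            std::lock_guard<std::mutex> guard(_mutex);
            if (T* prev = _ptr.load(std::memory_order_relaxed)) return prev; // lost the race
            _ptr.store(fresh.release(), std::memory_order_release);
            return mine;
        }
    };

    int main() {
        LazyPublished<std::vector<int>> cache;
        auto* v = cache.get_or_build([] { return std::make_unique<std::vector<int>>(3, 42); });
        return (*v)[0] == 42 ? 0 : 1;
    }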
diff --git a/searchlib/src/vespa/searchlib/query/query_term_ucs4.h b/searchlib/src/vespa/searchlib/query/query_term_ucs4.h
index 0639ce8a74c..673927cf685 100644
--- a/searchlib/src/vespa/searchlib/query/query_term_ucs4.h
+++ b/searchlib/src/vespa/searchlib/query/query_term_ucs4.h
@@ -21,6 +21,7 @@ public:
uint32_t getTermLen() const { return _cachedTermLen; }
uint32_t term(const char * & t) const { t = getTerm(); return _cachedTermLen; }
void visitMembers(vespalib::ObjectVisitor &visitor) const override;
+ std::unique_ptr<ucs4_t[]> asUcs4() const;
uint32_t term(const ucs4_t * & t) {
t = _termUCS4.load(std::memory_order_relaxed);
if (t == nullptr) {
diff --git a/searchlib/src/vespa/searchlib/query/tree/node.h b/searchlib/src/vespa/searchlib/query/tree/node.h
index af9925e2ea3..7123d52a503 100644
--- a/searchlib/src/vespa/searchlib/query/tree/node.h
+++ b/searchlib/src/vespa/searchlib/query/tree/node.h
@@ -22,4 +22,3 @@ class Node {
};
}
-
diff --git a/searchlib/src/vespa/searchlib/query/tree/termnodes.cpp b/searchlib/src/vespa/searchlib/query/tree/termnodes.cpp
index 00f17f7963c..51882e6d185 100644
--- a/searchlib/src/vespa/searchlib/query/tree/termnodes.cpp
+++ b/searchlib/src/vespa/searchlib/query/tree/termnodes.cpp
@@ -3,6 +3,7 @@
#include "termnodes.h"
#include <vespa/vespalib/util/exceptions.h>
#include <charconv>
+#include <cassert>
using vespalib::IllegalArgumentException;
using vespalib::stringref;
diff --git a/searchlib/src/vespa/searchlib/queryeval/blueprint.h b/searchlib/src/vespa/searchlib/queryeval/blueprint.h
index 1ea02e41a62..dc7a0992d82 100644
--- a/searchlib/src/vespa/searchlib/queryeval/blueprint.h
+++ b/searchlib/src/vespa/searchlib/queryeval/blueprint.h
@@ -331,6 +331,7 @@ public:
size_t childCnt() const { return _children.size(); }
const Blueprint &getChild(size_t n) const { return *_children[n]; }
Blueprint &getChild(size_t n) { return *_children[n]; }
+ void reserve(size_t sz) { _children.reserve(sz); }
IntermediateBlueprint & insertChild(size_t n, Blueprint::UP child);
IntermediateBlueprint &addChild(Blueprint::UP child);
Blueprint::UP removeChild(size_t n);
diff --git a/searchlib/src/vespa/searchlib/queryeval/create_blueprint_visitor_helper.cpp b/searchlib/src/vespa/searchlib/queryeval/create_blueprint_visitor_helper.cpp
index d9338641a39..a2d244250cf 100644
--- a/searchlib/src/vespa/searchlib/queryeval/create_blueprint_visitor_helper.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/create_blueprint_visitor_helper.cpp
@@ -75,13 +75,12 @@ CreateBlueprintVisitorHelper::handleNumberTermAsText(query::NumberTerm &n)
template <typename WS, typename NODE>
void
CreateBlueprintVisitorHelper::createWeightedSet(std::unique_ptr<WS> bp, NODE &n) {
- FieldSpecList fields;
+ bp->reserve(n.getNumTerms());
for (size_t i = 0; i < n.getNumTerms(); ++i) {
- fields.clear();
- fields.add(bp->getNextChildField(_field));
auto term = n.getAsString(i);
query::SimpleStringTerm node(term.first, n.getView(), 0, term.second); // TODO Temporary
- bp->addTerm(_searchable.createBlueprint(_requestContext, fields, node), term.second.percent());
+ FieldSpec field = bp->getNextChildField(_field);
+ bp->addTerm(_searchable.createBlueprint(_requestContext, field, node), term.second.percent());
}
setResult(std::move(bp));
}
diff --git a/searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.cpp b/searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.cpp
index 61b717b1104..de5bdc33e3c 100644
--- a/searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.cpp
@@ -25,6 +25,13 @@ DotProductBlueprint::getNextChildField(const FieldSpec &outer)
}
void
+DotProductBlueprint::reserve(size_t num_children) {
+ _weights.reserve(num_children);
+ _terms.reserve(num_children);
+ _layout.reserve(num_children);
+}
+
+void
DotProductBlueprint::addTerm(Blueprint::UP term, int32_t weight)
{
HitEstimate childEst = term->getState().estimate();
@@ -41,8 +48,7 @@ DotProductBlueprint::addTerm(Blueprint::UP term, int32_t weight)
}
SearchIterator::UP
-DotProductBlueprint::createLeafSearch(const search::fef::TermFieldMatchDataArray &tfmda,
- bool) const
+DotProductBlueprint::createLeafSearch(const search::fef::TermFieldMatchDataArray &tfmda, bool) const
{
assert(tfmda.size() == 1);
assert(getState().numFields() == 1);
diff --git a/searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.h b/searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.h
index 4ba59ba755f..2975958b5af 100644
--- a/searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.h
+++ b/searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.h
@@ -26,6 +26,7 @@ public:
FieldSpec getNextChildField(const FieldSpec &outer);
// used by create visitor
+ void reserve(size_t num_children);
void addTerm(Blueprint::UP term, int32_t weight);
SearchIteratorUP createLeafSearch(const search::fef::TermFieldMatchDataArray &tfmda, bool strict) const override;
diff --git a/searchlib/src/vespa/searchlib/queryeval/equiv_blueprint.cpp b/searchlib/src/vespa/searchlib/queryeval/equiv_blueprint.cpp
index af6b59dd6ca..384dc0cd227 100644
--- a/searchlib/src/vespa/searchlib/queryeval/equiv_blueprint.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/equiv_blueprint.cpp
@@ -44,7 +44,7 @@ EquivBlueprint::EquivBlueprint(FieldSpecBaseList fields,
fef::MatchDataLayout subtree_mdl)
: ComplexLeafBlueprint(std::move(fields)),
_estimate(),
- _layout(subtree_mdl),
+ _layout(std::move(subtree_mdl)),
_terms(),
_exactness()
{
diff --git a/searchlib/src/vespa/searchlib/queryeval/fake_search.cpp b/searchlib/src/vespa/searchlib/queryeval/fake_search.cpp
index 94814dbb9b3..d2aa72011e6 100644
--- a/searchlib/src/vespa/searchlib/queryeval/fake_search.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/fake_search.cpp
@@ -5,9 +5,20 @@
#include <vespa/searchlib/fef/termfieldmatchdata.h>
#include <vespa/vespalib/objects/visit.h>
#include <vespa/searchcommon/attribute/i_search_context.h>
+#include <cassert>
namespace search::queryeval {
+FakeSearch::FakeSearch(const vespalib::string &tag, const vespalib::string &field,
+ const vespalib::string &term, const FakeResult &res,
+ fef::TermFieldMatchDataArray tfmda)
+ : _tag(tag), _field(field), _term(term),
+ _result(res), _offset(0), _tfmda(std::move(tfmda)),
+ _ctx(nullptr)
+{
+ assert(_tfmda.size() == 1);
+}
+
void
FakeSearch::doSeek(uint32_t docid)
{
diff --git a/searchlib/src/vespa/searchlib/queryeval/fake_search.h b/searchlib/src/vespa/searchlib/queryeval/fake_search.h
index 5cd04f80499..7b7fdf0f078 100644
--- a/searchlib/src/vespa/searchlib/queryeval/fake_search.h
+++ b/searchlib/src/vespa/searchlib/queryeval/fake_search.h
@@ -29,13 +29,7 @@ public:
const vespalib::string &field,
const vespalib::string &term,
const FakeResult &res,
- fef::TermFieldMatchDataArray tfmda)
- : _tag(tag), _field(field), _term(term),
- _result(res), _offset(0), _tfmda(std::move(tfmda)),
- _ctx(nullptr)
- {
- assert(_tfmda.size() == 1);
- }
+ fef::TermFieldMatchDataArray tfmda);
void attr_ctx(const attribute::ISearchContext *ctx) { _ctx = ctx; }
bool is_attr() const { return (_ctx != nullptr); }
void doSeek(uint32_t docid) override;
diff --git a/searchlib/src/vespa/searchlib/queryeval/field_spec.h b/searchlib/src/vespa/searchlib/queryeval/field_spec.h
index a1050209b41..fd925fdf4ff 100644
--- a/searchlib/src/vespa/searchlib/queryeval/field_spec.h
+++ b/searchlib/src/vespa/searchlib/queryeval/field_spec.h
@@ -86,7 +86,7 @@ public:
class FieldSpecList
{
private:
- std::vector<FieldSpec> _list;
+ vespalib::SmallVector<FieldSpec, 1> _list;
public:
FieldSpecList() = default;
diff --git a/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.cpp b/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.cpp
index 7c307a1e35f..87ddb8b6edc 100644
--- a/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.cpp
@@ -14,6 +14,8 @@ LOG_SETUP(".searchlib.queryeval.nearest_neighbor_blueprint");
using vespalib::eval::Value;
+namespace vespalib { class Doom; }
+
namespace search::queryeval {
namespace {
@@ -40,7 +42,8 @@ NearestNeighborBlueprint::NearestNeighborBlueprint(const queryeval::FieldSpec& f
uint32_t explore_additional_hits,
double distance_threshold,
double global_filter_lower_limit,
- double global_filter_upper_limit)
+ double global_filter_upper_limit,
+ const vespalib::Doom& doom)
: ComplexLeafBlueprint(field),
_distance_calc(std::move(distance_calc)),
_attr_tensor(_distance_calc->attribute_tensor()),
@@ -58,7 +61,8 @@ NearestNeighborBlueprint::NearestNeighborBlueprint(const queryeval::FieldSpec& f
_global_filter(GlobalFilter::create()),
_global_filter_set(false),
_global_filter_hits(),
- _global_filter_hit_ratio()
+ _global_filter_hit_ratio(),
+ _doom(doom)
{
if (distance_threshold < std::numeric_limits<double>::max()) {
_distance_threshold = _distance_calc->function().convert_threshold(distance_threshold);
@@ -109,10 +113,10 @@ NearestNeighborBlueprint::perform_top_k(const search::tensor::NearestNeighborInd
uint32_t k = _adjusted_target_hits;
const auto &df = _distance_calc->function();
if (_global_filter->is_active()) {
- _found_hits = nns_index->find_top_k_with_filter(k, df, *_global_filter, k + _explore_additional_hits, _distance_threshold);
+ _found_hits = nns_index->find_top_k_with_filter(k, df, *_global_filter, k + _explore_additional_hits, _doom, _distance_threshold);
_algorithm = Algorithm::INDEX_TOP_K_WITH_FILTER;
} else {
- _found_hits = nns_index->find_top_k(k, df, k + _explore_additional_hits, _distance_threshold);
+ _found_hits = nns_index->find_top_k(k, df, k + _explore_additional_hits, _doom, _distance_threshold);
_algorithm = Algorithm::INDEX_TOP_K;
}
}
diff --git a/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.h b/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.h
index 3defb34cffd..f88cdd5adb1 100644
--- a/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.h
+++ b/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.h
@@ -45,6 +45,7 @@ private:
bool _global_filter_set;
std::optional<uint32_t> _global_filter_hits;
std::optional<double> _global_filter_hit_ratio;
+ const vespalib::Doom& _doom;
void perform_top_k(const search::tensor::NearestNeighborIndex* nns_index);
public:
@@ -53,7 +54,8 @@ public:
uint32_t target_hits, bool approximate, uint32_t explore_additional_hits,
double distance_threshold,
double global_filter_lower_limit,
- double global_filter_upper_limit);
+ double global_filter_upper_limit,
+ const vespalib::Doom& doom);
NearestNeighborBlueprint(const NearestNeighborBlueprint&) = delete;
NearestNeighborBlueprint& operator=(const NearestNeighborBlueprint&) = delete;
~NearestNeighborBlueprint();
diff --git a/searchlib/src/vespa/searchlib/queryeval/same_element_blueprint.cpp b/searchlib/src/vespa/searchlib/queryeval/same_element_blueprint.cpp
index 3be28ab75de..9c3910b20f9 100644
--- a/searchlib/src/vespa/searchlib/queryeval/same_element_blueprint.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/same_element_blueprint.cpp
@@ -3,8 +3,6 @@
#include "same_element_blueprint.h"
#include "same_element_search.h"
#include "field_spec.hpp"
-#include "andsearch.h"
-#include "emptysearch.h"
#include <vespa/searchlib/fef/termfieldmatchdata.h>
#include <vespa/searchlib/attribute/searchcontextelementiterator.h>
#include <vespa/vespalib/objects/visit.hpp>
@@ -66,8 +64,7 @@ SameElementBlueprint::fetchPostings(const ExecuteInfo &execInfo)
std::unique_ptr<SameElementSearch>
SameElementBlueprint::create_same_element_search(search::fef::TermFieldMatchData& tfmd, bool strict) const
{
- fef::MatchDataLayout my_layout = _layout;
- fef::MatchData::UP md = my_layout.createMatchData();
+ fef::MatchData::UP md = _layout.createMatchData();
std::vector<ElementIterator::UP> children(_terms.size());
for (size_t i = 0; i < _terms.size(); ++i) {
const State &childState = _terms[i]->getState();
diff --git a/searchlib/src/vespa/searchlib/queryeval/same_element_search.cpp b/searchlib/src/vespa/searchlib/queryeval/same_element_search.cpp
index 98c51d7f1ca..5db1e0057cd 100644
--- a/searchlib/src/vespa/searchlib/queryeval/same_element_search.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/same_element_search.cpp
@@ -6,6 +6,7 @@
#include <vespa/vespalib/objects/visit.hpp>
#include <algorithm>
#include <functional>
+#include <cassert>
using TFMD = search::fef::TermFieldMatchData;
diff --git a/searchlib/src/vespa/searchlib/queryeval/searchable.h b/searchlib/src/vespa/searchlib/queryeval/searchable.h
index 2438cbf5a3b..a36a7f34e1c 100644
--- a/searchlib/src/vespa/searchlib/queryeval/searchable.h
+++ b/searchlib/src/vespa/searchlib/queryeval/searchable.h
@@ -22,23 +22,12 @@ class FieldSpecList;
**/
class Searchable
{
-protected:
- /**
- * Create a blueprint searching a single field.
- *
- * @return blueprint
- * @param requestContext that belongs to the query
- * @param field the field to search
- * @param term the query tree term
- **/
- virtual std::unique_ptr<Blueprint> createBlueprint(const IRequestContext & requestContext,
- const FieldSpec &field,
- const search::query::Node &term) = 0;
-
public:
using SP = std::shared_ptr<Searchable>;
Searchable() = default;
+ virtual ~Searchable() = default;
+
/**
* Create a blueprint searching a set of fields. The default
@@ -53,7 +42,17 @@ public:
virtual std::unique_ptr<Blueprint> createBlueprint(const IRequestContext & requestContext,
const FieldSpecList &fields,
const search::query::Node &term);
- virtual ~Searchable() = default;
+ /**
+ * Create a blueprint searching a single field.
+ *
+ * @return blueprint
+ * @param requestContext that belongs to the query
+ * @param field the field to search
+ * @param term the query tree term
+ **/
+ virtual std::unique_ptr<Blueprint> createBlueprint(const IRequestContext & requestContext,
+ const FieldSpec &field,
+ const search::query::Node &term) = 0;
};
}
diff --git a/searchlib/src/vespa/searchlib/queryeval/simple_phrase_search.cpp b/searchlib/src/vespa/searchlib/queryeval/simple_phrase_search.cpp
index f5069fd4f53..ea264935d42 100644
--- a/searchlib/src/vespa/searchlib/queryeval/simple_phrase_search.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/simple_phrase_search.cpp
@@ -4,6 +4,7 @@
#include <vespa/searchlib/fef/termfieldmatchdata.h>
#include <vespa/vespalib/objects/visit.h>
#include <functional>
+#include <cassert>
using search::fef::TermFieldMatchData;
using std::unique_ptr;
diff --git a/searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.cpp b/searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.cpp
index fe212666ec9..b4b55098eaa 100644
--- a/searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.cpp
@@ -56,6 +56,12 @@ ParallelWeakAndBlueprint::getNextChildField(const FieldSpec &outer)
}
void
+ParallelWeakAndBlueprint::reserve(size_t num_children) {
+ _weights.reserve(num_children);
+ _terms.reserve(num_children);
+}
+
+void
ParallelWeakAndBlueprint::addTerm(Blueprint::UP term, int32_t weight)
{
HitEstimate childEst = term->getState().estimate();
@@ -78,6 +84,7 @@ ParallelWeakAndBlueprint::createLeafSearch(const search::fef::TermFieldMatchData
assert(tfmda.size() == 1);
fef::MatchData::UP childrenMatchData = _layout.createMatchData();
wand::Terms terms;
+ terms.reserve(_terms.size());
for (size_t i = 0; i < _terms.size(); ++i) {
const State &childState = _terms[i]->getState();
assert(childState.numFields() == 1);
diff --git a/searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.h b/searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.h
index 1a481be5c32..a2c13f12485 100644
--- a/searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.h
+++ b/searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.h
@@ -44,7 +44,7 @@ public:
score_t scoreThreshold,
double thresholdBoostFactor,
uint32_t scoresAdjustFrequency);
- virtual ~ParallelWeakAndBlueprint() override;
+ ~ParallelWeakAndBlueprint() override;
const WeakAndHeap &getScores() const { return _scores; }
@@ -56,6 +56,7 @@ public:
FieldSpec getNextChildField(const FieldSpec &outer);
// Used by create visitor
+ void reserve(size_t num_children);
void addTerm(Blueprint::UP term, int32_t weight);
SearchIterator::UP createLeafSearch(const fef::TermFieldMatchDataArray &tfmda, bool strict) const override;
diff --git a/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.cpp b/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.cpp
index f855b72812a..ee55a89dcdc 100644
--- a/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.cpp
@@ -74,6 +74,13 @@ WeightedSetTermBlueprint::WeightedSetTermBlueprint(const FieldSpec &field)
WeightedSetTermBlueprint::~WeightedSetTermBlueprint() = default;
void
+WeightedSetTermBlueprint::reserve(size_t num_children) {
+ _weights.reserve(num_children);
+ _terms.reserve(num_children);
+ _layout.reserve(num_children);
+}
+
+void
WeightedSetTermBlueprint::addTerm(Blueprint::UP term, int32_t weight)
{
HitEstimate childEst = term->getState().estimate();
@@ -100,7 +107,7 @@ WeightedSetTermBlueprint::createLeafSearch(const fef::TermFieldMatchDataArray &t
// TODO: pass ownership with unique_ptr
children[i] = _terms[i]->createSearch(*md, true).release();
}
- return SearchIterator::UP(WeightedSetTermSearch::create(children, *tfmda[0], _children_field.isFilter(), _weights, std::move(md)));
+ return WeightedSetTermSearch::create(children, *tfmda[0], _children_field.isFilter(), _weights, std::move(md));
}
SearchIterator::UP
diff --git a/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.h b/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.h
index 2a3db3ec52d..3827dc8a35f 100644
--- a/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.h
+++ b/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.h
@@ -30,6 +30,7 @@ public:
FieldSpec getNextChildField(const FieldSpec &) { return _children_field; }
// used by create visitor
+ void reserve(size_t num_children);
void addTerm(Blueprint::UP term, int32_t weight);
SearchIteratorUP createLeafSearch(const fef::TermFieldMatchDataArray &tfmda, bool strict) const override;
diff --git a/searchlib/src/vespa/searchlib/tensor/angular_distance.cpp b/searchlib/src/vespa/searchlib/tensor/angular_distance.cpp
index ab8f2b30df9..ec30236154a 100644
--- a/searchlib/src/vespa/searchlib/tensor/angular_distance.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/angular_distance.cpp
@@ -2,6 +2,7 @@
#include "angular_distance.h"
#include "temporary_vector_store.h"
+#include <numbers>
using vespalib::typify_invoke;
using vespalib::eval::TypifyCellType;
@@ -72,6 +73,12 @@ public:
return distance;
}
double convert_threshold(double threshold) const override {
+ if (threshold < 0.0) {
+ return 0.0;
+ }
+ if (threshold > std::numbers::pi) {
+ return 2.0;
+ }
double cosine_similarity = cos(threshold);
return 1.0 - cosine_similarity;
}
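`convert_threshold` for the angular metric maps a user-supplied angle in radians to the internal distance `1 - cos(angle)`; the new clamping keeps the result inside the metric's valid range [0, 2], since cos is not monotonic beyond pi. A small worked check of the boundaries (standalone, assumes only <cmath> and <numbers>, not tied to the Vespa classes):

    #include <cmath>
    #include <cstdio>
    #include <numbers>

    // Same mapping as AngularDistance::convert_threshold above:
    // clamp the angle to [0, pi], then return 1 - cos(angle) in [0, 2].
    double angular_threshold_to_distance(double threshold) {
        if (threshold < 0.0) return 0.0;                 // negative angles make no sense
        if (threshold > std::numbers::pi) return 2.0;    // 1 - cos(pi) == 2 is the maximum
        return 1.0 - std::cos(threshold);
    }

    int main() {
        std::printf("%.3f\n", angular_threshold_to_distance(-1.0));                 // 0.000
        std::printf("%.3f\n", angular_threshold_to_distance(std::numbers::pi / 2)); // 1.000
        std::printf("%.3f\n", angular_threshold_to_distance(10.0));                 // 2.000
        return 0;
    }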
diff --git a/searchlib/src/vespa/searchlib/tensor/distance_calculator.h b/searchlib/src/vespa/searchlib/tensor/distance_calculator.h
index f29cd389732..b65f4ff1868 100644
--- a/searchlib/src/vespa/searchlib/tensor/distance_calculator.h
+++ b/searchlib/src/vespa/searchlib/tensor/distance_calculator.h
@@ -46,7 +46,7 @@ public:
double calc_raw_score(uint32_t docid) const {
auto vectors = _attr_tensor.get_vectors(docid);
- double result = 0.0;
+ double result = _dist_fun->min_rawscore();
for (uint32_t i = 0; i < vectors.subspaces(); ++i) {
double distance = _dist_fun->calc(vectors.cells(i));
double score = _dist_fun->to_rawscore(distance);
diff --git a/searchlib/src/vespa/searchlib/tensor/distance_function.h b/searchlib/src/vespa/searchlib/tensor/distance_function.h
index a06c451d5e2..0df7fe6cc1d 100644
--- a/searchlib/src/vespa/searchlib/tensor/distance_function.h
+++ b/searchlib/src/vespa/searchlib/tensor/distance_function.h
@@ -30,6 +30,13 @@ public:
virtual double to_distance(double rawscore) const {
return (1.0 / rawscore) - 1.0;
}
+
+ /**
+ * The minimum rawscore (also used as closeness) that this distance function can return.
+ */
+ virtual double min_rawscore() const {
+ return 0.0;
+ }
};
}
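`min_rawscore()` reports the smallest rawscore (also used as closeness) a distance function can produce, 0.0 by default; the related hunks initialize their accumulators with it instead of a hard-coded 0.0 (see `ConvertRawScoreToCloseness::execute` and `DistanceCalculator::calc_raw_score` above). The point of the hook is presumably distance functions whose rawscore can drop below zero, where a fixed 0.0 floor would silently clip the real maximum. A sketch of that pattern with a toy interface (only the `min_rawscore`/`to_rawscore` shape shown in the hunks is assumed; `SignedScore` is hypothetical):

    #include <algorithm>
    #include <cstdio>
    #include <limits>
    #include <vector>

    // Toy interface mirroring the min_rawscore() hook added above.
    struct DistanceFunctionSketch {
        virtual ~DistanceFunctionSketch() = default;
        virtual double to_rawscore(double distance) const = 0;
        virtual double min_rawscore() const { return 0.0; }   // default, as in the hunk
    };

    // Hypothetical function whose rawscores may be negative (e.g. a dot-product-like score).
    struct SignedScore : DistanceFunctionSketch {
        double to_rawscore(double distance) const override { return -distance; }
        double min_rawscore() const override { return -std::numeric_limits<double>::max(); }
    };

    // Mirrors the pattern in calc_raw_score() / ConvertRawScoreToCloseness::execute():
    // start the running max at the function's floor, not at 0.0.
    double best_rawscore(const DistanceFunctionSketch& df, const std::vector<double>& distances) {
        double best = df.min_rawscore();
        for (double d : distances) {
            best = std::max(best, df.to_rawscore(d));
        }
        return best;
    }

    int main() {
        SignedScore df;
        std::vector<double> distances{3.0, 5.0};              // all rawscores are negative here
        std::printf("%.1f\n", best_rawscore(df, distances));  // -3.0, not clipped to 0.0
        return 0;
    }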
diff --git a/searchlib/src/vespa/searchlib/tensor/geo_degrees_distance.cpp b/searchlib/src/vespa/searchlib/tensor/geo_degrees_distance.cpp
index 0212830efb6..628a550cf8c 100644
--- a/searchlib/src/vespa/searchlib/tensor/geo_degrees_distance.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/geo_degrees_distance.cpp
@@ -2,6 +2,7 @@
#include "geo_degrees_distance.h"
#include "temporary_vector_store.h"
+#include <numbers>
using vespalib::typify_invoke;
using vespalib::eval::TypifyCellType;
@@ -56,6 +57,13 @@ public:
return hav_central_angle;
}
double convert_threshold(double threshold) const override {
+ if (threshold < 0.0) {
+ return 0.0;
+ }
+ constexpr double max_threshold = std::numbers::pi * earth_mean_radius;
+ if (threshold > max_threshold) {
+ threshold = max_threshold;
+ }
double half_angle = threshold / (2 * earth_mean_radius);
double rt_hav = sin(half_angle);
return rt_hav * rt_hav;
diff --git a/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp b/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp
index 3fdad3d507b..748a747d515 100644
--- a/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp
@@ -6,6 +6,7 @@
#include "hash_set_visited_tracker.h"
#include "hnsw_index_loader.hpp"
#include "hnsw_index_saver.h"
+#include "mips_distance_transform.h"
#include "random_level_generator.h"
#include "vector_bundle.h"
#include <vespa/searchlib/attribute/address_space_components.h>
@@ -17,6 +18,7 @@
#include <vespa/vespalib/data/slime/inserter.h>
#include <vespa/vespalib/datastore/array_store.hpp>
#include <vespa/vespalib/datastore/compaction_strategy.h>
+#include <vespa/vespalib/util/doom.h>
#include <vespa/vespalib/util/memory_allocator.h>
#include <vespa/vespalib/util/size_literals.h>
#include <vespa/vespalib/util/time.h>
@@ -31,6 +33,7 @@ using search::StateExplorerUtils;
using search::queryeval::GlobalFilter;
using vespalib::datastore::CompactionStrategy;
using vespalib::datastore::EntryRef;
+using vespalib::GenericHeader;
namespace {
@@ -41,6 +44,29 @@ constexpr size_t max_level_array_size = 16;
constexpr size_t max_link_array_size = 193;
constexpr vespalib::duration MAX_COUNT_DURATION(100ms);
+const vespalib::string hnsw_max_squared_norm = "hnsw.max_squared_norm";
+
+void save_mips_max_distance(GenericHeader& header, DistanceFunctionFactory& dff) {
+ auto* mips_dff = dynamic_cast<MipsDistanceFunctionFactoryBase*>(&dff);
+ if (mips_dff != nullptr) {
+ auto& norm_store = mips_dff->get_max_squared_norm_store();
+ header.putTag(GenericHeader::Tag(hnsw_max_squared_norm, norm_store.get_max()));
+ }
+}
+
+void load_mips_max_distance(const GenericHeader& header, DistanceFunctionFactory& dff) {
+ auto* mips_dff = dynamic_cast<MipsDistanceFunctionFactoryBase*>(&dff);
+ if (mips_dff != nullptr) {
+ auto& norm_store = mips_dff->get_max_squared_norm_store();
+ if (header.hasTag(hnsw_max_squared_norm)) {
+ auto& tag = header.getTag(hnsw_max_squared_norm);
+ if (tag.getType() == GenericHeader::Tag::Type::TYPE_FLOAT) {
+ (void) norm_store.get_max(tag.asFloat());
+ }
+ }
+ }
+}
+
bool has_link_to(vespalib::ConstArrayRef<uint32_t> links, uint32_t id) {
for (uint32_t link : links) {
if (link == id) return true;
@@ -151,8 +177,9 @@ template <HnswIndexType type>
bool
HnswIndex<type>::have_closer_distance(HnswTraversalCandidate candidate, const HnswTraversalCandidateVector& result) const
{
+ auto df = _distance_ff->for_insertion_vector(get_vector(candidate.nodeid));
for (const auto & neighbor : result) {
- double dist = calc_distance(candidate.nodeid, neighbor.nodeid);
+ double dist = calc_distance(*df, neighbor.nodeid);
if (dist < candidate.distance) {
return true;
}
@@ -228,8 +255,9 @@ HnswIndex<type>::shrink_if_needed(uint32_t nodeid, uint32_t level)
if (old_links.size() > max_links) {
HnswTraversalCandidateVector neighbors;
neighbors.reserve(old_links.size());
+ auto df = _distance_ff->for_insertion_vector(get_vector(nodeid));
for (uint32_t neighbor_nodeid : old_links) {
- double dist = calc_distance(nodeid, neighbor_nodeid);
+ double dist = calc_distance(*df, neighbor_nodeid);
neighbors.emplace_back(neighbor_nodeid, dist);
}
auto split = select_neighbors(neighbors, max_links);
@@ -272,17 +300,6 @@ HnswIndex<type>::remove_link_to(uint32_t remove_from, uint32_t remove_id, uint32
_graph.set_link_array(remove_from, level, new_links);
}
-
-template <HnswIndexType type>
-double
-HnswIndex<type>::calc_distance(uint32_t lhs_nodeid, uint32_t rhs_nodeid) const
-{
- auto lhs = get_vector(lhs_nodeid);
- auto df = _distance_ff->for_insertion_vector(lhs);
- auto rhs = get_vector(rhs_nodeid);
- return df->calc(rhs);
-}
-
template <HnswIndexType type>
double
HnswIndex<type>::calc_distance(const BoundDistanceFunction &df, uint32_t rhs_nodeid) const
@@ -357,12 +374,19 @@ HnswIndex<type>::search_layer_helper(
const BoundDistanceFunction &df,
uint32_t neighbors_to_find,
BestNeighbors& best_neighbors, uint32_t level, const GlobalFilter *filter,
- uint32_t nodeid_limit, uint32_t estimated_visited_nodes) const
+ uint32_t nodeid_limit, const vespalib::Doom* const doom,
+ uint32_t estimated_visited_nodes) const
{
NearestPriQ candidates;
GlobalFilterWrapper<type> filter_wrapper(filter);
filter_wrapper.clamp_nodeid_limit(nodeid_limit);
VisitedTracker visited(nodeid_limit, estimated_visited_nodes);
+ if (doom != nullptr && doom->soft_doom()) {
+ while (!best_neighbors.empty()) {
+ best_neighbors.pop();
+ }
+ return;
+ }
for (const auto &entry : best_neighbors.peek()) {
if (entry.nodeid >= nodeid_limit) {
continue;
@@ -407,6 +431,9 @@ HnswIndex<type>::search_layer_helper(
}
}
}
+ if (doom != nullptr && doom->soft_doom()) {
+ break;
+ }
}
}
@@ -416,14 +443,15 @@ void
HnswIndex<type>::search_layer(
const BoundDistanceFunction &df,
uint32_t neighbors_to_find,
- BestNeighbors& best_neighbors, uint32_t level, const GlobalFilter *filter) const
+ BestNeighbors& best_neighbors, uint32_t level,
+ const vespalib::Doom* const doom, const GlobalFilter *filter) const
{
uint32_t nodeid_limit = _graph.nodes_size.load(std::memory_order_acquire);
uint32_t estimated_visited_nodes = estimate_visited_nodes(level, nodeid_limit, neighbors_to_find, filter);
if (estimated_visited_nodes >= nodeid_limit / 128) {
- search_layer_helper<BitVectorVisitedTracker>(df, neighbors_to_find, best_neighbors, level, filter, nodeid_limit, estimated_visited_nodes);
+ search_layer_helper<BitVectorVisitedTracker>(df, neighbors_to_find, best_neighbors, level, filter, nodeid_limit, doom, estimated_visited_nodes);
} else {
- search_layer_helper<HashSetVisitedTracker>(df, neighbors_to_find, best_neighbors, level, filter, nodeid_limit, estimated_visited_nodes);
+ search_layer_helper<HashSetVisitedTracker>(df, neighbors_to_find, best_neighbors, level, filter, nodeid_limit, doom, estimated_visited_nodes);
}
}
@@ -506,7 +534,7 @@ HnswIndex<type>::internal_prepare_add_node(PreparedAddDoc& op, TypedCells input_
search_level = std::min(node_max_level, search_level);
// Find neighbors of the added document in each level it should exist in.
while (search_level >= 0) {
- search_layer(*df, _cfg.neighbors_to_explore_at_construction(), best_neighbors, search_level);
+ search_layer(*df, _cfg.neighbors_to_explore_at_construction(), best_neighbors, search_level, nullptr);
auto neighbors = select_neighbors(best_neighbors.peek(), _cfg.max_links_on_inserts());
auto& links = connections[search_level];
links.reserve(neighbors.used.size());
@@ -614,10 +642,14 @@ HnswIndex<type>::mutual_reconnect(const LinkArrayRef &cluster, uint32_t level)
for (uint32_t i = 0; i + 1 < cluster.size(); ++i) {
uint32_t n_id_1 = cluster[i];
LinkArrayRef n_list_1 = _graph.get_link_array(n_id_1, level);
+ std::unique_ptr<BoundDistanceFunction> df;
for (uint32_t j = i + 1; j < cluster.size(); ++j) {
uint32_t n_id_2 = cluster[j];
if (has_link_to(n_list_1, n_id_2)) continue;
- pairs.emplace_back(n_id_1, n_id_2, calc_distance(n_id_1, n_id_2));
+ if (!df) {
+ df = _distance_ff->for_insertion_vector(get_vector(n_id_1));
+ }
+ pairs.emplace_back(n_id_1, n_id_2, calc_distance(*df, n_id_2));
}
}
std::sort(pairs.begin(), pairs.end());
@@ -836,16 +868,18 @@ HnswIndex<type>::shrink_lid_space(uint32_t doc_id_limit)
template <HnswIndexType type>
std::unique_ptr<NearestNeighborIndexSaver>
-HnswIndex<type>::make_saver() const
+HnswIndex<type>::make_saver(GenericHeader& header) const
{
+ save_mips_max_distance(header, distance_function_factory());
return std::make_unique<HnswIndexSaver<type>>(_graph);
}
template <HnswIndexType type>
std::unique_ptr<NearestNeighborIndexLoader>
-HnswIndex<type>::make_loader(FastOS_FileInterface& file)
+HnswIndex<type>::make_loader(FastOS_FileInterface& file, const vespalib::GenericHeader& header)
{
assert(get_entry_nodeid() == 0); // cannot load after index has data
+ load_mips_max_distance(header, distance_function_factory());
using ReaderType = FileReader<uint32_t>;
using LoaderType = HnswIndexLoader<ReaderType, type>;
return std::make_unique<LoaderType>(_graph, _id_mapping, std::make_unique<ReaderType>(&file));
@@ -865,9 +899,10 @@ HnswIndex<type>::top_k_by_docid(
uint32_t k,
const BoundDistanceFunction &df,
const GlobalFilter *filter, uint32_t explore_k,
+ const vespalib::Doom& doom,
double distance_threshold) const
{
- SearchBestNeighbors candidates = top_k_candidates(df, std::max(k, explore_k), filter);
+ SearchBestNeighbors candidates = top_k_candidates(df, std::max(k, explore_k), filter, doom);
auto result = candidates.get_neighbors(k, distance_threshold);
std::sort(result.begin(), result.end(), NeighborsByDocId());
return result;
@@ -879,9 +914,10 @@ HnswIndex<type>::find_top_k(
uint32_t k,
const BoundDistanceFunction &df,
uint32_t explore_k,
+ const vespalib::Doom& doom,
double distance_threshold) const
{
- return top_k_by_docid(k, df, nullptr, explore_k, distance_threshold);
+ return top_k_by_docid(k, df, nullptr, explore_k, doom, distance_threshold);
}
template <HnswIndexType type>
@@ -890,16 +926,18 @@ HnswIndex<type>::find_top_k_with_filter(
uint32_t k,
const BoundDistanceFunction &df,
const GlobalFilter &filter, uint32_t explore_k,
+ const vespalib::Doom& doom,
double distance_threshold) const
{
- return top_k_by_docid(k, df, &filter, explore_k, distance_threshold);
+ return top_k_by_docid(k, df, &filter, explore_k, doom, distance_threshold);
}
template <HnswIndexType type>
typename HnswIndex<type>::SearchBestNeighbors
HnswIndex<type>::top_k_candidates(
const BoundDistanceFunction &df,
- uint32_t k, const GlobalFilter *filter) const
+ uint32_t k, const GlobalFilter *filter,
+ const vespalib::Doom& doom) const
{
SearchBestNeighbors best_neighbors;
auto entry = _graph.get_entry_node();
@@ -917,7 +955,7 @@ HnswIndex<type>::top_k_candidates(
--search_level;
}
best_neighbors.push(entry_point);
- search_layer(df, k, best_neighbors, 0, filter);
+ search_layer(df, k, best_neighbors, 0, &doom, filter);
return best_neighbors;
}
@@ -1000,7 +1038,7 @@ HnswIndex<type>::count_reachable_nodes() const
visited[entry.nodeid] = true;
}
vespalib::steady_time doom = vespalib::steady_clock::now() + MAX_COUNT_DURATION;
- while (search_level >= 0) {
+ while (search_level > 0) {
for (uint32_t idx = 0; idx < found_links.size(); ++idx) {
if (vespalib::steady_clock::now() > doom) {
return {found_links.size(), false};
@@ -1019,7 +1057,35 @@ HnswIndex<type>::count_reachable_nodes() const
}
--search_level;
}
- return {found_links.size(), true};
+ uint32_t found_cnt = found_links.size();
+ search::AllocatedBitVector visitNext(visited.size());
+ for (uint32_t nodeid : found_links) {
+ visitNext.setBit(nodeid);
+ }
+ bool runAnotherVisit = true;
+ while (runAnotherVisit) {
+ if (vespalib::steady_clock::now() > doom) {
+ return {found_cnt, false};
+ }
+ runAnotherVisit = false;
+ visitNext.foreach_truebit(
+ [&] (uint32_t nodeid) {
+ // note: search_level == 0
+ auto neighbors = _graph.acquire_link_array(nodeid, 0);
+ for (uint32_t neighbor : neighbors) {
+ if (neighbor >= visited.size() || visited[neighbor]) {
+ continue;
+ }
+ ++found_cnt;
+ visited[neighbor] = true;
+ visitNext.setBit(neighbor);
+ runAnotherVisit = true;
+ }
+ visitNext.clearBit(nodeid);
+ }
+ );
+ }
+ return {found_cnt, true};
}
template class HnswIndex<HnswIndexType::SINGLE>;
diff --git a/searchlib/src/vespa/searchlib/tensor/hnsw_index.h b/searchlib/src/vespa/searchlib/tensor/hnsw_index.h
index 0809dcf4fe3..1ea8d1be558 100644
--- a/searchlib/src/vespa/searchlib/tensor/hnsw_index.h
+++ b/searchlib/src/vespa/searchlib/tensor/hnsw_index.h
@@ -158,7 +158,6 @@ protected:
return _vectors.get_vectors(docid);
}
- double calc_distance(uint32_t lhs_nodeid, uint32_t rhs_nodeid) const;
double calc_distance(const BoundDistanceFunction &df, uint32_t rhs_nodeid) const;
double calc_distance(const BoundDistanceFunction &df, uint32_t rhs_docid, uint32_t rhs_subspace) const;
uint32_t estimate_visited_nodes(uint32_t level, uint32_t nodeid_limit, uint32_t neighbors_to_find, const GlobalFilter* filter) const;
@@ -171,12 +170,15 @@ protected:
void search_layer_helper(const BoundDistanceFunction &df, uint32_t neighbors_to_find, BestNeighbors& best_neighbors,
uint32_t level, const GlobalFilter *filter,
uint32_t nodeid_limit,
+ const vespalib::Doom* const doom,
uint32_t estimated_visited_nodes) const;
template <class BestNeighbors>
void search_layer(const BoundDistanceFunction &df, uint32_t neighbors_to_find, BestNeighbors& best_neighbors,
- uint32_t level, const GlobalFilter *filter = nullptr) const;
+ uint32_t level, const vespalib::Doom* const doom,
+ const GlobalFilter *filter = nullptr) const;
std::vector<Neighbor> top_k_by_docid(uint32_t k, const BoundDistanceFunction &df,
const GlobalFilter *filter, uint32_t explore_k,
+ const vespalib::Doom& doom,
double distance_threshold) const;
internal::PreparedAddDoc internal_prepare_add(uint32_t docid, VectorBundle input_vectors,
@@ -211,26 +213,29 @@ public:
void get_state(const vespalib::slime::Inserter& inserter) const override;
void shrink_lid_space(uint32_t doc_id_limit) override;
- std::unique_ptr<NearestNeighborIndexSaver> make_saver() const override;
- std::unique_ptr<NearestNeighborIndexLoader> make_loader(FastOS_FileInterface& file) override;
+ std::unique_ptr<NearestNeighborIndexSaver> make_saver(vespalib::GenericHeader& header) const override;
+ std::unique_ptr<NearestNeighborIndexLoader> make_loader(FastOS_FileInterface& file, const vespalib::GenericHeader& header) override;
std::vector<Neighbor> find_top_k(
uint32_t k,
const BoundDistanceFunction &df,
uint32_t explore_k,
+ const vespalib::Doom& doom,
double distance_threshold) const override;
std::vector<Neighbor> find_top_k_with_filter(
uint32_t k,
const BoundDistanceFunction &df,
const GlobalFilter &filter, uint32_t explore_k,
+ const vespalib::Doom& doom,
double distance_threshold) const override;
DistanceFunctionFactory &distance_function_factory() const override { return *_distance_ff; }
SearchBestNeighbors top_k_candidates(
const BoundDistanceFunction &df,
- uint32_t k, const GlobalFilter *filter) const;
+ uint32_t k, const GlobalFilter *filter,
+ const vespalib::Doom& doom) const;
uint32_t get_entry_nodeid() const { return _graph.get_entry_node().nodeid; }
int32_t get_entry_level() const { return _graph.get_entry_node().level; }
diff --git a/searchlib/src/vespa/searchlib/tensor/hnsw_multi_best_neighbors.h b/searchlib/src/vespa/searchlib/tensor/hnsw_multi_best_neighbors.h
index de707999f11..67eff4b33c7 100644
--- a/searchlib/src/vespa/searchlib/tensor/hnsw_multi_best_neighbors.h
+++ b/searchlib/src/vespa/searchlib/tensor/hnsw_multi_best_neighbors.h
@@ -57,6 +57,7 @@ public:
_candidates.pop();
}
const HnswCandidateVector& peek() const { return _candidates.peek(); }
+ bool empty() const { return _candidates.empty(); }
const HnswCandidate& top() const { return _candidates.top(); }
size_t size() const { return _docids.size(); }
void emplace(uint32_t nodeid, uint32_t docid, EntryRef ref, double distance) {
diff --git a/searchlib/src/vespa/searchlib/tensor/hnsw_single_best_neighbors.h b/searchlib/src/vespa/searchlib/tensor/hnsw_single_best_neighbors.h
index e7c0a7fded6..acb14f79b7a 100644
--- a/searchlib/src/vespa/searchlib/tensor/hnsw_single_best_neighbors.h
+++ b/searchlib/src/vespa/searchlib/tensor/hnsw_single_best_neighbors.h
@@ -25,6 +25,7 @@ public:
void push(const HnswCandidate& candidate) { _candidates.push(candidate); }
void pop() { _candidates.pop(); }
const HnswCandidateVector& peek() const { return _candidates.peek(); }
+ bool empty() const { return _candidates.empty(); }
const HnswCandidate& top() const { return _candidates.top(); }
size_t size() const { return _candidates.size(); }
void emplace(uint32_t nodeid, uint32_t docid, EntryRef ref, double distance) { _candidates.emplace(nodeid, docid, ref, distance); }
diff --git a/searchlib/src/vespa/searchlib/tensor/mips_distance_transform.cpp b/searchlib/src/vespa/searchlib/tensor/mips_distance_transform.cpp
index 16f9eeeabc2..5ad3a044df6 100644
--- a/searchlib/src/vespa/searchlib/tensor/mips_distance_transform.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/mips_distance_transform.cpp
@@ -67,6 +67,9 @@ public:
double to_distance(double rawscore) const override {
return -rawscore;
}
+ double min_rawscore() const override {
+ return std::numeric_limits<double>::lowest();
+ }
double calc_with_limit(const vespalib::eval::TypedCells& rhs, double) const override {
return calc(rhs);
}
diff --git a/searchlib/src/vespa/searchlib/tensor/mips_distance_transform.h b/searchlib/src/vespa/searchlib/tensor/mips_distance_transform.h
index fabd6bfcc57..833fa3e689b 100644
--- a/searchlib/src/vespa/searchlib/tensor/mips_distance_transform.h
+++ b/searchlib/src/vespa/searchlib/tensor/mips_distance_transform.h
@@ -37,6 +37,18 @@ public:
}
};
+class MipsDistanceFunctionFactoryBase : public DistanceFunctionFactory {
+protected:
+ std::shared_ptr<MaximumSquaredNormStore> _sq_norm_store;
+public:
+ MipsDistanceFunctionFactoryBase()
+ : _sq_norm_store(std::make_shared<MaximumSquaredNormStore>())
+ {
+ }
+ ~MipsDistanceFunctionFactoryBase() = default;
+ MaximumSquaredNormStore& get_max_squared_norm_store() noexcept { return *_sq_norm_store; }
+};
+
/**
* Factory for distance functions which can apply a transformation
* mapping Maximum Inner Product Search to a nearest neighbor
@@ -45,10 +57,10 @@ public:
* to the longest vector inserted so far, or at least length 1.
*/
template<typename FloatType>
-class MipsDistanceFunctionFactory : public DistanceFunctionFactory {
- std::shared_ptr<MaximumSquaredNormStore> _sq_norm_store;
+class MipsDistanceFunctionFactory : public MipsDistanceFunctionFactoryBase {
public:
- MipsDistanceFunctionFactory() : _sq_norm_store(std::make_shared<MaximumSquaredNormStore>()) {}
+ MipsDistanceFunctionFactory() : MipsDistanceFunctionFactoryBase() { }
+ ~MipsDistanceFunctionFactory() = default;
BoundDistanceFunction::UP for_query_vector(const vespalib::eval::TypedCells& lhs) override;
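
The class comment above refers to the standard reduction from Maximum Inner Product Search to nearest-neighbor search: each stored vector x is effectively extended with an extra component sqrt(M - |x|^2), where M tracks the largest squared norm seen so far, while the query gets 0 in that position. The squared euclidean distance to the extended vector is then |q|^2 + M - 2*dot(q, x), which is minimized exactly when the inner product is maximized. A self-contained sketch of that identity (illustrative only; the actual bound functions live in mips_distance_transform.cpp):

    #include <numeric>
    #include <vector>

    // Squared distance between the query (extended with 0) and a stored vector
    // extended with sqrt(max_sq_norm - |x|^2): the extension contributes
    // (max_sq_norm - |x|^2), so |x|^2 cancels and only the dot product varies.
    double augmented_sq_distance(const std::vector<double>& q,
                                 const std::vector<double>& x,
                                 double max_sq_norm) {
        double dot  = std::inner_product(q.begin(), q.end(), x.begin(), 0.0);
        double q_sq = std::inner_product(q.begin(), q.end(), q.begin(), 0.0);
        return q_sq + max_sq_norm - 2.0 * dot;
    }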
diff --git a/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index.h b/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index.h
index 4b7b934fee0..a11be086697 100644
--- a/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index.h
+++ b/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index.h
@@ -14,6 +14,8 @@
class FastOS_FileInterface;
+namespace vespalib { class Doom; }
+namespace vespalib { class GenericHeader; }
namespace vespalib::datastore {
class CompactionSpec;
class CompactionStrategy;
@@ -88,18 +90,19 @@ public:
* This function is always called by the attribute write thread,
* and the caller ensures that an attribute read guard is held during the lifetime of the saver.
*/
- virtual std::unique_ptr<NearestNeighborIndexSaver> make_saver() const = 0;
+ virtual std::unique_ptr<NearestNeighborIndexSaver> make_saver(vespalib::GenericHeader& header) const = 0;
/**
* Creates a loader that is used to load the index from the given file.
*
* This might throw std::runtime_error.
*/
- virtual std::unique_ptr<NearestNeighborIndexLoader> make_loader(FastOS_FileInterface& file) = 0;
+ virtual std::unique_ptr<NearestNeighborIndexLoader> make_loader(FastOS_FileInterface& file, const vespalib::GenericHeader& header) = 0;
virtual std::vector<Neighbor> find_top_k(uint32_t k,
const BoundDistanceFunction &df,
uint32_t explore_k,
+ const vespalib::Doom& doom,
double distance_threshold) const = 0;
// only return neighbors where the corresponding filter bit is set
@@ -107,6 +110,7 @@ public:
const BoundDistanceFunction &df,
const GlobalFilter &filter,
uint32_t explore_k,
+ const vespalib::Doom& doom,
double distance_threshold) const = 0;
virtual DistanceFunctionFactory &distance_function_factory() const = 0;
diff --git a/searchlib/src/vespa/searchlib/tensor/tensor_attribute.cpp b/searchlib/src/vespa/searchlib/tensor/tensor_attribute.cpp
index 5e554f76779..f499695a584 100644
--- a/searchlib/src/vespa/searchlib/tensor/tensor_attribute.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/tensor_attribute.cpp
@@ -357,10 +357,11 @@ TensorAttribute::onInitSave(vespalib::stringref fileName)
{
vespalib::GenerationHandler::Guard guard(getGenerationHandler().
takeGuard());
- auto index_saver = (_index ? _index->make_saver() : std::unique_ptr<NearestNeighborIndexSaver>());
+ auto header = this->createAttributeHeader(fileName);
+ auto index_saver = (_index ? _index->make_saver(header.get_extra_tags()) : std::unique_ptr<NearestNeighborIndexSaver>());
return std::make_unique<TensorAttributeSaver>
(std::move(guard),
- this->createAttributeHeader(fileName),
+ std::move(header),
attribute::make_entry_ref_vector_snapshot(_refVector, getCommittedDocIdLimit()),
_tensorStore,
std::move(index_saver));
diff --git a/searchlib/src/vespa/searchlib/tensor/tensor_attribute_loader.cpp b/searchlib/src/vespa/searchlib/tensor/tensor_attribute_loader.cpp
index aada583627b..2ea28fd822d 100644
--- a/searchlib/src/vespa/searchlib/tensor/tensor_attribute_loader.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/tensor_attribute_loader.cpp
@@ -273,7 +273,7 @@ TensorAttributeLoader::load_index()
{
FileWithHeader index_file(LoadUtils::openFile(_attr, TensorAttributeSaver::index_file_suffix()));
try {
- auto index_loader = _index->make_loader(index_file.file());
+ auto index_loader = _index->make_loader(index_file.file(), index_file.header());
size_t cnt = 0;
while (index_loader->load_next()) {
if ((++cnt % LOAD_COMMIT_INTERVAL) == 0) {
diff --git a/searchlib/src/vespa/searchlib/test/diskindex/threelevelcountbuffers.cpp b/searchlib/src/vespa/searchlib/test/diskindex/threelevelcountbuffers.cpp
index bc92b691ce8..04c8b6b3904 100644
--- a/searchlib/src/vespa/searchlib/test/diskindex/threelevelcountbuffers.cpp
+++ b/searchlib/src/vespa/searchlib/test/diskindex/threelevelcountbuffers.cpp
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "threelevelcountbuffers.h"
+#include <cassert>
namespace search::diskindex {
diff --git a/searchlib/src/vespa/searchlib/test/features/distance_closeness_fixture.cpp b/searchlib/src/vespa/searchlib/test/features/distance_closeness_fixture.cpp
index e0444e8dca7..f6fb96cb74b 100644
--- a/searchlib/src/vespa/searchlib/test/features/distance_closeness_fixture.cpp
+++ b/searchlib/src/vespa/searchlib/test/features/distance_closeness_fixture.cpp
@@ -32,12 +32,13 @@ namespace {
std::shared_ptr<TensorAttribute>
create_tensor_attribute(const vespalib::string& attr_name,
const vespalib::string& tensor_type,
+ DistanceMetric distance_metric,
bool direct_tensor,
uint32_t docid_limit)
{
Config cfg(BasicType::TENSOR, CollectionType::SINGLE);
cfg.setTensorType(ValueType::from_spec(tensor_type));
- cfg.set_distance_metric(DistanceMetric::Euclidean);
+ cfg.set_distance_metric(distance_metric);
std::shared_ptr<TensorAttribute> result;
if (cfg.tensorType().is_dense()) {
result = std::make_shared<DenseTensorAttribute>(attr_name, cfg);
@@ -59,8 +60,9 @@ FeatureDumpFixture::~FeatureDumpFixture() = default;
DistanceClosenessFixture::DistanceClosenessFixture(size_t fooCnt, size_t barCnt,
const Labels& labels,
const vespalib::string& featureName,
- const vespalib::string& query_tensor)
- : DistanceClosenessFixture("tensor(x[2])", false, fooCnt, barCnt, labels, featureName, query_tensor)
+ const vespalib::string& query_tensor,
+ DistanceMetric distance_metric)
+ : DistanceClosenessFixture("tensor(x[2])", false, fooCnt, barCnt, labels, featureName, query_tensor, distance_metric)
{
}
@@ -69,7 +71,8 @@ DistanceClosenessFixture::DistanceClosenessFixture(const vespalib::string& tenso
size_t fooCnt, size_t barCnt,
const Labels& labels,
const vespalib::string& featureName,
- const vespalib::string& query_tensor)
+ const vespalib::string& query_tensor,
+ DistanceMetric distance_metric)
: queryEnv(&indexEnv), rankSetup(factory, indexEnv),
mdl(), match_data(), rankProgram(), fooHandles(), barHandles(),
tensor_attr(),
@@ -96,7 +99,7 @@ DistanceClosenessFixture::DistanceClosenessFixture(const vespalib::string& tenso
queryEnv.getTerms().push_back(term);
}
if (!query_tensor.empty()) {
- tensor_attr = create_tensor_attribute("bar", tensor_type, direct_tensor, docid_limit);
+ tensor_attr = create_tensor_attribute("bar", tensor_type, distance_metric, direct_tensor, docid_limit);
indexEnv.getAttributeMap().add(tensor_attr);
search::fef::indexproperties::type::Attribute::set(indexEnv.getProperties(), "bar", tensor_type);
set_query_tensor("qbar", "tensor(x[2])", TensorSpec::from_expr(query_tensor));
diff --git a/searchlib/src/vespa/searchlib/test/features/distance_closeness_fixture.h b/searchlib/src/vespa/searchlib/test/features/distance_closeness_fixture.h
index 8aae1ecb942..768e54cc19b 100644
--- a/searchlib/src/vespa/searchlib/test/features/distance_closeness_fixture.h
+++ b/searchlib/src/vespa/searchlib/test/features/distance_closeness_fixture.h
@@ -2,6 +2,7 @@
#pragma once
+#include <vespa/searchcommon/attribute/distance_metric.h>
#include <vespa/searchlib/features/setup.h>
#include <vespa/searchlib/fef/fef.h>
#include <vespa/searchlib/fef/test/indexenvironment.h>
@@ -61,12 +62,14 @@ struct DistanceClosenessFixture : BlueprintFactoryFixture, IndexEnvironmentFixtu
bool _failed;
DistanceClosenessFixture(size_t fooCnt, size_t barCnt,
const Labels &labels, const vespalib::string &featureName,
- const vespalib::string& query_tensor = "");
+ const vespalib::string& query_tensor = "",
+ search::attribute::DistanceMetric distance_metric = search::attribute::DistanceMetric::Euclidean);
DistanceClosenessFixture(const vespalib::string& tensor_type,
bool direct_tensor,
size_t fooCnt, size_t barCnt,
const Labels &labels, const vespalib::string &featureName,
- const vespalib::string& query_tensor = "");
+ const vespalib::string& query_tensor = "",
+ search::attribute::DistanceMetric distance_metric = search::attribute::DistanceMetric::Euclidean);
~DistanceClosenessFixture();
void set_attribute_tensor(uint32_t docid, const vespalib::eval::TensorSpec& spec);
void set_query_tensor(const vespalib::string& query_tensor_name,
@@ -86,8 +89,11 @@ struct DistanceClosenessFixture : BlueprintFactoryFixture, IndexEnvironmentFixtu
setScore(fooHandles[i], docId, 1.0/(1.0+distance));
}
void setBarScore(uint32_t i, uint32_t docId, feature_t distance) {
+ set_bar_rawscore(i, docId, 1.0/(1.0+distance));
+ }
+ void set_bar_rawscore(uint32_t i, uint32_t docid, feature_t rawscore) {
ASSERT_LT(i, barHandles.size());
- setScore(barHandles[i], docId, 1.0/(1.0+distance));
+ setScore(barHandles[i], docid, rawscore);
}
bool failed() const noexcept { return _failed; }
};
diff --git a/searchlib/src/vespa/searchlib/util/rawbuf.cpp b/searchlib/src/vespa/searchlib/util/rawbuf.cpp
index 04d69544047..3af29d7eed5 100644
--- a/searchlib/src/vespa/searchlib/util/rawbuf.cpp
+++ b/searchlib/src/vespa/searchlib/util/rawbuf.cpp
@@ -1,32 +1,11 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "rawbuf.h"
-#include <vespa/vespalib/util/compress.h>
#include <cassert>
-#include <cstring>
#include <cstdlib>
namespace search {
-RawBuf::RawBuf(size_t size)
- : _bufStart(nullptr),
- _bufEnd(nullptr),
- _bufFillPos(nullptr),
- _bufDrainPos(nullptr)
-{
- if (size > 0) {
- _bufStart = static_cast<char *>(malloc(size));
- }
- _bufEnd = _bufStart + size;
- _bufDrainPos = _bufFillPos = _bufStart;
-}
-
-RawBuf::~RawBuf()
-{
- free(_bufStart);
-}
-
-
/**
* Allocate a new buffer at least as large as the parameter value,
* move any content to the new and delete the old buffer.
@@ -50,45 +29,6 @@ RawBuf::expandBuf(size_t needlen)
_bufEnd = _bufStart + size;
}
-
-/**
- * Put 'data' of 'len'gth into the buffer. If insufficient room,
- * make the buffer larger.
- */
-void
-RawBuf::append(const void *data, size_t len)
-{
- if (__builtin_expect(len != 0, true)) {
- ensureSize(len);
- memcpy(_bufFillPos, data, len);
- _bufFillPos += len;
- }
-}
-
-void
-RawBuf::append(uint8_t byte)
-{
- ensureSize(1);
- *_bufFillPos++ = byte;
-}
-
-void
-RawBuf::appendCompressedPositiveNumber(uint64_t n)
-{
- size_t len(vespalib::compress::Integer::compressedPositiveLength(n));
- ensureSize(len);
- _bufFillPos += vespalib::compress::Integer::compressPositive(n, _bufFillPos);
-}
-
-void
-RawBuf::appendCompressedNumber(int64_t n)
-{
- size_t len(vespalib::compress::Integer::compressedLength(n));
- ensureSize(len);
- _bufFillPos += vespalib::compress::Integer::compress(n, _bufFillPos);
-}
-
-
/**
* Compact any free space from the beginning of the buffer, by
* copying the contents to the start of the buffer.
diff --git a/searchlib/src/vespa/searchlib/util/rawbuf.h b/searchlib/src/vespa/searchlib/util/rawbuf.h
index 30018cb45c2..9ecfbc23c24 100644
--- a/searchlib/src/vespa/searchlib/util/rawbuf.h
+++ b/searchlib/src/vespa/searchlib/util/rawbuf.h
@@ -2,8 +2,9 @@
#pragma once
-#include <cstdint>
-#include <cstddef>
+#include <vespa/vespalib/util/compress.h>
+#include <cstring>
+#include <cstdlib>
namespace search {
/**
@@ -45,13 +46,35 @@ private:
public:
RawBuf(const RawBuf &) = delete;
RawBuf& operator=(const RawBuf &) = delete;
- explicit RawBuf(size_t size); // malloc-s given size, assigns to _bufStart
- ~RawBuf(); // Frees _bufStart, i.e. the char[].
+ explicit RawBuf(size_t size)
+ : _bufStart(static_cast<char *>(malloc(size))),
+ _bufEnd(_bufStart + size),
+ _bufFillPos(_bufStart),
+ _bufDrainPos(_bufStart)
+ { }
+ ~RawBuf() {
+ free(_bufStart);
+ } // Frees _bufStart, i.e. the char[].
- void append(const void *data, size_t len);
- void append(uint8_t byte);
- void appendCompressedPositiveNumber(uint64_t n);
- void appendCompressedNumber(int64_t n);
+ void append(const void *data, size_t len) {
+ if (__builtin_expect(len != 0, true)) {
+ ensureSize(len);
+ memcpy(_bufFillPos, data, len);
+ _bufFillPos += len;
+ }
+ }
+ void append(uint8_t byte) {
+ ensureSize(1);
+ *_bufFillPos++ = byte;
+ }
+ void appendCompressedPositiveNumber(uint64_t n) {
+ ensureSize(vespalib::compress::Integer::compressedPositiveLength(n));
+ _bufFillPos += vespalib::compress::Integer::compressPositive(n, _bufFillPos);
+ }
+ void appendCompressedNumber(int64_t n) {
+ ensureSize(vespalib::compress::Integer::compressedLength(n));
+ _bufFillPos += vespalib::compress::Integer::compress(n, _bufFillPos);
+ }
size_t GetFreeLen() const { return _bufEnd - _bufFillPos; }
const char *GetDrainPos() const { return _bufDrainPos; }
char * GetWritableFillPos(size_t len) { preAlloc(len); return _bufFillPos; }
diff --git a/searchsummary/src/vespa/searchsummary/docsummary/docsumstate.cpp b/searchsummary/src/vespa/searchsummary/docsummary/docsumstate.cpp
index db4ac0f32b0..f1fee7e9ac3 100644
--- a/searchsummary/src/vespa/searchsummary/docsummary/docsumstate.cpp
+++ b/searchsummary/src/vespa/searchsummary/docsummary/docsumstate.cpp
@@ -14,6 +14,7 @@
#include <vespa/searchlib/parsequery/stackdumpiterator.h>
#include <vespa/vespalib/stllike/hash_map.hpp>
#include <vespa/vespalib/util/issue.h>
+#include <cassert>
using search::common::GeoLocationParser;
using search::common::GeoLocationSpec;
diff --git a/searchsummary/src/vespa/searchsummary/docsummary/geoposdfw.cpp b/searchsummary/src/vespa/searchsummary/docsummary/geoposdfw.cpp
index 6d668561651..07e4bde54d0 100644
--- a/searchsummary/src/vespa/searchsummary/docsummary/geoposdfw.cpp
+++ b/searchsummary/src/vespa/searchsummary/docsummary/geoposdfw.cpp
@@ -10,6 +10,7 @@
#include <vespa/vespalib/stllike/asciistream.h>
#include <vespa/vespalib/util/issue.h>
#include <climits>
+#include <cassert>
#include <vespa/log/log.h>
LOG_SETUP(".searchlib.docsummary.geoposdfw");
diff --git a/searchsummary/src/vespa/searchsummary/docsummary/juniper_query_adapter.cpp b/searchsummary/src/vespa/searchsummary/docsummary/juniper_query_adapter.cpp
index 80e21f0be96..509aa366ff6 100644
--- a/searchsummary/src/vespa/searchsummary/docsummary/juniper_query_adapter.cpp
+++ b/searchsummary/src/vespa/searchsummary/docsummary/juniper_query_adapter.cpp
@@ -121,6 +121,9 @@ JuniperQueryAdapter::Traverse(juniper::IQueryVisitor *v) const
break;
case search::ParseItem::ITEM_TRUE:
case search::ParseItem::ITEM_FALSE:
+ if (!v->VisitOther(&item, iterator.getArity())) {
+ rc = skipItem(&iterator);
+ }
break;
// Unhandled items are just ignored by juniper
case search::ParseItem::ITEM_WAND:
diff --git a/security-utils/src/main/java/com/yahoo/security/HKDF.java b/security-utils/src/main/java/com/yahoo/security/HKDF.java
index ca9a111b5c4..3692937c797 100644
--- a/security-utils/src/main/java/com/yahoo/security/HKDF.java
+++ b/security-utils/src/main/java/com/yahoo/security/HKDF.java
@@ -97,7 +97,7 @@ public final class HKDF {
* "as if random". May not be empty, use {@link #unsaltedExtractedFrom(byte[])}
* if unsalted extraction is desired (though this is not recommended).
* @param ikm secret initial Input Keying Material value.
- * @return a new HDFK instance ready for deriving keys based on the salt and IKM.
+ * @return a new HKDF instance ready for deriving keys based on the salt and IKM.
*/
public static HKDF extractedFrom(byte[] salt, byte[] ikm) {
validateExtractionParams(salt, ikm);
@@ -122,7 +122,7 @@ public final class HKDF {
PRK = HMAC-Hash(salt, IKM)
*/
- var mac = createKeyedHmacSha256(salt); // Note: HDFK is initially keyed on the salt, _not_ on ikm!
+ var mac = createKeyedHmacSha256(salt); // Note: HKDF is initially keyed on the salt, _not_ on ikm!
mac.update(ikm);
return new HKDF(/*PRK = */ mac.doFinal());
}
@@ -133,7 +133,7 @@ public final class HKDF {
* <p>Prefer using the salted {@link #extractedFrom(byte[], byte[])} method if possible.</p>
*
* @param ikm secret initial Input Keying Material value.
- * @return a new HDFK instance ready for deriving keys based on the IKM and an all-zero salt.
+ * @return a new HKDF instance ready for deriving keys based on the IKM and an all-zero salt.
*/
public static HKDF unsaltedExtractedFrom(byte[] ikm) {
return extractedFrom(ALL_ZEROS_SALT, ikm);
diff --git a/security-utils/src/main/java/com/yahoo/security/token/Token.java b/security-utils/src/main/java/com/yahoo/security/token/Token.java
new file mode 100644
index 00000000000..af50ad9a733
--- /dev/null
+++ b/security-utils/src/main/java/com/yahoo/security/token/Token.java
@@ -0,0 +1,86 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.security.token;
+
+import com.yahoo.security.HKDF;
+
+import java.util.Objects;
+
+import static com.yahoo.security.ArrayUtils.toUtf8Bytes;
+
+/**
+ * <p>A token represents an arbitrary, opaque sequence of secret bytes (preferably from a secure
+ * random source) whose possession gives the holder the right to some resource(s) or action(s).
+ * For a token to be recognized it must be presented in its entirety, i.e. bitwise exact. This
+ * includes any (optional) text prefixes.
+ * </p><p>
+ * Only the party <em>presenting</em> the token should store the token secret itself; any
+ * parties that need to identify and/or verify the token should store <em>derivations</em>
+ * of the token instead (TokenFingerprint and TokenCheckHash, respectively).
+ * </p><p>
+ * A Token <em>object</em> is bound to a particular TokenDomain, but any given secret token
+ * string may be used to create many Token objects for any number of domains; it is opaque and
+ * not in and by itself tied to any specific domain.
+ * </p>
+ */
+public class Token {
+
+ private final TokenDomain domain;
+ private final String secretTokenString;
+ private final byte[] secretTokenBytes;
+ private final TokenFingerprint fingerprint;
+
+ Token(TokenDomain domain, String secretTokenString) {
+ this.domain = domain;
+ this.secretTokenString = secretTokenString;
+ this.secretTokenBytes = toUtf8Bytes(secretTokenString);
+ this.fingerprint = TokenFingerprint.of(this);
+ }
+
+ public static Token of(TokenDomain domain, String secretTokenString) {
+ return new Token(domain, secretTokenString);
+ }
+
+ public TokenDomain domain() { return domain; }
+ public String secretTokenString() { return secretTokenString; }
+ public TokenFingerprint fingerprint() { return fingerprint; }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ Token token = (Token) o;
+ // We assume that domain+fingerprint suffices for equality check.
+ // If underlying secret bytes checking is added it MUST use SideChannelSafe.arraysEqual()
+ // to avoid leaking secret data via timing side-channels.
+ return Objects.equals(domain, token.domain) &&
+ Objects.equals(fingerprint, token.fingerprint);
+ }
+
+ // Important: actual secret bytes must NOT be part of hashCode calculation, as that risks
+ // leaking parts of the secret to an attacker that can influence and observe side effects
+ // of the hash code.
+ @Override
+ public int hashCode() {
+ return Objects.hash(domain, fingerprint);
+ }
+
+ @Override
+ public String toString() {
+ // Avoid leaking raw token secret as part of toString() output
+ // Fingerprint first, since that's the most important bit.
+ return "Token(fingerprint: %s, domain: %s)".formatted(fingerprint, domain);
+ }
+
+ /**
+ * Token derivations are created by invoking a HKDF (using HMAC-SHA256) that expands the
+ * original token secret to the provided number of bytes and the provided domain separation
+ * context. The same source token secret will result in different derivations when
+ * different contexts are used, but will always generate a deterministic result for the
+ * same token+#bytes+context combination.
+ */
+ byte[] toDerivedBytes(int nHashBytes, byte[] domainSeparationContext) {
+ var hkdf = HKDF.unsaltedExtractedFrom(secretTokenBytes);
+ return hkdf.expand(nHashBytes, domainSeparationContext);
+ }
+
+}
diff --git a/security-utils/src/main/java/com/yahoo/security/token/TokenCheckHash.java b/security-utils/src/main/java/com/yahoo/security/token/TokenCheckHash.java
new file mode 100644
index 00000000000..e4d9825842e
--- /dev/null
+++ b/security-utils/src/main/java/com/yahoo/security/token/TokenCheckHash.java
@@ -0,0 +1,46 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.security.token;
+
+import java.util.Arrays;
+
+import static com.yahoo.security.ArrayUtils.hex;
+
+/**
+ * A token check hash represents a hash derived from a token in such a way that
+ * distinct "audiences" for the token compute entirely different hashes even for
+ * identical token values.
+ */
+public record TokenCheckHash(byte[] hashBytes) {
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ TokenCheckHash tokenCheckHash = (TokenCheckHash) o;
+ // We don't consider token hashes secret data, so no harm in data-dependent equals()
+ return Arrays.equals(hashBytes, tokenCheckHash.hashBytes);
+ }
+
+ @Override
+ public int hashCode() {
+ return Arrays.hashCode(hashBytes);
+ }
+
+ public String toHexString() {
+ return hex(hashBytes);
+ }
+
+ @Override
+ public String toString() {
+ return toHexString();
+ }
+
+ public static TokenCheckHash of(Token token, int nHashBytes) {
+ return new TokenCheckHash(token.toDerivedBytes(nHashBytes, token.domain().checkHashContext()));
+ }
+
+ public static TokenCheckHash ofRawBytes(byte[] hashBytes) {
+ return new TokenCheckHash(Arrays.copyOf(hashBytes, hashBytes.length));
+ }
+
+}
diff --git a/security-utils/src/main/java/com/yahoo/security/token/TokenDomain.java b/security-utils/src/main/java/com/yahoo/security/token/TokenDomain.java
new file mode 100644
index 00000000000..e01d942cacf
--- /dev/null
+++ b/security-utils/src/main/java/com/yahoo/security/token/TokenDomain.java
@@ -0,0 +1,57 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.security.token;
+
+import java.util.Arrays;
+
+import static com.yahoo.security.ArrayUtils.fromUtf8Bytes;
+import static com.yahoo.security.ArrayUtils.toUtf8Bytes;
+
+/**
+ * <p>A token domain controls how token fingerprints and check-hashes are derived from
+ * a particular token. Even with identical token contents, different domain contexts
+ * are expected to produce entirely different derivations (with an extremely high
+ * probability).
+ * </p><p>
+ * Since tokens are just opaque sequences of high entropy bytes (with an arbitrary
+ * prefix), they do not by themselves provide any kind of inherent domain separation.
+ * Token domains exist to allow for <em>explicit</em> domain separation between
+ * different usages of tokens.
+ * </p><p>
+ * Fingerprint contexts will usually be the same across an entire deployment of a token
+ * evaluation infrastructure, in order to allow for identifying tokens "globally"
+ * across that deployment.
+ * </p><p>
+ * Access check hash contexts should be unique for each logical token evaluation audience,
+ * ensuring that access hashes from an unrelated audience (with a different context) can
+ * never be made to match, be it accidentally or deliberately.
+ * </p>
+ */
+public record TokenDomain(byte[] fingerprintContext, byte[] checkHashContext) {
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ TokenDomain that = (TokenDomain) o;
+ return Arrays.equals(fingerprintContext, that.fingerprintContext) &&
+ Arrays.equals(checkHashContext, that.checkHashContext);
+ }
+
+ @Override
+ public int hashCode() {
+ int result = Arrays.hashCode(fingerprintContext);
+ result = 31 * result + Arrays.hashCode(checkHashContext);
+ return result;
+ }
+
+ @Override
+ public String toString() {
+ return "'%s'/'%s'".formatted(fromUtf8Bytes(fingerprintContext), fromUtf8Bytes(checkHashContext));
+ }
+
+ public static TokenDomain of(String fingerprintContext, String checkHashContext) {
+ return new TokenDomain(toUtf8Bytes(fingerprintContext),
+ toUtf8Bytes(checkHashContext));
+ }
+
+}
diff --git a/security-utils/src/main/java/com/yahoo/security/token/TokenFingerprint.java b/security-utils/src/main/java/com/yahoo/security/token/TokenFingerprint.java
new file mode 100644
index 00000000000..9ce8d55f161
--- /dev/null
+++ b/security-utils/src/main/java/com/yahoo/security/token/TokenFingerprint.java
@@ -0,0 +1,60 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.security.token;
+
+import java.util.Arrays;
+import java.util.HexFormat;
+
+import static com.yahoo.security.ArrayUtils.hex;
+
+/**
+ * <p>A token fingerprint represents an opaque sequence of bytes that is expected
+ * to globally identify any particular token within a particular token domain.
+ * </p><p>
+ * Token fingerprints should not be used directly for access checks; use derived
+ * {@link TokenCheckHash} instances for this purpose.
+ * </p><p>
+ * Fingerprints are printed in the common hex:hex:hex:... format, e.g.
+ * <code>53:2e:4e:09:d5:4f:96:f4:1a:44:82:ef:f0:44:b9:a2</code>
+ * </p>
+ */
+public record TokenFingerprint(byte[] hashBytes) {
+
+ public static final int FINGERPRINT_BITS = 128;
+ public static final int FINGERPRINT_BYTES = FINGERPRINT_BITS / 8;
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ TokenFingerprint that = (TokenFingerprint) o;
+ // We don't consider token fingerprints secret data, so no harm in data-dependent equals()
+ return Arrays.equals(hashBytes, that.hashBytes);
+ }
+
+ @Override
+ public int hashCode() {
+ return Arrays.hashCode(hashBytes);
+ }
+
+ public String toHexString() {
+ return hex(hashBytes);
+ }
+
+ public String toDelimitedHexString() {
+ return HexFormat.ofDelimiter(":").formatHex(hashBytes);
+ }
+
+ @Override
+ public String toString() {
+ return toDelimitedHexString();
+ }
+
+ public static TokenFingerprint of(Token token) {
+ return new TokenFingerprint(token.toDerivedBytes(FINGERPRINT_BYTES, token.domain().fingerprintContext()));
+ }
+
+ public static TokenFingerprint ofRawBytes(byte[] hashBytes) {
+ return new TokenFingerprint(Arrays.copyOf(hashBytes, hashBytes.length));
+ }
+
+}
diff --git a/security-utils/src/main/java/com/yahoo/security/token/TokenGenerator.java b/security-utils/src/main/java/com/yahoo/security/token/TokenGenerator.java
new file mode 100644
index 00000000000..4dabca4b4ba
--- /dev/null
+++ b/security-utils/src/main/java/com/yahoo/security/token/TokenGenerator.java
@@ -0,0 +1,39 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.security.token;
+
+import com.yahoo.security.Base62;
+
+import java.security.SecureRandom;
+
+/**
+ * <p>
+ * Generates new {@link Token} instances that encapsulate a given number of cryptographically
+ * secure random bytes and, with a sufficiently high number of bytes (>= 16), can be expected
+ * to be globally unique and computationally infeasible to guess or brute force.
+ * </p><p>
+ * Tokens are returned in a printable and copy/paste-friendly form (Base62) with an optional
+ * prefix string.
+ * </p><p>
+ * Example of token string generated with the prefix "itsa_me_mario_" and 32 random bytes:
+ * </p>
+ * <pre>
+ * itsa_me_mario_nALfICMyrC4NFagwAkiOdGh80DPS1vSUPprGUKVPLya
+ * </pre>
+ * <p>
+ * Tokens are considered secret information, and must be treated as such.
+ * </p>
+ */
+public class TokenGenerator {
+
+ private static final SecureRandom CSPRNG = new SecureRandom();
+
+ public static Token generateToken(TokenDomain domain, String prefix, int nRandomBytes) {
+ if (nRandomBytes <= 0) {
+ throw new IllegalArgumentException("Token bytes must be a positive integer");
+ }
+ byte[] tokenRand = new byte[nRandomBytes];
+ CSPRNG.nextBytes(tokenRand);
+ return new Token(domain, "%s%s".formatted(prefix, Base62.codec().encode(tokenRand)));
+ }
+
+}
diff --git a/security-utils/src/test/java/com/yahoo/security/token/TokenTest.java b/security-utils/src/test/java/com/yahoo/security/token/TokenTest.java
new file mode 100644
index 00000000000..3418929f60b
--- /dev/null
+++ b/security-utils/src/test/java/com/yahoo/security/token/TokenTest.java
@@ -0,0 +1,134 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.security.token;
+
+import org.junit.jupiter.api.Test;
+
+import static com.yahoo.security.ArrayUtils.toUtf8Bytes;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+public class TokenTest {
+
+ private static final TokenDomain TEST_DOMAIN = TokenDomain.of("my fingerprint", "my check hash");
+
+ @Test
+ void tokens_are_equality_comparable() {
+ var td1 = TokenDomain.of("fingerprint 1", "hash 1");
+ var td2 = TokenDomain.of("fingerprint 2", "hash 2");
+
+ var td1_t1 = Token.of(td1, "foo");
+ var td1_t2 = Token.of(td1, "foo");
+ var td1_t3 = Token.of(td1, "bar");
+ var td2_t1 = Token.of(td2, "foo");
+ // Tokens in same domain with same content are equal
+ assertEquals(td1_t1, td1_t2);
+ // Tokens in same domain with different content are not equal
+ assertNotEquals(td1_t1, td1_t3);
+ // Tokens in different domains are not considered equal
+ assertNotEquals(td1_t1, td2_t1);
+ }
+
+ @Test
+ void check_hashes_are_equality_comparable() {
+ var h1 = TokenCheckHash.ofRawBytes(toUtf8Bytes("foo"));
+ var h2 = TokenCheckHash.ofRawBytes(toUtf8Bytes("foo"));
+ var h3 = TokenCheckHash.ofRawBytes(toUtf8Bytes("bar"));
+ assertEquals(h1, h2);
+ assertNotEquals(h1, h3);
+ }
+
+ @Test
+ void token_generator_generates_new_tokens() {
+ var t1 = TokenGenerator.generateToken(TEST_DOMAIN, "foo_", 16);
+ var t2 = TokenGenerator.generateToken(TEST_DOMAIN, "foo_", 16);
+ // The space of possible generated tokens is effectively infinite, so we'll
+ // pragmatically round down infinity to 2...!
+ assertNotEquals(t1, t2);
+ assertTrue(t1.secretTokenString().startsWith("foo_"));
+ assertTrue(t2.secretTokenString().startsWith("foo_"));
+ // Token sizes are always greater than their raw binary size due to base62-encoding
+ assertTrue(t1.secretTokenString().length() > 20);
+ assertTrue(t2.secretTokenString().length() > 20);
+ }
+
+ @Test
+ void token_fingerprint_considers_entire_token_string_and_domain() {
+ var td = TokenDomain.of("my fingerprint", "my check hash");
+ var t1 = Token.of(td, "kittens_123456789");
+ var t2 = Token.of(td, "puppies_123456789");
+ assertEquals("563487a25ae28bc64ed804244bce70de", t1.fingerprint().toHexString());
+ assertEquals("4b63155af536346d49a52300f5d65364", t2.fingerprint().toHexString());
+
+ var td2 = TokenDomain.of("my fingerprint 2", "my check hash");
+ var t3 = Token.of(td2, "kittens_123456789");
+ assertEquals("201890b5e18e69c364ca09f3c7a00f8e", t3.fingerprint().toHexString());
+
+ // Only the _fingerprint_ context should matter
+ var td3 = TokenDomain.of("my fingerprint 2", "my check hash 2");
+ var t4 = Token.of(td3, "kittens_123456789");
+ assertEquals("201890b5e18e69c364ca09f3c7a00f8e", t4.fingerprint().toHexString());
+ }
+
+ @Test
+ void fingerprint_is_printed_with_delimiters_by_default() {
+ var t = Token.of(TEST_DOMAIN, "bar");
+ var fp = t.fingerprint();
+ assertEquals("7c:47:14:4e:5d:c6:84:7a:5d:20:08:6d:bd:17:70:00", fp.toString());
+ }
+
+ @Test
+ void token_check_hash_differs_from_fingerprint() { // ... with extremely high probability
+ var t = Token.of(TEST_DOMAIN, "foo");
+ var fp = t.fingerprint();
+ // Generate check-hashes with the same length as fingerprints.
+ // If we generate with different lengths, hashes will differ by definition, but that wouldn't
+ // really tell us anything about whether the hashes are actually derived differently.
+ var hash = TokenCheckHash.of(t, TokenFingerprint.FINGERPRINT_BYTES);
+ assertEquals("532e4e09d54f96f41a4482eff044b9a2", fp.toHexString());
+ assertEquals("f0f56b46df55f73eccb9409c203b02c7", hash.toHexString());
+ }
+
+ @Test
+ void different_check_hash_domains_give_different_outputs() {
+ var d1 = TokenDomain.of("my fingerprint", "domain: 1");
+ var d2 = TokenDomain.of("my fingerprint", "domain: 2");
+ var d3 = TokenDomain.of("my fingerprint", "domain: 3");
+ assertEquals("cc0c504b52bfd9b0a9cdb1651c0f3515", TokenCheckHash.of(Token.of(d1, "foo"), 16).toHexString());
+ assertEquals("a27c7fc350699c71bc456a86bd571479", TokenCheckHash.of(Token.of(d2, "foo"), 16).toHexString());
+ assertEquals("119cc7046689e6de796fd4005aaab6dc", TokenCheckHash.of(Token.of(d3, "foo"), 16).toHexString());
+ }
+
+ @Test
+ void token_stringification_does_not_contain_raw_secret() {
+ var t = Token.of(TEST_DOMAIN, "foo");
+ assertEquals("Token(fingerprint: 53:2e:4e:09:d5:4f:96:f4:1a:44:82:ef:f0:44:b9:a2, " +
+ "domain: 'my fingerprint'/'my check hash')",
+ t.toString());
+ }
+
+ @Test
+ void token_fingerprints_and_check_hashes_are_stable() {
+ var d1 = TokenDomain.of("my fingerprint: 1", "domain: 1");
+ var d2 = TokenDomain.of("my fingerprint: 2", "domain: 2");
+
+ var t1 = Token.of(d1, "my_token_1");
+ assertEquals("e029edf4b9061a82b45fdf5cf1507804", t1.fingerprint().toHexString());
+ assertEquals("e029edf4b9061a82b45fdf5cf1507804", TokenFingerprint.of(t1).toHexString());
+ var t1_h1 = TokenCheckHash.of(t1, 32);
+ var t1_h2 = TokenCheckHash.of(t1, 16);
+ assertEquals("65da02dbed156442d85c93caf930217488916082936d17fef29137dc12110062", t1_h1.toHexString());
+ assertEquals("65da02dbed156442d85c93caf9302174", t1_h2.toHexString()); // same prefix, just truncated
+
+ var t2 = Token.of(d1, "my_token_2");
+ assertEquals("f1b9f90e996ec16125fec41ebc0c46a9", t2.fingerprint().toHexString());
+ var t2_h = TokenCheckHash.of(t2, 32);
+ assertEquals("8f3695492c3fd977b44067580ad57e87883317973e7c09cd859666da8edbd42f", t2_h.toHexString());
+
+ var t3 = Token.of(d2, "my_token_1"); // Different domain
+ assertEquals("90960354d1a6e5ec316117da72c31792", t3.fingerprint().toHexString());
+ var t3_h = TokenCheckHash.of(t3, 32);
+ assertEquals("f566dbec641aa64723dd19124afe6c96a821638f9b59f46bbe14f61c3704b32a", t3_h.toHexString());
+ }
+
+}
diff --git a/storage/src/tests/distributor/check_condition_test.cpp b/storage/src/tests/distributor/check_condition_test.cpp
index 1b5cede8af6..757a9329ea6 100644
--- a/storage/src/tests/distributor/check_condition_test.cpp
+++ b/storage/src/tests/distributor/check_condition_test.cpp
@@ -242,6 +242,32 @@ TEST_F(CheckConditionTest, check_fails_if_replica_set_changed_between_start_and_
});
}
+TEST_F(CheckConditionTest, check_fails_if_bucket_ownership_changed_between_start_and_completion_pending_transition_case) {
+ test_cond_with_2_gets_sent([&](auto& cond) {
+ cond.handle_reply(_sender, make_matched_reply(0));
+ simulate_set_pending_cluster_state("version:2 storage:1 distributor:1 .0.s:d"); // technically, no distributors own anything
+ cond.handle_reply(_sender, make_matched_reply(1));
+ }, [&](auto& outcome) {
+ EXPECT_FALSE(outcome.matched_condition());
+ EXPECT_FALSE(outcome.not_found());
+ EXPECT_TRUE(outcome.failed());
+ EXPECT_EQ(outcome.error_code().getResult(), api::ReturnCode::BUCKET_NOT_FOUND);
+ });
+}
+
+TEST_F(CheckConditionTest, check_fails_if_bucket_ownership_changed_between_start_and_completion_completed_transition_case) {
+ test_cond_with_2_gets_sent([&](auto& cond) {
+ cond.handle_reply(_sender, make_matched_reply(0));
+ enable_cluster_state("version:2 storage:1 distributor:1 .0.s:d"); // technically, no distributors own anything
+ cond.handle_reply(_sender, make_matched_reply(1));
+ }, [&](auto& outcome) {
+ EXPECT_FALSE(outcome.matched_condition());
+ EXPECT_FALSE(outcome.not_found());
+ EXPECT_TRUE(outcome.failed());
+ EXPECT_EQ(outcome.error_code().getResult(), api::ReturnCode::BUCKET_NOT_FOUND);
+ });
+}
+
TEST_F(CheckConditionTest, nested_get_traces_are_propagated_to_outcome) {
test_cond_with_2_gets_sent([&](auto& cond) {
cond.handle_reply(_sender, make_trace_reply(0, 100, "hello"));
@@ -253,4 +279,20 @@ TEST_F(CheckConditionTest, nested_get_traces_are_propagated_to_outcome) {
});
}
+TEST_F(CheckConditionTest, condition_evaluation_increments_probe_latency_metrics) {
+ getClock().setAbsoluteTimeInSeconds(1);
+ EXPECT_EQ(_metrics.latency.getLongValue("count"), 0);
+ EXPECT_EQ(_metrics.ok.getLongValue("last"), 0);
+ test_cond_with_2_gets_sent([&](auto& cond) {
+ cond.handle_reply(_sender, make_matched_reply(0));
+ getClock().setAbsoluteTimeInSeconds(3);
+ cond.handle_reply(_sender, make_matched_reply(1));
+ }, [&](auto& outcome) noexcept {
+ (void)outcome;
+ });
+ EXPECT_EQ(_metrics.latency.getLongValue("count"), 1);
+ EXPECT_EQ(_metrics.ok.getLongValue("last"), 1);
+ EXPECT_DOUBLE_EQ(_metrics.latency.getLast(), 2'000.0); // in millis
+}
+
}
diff --git a/storage/src/tests/distributor/putoperationtest.cpp b/storage/src/tests/distributor/putoperationtest.cpp
index ff375e5b902..76b6741442e 100644
--- a/storage/src/tests/distributor/putoperationtest.cpp
+++ b/storage/src/tests/distributor/putoperationtest.cpp
@@ -73,7 +73,8 @@ public:
operation_context(),
getDistributorBucketSpace(),
msg,
- metrics().puts);
+ metrics().puts,
+ metrics().put_condition_probes);
op->start(_sender);
}
diff --git a/storage/src/tests/distributor/removeoperationtest.cpp b/storage/src/tests/distributor/removeoperationtest.cpp
index d352d23bb8c..d169c80a95d 100644
--- a/storage/src/tests/distributor/removeoperationtest.cpp
+++ b/storage/src/tests/distributor/removeoperationtest.cpp
@@ -41,7 +41,8 @@ struct RemoveOperationTest : Test, DistributorStripeTestUtil {
operation_context(),
getDistributorBucketSpace(),
msg,
- metrics().removes);
+ metrics().removes,
+ metrics().remove_condition_probes);
op->start(_sender);
}
diff --git a/storage/src/tests/storageserver/changedbucketownershiphandlertest.cpp b/storage/src/tests/storageserver/changedbucketownershiphandlertest.cpp
index 4c83dde30da..ae2385a36d8 100644
--- a/storage/src/tests/storageserver/changedbucketownershiphandlertest.cpp
+++ b/storage/src/tests/storageserver/changedbucketownershiphandlertest.cpp
@@ -40,15 +40,11 @@ struct ChangedBucketOwnershipHandlerTest : Test {
uint16_t wantedOwner,
const lib::ClusterState& state);
- std::shared_ptr<api::SetSystemStateCommand> createStateCmd(
- const lib::ClusterState& state) const
- {
+ std::shared_ptr<api::SetSystemStateCommand> createStateCmd(const lib::ClusterState& state) const {
return std::make_shared<api::SetSystemStateCommand>(state);
}
- std::shared_ptr<api::SetSystemStateCommand> createStateCmd(
- const std::string& stateStr) const
- {
+ std::shared_ptr<api::SetSystemStateCommand> createStateCmd(const std::string& stateStr) const {
return createStateCmd(lib::ClusterState(stateStr));
}
@@ -71,11 +67,17 @@ struct ChangedBucketOwnershipHandlerTest : Test {
template <typename MsgType, typename... MsgParams>
void expectDownAbortsMessage(bool expected, MsgParams&& ... params);
- lib::ClusterState getDefaultTestClusterState() const {
+ std::shared_ptr<AbortBucketOperationsCommand> fetch_dispatched_abort_operations_command() {
+ _bottom->waitForMessages(2, 60); // abort cmd + set cluster state cmd
+ EXPECT_EQ(2, _bottom->getNumCommands());
+ return std::dynamic_pointer_cast<AbortBucketOperationsCommand>(_bottom->getCommand(0));
+ }
+
+ static lib::ClusterState getDefaultTestClusterState() {
return lib::ClusterState("distributor:4 storage:1");
}
- lib::ClusterState getStorageDownTestClusterState() const {
+ static lib::ClusterState getStorageDownTestClusterState() {
return lib::ClusterState("distributor:4 storage:1 .0.s:d");
}
@@ -173,29 +175,26 @@ hasAbortedNoneOf(const AbortBucketOperationsCommand::SP& cmd, const Vec& v)
bool
hasOnlySetSystemStateCmdQueued(DummyStorageLink& link) {
+ link.waitForMessages(1, 60);
if (link.getNumCommands() != 1) {
std::cerr << "expected 1 command, found"
<< link.getNumCommands() << "\n";
}
- api::SetSystemStateCommand::SP cmd(
- std::dynamic_pointer_cast<api::SetSystemStateCommand>(
- link.getCommand(0)));
- return (cmd.get() != 0);
+ auto cmd = std::dynamic_pointer_cast<api::SetSystemStateCommand>(link.getCommand(0));
+ return static_cast<bool>(cmd);
}
}
void
-ChangedBucketOwnershipHandlerTest::applyDistribution(
- Redundancy redundancy, NodeCount nodeCount)
+ChangedBucketOwnershipHandlerTest::applyDistribution(Redundancy redundancy, NodeCount nodeCount)
{
_app->setDistribution(redundancy, nodeCount);
_handler->storageDistributionChanged();
}
void
-ChangedBucketOwnershipHandlerTest::applyClusterState(
- const lib::ClusterState& state)
+ChangedBucketOwnershipHandlerTest::applyClusterState(const lib::ClusterState& state)
{
_app->setClusterState(state);
_handler->reloadClusterState();
@@ -212,10 +211,8 @@ TEST_F(ChangedBucketOwnershipHandlerTest, enumerate_buckets_belonging_on_changed
auto node2Buckets(insertBuckets(2, 2, stateBefore));
_top->sendDown(createStateCmd("distributor:4 .1.s:d .3.s:d storage:1"));
- // TODO: refactor into own function
- ASSERT_EQ(2, _bottom->getNumCommands());
- auto cmd = std::dynamic_pointer_cast<AbortBucketOperationsCommand>(_bottom->getCommand(0));
- ASSERT_TRUE(cmd.get() != 0);
+ auto cmd = fetch_dispatched_abort_operations_command();
+ ASSERT_TRUE(cmd);
EXPECT_TRUE(hasAbortedAllOf(cmd, node1Buckets));
EXPECT_TRUE(hasAbortedAllOf(cmd, node3Buckets));
@@ -280,10 +277,8 @@ TEST_F(ChangedBucketOwnershipHandlerTest, down_edge_to_no_available_distributors
lib::ClusterState downState("distributor:3 .0.s:d .1.s:s .2.s:s storage:1");
_top->sendDown(createStateCmd(downState));
- // TODO: refactor into own function
- ASSERT_EQ(2, _bottom->getNumCommands());
- auto cmd = std::dynamic_pointer_cast<AbortBucketOperationsCommand>(_bottom->getCommand(0));
- ASSERT_TRUE(cmd.get() != 0);
+ auto cmd = fetch_dispatched_abort_operations_command();
+ ASSERT_TRUE(cmd);
EXPECT_TRUE(hasAbortedAllOf(cmd, node0Buckets));
EXPECT_TRUE(hasAbortedAllOf(cmd, node1Buckets));
@@ -304,10 +299,8 @@ TEST_F(ChangedBucketOwnershipHandlerTest, ownership_changed_on_distributor_up_ed
auto node2Buckets(insertBuckets(2, 2, stateAfter));
_top->sendDown(createStateCmd(stateAfter));
- // TODO: refactor into own function
- ASSERT_EQ(2, _bottom->getNumCommands());
- auto cmd = std::dynamic_pointer_cast<AbortBucketOperationsCommand>(_bottom->getCommand(0));
- ASSERT_TRUE(cmd.get() != 0);
+ auto cmd = fetch_dispatched_abort_operations_command();
+ ASSERT_TRUE(cmd);
EXPECT_TRUE(hasAbortedAllOf(cmd, node1Buckets));
EXPECT_TRUE(hasAbortedNoneOf(cmd, node0Buckets));
@@ -319,8 +312,7 @@ TEST_F(ChangedBucketOwnershipHandlerTest, ownership_changed_on_distributor_up_ed
}
void
-ChangedBucketOwnershipHandlerTest::sendAndExpectAbortedCreateBucket(
- uint16_t fromDistributorIndex)
+ChangedBucketOwnershipHandlerTest::sendAndExpectAbortedCreateBucket(uint16_t fromDistributorIndex)
{
document::BucketId bucket(16, 6786);
auto msg = std::make_shared<api::CreateBucketCommand>(makeDocumentBucket(bucket));
@@ -350,7 +342,7 @@ TEST_F(ChangedBucketOwnershipHandlerTest, distribution_config_change_updates_own
/**
* Generate and dispatch a message of the given type with the provided
- * aruments as if that message was sent from distributor 1. Messages will
+ * arguments as if that message was sent from distributor 1. Messages will
* be checked as if the state contains 4 distributors in Up state. This
* means that it suffices to send in a message with a bucket that is not
* owned by distributor 1 in this state to trigger an abort.
@@ -382,7 +374,7 @@ ChangedBucketOwnershipHandlerTest::expectChangeAbortsMessage(bool expected, MsgP
/**
* Generate and dispatch a message of the given type with the provided
- * aruments as if that message was sent from distributor 1. Messages will
+ * arguments as if that message was sent from distributor 1. Messages will
* be checked as if the state contains 4 distributors in Up state and storage
* node is down. This means that any abortable message will trigger an abort.
*/
@@ -394,6 +386,7 @@ ChangedBucketOwnershipHandlerTest::expectDownAbortsMessage(bool expected, MsgPar
(void) _bottom->getCommandsOnce();
ASSERT_NO_FATAL_FAILURE((expectChangeAbortsMessage<MsgType, MsgParams...>(false, std::forward<MsgParams>(params)...)));
_top->sendDown(createStateCmd(getStorageDownTestClusterState()));
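+ // Cluster state commands are now applied via a background task, so wait for all expected commands to arrive before asserting.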
+ _bottom->waitForMessages(3, 60);
ASSERT_EQ(_bottom->getNumCommands(), 3);
auto setSystemStateCommand = std::dynamic_pointer_cast<api::SetSystemStateCommand>(_bottom->getCommand(2));
ASSERT_TRUE(setSystemStateCommand);
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe_component.cpp b/storage/src/vespa/storage/distributor/distributor_stripe_component.cpp
index b47e0697a91..9a5fd595b1d 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe_component.cpp
+++ b/storage/src/vespa/storage/distributor/distributor_stripe_component.cpp
@@ -166,7 +166,10 @@ DistributorStripeComponent::update_bucket_database(
}
}
- UpdateBucketDatabaseProcessor processor(getClock(), found_down_node ? up_nodes : changed_nodes, bucketSpace.get_ideal_service_layer_nodes_bundle(bucket.getBucketId()).get_available_nodes(), (update_flags & DatabaseUpdate::RESET_TRUSTED) != 0);
+ UpdateBucketDatabaseProcessor processor(getClock(),
+ found_down_node ? up_nodes : changed_nodes,
+ bucketSpace.get_ideal_service_layer_nodes_bundle(bucket.getBucketId()).get_available_nodes(),
+ (update_flags & DatabaseUpdate::RESET_TRUSTED) != 0);
bucketSpace.getBucketDatabase().process_update(bucket.getBucketId(), processor, (update_flags & DatabaseUpdate::CREATE_IF_NONEXISTING) != 0);
}
diff --git a/storage/src/vespa/storage/distributor/distributormetricsset.cpp b/storage/src/vespa/storage/distributor/distributormetricsset.cpp
index fad44782dd4..cbc0e6f6eef 100644
--- a/storage/src/vespa/storage/distributor/distributormetricsset.cpp
+++ b/storage/src/vespa/storage/distributor/distributormetricsset.cpp
@@ -16,11 +16,13 @@ BucketDbMetrics::~BucketDbMetrics() = default;
DistributorMetricSet::DistributorMetricSet()
: MetricSet("distributor", {{"distributor"}}, ""),
puts("puts", this),
+ put_condition_probes("put_condition_probes", this),
updates(this),
update_puts("update_puts", this),
update_gets("update_gets", this),
update_metadata_gets("update_metadata_gets", this),
removes("removes", this),
+ remove_condition_probes("remove_condition_probes", this),
removelocations("removelocations", this),
gets("gets", this),
stats("stats", this),
diff --git a/storage/src/vespa/storage/distributor/distributormetricsset.h b/storage/src/vespa/storage/distributor/distributormetricsset.h
index ac140b85282..739e84759f1 100644
--- a/storage/src/vespa/storage/distributor/distributormetricsset.h
+++ b/storage/src/vespa/storage/distributor/distributormetricsset.h
@@ -20,24 +20,26 @@ struct BucketDbMetrics : metrics::MetricSet {
class DistributorMetricSet : public metrics::MetricSet {
public:
PersistenceOperationMetricSet puts;
- UpdateMetricSet updates;
+ PersistenceOperationMetricSet put_condition_probes;
+ UpdateMetricSet updates;
PersistenceOperationMetricSet update_puts;
PersistenceOperationMetricSet update_gets;
PersistenceOperationMetricSet update_metadata_gets;
PersistenceOperationMetricSet removes;
+ PersistenceOperationMetricSet remove_condition_probes;
PersistenceOperationMetricSet removelocations;
PersistenceOperationMetricSet gets;
PersistenceOperationMetricSet stats;
PersistenceOperationMetricSet getbucketlists;
- VisitorMetricSet visits;
- metrics::DoubleAverageMetric stateTransitionTime;
- metrics::DoubleAverageMetric set_cluster_state_processing_time;
- metrics::DoubleAverageMetric activate_cluster_state_processing_time;
- metrics::DoubleAverageMetric recoveryModeTime;
- metrics::LongValueMetric docsStored;
- metrics::LongValueMetric bytesStored;
- BucketDbMetrics mutable_dbs;
- BucketDbMetrics read_only_dbs;
+ VisitorMetricSet visits;
+ metrics::DoubleAverageMetric stateTransitionTime;
+ metrics::DoubleAverageMetric set_cluster_state_processing_time;
+ metrics::DoubleAverageMetric activate_cluster_state_processing_time;
+ metrics::DoubleAverageMetric recoveryModeTime;
+ metrics::LongValueMetric docsStored;
+ metrics::LongValueMetric bytesStored;
+ BucketDbMetrics mutable_dbs;
+ BucketDbMetrics read_only_dbs;
explicit DistributorMetricSet();
~DistributorMetricSet() override;
diff --git a/storage/src/vespa/storage/distributor/externaloperationhandler.cpp b/storage/src/vespa/storage/distributor/externaloperationhandler.cpp
index 6cb404aaa0a..d6bb5562a07 100644
--- a/storage/src/vespa/storage/distributor/externaloperationhandler.cpp
+++ b/storage/src/vespa/storage/distributor/externaloperationhandler.cpp
@@ -332,7 +332,9 @@ bool ExternalOperationHandler::onPut(const std::shared_ptr<api::PutCommand>& cmd
if (allow) {
_op = std::make_shared<PutOperation>(_node_ctx, _op_ctx,
_op_ctx.bucket_space_repo().get(bucket_space),
- std::move(cmd), getMetrics().puts, std::move(handle));
+ std::move(cmd),
+ getMetrics().puts, getMetrics().put_condition_probes,
+ std::move(handle));
} else {
_msg_sender.sendUp(makeConcurrentMutationRejectionReply(*cmd, cmd->getDocumentId(), metrics));
}
@@ -386,7 +388,8 @@ bool ExternalOperationHandler::onRemove(const std::shared_ptr<api::RemoveCommand
auto &distributorBucketSpace(_op_ctx.bucket_space_repo().get(bucket_space));
_op = std::make_shared<RemoveOperation>(_node_ctx, _op_ctx, distributorBucketSpace, std::move(cmd),
- getMetrics().removes, std::move(handle));
+ getMetrics().removes, getMetrics().remove_condition_probes,
+ std::move(handle));
} else {
_msg_sender.sendUp(makeConcurrentMutationRejectionReply(*cmd, cmd->getDocumentId(), metrics));
}
diff --git a/storage/src/vespa/storage/distributor/operations/external/check_condition.cpp b/storage/src/vespa/storage/distributor/operations/external/check_condition.cpp
index 9f7dbcaa132..0e12e3e3019 100644
--- a/storage/src/vespa/storage/distributor/operations/external/check_condition.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/check_condition.cpp
@@ -58,7 +58,7 @@ CheckCondition::CheckCondition(const document::Bucket& bucket,
const documentapi::TestAndSetCondition& tas_condition,
const DistributorBucketSpace& bucket_space,
const DistributorNodeContext& node_ctx,
- PersistenceOperationMetricSet& metric,
+ PersistenceOperationMetricSet& condition_probe_metrics,
uint32_t trace_level,
private_ctor_tag)
: _doc_id_bucket(bucket),
@@ -66,7 +66,8 @@ CheckCondition::CheckCondition(const document::Bucket& bucket,
_node_ctx(node_ctx),
_cluster_state_version_at_creation_time(_bucket_space.getClusterState().getVersion()),
_cond_get_op(),
- _sent_message_map()
+ _sent_message_map(),
+ _outcome()
{
// Condition checks only return metadata back to the distributor and thus have an empty fieldset.
// Side note: the BucketId provided to the GetCommand is ignored; GetOperation computes explicitly from the doc ID.
@@ -75,8 +76,8 @@ CheckCondition::CheckCondition(const document::Bucket& bucket,
get_cmd->getTrace().setLevel(trace_level);
_cond_get_op = std::make_shared<GetOperation>(_node_ctx, _bucket_space,
_bucket_space.getBucketDatabase().acquire_read_guard(),
- std::move(get_cmd),
- metric, api::InternalReadConsistency::Strong);
+ std::move(get_cmd), condition_probe_metrics,
+ api::InternalReadConsistency::Strong);
}
CheckCondition::~CheckCondition() = default;
@@ -126,6 +127,10 @@ bool CheckCondition::replica_set_changed_after_get_operation() const {
return (replicas_in_db_now != _cond_get_op->replicas_in_db());
}
+bool CheckCondition::distributor_no_longer_owns_bucket() const {
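+ // Ownership is evaluated against both the pending and the current cluster state.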
+ return !_bucket_space.check_ownership_in_pending_and_current_state(_doc_id_bucket.getBucketId()).isOwned();
+}
+
CheckCondition::Outcome::Result
CheckCondition::newest_replica_to_outcome(const std::optional<NewestReplica>& newest) noexcept {
if (!newest) {
@@ -158,9 +163,13 @@ void CheckCondition::handle_internal_get_operation_reply(std::shared_ptr<api::St
reply->steal_trace());
return;
}
- const auto state_version_now = _bucket_space.getClusterState().getVersion();
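+ // Compare against the pending cluster state version (if any) so that an in-flight state transition also counts as a version change.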
+ auto state_version_now = _bucket_space.getClusterState().getVersion();
+ if (_bucket_space.has_pending_cluster_state()) {
+ state_version_now = _bucket_space.get_pending_cluster_state().getVersion();
+ }
if ((state_version_now != _cluster_state_version_at_creation_time)
- && replica_set_changed_after_get_operation())
+ && (replica_set_changed_after_get_operation()
+ || distributor_no_longer_owns_bucket()))
{
// BUCKET_NOT_FOUND is semantically (usually) inaccurate here, but it's what we use for this purpose
// in existing operations. Checking the replica set will implicitly check for ownership changes,
@@ -220,7 +229,7 @@ CheckCondition::create_if_inconsistent_replicas(const document::Bucket& bucket,
const documentapi::TestAndSetCondition& tas_condition,
const DistributorNodeContext& node_ctx,
const DistributorStripeOperationContext& op_ctx,
- PersistenceOperationMetricSet& metric,
+ PersistenceOperationMetricSet& condition_probe_metrics,
uint32_t trace_level)
{
// TODO move this check to the caller?
@@ -237,8 +246,8 @@ CheckCondition::create_if_inconsistent_replicas(const document::Bucket& bucket,
if (!all_nodes_support_document_condition_probe(entries, op_ctx)) {
return {}; // Want write-repair, but one or more nodes are too old to use the feature
}
- return std::make_shared<CheckCondition>(bucket, doc_id, tas_condition, bucket_space,
- node_ctx, metric, trace_level, private_ctor_tag{});
+ return std::make_shared<CheckCondition>(bucket, doc_id, tas_condition, bucket_space, node_ctx,
+ condition_probe_metrics, trace_level, private_ctor_tag{});
}
}
diff --git a/storage/src/vespa/storage/distributor/operations/external/check_condition.h b/storage/src/vespa/storage/distributor/operations/external/check_condition.h
index 062c9bb831d..999b79adc3d 100644
--- a/storage/src/vespa/storage/distributor/operations/external/check_condition.h
+++ b/storage/src/vespa/storage/distributor/operations/external/check_condition.h
@@ -114,7 +114,7 @@ public:
const documentapi::TestAndSetCondition& tas_condition,
const DistributorBucketSpace& bucket_space,
const DistributorNodeContext& node_ctx,
- PersistenceOperationMetricSet& metric,
+ PersistenceOperationMetricSet& condition_probe_metrics,
uint32_t trace_level,
private_ctor_tag);
~CheckCondition();
@@ -135,10 +135,11 @@ public:
const documentapi::TestAndSetCondition& tas_condition,
const DistributorNodeContext& node_ctx,
const DistributorStripeOperationContext& op_ctx,
- PersistenceOperationMetricSet& metric,
+ PersistenceOperationMetricSet& condition_probe_metrics,
uint32_t trace_level);
private:
[[nodiscard]] bool replica_set_changed_after_get_operation() const;
+ [[nodiscard]] bool distributor_no_longer_owns_bucket() const;
void handle_internal_get_operation_reply(std::shared_ptr<api::StorageReply> reply);
diff --git a/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp
index 952aeff0800..8c6fdb314f3 100644
--- a/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp
@@ -26,6 +26,7 @@ PutOperation::PutOperation(const DistributorNodeContext& node_ctx,
DistributorBucketSpace& bucket_space,
std::shared_ptr<api::PutCommand> msg,
PersistenceOperationMetricSet& metric,
+ PersistenceOperationMetricSet& condition_probe_metrics,
SequencingHandle sequencing_handle)
: SequencedOperation(std::move(sequencing_handle)),
_tracker_instance(metric, std::make_shared<api::PutReply>(*msg), node_ctx, op_ctx, msg->getTimestamp()),
@@ -34,7 +35,7 @@ PutOperation::PutOperation(const DistributorNodeContext& node_ctx,
_doc_id_bucket_id(document::BucketIdFactory{}.getBucketId(_msg->getDocumentId())),
_node_ctx(node_ctx),
_op_ctx(op_ctx),
- _temp_metric(metric), // TODO
+ _condition_probe_metrics(condition_probe_metrics),
_bucket_space(bucket_space)
{
}
@@ -156,7 +157,7 @@ void PutOperation::start_conditional_put(DistributorStripeMessageSender& sender)
document::Bucket bucket(_msg->getBucket().getBucketSpace(), _doc_id_bucket_id);
_check_condition = CheckCondition::create_if_inconsistent_replicas(bucket, _bucket_space, _msg->getDocumentId(),
_msg->getCondition(), _node_ctx, _op_ctx,
- _temp_metric, _msg->getTrace().getLevel());
+ _condition_probe_metrics, _msg->getTrace().getLevel());
if (!_check_condition) {
start_direct_put_dispatch(sender);
} else {
diff --git a/storage/src/vespa/storage/distributor/operations/external/putoperation.h b/storage/src/vespa/storage/distributor/operations/external/putoperation.h
index 6befb8d3e38..635accc1865 100644
--- a/storage/src/vespa/storage/distributor/operations/external/putoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/external/putoperation.h
@@ -28,6 +28,7 @@ public:
DistributorBucketSpace& bucketSpace,
std::shared_ptr<api::PutCommand> msg,
PersistenceOperationMetricSet& metric,
+ PersistenceOperationMetricSet& condition_probe_metrics,
SequencingHandle sequencingHandle = SequencingHandle());
~PutOperation() override;
@@ -44,7 +45,7 @@ private:
document::BucketId _doc_id_bucket_id;
const DistributorNodeContext& _node_ctx;
DistributorStripeOperationContext& _op_ctx;
- PersistenceOperationMetricSet& _temp_metric;
+ PersistenceOperationMetricSet& _condition_probe_metrics;
DistributorBucketSpace& _bucket_space;
std::shared_ptr<CheckCondition> _check_condition;
diff --git a/storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp
index 59ae4120fd6..96182b0744f 100644
--- a/storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp
@@ -16,6 +16,7 @@ RemoveOperation::RemoveOperation(const DistributorNodeContext& node_ctx,
DistributorBucketSpace& bucketSpace,
std::shared_ptr<api::RemoveCommand> msg,
PersistenceOperationMetricSet& metric,
+ PersistenceOperationMetricSet& condition_probe_metrics,
SequencingHandle sequencingHandle)
: SequencedOperation(std::move(sequencingHandle)),
_tracker_instance(metric,
@@ -26,7 +27,7 @@ RemoveOperation::RemoveOperation(const DistributorNodeContext& node_ctx,
_doc_id_bucket_id(document::BucketIdFactory{}.getBucketId(_msg->getDocumentId())),
_node_ctx(node_ctx),
_op_ctx(op_ctx),
- _temp_metric(metric), // TODO
+ _condition_probe_metrics(condition_probe_metrics),
_bucket_space(bucketSpace),
_check_condition()
{
@@ -48,7 +49,7 @@ void RemoveOperation::start_conditional_remove(DistributorStripeMessageSender& s
document::Bucket bucket(_msg->getBucket().getBucketSpace(), _doc_id_bucket_id);
_check_condition = CheckCondition::create_if_inconsistent_replicas(bucket, _bucket_space, _msg->getDocumentId(),
_msg->getCondition(), _node_ctx, _op_ctx,
- _temp_metric, _msg->getTrace().getLevel());
+ _condition_probe_metrics, _msg->getTrace().getLevel());
if (!_check_condition) {
start_direct_remove_dispatch(sender);
} else {
diff --git a/storage/src/vespa/storage/distributor/operations/external/removeoperation.h b/storage/src/vespa/storage/distributor/operations/external/removeoperation.h
index 349a6182937..9f3a98294ea 100644
--- a/storage/src/vespa/storage/distributor/operations/external/removeoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/external/removeoperation.h
@@ -19,6 +19,7 @@ public:
DistributorBucketSpace& bucketSpace,
std::shared_ptr<api::RemoveCommand> msg,
PersistenceOperationMetricSet& metric,
+ PersistenceOperationMetricSet& condition_probe_metrics,
SequencingHandle sequencingHandle = SequencingHandle());
~RemoveOperation() override;
@@ -36,7 +37,7 @@ private:
document::BucketId _doc_id_bucket_id;
const DistributorNodeContext& _node_ctx;
DistributorStripeOperationContext& _op_ctx;
- PersistenceOperationMetricSet& _temp_metric;
+ PersistenceOperationMetricSet& _condition_probe_metrics;
DistributorBucketSpace& _bucket_space;
std::shared_ptr<CheckCondition> _check_condition;
diff --git a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
index 0cb4b223c11..73c65f54b21 100644
--- a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
@@ -34,6 +34,7 @@ TwoPhaseUpdateOperation::TwoPhaseUpdateOperation(
: SequencedOperation(std::move(sequencingHandle)),
_updateMetric(metrics.updates),
_putMetric(metrics.update_puts),
+ _put_condition_probe_metrics(metrics.put_condition_probes), // Updates never trigger put write repair, so this just borrows a reference to the put condition probe metric set
_getMetric(metrics.update_gets),
_metadata_get_metrics(metrics.update_metadata_gets),
_updateCmd(std::move(msg)),
@@ -263,7 +264,7 @@ TwoPhaseUpdateOperation::schedulePutsWithUpdatedDocument(std::shared_ptr<documen
document::Bucket bucket(_updateCmd->getBucket().getBucketSpace(), document::BucketId(0));
auto put = std::make_shared<api::PutCommand>(bucket, doc, putTimestamp);
copyMessageSettings(*_updateCmd, *put);
- auto putOperation = std::make_shared<PutOperation>(_node_ctx, _op_ctx, _bucketSpace, std::move(put), _putMetric);
+ auto putOperation = std::make_shared<PutOperation>(_node_ctx, _op_ctx, _bucketSpace, std::move(put), _putMetric, _put_condition_probe_metrics);
PutOperation & op = *putOperation;
IntermediateMessageSender intermediate(_sentMessageMap, std::move(putOperation), sender);
op.start(intermediate, _node_ctx.clock().getSystemTime());
diff --git a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.h b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.h
index 486ed766510..d2ad5359fa6 100644
--- a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.h
@@ -139,6 +139,7 @@ private:
UpdateMetricSet& _updateMetric;
PersistenceOperationMetricSet& _putMetric;
+ PersistenceOperationMetricSet& _put_condition_probe_metrics;
PersistenceOperationMetricSet& _getMetric;
PersistenceOperationMetricSet& _metadata_get_metrics;
std::shared_ptr<api::UpdateCommand> _updateCmd;
diff --git a/storage/src/vespa/storage/distributor/persistence_operation_metric_set.cpp b/storage/src/vespa/storage/distributor/persistence_operation_metric_set.cpp
index 944b4bafa0a..e66884c4060 100644
--- a/storage/src/vespa/storage/distributor/persistence_operation_metric_set.cpp
+++ b/storage/src/vespa/storage/distributor/persistence_operation_metric_set.cpp
@@ -58,8 +58,8 @@ PersistenceFailuresMetricSet::clone(std::vector<Metric::UP>& ownerList, CopyType
if (copyType == INACTIVE) {
return MetricSet::clone(ownerList, INACTIVE, owner, includeUnused);
}
- return (PersistenceFailuresMetricSet*)
- (new PersistenceFailuresMetricSet(owner))->assignValues(*this);
+ return dynamic_cast<PersistenceFailuresMetricSet*>(
+ (new PersistenceFailuresMetricSet(owner))->assignValues(*this));
}
PersistenceOperationMetricSet::PersistenceOperationMetricSet(const std::string& name, MetricSet* owner)
@@ -69,6 +69,11 @@ PersistenceOperationMetricSet::PersistenceOperationMetricSet(const std::string&
failures(this)
{ }
+PersistenceOperationMetricSet::PersistenceOperationMetricSet(const std::string& name)
+ : PersistenceOperationMetricSet(name, nullptr)
+{
+}
+
PersistenceOperationMetricSet::~PersistenceOperationMetricSet() = default;
MetricSet *
@@ -78,9 +83,8 @@ PersistenceOperationMetricSet::clone(std::vector<Metric::UP>& ownerList, CopyTyp
if (copyType == INACTIVE) {
return MetricSet::clone(ownerList, INACTIVE, owner, includeUnused);
}
- return (PersistenceOperationMetricSet*)
- (new PersistenceOperationMetricSet(getName(), owner))
- ->assignValues(*this);
+ return dynamic_cast<PersistenceOperationMetricSet*>(
+ (new PersistenceOperationMetricSet(getName(), owner))->assignValues(*this));
}
void
diff --git a/storage/src/vespa/storage/distributor/persistence_operation_metric_set.h b/storage/src/vespa/storage/distributor/persistence_operation_metric_set.h
index b818d1bdd9f..eb1c3f57252 100644
--- a/storage/src/vespa/storage/distributor/persistence_operation_metric_set.h
+++ b/storage/src/vespa/storage/distributor/persistence_operation_metric_set.h
@@ -40,10 +40,11 @@ class PersistenceOperationMetricSet : public metrics::MetricSet
mutable std::mutex _mutex;
public:
metrics::DoubleAverageMetric latency;
- metrics::LongCountMetric ok;
+ metrics::LongCountMetric ok;
PersistenceFailuresMetricSet failures;
- PersistenceOperationMetricSet(const std::string& name, metrics::MetricSet* owner = nullptr);
+ PersistenceOperationMetricSet(const std::string& name, metrics::MetricSet* owner);
+ explicit PersistenceOperationMetricSet(const std::string& name);
~PersistenceOperationMetricSet() override;
MetricSet * clone(std::vector<Metric::UP>& ownerList, CopyType copyType,
@@ -57,7 +58,6 @@ public:
*/
void updateFromResult(const api::ReturnCode& result);
- friend class LockWrapper;
class LockWrapper {
std::unique_lock<std::mutex> _lock;
PersistenceOperationMetricSet& _self;
diff --git a/storage/src/vespa/storage/persistence/filestorage/filestormanager.cpp b/storage/src/vespa/storage/persistence/filestorage/filestormanager.cpp
index c0f4041e284..f6b7c7e5f0b 100644
--- a/storage/src/vespa/storage/persistence/filestorage/filestormanager.cpp
+++ b/storage/src/vespa/storage/persistence/filestorage/filestormanager.cpp
@@ -184,7 +184,8 @@ computeAllPossibleHandlerThreads(const vespa::config::content::StorFilestorConfi
return cfg.numThreads +
computeNumResponseThreads(cfg.numResponseThreads) +
cfg.numNetworkThreads +
- cfg.numVisitorThreads;
+ cfg.numVisitorThreads +
+ 1; // Async cluster state processing thread (might be a pessimization to include here...)
}
}
diff --git a/storage/src/vespa/storage/persistence/persistenceutil.cpp b/storage/src/vespa/storage/persistence/persistenceutil.cpp
index 77e8532f0d2..e8534e3f299 100644
--- a/storage/src/vespa/storage/persistence/persistenceutil.cpp
+++ b/storage/src/vespa/storage/persistence/persistenceutil.cpp
@@ -36,6 +36,7 @@ MessageTracker::MessageTracker(const framework::MilliSecTimer & timer,
ThrottleToken throttle_token)
: MessageTracker(timer, env, replySender, true, std::move(bucketLock), std::move(msg), std::move(throttle_token))
{}
+
MessageTracker::MessageTracker(const framework::MilliSecTimer & timer,
const PersistenceUtil & env,
MessageSender & replySender,
@@ -90,7 +91,7 @@ MessageTracker::sendReply() {
if (count_result_as_failure()) {
_env._metrics.failedOperations.inc();
}
- vespalib::duration duration = vespalib::from_s(_timer.getElapsedTimeAsDouble()/1000.0);
+ vespalib::duration duration = _timer.getElapsedTime();
if (duration >= WARN_ON_SLOW_OPERATIONS) {
LOGBT(warning, _msg->getType().toString(),
"Slow processing of message %s. Processing time: %1.1f s (>=%1.1f s)",
diff --git a/storage/src/vespa/storage/storageserver/changedbucketownershiphandler.cpp b/storage/src/vespa/storage/storageserver/changedbucketownershiphandler.cpp
index 9d7dd95d922..3b97ff6c018 100644
--- a/storage/src/vespa/storage/storageserver/changedbucketownershiphandler.cpp
+++ b/storage/src/vespa/storage/storageserver/changedbucketownershiphandler.cpp
@@ -28,6 +28,7 @@ ChangedBucketOwnershipHandler::ChangedBucketOwnershipHandler(
_component(compReg, "changedbucketownershiphandler"),
_metrics(),
_configFetcher(std::make_unique<config::ConfigFetcher>(configUri.getContext())),
+ _state_sync_executor(1), // single thread for sequential task execution
_stateLock(),
_currentState(), // Not set yet, so ownership will not be valid
_currentOwnership(std::make_shared<OwnershipState>(
@@ -98,7 +99,7 @@ ChangedBucketOwnershipHandler::Metrics::Metrics(metrics::MetricSet* owner)
idealStateOpsAborted("ideal_state_ops_aborted", {}, "Number of outdated ideal state operations aborted", this),
externalLoadOpsAborted("external_load_ops_aborted", {}, "Number of outdated external load operations aborted", this)
{}
-ChangedBucketOwnershipHandler::Metrics::~Metrics() { }
+ChangedBucketOwnershipHandler::Metrics::~Metrics() = default;
ChangedBucketOwnershipHandler::OwnershipState::OwnershipState(const ContentBucketSpaceRepo &contentBucketSpaceRepo,
std::shared_ptr<const lib::ClusterStateBundle> state)
@@ -114,7 +115,7 @@ ChangedBucketOwnershipHandler::OwnershipState::OwnershipState(const ContentBucke
}
-ChangedBucketOwnershipHandler::OwnershipState::~OwnershipState() {}
+ChangedBucketOwnershipHandler::OwnershipState::~OwnershipState() = default;
const lib::ClusterState&
@@ -235,18 +236,79 @@ ChangedBucketOwnershipHandler::makeLazyAbortPredicate(
_component.getIndex()));
}
-/*
- * If we go from:
- * 1) Not all down -> all distributors down
- * - abort ops for _all_ buckets
- * 2) All distributors down -> not down
- * - no-op, since down edge must have been handled first
- * 3) All down -> all down
- * - no-op
- * 4) Some nodes down or up
- * - abort ops for buckets that have changed ownership between
- * current and new cluster state.
- */
+class ChangedBucketOwnershipHandler::ClusterStateSyncAndApplyTask
+ : public vespalib::Executor::Task
+{
+ ChangedBucketOwnershipHandler& _owner;
+ std::shared_ptr<api::SetSystemStateCommand> _command;
+public:
+ ClusterStateSyncAndApplyTask(ChangedBucketOwnershipHandler& owner,
+ std::shared_ptr<api::SetSystemStateCommand> command) noexcept
+ : _owner(owner),
+ _command(std::move(command))
+ {}
+
+ /*
+ * If we go from:
+ * 1) Not all down -> all distributors down
+ * - abort ops for _all_ buckets
+ * 2) All distributors down -> not down
+ * - no-op, since down edge must have been handled first
+ * 3) All down -> all down
+ * - no-op
+ * 4) Some nodes down or up
+ * - abort ops for buckets that have changed ownership between
+ * current and new cluster state.
+ */
+ void run() override {
+ OwnershipState::CSP old_ownership;
+ OwnershipState::CSP new_ownership;
+ // Update the ownership state inspected by all bucket-mutating operations passing through
+ // this component so that messages from outdated distributors will be rejected. Note that
+ // this is best-effort; with our current multitude of RPC threads directly dispatching
+ // operations into the persistence provider, it's possible for a thread carrying an outdated
+ // operation to have already passed the barrier, but be preempted so that it will apply the
+ // op _after_ the abort step has completed.
+ {
+ std::lock_guard guard(_owner._stateLock);
+ old_ownership = _owner._currentOwnership;
+ _owner.setCurrentOwnershipWithStateNoLock(_command->getClusterStateBundle());
+ new_ownership = _owner._currentOwnership;
+ }
+ assert(new_ownership->valid());
+ // If we're going from not having a state to having a state, we by
+ // definition cannot possibly have gotten any load that needs aborting,
+ // as no such load is allowed through this component when this is the
+ // case.
+ if (!old_ownership->valid()) {
+ return _owner.sendDown(_command);
+ }
+
+ if (allDistributorsDownInState(old_ownership->getBaselineState())) {
+ LOG(debug, "No need to send aborts on transition '%s' -> '%s'",
+ old_ownership->getBaselineState().toString().c_str(),
+ new_ownership->getBaselineState().toString().c_str());
+ return _owner.sendDown(_command);
+ }
+ _owner.logTransition(old_ownership->getBaselineState(), new_ownership->getBaselineState());
+
+ metrics::MetricTimer duration_timer;
+ auto predicate = _owner.makeLazyAbortPredicate(old_ownership, new_ownership);
+ auto abort_cmd = std::make_shared<AbortBucketOperationsCommand>(std::move(predicate));
+
+ // Will not return until all operation aborts have been performed
+ // on the lower level links, at which point it is safe to send down
+ // the SetSystemStateCommand.
+ _owner.sendDown(abort_cmd);
+ duration_timer.stop(_owner._metrics.averageAbortProcessingTime);
+
+ // Conflicting operations have been aborted and incoming conflicting operations
+ // are aborted inline; send down the state command actually making the state change
+ // visible on the content node.
+ _owner.sendDown(_command);
+ }
+};
+
bool
ChangedBucketOwnershipHandler::onSetSystemState(
const std::shared_ptr<api::SetSystemStateCommand>& stateCmd)
@@ -255,47 +317,13 @@ ChangedBucketOwnershipHandler::onSetSystemState(
LOG(debug, "Operation aborting is config-disabled");
return false; // Early out.
}
- OwnershipState::CSP oldOwnership;
- OwnershipState::CSP newOwnership;
- // Get old state and update own current cluster state _before_ it is
- // applied to the rest of the system. This helps ensure that no message
- // can get through in the off-case that the lower level storage links
- // don't apply the state immediately for some reason.
- {
- std::lock_guard guard(_stateLock);
- oldOwnership = _currentOwnership;
- setCurrentOwnershipWithStateNoLock(stateCmd->getClusterStateBundle());
- newOwnership = _currentOwnership;
- }
- assert(newOwnership->valid());
- // If we're going from not having a state to having a state, we per
- // definition cannot possibly have gotten any load that needs aborting,
- // as no such load is allowed through this component when this is the
- // case.
- if (!oldOwnership->valid()) {
- return false;
- }
-
- if (allDistributorsDownInState(oldOwnership->getBaselineState())) {
- LOG(debug, "No need to send aborts on transition '%s' -> '%s'",
- oldOwnership->getBaselineState().toString().c_str(),
- newOwnership->getBaselineState().toString().c_str());
- return false;
- }
- logTransition(oldOwnership->getBaselineState(), newOwnership->getBaselineState());
-
- metrics::MetricTimer durationTimer;
- auto predicate(makeLazyAbortPredicate(oldOwnership, newOwnership));
- AbortBucketOperationsCommand::SP cmd(
- new AbortBucketOperationsCommand(std::move(predicate)));
-
- // Will not return until all operation aborts have been performed
- // on the lower level links, at which point it is safe to send down
- // the SetSystemStateCommand.
- sendDown(cmd);
-
- durationTimer.stop(_metrics.averageAbortProcessingTime);
- return false;
+ // Dispatch to a background worker. This indirection exists because operations such as lid-space compaction
+ // may cause the implicit wait for operation aborts to block the caller for a relatively long time.
+ // It is very important that the executor has exactly 1 thread, as this guarantees FIFO ordering of state transitions.
+ [[maybe_unused]] auto rejected_task = _state_sync_executor.execute(std::make_unique<ClusterStateSyncAndApplyTask>(*this, stateCmd));
+ // If this fails, we have processed a message _after_ onClose has been called, which should not happen.
+ assert(!rejected_task);
+ return true;
}
/**
@@ -411,8 +439,7 @@ ChangedBucketOwnershipHandler::onDown(
const std::shared_ptr<api::StorageMessage>& msg)
{
if (msg->getType() == api::MessageType::SETSYSTEMSTATE) {
- return onSetSystemState(
- std::static_pointer_cast<api::SetSystemStateCommand>(msg));
+ return onSetSystemState(std::static_pointer_cast<api::SetSystemStateCommand>(msg));
}
if (!isMutatingCommandAndNeedsChecking(*msg)) {
return false;
@@ -451,4 +478,10 @@ ChangedBucketOwnershipHandler::onInternalReply(
return (reply->getType() == AbortBucketOperationsReply::ID);
}
+void
+ChangedBucketOwnershipHandler::onClose()
+{
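+ // Stop accepting new state sync tasks and wait for any in-flight task to finish before the chain is torn down.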
+ _state_sync_executor.shutdown().sync();
+}
+
}
diff --git a/storage/src/vespa/storage/storageserver/changedbucketownershiphandler.h b/storage/src/vespa/storage/storageserver/changedbucketownershiphandler.h
index e753d96871e..8798d109955 100644
--- a/storage/src/vespa/storage/storageserver/changedbucketownershiphandler.h
+++ b/storage/src/vespa/storage/storageserver/changedbucketownershiphandler.h
@@ -10,6 +10,7 @@
#include <vespa/metrics/valuemetric.h>
#include <vespa/metrics/countmetric.h>
#include <vespa/metrics/metricset.h>
+#include <vespa/vespalib/util/threadstackexecutor.h>
#include <atomic>
#include <vector>
#include <unordered_map>
@@ -60,26 +61,24 @@ class ChangedBucketOwnershipHandler
private config::IFetcherCallback<vespa::config::content::PersistenceConfig>
{
public:
- class Metrics : public metrics::MetricSet
- {
+ class Metrics : public metrics::MetricSet {
public:
metrics::LongAverageMetric averageAbortProcessingTime;
metrics::LongCountMetric idealStateOpsAborted;
metrics::LongCountMetric externalLoadOpsAborted;
- Metrics(metrics::MetricSet* owner = 0);
- ~Metrics();
+ explicit Metrics(metrics::MetricSet* owner = nullptr);
+ ~Metrics() override;
};
/**
* Wrapper around the distribution & state pairs that decides how to
* compute the owner distributor for a bucket. It's possible to have
* an ownership state with a nullptr cluster state when the node
- * initially starts up, which is why no owership state must be used unless
+ * initially starts up, which is why no ownership state must be used unless
* invoking valid() on it returns true.
*/
- class OwnershipState
- {
+ class OwnershipState {
using BucketSpace = document::BucketSpace;
std::unordered_map<BucketSpace, std::shared_ptr<const lib::Distribution>, BucketSpace::hash> _distributions;
std::shared_ptr<const lib::ClusterStateBundle> _state;
@@ -93,7 +92,7 @@ public:
static const uint16_t FAILED_TO_RESOLVE = 0xffff;
- bool valid() const {
+ [[nodiscard]] bool valid() const noexcept {
return (!_distributions.empty() && _state);
}
@@ -114,16 +113,21 @@ public:
void reloadClusterState();
private:
- ServiceLayerComponent _component;
- Metrics _metrics;
- std::unique_ptr<config::ConfigFetcher> _configFetcher;
- mutable std::mutex _stateLock;
- std::shared_ptr<const lib::ClusterStateBundle> _currentState;
- OwnershipState::CSP _currentOwnership;
-
- std::atomic<bool> _abortQueuedAndPendingOnStateChange;
- std::atomic<bool> _abortMutatingIdealStateOps;
- std::atomic<bool> _abortMutatingExternalLoadOps;
+ class ClusterStateSyncAndApplyTask;
+
+ using ConfigFetcherUP = std::unique_ptr<config::ConfigFetcher>;
+ using ClusterStateBundleCSP = std::shared_ptr<const lib::ClusterStateBundle>;
+
+ ServiceLayerComponent _component;
+ Metrics _metrics;
+ ConfigFetcherUP _configFetcher;
+ vespalib::ThreadStackExecutor _state_sync_executor;
+ mutable std::mutex _stateLock;
+ ClusterStateBundleCSP _currentState;
+ OwnershipState::CSP _currentOwnership;
+ std::atomic<bool> _abortQueuedAndPendingOnStateChange;
+ std::atomic<bool> _abortMutatingIdealStateOps;
+ std::atomic<bool> _abortMutatingExternalLoadOps;
std::unique_ptr<AbortBucketOperationsCommand::AbortPredicate>
makeLazyAbortPredicate(
@@ -183,14 +187,12 @@ private:
public:
ChangedBucketOwnershipHandler(const config::ConfigUri& configUri,
ServiceLayerComponentRegister& compReg);
- ~ChangedBucketOwnershipHandler();
+ ~ChangedBucketOwnershipHandler() override;
- bool onSetSystemState(
- const std::shared_ptr<api::SetSystemStateCommand>&) override;
+ bool onSetSystemState(const std::shared_ptr<api::SetSystemStateCommand>&) override;
bool onDown(const std::shared_ptr<api::StorageMessage>&) override;
-
- bool onInternalReply(
- const std::shared_ptr<api::InternalReply>& reply) override;
+ bool onInternalReply(const std::shared_ptr<api::InternalReply>& reply) override;
+ void onClose() override;
void configure(std::unique_ptr<vespa::config::content::PersistenceConfig>) override;
diff --git a/streamingvisitors/src/vespa/searchvisitor/searchvisitor.cpp b/streamingvisitors/src/vespa/searchvisitor/searchvisitor.cpp
index c53dfae294a..9d62122af87 100644
--- a/streamingvisitors/src/vespa/searchvisitor/searchvisitor.cpp
+++ b/streamingvisitors/src/vespa/searchvisitor/searchvisitor.cpp
@@ -140,7 +140,7 @@ get_tensor_type(const document::FieldValue& fv)
}
AttributeVector::SP
-createAttribute(const vespalib::string & name, const document::FieldValue & fv)
+createAttribute(const vespalib::string & name, const document::FieldValue & fv, search::attribute::DistanceMetric dm)
{
LOG(debug, "Create single value attribute '%s' with value type '%s'", name.c_str(), fv.className());
if (fv.isA(document::FieldValue::Type::BYTE) || fv.isA(document::FieldValue::Type::INT) || fv.isA(document::FieldValue::Type::LONG)) {
@@ -156,6 +156,7 @@ createAttribute(const vespalib::string & name, const document::FieldValue & fv)
auto tdt = get_tensor_type(fv);
assert(tdt != nullptr);
cfg.setTensorType(tdt->getTensorType());
+ cfg.set_distance_metric(dm);
return std::make_shared<search::tensor::TensorExtAttribute>(name, cfg);
} else {
LOG(debug, "Can not make an attribute out of %s of type '%s'.", name.c_str(), fv.className());
@@ -860,7 +861,7 @@ void SearchVisitor::setupAttributeVector(const FieldPath &fieldPath) {
} else if (typeSeen == WSET) {
attr = createMultiValueAttribute (attrName, fv, false);
} else {
- attr = createAttribute(attrName, fv);
+ attr = createAttribute(attrName, fv, _fieldSearchSpecMap.get_distance_metric(attrName));
}
if (attr) {
diff --git a/streamingvisitors/src/vespa/vsm/searcher/nearest_neighbor_field_searcher.cpp b/streamingvisitors/src/vespa/vsm/searcher/nearest_neighbor_field_searcher.cpp
index d4aee8ad652..772f336e5df 100644
--- a/streamingvisitors/src/vespa/vsm/searcher/nearest_neighbor_field_searcher.cpp
+++ b/streamingvisitors/src/vespa/vsm/searcher/nearest_neighbor_field_searcher.cpp
@@ -5,6 +5,7 @@
#include <vespa/document/datatype/tensor_data_type.h>
#include <vespa/document/fieldvalue/tensorfieldvalue.h>
#include <vespa/searchcommon/attribute/config.h>
+#include <vespa/searchlib/attribute/distance_metric_utils.h>
#include <vespa/searchlib/fef/iqueryenvironment.h>
#include <vespa/searchlib/fef/query_value.h>
#include <vespa/searchlib/query/streaming/nearest_neighbor_query_node.h>
@@ -14,10 +15,14 @@
#include <vespa/searchlib/tensor/tensor_ext_attribute.h>
#include <vespa/vespalib/util/exceptions.h>
#include <vespa/vespalib/util/issue.h>
+#include <algorithm>
+#include <cctype>
using search::attribute::BasicType;
using search::attribute::CollectionType;
using search::attribute::Config;
+using search::attribute::DistanceMetric;
+using search::attribute::DistanceMetricUtils;
using search::fef::QueryValue;
using search::tensor::DistanceCalculator;
using search::tensor::TensorExtAttribute;
@@ -28,10 +33,11 @@ namespace {
constexpr uint32_t scratch_docid = 0;
std::unique_ptr<TensorExtAttribute>
-make_attribute(const ValueType& tensor_type)
+make_attribute(const ValueType& tensor_type, search::attribute::DistanceMetric dm)
{
Config cfg(BasicType::TENSOR, CollectionType::SINGLE);
cfg.setTensorType(tensor_type);
+ cfg.set_distance_metric(dm);
auto result = std::make_unique<TensorExtAttribute>("nnfs_attr", cfg);
uint32_t docid;
result->addDoc(docid);
@@ -61,7 +67,7 @@ NearestNeighborFieldSearcher::NodeAndCalc::to_raw_score(double distance)
}
NearestNeighborFieldSearcher::NearestNeighborFieldSearcher(FieldIdT fid,
- search::attribute::DistanceMetric metric)
+ DistanceMetric metric)
: FieldSearcher(fid),
_metric(metric),
_attr(),
@@ -89,7 +95,7 @@ NearestNeighborFieldSearcher::prepare(search::streaming::QueryTermList& qtl,
vespalib::Issue::report("Data type for field %u is '%s', but expected it to be a tensor type",
field(), field_paths[field()].back().getDataType().toString().c_str());
}
- _attr = make_attribute(tensor_type->getTensorType());
+ _attr = make_attribute(tensor_type->getTensorType(), _metric);
_calcs.clear();
for (auto term : qtl) {
auto* nn_term = term->as_nearest_neighbor_query_node();
@@ -134,25 +140,20 @@ NearestNeighborFieldSearcher::onValue(const document::FieldValue& fv)
}
}
-search::attribute::DistanceMetric
+DistanceMetric
NearestNeighborFieldSearcher::distance_metric_from_string(const vespalib::string& value)
{
- using search::attribute::DistanceMetric;
// Valid string values must match the definition of DistanceMetric in
// config-model/src/main/java/com/yahoo/schema/document/Attribute.java
- if (value == "EUCLIDEAN") {
+ auto v = value;
+ std::transform(v.begin(), v.end(), v.begin(),
+ [](unsigned char c) { return std::tolower(c); });
+ try {
+ return DistanceMetricUtils::to_distance_metric(v);
+ } catch (vespalib::IllegalStateException&) {
+ vespalib::Issue::report("Distance metric '%s' is not supported. Using 'euclidean' instead", value.c_str());
return DistanceMetric::Euclidean;
- } else if (value == "ANGULAR") {
- return DistanceMetric::Angular;
- } else if (value == "GEODEGREES") {
- return DistanceMetric::GeoDegrees;
- } else if (value == "INNERPRODUCT") {
- return DistanceMetric::InnerProduct;
- } else if (value == "HAMMING") {
- return DistanceMetric::Hamming;
}
- vespalib::Issue::report("Distance metric '%s' is not supported. Using 'euclidean' instead", value.c_str());
- return DistanceMetric::Euclidean;
}
}
diff --git a/streamingvisitors/src/vespa/vsm/vsm/docsumfilter.cpp b/streamingvisitors/src/vespa/vsm/vsm/docsumfilter.cpp
index 596525e17d7..146dd487769 100644
--- a/streamingvisitors/src/vespa/vsm/vsm/docsumfilter.cpp
+++ b/streamingvisitors/src/vespa/vsm/vsm/docsumfilter.cpp
@@ -11,6 +11,7 @@
#include <vespa/document/datatype/datatype.h>
#include <vespa/document/fieldvalue/stringfieldvalue.h>
#include <vespa/vespalib/data/slime/inserter.h>
+#include <cassert>
#include <vespa/log/log.h>
LOG_SETUP(".vsm.docsumfilter");
diff --git a/streamingvisitors/src/vespa/vsm/vsm/fieldsearchspec.cpp b/streamingvisitors/src/vespa/vsm/vsm/fieldsearchspec.cpp
index 98ed8a26938..f6ac3a6c88a 100644
--- a/streamingvisitors/src/vespa/vsm/vsm/fieldsearchspec.cpp
+++ b/streamingvisitors/src/vespa/vsm/vsm/fieldsearchspec.cpp
@@ -313,6 +313,23 @@ void FieldSearchSpecMap::buildSearcherMap(const StringFieldIdTMapT & fieldsInQue
std::sort(fieldSearcherMap.begin(), fieldSearcherMap.end(), lesserField);
}
+search::attribute::DistanceMetric
+FieldSearchSpecMap::get_distance_metric(const vespalib::string& name) const
+{
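+ // Default to Euclidean unless the field is known and uses the NEAREST_NEIGHBOR search method.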
+ auto dm = search::attribute::DistanceMetric::Euclidean;
+ auto fid = _nameIdMap.fieldNo(name);
+ if (fid == vsm::StringFieldIdTMap::npos) {
+ return dm;
+ }
+ auto itr = _specMap.find(fid);
+ if (itr == _specMap.end()) {
+ return dm;
+ }
+ if (!itr->second.uses_nearest_neighbor_search_method()) {
+ return dm;
+ }
+ return vsm::NearestNeighborFieldSearcher::distance_metric_from_string(itr->second.get_arg1());
+}
vespalib::asciistream & operator <<(vespalib::asciistream & os, const FieldSearchSpecMap & df)
{
diff --git a/streamingvisitors/src/vespa/vsm/vsm/fieldsearchspec.h b/streamingvisitors/src/vespa/vsm/vsm/fieldsearchspec.h
index 14a30ed8c36..0fa0eca4357 100644
--- a/streamingvisitors/src/vespa/vsm/vsm/fieldsearchspec.h
+++ b/streamingvisitors/src/vespa/vsm/vsm/fieldsearchspec.h
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
+#include <vespa/searchcommon/attribute/distance_metric.h>
#include <vespa/vsm/searcher/fieldsearcher.h>
#include <vespa/vsm/config/vsm-cfif.h>
@@ -21,6 +22,8 @@ public:
FieldIdT id() const { return _id; }
bool valid() const { return static_cast<bool>(_searcher); }
size_t maxLength() const { return _maxLength; }
+ bool uses_nearest_neighbor_search_method() const noexcept { return _searchMethod == VsmfieldsConfig::Fieldspec::Searchmethod::NEAREST_NEIGHBOR; }
+ const vespalib::string& get_arg1() const noexcept { return _arg1; }
/**
* Reconfigures the field searcher based on information in the given query term.
@@ -87,6 +90,7 @@ public:
friend vespalib::asciistream & operator <<(vespalib::asciistream & os, const FieldSearchSpecMap & f);
static vespalib::string stripNonFields(const vespalib::string & rawIndex);
+ search::attribute::DistanceMetric get_distance_metric(const vespalib::string& name) const;
private:
FieldSearchSpecMapT _specMap; // mapping from field id to field search spec
diff --git a/tenant-cd-api/src/main/java/ai/vespa/hosted/cd/EnabledInRegions.java b/tenant-cd-api/src/main/java/ai/vespa/hosted/cd/EnabledInRegions.java
index db2e5ac5f95..9ccd0588d6d 100644
--- a/tenant-cd-api/src/main/java/ai/vespa/hosted/cd/EnabledInRegions.java
+++ b/tenant-cd-api/src/main/java/ai/vespa/hosted/cd/EnabledInRegions.java
@@ -35,7 +35,7 @@ class EnabledInRegionsCondition implements ExecutionCondition {
return ConditionEvaluationResult.enabled(EnabledInRegions.class.getSimpleName() + " is not present");
List<String> enablingRegions = List.of(annotation.get().value());
- String thisRegion = TestRuntime.get().application().instance();
+ String thisRegion = TestRuntime.get().zone().region();
String reason = "Enabled in: %s. Current region: %s.".formatted(enablingRegions.isEmpty() ? "no regions" : "regions " + String.join(", ", enablingRegions), thisRegion);
return enablingRegions.contains(thisRegion) ? ConditionEvaluationResult.enabled(reason) : ConditionEvaluationResult.disabled(reason);
}
diff --git a/vdslib/pom.xml b/vdslib/pom.xml
index e966d8ce6b6..53fa6275eb1 100644
--- a/vdslib/pom.xml
+++ b/vdslib/pom.xml
@@ -47,6 +47,10 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
+ <configuration>
+ <bundleType>CORE</bundleType>
+ <suppressWarningMissingImportPackages>true</suppressWarningMissingImportPackages>
+ </configuration>
</plugin>
<plugin>
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/client/AthenzIdentityProviderImpl.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/client/AthenzIdentityProviderImpl.java
index 2f8ef8cbcc0..e97409b40ef 100644
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/client/AthenzIdentityProviderImpl.java
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/client/AthenzIdentityProviderImpl.java
@@ -241,7 +241,7 @@ public final class AthenzIdentityProviderImpl extends AbstractComponent implemen
@Override
public PrivateKey getPrivateKey() {
- return autoReloadingX509KeyManager.getPrivateKey(AutoReloadingX509KeyManager.CERTIFICATE_ALIAS);
+ return autoReloadingX509KeyManager.getCurrentCertificateWithKey().privateKey();
}
@Override
@@ -251,7 +251,7 @@ public final class AthenzIdentityProviderImpl extends AbstractComponent implemen
@Override
public List<X509Certificate> getIdentityCertificate() {
- return List.of(autoReloadingX509KeyManager.getCertificateChain(AutoReloadingX509KeyManager.CERTIFICATE_ALIAS));
+ return List.of(autoReloadingX509KeyManager.getCurrentCertificateWithKey().certificate());
}
@Override
diff --git a/vespa-dependencies-enforcer/allowed-maven-dependencies.txt b/vespa-dependencies-enforcer/allowed-maven-dependencies.txt
index 7c2db779693..de2ee3fb412 100644
--- a/vespa-dependencies-enforcer/allowed-maven-dependencies.txt
+++ b/vespa-dependencies-enforcer/allowed-maven-dependencies.txt
@@ -90,7 +90,7 @@ net.openhft:zero-allocation-hashing:0.16
org.antlr:antlr-runtime:3.5.3
org.antlr:antlr4-runtime:4.11.1
org.apache.aries.spifly:org.apache.aries.spifly.dynamic.bundle:1.3.6
-org.apache.commons:commons-compress:1.22
+org.apache.commons:commons-compress:1.23.0
org.apache.commons:commons-csv:1.8
org.apache.commons:commons-exec:1.3
org.apache.commons:commons-lang3:3.12.0
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpFeedClient.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpFeedClient.java
index 3f54808a758..730d9787735 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpFeedClient.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpFeedClient.java
@@ -20,6 +20,7 @@ import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.time.Duration;
+import java.time.Instant;
import java.util.HashMap;
import java.util.Map;
import java.util.StringJoiner;
@@ -128,12 +129,13 @@ class HttpFeedClient implements FeedClient {
}
private void verifyConnection(FeedClientBuilderImpl builder, Cluster cluster) {
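+ // Record when the handshake started so a failure can report the elapsed time.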
+ Instant start = Instant.now();
try {
HttpRequest request = new HttpRequest("POST",
getPath(DocumentId.of("feeder", "handshake", "dummy")) + getQuery(empty(), true),
requestHeaders,
null,
- Duration.ofSeconds(10));
+ Duration.ofSeconds(15));
CompletableFuture<HttpResponse> future = new CompletableFuture<>();
cluster.dispatch(request, future);
HttpResponse response = future.get(20, TimeUnit.SECONDS);
@@ -155,7 +157,8 @@ class HttpFeedClient implements FeedClient {
}
}
catch (ExecutionException e) {
- throw new FeedException("failed handshake with server: " + e.getCause(), e.getCause());
+ Duration duration = Duration.between(start, Instant.now());
+ throw new FeedException("failed handshake with server after " + duration + ": " + e.getCause(), e.getCause());
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/HttpFeedClientTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/HttpFeedClientTest.java
index 295108b5ed5..66c9adb2ced 100644
--- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/HttpFeedClientTest.java
+++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/HttpFeedClientTest.java
@@ -213,9 +213,11 @@ class HttpFeedClientTest {
@Test
void testHandshake() {
// dummy:123 does not exist, and results in a host-not-found exception.
- assertTrue(assertThrows(FeedException.class,
- () -> new HttpFeedClient(new FeedClientBuilderImpl(Collections.singletonList(URI.create("https://dummy:123")))))
- .getMessage().startsWith("failed handshake with server: java.net.UnknownHostException"));
+ FeedException exception = assertThrows(FeedException.class,
+ () -> new HttpFeedClient(new FeedClientBuilderImpl(Collections.singletonList(URI.create("https://dummy:123")))));
+ String message = exception.getMessage();
+ assertTrue(message.startsWith("failed handshake with server after "), message);
+ assertTrue(message.contains("java.net.UnknownHostException"), message);
HttpResponse oldResponse = HttpResponse.of(400, "{\"pathId\":\"/document/v1/test/build/docid/foo\",\"message\":\"Could not read document, no document?\"}".getBytes(UTF_8));
HttpResponse okResponse = HttpResponse.of(200, null);
diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java
index d3babc682a5..e6d5ea48e8f 100644
--- a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java
+++ b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java
@@ -1219,10 +1219,12 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
parameters.setPriority(DocumentProtocol.Priority.NORMAL_4);
getProperty(request, FROM_TIMESTAMP, unsignedLongParser).ifPresent(parameters::setFromTimestamp);
- getProperty(request, TO_TIMESTAMP, unsignedLongParser).ifPresent(parameters::setToTimestamp);
- if (Long.compareUnsigned(parameters.getFromTimestamp(), parameters.getToTimestamp()) > 0) {
- throw new IllegalArgumentException("toTimestamp must be greater than, or equal to, fromTimestamp");
- }
+ getProperty(request, TO_TIMESTAMP, unsignedLongParser).ifPresent(ts -> {
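+ // Only validate the range when toTimestamp is explicitly provided, so open-ended ranges remain allowed.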
+ parameters.setToTimestamp(ts);
+ if (Long.compareUnsigned(parameters.getFromTimestamp(), parameters.getToTimestamp()) > 0) {
+ throw new IllegalArgumentException("toTimestamp must be greater than, or equal to, fromTimestamp");
+ }
+ });
StorageCluster storageCluster = resolveCluster(cluster, clusters);
parameters.setRoute(storageCluster.name());
@@ -1367,8 +1369,7 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
try (response) {
callback.onEnd(response);
- if (getVisitorStatistics() != null)
- response.writeDocumentCount(getVisitorStatistics().getDocumentsVisited());
+ response.writeDocumentCount(getVisitorStatistics() == null ? 0 : getVisitorStatistics().getDocumentsVisited());
if (session.get() != null)
response.writeTrace(session.get().getTrace());
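
Moving the range check into the ifPresent callback means a request that omits toTimestamp keeps the default of 0 (an open upper bound) without tripping the validation, while the bounds are still compared as unsigned 64-bit values. A standalone sketch of that comparison (the helper below is illustrative, not the handler's actual code):

    import java.util.OptionalLong;

    // Sketch: validate a [from, to] range only when an upper bound was actually given,
    // comparing both values as unsigned 64-bit numbers.
    final class TimestampRangeSketch {
        static void validate(long fromTimestamp, OptionalLong toTimestamp) {
            toTimestamp.ifPresent(to -> {
                if (Long.compareUnsigned(fromTimestamp, to) > 0) {
                    throw new IllegalArgumentException("toTimestamp must be greater than, or equal to, fromTimestamp");
                }
            });
        }

        public static void main(String[] args) {
            validate(1234L, OptionalLong.empty());      // only fromTimestamp given: open upper bound, accepted
            validate(1234L, OptionalLong.of(2345L));    // ordered range, accepted
            try {
                validate(-1L, OptionalLong.of(1L));     // -1L is 2^64-1 unsigned, so from > to: rejected
            } catch (IllegalArgumentException expected) {
                System.out.println("rejected: " + expected.getMessage());
            }
        }
    }
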
diff --git a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java
index dec546b4294..e8f42fbecfa 100644
--- a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java
+++ b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java
@@ -355,6 +355,7 @@ public class DocumentV1ApiTest {
{
"pathId": "/document/v1/space/music/docid",
"documents": [],
+ "documentCount": 0,
"message": "failure?"
}""", response.readAll());
assertEquals(200, response.getStatus());
@@ -383,7 +384,8 @@ public class DocumentV1ApiTest {
response = driver.sendRequest("http://localhost/document/v1/space/music/docid?destinationCluster=content&selection=true&cluster=content&timeout=60", POST);
assertSameJson("""
{
- "pathId": "/document/v1/space/music/docid"
+ "pathId": "/document/v1/space/music/docid",
+ "documentCount": 0
}""",
response.readAll());
assertEquals(200, response.getStatus());
@@ -416,7 +418,8 @@ public class DocumentV1ApiTest {
}""");
assertSameJson("""
{
- "pathId": "/document/v1/space/music/docid"
+ "pathId": "/document/v1/space/music/docid",
+ "documentCount": 0
}""",
response.readAll());
assertEquals(200, response.getStatus());
@@ -470,6 +473,7 @@ public class DocumentV1ApiTest {
assertSameJson("""
{
"pathId": "/document/v1/space/music/docid",
+ "documentCount": 0,
"message": "boom"
}""",
response.readAll());
@@ -511,6 +515,7 @@ public class DocumentV1ApiTest {
{
"pathId": "/document/v1/space/music/group/best%27",
"documents": [],
+ "documentCount": 0,
"message": "error"
}""",
response.readAll());
@@ -951,6 +956,39 @@ public class DocumentV1ApiTest {
driver.close();
}
+ private void doTestVisitRequestWithParams(String httpReqParams, Consumer<VisitorParameters> paramChecker) {
+ try (var driver = new RequestHandlerTestDriver(handler)) {
+ access.expect(parameters -> {
+ paramChecker.accept(parameters);
+ parameters.getControlHandler().onDone(VisitorControlHandler.CompletionCode.SUCCESS, "great success");
+ });
+ var response = driver.sendRequest("http://localhost/document/v1/?cluster=content&%s".formatted(httpReqParams));
+ assertSameJson("""
+ {
+ "pathId": "/document/v1/",
+ "documents": [ ],
+ "documentCount": 0
+ }""",
+ response.readAll());
+ assertEquals(200, response.getStatus());
+ }
+ }
+
+ @Test
+ public void visit_timestamp_ranges_can_be_open_in_both_ends() {
+ // Only specifying fromTimestamp; visit up to current time
+ doTestVisitRequestWithParams("fromTimestamp=1234", (params) -> {
+ assertEquals(params.getFromTimestamp(), 1234);
+ assertEquals(params.getToTimestamp(), 0); // Means "current wall clock time" when it hits storage
+ });
+
+ // Only specifying toTimestamp; visit all docs up to this time point
+ doTestVisitRequestWithParams("toTimestamp=2345", (params) -> {
+ assertEquals(params.getFromTimestamp(), 0); // The dawn of time(tm)
+ assertEquals(params.getToTimestamp(), 2345);
+ });
+ }
+
@Test
public void testThroughput() throws InterruptedException {
DocumentOperationExecutorConfig executorConfig = new DocumentOperationExecutorConfig.Builder().build();
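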
diff --git a/vespajlib/pom.xml b/vespajlib/pom.xml
index 9b10f82c986..5ba7f2d2ff2 100644
--- a/vespajlib/pom.xml
+++ b/vespajlib/pom.xml
@@ -104,6 +104,10 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
+ <configuration>
+ <bundleType>CORE</bundleType>
+ <suppressWarningMissingImportPackages>true</suppressWarningMissingImportPackages>
+ </configuration>
</plugin>
<plugin>
diff --git a/vespajlib/src/main/java/com/yahoo/concurrent/maintenance/Maintainer.java b/vespajlib/src/main/java/com/yahoo/concurrent/maintenance/Maintainer.java
index 31f4038c16e..150021091ca 100644
--- a/vespajlib/src/main/java/com/yahoo/concurrent/maintenance/Maintainer.java
+++ b/vespajlib/src/main/java/com/yahoo/concurrent/maintenance/Maintainer.java
@@ -37,10 +37,10 @@ public abstract class Maintainer implements Runnable {
private final AtomicBoolean shutDown = new AtomicBoolean();
private final boolean ignoreCollision;
private final Clock clock;
- private final Double successFactorBaseline;
+ private final double successFactorBaseline;
public Maintainer(String name, Duration interval, Clock clock, JobControl jobControl,
- JobMetrics jobMetrics, List<String> clusterHostnames, boolean ignoreCollision, Double successFactorBaseline) {
+ JobMetrics jobMetrics, List<String> clusterHostnames, boolean ignoreCollision, double successFactorBaseline) {
this.name = name;
this.interval = requireInterval(interval);
this.jobControl = Objects.requireNonNull(jobControl);
@@ -98,7 +98,7 @@ public abstract class Maintainer implements Runnable {
* Called once each time this maintenance job should run.
*
* @return the degree to which the run deviated from the successFactorBaseline - a number between -1 (no success) and 0 (complete success).
- * Note that this indicates whether something is wrong, so e.g if the call did nothing because it should do
+ * Note that this indicates whether something is wrong, so e.g. if the call did nothing because it should do
* nothing, 0.0 should be returned.
*/
protected abstract double maintain();
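
To make that contract concrete, a hypothetical maintainer could report its deviation from the baseline like this (a sketch only, not an actual Vespa maintainer; the two private helpers are stand-ins):

    // Sketch of the maintain() contract: 0.0 means "as successful as the baseline"
    // (including the case where there was nothing to do), while values down to -1.0
    // report the degree of failure.
    class ExampleMaintainerSketch /* would extend Maintainer */ {
        protected double maintain() {
            int attempted = listWorkItems();
            if (attempted == 0) return 0.0;            // nothing to do is complete success
            int failed = processAndCountFailures(attempted);
            return -((double) failed / attempted);     // e.g. 3 of 10 failed -> -0.3
        }

        private int listWorkItems() { return 10; }                        // hypothetical
        private int processAndCountFailures(int attempted) { return 0; }  // hypothetical
    }
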
diff --git a/vespajlib/src/main/java/com/yahoo/slime/SlimeUtils.java b/vespajlib/src/main/java/com/yahoo/slime/SlimeUtils.java
index 6acd0679da2..b08346f7cec 100644
--- a/vespajlib/src/main/java/com/yahoo/slime/SlimeUtils.java
+++ b/vespajlib/src/main/java/com/yahoo/slime/SlimeUtils.java
@@ -126,20 +126,24 @@ public class SlimeUtils {
return Duration.ofMillis(field.asLong());
}
+ public static boolean isPresent(Inspector field) {
+ return field.valid() && field.type() != Type.NIX;
+ }
+
public static Optional<String> optionalString(Inspector inspector) {
- return Optional.of(inspector.asString()).filter(s -> !s.isEmpty());
+ return Optional.of(inspector).filter(SlimeUtils::isPresent).map(Inspector::asString);
}
public static OptionalLong optionalLong(Inspector field) {
- return field.valid() ? OptionalLong.of(field.asLong()) : OptionalLong.empty();
+ return isPresent(field) ? OptionalLong.of(field.asLong()) : OptionalLong.empty();
}
public static OptionalInt optionalInteger(Inspector field) {
- return field.valid() ? OptionalInt.of((int) field.asLong()) : OptionalInt.empty();
+ return isPresent(field) ? OptionalInt.of((int) field.asLong()) : OptionalInt.empty();
}
public static OptionalDouble optionalDouble(Inspector field) {
- return field.valid() ? OptionalDouble.of(field.asDouble()) : OptionalDouble.empty();
+ return isPresent(field) ? OptionalDouble.of(field.asDouble()) : OptionalDouble.empty();
}
public static Optional<Instant> optionalInstant(Inspector field) {
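
With isPresent(), a field holding JSON null (Slime type NIX) is now treated like a missing field, and an explicitly empty string is no longer filtered out of optionalString. A rough usage sketch (assuming SlimeUtils.jsonToSlime and Slime.get() behave as they do elsewhere in vespajlib):

    import com.yahoo.slime.Inspector;
    import com.yahoo.slime.Slime;
    import com.yahoo.slime.SlimeUtils;

    // Sketch: a JSON null field and an absent field both map to an empty Optional,
    // where the old valid()-only check treated the null as present.
    class SlimeNullSketch {
        public static void main(String[] args) {
            Slime slime = SlimeUtils.jsonToSlime("{\"name\": null, \"count\": 7}");
            Inspector root = slime.get();
            System.out.println(SlimeUtils.optionalString(root.field("name")));     // empty (JSON null / NIX)
            System.out.println(SlimeUtils.optionalString(root.field("missing")));  // empty (absent field)
            System.out.println(SlimeUtils.optionalLong(root.field("count")));      // OptionalLong[7]
        }
    }
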
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/serialization/JsonFormat.java b/vespajlib/src/main/java/com/yahoo/tensor/serialization/JsonFormat.java
index 45e581d73e8..9c34875dfd7 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/serialization/JsonFormat.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/serialization/JsonFormat.java
@@ -213,7 +213,7 @@ public class JsonFormat {
if (root.field("cells").valid() && ! primitiveContent(root.field("cells")))
decodeCells(root.field("cells"), builder);
else if (root.field("values").valid() && builder.type().dimensions().stream().allMatch(d -> d.isIndexed()))
- decodeValues(root.field("values"), builder);
+ decodeValuesAtTop(root.field("values"), builder);
else if (root.field("blocks").valid())
decodeBlocks(root.field("blocks"), builder);
else
@@ -252,11 +252,11 @@ public class JsonFormat {
builder.cell(asAddress(key, builder.type()), decodeNumeric(value));
}
- private static void decodeValues(Inspector values, Tensor.Builder builder) {
- decodeValues(values, builder, new MutableInteger(0));
+ private static void decodeValuesAtTop(Inspector values, Tensor.Builder builder) {
+ decodeNestedValues(values, builder, new MutableInteger(0));
}
- private static void decodeValues(Inspector values, Tensor.Builder builder, MutableInteger index) {
+ private static void decodeNestedValues(Inspector values, Tensor.Builder builder, MutableInteger index) {
if ( ! (builder instanceof IndexedTensor.BoundBuilder indexedBuilder))
throw new IllegalArgumentException("An array of values can only be used with a dense tensor. Use a map instead");
if (values.type() == Type.STRING) {
@@ -275,7 +275,7 @@ public class JsonFormat {
values.traverse((ArrayTraverser) (__, value) -> {
if (value.type() == Type.ARRAY)
- decodeValues(value, builder, index);
+ decodeNestedValues(value, builder, index);
else if (value.type() == Type.LONG || value.type() == Type.DOUBLE)
indexedBuilder.cellByDirectIndex(index.next(), value.asDouble());
else
@@ -300,7 +300,7 @@ public class JsonFormat {
if (block.type() != Type.OBJECT)
throw new IllegalArgumentException("Expected an item in a blocks array to be an object, not " + block.type());
mixedBuilder.block(decodeAddress(block.field("address"), mixedBuilder.type().mappedSubtype()),
- decodeValues(block.field("values"), mixedBuilder));
+ decodeValuesInBlock(block.field("values"), mixedBuilder));
}
/** Decodes a tensor value directly at the root, where the format is decided by the tensor type. */
@@ -311,7 +311,7 @@ public class JsonFormat {
if (isArrayOfObjects(root))
decodeCells(root, builder);
else if ( ! hasMapped)
- decodeValues(root, builder);
+ decodeValuesAtTop(root, builder);
else if (hasMapped && hasIndexed)
decodeBlocks(root, builder);
else
@@ -330,7 +330,7 @@ public class JsonFormat {
if (value.type() != Type.ARRAY)
throw new IllegalArgumentException("Expected an item in a blocks array to be an array, not " + value.type());
mixedBuilder.block(asAddress(key, mixedBuilder.type().mappedSubtype()),
- decodeValues(value, mixedBuilder));
+ decodeValuesInBlock(value, mixedBuilder));
}
private static byte decodeHex(String input, int index) {
@@ -408,7 +408,7 @@ public class JsonFormat {
};
}
- private static double[] decodeValues(Inspector valuesField, MixedTensor.BoundBuilder mixedBuilder) {
+ private static double[] decodeValuesInBlock(Inspector valuesField, MixedTensor.BoundBuilder mixedBuilder) {
double[] values = new double[(int)mixedBuilder.denseSubspaceSize()];
if (valuesField.type() == Type.ARRAY) {
if (valuesField.entries() == 0) {
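
The rename separates two unrelated "values" decoders: decodeValuesAtTop fills a dense tensor from (possibly nested) arrays at the top level, while decodeValuesInBlock returns the dense subspace of a single block in a mixed tensor. For orientation, the two JSON shapes look roughly like this (illustrative payloads and tensor types, not taken from the tests):

    class TensorJsonShapesSketch {
        // decodeValuesAtTop: dense tensor, e.g. a hypothetical tensor(x[2],y[2]),
        // with values given as nested arrays.
        static final String DENSE = """
                { "values": [[1.0, 2.0], [3.0, 4.0]] }""";

        // decodeValuesInBlock: mixed tensor, e.g. a hypothetical tensor(cat{},x[2]);
        // each block's "values" array is one dense subspace.
        static final String MIXED = """
                { "blocks": [ { "address": { "cat": "a" }, "values": [1.0, 2.0] } ] }""";
    }
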
diff --git a/vespalib/src/apps/vespa-stress-and-validate-memory/stress_and_validate_memory.cpp b/vespalib/src/apps/vespa-stress-and-validate-memory/stress_and_validate_memory.cpp
index a7a227e45b3..e31f5e6413e 100644
--- a/vespalib/src/apps/vespa-stress-and-validate-memory/stress_and_validate_memory.cpp
+++ b/vespalib/src/apps/vespa-stress-and-validate-memory/stress_and_validate_memory.cpp
@@ -14,6 +14,7 @@
std::atomic<bool> stopped = false;
std::mutex log_mutex;
using namespace vespalib;
+using vespalib::alloc::PtrAndSize;
const char * description =
"Runs stress test of memory by slowly growing a heap filled with 0.\n"
@@ -121,7 +122,6 @@ public:
size_t make_and_load_alloc_per_thread();
void random_write(unsigned int *seed);
private:
- using PtrAndSize = std::pair<void *, size_t>;
const Config & _cfg;
mutable std::mutex _mutex;
alloc::MmapFileAllocator _allocator;
@@ -153,7 +153,7 @@ FileBackedMemory::make_and_load_alloc_per_thread() {
std::lock_guard guard(_mutex);
alloc = _allocator.alloc(cfg().alloc_size());
}
- memset(alloc.first, 0, cfg().alloc_size());
+ memset(alloc.get(), 0, cfg().alloc_size());
std::lock_guard guard(_mutex);
_allocations.push_back(std::move(alloc));
return 1;
@@ -166,7 +166,7 @@ FileBackedMemory::random_write(unsigned int *seed) {
std::lock_guard guard(_mutex);
ptrAndSize = _allocations[rand_r(seed) % _allocations.size()];
}
- memset(ptrAndSize.first, rand_r(seed)%256, ptrAndSize.second);
+ memset(ptrAndSize.get(), rand_r(seed)%256, ptrAndSize.size());
}
void
diff --git a/vespalib/src/tests/btree/btree_test.cpp b/vespalib/src/tests/btree/btree_test.cpp
index afad3523fa3..b2f9f01e517 100644
--- a/vespalib/src/tests/btree/btree_test.cpp
+++ b/vespalib/src/tests/btree/btree_test.cpp
@@ -2,7 +2,6 @@
#include <string>
#include <vespa/vespalib/btree/btreeroot.h>
-#include <vespa/vespalib/btree/btreebuilder.h>
#include <vespa/vespalib/btree/btreenodeallocator.h>
#include <vespa/vespalib/btree/btree.h>
#include <vespa/vespalib/btree/btreestore.h>
@@ -297,6 +296,12 @@ BTreeTest::assertMemoryUsage(const vespalib::MemoryUsage & exp, const vespalib::
return result;
}
+TEST_F(BTreeTest, control_iterator_size) {
+ EXPECT_EQ(120u, sizeof(BTreeIteratorBase<uint32_t, uint32_t, NoAggregated>));
+ EXPECT_EQ(120u, sizeof(BTreeIteratorBase<uint32_t, BTreeNoLeafData, NoAggregated>));
+ EXPECT_EQ(288u, sizeof(MyTree::Iterator));
+}
+
TEST_F(BTreeTest, require_that_node_insert_works)
{
GenerationHandler g;
diff --git a/vespalib/src/tests/datastore/array_store/array_store_test.cpp b/vespalib/src/tests/datastore/array_store/array_store_test.cpp
index 1df03f6eb0a..ba6782334bd 100644
--- a/vespalib/src/tests/datastore/array_store/array_store_test.cpp
+++ b/vespalib/src/tests/datastore/array_store/array_store_test.cpp
@@ -65,6 +65,7 @@ struct ArrayStoreTest : public TestT
EntryRef ref = add(input);
assertGet(ref, input);
}
+ size_t reference_store_count(EntryRef ref) const __attribute__((noinline));
EntryRef add(const ElemVector &input) {
EntryRef result;
if (add_using_allocate) {
@@ -78,7 +79,7 @@ struct ArrayStoreTest : public TestT
// This is default and preferred way of adding an array.
result = store.add(ConstArrayRef(input));
}
- assert(refStore.count(result) == 0);
+ assert(reference_store_count(result) == 0);
refStore.insert(std::make_pair(result, input));
return result;
}
@@ -167,6 +168,13 @@ struct ArrayStoreTest : public TestT
template <typename TestT, typename ElemT, typename RefT>
ArrayStoreTest<TestT, ElemT, RefT>::~ArrayStoreTest() = default;
+template <typename TestT, typename ElemT, typename RefT>
+size_t
+ArrayStoreTest<TestT, ElemT, RefT>::reference_store_count(EntryRef ref) const
+{
+ return refStore.count(ref);
+}
+
struct TestParam {
bool add_using_allocate;
TestParam(bool add_using_allocate_in) : add_using_allocate(add_using_allocate_in) {}
diff --git a/vespalib/src/tests/util/mmap_file_allocator/mmap_file_allocator_test.cpp b/vespalib/src/tests/util/mmap_file_allocator/mmap_file_allocator_test.cpp
index 545733a1ebd..ef16998902e 100644
--- a/vespalib/src/tests/util/mmap_file_allocator/mmap_file_allocator_test.cpp
+++ b/vespalib/src/tests/util/mmap_file_allocator/mmap_file_allocator_test.cpp
@@ -6,6 +6,7 @@
using vespalib::alloc::MemoryAllocator;
using vespalib::alloc::MmapFileAllocator;
+using vespalib::alloc::PtrAndSize;
namespace {
@@ -18,10 +19,10 @@ struct MyAlloc
void* data;
size_t size;
- MyAlloc(MemoryAllocator& allocator_in, MemoryAllocator::PtrAndSize buf)
+ MyAlloc(MemoryAllocator& allocator_in, PtrAndSize buf)
: allocator(allocator_in),
- data(buf.first),
- size(buf.second)
+ data(buf.get()),
+ size(buf.size())
{
}
@@ -30,7 +31,7 @@ struct MyAlloc
allocator.free(data, size);
}
- MemoryAllocator::PtrAndSize asPair() const noexcept { return std::make_pair(data, size); }
+ PtrAndSize asPair() const noexcept { return PtrAndSize(data, size); }
};
}
diff --git a/vespalib/src/vespa/vespalib/btree/btree.h b/vespalib/src/vespa/vespalib/btree/btree.h
index c2f5aac01b7..0099da718a3 100644
--- a/vespalib/src/vespa/vespalib/btree/btree.h
+++ b/vespalib/src/vespa/vespalib/btree/btree.h
@@ -39,50 +39,22 @@ public:
using ConstIterator = typename TreeType::ConstIterator;
using FrozenView = typename TreeType::FrozenView;
using AggrCalcType = typename TreeType::AggrCalcType;
-private:
- NodeAllocatorType _alloc;
- TreeType _tree;
-
- BTree(const BTree &rhs);
- BTree &
- operator=(BTree &rhs);
-
-public:
+ BTree(const BTree &rhs) = delete;
+ BTree & operator=(BTree &rhs) = delete;
BTree();
~BTree();
const NodeAllocatorType &getAllocator() const { return _alloc; }
NodeAllocatorType &getAllocator() { return _alloc; }
-
- void
- disableFreeLists() {
- _alloc.disableFreeLists();
- }
-
- void
- disable_entry_hold_list()
- {
- _alloc.disable_entry_hold_list();
- }
-
- // Inherit doc from BTreeRoot
- void clear() {
- _tree.clear(_alloc);
- }
- void assign(Builder & rhs) {
- _tree.assign(rhs, _alloc);
- }
+ void disableFreeLists() { _alloc.disableFreeLists(); }
+ void disable_entry_hold_list() { _alloc.disable_entry_hold_list(); }
+ void clear() { _tree.clear(_alloc); }
+ void assign(Builder & rhs) { _tree.assign(rhs, _alloc); }
bool insert(const KeyType & key, const DataType & data, CompareT comp = CompareT()) {
return _tree.insert(key, data, _alloc, comp);
}
-
- void
- insert(Iterator &itr,
- const KeyType &key, const DataType &data)
- {
- _tree.insert(itr, key, data);
- }
+ void insert(Iterator &itr, const KeyType &key, const DataType &data) { _tree.insert(itr, key, data); }
Iterator find(const KeyType & key, CompareT comp = CompareT()) const {
return _tree.find(key, _alloc, comp);
@@ -97,55 +69,23 @@ public:
return _tree.remove(key, _alloc, comp);
}
- void
- remove(Iterator &itr)
- {
- _tree.remove(itr);
- }
-
- Iterator begin() const {
- return _tree.begin(_alloc);
- }
- FrozenView getFrozenView() const {
- return _tree.getFrozenView(_alloc);
- }
- size_t size() const {
- return _tree.size(_alloc);
- }
- vespalib::string toString() const {
- return _tree.toString(_alloc);
- }
- bool isValid(CompareT comp = CompareT()) const {
- return _tree.isValid(_alloc, comp);
- }
- bool isValidFrozen(CompareT comp = CompareT()) const {
- return _tree.isValidFrozen(_alloc, comp);
- }
- size_t bitSize() const {
- return _tree.bitSize(_alloc);
- }
+ void remove(Iterator &itr) { _tree.remove(itr); }
+ Iterator begin() const { return _tree.begin(_alloc); }
+ FrozenView getFrozenView() const { return _tree.getFrozenView(_alloc); }
+ size_t size() const { return _tree.size(_alloc); }
+ vespalib::string toString() const { return _tree.toString(_alloc); }
+ bool isValid(CompareT comp = CompareT()) const { return _tree.isValid(_alloc, comp); }
+ bool isValidFrozen(CompareT comp = CompareT()) const { return _tree.isValidFrozen(_alloc, comp); }
+ size_t bitSize() const { return _tree.bitSize(_alloc); }
size_t bitSize(BTreeNode::Ref node) const {
return _tree.bitSize(node, _alloc);
}
- void setRoot(BTreeNode::Ref newRoot) {
- _tree.setRoot(newRoot, _alloc);
- }
- BTreeNode::Ref getRoot() const {
- return _tree.getRoot();
- }
- vespalib::MemoryUsage getMemoryUsage() const {
- return _alloc.getMemoryUsage();
- }
-
- const AggrT &
- getAggregated() const
- {
- return _tree.getAggregated(_alloc);
- }
+ void setRoot(BTreeNode::Ref newRoot) { _tree.setRoot(newRoot, _alloc); }
+ BTreeNode::Ref getRoot() const { return _tree.getRoot(); }
+ vespalib::MemoryUsage getMemoryUsage() const { return _alloc.getMemoryUsage(); }
+ const AggrT & getAggregated() const { return _tree.getAggregated(_alloc); }
- void
- thaw(Iterator &itr)
- {
+ void thaw(Iterator &itr) {
assert(&itr.getAllocator() == &getAllocator());
_tree.thaw(itr);
}
@@ -153,18 +93,17 @@ public:
void compact_worst(const datastore::CompactionStrategy& compaction_strategy);
template <typename FunctionType>
- void
- foreach_key(FunctionType func) const
- {
+ void foreach_key(FunctionType func) const {
_alloc.getNodeStore().foreach_key(_tree.getRoot(), func);
}
template <typename FunctionType>
- void
- foreach(FunctionType func) const
- {
+ void foreach(FunctionType func) const {
_alloc.getNodeStore().foreach(_tree.getRoot(), func);
}
+private:
+ NodeAllocatorType _alloc;
+ TreeType _tree;
};
}
diff --git a/vespalib/src/vespa/vespalib/btree/btreebuilder.hpp b/vespalib/src/vespa/vespalib/btree/btreebuilder.hpp
index c15190a895e..a941689dda2 100644
--- a/vespalib/src/vespa/vespalib/btree/btreebuilder.hpp
+++ b/vespalib/src/vespa/vespalib/btree/btreebuilder.hpp
@@ -3,6 +3,7 @@
#pragma once
#include "btreebuilder.h"
+#include <cassert>
namespace vespalib::btree {
diff --git a/vespalib/src/vespa/vespalib/btree/btreeiterator.h b/vespalib/src/vespa/vespalib/btree/btreeiterator.h
index 4b99edf592a..2418da18c23 100644
--- a/vespalib/src/vespa/vespalib/btree/btreeiterator.h
+++ b/vespalib/src/vespa/vespalib/btree/btreeiterator.h
@@ -5,6 +5,7 @@
#include "btreenode.h"
#include "btreenodeallocator.h"
#include "btreetraits.h"
+#include <cassert>
namespace vespalib::btree {
@@ -35,137 +36,70 @@ class NodeElement
using NodeType = NodeT;
using KeyType = typename NodeType::KeyType;
using DataType = typename NodeType::DataType;
- const NodeType *_node;
- uint32_t _idx;
+ uint64_t _nodeAndIdx;
- NodeType *
- getWNode() const
- {
- return const_cast<NodeType *>(_node);
- }
+ NodeType * getWNode() const { return const_cast<NodeType *>(getNode()); }
+ static constexpr uint8_t NODE_BITS = 57;
+ static constexpr uint8_t IDX_BITS = 64 - NODE_BITS;
+ static constexpr uint64_t NODE_MASK = (1ul << NODE_BITS) - 1ul;
+ static constexpr uint64_t IDX_MASK = (1ul << IDX_BITS) - 1ul;
+ static constexpr uint8_t IDX_SHIFT = NODE_BITS;
+ static constexpr uint64_t IDX_ONE = 1ul << NODE_BITS;
+ static_assert((NodeType::maxSlots() + 1) < (1ul << IDX_BITS), "IDX can be out of bounds above 127");
public:
- NodeElement()
- : _node(nullptr),
- _idx(0u)
- {
- }
-
- NodeElement(const NodeType *node, uint32_t idx)
- : _node(node),
- _idx(idx)
- {
- }
-
- void
- setNode(const NodeType *node)
- {
- _node = node;
- }
-
- const NodeType *
- getNode() const
- {
- return _node;
- }
+ NodeElement() noexcept : _nodeAndIdx(0ul) { }
+ NodeElement(const NodeType *node, uint32_t idx) noexcept
+ : _nodeAndIdx(uint64_t(node) | uint64_t(idx) << IDX_SHIFT)
+ { }
- void
- setIdx(uint32_t idx)
- {
- _idx = idx;
+ void invalidate() noexcept { _nodeAndIdx = 0; }
+ void setNode(const NodeType *node) noexcept {
+ _nodeAndIdx = (_nodeAndIdx & ~NODE_MASK) | uint64_t(node);
}
-
- uint32_t
- getIdx() const
- {
- return _idx;
+ const NodeType * getNode() const noexcept { return reinterpret_cast<const NodeType *>(_nodeAndIdx & NODE_MASK); }
+ void setIdx(uint32_t idx) noexcept {
+ _nodeAndIdx = (_nodeAndIdx & NODE_MASK) | (uint64_t(idx) << IDX_SHIFT);
}
+ uint32_t getIdx() const noexcept { return _nodeAndIdx >> IDX_SHIFT; }
+ void incIdx() noexcept { _nodeAndIdx += IDX_ONE; }
+ void decIdx() noexcept { _nodeAndIdx -= IDX_ONE; }
- void
- incIdx()
- {
- ++_idx;
- }
-
- void
- decIdx()
- {
- --_idx;
- }
-
- void
- setNodeAndIdx(const NodeType *node, uint32_t idx)
- {
- _node = node;
- _idx = idx;
- }
-
- const KeyType &
- getKey() const
- {
- return _node->getKey(_idx);
- }
-
- const DataType &
- getData() const
- {
- return _node->getData(_idx);
+ void setNodeAndIdx(const NodeType *node, uint32_t idx) noexcept {
+ _nodeAndIdx = uint64_t(node) | uint64_t(idx) << IDX_SHIFT;
}
+ const KeyType & getKey() const noexcept { return getNode()->getKey(getIdx()); }
+ const DataType & getData() const noexcept { return getNode()->getData(getIdx()); }
// Only use during compaction when changing reference to moved value
- DataType &getWData() { return getWNode()->getWData(_idx); }
-
- bool
- valid() const
- {
- return _node != nullptr;
+ DataType &getWData() noexcept { return getWNode()->getWData(getIdx()); }
+ bool valid() const noexcept { return _nodeAndIdx != 0; }
+ void adjustLeftVictimKilled() noexcept {
+ assert(getIdx() > 0);
+ decIdx();
}
- void
- adjustLeftVictimKilled()
- {
- assert(_idx > 0);
- --_idx;
+ void adjustSteal(uint32_t stolen) noexcept {
+ assert(getIdx() + stolen < getNode()->validSlots());
+ setIdx(getIdx() + stolen);
}
- void
- adjustSteal(uint32_t stolen)
- {
- assert(_idx + stolen < _node->validSlots());
- _idx += stolen;
- }
-
- void
- adjustSplit(bool inRightSplit)
- {
+ void adjustSplit(bool inRightSplit) noexcept {
if (inRightSplit)
- ++_idx;
+ incIdx();
}
- bool
- adjustSplit(bool inRightSplit, const NodeType *splitNode)
- {
+ bool adjustSplit(bool inRightSplit, const NodeType *splitNode) noexcept {
adjustSplit(inRightSplit);
- if (_idx >= _node->validSlots()) {
- _idx -= _node->validSlots();
- _node = splitNode;
+ if (getIdx() >= getNode()->validSlots()) {
+ setNodeAndIdx(splitNode, getIdx() - getNode()->validSlots());
return true;
}
return false;
}
- void
- swap(NodeElement &rhs)
- {
- std::swap(_node, rhs._node);
- std::swap(_idx, rhs._idx);
- }
-
- bool
- operator!=(const NodeElement &rhs) const
- {
- return _node != rhs._node ||
- _idx != rhs._idx;
+ bool operator!=(const NodeElement &rhs) const noexcept {
+ return _nodeAndIdx != rhs._nodeAndIdx;
}
};
@@ -183,9 +117,7 @@ template <typename KeyT,
class BTreeIteratorBase
{
protected:
- using NodeAllocatorType = BTreeNodeAllocator<KeyT, DataT, AggrT,
- INTERNAL_SLOTS,
- LEAF_SLOTS>;
+ using NodeAllocatorType = BTreeNodeAllocator<KeyT, DataT, AggrT, INTERNAL_SLOTS, LEAF_SLOTS>;
using InternalNodeType = BTreeInternalNode<KeyT, AggrT, INTERNAL_SLOTS>;
using LeafNodeType = BTreeLeafNode<KeyT, DataT, AggrT, LEAF_SLOTS> ;
using InternalNodeTypeRefPair = typename InternalNodeType::RefPair;
@@ -225,7 +157,6 @@ protected:
// Temporary leaf node when iterating over short arrays
std::unique_ptr<LeafNodeTempType> _compatLeafNode;
-
private:
/*
* Find the next leaf node, called by operator++() as needed.
@@ -243,8 +174,7 @@ protected:
*
* @param pidx Number of levels above leaf nodes to take into account.
*/
- size_t
- position(uint32_t pidx) const;
+ size_t position(uint32_t pidx) const;
/**
* Create iterator pointing to first element in the tree referenced
@@ -268,13 +198,12 @@ protected:
/**
* Default constructor. Iterator is not associated with a tree.
*/
- BTreeIteratorBase();
+ BTreeIteratorBase() noexcept;
/**
* Step iterator forwards. If at end then leave it at end.
*/
- BTreeIteratorBase &
- operator++() {
+ BTreeIteratorBase & operator++() {
if (_leaf.getNode() == nullptr) {
return *this;
}
@@ -290,8 +219,7 @@ protected:
* Step iterator backwards. If at end then place it at last valid
* position in tree (cf. rbegin())
*/
- BTreeIteratorBase &
- operator--();
+ BTreeIteratorBase & operator--();
~BTreeIteratorBase();
BTreeIteratorBase(const BTreeIteratorBase &other);
@@ -311,9 +239,7 @@ protected:
* from this iterator position to end of subtree.
*/
template <typename FunctionType>
- void
- foreach_key_range_start(uint32_t level, FunctionType func) const
- {
+ void foreach_key_range_start(uint32_t level, FunctionType func) const {
if (level > 0u) {
--level;
foreach_key_range_start(level, func);
@@ -332,9 +258,7 @@ protected:
* subtree before this iterator position).
*/
template <typename FunctionType>
- void
- foreach_key_range_end(uint32_t level, FunctionType func) const
- {
+ void foreach_key_range_end(uint32_t level, FunctionType func) const {
if (level > 0u) {
--level;
auto &store = _allocator->getNodeStore();
@@ -348,8 +272,7 @@ protected:
}
public:
- bool
- operator==(const BTreeIteratorBase & rhs) const {
+ bool operator==(const BTreeIteratorBase & rhs) const {
if (_leaf.getIdx() != rhs._leaf.getIdx()) {
return false;
}
@@ -367,83 +290,55 @@ public:
return true;
}
- bool
- operator!=(const BTreeIteratorBase & rhs) const
- {
- return !operator==(rhs);
- }
+ bool operator!=(const BTreeIteratorBase & rhs) const { return !operator==(rhs); }
/**
* Swap iterator with the other.
*
* @param rhs Other iterator.
*/
- void
- swap(BTreeIteratorBase & rhs);
+ void swap(BTreeIteratorBase & rhs);
/**
* Get key at current iterator location.
*/
- const KeyType &
- getKey() const
- {
- return _leaf.getKey();
- }
+ const KeyType & getKey() const { return _leaf.getKey(); }
/**
* Get data at current iterator location.
*/
- const DataType &
- getData() const
- {
- return _leaf.getData();
- }
+ const DataType & getData() const { return _leaf.getData(); }
/**
* Check if iterator is at a valid element, i.e. not at end.
*/
- bool
- valid() const
- {
- return _leaf.valid();
- }
+ bool valid() const { return _leaf.valid(); }
/**
* Return the number of elements in the tree.
*/
- size_t
- size() const;
+ size_t size() const;
/**
* Return the current position in the tree.
*/
- size_t
- position() const
- {
- return position(_pathSize);
- }
+ size_t position() const { return position(_pathSize); }
/**
* Return the distance between two positions in the tree.
*/
- ssize_t
- operator-(const BTreeIteratorBase &rhs) const;
+ ssize_t operator-(const BTreeIteratorBase &rhs) const;
/**
* Return if the tree has data or not (e.g. keys and data or only keys).
*/
- static bool
- hasData()
- {
- return LeafNodeType::hasData();
- }
+ static bool hasData() { return LeafNodeType::hasData(); }
/**
* Move the iterator directly to end. Used by findHelper method in BTree.
*/
- void
- setupEnd();
+ void setupEnd();
/**
* Setup iterator to be empty and not be associated with any tree.
@@ -453,50 +348,41 @@ public:
/**
* Move iterator to beyond last element in the current tree.
*/
- void
- end() __attribute__((noinline));
+ void end() __attribute__((noinline));
/**
* Move iterator to beyond last element in the given tree.
*
* @param rootRef Reference to root of tree.
*/
- void
- end(BTreeNode::Ref rootRef);
+ void end(BTreeNode::Ref rootRef);
/**
* Move iterator to first element in the current tree.
*/
- void
- begin();
+ void begin();
/**
* Move iterator to first element in the given tree.
*
* @param rootRef Reference to root of tree.
*/
- void
- begin(BTreeNode::Ref rootRef);
+ void begin(BTreeNode::Ref rootRef);
/**
* Move iterator to last element in the current tree.
*/
- void
- rbegin();
+ void rbegin();
/*
* Get aggregated values for the current tree.
*/
- const AggrT &
- getAggregated() const;
+ const AggrT & getAggregated() const;
- bool
- identical(const BTreeIteratorBase &rhs) const;
+ bool identical(const BTreeIteratorBase &rhs) const;
template <typename FunctionType>
- void
- foreach_key(FunctionType func) const
- {
+ void foreach_key(FunctionType func) const {
if (_pathSize > 0) {
_path[_pathSize - 1].getNode()->
foreach_key(_allocator->getNodeStore(), func);
@@ -511,9 +397,7 @@ public:
* range [this iterator, end_itr)).
*/
template <typename FunctionType>
- void
- foreach_key_range(const BTreeIteratorBase &end_itr, FunctionType func) const
- {
+ void foreach_key_range(const BTreeIteratorBase &end_itr, FunctionType func) const {
if (!valid()) {
return;
}
@@ -584,9 +468,7 @@ class BTreeConstIterator : public BTreeIteratorBase<KeyT, DataT, AggrT,
TraitsT::PATH_SIZE>
{
protected:
- using ParentType = BTreeIteratorBase<KeyT,
- DataT,
- AggrT,
+ using ParentType = BTreeIteratorBase<KeyT, DataT, AggrT,
TraitsT::INTERNAL_SLOTS,
TraitsT::LEAF_SLOTS,
TraitsT::PATH_SIZE>;
@@ -645,17 +527,12 @@ public:
/**
* Default constructor. Iterator is not associated with a tree.
*/
- BTreeConstIterator()
- : ParentType()
- {
- }
+ BTreeConstIterator() noexcept : ParentType() { }
/**
* Step iterator forwards. If at end then leave it at end.
*/
- BTreeConstIterator &
- operator++()
- {
+ BTreeConstIterator & operator++() {
ParentType::operator++();
return *this;
}
@@ -664,9 +541,7 @@ public:
* Step iterator backwards. If at end then place it at last valid
* position in tree (cf. rbegin())
*/
- BTreeConstIterator &
- operator--()
- {
+ BTreeConstIterator & operator--() {
ParentType::operator--();
return *this;
}
@@ -679,8 +554,7 @@ public:
* @param key Key to search for
* @param comp Comparator for the tree ordering.
*/
- void
- lower_bound(const KeyType & key, CompareT comp = CompareT());
+ void lower_bound(const KeyType & key, CompareT comp = CompareT());
/**
* Position iterator at first position with a key that is greater
@@ -689,9 +563,7 @@ public:
* @param key Key to search for
* @param comp Comparator for the tree ordering.
*/
- void
- lower_bound(BTreeNode::Ref rootRef,
- const KeyType & key, CompareT comp = CompareT());
+ void lower_bound(BTreeNode::Ref rootRef, const KeyType & key, CompareT comp = CompareT());
/**
* Step iterator forwards until it is at a position with a key
@@ -704,8 +576,7 @@ public:
* @param key Key to search for
* @param comp Comparator for the tree ordering.
*/
- void
- seek(const KeyType &key, CompareT comp = CompareT());
+ void seek(const KeyType &key, CompareT comp = CompareT());
/**
* Step iterator forwards until it is at a position with a key
@@ -717,8 +588,7 @@ public:
* @param key Key to search for
* @param comp Comparator for the tree ordering.
*/
- void
- binarySeek(const KeyType &key, CompareT comp = CompareT());
+ void binarySeek(const KeyType &key, CompareT comp = CompareT());
/**
* Step iterator forwards until it is at a position with a key
@@ -730,8 +600,7 @@ public:
* @param key Key to search for
* @param comp Comparator for the tree ordering.
*/
- void
- linearSeek(const KeyType &key, CompareT comp = CompareT());
+ void linearSeek(const KeyType &key, CompareT comp = CompareT());
/**
* Step iterator forwards until it is at a position with a key
@@ -744,8 +613,7 @@ public:
* @param key Key to search for
* @param comp Comparator for the tree ordering.
*/
- void
- seekPast(const KeyType &key, CompareT comp = CompareT());
+ void seekPast(const KeyType &key, CompareT comp = CompareT());
/**
* Step iterator forwards until it is at a position with a key
@@ -757,8 +625,7 @@ public:
* @param key Key to search for
* @param comp Comparator for the tree ordering.
*/
- void
- binarySeekPast(const KeyType &key, CompareT comp = CompareT());
+ void binarySeekPast(const KeyType &key, CompareT comp = CompareT());
/**
* Step iterator forwards until it is at a position with a key
@@ -770,8 +637,7 @@ public:
* @param key Key to search for
* @param comp Comparator for the tree ordering.
*/
- void
- linearSeekPast(const KeyType &key, CompareT comp = CompareT());
+ void linearSeekPast(const KeyType &key, CompareT comp = CompareT());
/**
* Validate the iterator as a valid iterator or positioned at
@@ -781,8 +647,7 @@ public:
* @param rootRef Reference to root of tree to operate on
* @param comp Comparator for the tree ordering.
*/
- void
- validate(BTreeNode::Ref rootRef, CompareT comp = CompareT());
+ void validate(BTreeNode::Ref rootRef, CompareT comp = CompareT());
};
@@ -795,15 +660,10 @@ template <typename KeyT,
typename AggrT = NoAggregated,
typename CompareT = std::less<KeyT>,
typename TraitsT = BTreeDefaultTraits>
-class BTreeIterator : public BTreeConstIterator<KeyT, DataT, AggrT,
- CompareT, TraitsT>
+class BTreeIterator : public BTreeConstIterator<KeyT, DataT, AggrT, CompareT, TraitsT>
{
public:
- using ParentType = BTreeConstIterator<KeyT,
- DataT,
- AggrT,
- CompareT,
- TraitsT>;
+ using ParentType = BTreeConstIterator<KeyT, DataT, AggrT, CompareT, TraitsT>;
using NodeAllocatorType = typename ParentType::NodeAllocatorType;
using InternalNodeType = typename ParentType::InternalNodeType;
using LeafNodeType = typename ParentType::LeafNodeType;
@@ -844,40 +704,27 @@ public:
{
}
- BTreeIterator()
- : ParentType()
- {
- }
+ BTreeIterator() : ParentType() { }
- BTreeIterator &
- operator++()
- {
+ BTreeIterator & operator++() {
ParentType::operator++();
return *this;
}
- BTreeIterator &
- operator--()
- {
+ BTreeIterator & operator--() {
ParentType::operator--();
return *this;
}
- NodeAllocatorType &
- getAllocator() const
- {
+ NodeAllocatorType & getAllocator() const {
return const_cast<NodeAllocatorType &>(*_allocator);
}
- BTreeNode::Ref
- moveFirstLeafNode(BTreeNode::Ref rootRef);
+ BTreeNode::Ref moveFirstLeafNode(BTreeNode::Ref rootRef);
- void
- moveNextLeafNode();
+ void moveNextLeafNode();
- void
- writeData(const DataType &data)
- {
+ void writeData(const DataType &data) {
_leaf.getWNode()->writeData(_leaf.getIdx(), data);
}
@@ -889,8 +736,7 @@ public:
* The new key must have the same semantic meaning as the old key.
* Typically used when compacting data store containing keys.
*/
- void
- writeKey(const KeyType &key);
+ void writeKey(const KeyType &key);
/**
* Update data at the current iterator position. The tree should
@@ -900,71 +746,33 @@ public:
* @param aggrCalc Calculator for updating aggregated information.
*/
template <class AggrCalcT>
- void
- updateData(const DataType &data, const AggrCalcT &aggrCalc);
+ void updateData(const DataType &data, const AggrCalcT &aggrCalc);
/**
* Thaw a path from the root node down to the current leaf node in
* the current tree, allowing for updates to be performed without
* disturbing the frozen version of the tree.
*/
- BTreeNode::Ref
- thaw(BTreeNode::Ref rootRef);
+ BTreeNode::Ref thaw(BTreeNode::Ref rootRef);
private:
/* Insert into empty tree */
template <class AggrCalcT>
- BTreeNode::Ref
- insertFirst(const KeyType &key, const DataType &data,
- const AggrCalcT &aggrCalc);
-
- LeafNodeType *
- getLeafNode() const
- {
- return _leaf.getWNode();
- }
-
- bool
- setLeafNodeIdx(uint32_t idx, const LeafNodeType *splitLeafNode);
-
- void
- setLeafNodeIdx(uint32_t idx)
- {
- _leaf.setIdx(idx);
- }
-
- uint32_t
- getLeafNodeIdx() const
- {
- return _leaf.getIdx();
- }
-
- uint32_t
- getPathSize() const
- {
- return _pathSize;
- }
-
- PathElement &
- getPath(uint32_t pidx)
- {
- return _path[pidx];
- }
+ BTreeNode::Ref insertFirst(const KeyType &key, const DataType &data, const AggrCalcT &aggrCalc);
+ LeafNodeType * getLeafNode() const { return _leaf.getWNode(); }
+ bool setLeafNodeIdx(uint32_t idx, const LeafNodeType *splitLeafNode);
+ void setLeafNodeIdx(uint32_t idx) { _leaf.setIdx(idx); }
+ uint32_t getLeafNodeIdx() const { return _leaf.getIdx(); }
+ uint32_t getPathSize() const { return _pathSize; }
+ PathElement & getPath(uint32_t pidx) { return _path[pidx]; }
template <class AggrCalcT>
- BTreeNode::Ref
- addLevel(BTreeNode::Ref rootRef, BTreeNode::Ref splitNodeRef,
- bool inRightSplit, const AggrCalcT &aggrCalc);
+ BTreeNode::Ref addLevel(BTreeNode::Ref rootRef, BTreeNode::Ref splitNodeRef, bool inRightSplit, const AggrCalcT &aggrCalc);
- BTreeNode::Ref
- removeLevel(BTreeNode::Ref rootRef, InternalNodeType *rootNode);
+ BTreeNode::Ref removeLevel(BTreeNode::Ref rootRef, InternalNodeType *rootNode);
+ void removeLast(BTreeNode::Ref rootRef);
- void
- removeLast(BTreeNode::Ref rootRef);
-
- void
- adjustSteal(uint32_t level, bool leftVictimKilled, uint32_t stolen)
- {
+ void adjustSteal(uint32_t level, bool leftVictimKilled, uint32_t stolen) {
assert(_pathSize > level);
if (leftVictimKilled) {
_path[level].adjustLeftVictimKilled();
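
The substantive change in this header is that NodeElement no longer stores a node pointer plus a 32-bit index: both are packed into a single uint64_t, with the low 57 bits holding the pointer and the top 7 bits holding the slot index, which is consistent with the 120-byte iterator size pinned by the new control_iterator_size test in btree_test.cpp. A language-neutral sketch of the masking and shifting involved, written here in Java with a long "node handle" standing in for the raw pointer (purely illustrative, not the C++ class itself):

    // Sketch: pack a 57-bit node handle and a 7-bit slot index into one long.
    // In the real C++ code the handle is the node pointer; here it is a hypothetical
    // opaque value that fits in 57 bits.
    final class PackedNodeElementSketch {
        private static final int  NODE_BITS = 57;
        private static final long NODE_MASK = (1L << NODE_BITS) - 1;
        private static final int  IDX_SHIFT = NODE_BITS;
        private static final long IDX_ONE   = 1L << IDX_SHIFT;

        private long nodeAndIdx;                         // 0 means "invalid"

        void set(long nodeHandle, int idx) { nodeAndIdx = (nodeHandle & NODE_MASK) | ((long) idx << IDX_SHIFT); }
        long node()       { return nodeAndIdx & NODE_MASK; }
        int  idx()        { return (int) (nodeAndIdx >>> IDX_SHIFT); }
        void incIdx()     { nodeAndIdx += IDX_ONE; }     // bumping the index never touches the handle bits
        void decIdx()     { nodeAndIdx -= IDX_ONE; }     // caller must ensure idx() > 0, as adjustLeftVictimKilled asserts
        boolean valid()   { return nodeAndIdx != 0; }
        void invalidate() { nodeAndIdx = 0; }
    }

The trade-off: every node access now masks the word, but element equality collapses to a single 64-bit compare and the element shrinks from 16 bytes (pointer plus padded index) to 8.
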
diff --git a/vespalib/src/vespa/vespalib/btree/btreeiterator.hpp b/vespalib/src/vespa/vespalib/btree/btreeiterator.hpp
index 8ecd26835c4..5884bb2849b 100644
--- a/vespalib/src/vespa/vespalib/btree/btreeiterator.hpp
+++ b/vespalib/src/vespa/vespalib/btree/btreeiterator.hpp
@@ -22,8 +22,8 @@ BTreeIteratorBase(const BTreeIteratorBase &other)
for (size_t i = 0; i < _pathSize; ++i) {
_path[i] = other._path[i];
}
- if (other._compatLeafNode.get()) {
- _compatLeafNode.reset( new LeafNodeTempType(*other._compatLeafNode));
+ if (other._compatLeafNode) {
+ _compatLeafNode = std::make_unique<LeafNodeTempType>(*other._compatLeafNode);
}
if (other._leaf.getNode() == other._compatLeafNode.get()) {
_leaf.setNode(_compatLeafNode.get());
@@ -57,7 +57,7 @@ clearPath(uint32_t pathSize)
uint32_t level = _pathSize;
while (level > pathSize) {
--level;
- _path[level].setNodeAndIdx(nullptr, 0u);
+ _path[level].invalidate();
}
_pathSize = pathSize;
}
@@ -87,7 +87,7 @@ void
BTreeIteratorBase<KeyT, DataT, AggrT, INTERNAL_SLOTS, LEAF_SLOTS, PATH_SIZE>::
setupEnd()
{
- _leaf.setNodeAndIdx(nullptr, 0u);
+ _leaf.invalidate();
}
@@ -98,7 +98,7 @@ BTreeIteratorBase<KeyT, DataT, AggrT, INTERNAL_SLOTS, LEAF_SLOTS, PATH_SIZE>::
setupEmpty()
{
clearPath(0u);
- _leaf.setNodeAndIdx(nullptr, 0u);
+ _leaf.invalidate();
_leafRoot = nullptr;
}
@@ -112,7 +112,7 @@ end()
if (_pathSize == 0) {
if (_leafRoot == nullptr)
return;
- _leaf.setNodeAndIdx(nullptr, 0u);
+ _leaf.invalidate();
return;
}
uint32_t level = _pathSize - 1;
@@ -131,7 +131,7 @@ end()
assert(childRef.valid());
}
assert(_allocator->isLeafRef(childRef));
- _leaf.setNodeAndIdx(nullptr, 0u);
+ _leaf.invalidate();
}
@@ -149,7 +149,7 @@ end(BTreeNode::Ref rootRef)
clearPath(0u);
const LeafNodeType *lnode = _allocator->mapLeafRef(rootRef);
_leafRoot = lnode;
- _leaf.setNodeAndIdx(nullptr, 0u);
+ _leaf.invalidate();
return;
}
_leafRoot = nullptr;
@@ -171,7 +171,7 @@ end(BTreeNode::Ref rootRef)
childRef = inode->getChild(idx - 1);
assert(childRef.valid());
}
- _leaf.setNodeAndIdx(nullptr, 0u);
+ _leaf.invalidate();
}
@@ -199,7 +199,7 @@ findNextLeafNode()
return;
}
}
- _leaf.setNodeAndIdx(nullptr, 0u);
+ _leaf.invalidate();
}
@@ -435,8 +435,8 @@ BTreeIteratorBase(const KeyDataType *shortArray,
_leafRoot(nullptr),
_compatLeafNode()
{
- if(arraySize > 0) {
- _compatLeafNode.reset(new LeafNodeTempType(shortArray, arraySize));
+ if (arraySize > 0) {
+ _compatLeafNode = std::make_unique<LeafNodeTempType>(shortArray, arraySize);
_leaf.setNode(_compatLeafNode.get());
_leafRoot = _leaf.getNode();
if constexpr (AggrCalcT::hasAggregated()) {
@@ -450,7 +450,7 @@ BTreeIteratorBase(const KeyDataType *shortArray,
template <typename KeyT, typename DataT, typename AggrT,
uint32_t INTERNAL_SLOTS, uint32_t LEAF_SLOTS, uint32_t PATH_SIZE>
BTreeIteratorBase<KeyT, DataT, AggrT, INTERNAL_SLOTS, LEAF_SLOTS, PATH_SIZE>::
-BTreeIteratorBase()
+BTreeIteratorBase() noexcept
: _leaf(nullptr, 0u),
_path(),
_pathSize(0),
@@ -558,7 +558,7 @@ lower_bound(const KeyType & key, CompareT comp)
return;
uint32_t idx = _leafRoot->template lower_bound<CompareT>(key, comp);
if (idx >= _leafRoot->validSlots()) {
- _leaf.setNodeAndIdx(nullptr, 0u);
+ _leaf.invalidate();
} else {
_leaf.setNodeAndIdx(_leafRoot, idx);
}
@@ -608,7 +608,7 @@ lower_bound(BTreeNode::Ref rootRef, const KeyType & key, CompareT comp)
_leafRoot = lnode;
uint32_t idx = lnode->template lower_bound<CompareT>(key, comp);
if (idx >= lnode->validSlots()) {
- _leaf.setNodeAndIdx(nullptr, 0u);
+ _leaf.invalidate();
} else {
_leaf.setNodeAndIdx(lnode, idx);
}
@@ -1269,7 +1269,7 @@ removeLevel(BTreeNode::Ref rootRef, InternalNodeType *rootNode)
NodeAllocatorType &allocator(getAllocator());
allocator.holdNode(rootRef, rootNode);
--_pathSize;
- _path[_pathSize].setNodeAndIdx(nullptr, 0u);
+ _path[_pathSize].invalidate();
if (_pathSize == 0) {
_leafRoot = _leaf.getNode();
}
@@ -1286,7 +1286,7 @@ removeLast(BTreeNode::Ref rootRef)
NodeAllocatorType &allocator(getAllocator());
allocator.holdNode(rootRef, getLeafNode());
_leafRoot = nullptr;
- _leaf.setNode(nullptr);
+ _leaf.invalidate();
}
template <typename KeyT, typename DataT, typename AggrT, typename CompareT, typename TraitsT>
diff --git a/vespalib/src/vespa/vespalib/btree/btreenode.h b/vespalib/src/vespa/vespalib/btree/btreenode.h
index 1196b172d1f..0a77a0b4685 100644
--- a/vespalib/src/vespa/vespalib/btree/btreenode.h
+++ b/vespalib/src/vespa/vespalib/btree/btreenode.h
@@ -39,20 +39,20 @@ public:
static constexpr uint8_t LEAF_LEVEL = 0;
protected:
uint16_t _validSlots;
- BTreeNode(uint8_t level)
+ BTreeNode(uint8_t level) noexcept
: _level(level),
_isFrozen(false),
_validSlots(0)
{}
- BTreeNode(const BTreeNode &rhs)
+ BTreeNode(const BTreeNode &rhs) noexcept
: _level(rhs._level),
_isFrozen(rhs._isFrozen),
_validSlots(rhs._validSlots)
{}
BTreeNode &
- operator=(const BTreeNode &rhs)
+ operator=(const BTreeNode &rhs) noexcept
{
assert(!_isFrozen);
_level = rhs._level;
@@ -89,8 +89,8 @@ class BTreeNodeDataWrap
public:
DataT _data[NumSlots];
- BTreeNodeDataWrap() : _data() {}
- ~BTreeNodeDataWrap() { }
+ BTreeNodeDataWrap() noexcept : _data() {}
+ ~BTreeNodeDataWrap() = default;
void copyData(const BTreeNodeDataWrap &rhs, uint32_t validSlots) {
const DataT *rdata = rhs._data;
@@ -100,11 +100,11 @@ public:
*ldata = *rdata;
}
- const DataT &getData(uint32_t idx) const { return _data[idx]; }
+ const DataT &getData(uint32_t idx) const noexcept { return _data[idx]; }
// Only use during compaction when changing reference to moved value
- DataT &getWData(uint32_t idx) { return _data[idx]; }
- void setData(uint32_t idx, const DataT &data) { _data[idx] = data; }
- static bool hasData() { return true; }
+ DataT &getWData(uint32_t idx) noexcept { return _data[idx]; }
+ void setData(uint32_t idx, const DataT &data) noexcept { _data[idx] = data; }
+ static bool hasData() noexcept { return true; }
};
@@ -112,7 +112,7 @@ template <uint32_t NumSlots>
class BTreeNodeDataWrap<BTreeNoLeafData, NumSlots>
{
public:
- BTreeNodeDataWrap() {}
+ BTreeNodeDataWrap() noexcept {}
void copyData(const BTreeNodeDataWrap &rhs, uint32_t validSlots) {
(void) rhs;
@@ -145,7 +145,7 @@ class BTreeNodeAggregatedWrap
static AggrT _instance;
public:
- BTreeNodeAggregatedWrap()
+ BTreeNodeAggregatedWrap() noexcept
: _aggr()
{}
AggrT &getAggregated() { return _aggr; }
@@ -161,7 +161,7 @@ class BTreeNodeAggregatedWrap<NoAggregated>
static NoAggregated _instance;
public:
- BTreeNodeAggregatedWrap() {}
+ BTreeNodeAggregatedWrap() noexcept {}
NoAggregated &getAggregated() { return _instance; }
const NoAggregated &getAggregated() const { return _instance; }
@@ -174,14 +174,14 @@ template <typename KeyT, uint32_t NumSlots>
class BTreeNodeT : public BTreeNode {
protected:
KeyT _keys[NumSlots];
- BTreeNodeT(uint8_t level)
+ BTreeNodeT(uint8_t level) noexcept
: BTreeNode(level),
_keys()
{}
~BTreeNodeT() = default;
- BTreeNodeT(const BTreeNodeT &rhs)
+ BTreeNodeT(const BTreeNodeT &rhs) noexcept
: BTreeNode(rhs)
{
const KeyT *rkeys = rhs._keys;
@@ -192,7 +192,7 @@ protected:
}
BTreeNodeT &
- operator=(const BTreeNodeT &rhs)
+ operator=(const BTreeNodeT &rhs) noexcept
{
BTreeNode::operator=(rhs);
const KeyT *rkeys = rhs._keys;
@@ -204,16 +204,16 @@ protected:
}
public:
- const KeyT & getKey(uint32_t idx) const { return _keys[idx]; }
- const KeyT & getLastKey() const { return _keys[validSlots() - 1]; }
- void writeKey(uint32_t idx, const KeyT & key) {
+ const KeyT & getKey(uint32_t idx) const noexcept { return _keys[idx]; }
+ const KeyT & getLastKey() const noexcept { return _keys[validSlots() - 1]; }
+ void writeKey(uint32_t idx, const KeyT & key) noexcept {
if constexpr (std::is_same_v<KeyT, vespalib::datastore::AtomicEntryRef>) {
_keys[idx].store_release(key.load_relaxed());
} else {
_keys[idx] = key;
}
}
- void write_key_relaxed(uint32_t idx, const KeyT & key) { _keys[idx] = key; }
+ void write_key_relaxed(uint32_t idx, const KeyT & key) noexcept { _keys[idx] = key; }
template <typename CompareT>
uint32_t lower_bound(uint32_t sidx, const KeyT & key, CompareT comp) const;
@@ -224,10 +224,10 @@ public:
template <typename CompareT>
uint32_t upper_bound(uint32_t sidx, const KeyT & key, CompareT comp) const;
- bool isFull() const { return validSlots() == NumSlots; }
- bool isAtLeastHalfFull() const { return validSlots() >= minSlots(); }
- static uint32_t maxSlots() { return NumSlots; }
- static uint32_t minSlots() { return NumSlots / 2; }
+ bool isFull() const noexcept { return validSlots() == NumSlots; }
+ bool isAtLeastHalfFull() const noexcept { return validSlots() >= minSlots(); }
+ static constexpr uint32_t maxSlots() noexcept { return NumSlots; }
+ static constexpr uint32_t minSlots() noexcept { return NumSlots / 2; }
};
template <typename KeyT, typename DataT, typename AggrT, uint32_t NumSlots>
@@ -247,14 +247,14 @@ public:
using DataWrapType::setData;
using DataWrapType::copyData;
protected:
- BTreeNodeTT(uint8_t level)
+ BTreeNodeTT(uint8_t level) noexcept
: ParentType(level),
DataWrapType()
{}
- ~BTreeNodeTT() {}
+ ~BTreeNodeTT() = default;
- BTreeNodeTT(const BTreeNodeTT &rhs)
+ BTreeNodeTT(const BTreeNodeTT &rhs) noexcept
: ParentType(rhs),
DataWrapType(rhs),
AggrWrapType(rhs)
@@ -262,7 +262,7 @@ protected:
copyData(rhs, _validSlots);
}
- BTreeNodeTT &operator=(const BTreeNodeTT &rhs) {
+ BTreeNodeTT &operator=(const BTreeNodeTT &rhs) noexcept {
ParentType::operator=(rhs);
AggrWrapType::operator=(rhs);
copyData(rhs, _validSlots);
@@ -325,19 +325,19 @@ public:
private:
uint32_t _validLeaves;
protected:
- BTreeInternalNode()
+ BTreeInternalNode() noexcept
: ParentType(EMPTY_LEVEL),
_validLeaves(0u)
{}
- BTreeInternalNode(const BTreeInternalNode &rhs)
+ BTreeInternalNode(const BTreeInternalNode &rhs) noexcept
: ParentType(rhs),
_validLeaves(rhs._validLeaves)
{}
- ~BTreeInternalNode() {}
+ ~BTreeInternalNode() = default;
- BTreeInternalNode &operator=(const BTreeInternalNode &rhs) {
+ BTreeInternalNode &operator=(const BTreeInternalNode &rhs) noexcept {
ParentType::operator=(rhs);
_validLeaves = rhs._validLeaves;
return *this;
@@ -430,8 +430,7 @@ public:
}
};
-template <typename KeyT, typename DataT, typename AggrT,
- uint32_t NumSlots = 16>
+template <typename KeyT, typename DataT, typename AggrT, uint32_t NumSlots = 16>
class BTreeLeafNode : public BTreeNodeTT<KeyT, DataT, AggrT, NumSlots>
{
public:
@@ -460,17 +459,17 @@ public:
using KeyType = KeyT;
using DataType = DataT;
protected:
- BTreeLeafNode() : ParentType(LEAF_LEVEL) {}
+ BTreeLeafNode() noexcept : ParentType(LEAF_LEVEL) {}
- BTreeLeafNode(const BTreeLeafNode &rhs)
+ BTreeLeafNode(const BTreeLeafNode &rhs) noexcept
: ParentType(rhs)
{}
- BTreeLeafNode(const KeyDataType *smallArray, uint32_t arraySize);
+ BTreeLeafNode(const KeyDataType *smallArray, uint32_t arraySize) noexcept;
~BTreeLeafNode() = default;
- BTreeLeafNode &operator=(const BTreeLeafNode &rhs) {
+ BTreeLeafNode &operator=(const BTreeLeafNode &rhs) noexcept {
ParentType::operator=(rhs);
return *this;
}
@@ -535,8 +534,7 @@ public:
using ParentType = BTreeLeafNode<KeyT, DataT, AggrT, NumSlots>;
using KeyDataType = typename ParentType::KeyDataType;
- BTreeLeafNodeTemp(const KeyDataType *smallArray,
- uint32_t arraySize)
+ BTreeLeafNodeTemp(const KeyDataType *smallArray, uint32_t arraySize) noexcept
: ParentType(smallArray, arraySize)
{}
diff --git a/vespalib/src/vespa/vespalib/btree/btreenode.hpp b/vespalib/src/vespa/vespalib/btree/btreenode.hpp
index c8bc4ec614c..7de65261e93 100644
--- a/vespalib/src/vespa/vespalib/btree/btreenode.hpp
+++ b/vespalib/src/vespa/vespalib/btree/btreenode.hpp
@@ -4,6 +4,7 @@
#include "btreenode.h"
#include <algorithm>
+#include <cassert>
namespace vespalib::btree {
@@ -74,9 +75,7 @@ upper_bound(uint32_t sidx, const KeyT & key, CompareT comp) const
template <typename KeyT, typename DataT, typename AggrT, uint32_t NumSlots>
void
-BTreeNodeTT<KeyT, DataT, AggrT, NumSlots>::insert(uint32_t idx,
- const KeyT &key,
- const DataT &data)
+BTreeNodeTT<KeyT, DataT, AggrT, NumSlots>::insert(uint32_t idx, const KeyT &key, const DataT &data)
{
assert(validSlots() < NodeType::maxSlots());
assert(!getFrozen());
@@ -208,8 +207,7 @@ stealSomeFromRightNode(NodeType *victim)
template <typename KeyT, typename DataT, typename AggrT, uint32_t NumSlots>
void
-BTreeNodeTT<KeyT, DataT, AggrT, NumSlots>::cleanRange(uint32_t from,
- uint32_t to)
+BTreeNodeTT<KeyT, DataT, AggrT, NumSlots>::cleanRange(uint32_t from, uint32_t to)
{
assert(from < to);
assert(to <= validSlots());
@@ -366,7 +364,7 @@ BTreeInternalNode<KeyT, AggrT, NumSlots>::cleanFrozen()
template <typename KeyT, typename DataT, typename AggrT, uint32_t NumSlots>
BTreeLeafNode<KeyT, DataT, AggrT, NumSlots>::
-BTreeLeafNode(const KeyDataType *smallArray, uint32_t arraySize)
+BTreeLeafNode(const KeyDataType *smallArray, uint32_t arraySize) noexcept
: ParentType(LEAF_LEVEL)
{
assert(arraySize <= BTreeLeafNode::maxSlots());
diff --git a/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h b/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h
index b537602c703..c7c635b4471 100644
--- a/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h
+++ b/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h
@@ -34,10 +34,6 @@ public:
using DataStoreBase = datastore::DataStoreBase;
private:
- BTreeNodeAllocator(const BTreeNodeAllocator &rhs);
-
- BTreeNodeAllocator & operator=(const BTreeNodeAllocator &rhs);
-
NodeStore _nodeStore;
using RefVector = vespalib::Array<BTreeNode::Ref>;
@@ -53,6 +49,8 @@ private:
RefVector _leafHoldUntilFreeze;
public:
+ BTreeNodeAllocator(const BTreeNodeAllocator &rhs) = delete;
+ BTreeNodeAllocator & operator=(const BTreeNodeAllocator &rhs) = delete;
BTreeNodeAllocator();
~BTreeNodeAllocator();
diff --git a/vespalib/src/vespa/vespalib/btree/btreeroot.h b/vespalib/src/vespa/vespalib/btree/btreeroot.h
index c23cf900367..cd1d98725dc 100644
--- a/vespalib/src/vespa/vespalib/btree/btreeroot.h
+++ b/vespalib/src/vespa/vespalib/btree/btreeroot.h
@@ -13,10 +13,10 @@ namespace vespalib::btree {
template <typename, typename, typename, size_t, size_t>
class BTreeNodeAllocator;
-template <typename, typename, typename, size_t, size_t, class> class
-BTreeBuilder;
-template <typename, typename, typename, size_t, size_t, class> class
-BTreeAggregator;
+template <typename, typename, typename, size_t, size_t, class>
+class BTreeBuilder;
+template <typename, typename, typename, size_t, size_t, class>
+class BTreeAggregator;
template <typename KeyT,
typename DataT,
@@ -61,15 +61,14 @@ public:
const NodeAllocatorType *const _allocator;
public:
using Iterator = ConstIterator;
- FrozenView();
- FrozenView(BTreeNode::Ref frozenRoot,
- const NodeAllocatorType & allocator);
- ConstIterator find(const KeyType& key,
- CompareT comp = CompareT()) const;
- ConstIterator lowerBound(const KeyType &key,
- CompareT comp = CompareT()) const;
- ConstIterator upperBound(const KeyType &key,
- CompareT comp = CompareT()) const;
+ FrozenView() : _frozenRoot(BTreeNode::Ref()),_allocator(nullptr) {}
+ FrozenView(BTreeNode::Ref frozenRoot, const NodeAllocatorType & allocator)
+ : _frozenRoot(frozenRoot),
+ _allocator(&allocator)
+ {}
+ ConstIterator find(const KeyType& key, CompareT comp = CompareT()) const;
+ ConstIterator lowerBound(const KeyType &key, CompareT comp = CompareT()) const;
+ ConstIterator upperBound(const KeyType &key, CompareT comp = CompareT()) const;
ConstIterator begin() const {
return ConstIterator(_frozenRoot, *_allocator);
}
@@ -78,7 +77,12 @@ public:
}
BTreeNode::Ref getRoot() const { return _frozenRoot; }
- size_t size() const;
+ size_t size() const {
+ if (NodeAllocatorType::isValidRef(_frozenRoot)) {
+ return _allocator->validLeaves(_frozenRoot);
+ }
+ return 0u;
+ }
const NodeAllocatorType &getAllocator() const { return *_allocator; }
const AggrT &getAggregated() const {
diff --git a/vespalib/src/vespa/vespalib/btree/btreeroot.hpp b/vespalib/src/vespa/vespalib/btree/btreeroot.hpp
index fdcc957009b..73e5fb32b92 100644
--- a/vespalib/src/vespa/vespalib/btree/btreeroot.hpp
+++ b/vespalib/src/vespa/vespalib/btree/btreeroot.hpp
@@ -14,8 +14,7 @@ namespace vespalib::btree {
//----------------------- BTreeRoot ------------------------------------------//
-template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
- typename TraitsT>
+template <typename KeyT, typename DataT, typename AggrT, typename CompareT, typename TraitsT>
vespalib::string
BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::
toString(BTreeNode::Ref node,
@@ -123,8 +122,7 @@ isValid(BTreeNode::Ref node,
return true;
}
-template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
- typename TraitsT>
+template <typename KeyT, typename DataT, typename AggrT, typename CompareT, typename TraitsT>
typename BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::Iterator
BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::
findHelper(BTreeNode::Ref root, const KeyType & key,
@@ -138,20 +136,17 @@ findHelper(BTreeNode::Ref root, const KeyType & key,
return itr;
}
-template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
- typename TraitsT>
+template <typename KeyT, typename DataT, typename AggrT, typename CompareT, typename TraitsT>
typename BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::Iterator
BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::
-lowerBoundHelper(BTreeNode::Ref root, const KeyType & key,
- const NodeAllocatorType & allocator, CompareT comp)
+lowerBoundHelper(BTreeNode::Ref root, const KeyType & key, const NodeAllocatorType & allocator, CompareT comp)
{
Iterator itr(BTreeNode::Ref(), allocator);
itr.lower_bound(root, key, comp);
return itr;
}
-template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
- typename TraitsT>
+template <typename KeyT, typename DataT, typename AggrT, typename CompareT, typename TraitsT>
typename BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::Iterator
BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::
upperBoundHelper(BTreeNode::Ref root, const KeyType & key,
@@ -167,31 +162,10 @@ upperBoundHelper(BTreeNode::Ref root, const KeyType & key,
//----------------------- BTreeRoot::FrozenView ----------------------------------//
-template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
- typename TraitsT>
-BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::
-FrozenView::FrozenView()
- : _frozenRoot(BTreeNode::Ref()),
- _allocator(nullptr)
-{
-}
-
-template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
- typename TraitsT>
-BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::
-FrozenView::FrozenView(BTreeNode::Ref frozenRoot,
- const NodeAllocatorType & allocator)
- : _frozenRoot(frozenRoot),
- _allocator(&allocator)
-{
-}
-
-template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
- typename TraitsT>
+template <typename KeyT, typename DataT, typename AggrT, typename CompareT, typename TraitsT>
typename BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::ConstIterator
BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::
-FrozenView::find(const KeyType & key,
- CompareT comp) const
+FrozenView::find(const KeyType & key, CompareT comp) const
{
ConstIterator itr(BTreeNode::Ref(), *_allocator);
itr.lower_bound(_frozenRoot, key, comp);
@@ -201,24 +175,20 @@ FrozenView::find(const KeyType & key,
return itr;
}
-template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
- typename TraitsT>
+template <typename KeyT, typename DataT, typename AggrT, typename CompareT, typename TraitsT>
typename BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::ConstIterator
BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::
-FrozenView::lowerBound(const KeyType & key,
- CompareT comp) const
+FrozenView::lowerBound(const KeyType & key, CompareT comp) const
{
ConstIterator itr(BTreeNode::Ref(), *_allocator);
itr.lower_bound(_frozenRoot, key, comp);
return itr;
}
-template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
- typename TraitsT>
+template <typename KeyT, typename DataT, typename AggrT, typename CompareT, typename TraitsT>
typename BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::ConstIterator
BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::
-FrozenView::upperBound(const KeyType & key,
- CompareT comp) const
+FrozenView::upperBound(const KeyType & key, CompareT comp) const
{
ConstIterator itr(_frozenRoot, *_allocator);
if (itr.valid() && !comp(key, itr.getKey())) {
@@ -227,30 +197,15 @@ FrozenView::upperBound(const KeyType & key,
return itr;
}
-template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
- typename TraitsT>
-size_t
-BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::
-FrozenView::size() const
-{
- if (NodeAllocatorType::isValidRef(_frozenRoot)) {
- return _allocator->validLeaves(_frozenRoot);
- }
- return 0u;
-}
-
//----------------------- BTreeRoot ----------------------------------------------//
-template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
- typename TraitsT>
+template <typename KeyT, typename DataT, typename AggrT, typename CompareT, typename TraitsT>
BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::BTreeRootT() = default;
-template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
- typename TraitsT>
+template <typename KeyT, typename DataT, typename AggrT, typename CompareT, typename TraitsT>
BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::~BTreeRootT() = default;
-template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
- typename TraitsT>
+template <typename KeyT, typename DataT, typename AggrT, typename CompareT, typename TraitsT>
void
BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::
clear(NodeAllocatorType &allocator)
@@ -263,39 +218,32 @@ clear(NodeAllocatorType &allocator)
}
}
-template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
- typename TraitsT>
+template <typename KeyT, typename DataT, typename AggrT, typename CompareT, typename TraitsT>
typename BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::Iterator
BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::
-find(const KeyType & key, const NodeAllocatorType & allocator,
- CompareT comp) const
+find(const KeyType & key, const NodeAllocatorType & allocator, CompareT comp) const
{
return findHelper(_root, key, allocator, comp);
}
-template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
- typename TraitsT>
+template <typename KeyT, typename DataT, typename AggrT, typename CompareT, typename TraitsT>
typename BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::Iterator
BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::
-lowerBound(const KeyType & key, const NodeAllocatorType & allocator,
- CompareT comp) const
+lowerBound(const KeyType & key, const NodeAllocatorType & allocator, CompareT comp) const
{
return lowerBoundHelper(_root, key, allocator, comp);
}
-template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
- typename TraitsT>
+template <typename KeyT, typename DataT, typename AggrT, typename CompareT, typename TraitsT>
typename BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::Iterator
BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::
-upperBound(const KeyType & key, const NodeAllocatorType & allocator,
- CompareT comp) const
+upperBound(const KeyType & key, const NodeAllocatorType & allocator, CompareT comp) const
{
return upperBoundHelper(_root, key, allocator, comp);
}
-template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
- typename TraitsT>
+template <typename KeyT, typename DataT, typename AggrT, typename CompareT, typename TraitsT>
size_t
BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::
size(const NodeAllocatorType &allocator) const
@@ -307,8 +255,7 @@ size(const NodeAllocatorType &allocator) const
}
-template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
- typename TraitsT>
+template <typename KeyT, typename DataT, typename AggrT, typename CompareT, typename TraitsT>
size_t
BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::
frozenSize(const NodeAllocatorType &allocator) const
@@ -321,8 +268,7 @@ frozenSize(const NodeAllocatorType &allocator) const
}
-template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
- typename TraitsT>
+template <typename KeyT, typename DataT, typename AggrT, typename CompareT, typename TraitsT>
vespalib::string
BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::
toString(const NodeAllocatorType &allocator) const
@@ -353,8 +299,7 @@ template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
typename TraitsT, class AggrCalcT>
bool
BTreeRoot<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::
-isValidFrozen(const NodeAllocatorType &allocator,
- CompareT comp) const
+isValidFrozen(const NodeAllocatorType &allocator, CompareT comp) const
{
BTreeNode::Ref frozenRoot = getFrozenRoot();
if (NodeAllocatorType::isValidRef(frozenRoot)) {
@@ -382,8 +327,7 @@ template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
typename TraitsT>
size_t
BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::
-bitSize(BTreeNode::Ref node,
- const NodeAllocatorType &allocator) const
+bitSize(BTreeNode::Ref node, const NodeAllocatorType &allocator) const
{
if (allocator.isLeafRef(node)) {
return sizeof(LeafNodeType) * 8;
@@ -399,8 +343,7 @@ bitSize(BTreeNode::Ref node,
}
-template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
- typename TraitsT>
+template <typename KeyT, typename DataT, typename AggrT, typename CompareT, typename TraitsT>
void
BTreeRootT<KeyT, DataT, AggrT, CompareT, TraitsT>::
thaw(Iterator &itr)
@@ -455,8 +398,7 @@ insert(Iterator &itr,
{
using Inserter = BTreeInserter<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>;
bool oldFrozen = isFrozen();
- Inserter::insert(_root, itr, key, data,
- aggrCalc);
+ Inserter::insert(_root, itr, key, data, aggrCalc);
if (oldFrozen && !isFrozen())
itr.getAllocator().needFreeze(this);
}
@@ -483,8 +425,7 @@ template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
typename TraitsT, class AggrCalcT>
void
BTreeRoot<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::
-remove(Iterator &itr,
- const AggrCalcT &aggrCalc)
+remove(Iterator &itr, const AggrCalcT &aggrCalc)
{
using Remover = BTreeRemover<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>;
bool oldFrozen = isFrozen();
diff --git a/vespalib/src/vespa/vespalib/btree/btreerootbase.hpp b/vespalib/src/vespa/vespalib/btree/btreerootbase.hpp
index 025fed3853a..dea383676f2 100644
--- a/vespalib/src/vespa/vespalib/btree/btreerootbase.hpp
+++ b/vespalib/src/vespa/vespalib/btree/btreerootbase.hpp
@@ -3,6 +3,7 @@
#pragma once
#include "btreerootbase.h"
+#include <cassert>
namespace vespalib::btree {
diff --git a/vespalib/src/vespa/vespalib/geo/zcurve.cpp b/vespalib/src/vespa/vespalib/geo/zcurve.cpp
index c207f966704..d04a04fda0a 100644
--- a/vespalib/src/vespa/vespalib/geo/zcurve.cpp
+++ b/vespalib/src/vespa/vespalib/geo/zcurve.cpp
@@ -10,15 +10,38 @@ namespace vespalib::geo {
namespace {
+ /**
+ * An area defined by its upper left and lower right corners. The
+ * z-coordinates between these corners act as a spatial
+ * over-estimation of the actual area. These areas may never cross
+ * signed borders, since that would break the whole concept of
+ * hierarchical spatial partitioning.
+ **/
+struct Area {
+ const ZCurve::Point min;
+ const ZCurve::Point max;
+ Area(const Area &rhs) = default;
+ Area(int32_t min_x, int32_t min_y,
+ int32_t max_x, int32_t max_y)
+ : min(min_x, min_y), max(max_x, max_y)
+ {
+ assert((min_x <= max_x) && ((min_x < 0) == (max_x < 0)));
+ assert((min_y <= max_y) && ((min_y < 0) == (max_y < 0)));
+ }
+ Area &operator=(Area &&rhs) { new ((void*)this) Area(rhs); return *this; }
+ int64_t size() const { return (static_cast<int64_t>(max.x) - min.x + 1) * (static_cast<int64_t>(max.y) - min.y + 1); }
+ int64_t estimate() const { return (max.z - min.z + 1); }
+ int64_t error() const { return estimate() - size(); }
+};
+
class ZAreaQueue
{
private:
struct MaxAreaErrorCmp {
- bool operator()(const ZCurve::Area &a, const ZCurve::Area &b) const {
+ bool operator()(const Area &a, const Area &b) const {
return (a.error() > b.error());
}
};
- using Area = ZCurve::Area;
using Range = ZCurve::Range;
using RangeVector = ZCurve::RangeVector;
using Queue = PriorityQueue<Area, MaxAreaErrorCmp, LeftArrayHeap>;
@@ -61,7 +84,6 @@ public:
class ZAreaSplitter
{
private:
- using Area = ZCurve::Area;
using RangeVector = ZCurve::RangeVector;
ZAreaQueue _queue;
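
The Area helper moved above is now file-local, but the over-estimation its comment describes can be reproduced with the public ZCurve::encodeSlow declared in zcurve.h. A minimal sketch, not part of the patch, with the include path assumed from the repository layout:

#include <vespa/vespalib/geo/zcurve.h>
#include <cstdio>

int main() {
    using vespalib::geo::ZCurve;
    // Corners (1,1) and (6,4): 6 * 4 = 24 points inside, but the z-codes
    // between the two corner codes usually cover more values; that surplus is
    // what Area::error() measures and what ZAreaQueue keeps splitting down.
    int64_t zmin = ZCurve::encodeSlow(1, 1);
    int64_t zmax = ZCurve::encodeSlow(6, 4);
    long size = 6L * 4L;
    long estimate = static_cast<long>(zmax - zmin + 1);
    printf("size=%ld estimate=%ld error=%ld\n", size, estimate, estimate - size);
    return 0;
}
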
diff --git a/vespalib/src/vespa/vespalib/geo/zcurve.h b/vespalib/src/vespa/vespalib/geo/zcurve.h
index 2f92b3a019b..c5fbdc08dce 100644
--- a/vespalib/src/vespa/vespalib/geo/zcurve.h
+++ b/vespalib/src/vespa/vespalib/geo/zcurve.h
@@ -3,7 +3,6 @@
#pragma once
#include <cstdint>
-#include <cassert>
#include <vector>
namespace vespalib::geo {
@@ -163,30 +162,6 @@ public:
Point(int32_t x_, int32_t y_) : x(x_), y(y_), z(encode(x_, y_)) {}
};
- /**
- * An area defined by its upper left and lower right corners. The
- * z-coordinates between these corners act as a spacial
- * over-estimation of the actual area. These areas may never cross
- * signed borders, since that would break the whole concept of
- * hierarchical spatial partitioning.
- **/
- struct Area {
- const Point min;
- const Point max;
- Area(const Area &rhs) = default;
- Area(int32_t min_x, int32_t min_y,
- int32_t max_x, int32_t max_y)
- : min(min_x, min_y), max(max_x, max_y)
- {
- assert((min_x <= max_x) && ((min_x < 0) == (max_x < 0)));
- assert((min_y <= max_y) && ((min_y < 0) == (max_y < 0)));
- }
- Area &operator=(Area &&rhs) { new ((void*)this) Area(rhs); return *this; }
- int64_t size() const { return (static_cast<int64_t>(max.x) - min.x + 1) * (static_cast<int64_t>(max.y) - min.y + 1); }
- int64_t estimate() const { return (max.z - min.z + 1); }
- int64_t error() const { return estimate() - size(); }
- };
-
class Range
{
private:
@@ -212,11 +187,9 @@ public:
static RangeVector find_ranges(int min_x, int min_y,
int max_x, int max_y);
- static int64_t
- encodeSlow(int32_t x, int32_t y);
+ static int64_t encodeSlow(int32_t x, int32_t y);
- static void
- decodeSlow(int64_t enc, int32_t *xp, int32_t *yp);
+ static void decodeSlow(int64_t enc, int32_t *xp, int32_t *yp);
};
}
diff --git a/vespalib/src/vespa/vespalib/metrics/simple_metrics_manager.cpp b/vespalib/src/vespa/vespalib/metrics/simple_metrics_manager.cpp
index 4b6b82697f7..534177e480a 100644
--- a/vespalib/src/vespa/vespalib/metrics/simple_metrics_manager.cpp
+++ b/vespalib/src/vespa/vespalib/metrics/simple_metrics_manager.cpp
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "simple_metrics_manager.h"
#include "simple_tick.h"
+#include <cassert>
#include <vespa/log/log.h>
LOG_SETUP(".vespalib.metrics.simple_metrics_manager");
diff --git a/vespalib/src/vespa/vespalib/metrics/stable_store.h b/vespalib/src/vespa/vespalib/metrics/stable_store.h
index f249fd7729e..d456150ab7e 100644
--- a/vespalib/src/vespa/vespalib/metrics/stable_store.h
+++ b/vespalib/src/vespa/vespalib/metrics/stable_store.h
@@ -4,7 +4,6 @@
#include <memory>
#include <vector>
-#include <assert.h>
namespace vespalib {
@@ -54,8 +53,8 @@ private:
StableStore(size_t sz, UP &&more, std::vector<T> &&mine);
- size_t _size;
- UP _more;
+ size_t _size;
+ UP _more;
std::vector<T> _mine;
};
diff --git a/vespalib/src/vespa/vespalib/net/tls/peer_policies.cpp b/vespalib/src/vespa/vespalib/net/tls/peer_policies.cpp
index 7a9c7720b43..a87a44cabfb 100644
--- a/vespalib/src/vespa/vespalib/net/tls/peer_policies.cpp
+++ b/vespalib/src/vespa/vespalib/net/tls/peer_policies.cpp
@@ -122,7 +122,7 @@ RequiredPeerCredential::RequiredPeerCredential(Field field, vespalib::string mus
{
}
-RequiredPeerCredential::RequiredPeerCredential(const RequiredPeerCredential &) = default;
+RequiredPeerCredential::RequiredPeerCredential(const RequiredPeerCredential &) noexcept = default;
RequiredPeerCredential::RequiredPeerCredential(RequiredPeerCredential &&) noexcept = default;
RequiredPeerCredential & RequiredPeerCredential::operator=(RequiredPeerCredential &&) noexcept = default;
RequiredPeerCredential::~RequiredPeerCredential() = default;
diff --git a/vespalib/src/vespa/vespalib/net/tls/peer_policies.h b/vespalib/src/vespa/vespalib/net/tls/peer_policies.h
index 56aa2986de3..ed8926e2aea 100644
--- a/vespalib/src/vespa/vespalib/net/tls/peer_policies.h
+++ b/vespalib/src/vespa/vespalib/net/tls/peer_policies.h
@@ -30,7 +30,7 @@ private:
public:
RequiredPeerCredential() = default;
RequiredPeerCredential(Field field, vespalib::string must_match_pattern);
- RequiredPeerCredential(const RequiredPeerCredential &);
+ RequiredPeerCredential(const RequiredPeerCredential &) noexcept;
RequiredPeerCredential & operator=(const RequiredPeerCredential &) = delete;
RequiredPeerCredential(RequiredPeerCredential &&) noexcept;
RequiredPeerCredential & operator=(RequiredPeerCredential &&) noexcept;
diff --git a/vespalib/src/vespa/vespalib/stllike/allocator.h b/vespalib/src/vespa/vespalib/stllike/allocator.h
index b34533740a2..f35cfce8359 100644
--- a/vespalib/src/vespa/vespalib/stllike/allocator.h
+++ b/vespalib/src/vespa/vespalib/stllike/allocator.h
@@ -17,7 +17,7 @@ public:
allocator_large() noexcept : _allocator(alloc::MemoryAllocator::select_allocator()) {}
using value_type = T;
constexpr T * allocate(std::size_t n) {
- return static_cast<T *>(_allocator->alloc(n*sizeof(T)).first);
+ return static_cast<T *>(_allocator->alloc(n*sizeof(T)).get());
}
void deallocate(T * p, std::size_t n) {
_allocator->free(p, n*sizeof(T));
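
For context, allocate() now unwraps the new PtrAndSize with get(); the allocator itself is used like any other STL allocator. A hedged usage sketch (the enclosing namespace is assumed to be vespalib, which this hunk does not show):

#include <vespa/vespalib/stllike/allocator.h>
#include <vector>

void fill_large_vector() {
    // Backing memory comes from the MemoryAllocator selected at construction
    // time rather than from the default operator new.
    std::vector<int, vespalib::allocator_large<int>> big;
    big.resize(1024 * 1024, 42);
}
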
diff --git a/vespalib/src/vespa/vespalib/test/memory_allocator_observer.cpp b/vespalib/src/vespa/vespalib/test/memory_allocator_observer.cpp
index ba23970f0ea..43817e63948 100644
--- a/vespalib/src/vespa/vespalib/test/memory_allocator_observer.cpp
+++ b/vespalib/src/vespa/vespalib/test/memory_allocator_observer.cpp
@@ -20,7 +20,7 @@ MemoryAllocatorObserver::MemoryAllocatorObserver(Stats &stats)
}
MemoryAllocatorObserver::~MemoryAllocatorObserver() = default;
-MemoryAllocatorObserver::PtrAndSize
+PtrAndSize
MemoryAllocatorObserver::alloc(size_t sz) const
{
++_stats.alloc_cnt;
diff --git a/vespalib/src/vespa/vespalib/testkit/time_bomb.cpp b/vespalib/src/vespa/vespalib/testkit/time_bomb.cpp
index 9d4b2ea30c8..a93baac6589 100644
--- a/vespalib/src/vespa/vespalib/testkit/time_bomb.cpp
+++ b/vespalib/src/vespa/vespalib/testkit/time_bomb.cpp
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "time_bomb.h"
+#include <cstdint>
#include <vespa/log/log.h>
LOG_SETUP(".vespalib.testkit.time_bomb");
@@ -14,7 +15,7 @@ void bomb(Gate &gate, vespalib::duration timeout) {
return;
}
}
- size_t countdown = std::min(count_s(timeout), 5l);
+ size_t countdown = std::min(count_s(timeout), INT64_C(5));
while (countdown > 0) {
fprintf(stderr, "...%zu...\n", countdown--);
if (gate.await(1s)) {
diff --git a/vespalib/src/vespa/vespalib/util/CMakeLists.txt b/vespalib/src/vespa/vespalib/util/CMakeLists.txt
index 91365d446c1..21642cbd842 100644
--- a/vespalib/src/vespa/vespalib/util/CMakeLists.txt
+++ b/vespalib/src/vespa/vespalib/util/CMakeLists.txt
@@ -31,6 +31,7 @@ vespa_add_library(vespalib_vespalib_util OBJECT
exceptions.cpp
execution_profiler.cpp
executor_idle_tracking.cpp
+ fake_doom.cpp
featureset.cpp
file_area_freelist.cpp
foregroundtaskexecutor.cpp
diff --git a/vespalib/src/vespa/vespalib/util/alloc.cpp b/vespalib/src/vespa/vespalib/util/alloc.cpp
index 83c8a7de7e2..cb9a4b688b3 100644
--- a/vespalib/src/vespa/vespalib/util/alloc.cpp
+++ b/vespalib/src/vespa/vespalib/util/alloc.cpp
@@ -284,12 +284,12 @@ AutoAllocator::getAllocator(size_t mmapLimit, size_t alignment) {
return getAutoAllocator(availableAutoAllocators().first, mmapLimit, alignment);
}
-MemoryAllocator::PtrAndSize
+PtrAndSize
HeapAllocator::alloc(size_t sz) const {
return salloc(sz);
}
-MemoryAllocator::PtrAndSize
+PtrAndSize
HeapAllocator::salloc(size_t sz) {
return PtrAndSize((sz > 0) ? malloc(sz) : nullptr, sz);
}
@@ -299,10 +299,10 @@ void HeapAllocator::free(PtrAndSize alloc) const {
}
void HeapAllocator::sfree(PtrAndSize alloc) {
- if (alloc.first) { ::free(alloc.first); }
+ if (alloc.get()) { ::free(alloc.get()); }
}
-MemoryAllocator::PtrAndSize
+PtrAndSize
AlignedHeapAllocator::alloc(size_t sz) const {
if (!sz) { return PtrAndSize(nullptr, 0); }
void* ptr;
@@ -318,12 +318,12 @@ MMapAllocator::resize_inplace(PtrAndSize current, size_t newSize) const {
return sresize_inplace(current, newSize);
}
-MemoryAllocator::PtrAndSize
+PtrAndSize
MMapAllocator::alloc(size_t sz) const {
return salloc(sz, nullptr);
}
-MemoryAllocator::PtrAndSize
+PtrAndSize
MMapAllocator::salloc(size_t sz, void * wantedAddress)
{
void * buf(nullptr);
@@ -382,23 +382,23 @@ MMapAllocator::salloc(size_t sz, void * wantedAddress)
size_t
MMapAllocator::sresize_inplace(PtrAndSize current, size_t newSize) {
newSize = round_up_to_page_size(newSize);
- if (newSize > current.second) {
+ if (newSize > current.size()) {
return extend_inplace(current, newSize);
- } else if (newSize < current.second) {
+ } else if (newSize < current.size()) {
return shrink_inplace(current, newSize);
} else {
- return current.second;
+ return current.size();
}
}
size_t
MMapAllocator::extend_inplace(PtrAndSize current, size_t newSize) {
- if (current.second == 0u) {
+ if (current.size() == 0u) {
return 0u;
}
- PtrAndSize got = MMapAllocator::salloc(newSize - current.second, static_cast<char *>(current.first)+current.second);
- if ((static_cast<const char *>(current.first) + current.second) == static_cast<const char *>(got.first)) {
- return current.second + got.second;
+ PtrAndSize got = MMapAllocator::salloc(newSize - current.size(), static_cast<char *>(current.get())+current.size());
+ if ((static_cast<const char *>(current.get()) + current.size()) == static_cast<const char *>(got.get())) {
+ return current.size() + got.size();
} else {
MMapAllocator::sfree(got);
return 0;
@@ -407,7 +407,7 @@ MMapAllocator::extend_inplace(PtrAndSize current, size_t newSize) {
size_t
MMapAllocator::shrink_inplace(PtrAndSize current, size_t newSize) {
- PtrAndSize toUnmap(static_cast<char *>(current.first)+newSize, current.second - newSize);
+ PtrAndSize toUnmap(static_cast<char *>(current.get())+newSize, current.size() - newSize);
sfree(toUnmap);
return newSize;
}
@@ -418,27 +418,27 @@ void MMapAllocator::free(PtrAndSize alloc) const {
void MMapAllocator::sfree(PtrAndSize alloc)
{
- if (alloc.first != nullptr) {
- int madvise_retval = madvise(alloc.first, alloc.second, MADV_DONTNEED);
+ if (alloc.get() != nullptr) {
+ int madvise_retval = madvise(alloc.get(), alloc.size(), MADV_DONTNEED);
if (madvise_retval != 0) {
std::error_code ec(errno, std::system_category());
if (errno == EINVAL) {
- LOG(debug, "madvise(%p, %lx)=%d, errno=%s", alloc.first, alloc.second, madvise_retval, ec.message().c_str());
+ LOG(debug, "madvise(%p, %lx)=%d, errno=%s", alloc.get(), alloc.size(), madvise_retval, ec.message().c_str());
} else {
- LOG(warning, "madvise(%p, %lx)=%d, errno=%s", alloc.first, alloc.second, madvise_retval, ec.message().c_str());
+ LOG(warning, "madvise(%p, %lx)=%d, errno=%s", alloc.get(), alloc.size(), madvise_retval, ec.message().c_str());
}
}
- int munmap_retval = munmap(alloc.first, alloc.second);
+ int munmap_retval = munmap(alloc.get(), alloc.size());
if (munmap_retval != 0) {
std::error_code ec(errno, std::system_category());
- LOG(warning, "munmap(%p, %lx)=%d, errno=%s", alloc.first, alloc.second, munmap_retval, ec.message().c_str());
+ LOG(warning, "munmap(%p, %lx)=%d, errno=%s", alloc.get(), alloc.size(), munmap_retval, ec.message().c_str());
abort();
}
- if (alloc.second >= _G_MMapLogLimit) {
+ if (alloc.size() >= _G_MMapLogLimit) {
std::lock_guard guard(_G_lock);
- MMapInfo info = _G_HugeMappings[alloc.first];
- assert(alloc.second == info._sz);
- _G_HugeMappings.erase(alloc.first);
+ MMapInfo info = _G_HugeMappings[alloc.get()];
+ assert(alloc.size() == info._sz);
+ _G_HugeMappings.erase(alloc.get());
LOG(info, "munmap %ld of size %ld", info._id, info._sz);
LOG(info, "%ld mappings of accumulated size %ld", _G_HugeMappings.size(), sum(_G_HugeMappings));
}
@@ -447,7 +447,7 @@ void MMapAllocator::sfree(PtrAndSize alloc)
size_t
AutoAllocator::resize_inplace(PtrAndSize current, size_t newSize) const {
- if (useMMap(current.second) && useMMap(newSize)) {
+ if (useMMap(current.size()) && useMMap(newSize)) {
newSize = roundUpToHugePages(newSize);
return MMapAllocator::sresize_inplace(current, newSize);
} else {
@@ -455,7 +455,7 @@ AutoAllocator::resize_inplace(PtrAndSize current, size_t newSize) const {
}
}
-MMapAllocator::PtrAndSize
+PtrAndSize
AutoAllocator::alloc(size_t sz) const {
if ( ! useMMap(sz)) {
if (_alignment == 0) {
@@ -471,7 +471,7 @@ AutoAllocator::alloc(size_t sz) const {
void
AutoAllocator::free(PtrAndSize alloc) const {
- if ( ! isMMapped(alloc.second)) {
+ if ( ! isMMapped(alloc.size())) {
return HeapAllocator::sfree(alloc);
} else {
return MMapAllocator::sfree(alloc);
@@ -513,7 +513,7 @@ Alloc::resize_inplace(size_t newSize)
}
size_t extendedSize = _allocator->resize_inplace(_alloc, newSize);
if (extendedSize >= newSize) {
- _alloc.second = extendedSize;
+ _alloc = PtrAndSize(_alloc.get(), extendedSize);
return true;
}
return false;
@@ -571,6 +571,14 @@ Alloc::alloc_with_allocator(const MemoryAllocator* allocator) noexcept
return Alloc(allocator);
}
+PtrAndSize::PtrAndSize(void * ptr, size_t sz) noexcept
+ : _ptr(ptr), _sz(sz)
+{
+ constexpr uint8_t MAX_PTR_BITS = 57;
+ constexpr uint64_t MAX_PTR = 1ul << MAX_PTR_BITS;
+ assert((uint64_t(ptr) + sz) < MAX_PTR);
+}
+
}
}
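
The resize_inplace path above relies on the kernel honouring an address hint: it maps the extra pages right after the current region and only treats the resize as successful when the new mapping turns out to be contiguous with the old one. A self-contained sketch of that idea using plain mmap, not the Vespa allocator API:

#include <sys/mman.h>
#include <cstddef>

// Returns the new usable size on success, 0 if the caller must fall back to
// allocate-and-copy.
size_t try_extend(void *cur, size_t cur_sz, size_t new_sz) {
    size_t extra = new_sz - cur_sz;
    void *want = static_cast<char *>(cur) + cur_sz;     // address right after the region
    void *got = mmap(want, extra, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (got == want) {
        return cur_sz + extra;                          // contiguous: extended in place
    }
    if (got != MAP_FAILED) {
        munmap(got, extra);                             // kernel placed it elsewhere: undo
    }
    return 0;
}
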
diff --git a/vespalib/src/vespa/vespalib/util/alloc.h b/vespalib/src/vespa/vespalib/util/alloc.h
index 4066894b4e3..b78c10dd381 100644
--- a/vespalib/src/vespa/vespalib/util/alloc.h
+++ b/vespalib/src/vespa/vespalib/util/alloc.h
@@ -15,14 +15,12 @@ namespace vespalib::alloc {
**/
class Alloc
{
-private:
- using PtrAndSize = std::pair<void *, size_t>;
public:
- size_t size() const { return _alloc.second; }
- void * get() { return _alloc.first; }
- const void * get() const { return _alloc.first; }
- void * operator -> () { return _alloc.first; }
- const void * operator -> () const { return _alloc.first; }
+ size_t size() const noexcept { return _alloc.size(); }
+ void * get() noexcept { return _alloc.get(); }
+ const void * get() const noexcept { return _alloc.get(); }
+ void * operator -> () noexcept { return get(); }
+ const void * operator -> () const noexcept { return get(); }
/*
* If possible the allocations will be resized. If it was possible it will return true
* And you have an area that can be accessed up to the new size.
@@ -42,7 +40,7 @@ public:
}
Alloc & operator=(Alloc && rhs) noexcept {
if (this != & rhs) {
- if (_alloc.first != nullptr) {
+ if (_alloc.get() != nullptr) {
_allocator->free(_alloc);
}
_alloc = rhs._alloc;
@@ -53,9 +51,9 @@ public:
}
Alloc() noexcept : _alloc(nullptr, 0), _allocator(nullptr) { }
~Alloc() {
- if (_alloc.first != nullptr) {
+ if (_alloc.get() != nullptr) {
_allocator->free(_alloc);
- _alloc.first = nullptr;
+ _alloc = PtrAndSize();
}
}
void swap(Alloc & rhs) noexcept {
@@ -63,10 +61,9 @@ public:
std::swap(_allocator, rhs._allocator);
}
void reset() {
- if (_alloc.first != nullptr) {
+ if (_alloc.get() != nullptr) {
_allocator->free(_alloc);
- _alloc.first = nullptr;
- _alloc.second = 0u;
+ _alloc = PtrAndSize();
}
}
Alloc create(size_t sz) const noexcept {
@@ -96,8 +93,7 @@ private:
_allocator(allocator)
{ }
void clear() {
- _alloc.first = nullptr;
- _alloc.second = 0;
+ _alloc = PtrAndSize();
_allocator = nullptr;
}
PtrAndSize _alloc;
diff --git a/vespalib/src/vespa/vespalib/util/fake_doom.cpp b/vespalib/src/vespa/vespalib/util/fake_doom.cpp
new file mode 100644
index 00000000000..4ca71afd2c5
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/util/fake_doom.cpp
@@ -0,0 +1,16 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "fake_doom.h"
+
+namespace vespalib {
+
+FakeDoom::FakeDoom(steady_time::duration time_to_doom)
+ : _time(steady_clock::now()),
+ _clock(_time),
+ _doom(_clock, _clock.getTimeNS() + time_to_doom)
+{
+}
+
+FakeDoom::~FakeDoom() = default;
+
+}
diff --git a/vespalib/src/vespa/vespalib/util/fake_doom.h b/vespalib/src/vespa/vespalib/util/fake_doom.h
new file mode 100644
index 00000000000..496129d8f0f
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/util/fake_doom.h
@@ -0,0 +1,24 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "doom.h"
+
+namespace vespalib {
+
+/*
+ * Class containing a fake doom whose time of expiry is controlled by the
+ * time_to_doom constructor argument.
+ */
+class FakeDoom {
+ std::atomic<steady_time> _time;
+ Clock _clock;
+ Doom _doom;
+public:
+ FakeDoom() : FakeDoom(1s) { }
+ FakeDoom(steady_time::duration time_to_doom);
+ ~FakeDoom();
+ const Doom& get_doom() const noexcept { return _doom; }
+};
+
+}
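
A minimal usage sketch for the new helper, assuming the include path from the repository layout; only the constructor and get_doom() shown in this patch are used:

#include <vespa/vespalib/util/fake_doom.h>
#include <chrono>

void run_with_deadline() {
    using namespace std::chrono_literals;
    vespalib::FakeDoom fake(5s);                  // doom 5 seconds after construction
    const vespalib::Doom &doom = fake.get_doom(); // hand this to the code under test
    (void) doom;
}
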
diff --git a/vespalib/src/vespa/vespalib/util/fiddle.h b/vespalib/src/vespa/vespalib/util/fiddle.h
index f4d2ac33695..b6799d9c778 100644
--- a/vespalib/src/vespa/vespalib/util/fiddle.h
+++ b/vespalib/src/vespa/vespalib/util/fiddle.h
@@ -4,8 +4,7 @@
#include <cassert>
-namespace vespalib {
-namespace bits {
+namespace vespalib::bits {
//-----------------------------------------------------------------------------
@@ -79,6 +78,5 @@ uint32_t split_range(uint32_t min, uint32_t max,
//-----------------------------------------------------------------------------
-} // namespace bits
-} // namespace vespalib
+}
diff --git a/vespalib/src/vespa/vespalib/util/generationhandler.cpp b/vespalib/src/vespa/vespalib/util/generationhandler.cpp
index 3562926d88d..8edf0d6fae4 100644
--- a/vespalib/src/vespa/vespalib/util/generationhandler.cpp
+++ b/vespalib/src/vespa/vespalib/util/generationhandler.cpp
@@ -5,7 +5,7 @@
namespace vespalib {
-GenerationHandler::GenerationHold::GenerationHold(void)
+GenerationHandler::GenerationHold::GenerationHold() noexcept
: _refCount(1),
_generation(0),
_next(0)
@@ -16,13 +16,13 @@ GenerationHandler::GenerationHold::~GenerationHold() {
}
void
-GenerationHandler::GenerationHold::setValid() {
+GenerationHandler::GenerationHold::setValid() noexcept {
assert(!valid(_refCount));
_refCount.fetch_sub(1);
}
bool
-GenerationHandler::GenerationHold::setInvalid() {
+GenerationHandler::GenerationHold::setInvalid() noexcept {
uint32_t refs = _refCount;
assert(valid(refs));
if (refs != 0) {
@@ -31,13 +31,8 @@ GenerationHandler::GenerationHold::setInvalid() {
return _refCount.compare_exchange_strong(refs, 1, std::memory_order_seq_cst);
}
-void
-GenerationHandler::GenerationHold::release() {
- _refCount.fetch_sub(2);
-}
-
GenerationHandler::GenerationHold *
-GenerationHandler::GenerationHold::acquire() {
+GenerationHandler::GenerationHold::acquire() noexcept {
if (valid(_refCount.fetch_add(2))) {
return this;
} else {
@@ -47,7 +42,7 @@ GenerationHandler::GenerationHold::acquire() {
}
GenerationHandler::GenerationHold *
-GenerationHandler::GenerationHold::copy(GenerationHold *self) {
+GenerationHandler::GenerationHold::copy(GenerationHold *self) noexcept {
if (self == nullptr) {
return nullptr;
} else {
@@ -58,39 +53,8 @@ GenerationHandler::GenerationHold::copy(GenerationHold *self) {
}
}
-uint32_t
-GenerationHandler::GenerationHold::getRefCount() const {
- return _refCount / 2;
-}
-
-GenerationHandler::Guard::Guard()
- : _hold(nullptr)
-{
-}
-
-GenerationHandler::Guard::Guard(GenerationHold *hold)
- : _hold(hold->acquire())
-{
-}
-
-GenerationHandler::Guard::~Guard()
-{
- cleanup();
-}
-
-GenerationHandler::Guard::Guard(const Guard & rhs)
- : _hold(GenerationHold::copy(rhs._hold))
-{
-}
-
-GenerationHandler::Guard::Guard(Guard &&rhs)
- : _hold(rhs._hold)
-{
- rhs._hold = nullptr;
-}
-
GenerationHandler::Guard &
-GenerationHandler::Guard::operator=(const Guard & rhs)
+GenerationHandler::Guard::operator=(const Guard & rhs) noexcept
{
if (&rhs != this) {
cleanup();
@@ -100,7 +64,7 @@ GenerationHandler::Guard::operator=(const Guard & rhs)
}
GenerationHandler::Guard &
-GenerationHandler::Guard::operator=(Guard &&rhs)
+GenerationHandler::Guard::operator=(Guard &&rhs) noexcept
{
if (&rhs != this) {
cleanup();
diff --git a/vespalib/src/vespa/vespalib/util/generationhandler.h b/vespalib/src/vespa/vespalib/util/generationhandler.h
index 6ba71b7f5fb..b346b1fa4e2 100644
--- a/vespalib/src/vespa/vespalib/util/generationhandler.h
+++ b/vespalib/src/vespa/vespalib/util/generationhandler.h
@@ -28,20 +28,24 @@ public:
// least significant bit is invalid flag
std::atomic<uint32_t> _refCount;
- static bool valid(uint32_t refCount) { return (refCount & 1) == 0u; }
+ static bool valid(uint32_t refCount) noexcept { return (refCount & 1) == 0u; }
public:
std::atomic<generation_t> _generation;
GenerationHold *_next; // next free element or next newer element.
- GenerationHold();
+ GenerationHold() noexcept;
~GenerationHold();
- void setValid();
- bool setInvalid();
- void release();
- GenerationHold *acquire();
- static GenerationHold *copy(GenerationHold *self);
- uint32_t getRefCount() const;
+ void setValid() noexcept;
+ bool setInvalid() noexcept;
+ void release() noexcept {
+ _refCount.fetch_sub(2);
+ }
+ GenerationHold *acquire() noexcept;
+ static GenerationHold *copy(GenerationHold *self) noexcept;
+ uint32_t getRefCount() const noexcept {
+ return _refCount.load(std::memory_order_relaxed) / 2;
+ }
};
/**
@@ -50,22 +54,26 @@ public:
class Guard {
private:
GenerationHold *_hold;
- void cleanup() {
+ void cleanup() noexcept {
if (_hold != nullptr) {
_hold->release();
_hold = nullptr;
}
}
public:
- Guard();
- Guard(GenerationHold *hold); // hold is never nullptr
- ~Guard();
- Guard(const Guard & rhs);
- Guard(Guard &&rhs);
- Guard & operator=(const Guard & rhs);
- Guard & operator=(Guard &&rhs);
-
- bool valid() const {
+ Guard() noexcept : _hold(nullptr) { }
+ Guard(GenerationHold *hold) noexcept : _hold(hold->acquire()) { } // hold is never nullptr
+ ~Guard() { cleanup(); }
+ Guard(const Guard & rhs) noexcept : _hold(GenerationHold::copy(rhs._hold)) { }
+ Guard(Guard &&rhs) noexcept
+ : _hold(rhs._hold)
+ {
+ rhs._hold = nullptr;
+ }
+ Guard & operator=(const Guard & rhs) noexcept;
+ Guard & operator=(Guard &&rhs) noexcept;
+
+ bool valid() const noexcept {
return _hold != nullptr;
}
generation_t getGeneration() const { return _hold->_generation.load(std::memory_order_relaxed); }
@@ -75,9 +83,9 @@ private:
std::atomic<generation_t> _generation;
std::atomic<generation_t> _oldest_used_generation;
std::atomic<GenerationHold *> _last; // Points to "current generation" entry
- GenerationHold *_first; // Points to "firstUsedGeneration" entry
- GenerationHold *_free; // List of free entries
- uint32_t _numHolds; // Number of allocated generation hold entries
+ GenerationHold *_first; // Points to "firstUsedGeneration" entry
+ GenerationHold *_free; // List of free entries
+ uint32_t _numHolds; // Number of allocated generation hold entries
void set_generation(generation_t generation) noexcept { _generation.store(generation, std::memory_order_relaxed); }
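
The refcount manipulation above packs an invalid flag into bit 0 and the actual reader count into the remaining bits. An illustration of that encoding with a plain integer (the real GenerationHold uses std::atomic<uint32_t>):

#include <cassert>
#include <cstdint>

int main() {
    uint32_t refCount = 1;            // freshly constructed hold: invalid, count 0
    refCount -= 1;                    // setValid(): clear the invalid bit
    assert((refCount & 1) == 0);      // valid(refCount)
    refCount += 2;                    // acquire(): one reader => count 1
    assert(refCount / 2 == 1);        // getRefCount()
    refCount -= 2;                    // release(): reader gone
    assert(refCount == 0);            // setInvalid() can now swap 0 for 1
    return 0;
}
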
diff --git a/vespalib/src/vespa/vespalib/util/latch.h b/vespalib/src/vespa/vespalib/util/latch.h
index 3ae49aeb11f..9110b898372 100644
--- a/vespalib/src/vespa/vespalib/util/latch.h
+++ b/vespalib/src/vespa/vespalib/util/latch.h
@@ -4,7 +4,6 @@
#include <mutex>
#include <condition_variable>
-#include <cassert>
namespace vespalib {
diff --git a/vespalib/src/vespa/vespalib/util/memory_allocator.h b/vespalib/src/vespa/vespalib/util/memory_allocator.h
index 2bcd7e5889d..e9a494f3e6f 100644
--- a/vespalib/src/vespa/vespalib/util/memory_allocator.h
+++ b/vespalib/src/vespa/vespalib/util/memory_allocator.h
@@ -8,6 +8,17 @@
namespace vespalib::alloc {
+class PtrAndSize {
+public:
+ PtrAndSize() noexcept : _ptr(nullptr), _sz(0ul) {}
+ PtrAndSize(void * ptr, size_t sz) noexcept;
+ void * get() const noexcept { return _ptr; }
+ size_t size() const noexcept { return _sz; }
+private:
+ void * _ptr;
+ size_t _sz;
+};
+
/*
* Abstract base class for allocating memory at a low level.
*/
@@ -15,7 +26,6 @@ class MemoryAllocator {
public:
static constexpr size_t PAGE_SIZE = 4_Ki;
static constexpr size_t HUGEPAGE_SIZE = 2_Mi;
- using PtrAndSize = std::pair<void *, size_t>;
MemoryAllocator(const MemoryAllocator &) = delete;
MemoryAllocator & operator = (const MemoryAllocator &) = delete;
MemoryAllocator() = default;
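
The new PtrAndSize replaces the old std::pair<void *, size_t> alias, so call sites switch from .first/.second to named accessors; the two-argument constructor additionally asserts that ptr + size stays below the 57-bit virtual address limit. A short sketch (include path assumed from the repository layout):

#include <vespa/vespalib/util/memory_allocator.h>
#include <cstdlib>

void example() {
    vespalib::alloc::PtrAndSize buf(std::malloc(64), 64);
    void  *ptr = buf.get();   // previously buf.first
    size_t sz  = buf.size();  // previously buf.second
    std::free(ptr);
    (void) sz;
}
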
diff --git a/vespalib/src/vespa/vespalib/util/mmap_file_allocator.cpp b/vespalib/src/vespa/vespalib/util/mmap_file_allocator.cpp
index 8c89f6745e4..929b926c03e 100644
--- a/vespalib/src/vespa/vespalib/util/mmap_file_allocator.cpp
+++ b/vespalib/src/vespa/vespalib/util/mmap_file_allocator.cpp
@@ -42,7 +42,7 @@ MmapFileAllocator::alloc_area(size_t sz) const
return offset;
}
-MmapFileAllocator::PtrAndSize
+PtrAndSize
MmapFileAllocator::alloc(size_t sz) const
{
if (sz == 0) {
@@ -72,23 +72,23 @@ MmapFileAllocator::alloc(size_t sz) const
void
MmapFileAllocator::free(PtrAndSize alloc) const
{
- if (alloc.second == 0) {
- assert(alloc.first == nullptr);
+ if (alloc.size() == 0) {
+ assert(alloc.get() == nullptr);
return; // empty allocation
}
- assert(alloc.first != nullptr);
+ assert(alloc.get() != nullptr);
// Check that matching allocation is registered
- auto itr = _allocations.find(alloc.first);
+ auto itr = _allocations.find(alloc.get());
assert(itr != _allocations.end());
- assert(itr->first == alloc.first);
- assert(itr->second.size == alloc.second);
+ assert(itr->first == alloc.get());
+ assert(itr->second.size == alloc.size());
auto offset = itr->second.offset;
_allocations.erase(itr);
- int retval = madvise(alloc.first, alloc.second, MADV_DONTNEED);
+ int retval = madvise(alloc.get(), alloc.size(), MADV_DONTNEED);
assert(retval == 0);
- retval = munmap(alloc.first, alloc.second);
+ retval = munmap(alloc.get(), alloc.size());
assert(retval == 0);
- _freelist.free(offset, alloc.second);
+ _freelist.free(offset, alloc.size());
}
size_t
diff --git a/vespalog/pom.xml b/vespalog/pom.xml
index f8260471fa2..6f721f59f38 100644
--- a/vespalog/pom.xml
+++ b/vespalog/pom.xml
@@ -49,6 +49,10 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
+ <configuration>
+ <bundleType>CORE</bundleType>
+ <suppressWarningMissingImportPackages>true</suppressWarningMissingImportPackages>
+ </configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
diff --git a/vespalog/src/vespa/log/control-file.h b/vespalog/src/vespa/log/control-file.h
index 31c15077c8c..c9ab746c3e2 100644
--- a/vespalog/src/vespa/log/control-file.h
+++ b/vespalog/src/vespa/log/control-file.h
@@ -60,7 +60,7 @@ public:
unsigned int *getLevels(const char *name);
void ensureComponent(const char *pattern);
- static unsigned int *defaultLevels();
+ static unsigned int *defaultLevels() __attribute__((noinline));
// make sure in-memory changes are synchronized to disk
void flush();
diff --git a/vespamalloc/src/vespamalloc/malloc/common.h b/vespamalloc/src/vespamalloc/malloc/common.h
index 58e05878f64..501b45cd067 100644
--- a/vespamalloc/src/vespamalloc/malloc/common.h
+++ b/vespamalloc/src/vespamalloc/malloc/common.h
@@ -59,6 +59,8 @@ using OSMemory = MmapMemory;
using SizeClassT = int;
constexpr size_t ALWAYS_REUSE_LIMIT = 0x100000ul;
+constexpr uint8_t MAX_PTR_BITS = 57; // Maximum number of bits a pointer can use (Intel IceLake)
+constexpr uint64_t MAX_PTR = 1ul << MAX_PTR_BITS;
inline constexpr int
msbIdx(uint64_t v) {
diff --git a/vespamalloc/src/vespamalloc/malloc/threadproxy.cpp b/vespamalloc/src/vespamalloc/malloc/threadproxy.cpp
index 02eb624ee64..4a02d599b63 100644
--- a/vespamalloc/src/vespamalloc/malloc/threadproxy.cpp
+++ b/vespamalloc/src/vespamalloc/malloc/threadproxy.cpp
@@ -58,6 +58,7 @@ void * mallocThreadProxy (void * arg)
vespamalloc::Mutex::addThread();
vespamalloc::_G_myMemP->initThisThread();
void * result = nullptr;
+ ASSERT_STACKTRACE(uint64_t(&result) < vespamalloc::MAX_PTR); // Sanity check that the stack address is a legal pointer.
DEBUG(fprintf(stderr, "arg(%p=%p), local(%p=%p)\n", &arg, arg, &ta, ta));
pthread_cleanup_push(cleanupThread, ta);
diff --git a/vespamalloc/src/vespamalloc/util/osmem.cpp b/vespamalloc/src/vespamalloc/util/osmem.cpp
index 0267e091bab..f1d4a527732 100644
--- a/vespamalloc/src/vespamalloc/util/osmem.cpp
+++ b/vespamalloc/src/vespamalloc/util/osmem.cpp
@@ -168,6 +168,7 @@ MmapMemory::get(size_t len)
errno = prevErrno; // The temporary error should not impact if the end is good.
memory = getNormalPages(len);
}
+ ASSERT_STACKTRACE((uint64_t(&memory) + len) < vespamalloc::MAX_PTR);
return memory;
}