author     Jon Bratseth <bratseth@gmail.com>    2024-05-13 21:55:47 +0200
committer  GitHub <noreply@github.com>          2024-05-13 21:55:47 +0200
commit     ee0cd3294fbfea91aa20816fd56c621724017939 (patch)
tree       5c28c45090422e270c6163f81e75e4fcb5f17b55
parent     e1bb1b57d9cef2aba9a5c0191e649d7aac8147eb (diff)
parent     e59b79fdc60d6b6994013caf50ab1f5decb930ce (diff)
Merge branch 'master' into bratseth/stemming-trace
-rw-r--r-- Gemfile.lock | 18
-rw-r--r-- build_settings.cmake | 3
-rw-r--r-- client/README.md | 2
-rw-r--r-- client/go/go.mod | 8
-rw-r--r-- client/go/go.sum | 8
-rw-r--r-- client/go/internal/vespa/document/throttler.go | 33
-rw-r--r-- client/go/internal/vespa/document/throttler_test.go | 13
-rw-r--r-- client/js/app/package.json | 2
-rw-r--r-- client/js/app/yarn.lock | 36
-rw-r--r-- clustercontroller-apps/src/main/java/com/yahoo/vespa/clustercontroller/apps/clustercontroller/ClusterController.java | 49
-rw-r--r-- clustercontroller-apps/src/main/java/com/yahoo/vespa/clustercontroller/apps/clustercontroller/ClusterControllerClusterConfigurer.java | 2
-rw-r--r-- clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/AggregatedClusterStats.java | 2
-rw-r--r-- clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStatsAggregator.java | 9
-rw-r--r-- clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java | 10
-rw-r--r-- clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/GlobalBucketSyncStatsCalculator.java | 45
-rw-r--r-- clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/MetricUpdater.java | 4
-rw-r--r-- clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/RemoteClusterControllerTask.java | 1
-rw-r--r-- clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/restapiv2/Response.java | 6
-rw-r--r-- clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/restapiv2/requests/ClusterStateRequest.java | 6
-rw-r--r-- clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterStatsAggregatorTest.java | 27
-rw-r--r-- clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ContentNodeStatsBuilder.java | 8
-rw-r--r-- clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/GlobalBucketSyncStatsCalculatorTest.java | 59
-rw-r--r-- clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/ClusterControllerMock.java | 18
-rw-r--r-- clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/ClusterTest.java | 42
-rw-r--r-- clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/StateRestApiTest.java | 2
-rw-r--r-- clustercontroller-utils/src/main/java/com/yahoo/vespa/clustercontroller/utils/staterestapi/response/UnitMetrics.java | 2
-rw-r--r-- clustercontroller-utils/src/test/java/com/yahoo/vespa/clustercontroller/utils/staterestapi/DummyStateApi.java | 4
-rw-r--r-- config-application-package/src/main/java/com/yahoo/config/application/ConfigDefinitionDir.java | 1
-rw-r--r-- config-application-package/src/main/java/com/yahoo/config/application/IncludeProcessor.java | 1
-rw-r--r-- config-application-package/src/main/java/com/yahoo/config/application/OverrideProcessor.java | 22
-rw-r--r-- config-application-package/src/main/java/com/yahoo/config/application/ValidationProcessor.java | 4
-rw-r--r-- config-application-package/src/test/java/com/yahoo/config/application/OverrideProcessorTest.java | 69
-rw-r--r-- config-model/src/main/java/com/yahoo/schema/RankProfile.java | 5
-rw-r--r-- config-model/src/main/java/com/yahoo/schema/derived/RawRankProfile.java | 5
-rw-r--r-- config-model/src/main/java/com/yahoo/schema/derived/SchemaInfo.java | 13
-rw-r--r-- config-model/src/main/java/com/yahoo/schema/document/Matching.java | 4
-rw-r--r-- config-model/src/main/java/com/yahoo/schema/document/SDField.java | 6
-rw-r--r-- config-model/src/main/java/com/yahoo/schema/document/TypedKey.java | 20
-rw-r--r-- config-model/src/main/java/com/yahoo/schema/parser/ConvertParsedFields.java | 1
-rw-r--r-- config-model/src/main/java/com/yahoo/schema/parser/ParsedMatchSettings.java | 3
-rw-r--r-- config-model/src/main/java/com/yahoo/schema/processing/AttributesImplicitWord.java | 3
-rw-r--r-- config-model/src/main/java/com/yahoo/schema/processing/ExactMatch.java | 15
-rw-r--r-- config-model/src/main/java/com/yahoo/schema/processing/TextMatch.java | 6
-rw-r--r-- config-model/src/main/java/com/yahoo/schema/processing/TypedTransformProvider.java | 4
-rw-r--r-- config-model/src/main/java/com/yahoo/vespa/documentmodel/SummaryField.java | 10
-rw-r--r-- config-model/src/main/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidator.java | 20
-rw-r--r-- config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validation.java | 2
-rw-r--r-- config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/RestartOnDeployForLocalLLMValidator.java | 55
-rw-r--r-- config-model/src/main/javacc/SchemaParser.jj | 14
-rw-r--r-- config-model/src/test/derived/advanced/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/annotationsimplicitstruct/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/annotationsinheritance/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/annotationsinheritance2/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/annotationsreference/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/annotationssimple/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/arrays/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/attributeprefetch/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/attributes/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/attributes/index-info.cfg | 2
-rw-r--r-- config-model/src/test/derived/bolding_dynamic_summary/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/complex/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/emptydefault/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/exactmatch/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/hnsw_index/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/id/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/imported_position_field_summary/schema-info.cfg | 2
-rw-r--r-- config-model/src/test/derived/indexswitches/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/inheritance/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/language/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/lowercase/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/multiplesummaries/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/music/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/nearestneighbor_streaming/vsmfields.cfg | 8
-rw-r--r-- config-model/src/test/derived/neuralnet_noqueryprofile/schema-info.cfg | 4
-rw-r--r-- config-model/src/test/derived/newrank/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/orderilscripts/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/position_array/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/position_attribute/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/position_extra/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/prefixexactattribute/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/rankingexpression/schema-info.cfg | 29
-rw-r--r-- config-model/src/test/derived/rankprofilemodularity/schema-info.cfg | 8
-rw-r--r-- config-model/src/test/derived/ranktypes/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/schemainheritance/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/schemainheritance/schema-info.cfg | 4
-rw-r--r-- config-model/src/test/derived/structanyorder/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/tensor/index-info.cfg | 10
-rw-r--r-- config-model/src/test/derived/tokenization/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/types/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/uri_array/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/derived/uri_wset/ilscripts.cfg | 1
-rw-r--r-- config-model/src/test/java/com/yahoo/schema/parser/SchemaParserTestCase.java | 17
-rw-r--r-- config-model/src/test/java/com/yahoo/schema/processing/IndexingScriptRewriterTestCase.java | 19
-rw-r--r-- config-model/src/test/java/com/yahoo/vespa/model/application/validation/ComplexFieldsValidatorTestCase.java | 26
-rw-r--r-- config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/RestartOnDeployForLocalLLMValidatorTest.java | 79
-rw-r--r-- config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/ContentClusterFixture.java | 15
-rw-r--r-- config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidatorTest.java | 40
-rw-r--r-- configd/src/apps/sentinel/manager.cpp | 15
-rw-r--r-- configd/src/apps/sentinel/manager.h | 3
-rw-r--r-- configd/src/apps/sentinel/sentinel.cpp | 14
-rw-r--r-- configdefinitions/src/vespa/CMakeLists.txt | 2
-rw-r--r-- configdefinitions/src/vespa/athenz-provider-service.def | 37
-rw-r--r-- configdefinitions/src/vespa/ilscripts.def | 2
-rw-r--r-- configserver/src/main/resources/configserver-app/services.xml | 6
-rw-r--r-- container-core/src/main/java/com/yahoo/processing/request/CompoundName.java | 70
-rw-r--r-- container-core/src/test/java/com/yahoo/processing/request/CompoundNameTestCase.java | 63
-rw-r--r-- container-search/abi-spec.json | 2
-rw-r--r-- container-search/src/main/java/com/yahoo/prelude/query/parser/SimpleParser.java | 4
-rw-r--r-- container-search/src/main/java/com/yahoo/search/dispatch/InvokerFactory.java | 2
-rw-r--r-- container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/Group.java | 2
-rw-r--r-- container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java | 17
-rw-r--r-- container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchGroups.java | 13
-rw-r--r-- container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchGroupsImpl.java | 44
-rw-r--r-- container-search/src/main/java/com/yahoo/search/handler/Json2SingleLevelMap.java | 8
-rw-r--r-- container-search/src/main/java/com/yahoo/search/schema/RankProfile.java | 11
-rw-r--r-- container-search/src/main/java/com/yahoo/search/schema/SchemaInfoConfigurer.java | 7
-rw-r--r-- container-search/src/main/java/com/yahoo/search/significance/SignificanceSearcher.java | 59
-rw-r--r-- container-search/src/main/resources/configdefinitions/container.search.schema-info.def | 1
-rw-r--r-- container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterCoverageTest.java | 17
-rw-r--r-- container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterTest.java | 6
-rw-r--r-- container-search/src/test/java/com/yahoo/search/rendering/JsonRendererTestCase.java | 2
-rw-r--r-- container-search/src/test/java/com/yahoo/search/schema/SchemaInfoTester.java | 4
-rw-r--r-- container-search/src/test/java/com/yahoo/search/significance/test/SignificanceSearcherTest.java | 54
-rw-r--r-- container-search/src/test/java/com/yahoo/vespa/streamingvisitors/StreamingSearcherTestCase.java | 1
-rw-r--r-- container-search/src/test/resources/config/with_significance/rank-profiles.cfg | 3
-rw-r--r-- dependency-versions/pom.xml | 16
-rw-r--r-- docprocs/src/main/java/com/yahoo/docprocs/indexing/ScriptManager.java | 2
-rw-r--r-- document/src/main/java/com/yahoo/document/json/JsonReader.java | 4
-rw-r--r-- document/src/main/java/com/yahoo/document/json/LazyTokenBuffer.java | 4
-rw-r--r-- document/src/main/java/com/yahoo/document/json/TokenBuffer.java | 2
-rw-r--r-- document/src/main/java/com/yahoo/document/json/document/DocumentParser.java | 13
-rw-r--r-- flags/src/main/java/com/yahoo/vespa/flags/Flags.java | 32
-rw-r--r-- flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java | 6
-rw-r--r-- indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/ExactExpression.java | 32
-rw-r--r-- indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/TokenizeExpression.java | 5
-rw-r--r-- indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/linguistics/AnnotatorConfig.java | 27
-rw-r--r-- indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotator.java | 23
-rw-r--r-- indexinglanguage/src/main/javacc/IndexingParser.jj | 14
-rw-r--r-- indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/ExactTestCase.java | 9
-rw-r--r-- indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/TokenizeTestCase.java | 11
-rw-r--r-- indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/linguistics/AnnotatorConfigTestCase.java | 5
-rw-r--r-- indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotatorTestCase.java | 2
-rw-r--r-- indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/parser/ExpressionTestCase.java | 2
-rw-r--r-- messagebus/src/main/java/com/yahoo/messagebus/DynamicThrottlePolicy.java | 4
-rw-r--r-- metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/MetricsParser.java | 33
-rw-r--r-- metrics/src/main/java/ai/vespa/metrics/set/Vespa9VespaMetricSet.java | 4
-rw-r--r-- metrics/src/main/java/ai/vespa/metrics/set/VespaMetricSet.java | 8
-rw-r--r-- metrics/src/tests/CMakeLists.txt | 1
-rw-r--r-- metrics/src/tests/stresstest.cpp | 137
-rw-r--r-- model-integration/abi-spec.json | 2
-rw-r--r-- model-integration/src/main/java/ai/vespa/llm/clients/LocalLLM.java | 38
-rw-r--r-- model-integration/src/main/java/ai/vespa/rankingexpression/importer/lightgbm/LightGBMImporter.java | 4
-rwxr-xr-x model-integration/src/main/resources/configdefinitions/llm-local-client.def | 7
-rw-r--r-- model-integration/src/test/java/ai/vespa/llm/clients/LocalLLMTest.java | 23
-rw-r--r-- parent/pom.xml | 6
-rw-r--r-- predicate-search-core/src/main/java/com/yahoo/search/predicate/PredicateQueryParser.java | 11
-rw-r--r-- searchcore/src/vespa/searchcore/proton/documentmetastore/lid_allocator.cpp | 3
-rw-r--r-- searchcore/src/vespa/searchcore/proton/server/rpc_hooks.cpp | 10
-rw-r--r-- searchlib/src/tests/queryeval/iterator_benchmark/intermediate_blueprint_factory.cpp | 42
-rw-r--r-- searchlib/src/tests/queryeval/iterator_benchmark/intermediate_blueprint_factory.h | 25
-rw-r--r-- searchlib/src/tests/queryeval/iterator_benchmark/iterator_benchmark_test.cpp | 100
-rw-r--r-- searchlib/src/tests/util/token_extractor/token_extractor_test.cpp | 2
-rw-r--r-- searchlib/src/vespa/searchlib/attribute/attribute_blueprint_factory.cpp | 4
-rw-r--r-- searchlib/src/vespa/searchlib/fef/test/matchdatabuilder.cpp | 38
-rw-r--r-- searchlib/src/vespa/searchlib/fef/test/matchdatabuilder.h | 10
-rw-r--r-- searchlib/src/vespa/searchlib/memoryindex/field_index.cpp | 15
-rw-r--r-- searchlib/src/vespa/searchlib/memoryindex/field_index.h | 8
-rw-r--r-- searchlib/src/vespa/searchlib/queryeval/blueprint.cpp | 13
-rw-r--r-- searchlib/src/vespa/searchlib/queryeval/flow_tuning.h | 6
-rw-r--r-- searchlib/src/vespa/searchlib/queryeval/simple_phrase_search.cpp | 10
-rw-r--r-- searchlib/src/vespa/searchlib/tensor/euclidean_distance.cpp | 3
-rw-r--r-- searchlib/src/vespa/searchlib/util/token_extractor.cpp | 2
-rw-r--r-- searchsummary/src/tests/docsummary/tokens_converter/tokens_converter_test.cpp | 2
-rw-r--r-- service-monitor/src/main/java/com/yahoo/vespa/service/health/StateV1HealthMonitor.java | 1
-rw-r--r-- storage/src/tests/common/testhelper.cpp | 20
-rw-r--r-- storage/src/tests/common/teststorageapp.cpp | 5
-rw-r--r-- storage/src/tests/persistence/filestorage/filestormanagertest.cpp | 31
-rw-r--r-- storage/src/tests/persistence/mergehandlertest.cpp | 6
-rw-r--r-- vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zts/bindings/AccessTokenResponseEntity.java | 5
-rw-r--r-- vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identity/SiaIdentityProvider.java | 4
-rw-r--r-- vespa-athenz/src/main/java/com/yahoo/vespa/athenz/utils/SiaUtils.java | 6
-rw-r--r-- vespa-athenz/src/test/java/com/yahoo/vespa/athenz/utils/SiaUtilsTest.java | 1
-rw-r--r-- vespa-dependencies-enforcer/allowed-maven-dependencies.txt | 2
-rw-r--r-- vespa-feed-client-api/src/main/java/ai/vespa/feed/client/JsonFeeder.java | 14
-rw-r--r-- vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/DynamicThrottler.java | 28
-rw-r--r-- vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpFeedClient.java | 4
-rw-r--r-- vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/StaticThrottler.java | 2
-rw-r--r-- vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/DynamicThrottlerTest.java | 30
-rw-r--r-- vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/HttpRequestStrategyTest.java | 5
-rw-r--r-- vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java | 7
-rw-r--r-- vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java | 15
-rw-r--r-- vespajlib/src/main/java/com/yahoo/concurrent/maintenance/Maintainer.java | 2
-rw-r--r-- vespajlib/src/test/java/com/yahoo/slime/JsonBenchmark.java | 2
-rw-r--r-- vespalog/src/logger/runserver.cpp | 35
-rw-r--r-- vespamalloc/src/vespamalloc/malloc/mmappool.cpp | 4
-rw-r--r-- vespamalloc/src/vespamalloc/util/callstack.cpp | 44
-rw-r--r-- vespamalloc/src/vespamalloc/util/callstack.h | 2
-rw-r--r-- zkfacade/src/main/java/com/yahoo/vespa/curator/stats/LatencyMetrics.java | 2
-rw-r--r-- zookeeper-server/CMakeLists.txt | 2
-rw-r--r-- zookeeper-server/pom.xml | 1
-rw-r--r-- zookeeper-server/zookeeper-server-3.9.2/CMakeLists.txt | 2
-rw-r--r-- zookeeper-server/zookeeper-server-3.9.2/pom.xml | 99
-rw-r--r-- zookeeper-server/zookeeper-server-3.9.2/src/main/java/com/yahoo/vespa/zookeeper/ConfigServerZooKeeperServer.java | 45
-rw-r--r-- zookeeper-server/zookeeper-server-3.9.2/src/main/java/com/yahoo/vespa/zookeeper/ReconfigurableVespaZooKeeperServer.java | 49
-rw-r--r-- zookeeper-server/zookeeper-server-3.9.2/src/main/java/com/yahoo/vespa/zookeeper/VespaMtlsAuthenticationProvider.java | 48
-rw-r--r-- zookeeper-server/zookeeper-server-3.9.2/src/main/java/com/yahoo/vespa/zookeeper/VespaQuorumPeer.java | 60
-rw-r--r-- zookeeper-server/zookeeper-server-3.9.2/src/main/java/com/yahoo/vespa/zookeeper/VespaZooKeeperAdminImpl.java | 96
-rw-r--r-- zookeeper-server/zookeeper-server-3.9.2/src/main/java/com/yahoo/vespa/zookeeper/VespaZooKeeperServerImpl.java | 56
-rw-r--r-- zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/common/ClientX509Util.java | 230
-rw-r--r-- zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/common/NetUtils.java | 94
-rw-r--r-- zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/SyncRequestProcessor.java | 353
-rw-r--r-- zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/VespaNettyServerCnxnFactory.java | 37
-rw-r--r-- zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/ZooKeeperServer.java | 2412
-rw-r--r-- zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/quorum/LeaderZooKeeperServer.java | 309
-rw-r--r-- zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/quorum/Learner.java | 928
-rw-r--r-- zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/quorum/LearnerZooKeeperServer.java | 180
-rw-r--r-- zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/quorum/ObserverZooKeeperServer.java | 136
-rw-r--r-- zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/quorum/QuorumPeer.java | 2711
-rw-r--r-- zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/quorum/ReadOnlyZooKeeperServer.java | 236
-rw-r--r-- zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/quorum/SendAckRequestProcessor.java | 83
-rw-r--r-- zookeeper-server/zookeeper-server/CMakeLists.txt | 4
-rw-r--r-- zookeeper-server/zookeeper-server/pom.xml | 4
-rw-r--r-- zookeeper-server/zookeeper-server/src/main/java/com/yahoo/vespa/zookeeper/VespaZooKeeperAdminImpl.java | 1
-rw-r--r-- zookeeper-server/zookeeper-server/src/main/java/org/apache/zookeeper/server/ZooKeeperServer.java | 16
-rw-r--r-- zookeeper-server/zookeeper-server/src/main/java/org/apache/zookeeper/server/quorum/Learner.java | 3
225 files changed, 1717 insertions, 9003 deletions
diff --git a/Gemfile.lock b/Gemfile.lock
index e40e7b96017..b98be0cafcf 100644
--- a/Gemfile.lock
+++ b/Gemfile.lock
@@ -5,16 +5,17 @@ GEM
addressable (2.8.6)
public_suffix (>= 2.0.2, < 6.0)
afm (0.2.2)
- async (2.6.4)
+ async (2.6.5)
console (~> 1.10)
fiber-annotation
io-event (~> 1.1)
timers (~> 4.1)
colorator (1.1.0)
concurrent-ruby (1.2.2)
- console (1.23.2)
+ console (1.23.7)
fiber-annotation
fiber-local
+ json
em-websocket (0.5.3)
eventmachine (>= 0.12.9)
http_parser.rb (~> 0)
@@ -27,7 +28,7 @@ GEM
forwardable-extended (2.6.0)
google-protobuf (3.24.4-x86_64-linux)
hashery (2.1.2)
- html-proofer (5.0.8)
+ html-proofer (5.0.9)
addressable (~> 2.3)
async (~> 2.1)
nokogiri (~> 1.13)
@@ -39,7 +40,7 @@ GEM
http_parser.rb (0.8.0)
i18n (1.14.1)
concurrent-ruby (~> 1.0)
- io-event (1.3.2)
+ io-event (1.3.3)
jekyll (4.3.3)
addressable (~> 2.4)
colorator (~> 1.0)
@@ -64,6 +65,7 @@ GEM
jekyll (>= 3.8, < 5.0)
jekyll-watch (2.2.1)
listen (~> 3.0)
+ json (2.7.2)
kramdown (2.4.0)
rexml
kramdown-parser-gfm (1.1.0)
@@ -77,7 +79,7 @@ GEM
jekyll (>= 3.5, < 5.0)
jekyll-feed (~> 0.9)
jekyll-seo-tag (~> 2.1)
- nokogiri (1.16.2-x86_64-linux)
+ nokogiri (1.16.4-x86_64-linux)
racc (~> 1.4)
pathutil (0.16.2)
forwardable-extended (~> 2.6)
@@ -87,7 +89,7 @@ GEM
hashery (~> 2.0)
ruby-rc4
ttfunk
- public_suffix (5.0.4)
+ public_suffix (5.0.5)
racc (1.7.3)
rainbow (3.1.1)
rake (13.2.1)
@@ -104,12 +106,12 @@ GEM
unicode-display_width (>= 1.1.1, < 3)
timers (4.3.5)
ttfunk (1.7.0)
- typhoeus (1.4.0)
+ typhoeus (1.4.1)
ethon (>= 0.9.0)
unicode-display_width (2.4.2)
webrick (1.8.1)
yell (2.2.2)
- zeitwerk (2.6.11)
+ zeitwerk (2.6.13)
PLATFORMS
x86_64-linux
diff --git a/build_settings.cmake b/build_settings.cmake
index 1549ac83c74..e046d7f71f3 100644
--- a/build_settings.cmake
+++ b/build_settings.cmake
@@ -65,6 +65,9 @@ endif()
set(VESPA_ATOMIC_LIB "atomic")
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang")
set(CXX_SPECIFIC_WARN_OPTS "-Wnon-virtual-dtor -Wformat-security -Wno-overloaded-virtual")
+ if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang" AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 18.0)
+ set(CXX_SPECIFIC_WARN_OPTS "${CXX_SPECIFIC_WARN_OPTS} -Wno-error=vla-cxx-extension")
+ endif()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-delete-null-pointer-checks -fsized-deallocation")
if(CMAKE_HOST_SYSTEM_NAME STREQUAL "Darwin")
set(VESPA_ATOMIC_LIB "")
diff --git a/client/README.md b/client/README.md
index f51db87d631..e730614230f 100644
--- a/client/README.md
+++ b/client/README.md
@@ -39,7 +39,7 @@ This is a [work-in-progress javascript app](js/app) for querying a Vespa applica
<!-- ToDo: move this / demote this somehow -->
-### vespa_query_dsl
+### vespa\_query\_dsl
This lib is used for composing Vespa
[YQL queries](https://docs.vespa.ai/en/reference/query-language-reference.html).
diff --git a/client/go/go.mod b/client/go/go.mod
index ba0af5a763e..7117d5ef334 100644
--- a/client/go/go.mod
+++ b/client/go/go.mod
@@ -16,8 +16,8 @@ require (
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.9.0
github.com/zalando/go-keyring v0.2.4
- golang.org/x/net v0.24.0
- golang.org/x/sys v0.19.0
+ golang.org/x/net v0.25.0
+ golang.org/x/sys v0.20.0
gopkg.in/yaml.v3 v3.0.1
)
@@ -31,7 +31,7 @@ require (
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect
- golang.org/x/term v0.19.0 // indirect
- golang.org/x/text v0.14.0 // indirect
+ golang.org/x/term v0.20.0 // indirect
+ golang.org/x/text v0.15.0 // indirect
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
)
diff --git a/client/go/go.sum b/client/go/go.sum
index d985c9e7ffc..e98a14f862a 100644
--- a/client/go/go.sum
+++ b/client/go/go.sum
@@ -75,6 +75,8 @@ golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
+golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
+golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -91,6 +93,8 @@ golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
+golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8=
golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=
golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
@@ -103,8 +107,12 @@ golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
+golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=
+golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
+golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/client/go/internal/vespa/document/throttler.go b/client/go/internal/vespa/document/throttler.go
index 39900156563..3eb0ccd17f6 100644
--- a/client/go/internal/vespa/document/throttler.go
+++ b/client/go/internal/vespa/document/throttler.go
@@ -37,8 +37,8 @@ type dynamicThrottler struct {
func newThrottler(connections int, nowFunc func() time.Time) *dynamicThrottler {
var (
- minInflight = 16 * int64(connections)
- maxInflight = 256 * minInflight // 4096 max streams per connection on the server side
+ minInflight = 2 * int64(connections)
+ maxInflight = 256 * minInflight // 512 max streams per connection on the server side
)
t := &dynamicThrottler{
minInflight: minInflight,
@@ -49,7 +49,7 @@ func newThrottler(connections int, nowFunc func() time.Time) *dynamicThrottler {
start: nowFunc(),
now: nowFunc,
}
- t.targetInflight.Store(8 * minInflight)
+ t.targetInflight.Store(minInflight)
t.targetTimesTen.Store(10 * maxInflight)
return t
}
@@ -57,7 +57,7 @@ func newThrottler(connections int, nowFunc func() time.Time) *dynamicThrottler {
func NewThrottler(connections int) Throttler { return newThrottler(connections, time.Now) }
func (t *dynamicThrottler) Sent() {
- currentInflight := t.targetInflight.Load()
+ currentInflight := t.TargetInflight()
t.sent++
if t.sent*t.sent*t.sent < 100*currentInflight*currentInflight {
return
@@ -73,8 +73,12 @@ func (t *dynamicThrottler) Sent() {
t.throughputs[index] = currentThroughput
// Loop over throughput measurements and pick the one which optimises throughput and latency.
- choice := float64(currentInflight)
+ best := float64(currentInflight)
maxObjective := float64(-1)
+ choice := 0
+ j := -1
+ k := -1
+ s := 0.0
for i := len(t.throughputs) - 1; i >= 0; i-- {
if t.throughputs[i] == 0 {
continue // Skip unknown values
@@ -83,10 +87,25 @@ func (t *dynamicThrottler) Sent() {
objective := t.throughputs[i] * math.Pow(inflight, throttlerWeight-1) // Optimise throughput (weight), but also latency (1 - weight)
if objective > maxObjective {
maxObjective = objective
- choice = inflight
+ best = inflight
+ choice = i
}
+ // Additionally, smooth the throughput values, to reduce the impact of noise, and reduce jumpiness
+ if j != -1 {
+ u := t.throughputs[j]
+ if k != -1 {
+ t.throughputs[j] = (2*u + t.throughputs[i] + s) / 4
+ }
+ s = u
+ }
+ k = j
+ j = i
+ }
+ target := int64((rand.Float64()*0.40+0.84)*best + rand.Float64()*4 - 1) // Random walk, skewed towards increase
+ // If the best inflight is at the high end of the known, we override the random walk to speed up upwards exploration
+ if choice == j && choice+1 < len(t.throughputs) {
+ target = int64(1 + float64(t.minInflight)*math.Pow(256, (float64(choice)+1.5)/float64(len(t.throughputs))))
}
- target := int64((rand.Float64()*0.20 + 0.92) * choice) // Random walk, skewed towards increase
t.targetInflight.Store(max(t.minInflight, min(t.maxInflight, target)))
}
diff --git a/client/go/internal/vespa/document/throttler_test.go b/client/go/internal/vespa/document/throttler_test.go
index 03f0bc75bdc..b386e0d5105 100644
--- a/client/go/internal/vespa/document/throttler_test.go
+++ b/client/go/internal/vespa/document/throttler_test.go
@@ -9,14 +9,19 @@ import (
func TestThrottler(t *testing.T) {
clock := &manualClock{tick: time.Second}
tr := newThrottler(8, clock.now)
- for i := 0; i < 100; i++ {
+
+ if got, want := tr.TargetInflight(), int64(16); got != want {
+ t.Errorf("got TargetInflight() = %d, but want %d", got, want)
+ }
+ for i := 0; i < 30; i++ {
tr.Sent()
+ tr.Success()
}
- if got, want := tr.TargetInflight(), int64(1024); got != want {
+ if got, want := tr.TargetInflight(), int64(18); got != want {
t.Errorf("got TargetInflight() = %d, but want %d", got, want)
}
- tr.Throttled(5)
- if got, want := tr.TargetInflight(), int64(128); got != want {
+ tr.Throttled(34)
+ if got, want := tr.TargetInflight(), int64(17); got != want {
t.Errorf("got TargetInflight() = %d, but want %d", got, want)
}
}
diff --git a/client/js/app/package.json b/client/js/app/package.json
index 6de98422514..0725a768ab5 100644
--- a/client/js/app/package.json
+++ b/client/js/app/package.json
@@ -38,7 +38,7 @@
"prettier": "3",
"pretty-quick": "^4.0.0",
"react-router-dom": "^6",
- "use-context-selector": "^1",
+ "use-context-selector": "^2.0.0",
"vite": "^5.0.5"
},
"jest": {
diff --git a/client/js/app/yarn.lock b/client/js/app/yarn.lock
index c5ddc285891..3b6d996ff64 100644
--- a/client/js/app/yarn.lock
+++ b/client/js/app/yarn.lock
@@ -1315,10 +1315,10 @@
dependencies:
"@babel/runtime" "^7.13.10"
-"@remix-run/router@1.16.0":
- version "1.16.0"
- resolved "https://registry.yarnpkg.com/@remix-run/router/-/router-1.16.0.tgz#0e10181e5fec1434eb071a9bc4bdaac843f16dcc"
- integrity sha512-Quz1KOffeEf/zwkCBM3kBtH4ZoZ+pT3xIXBG4PPW/XFtDP7EGhtTiC2+gpL9GnR7+Qdet5Oa6cYSvwKYg6kN9Q==
+"@remix-run/router@1.16.1":
+ version "1.16.1"
+ resolved "https://registry.yarnpkg.com/@remix-run/router/-/router-1.16.1.tgz#73db3c48b975eeb06d0006481bde4f5f2d17d1cd"
+ integrity sha512-es2g3dq6Nb07iFxGk5GuHN20RwBZOsuDQN7izWIisUcv9r+d2C5jQxqmgkdebXgReWfiyUabcki6Fg77mSNrig==
"@rollup/rollup-android-arm-eabi@4.17.2":
version "4.17.2"
@@ -4718,19 +4718,19 @@ react-refresh@^0.14.0:
integrity sha512-wViHqhAd8OHeLS/IRMJjTSDHF3U9eWi62F/MledQGPdJGDhodXJ9PBLNGr6WWL7qlH12Mt3TyTpbS+hGXMjCzQ==
react-router-dom@^6:
- version "6.23.0"
- resolved "https://registry.yarnpkg.com/react-router-dom/-/react-router-dom-6.23.0.tgz#8b80ad92ad28f4dc38972e92d84b4c208150545a"
- integrity sha512-Q9YaSYvubwgbal2c9DJKfx6hTNoBp3iJDsl+Duva/DwxoJH+OTXkxGpql4iUK2sla/8z4RpjAm6EWx1qUDuopQ==
+ version "6.23.1"
+ resolved "https://registry.yarnpkg.com/react-router-dom/-/react-router-dom-6.23.1.tgz#30cbf266669693e9492aa4fc0dde2541ab02322f"
+ integrity sha512-utP+K+aSTtEdbWpC+4gxhdlPFwuEfDKq8ZrPFU65bbRJY+l706qjR7yaidBpo3MSeA/fzwbXWbKBI6ftOnP3OQ==
dependencies:
- "@remix-run/router" "1.16.0"
- react-router "6.23.0"
+ "@remix-run/router" "1.16.1"
+ react-router "6.23.1"
-react-router@6.23.0:
- version "6.23.0"
- resolved "https://registry.yarnpkg.com/react-router/-/react-router-6.23.0.tgz#2f2d7492c66a6bdf760be4c6bdf9e1d672fa154b"
- integrity sha512-wPMZ8S2TuPadH0sF5irFGjkNLIcRvOSaEe7v+JER8508dyJumm6XZB1u5kztlX0RVq6AzRVndzqcUh6sFIauzA==
+react-router@6.23.1:
+ version "6.23.1"
+ resolved "https://registry.yarnpkg.com/react-router/-/react-router-6.23.1.tgz#d08cbdbd9d6aedc13eea6e94bc6d9b29cb1c4be9"
+ integrity sha512-fzcOaRF69uvqbbM7OhvQyBTFDVrrGlsFdS3AL+1KfIBtGETibHzi3FkoTRyiDJnWNc2VxrfvR+657ROHjaNjqQ==
dependencies:
- "@remix-run/router" "1.16.0"
+ "@remix-run/router" "1.16.1"
react-textarea-autosize@8.3.4:
version "8.3.4"
@@ -5484,10 +5484,10 @@ use-composed-ref@^1.3.0:
resolved "https://registry.yarnpkg.com/use-composed-ref/-/use-composed-ref-1.3.0.tgz#3d8104db34b7b264030a9d916c5e94fbe280dbda"
integrity sha512-GLMG0Jc/jiKov/3Ulid1wbv3r54K9HlMW29IWcDFPEqFkSO2nS0MuefWgMJpeHQ9YJeXDL3ZUF+P3jdXlZX/cQ==
-use-context-selector@^1:
- version "1.4.4"
- resolved "https://registry.yarnpkg.com/use-context-selector/-/use-context-selector-1.4.4.tgz#f5d65c7fcd78f994cb33cacd57651007a40595c0"
- integrity sha512-pS790zwGxxe59GoBha3QYOwk8AFGp4DN6DOtH+eoqVmgBBRXVx4IlPDhJmmMiNQAgUaLlP+58aqRC3A4rdaSjg==
+use-context-selector@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.yarnpkg.com/use-context-selector/-/use-context-selector-2.0.0.tgz#3b5dafec7aa947c152d4f0aa7f250e99a205df3d"
+ integrity sha512-owfuSmUNd3eNp3J9CdDl0kMgfidV+MkDvHPpvthN5ThqM+ibMccNE0k+Iq7TWC6JPFvGZqanqiGCuQx6DyV24g==
use-isomorphic-layout-effect@^1.1.1:
version "1.1.2"
diff --git a/clustercontroller-apps/src/main/java/com/yahoo/vespa/clustercontroller/apps/clustercontroller/ClusterController.java b/clustercontroller-apps/src/main/java/com/yahoo/vespa/clustercontroller/apps/clustercontroller/ClusterController.java
index 7e0c6fe3f63..114b88f03a8 100644
--- a/clustercontroller-apps/src/main/java/com/yahoo/vespa/clustercontroller/apps/clustercontroller/ClusterController.java
+++ b/clustercontroller-apps/src/main/java/com/yahoo/vespa/clustercontroller/apps/clustercontroller/ClusterController.java
@@ -11,11 +11,12 @@ import com.yahoo.vespa.clustercontroller.core.RemoteClusterControllerTaskSchedul
import com.yahoo.vespa.clustercontroller.core.restapiv2.ClusterControllerStateRestAPI;
import com.yahoo.vespa.clustercontroller.core.status.StatusHandler;
import com.yahoo.vespa.zookeeper.server.VespaZooKeeperServer;
+
+import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
import java.util.logging.Logger;
/**
@@ -27,9 +28,10 @@ public class ClusterController extends AbstractComponent
private static final Logger log = Logger.getLogger(ClusterController.class.getName());
private final JDiscMetricWrapper metricWrapper;
+ private final Object monitor = new Object();
private final Map<String, FleetController> controllers = new TreeMap<>();
private final Map<String, StatusHandler.ContainerStatusPageServer> status = new TreeMap<>();
- private final AtomicInteger referents = new AtomicInteger();
+ private final Map<String, Integer> referents = new HashMap<>();
private final AtomicBoolean shutdown = new AtomicBoolean();
/**
@@ -44,9 +46,9 @@ public class ClusterController extends AbstractComponent
}
public void setOptions(FleetControllerOptions options, Metric metricImpl) throws Exception {
- referents.incrementAndGet();
metricWrapper.updateMetricImplementation(metricImpl);
- synchronized (controllers) {
+ synchronized (monitor) {
+ referents.merge(options.clusterName(), 1, Integer::sum);
FleetController controller = controllers.get(options.clusterName());
if (controller == null) {
controller = FleetController.create(options, metricWrapper);
@@ -68,21 +70,34 @@ public class ClusterController extends AbstractComponent
* we must also let the last configurer shut down this controller, to ensure this is shut down
* before the ZK server it had injected from the configurers.
*/
- void countdown() {
- if (referents.decrementAndGet() == 0)
- shutdown();
+ void countdown(String clusterName) {
+ synchronized (monitor) {
+ referents.compute(clusterName, (__, count) -> {
+ if (count == null) throw new IllegalStateException("trying to remove unknown cluster: " + clusterName);
+ if (count == 1) {
+ shutDownController(controllers.remove(clusterName));
+ status.remove(clusterName);
+ return null;
+ }
+ return count - 1;
+ });
+ }
+ }
+
+ private void shutDownController(FleetController controller) {
+ if (controller == null) return;
+ try {
+ controller.shutdown();
+ } catch (Exception e) {
+ log.warning("Failed to shut down fleet controller: " + e.getMessage());
+ }
}
void shutdown() {
if (shutdown.compareAndSet(false, true)) {
- synchronized (controllers) {
+ synchronized (monitor) {
for (FleetController controller : controllers.values()) {
- try {
- shutdownController(controller);
- }
- catch (Exception e) {
- log.warning("Failed to shut down fleet controller: " + e.getMessage());
- }
+ shutDownController(controller);
}
}
}
@@ -90,7 +105,7 @@ public class ClusterController extends AbstractComponent
@Override
public Map<String, RemoteClusterControllerTaskScheduler> getFleetControllers() {
- synchronized (controllers) {
+ synchronized (monitor) {
return new LinkedHashMap<>(controllers);
}
}
@@ -105,8 +120,4 @@ public class ClusterController extends AbstractComponent
return status;
}
- void shutdownController(FleetController controller) throws Exception {
- controller.shutdown();
- }
-
}
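
The ClusterController change above swaps a single shared AtomicInteger for a per-cluster reference count guarded by one monitor, so the last configurer of a given cluster tears down only that cluster's controller. A minimal sketch of the same Map.compute-based reference-counting pattern; the RefCountedRegistry name and the Runnable cleanup hook are hypothetical stand-ins, not part of the patch.

import java.util.HashMap;
import java.util.Map;

// Hypothetical illustration of per-key reference counting: acquire() bumps the count for a key,
// release() decrements it and runs a cleanup action when the last reference for that key is gone.
class RefCountedRegistry<K> {

    private final Object monitor = new Object();
    private final Map<K, Integer> referents = new HashMap<>();

    void acquire(K key) {
        synchronized (monitor) {
            referents.merge(key, 1, Integer::sum);
        }
    }

    void release(K key, Runnable cleanup) {
        synchronized (monitor) {
            referents.compute(key, (k, count) -> {
                if (count == null) throw new IllegalStateException("unknown key: " + k);
                if (count == 1) {   // last reference: clean up and drop the entry
                    cleanup.run();
                    return null;
                }
                return count - 1;
            });
        }
    }
}

Releasing the last holder of a key runs the cleanup while the lock is held, mirroring how countdown() above removes and shuts down the controller before dropping its map entries.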
diff --git a/clustercontroller-apps/src/main/java/com/yahoo/vespa/clustercontroller/apps/clustercontroller/ClusterControllerClusterConfigurer.java b/clustercontroller-apps/src/main/java/com/yahoo/vespa/clustercontroller/apps/clustercontroller/ClusterControllerClusterConfigurer.java
index 5a2034f0372..265a99e2f72 100644
--- a/clustercontroller-apps/src/main/java/com/yahoo/vespa/clustercontroller/apps/clustercontroller/ClusterControllerClusterConfigurer.java
+++ b/clustercontroller-apps/src/main/java/com/yahoo/vespa/clustercontroller/apps/clustercontroller/ClusterControllerClusterConfigurer.java
@@ -45,7 +45,7 @@ public class ClusterControllerClusterConfigurer extends AbstractComponent {
@Override
public void deconstruct() {
- if (controller != null) controller.countdown();
+ if (controller != null) controller.countdown(options.clusterName());
}
FleetControllerOptions getOptions() { return options; }
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/AggregatedClusterStats.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/AggregatedClusterStats.java
index 37698a3ad00..aa2a1d29ec0 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/AggregatedClusterStats.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/AggregatedClusterStats.java
@@ -10,4 +10,6 @@ public interface AggregatedClusterStats {
ContentClusterStats getStats();
+ ContentNodeStats getGlobalStats();
+
}
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStatsAggregator.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStatsAggregator.java
index f1c19bac9b6..6fb31cc1b1c 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStatsAggregator.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStatsAggregator.java
@@ -38,6 +38,9 @@ public class ClusterStatsAggregator {
// Maps the content node index to the content node stats for that node.
// This MUST be kept up-to-date with distributorToStats;
private final ContentClusterStats aggregatedStats;
+ // This is the aggregate of aggregates across content nodes, allowing a reader to
+ // get a O(1) view of all merges pending in the cluster.
+ private final ContentNodeStats globallyAggregatedNodeStats = new ContentNodeStats(-1);
ClusterStatsAggregator(Set<Integer> distributors, Set<Integer> storageNodes) {
this.distributors = distributors;
@@ -58,6 +61,10 @@ public class ClusterStatsAggregator {
return aggregatedStats;
}
+ @Override
+ public ContentNodeStats getGlobalStats() {
+ return globallyAggregatedNodeStats;
+ }
};
}
@@ -96,12 +103,14 @@ public class ClusterStatsAggregator {
ContentNodeStats statsToAdd = clusterStats.getNodeStats(nodeIndex);
if (statsToAdd != null) {
contentNode.add(statsToAdd);
+ globallyAggregatedNodeStats.add(statsToAdd);
}
if (prevClusterStats != null) {
ContentNodeStats statsToSubtract = prevClusterStats.getNodeStats(nodeIndex);
if (statsToSubtract != null) {
contentNode.subtract(statsToSubtract);
+ globallyAggregatedNodeStats.subtract(statsToSubtract);
}
}
}
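
The aggregator above keeps globallyAggregatedNodeStats current by adding each newly reported per-node sample and subtracting the previously stored one, which gives readers a cluster-wide total without rescanning every node. A small sketch of that add-new/subtract-old bookkeeping for a plain counter; the GlobalCounter class below is hypothetical and not part of the patch.

import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch: maintain a running global sum incrementally, so reads are O(1)
// and each update only costs "subtract previous sample, add new sample".
class GlobalCounter {

    private final Map<Integer, Long> latestPerNode = new HashMap<>(); // node index -> last reported value
    private long globalTotal = 0;

    void update(int nodeIndex, long newValue) {
        Long previous = latestPerNode.put(nodeIndex, newValue);
        if (previous != null) globalTotal -= previous; // retract the old contribution
        globalTotal += newValue;                       // apply the new one
    }

    long globalTotal() { return globalTotal; }
}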
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java
index 3e520d95d2c..3f7214c31e2 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java
@@ -542,6 +542,7 @@ public class FleetController implements NodeListener, SlobrokListener, SystemSta
didWork |= metricUpdater.forWork("processNextQueuedRemoteTask", this::processNextQueuedRemoteTask);
didWork |= metricUpdater.forWork("completeSatisfiedVersionDependentTasks", this::completeSatisfiedVersionDependentTasks);
didWork |= metricUpdater.forWork("maybePublishOldMetrics", this::maybePublishOldMetrics);
+ updateClusterSyncMetrics();
processingCycle = false;
++cycleCount;
@@ -563,6 +564,14 @@ public class FleetController implements NodeListener, SlobrokListener, SystemSta
}
}
+ private void updateClusterSyncMetrics() {
+ var stats = stateVersionTracker.getAggregatedClusterStats().getAggregatedStats();
+ if (stats.hasUpdatesFromAllDistributors()) {
+ GlobalBucketSyncStatsCalculator.clusterBucketsOutOfSyncRatio(stats.getGlobalStats())
+ .ifPresent(metricUpdater::updateClusterBucketsOutOfSyncRatio);
+ }
+ }
+
private boolean updateMasterElectionState() {
try {
return masterElectionHandler.watchMasterElection(database, databaseContext);
@@ -689,6 +698,7 @@ public class FleetController implements NodeListener, SlobrokListener, SystemSta
context.cluster = cluster;
context.currentConsolidatedState = consolidatedClusterState();
context.publishedClusterStateBundle = stateVersionTracker.getVersionedClusterStateBundle();
+ context.aggregatedClusterStats = stateVersionTracker.getAggregatedClusterStats().getAggregatedStats();
context.masterInfo = new MasterInterface() {
@Override public boolean isMaster() { return isMaster; }
@Override public Integer getMaster() { return masterElectionHandler.getMaster(); }
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/GlobalBucketSyncStatsCalculator.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/GlobalBucketSyncStatsCalculator.java
new file mode 100644
index 00000000000..0137ea2c29e
--- /dev/null
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/GlobalBucketSyncStatsCalculator.java
@@ -0,0 +1,45 @@
+// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.clustercontroller.core;
+
+import java.util.Optional;
+
+/**
+ * @author vekterli
+ */
+public class GlobalBucketSyncStatsCalculator {
+
+ /**
+ * Compute a value in [0, 1] representing how much of the cluster's data space is currently
+ * out of sync, i.e. pending merging. In other words, if the value is 1 all buckets are out
+ * of sync, and conversely if it's 0 all buckets are in sync. This number applies across bucket
+ * spaces.
+ *
+ * @param globalStats Globally aggregated content node statistics for the entire cluster.
+ * @return Optional containing a value [0, 1] representing the ratio of buckets pending merge
+ * in relation to the total number of buckets in the cluster, or an empty optional if
+ * the underlying global statistics contains invalid/incomplete information.
+ */
+ public static Optional<Double> clusterBucketsOutOfSyncRatio(ContentNodeStats globalStats) {
+ long totalBuckets = 0;
+ long pendingBuckets = 0;
+ for (var space : globalStats.getBucketSpaces().values()) {
+ if (!space.valid()) {
+ return Optional.empty();
+ }
+ totalBuckets += space.getBucketsTotal();
+ pendingBuckets += space.getBucketsPending();
+ }
+ // It's currently possible for the reported number of pending buckets to be greater than
+ // the number of total buckets. Example: this can happen if a bucket is present on a single
+ // node, but should have been replicated to 9 more nodes. Since counts are not normalized
+ // across content nodes for a given bucket, this will be counted as 9 pending and 1 total.
+ // Eventually this will settle as 0 pending and 10 total.
+ // TODO report node-normalized pending/total counts from distributors and use these.
+ pendingBuckets = Math.min(pendingBuckets, totalBuckets);
+ if (totalBuckets <= 0) {
+ return Optional.of(0.0); // No buckets; cannot be out of sync by definition
+ }
+ return Optional.of((double)pendingBuckets / (double)totalBuckets);
+ }
+
+}
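
As a worked example of the Javadoc above (numbers are hypothetical): 100 total / 10 pending buckets in the default space plus 50 total / 0 pending in the global space gives 10 / 150, about 0.067, and the clamp keeps the ratio at most 1.0 when reporting skew makes pending exceed total. A compact standalone sketch of the same arithmetic; the class name and the long[] representation are illustrative only.

import java.util.List;
import java.util.Optional;

// Hypothetical sketch of the out-of-sync ratio arithmetic described above.
// Each long[] holds {bucketsTotal, bucketsPending} for one bucket space.
class OutOfSyncRatioSketch {

    static Optional<Double> ratio(List<long[]> spaces) {
        long total = 0, pending = 0;
        for (long[] space : spaces) {
            total += space[0];
            pending += space[1];
        }
        pending = Math.min(pending, total);      // clamp: reporting skew can make pending exceed total
        if (total <= 0) return Optional.of(0.0); // no buckets, nothing can be out of sync
        return Optional.of((double) pending / (double) total);
    }

    public static void main(String[] args) {
        // default space: 100 total / 10 pending, global space: 50 total / 0 pending -> 10/150
        System.out.println(ratio(List.of(new long[]{100, 10}, new long[]{50, 0}))); // Optional[0.066...]
    }
}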
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/MetricUpdater.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/MetricUpdater.java
index 419cb652671..d149d4043e4 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/MetricUpdater.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/MetricUpdater.java
@@ -93,6 +93,10 @@ public class MetricUpdater {
metricReporter.set("is-master", isMaster ? 1 : 0);
}
+ public void updateClusterBucketsOutOfSyncRatio(double ratio) {
+ metricReporter.set("cluster-buckets-out-of-sync-ratio", ratio);
+ }
+
public void addTickTime(long millis, boolean didWork) {
if (didWork) {
metricReporter.set("busy-tick-time-ms", millis);
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/RemoteClusterControllerTask.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/RemoteClusterControllerTask.java
index efb161cebec..e1b774e64ff 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/RemoteClusterControllerTask.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/RemoteClusterControllerTask.java
@@ -17,6 +17,7 @@ public abstract class RemoteClusterControllerTask {
public MasterInterface masterInfo;
public NodeListener nodeListener;
public SlobrokListener slobrokListener;
+ public AggregatedClusterStats aggregatedClusterStats;
}
private final Object monitor = new Object();
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/restapiv2/Response.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/restapiv2/Response.java
index 636d01dbfa3..7af5f93fa21 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/restapiv2/Response.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/restapiv2/Response.java
@@ -76,7 +76,7 @@ public class Response {
{
protected final Map<String, String> attributes = new LinkedHashMap<>();
protected final Map<String, SubUnitList> subUnits = new LinkedHashMap<>();
- protected final Map<String, Long> metrics = new LinkedHashMap<>();
+ protected final Map<String, Number> metrics = new LinkedHashMap<>();
protected final Map<String, UnitState> stateMap = new LinkedHashMap<>();
protected DistributionState publishedState = null;
@@ -94,7 +94,7 @@ public class Response {
}
@Override
- public Map<String, Long> getMetricMap() { return metrics; }
+ public Map<String, Number> getMetricMap() { return metrics; }
@Override
public Map<String, UnitState> getStatePerType() { return stateMap; }
@Override
@@ -122,7 +122,7 @@ public class Response {
list.addUnit(unit, response);
return this;
}
- public EmptyResponse<T> addMetric(String name, Long value) {
+ public EmptyResponse<T> addMetric(String name, Number value) {
metrics.put(name, value);
return this;
}
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/restapiv2/requests/ClusterStateRequest.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/restapiv2/requests/ClusterStateRequest.java
index 1df37637dcf..3006effecd4 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/restapiv2/requests/ClusterStateRequest.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/restapiv2/requests/ClusterStateRequest.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.clustercontroller.core.restapiv2.requests;
import com.yahoo.vdslib.state.NodeType;
import com.yahoo.vespa.clustercontroller.core.ClusterStateBundle;
+import com.yahoo.vespa.clustercontroller.core.GlobalBucketSyncStatsCalculator;
import com.yahoo.vespa.clustercontroller.core.RemoteClusterControllerTask;
import com.yahoo.vespa.clustercontroller.core.restapiv2.Id;
import com.yahoo.vespa.clustercontroller.core.restapiv2.Request;
@@ -36,6 +37,11 @@ public class ClusterStateRequest extends Request<Response.ClusterResponse> {
}
}
result.setPublishedState(bundleToDistributionState(context.publishedClusterStateBundle));
+ if (context.aggregatedClusterStats.hasUpdatesFromAllDistributors()) {
+ var stats = context.aggregatedClusterStats.getGlobalStats();
+ var maybeRatio = GlobalBucketSyncStatsCalculator.clusterBucketsOutOfSyncRatio(stats);
+ maybeRatio.ifPresent(r -> result.addMetric("cluster-buckets-out-of-sync-ratio", r));
+ }
return result;
}
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterStatsAggregatorTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterStatsAggregatorTest.java
index aa47ce2ec82..14276c51416 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterStatsAggregatorTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterStatsAggregatorTest.java
@@ -33,6 +33,10 @@ public class ClusterStatsAggregatorTest {
assertEquals(expectedStats.build(), aggregator.getAggregatedStatsForDistributor(distributorIndex));
}
+ public void verifyGlobal(ContentNodeStatsBuilder expectedStats) {
+ assertEquals(expectedStats.build(), aggregator.getAggregatedStats().getGlobalStats());
+ }
+
boolean hasUpdatesFromAllDistributors() {
return aggregator.getAggregatedStats().hasUpdatesFromAllDistributors();
}
@@ -64,6 +68,10 @@ public class ClusterStatsAggregatorTest {
return Sets.newHashSet(indices);
}
+ private static ContentNodeStatsBuilder globalStatsBuilder() {
+ return ContentNodeStatsBuilder.forNode(-1);
+ }
+
@Test
void aggregator_handles_updates_to_single_distributor_and_content_node() {
Fixture f = new Fixture(distributorNodes(1), contentNodes(3));
@@ -72,6 +80,9 @@ public class ClusterStatsAggregatorTest {
.add(3, "global", 11, 2);
f.update(1, stats);
f.verify(stats);
+ f.verifyGlobal(globalStatsBuilder()
+ .add("default", 10, 1)
+ .add("global", 11, 2));
}
@Test
@@ -80,9 +91,13 @@ public class ClusterStatsAggregatorTest {
f.verify(new ContentClusterStatsBuilder()
.add(3, "default", 10 + 14, 1 + 5)
- .add(3, "global", 11 + 15, 2 + 6)
+ .add(3, "global", 11 + 15, 2 + 6)
.add(4, "default", 12 + 16, 3 + 7)
- .add(4, "global", 13 + 17, 4 + 8));
+ .add(4, "global", 13 + 17, 4 + 8));
+
+ f.verifyGlobal(globalStatsBuilder()
+ .add("default", (10 + 14) + (12 + 16), (1 + 5) + (3 + 7))
+ .add("global", (11 + 15) + (13 + 17), (2 + 6) + (4 + 8)));
}
@Test
@@ -94,28 +109,34 @@ public class ClusterStatsAggregatorTest {
f.update(2, new ContentClusterStatsBuilder().add(3, "default", 10, 1));
f.verify(new ContentClusterStatsBuilder().addInvalid(3, "default", 10, 1));
+ f.verifyGlobal(globalStatsBuilder().addInvalid("default", 10, 1));
f.update(1, new ContentClusterStatsBuilder().add(3, "default", 11, 2));
f.verify(new ContentClusterStatsBuilder().add(3, "default", 10 + 11, 1 + 2));
+ f.verifyGlobal(globalStatsBuilder().add("default", 10 + 11, 1 + 2));
f.update(2, new ContentClusterStatsBuilder().add(3, "default", 15, 6));
f.verify(new ContentClusterStatsBuilder().add(3, "default", 11 + 15, 2 + 6));
+ f.verifyGlobal(globalStatsBuilder().add("default", 11 + 15, 2 + 6));
f.update(1, new ContentClusterStatsBuilder().add(3, "default", 16, 7));
f.verify(new ContentClusterStatsBuilder().add(3, "default", 15 + 16, 6 + 7));
+ f.verifyGlobal(globalStatsBuilder().add("default", 15 + 16, 6 + 7));
f.update(2, new ContentClusterStatsBuilder().add(3, "default", 12, 3));
f.verify(new ContentClusterStatsBuilder().add(3, "default", 16 + 12, 7 + 3));
+ f.verifyGlobal(globalStatsBuilder().add("default", 16 + 12, 7 + 3));
}
@Test
- void aggregator_handles_more_content_nodes_that_distributors() {
+ void aggregator_handles_more_content_nodes_than_distributors() {
Fixture f = new Fixture(distributorNodes(1), contentNodes(3, 4));
ContentClusterStatsBuilder stats = new ContentClusterStatsBuilder()
.add(3, "default", 10, 1)
.add(4, "default", 11, 2);
f.update(1, stats);
f.verify(stats);
+ f.verifyGlobal(globalStatsBuilder().add("default", 10 + 11, 1 + 2));
}
@Test
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ContentNodeStatsBuilder.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ContentNodeStatsBuilder.java
index 9d4664a9362..34035793e75 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ContentNodeStatsBuilder.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ContentNodeStatsBuilder.java
@@ -13,7 +13,7 @@ public class ContentNodeStatsBuilder {
this.nodeIndex = nodeIndex;
}
- static ContentNodeStatsBuilder forNode(int nodeIndex) {
+ public static ContentNodeStatsBuilder forNode(int nodeIndex) {
return new ContentNodeStatsBuilder(nodeIndex);
}
@@ -21,12 +21,16 @@ public class ContentNodeStatsBuilder {
return add(bucketSpace, ContentNodeStats.BucketSpaceStats.of(bucketsTotal, bucketsPending));
}
+ public ContentNodeStatsBuilder addInvalid(String bucketSpace, long bucketsTotal, long bucketsPending) {
+ return add(bucketSpace, ContentNodeStats.BucketSpaceStats.invalid(bucketsTotal, bucketsPending));
+ }
+
public ContentNodeStatsBuilder add(String bucketSpace, ContentNodeStats.BucketSpaceStats bucketSpaceStats) {
stats.put(bucketSpace, bucketSpaceStats);
return this;
}
- ContentNodeStats build() {
+ public ContentNodeStats build() {
return new ContentNodeStats(nodeIndex, stats);
}
}
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/GlobalBucketSyncStatsCalculatorTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/GlobalBucketSyncStatsCalculatorTest.java
new file mode 100644
index 00000000000..d44aaa54a1d
--- /dev/null
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/GlobalBucketSyncStatsCalculatorTest.java
@@ -0,0 +1,59 @@
+// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.clustercontroller.core;
+
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+public class GlobalBucketSyncStatsCalculatorTest {
+
+ private static ContentNodeStatsBuilder globalStatsBuilder() {
+ return ContentNodeStatsBuilder.forNode(-1);
+ }
+
+ private static void assertComputedRatio(double expected, ContentNodeStatsBuilder statsBuilder) {
+ var maybeRatio = GlobalBucketSyncStatsCalculator.clusterBucketsOutOfSyncRatio(statsBuilder.build());
+ if (maybeRatio.isEmpty()) {
+ throw new IllegalArgumentException("Expected calculation to yield a value, but was empty");
+ }
+ assertEquals(expected, maybeRatio.get(), 0.00001);
+ }
+
+ private static void assertEmptyComputedRatio(ContentNodeStatsBuilder statsBuilder) {
+ var maybeRatio = GlobalBucketSyncStatsCalculator.clusterBucketsOutOfSyncRatio(statsBuilder.build());
+ assertTrue(maybeRatio.isEmpty());
+ }
+
+ @Test
+ void no_buckets_imply_fully_in_sync() {
+ // Can't have anything out of sync if you don't have anything to be out of sync with *taps side of head*
+ assertComputedRatio(0.0, globalStatsBuilder().add("default", 0, 0));
+ }
+
+ @Test
+ void no_pending_buckets_implies_fully_in_sync() {
+ assertComputedRatio(0.0, globalStatsBuilder().add("default", 100, 0));
+ assertComputedRatio(0.0, globalStatsBuilder().add("default", 100, 0).add("global", 50, 0));
+ }
+
+ @Test
+ void invalid_stats_returns_empty() {
+ assertEmptyComputedRatio(globalStatsBuilder().add("default", ContentNodeStats.BucketSpaceStats.invalid()));
+ assertEmptyComputedRatio(globalStatsBuilder()
+ .add("default", 100, 0)
+ .add("global", ContentNodeStats.BucketSpaceStats.invalid()));
+ }
+
+ @Test
+ void pending_buckets_return_expected_ratio() {
+ assertComputedRatio(0.50, globalStatsBuilder().add("default", 10, 5));
+ assertComputedRatio(0.80, globalStatsBuilder().add("default", 10, 8));
+ assertComputedRatio(0.10, globalStatsBuilder().add("default", 100, 10));
+ assertComputedRatio(0.01, globalStatsBuilder().add("default", 100, 1));
+ assertComputedRatio(0.05, globalStatsBuilder().add("default", 50, 5).add("global", 50, 0));
+ assertComputedRatio(0.05, globalStatsBuilder().add("default", 50, 0).add("global", 50, 5));
+ assertComputedRatio(0.10, globalStatsBuilder().add("default", 50, 5).add("global", 50, 5));
+ }
+
+}
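
For orientation, the expectations above pin down the semantics of GlobalBucketSyncStatsCalculator.clusterBucketsOutOfSyncRatio: buckets-total and buckets-pending are summed across all bucket spaces, the ratio pending/total is returned (0.0 when there are no buckets at all), and the result is empty whenever any bucket space reports invalid stats. A minimal sketch of that calculation, using our own stand-in types rather than the actual implementation:

import java.util.List;
import java.util.Optional;

final class OutOfSyncRatioSketch {

    // Stand-in for ContentNodeStats.BucketSpaceStats; these names are illustrative, not Vespa's API.
    record SpaceStats(long bucketsTotal, long bucketsPending, boolean valid) {}

    static Optional<Double> clusterBucketsOutOfSyncRatio(List<SpaceStats> spaces) {
        long total = 0;
        long pending = 0;
        for (SpaceStats s : spaces) {
            if (!s.valid()) return Optional.empty(); // invalid stats -> no ratio can be computed
            total += s.bucketsTotal();
            pending += s.bucketsPending();
        }
        if (total == 0) return Optional.of(0.0);     // no buckets -> nothing can be out of sync
        return Optional.of((double) pending / total);
    }

    public static void main(String[] args) {
        // Mirrors pending_buckets_return_expected_ratio: (5 + 5) / (50 + 50) = 0.10
        System.out.println(clusterBucketsOutOfSyncRatio(
                List.of(new SpaceStats(50, 5, true), new SpaceStats(50, 5, true))));
    }
}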
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/ClusterControllerMock.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/ClusterControllerMock.java
index d06cc730b3f..902b1bce24a 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/ClusterControllerMock.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/ClusterControllerMock.java
@@ -15,6 +15,8 @@ public class ClusterControllerMock implements RemoteClusterControllerTaskSchedul
private final int fleetControllerIndex;
Integer fleetControllerMaster;
private final StringBuilder events = new StringBuilder();
+ ContentNodeStats globalClusterStats = new ContentNodeStats(-1);
+ boolean enableGlobalStatsReporting = false;
ClusterControllerMock(ContentCluster cluster, ClusterState state,
ClusterStateBundle publishedClusterStateBundle,
@@ -88,6 +90,22 @@ public class ClusterControllerMock implements RemoteClusterControllerTaskSchedul
}
};
+ context.aggregatedClusterStats = new AggregatedClusterStats() {
+ @Override
+ public boolean hasUpdatesFromAllDistributors() {
+ return enableGlobalStatsReporting;
+ }
+
+ @Override
+ public ContentClusterStats getStats() {
+ return null;
+ }
+
+ @Override
+ public ContentNodeStats getGlobalStats() {
+ return globalClusterStats;
+ }
+ };
}
@Override
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/ClusterTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/ClusterTest.java
index e4b3c0b9f2c..cb1213542ce 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/ClusterTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/ClusterTest.java
@@ -1,6 +1,7 @@
// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.clustercontroller.core.restapiv2;
+import com.yahoo.vespa.clustercontroller.core.ContentNodeStatsBuilder;
import com.yahoo.vespa.clustercontroller.utils.staterestapi.response.UnitResponse;
import org.junit.jupiter.api.Test;
@@ -105,4 +106,45 @@ public class ClusterTest extends StateRestApiTest {
}""",
jsonWriter.createJson(response).toPrettyString());
}
+
+ @Test
+ void emit_cluster_stats_if_present() throws Exception {
+ setUp(true);
+ books.globalClusterStats.add(ContentNodeStatsBuilder.forNode(-1).add("default", 10, 4).build());
+ books.enableGlobalStatsReporting = true;
+ UnitResponse response = restAPI.getState(new StateRequest("books", 0));
+ assertEquals("""
+ {
+ "state" : {
+ "generated" : {
+ "state" : "up",
+ "reason" : ""
+ }
+ },
+ "metrics" : {
+ "cluster-buckets-out-of-sync-ratio" : 0.4
+ },
+ "service" : {
+ "storage" : {
+ "link" : "/cluster/v2/books/storage"
+ },
+ "distributor" : {
+ "link" : "/cluster/v2/books/distributor"
+ }
+ },
+ "distribution-states" : {
+ "published" : {
+ "baseline" : "distributor:4 storage:4",
+ "bucket-spaces" : [ {
+ "name" : "default",
+ "state" : "distributor:4 storage:4 .3.s:m"
+ }, {
+ "name" : "global",
+ "state" : "distributor:4 storage:4"
+ } ]
+ }
+ }
+ }""",
+ jsonWriter.createJson(response).toPrettyString());
+ }
}
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/StateRestApiTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/StateRestApiTest.java
index dfd9783ecef..1ad5f6828b7 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/StateRestApiTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/StateRestApiTest.java
@@ -30,7 +30,7 @@ import java.util.stream.Collectors;
public abstract class StateRestApiTest {
- private ClusterControllerMock books;
+ ClusterControllerMock books;
ClusterControllerMock music;
StateRestAPI restAPI;
JsonWriter jsonWriter = new JsonWriter();
diff --git a/clustercontroller-utils/src/main/java/com/yahoo/vespa/clustercontroller/utils/staterestapi/response/UnitMetrics.java b/clustercontroller-utils/src/main/java/com/yahoo/vespa/clustercontroller/utils/staterestapi/response/UnitMetrics.java
index f9876870873..f2c22b2dac5 100644
--- a/clustercontroller-utils/src/main/java/com/yahoo/vespa/clustercontroller/utils/staterestapi/response/UnitMetrics.java
+++ b/clustercontroller-utils/src/main/java/com/yahoo/vespa/clustercontroller/utils/staterestapi/response/UnitMetrics.java
@@ -5,6 +5,6 @@ import java.util.Map;
public interface UnitMetrics {
- Map<String, Long> getMetricMap();
+ Map<String, Number> getMetricMap();
}
diff --git a/clustercontroller-utils/src/test/java/com/yahoo/vespa/clustercontroller/utils/staterestapi/DummyStateApi.java b/clustercontroller-utils/src/test/java/com/yahoo/vespa/clustercontroller/utils/staterestapi/DummyStateApi.java
index a73e20b8755..9c39186855e 100644
--- a/clustercontroller-utils/src/test/java/com/yahoo/vespa/clustercontroller/utils/staterestapi/DummyStateApi.java
+++ b/clustercontroller-utils/src/test/java/com/yahoo/vespa/clustercontroller/utils/staterestapi/DummyStateApi.java
@@ -139,8 +139,8 @@ public class DummyStateApi implements StateRestAPI {
public UnitMetrics getMetrics() {
return new UnitMetrics() {
@Override
- public Map<String, Long> getMetricMap() {
- Map<String, Long> m = new LinkedHashMap<>();
+ public Map<String, Number> getMetricMap() {
+ Map<String, Number> m = new LinkedHashMap<>();
m.put("doc-count", (long) node.docCount);
return m;
}
diff --git a/config-application-package/src/main/java/com/yahoo/config/application/ConfigDefinitionDir.java b/config-application-package/src/main/java/com/yahoo/config/application/ConfigDefinitionDir.java
index d4b257f0ba9..1329befbc9d 100644
--- a/config-application-package/src/main/java/com/yahoo/config/application/ConfigDefinitionDir.java
+++ b/config-application-package/src/main/java/com/yahoo/config/application/ConfigDefinitionDir.java
@@ -11,7 +11,6 @@ import java.util.List;
* but they cannot conflict with the existing ones.
*
* @author Ulf Lilleengen
- * @since 5.1
*/
public class ConfigDefinitionDir {
private final File defDir;
diff --git a/config-application-package/src/main/java/com/yahoo/config/application/IncludeProcessor.java b/config-application-package/src/main/java/com/yahoo/config/application/IncludeProcessor.java
index bc48e7dd814..ac365fa5a3e 100644
--- a/config-application-package/src/main/java/com/yahoo/config/application/IncludeProcessor.java
+++ b/config-application-package/src/main/java/com/yahoo/config/application/IncludeProcessor.java
@@ -22,7 +22,6 @@ import static java.nio.charset.StandardCharsets.UTF_8;
* Handles preprocess:include statements and returns a Document which has all the include statements resolved
*
* @author hmusum
- * @since 5.22
*/
class IncludeProcessor implements PreProcessor {
diff --git a/config-application-package/src/main/java/com/yahoo/config/application/OverrideProcessor.java b/config-application-package/src/main/java/com/yahoo/config/application/OverrideProcessor.java
index bb456d95326..5f2046b1450 100644
--- a/config-application-package/src/main/java/com/yahoo/config/application/OverrideProcessor.java
+++ b/config-application-package/src/main/java/com/yahoo/config/application/OverrideProcessor.java
@@ -46,6 +46,7 @@ class OverrideProcessor implements PreProcessor {
private final Tags tags;
private static final String ID_ATTRIBUTE = "id";
+ private static final String IDREF_ATTRIBUTE = "idref";
private static final String INSTANCE_ATTRIBUTE = "instance";
private static final String ENVIRONMENT_ATTRIBUTE = "environment";
private static final String REGION_ATTRIBUTE = "region";
@@ -200,7 +201,7 @@ class OverrideProcessor implements PreProcessor {
/** Find the most specific element and remove all others. */
private void retainMostSpecific(Element parent, List<Element> children, Context context) {
- // Keep track of elements with highest number of matches (might be more than one element with same tag, need a list)
+ // Keep track of elements with the highest number of matches (might be more than one element with same tag, need a list)
List<Element> bestMatches = new ArrayList<>();
int bestMatch = 0;
for (Element child : children) {
@@ -307,42 +308,43 @@ class OverrideProcessor implements PreProcessor {
private Set<InstanceName> getInstances(Element element) {
String instance = element.getAttributeNS(XmlPreProcessor.deployNamespaceUri, INSTANCE_ATTRIBUTE);
- if (instance == null || instance.isEmpty()) return Set.of();
+ if (instance.isEmpty()) return Set.of();
return Arrays.stream(instance.split(" ")).map(InstanceName::from).collect(Collectors.toSet());
}
private Set<Environment> getEnvironments(Element element) {
String env = element.getAttributeNS(XmlPreProcessor.deployNamespaceUri, ENVIRONMENT_ATTRIBUTE);
- if (env == null || env.isEmpty()) return Set.of();
+ if (env.isEmpty()) return Set.of();
return Arrays.stream(env.split(" ")).map(Environment::from).collect(Collectors.toSet());
}
private Set<RegionName> getRegions(Element element) {
String reg = element.getAttributeNS(XmlPreProcessor.deployNamespaceUri, REGION_ATTRIBUTE);
- if (reg == null || reg.isEmpty()) return Set.of();
+ if (reg.isEmpty()) return Set.of();
return Arrays.stream(reg.split(" ")).map(RegionName::from).collect(Collectors.toSet());
}
private Set<CloudName> getClouds(Element element) {
String reg = element.getAttributeNS(XmlPreProcessor.deployNamespaceUri, CLOUD_ATTRIBUTE);
- if (reg == null || reg.isEmpty()) return Set.of();
+ if (reg.isEmpty()) return Set.of();
return Arrays.stream(reg.split(" ")).map(CloudName::from).collect(Collectors.toSet());
}
private Tags getTags(Element element) {
String env = element.getAttributeNS(XmlPreProcessor.deployNamespaceUri, TAGS_ATTRIBUTE);
- if (env == null || env.isEmpty()) return Tags.empty();
+ if (env.isEmpty()) return Tags.empty();
return Tags.fromString(env);
}
private Map<String, List<Element>> elementsByTagNameAndId(List<Element> children) {
Map<String, List<Element>> elementsByTagName = new LinkedHashMap<>();
- // Index by tag name
+ // Index by tag name, and optionally append "id" or "idref" to the key if they are set
for (Element child : children) {
String key = child.getTagName();
- if (child.hasAttribute(ID_ATTRIBUTE)) {
+ if (child.hasAttribute(ID_ATTRIBUTE))
key += child.getAttribute(ID_ATTRIBUTE);
- }
+ if (child.hasAttribute(IDREF_ATTRIBUTE))
+ key += child.getAttribute(IDREF_ATTRIBUTE);
if ( ! elementsByTagName.containsKey(key)) {
elementsByTagName.put(key, new ArrayList<>());
}
@@ -382,7 +384,7 @@ class OverrideProcessor implements PreProcessor {
}
/**
- * Represents environment and region in a given context.
+ * Represents environments, regions, instances, clouds and tags in a given context.
*/
private static final class Context {
diff --git a/config-application-package/src/main/java/com/yahoo/config/application/ValidationProcessor.java b/config-application-package/src/main/java/com/yahoo/config/application/ValidationProcessor.java
index b02ccc711c0..4dc40df61f4 100644
--- a/config-application-package/src/main/java/com/yahoo/config/application/ValidationProcessor.java
+++ b/config-application-package/src/main/java/com/yahoo/config/application/ValidationProcessor.java
@@ -11,8 +11,8 @@ public class ValidationProcessor implements PreProcessor {
@Override
public Document process(Document input) throws IOException, TransformerException {
- NodeList includeitems = input.getElementsByTagNameNS("http://www.w3.org/2001/XInclude", "*");
- if (includeitems.getLength() > 0)
+ NodeList includeItems = input.getElementsByTagNameNS("http://www.w3.org/2001/XInclude", "*");
+ if (includeItems.getLength() > 0)
throw new UnsupportedOperationException("XInclude not supported, use preprocess:include instead");
return input;
}
diff --git a/config-application-package/src/test/java/com/yahoo/config/application/OverrideProcessorTest.java b/config-application-package/src/test/java/com/yahoo/config/application/OverrideProcessorTest.java
index e5e36615b09..c2b0770ab06 100644
--- a/config-application-package/src/test/java/com/yahoo/config/application/OverrideProcessorTest.java
+++ b/config-application-package/src/test/java/com/yahoo/config/application/OverrideProcessorTest.java
@@ -2,6 +2,7 @@
package com.yahoo.config.application;
import com.yahoo.config.provision.Cloud;
+import com.yahoo.config.provision.CloudName;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.RegionName;
@@ -12,6 +13,8 @@ import org.w3c.dom.Document;
import javax.xml.transform.TransformerException;
import java.io.StringReader;
+import static com.yahoo.config.provision.Tags.empty;
+
/**
* @author Ulf Lilleengen
*/
@@ -366,14 +369,72 @@ public class OverrideProcessorTest {
assertOverride(input, Environment.dev, RegionName.defaultName(), expected);
}
+ /**
+ * Tests that searchers referred to with idref are overridden per cloud
+ * and that searchers not referred to with idref are not overridden.
+ */
+ @Test
+ public void testSearchersReferredWithIdRefPerCloud() throws TransformerException {
+ String input =
+ """
+ <?xml version="1.0" encoding="UTF-8" standalone="no"?>
+ <services xmlns:deploy="vespa" xmlns:preprocess="?" version="1.0">
+ <container id="stateless" version="1.0">
+ <search>
+ <searcher id="AwsSearcher" class="ai.vespa.AwsSearcher" bundle="foo"/>
+ <searcher id="GcpSearcher" class="ai.vespa.GcpSearcher" bundle="foo"/>
+ <searcher id="OtherSearcher" class="ai.vespa.OtherSearcher" bundle="foo"/>
+ <chain id="default" inherits="vespa">
+ <searcher idref="AwsSearcher" deploy:cloud="aws"/>
+ <searcher idref="GcpSearcher" deploy:cloud="gcp"/>
+ <searcher idref="OtherSearcher"/>
+ </chain>
+ </search>
+ </container>
+ "</services>""";
+
+ String expected =
+ """
+ <?xml version="1.0" encoding="UTF-8" standalone="no"?>
+ <services xmlns:deploy="vespa" xmlns:preprocess="?" version="1.0">
+ <container id="stateless" version="1.0">
+ <search>
+ <searcher id="AwsSearcher" class="ai.vespa.AwsSearcher" bundle="foo"/>
+ <searcher id="GcpSearcher" class="ai.vespa.GcpSearcher" bundle="foo"/>
+ <searcher id="OtherSearcher" class="ai.vespa.OtherSearcher" bundle="foo"/>
+ <chain id="default" inherits="vespa">
+ <searcher idref="%s"/>
+ <searcher idref="OtherSearcher"/>
+ </chain>
+ </search>
+ </container>
+ "</services>""";
+
+ assertOverride(input, "aws", expected.formatted("AwsSearcher"));
+ assertOverride(input, "gcp", expected.formatted("GcpSearcher"));
+ }
+
private void assertOverride(Environment environment, RegionName region, String expected) throws TransformerException {
assertOverride(input, environment, region, expected);
}
- private void assertOverride(String input, Environment environment, RegionName region, String expected) throws TransformerException {
- Document inputDoc = Xml.getDocument(new StringReader(input));
- Document newDoc = new OverrideProcessor(InstanceName.from("default"), environment, region, Cloud.defaultCloud().name(), Tags.empty()).process(inputDoc);
- TestBase.assertDocument(expected, newDoc);
+ private void assertOverride(String input, Environment environment, RegionName region, String expected) {
+ assertOverride(input, environment, region, Cloud.defaultCloud().name(), expected);
+ }
+
+ private void assertOverride(String input, String cloudName, String expected) {
+ assertOverride(input, Environment.defaultEnvironment(), RegionName.defaultName(), CloudName.from(cloudName), expected);
+ }
+
+ private void assertOverride(String input, Environment environment, RegionName region, CloudName cloudName, String expected) {
+ var inputDoc = Xml.getDocument(new StringReader(input));
+ try {
+ var newDoc = new OverrideProcessor(InstanceName.from("default"), environment, region, cloudName, Tags.empty())
+ .process(inputDoc);
+ TestBase.assertDocument(expected, newDoc);
+ } catch (TransformerException e) {
+ throw new RuntimeException(e);
+ }
}
}
diff --git a/config-model/src/main/java/com/yahoo/schema/RankProfile.java b/config-model/src/main/java/com/yahoo/schema/RankProfile.java
index cdefbbf8174..60674b5487c 100644
--- a/config-model/src/main/java/com/yahoo/schema/RankProfile.java
+++ b/config-model/src/main/java/com/yahoo/schema/RankProfile.java
@@ -222,9 +222,10 @@ public class RankProfile implements Cloneable {
this.useSignificanceModel = useSignificanceModel;
}
- public Boolean useSignificanceModel() {
+ public boolean useSignificanceModel() {
if (useSignificanceModel != null) return useSignificanceModel;
- return uniquelyInherited(p -> p.useSignificanceModel(), "use-model").orElse(null);
+ return uniquelyInherited(p -> p.useSignificanceModel(), "use-model")
+ .orElse(false); // Disabled by default
}
/**
diff --git a/config-model/src/main/java/com/yahoo/schema/derived/RawRankProfile.java b/config-model/src/main/java/com/yahoo/schema/derived/RawRankProfile.java
index 42586fa7d75..b057624f055 100644
--- a/config-model/src/main/java/com/yahoo/schema/derived/RawRankProfile.java
+++ b/config-model/src/main/java/com/yahoo/schema/derived/RawRankProfile.java
@@ -186,7 +186,6 @@ public class RawRankProfile {
private RankingExpression globalPhaseRanking;
private final int globalPhaseRerankCount;
private final SerializationContext functionSerializationContext;
- private Boolean useSignificanceModel;
/**
* Creates a raw rank profile from the given rank profile
@@ -222,7 +221,6 @@ public class RawRankProfile {
rankScoreDropLimit = compiled.getRankScoreDropLimit();
ignoreDefaultRankFeatures = compiled.getIgnoreDefaultRankFeatures();
rankProperties = new ArrayList<>(compiled.getRankProperties());
- useSignificanceModel = compiled.useSignificanceModel();
Map<String, RankProfile.RankingExpressionFunction> functions = compiled.getFunctions();
List<ExpressionFunction> functionExpressions = functions.values().stream().map(RankProfile.RankingExpressionFunction::function).toList();
@@ -481,9 +479,6 @@ public class RawRankProfile {
if (targetHitsMaxAdjustmentFactor.isPresent()) {
properties.add(new Pair<>("vespa.matching.nns.target_hits_max_adjustment_factor", String.valueOf(targetHitsMaxAdjustmentFactor.getAsDouble())));
}
- if (useSignificanceModel != null) {
- properties.add(new Pair<>("vespa.significance.use_model", String.valueOf(useSignificanceModel)));
- }
if (matchPhaseSettings != null) {
properties.add(new Pair<>("vespa.matchphase.degradation.attribute", matchPhaseSettings.getAttribute()));
properties.add(new Pair<>("vespa.matchphase.degradation.ascendingorder", matchPhaseSettings.getAscending() + ""));
diff --git a/config-model/src/main/java/com/yahoo/schema/derived/SchemaInfo.java b/config-model/src/main/java/com/yahoo/schema/derived/SchemaInfo.java
index f996b2624db..b91404be2dd 100644
--- a/config-model/src/main/java/com/yahoo/schema/derived/SchemaInfo.java
+++ b/config-model/src/main/java/com/yahoo/schema/derived/SchemaInfo.java
@@ -183,10 +183,12 @@ public final class SchemaInfo extends Derived {
private void addRankProfilesConfig(SchemaInfoConfig.Schema.Builder schemaBuilder) {
for (RankProfileInfo rankProfile : rankProfiles().values()) {
- var rankProfileConfig = new SchemaInfoConfig.Schema.Rankprofile.Builder();
- rankProfileConfig.name(rankProfile.name());
- rankProfileConfig.hasSummaryFeatures(rankProfile.hasSummaryFeatures());
- rankProfileConfig.hasRankFeatures(rankProfile.hasRankFeatures());
+ var rankProfileConfig = new SchemaInfoConfig.Schema.Rankprofile.Builder()
+ .name(rankProfile.name())
+ .hasSummaryFeatures(rankProfile.hasSummaryFeatures())
+ .hasRankFeatures(rankProfile.hasRankFeatures())
+ .significance(new SchemaInfoConfig.Schema.Rankprofile.Significance.Builder()
+ .useModel(rankProfile.useSignificanceModel()));
for (var input : rankProfile.inputs().entrySet()) {
var inputConfig = new SchemaInfoConfig.Schema.Rankprofile.Input.Builder();
inputConfig.name(input.getKey().toString());
@@ -226,6 +228,7 @@ public final class SchemaInfo extends Derived {
private final String name;
private final boolean hasSummaryFeatures;
private final boolean hasRankFeatures;
+ private final boolean useSignificanceModel;
private final Map<Reference, RankProfile.Input> inputs;
public RankProfileInfo(RankProfile profile) {
@@ -233,11 +236,13 @@ public final class SchemaInfo extends Derived {
this.hasSummaryFeatures = ! profile.getSummaryFeatures().isEmpty();
this.hasRankFeatures = ! profile.getRankFeatures().isEmpty();
this.inputs = profile.inputs();
+ useSignificanceModel = profile.useSignificanceModel();
}
public String name() { return name; }
public boolean hasSummaryFeatures() { return hasSummaryFeatures; }
public boolean hasRankFeatures() { return hasRankFeatures; }
+ public boolean useSignificanceModel() { return useSignificanceModel; }
public Map<Reference, RankProfile.Input> inputs() { return inputs; }
}
diff --git a/config-model/src/main/java/com/yahoo/schema/document/Matching.java b/config-model/src/main/java/com/yahoo/schema/document/Matching.java
index 9d68553fa80..33256fa8586 100644
--- a/config-model/src/main/java/com/yahoo/schema/document/Matching.java
+++ b/config-model/src/main/java/com/yahoo/schema/document/Matching.java
@@ -33,6 +33,8 @@ public class Matching implements Cloneable, Serializable {
private Integer maxLength;
/** Maximum number of occurrences for each term */
private Integer maxTermOccurrences;
+ /** Maximum number of characters in a token. */
+ private Integer maxTokenLength;
private String exactMatchTerminator = null;
@@ -61,6 +63,8 @@ public class Matching implements Cloneable, Serializable {
public Matching maxLength(int maxLength) { this.maxLength = maxLength; return this; }
public Integer maxTermOccurrences() { return maxTermOccurrences; }
public Matching maxTermOccurrences(int maxTermOccurrences) { this.maxTermOccurrences = maxTermOccurrences; return this; }
+ public Integer maxTokenLength() { return maxTokenLength; }
+ public Matching maxTokenLength(int maxTokenLength) { this.maxTokenLength = maxTokenLength; return this; }
public boolean isTypeUserSet() { return typeUserSet; }
public MatchAlgorithm getAlgorithm() { return algorithm; }
diff --git a/config-model/src/main/java/com/yahoo/schema/document/SDField.java b/config-model/src/main/java/com/yahoo/schema/document/SDField.java
index f165141b16e..2483fa47667 100644
--- a/config-model/src/main/java/com/yahoo/schema/document/SDField.java
+++ b/config-model/src/main/java/com/yahoo/schema/document/SDField.java
@@ -46,7 +46,7 @@ import java.util.TreeMap;
*
* @author bratseth
*/
-public class SDField extends Field implements TypedKey, ImmutableSDField {
+public class SDField extends Field implements ImmutableSDField {
/** Use this field for modifying index-structure, even if it doesn't have any indexing code */
private boolean indexStructureField = false;
@@ -315,7 +315,7 @@ public class SDField extends Field implements TypedKey, ImmutableSDField {
supplyStructField.accept(field.getName(), field.getDataType());
}
}
- if ((subType == null) && (structFields.size() > 0)) {
+ if ((subType == null) && (!structFields.isEmpty())) {
throw new IllegalArgumentException("Cannot find matching (repo=" + sdoc + ") for subfields in "
+ this + " [" + getDataType() + getDataType().getClass() +
"] with " + structFields.size() + " struct fields");
@@ -627,7 +627,7 @@ public class SDField extends Field implements TypedKey, ImmutableSDField {
public Attribute addAttribute(Attribute attribute) {
String name = attribute.getName();
- if (name == null || "".equals(name)) {
+ if (name == null || name.isEmpty()) {
name = getName();
attribute.setName(name);
}
diff --git a/config-model/src/main/java/com/yahoo/schema/document/TypedKey.java b/config-model/src/main/java/com/yahoo/schema/document/TypedKey.java
deleted file mode 100644
index 652d21d7f7d..00000000000
--- a/config-model/src/main/java/com/yahoo/schema/document/TypedKey.java
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.schema.document;
-
-import com.yahoo.document.DataType;
-
-/**
- * Common interface for various typed key (or field definitions).
- * Used by code which wants to use common algorithms for dealing with typed keys, like the logical mapping
- *
- * @author bratseth
- */
-public interface TypedKey {
-
- String getName();
-
- void setDataType(DataType type);
-
- DataType getDataType();
-
-}
diff --git a/config-model/src/main/java/com/yahoo/schema/parser/ConvertParsedFields.java b/config-model/src/main/java/com/yahoo/schema/parser/ConvertParsedFields.java
index 7659a1e6562..173eebe2a94 100644
--- a/config-model/src/main/java/com/yahoo/schema/parser/ConvertParsedFields.java
+++ b/config-model/src/main/java/com/yahoo/schema/parser/ConvertParsedFields.java
@@ -44,6 +44,7 @@ public class ConvertParsedFields {
parsed.getGramSize().ifPresent(gramSize -> field.getMatching().setGramSize(gramSize));
parsed.getMaxLength().ifPresent(maxLength -> field.getMatching().maxLength(maxLength));
parsed.getMaxTermOccurrences().ifPresent(maxTermOccurrences -> field.getMatching().maxTermOccurrences(maxTermOccurrences));
+ parsed.getMaxTokenLength().ifPresent(maxTokenLength -> field.getMatching().maxTokenLength(maxTokenLength));
parsed.getMatchAlgorithm().ifPresent
(matchingAlgorithm -> field.setMatchingAlgorithm(matchingAlgorithm));
parsed.getExactTerminator().ifPresent
diff --git a/config-model/src/main/java/com/yahoo/schema/parser/ParsedMatchSettings.java b/config-model/src/main/java/com/yahoo/schema/parser/ParsedMatchSettings.java
index c7d1a215ce3..bac2c894283 100644
--- a/config-model/src/main/java/com/yahoo/schema/parser/ParsedMatchSettings.java
+++ b/config-model/src/main/java/com/yahoo/schema/parser/ParsedMatchSettings.java
@@ -23,6 +23,7 @@ public class ParsedMatchSettings {
private Integer gramSize = null;
private Integer maxLength = null;
private Integer maxTermOccurrences = null;
+ private Integer maxTokenLength = null;
Optional<MatchType> getMatchType() { return Optional.ofNullable(matchType); }
Optional<Case> getMatchCase() { return Optional.ofNullable(matchCase); }
@@ -31,6 +32,7 @@ public class ParsedMatchSettings {
Optional<Integer> getGramSize() { return Optional.ofNullable(gramSize); }
Optional<Integer> getMaxLength() { return Optional.ofNullable(maxLength); }
Optional<Integer> getMaxTermOccurrences() { return Optional.ofNullable(maxTermOccurrences); }
+ Optional<Integer> getMaxTokenLength() { return Optional.ofNullable(maxTokenLength); }
// TODO - consider allowing each set only once:
void setType(MatchType value) { this.matchType = value; }
@@ -40,5 +42,6 @@ public class ParsedMatchSettings {
void setGramSize(int value) { this.gramSize = value; }
void setMaxLength(int value) { this.maxLength = value; }
void setMaxTermOccurrences(int value) { this.maxTermOccurrences = value; }
+ void setMaxTokenLength(int value) { this.maxTokenLength = value; }
}
diff --git a/config-model/src/main/java/com/yahoo/schema/processing/AttributesImplicitWord.java b/config-model/src/main/java/com/yahoo/schema/processing/AttributesImplicitWord.java
index 767593b82d0..769f0c9de92 100644
--- a/config-model/src/main/java/com/yahoo/schema/processing/AttributesImplicitWord.java
+++ b/config-model/src/main/java/com/yahoo/schema/processing/AttributesImplicitWord.java
@@ -2,6 +2,7 @@
package com.yahoo.schema.processing;
import com.yahoo.config.application.api.DeployLogger;
+import com.yahoo.document.TensorDataType;
import com.yahoo.schema.RankProfileRegistry;
import com.yahoo.document.DataType;
import com.yahoo.schema.Schema;
@@ -45,6 +46,8 @@ public class AttributesImplicitWord extends Processor {
private boolean fieldImplicitlyWordMatch(ImmutableSDField field) {
// numeric types should not trigger exact-match query parsing
if (field.getDataType().getPrimitiveType() instanceof NumericDataType) return false;
+ // Tensor type should not trigger exact-match query parsing
+ if (field.getDataType() instanceof TensorDataType) return false;
return (! field.hasIndex()
&& !field.getAttributes().isEmpty()
diff --git a/config-model/src/main/java/com/yahoo/schema/processing/ExactMatch.java b/config-model/src/main/java/com/yahoo/schema/processing/ExactMatch.java
index 056c37a9830..4313ceb4be1 100644
--- a/config-model/src/main/java/com/yahoo/schema/processing/ExactMatch.java
+++ b/config-model/src/main/java/com/yahoo/schema/processing/ExactMatch.java
@@ -16,6 +16,7 @@ import com.yahoo.vespa.indexinglanguage.expressions.ForEachExpression;
import com.yahoo.vespa.indexinglanguage.expressions.IndexExpression;
import com.yahoo.vespa.indexinglanguage.expressions.OutputExpression;
import com.yahoo.vespa.indexinglanguage.expressions.ScriptExpression;
+import com.yahoo.vespa.indexinglanguage.linguistics.AnnotatorConfig;
import com.yahoo.vespa.model.container.search.QueryProfiles;
/**
@@ -75,7 +76,11 @@ public class ExactMatch extends Processor {
}
ScriptExpression script = field.getIndexingScript();
if (new ExpressionSearcher<>(IndexExpression.class).containedIn(script)) {
- field.setIndexingScript(schema.getName(), (ScriptExpression)new MyProvider(schema).convert(field.getIndexingScript()));
+ var maxTokenLength = field.getMatching().maxTokenLength();
+ if (maxTokenLength == null) {
+ maxTokenLength = AnnotatorConfig.getDefaultMaxTokenLength();
+ }
+ field.setIndexingScript(schema.getName(), (ScriptExpression)new MyProvider(schema, maxTokenLength).convert(field.getIndexingScript()));
}
}
@@ -85,8 +90,12 @@ public class ExactMatch extends Processor {
private static class MyProvider extends TypedTransformProvider {
- MyProvider(Schema schema) {
+ private int maxTokenLength;
+
+ MyProvider(Schema schema, int maxTokenLength) {
super(ExactExpression.class, schema);
+ this.maxTokenLength = maxTokenLength;
}
@Override
@@ -96,7 +105,7 @@ public class ExactMatch extends Processor {
@Override
protected Expression newTransform(DataType fieldType) {
- Expression exp = new ExactExpression();
+ Expression exp = new ExactExpression(maxTokenLength);
if (fieldType instanceof CollectionDataType) {
exp = new ForEachExpression(exp);
}
diff --git a/config-model/src/main/java/com/yahoo/schema/processing/TextMatch.java b/config-model/src/main/java/com/yahoo/schema/processing/TextMatch.java
index e29f683761f..3f23cbc9b2d 100644
--- a/config-model/src/main/java/com/yahoo/schema/processing/TextMatch.java
+++ b/config-model/src/main/java/com/yahoo/schema/processing/TextMatch.java
@@ -64,12 +64,16 @@ public class TextMatch extends Processor {
if (fieldMatching != null) {
var maxLength = fieldMatching.maxLength();
if (maxLength != null) {
- ret.setMaxTokenLength(maxLength);
+ ret.setMaxTokenizeLength(maxLength);
}
var maxTermOccurrences = fieldMatching.maxTermOccurrences();
if (maxTermOccurrences != null) {
ret.setMaxTermOccurrences(maxTermOccurrences);
}
+ var maxTokenLength = fieldMatching.maxTokenLength();
+ if (maxTokenLength != null) {
+ ret.setMaxTokenLength(maxTokenLength);
+ }
}
return ret;
}
diff --git a/config-model/src/main/java/com/yahoo/schema/processing/TypedTransformProvider.java b/config-model/src/main/java/com/yahoo/schema/processing/TypedTransformProvider.java
index 8ccc8870419..3d4934ed841 100644
--- a/config-model/src/main/java/com/yahoo/schema/processing/TypedTransformProvider.java
+++ b/config-model/src/main/java/com/yahoo/schema/processing/TypedTransformProvider.java
@@ -29,6 +29,10 @@ public abstract class TypedTransformProvider extends ValueTransformProvider {
protected final boolean requiresTransform(Expression exp) {
if (exp instanceof OutputExpression) {
String fieldName = ((OutputExpression)exp).getFieldName();
+ if (fieldName == null) {
+ // Incomplete output expressions never require a transform.
+ return false;
+ }
if (exp instanceof AttributeExpression) {
Attribute attribute = schema.getAttribute(fieldName);
if (attribute == null)
diff --git a/config-model/src/main/java/com/yahoo/vespa/documentmodel/SummaryField.java b/config-model/src/main/java/com/yahoo/vespa/documentmodel/SummaryField.java
index d50d5e36134..785b45d8def 100644
--- a/config-model/src/main/java/com/yahoo/vespa/documentmodel/SummaryField.java
+++ b/config-model/src/main/java/com/yahoo/vespa/documentmodel/SummaryField.java
@@ -3,10 +3,12 @@ package com.yahoo.vespa.documentmodel;
import com.yahoo.document.DataType;
import com.yahoo.document.Field;
-import com.yahoo.schema.document.TypedKey;
import java.io.Serializable;
-import java.util.*;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.LinkedHashSet;
+import java.util.Set;
import java.util.stream.Collectors;
import static com.yahoo.text.Lowercase.toLowerCase;
@@ -16,7 +18,7 @@ import static com.yahoo.text.Lowercase.toLowerCase;
*
* @author bratseth
*/
-public class SummaryField extends Field implements Cloneable, TypedKey {
+public class SummaryField extends Field implements Cloneable {
/** A source (field name). */
public static class Source implements Serializable {
@@ -62,7 +64,7 @@ public class SummaryField extends Field implements Cloneable, TypedKey {
*/
private Set<Source> sources = new java.util.LinkedHashSet<>();
- private Set<String> destinations =new java.util.LinkedHashSet<>();
+ private Set<String> destinations = new java.util.LinkedHashSet<>();
/** True if this field was defined implicitly */
private boolean implicit = false;
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidator.java
index 40c9a03b126..02a6b243054 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidator.java
@@ -132,7 +132,7 @@ public class ConstantTensorJsonValidator {
private void consumeTopObject() throws IOException {
for (var cur = parser.nextToken(); cur != JsonToken.END_OBJECT; cur = parser.nextToken()) {
assertCurrentTokenIs(JsonToken.FIELD_NAME);
- String fieldName = parser.getCurrentName();
+ String fieldName = parser.currentName();
switch (fieldName) {
case FIELD_TYPE -> consumeTypeField();
case FIELD_VALUES -> consumeValuesField();
@@ -189,7 +189,7 @@ public class ConstantTensorJsonValidator {
}
for (var cur = parser.nextToken(); cur != JsonToken.END_OBJECT; cur = parser.nextToken()) {
assertCurrentTokenIs(JsonToken.FIELD_NAME);
- validateNumeric(parser.getCurrentName(), parser.nextToken());
+ validateNumeric(parser.currentName(), parser.nextToken());
}
}
@@ -199,7 +199,7 @@ public class ConstantTensorJsonValidator {
boolean seenValue = false;
for (int i = 0; i < 2; i++) {
assertNextTokenIs(JsonToken.FIELD_NAME);
- String fieldName = parser.getCurrentName();
+ String fieldName = parser.currentName();
switch (fieldName) {
case FIELD_ADDRESS -> {
validateTensorAddress(new HashSet<>(tensorDimensions.keySet()));
@@ -228,13 +228,13 @@ public class ConstantTensorJsonValidator {
// Iterate within the address key, value pairs
while ((parser.nextToken() != JsonToken.END_OBJECT)) {
assertCurrentTokenIs(JsonToken.FIELD_NAME);
- String dimensionName = parser.getCurrentName();
+ String dimensionName = parser.currentName();
TensorType.Dimension dimension = tensorDimensions.get(dimensionName);
if (dimension == null) {
- throw new InvalidConstantTensorException(parser, String.format("Tensor dimension '%s' does not exist", parser.getCurrentName()));
+ throw new InvalidConstantTensorException(parser, String.format("Tensor dimension '%s' does not exist", dimensionName));
}
if (!cellDimensions.contains(dimensionName)) {
- throw new InvalidConstantTensorException(parser, String.format("Duplicate tensor dimension '%s'", parser.getCurrentName()));
+ throw new InvalidConstantTensorException(parser, String.format("Duplicate tensor dimension '%s'", dimensionName));
}
cellDimensions.remove(dimensionName);
validateLabel(dimension);
@@ -300,7 +300,7 @@ public class ConstantTensorJsonValidator {
}
private void assertCurrentTokenIs(JsonToken wantedToken) {
- assertTokenIs(parser.getCurrentToken(), wantedToken);
+ assertTokenIs(parser.currentToken(), wantedToken);
}
private void assertNextTokenIs(JsonToken wantedToken) throws IOException {
@@ -316,11 +316,11 @@ public class ConstantTensorJsonValidator {
static class InvalidConstantTensorException extends IllegalArgumentException {
InvalidConstantTensorException(JsonParser parser, String message) {
- super(message + " " + parser.getCurrentLocation().toString());
+ super(message + " " + parser.currentLocation().toString());
}
InvalidConstantTensorException(JsonParser parser, Exception base) {
- super("Failed to parse JSON stream " + parser.getCurrentLocation().toString(), base);
+ super("Failed to parse JSON stream " + parser.currentLocation().toString(), base);
}
InvalidConstantTensorException(IOException base) {
@@ -412,7 +412,7 @@ public class ConstantTensorJsonValidator {
boolean seenValues = false;
for (int i = 0; i < 2; i++) {
assertNextTokenIs(JsonToken.FIELD_NAME);
- String fieldName = parser.getCurrentName();
+ String fieldName = parser.currentName();
switch (fieldName) {
case FIELD_ADDRESS -> {
validateTensorAddress(new HashSet<>(mappedDims));
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validation.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validation.java
index ed0804f7420..7f624032627 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validation.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validation.java
@@ -19,6 +19,7 @@ import com.yahoo.vespa.model.application.validation.change.IndexingModeChangeVal
import com.yahoo.vespa.model.application.validation.change.NodeResourceChangeValidator;
import com.yahoo.vespa.model.application.validation.change.RedundancyIncreaseValidator;
import com.yahoo.vespa.model.application.validation.change.ResourcesReductionValidator;
+import com.yahoo.vespa.model.application.validation.change.RestartOnDeployForLocalLLMValidator;
import com.yahoo.vespa.model.application.validation.change.RestartOnDeployForOnnxModelChangesValidator;
import com.yahoo.vespa.model.application.validation.change.StartupCommandChangeValidator;
import com.yahoo.vespa.model.application.validation.change.StreamingSearchClusterChangeValidator;
@@ -129,6 +130,7 @@ public class Validation {
new CertificateRemovalChangeValidator().validate(execution);
new RedundancyValidator().validate(execution);
new RestartOnDeployForOnnxModelChangesValidator().validate(execution);
+ new RestartOnDeployForLocalLLMValidator().validate(execution);
}
public interface Context {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/RestartOnDeployForLocalLLMValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/RestartOnDeployForLocalLLMValidator.java
new file mode 100644
index 00000000000..ccfc611c3dc
--- /dev/null
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/RestartOnDeployForLocalLLMValidator.java
@@ -0,0 +1,55 @@
+// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.model.application.validation.change;
+
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.vespa.model.VespaModel;
+import com.yahoo.vespa.model.application.validation.Validation.ChangeContext;
+import com.yahoo.vespa.model.container.ApplicationContainerCluster;
+
+import java.util.HashSet;
+import java.util.Set;
+import java.util.logging.Logger;
+
+import static java.util.logging.Level.INFO;
+import static java.util.stream.Collectors.toUnmodifiableSet;
+
+/**
+ * If local LLMs are used, this validator makes sure that services in the clusters
+ * using them are restarted on deploy.
+ *
+ * @author lesters
+ */
+public class RestartOnDeployForLocalLLMValidator implements ChangeValidator {
+
+ public static final String LOCAL_LLM_COMPONENT = ai.vespa.llm.clients.LocalLLM.class.getName();
+
+ private static final Logger log = Logger.getLogger(RestartOnDeployForLocalLLMValidator.class.getName());
+
+ @Override
+ public void validate(ChangeContext context) {
+ var previousClustersWithLocalLLM = findClustersWithLocalLLMs(context.previousModel());
+ var nextClustersWithLocalLLM = findClustersWithLocalLLMs(context.model());
+
+ // Only restart services if we use a local LLM in both the next and previous generation
+ for (var clusterId : intersect(previousClustersWithLocalLLM, nextClustersWithLocalLLM)) {
+ String message = "Need to restart services in %s due to use of local LLM".formatted(clusterId);
+ context.require(new VespaRestartAction(clusterId, message));
+ log.log(INFO, message);
+ }
+ }
+
+ private Set<ClusterSpec.Id> findClustersWithLocalLLMs(VespaModel model) {
+ return model.getContainerClusters().values().stream()
+ .filter(cluster -> cluster.getAllComponents().stream()
+ .anyMatch(component -> component.getClassId().getName().equals(LOCAL_LLM_COMPONENT)))
+ .map(ApplicationContainerCluster::id)
+ .collect(toUnmodifiableSet());
+ }
+
+ private Set<ClusterSpec.Id> intersect(Set<ClusterSpec.Id> a, Set<ClusterSpec.Id> b) {
+ Set<ClusterSpec.Id> result = new HashSet<>(a);
+ result.retainAll(b);
+ return result;
+ }
+
+}
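
To make the "both the next and previous generation" rule in the validator above concrete, here is a small illustrative sketch of the set intersection it relies on; the cluster ids and class name are hypothetical, not Vespa API:

import java.util.HashSet;
import java.util.Set;

final class RestartIntersectionSketch {
    public static void main(String[] args) {
        Set<String> previous = Set.of("feed", "query");  // clusters with a LocalLLM before the deploy
        Set<String> next = Set.of("query", "summarize"); // clusters with a LocalLLM after the deploy

        Set<String> restart = new HashSet<>(previous);
        restart.retainAll(next);                         // keep only clusters using a local LLM in both generations

        System.out.println(restart);                     // prints [query]: only this cluster needs a restart
    }
}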
diff --git a/config-model/src/main/javacc/SchemaParser.jj b/config-model/src/main/javacc/SchemaParser.jj
index b40f2d0796d..1365c133932 100644
--- a/config-model/src/main/javacc/SchemaParser.jj
+++ b/config-model/src/main/javacc/SchemaParser.jj
@@ -183,6 +183,7 @@ TOKEN :
| < GRAM_SIZE: "gram-size" >
| < MAX_LENGTH: "max-length" >
| < MAX_OCCURRENCES: "max-occurrences" >
+| < MAX_TOKEN_LENGTH: "max-token-length" >
| < PREFIX: "prefix" >
| < SUBSTRING: "substring" >
| < SUFFIX: "suffix" >
@@ -1368,7 +1369,8 @@ void matchType(ParsedMatchSettings matchInfo) : { }
*/
void matchItem(ParsedMatchSettings matchInfo) : { }
{
- ( matchType(matchInfo) | exactTerminator(matchInfo) | gramSize(matchInfo) | matchSize(matchInfo) | maxTermOccurrences(matchInfo))
+ ( matchType(matchInfo) | exactTerminator(matchInfo) | gramSize(matchInfo) | matchSize(matchInfo) |
+ maxTermOccurrences(matchInfo) | maxTokenLength(matchInfo) )
}
void exactTerminator(ParsedMatchSettings matchInfo) :
@@ -1413,6 +1415,16 @@ void maxTermOccurrences(ParsedMatchSettings matchInfo) :
}
}
+void maxTokenLength(ParsedMatchSettings matchInfo) :
+{
+ int maxTokenLength;
+}
+{
+ <MAX_TOKEN_LENGTH> <COLON> maxTokenLength = integer() {
+ matchInfo.setMaxTokenLength(maxTokenLength);
+ }
+}
+
/**
* Consumes a rank statement of a field element.
*
diff --git a/config-model/src/test/derived/advanced/ilscripts.cfg b/config-model/src/test/derived/advanced/ilscripts.cfg
index 51a49502b64..d633cd97f0c 100644
--- a/config-model/src/test/derived/advanced/ilscripts.cfg
+++ b/config-model/src/test/derived/advanced/ilscripts.cfg
@@ -1,4 +1,5 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "advanced"
ilscript[].docfield[] "debug_src"
diff --git a/config-model/src/test/derived/annotationsimplicitstruct/ilscripts.cfg b/config-model/src/test/derived/annotationsimplicitstruct/ilscripts.cfg
index 767c3af3c19..53dc789fbb7 100644
--- a/config-model/src/test/derived/annotationsimplicitstruct/ilscripts.cfg
+++ b/config-model/src/test/derived/annotationsimplicitstruct/ilscripts.cfg
@@ -1,3 +1,4 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "annotationsimplicitstruct"
diff --git a/config-model/src/test/derived/annotationsinheritance/ilscripts.cfg b/config-model/src/test/derived/annotationsinheritance/ilscripts.cfg
index d8e6c882b80..b0a69c5408a 100644
--- a/config-model/src/test/derived/annotationsinheritance/ilscripts.cfg
+++ b/config-model/src/test/derived/annotationsinheritance/ilscripts.cfg
@@ -1,3 +1,4 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "annotationsinheritance"
diff --git a/config-model/src/test/derived/annotationsinheritance2/ilscripts.cfg b/config-model/src/test/derived/annotationsinheritance2/ilscripts.cfg
index ae4ea621583..5ec1f839429 100644
--- a/config-model/src/test/derived/annotationsinheritance2/ilscripts.cfg
+++ b/config-model/src/test/derived/annotationsinheritance2/ilscripts.cfg
@@ -1,3 +1,4 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "annotationsinheritance2"
diff --git a/config-model/src/test/derived/annotationsreference/ilscripts.cfg b/config-model/src/test/derived/annotationsreference/ilscripts.cfg
index 812f5e44545..eaa20043be8 100644
--- a/config-model/src/test/derived/annotationsreference/ilscripts.cfg
+++ b/config-model/src/test/derived/annotationsreference/ilscripts.cfg
@@ -1,3 +1,4 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "annotationsreference"
diff --git a/config-model/src/test/derived/annotationssimple/ilscripts.cfg b/config-model/src/test/derived/annotationssimple/ilscripts.cfg
index 9d0962df5be..af179221eb4 100644
--- a/config-model/src/test/derived/annotationssimple/ilscripts.cfg
+++ b/config-model/src/test/derived/annotationssimple/ilscripts.cfg
@@ -1,3 +1,4 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "annotationssimple"
diff --git a/config-model/src/test/derived/arrays/ilscripts.cfg b/config-model/src/test/derived/arrays/ilscripts.cfg
index 98cff642d9e..3f2dae48552 100644
--- a/config-model/src/test/derived/arrays/ilscripts.cfg
+++ b/config-model/src/test/derived/arrays/ilscripts.cfg
@@ -1,4 +1,5 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "arrays"
ilscript[].docfield[] "tags"
diff --git a/config-model/src/test/derived/attributeprefetch/ilscripts.cfg b/config-model/src/test/derived/attributeprefetch/ilscripts.cfg
index dec054b33f0..5a3784f7cb9 100644
--- a/config-model/src/test/derived/attributeprefetch/ilscripts.cfg
+++ b/config-model/src/test/derived/attributeprefetch/ilscripts.cfg
@@ -1,4 +1,5 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "prefetch"
ilscript[].docfield[] "singlebyte"
diff --git a/config-model/src/test/derived/attributes/ilscripts.cfg b/config-model/src/test/derived/attributes/ilscripts.cfg
index 6d3ef2799d9..58279759e5f 100644
--- a/config-model/src/test/derived/attributes/ilscripts.cfg
+++ b/config-model/src/test/derived/attributes/ilscripts.cfg
@@ -1,4 +1,5 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "attributes"
ilscript[].docfield[] "a1"
diff --git a/config-model/src/test/derived/attributes/index-info.cfg b/config-model/src/test/derived/attributes/index-info.cfg
index 1d4e8f485b3..245cff48d15 100644
--- a/config-model/src/test/derived/attributes/index-info.cfg
+++ b/config-model/src/test/derived/attributes/index-info.cfg
@@ -175,8 +175,6 @@ indexinfo[].command[].indexname "a13"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "a13"
indexinfo[].command[].command "type tensor(x{})"
-indexinfo[].command[].indexname "a13"
-indexinfo[].command[].command "word"
indexinfo[].command[].indexname "a7_arr"
indexinfo[].command[].command "lowercase"
indexinfo[].command[].indexname "a7_arr"
diff --git a/config-model/src/test/derived/bolding_dynamic_summary/ilscripts.cfg b/config-model/src/test/derived/bolding_dynamic_summary/ilscripts.cfg
index c20c321ebcf..0b925da4778 100644
--- a/config-model/src/test/derived/bolding_dynamic_summary/ilscripts.cfg
+++ b/config-model/src/test/derived/bolding_dynamic_summary/ilscripts.cfg
@@ -1,4 +1,5 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "test"
ilscript[].docfield[] "str_1"
diff --git a/config-model/src/test/derived/complex/ilscripts.cfg b/config-model/src/test/derived/complex/ilscripts.cfg
index 4405d2fda40..7d025e15703 100644
--- a/config-model/src/test/derived/complex/ilscripts.cfg
+++ b/config-model/src/test/derived/complex/ilscripts.cfg
@@ -1,4 +1,5 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "complex"
ilscript[].docfield[] "title"
diff --git a/config-model/src/test/derived/emptydefault/ilscripts.cfg b/config-model/src/test/derived/emptydefault/ilscripts.cfg
index e4242153bce..bbb8e5c556c 100644
--- a/config-model/src/test/derived/emptydefault/ilscripts.cfg
+++ b/config-model/src/test/derived/emptydefault/ilscripts.cfg
@@ -1,4 +1,5 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "emptydefault"
ilscript[].docfield[] "one"
diff --git a/config-model/src/test/derived/exactmatch/ilscripts.cfg b/config-model/src/test/derived/exactmatch/ilscripts.cfg
index 21dfbd1371b..1d1bd6d5e8a 100644
--- a/config-model/src/test/derived/exactmatch/ilscripts.cfg
+++ b/config-model/src/test/derived/exactmatch/ilscripts.cfg
@@ -1,4 +1,5 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "exactmatch"
ilscript[].docfield[] "tag"
diff --git a/config-model/src/test/derived/hnsw_index/ilscripts.cfg b/config-model/src/test/derived/hnsw_index/ilscripts.cfg
index e48f116f468..c811b93c3df 100644
--- a/config-model/src/test/derived/hnsw_index/ilscripts.cfg
+++ b/config-model/src/test/derived/hnsw_index/ilscripts.cfg
@@ -1,4 +1,5 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "test"
ilscript[].docfield[] "t1"
diff --git a/config-model/src/test/derived/id/ilscripts.cfg b/config-model/src/test/derived/id/ilscripts.cfg
index d3ab29f6cd8..121e305059e 100644
--- a/config-model/src/test/derived/id/ilscripts.cfg
+++ b/config-model/src/test/derived/id/ilscripts.cfg
@@ -1,4 +1,5 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "id"
ilscript[].docfield[] "uri"
diff --git a/config-model/src/test/derived/imported_position_field_summary/schema-info.cfg b/config-model/src/test/derived/imported_position_field_summary/schema-info.cfg
index f820ad9720b..5a474f62e07 100644
--- a/config-model/src/test/derived/imported_position_field_summary/schema-info.cfg
+++ b/config-model/src/test/derived/imported_position_field_summary/schema-info.cfg
@@ -53,6 +53,8 @@ schema[].summaryclass[].fields[].dynamic false
schema[].rankprofile[].name "default"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "unranked"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
diff --git a/config-model/src/test/derived/indexswitches/ilscripts.cfg b/config-model/src/test/derived/indexswitches/ilscripts.cfg
index 472c1f95cb0..454f675c0a2 100644
--- a/config-model/src/test/derived/indexswitches/ilscripts.cfg
+++ b/config-model/src/test/derived/indexswitches/ilscripts.cfg
@@ -1,4 +1,5 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "indexswitches"
ilscript[].docfield[] "title"
diff --git a/config-model/src/test/derived/inheritance/ilscripts.cfg b/config-model/src/test/derived/inheritance/ilscripts.cfg
index d4c804773f0..c966f32a502 100644
--- a/config-model/src/test/derived/inheritance/ilscripts.cfg
+++ b/config-model/src/test/derived/inheritance/ilscripts.cfg
@@ -1,4 +1,5 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "child"
ilscript[].docfield[] "onlygrandparent"
diff --git a/config-model/src/test/derived/language/ilscripts.cfg b/config-model/src/test/derived/language/ilscripts.cfg
index 1860f180839..d0abc08f1e0 100644
--- a/config-model/src/test/derived/language/ilscripts.cfg
+++ b/config-model/src/test/derived/language/ilscripts.cfg
@@ -1,4 +1,5 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "language"
ilscript[].docfield[] "language"
diff --git a/config-model/src/test/derived/lowercase/ilscripts.cfg b/config-model/src/test/derived/lowercase/ilscripts.cfg
index 8ba4bfa3349..49515e50df4 100644
--- a/config-model/src/test/derived/lowercase/ilscripts.cfg
+++ b/config-model/src/test/derived/lowercase/ilscripts.cfg
@@ -1,4 +1,5 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "lowercase"
ilscript[].docfield[] "single_field_source"
diff --git a/config-model/src/test/derived/multiplesummaries/ilscripts.cfg b/config-model/src/test/derived/multiplesummaries/ilscripts.cfg
index 0cdf921de25..4a6de4154f8 100644
--- a/config-model/src/test/derived/multiplesummaries/ilscripts.cfg
+++ b/config-model/src/test/derived/multiplesummaries/ilscripts.cfg
@@ -1,4 +1,5 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "multiplesummaries"
ilscript[].docfield[] "a"
diff --git a/config-model/src/test/derived/music/ilscripts.cfg b/config-model/src/test/derived/music/ilscripts.cfg
index f90cdb15baa..f79e8824b69 100644
--- a/config-model/src/test/derived/music/ilscripts.cfg
+++ b/config-model/src/test/derived/music/ilscripts.cfg
@@ -1,4 +1,5 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "music"
ilscript[].docfield[] "bgndata"
diff --git a/config-model/src/test/derived/nearestneighbor_streaming/vsmfields.cfg b/config-model/src/test/derived/nearestneighbor_streaming/vsmfields.cfg
index ab9a96f819b..ec06d01f05a 100644
--- a/config-model/src/test/derived/nearestneighbor_streaming/vsmfields.cfg
+++ b/config-model/src/test/derived/nearestneighbor_streaming/vsmfields.cfg
@@ -3,25 +3,25 @@ searchall 1
fieldspec[].name "vec_a"
fieldspec[].searchmethod NEAREST_NEIGHBOR
fieldspec[].arg1 "EUCLIDEAN"
-fieldspec[].normalize LOWERCASE
+fieldspec[].normalize LOWERCASE_AND_FOLD
fieldspec[].maxlength 1048576
fieldspec[].fieldtype ATTRIBUTE
fieldspec[].name "vec_b"
fieldspec[].searchmethod NEAREST_NEIGHBOR
fieldspec[].arg1 "ANGULAR"
-fieldspec[].normalize LOWERCASE
+fieldspec[].normalize LOWERCASE_AND_FOLD
fieldspec[].maxlength 1048576
fieldspec[].fieldtype ATTRIBUTE
fieldspec[].name "vec_c"
fieldspec[].searchmethod NEAREST_NEIGHBOR
fieldspec[].arg1 "INNERPRODUCT"
-fieldspec[].normalize LOWERCASE
+fieldspec[].normalize LOWERCASE_AND_FOLD
fieldspec[].maxlength 1048576
fieldspec[].fieldtype ATTRIBUTE
fieldspec[].name "vec_d"
fieldspec[].searchmethod NONE
fieldspec[].arg1 ""
-fieldspec[].normalize LOWERCASE
+fieldspec[].normalize LOWERCASE_AND_FOLD
fieldspec[].maxlength 1048576
fieldspec[].fieldtype ATTRIBUTE
documenttype[].name "test"
diff --git a/config-model/src/test/derived/neuralnet_noqueryprofile/schema-info.cfg b/config-model/src/test/derived/neuralnet_noqueryprofile/schema-info.cfg
index 728856abbf2..8f59c21e97f 100644
--- a/config-model/src/test/derived/neuralnet_noqueryprofile/schema-info.cfg
+++ b/config-model/src/test/derived/neuralnet_noqueryprofile/schema-info.cfg
@@ -156,6 +156,7 @@ schema[].summaryclass[].fields[].dynamic false
schema[].rankprofile[].name "default"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].input[].name "query(W_0)"
schema[].rankprofile[].input[].type "tensor(hidden[9],x[9])"
schema[].rankprofile[].input[].name "query(b_0)"
@@ -173,9 +174,11 @@ schema[].rankprofile[].input[].type "tensor()"
schema[].rankprofile[].name "unranked"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "defaultRankProfile"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].input[].name "query(W_0)"
schema[].rankprofile[].input[].type "tensor(hidden[9],x[9])"
schema[].rankprofile[].input[].name "query(b_0)"
@@ -193,6 +196,7 @@ schema[].rankprofile[].input[].type "tensor()"
schema[].rankprofile[].name "neuralNetworkProfile"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].input[].name "query(W_0)"
schema[].rankprofile[].input[].type "tensor(hidden[9],x[9])"
schema[].rankprofile[].input[].name "query(b_0)"
diff --git a/config-model/src/test/derived/newrank/ilscripts.cfg b/config-model/src/test/derived/newrank/ilscripts.cfg
index b02e09a0496..487d2fca902 100644
--- a/config-model/src/test/derived/newrank/ilscripts.cfg
+++ b/config-model/src/test/derived/newrank/ilscripts.cfg
@@ -1,4 +1,5 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "newrank"
ilscript[].docfield[] "bgndata"
diff --git a/config-model/src/test/derived/orderilscripts/ilscripts.cfg b/config-model/src/test/derived/orderilscripts/ilscripts.cfg
index 0ed1589af0a..4918e23efc6 100644
--- a/config-model/src/test/derived/orderilscripts/ilscripts.cfg
+++ b/config-model/src/test/derived/orderilscripts/ilscripts.cfg
@@ -1,4 +1,5 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "orderilscripts"
ilscript[].docfield[] "foo"
diff --git a/config-model/src/test/derived/position_array/ilscripts.cfg b/config-model/src/test/derived/position_array/ilscripts.cfg
index ecafbc4a025..3f7611b25d8 100644
--- a/config-model/src/test/derived/position_array/ilscripts.cfg
+++ b/config-model/src/test/derived/position_array/ilscripts.cfg
@@ -1,4 +1,5 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "position_array"
ilscript[].docfield[] "pos"
diff --git a/config-model/src/test/derived/position_attribute/ilscripts.cfg b/config-model/src/test/derived/position_attribute/ilscripts.cfg
index d2fc8503ce5..fbd1a293418 100644
--- a/config-model/src/test/derived/position_attribute/ilscripts.cfg
+++ b/config-model/src/test/derived/position_attribute/ilscripts.cfg
@@ -1,4 +1,5 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "position_attribute"
ilscript[].docfield[] "pos"
diff --git a/config-model/src/test/derived/position_extra/ilscripts.cfg b/config-model/src/test/derived/position_extra/ilscripts.cfg
index a86dcec92ec..4645798723c 100644
--- a/config-model/src/test/derived/position_extra/ilscripts.cfg
+++ b/config-model/src/test/derived/position_extra/ilscripts.cfg
@@ -1,4 +1,5 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "position_extra"
ilscript[].docfield[] "pos_str"
diff --git a/config-model/src/test/derived/prefixexactattribute/ilscripts.cfg b/config-model/src/test/derived/prefixexactattribute/ilscripts.cfg
index 40c7843a0a4..2d1904cf9d8 100644
--- a/config-model/src/test/derived/prefixexactattribute/ilscripts.cfg
+++ b/config-model/src/test/derived/prefixexactattribute/ilscripts.cfg
@@ -1,4 +1,5 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "prefixexactattribute"
ilscript[].docfield[] "indexfield0"
diff --git a/config-model/src/test/derived/rankingexpression/schema-info.cfg b/config-model/src/test/derived/rankingexpression/schema-info.cfg
index 5bf01f10ede..f78eb7de310 100644
--- a/config-model/src/test/derived/rankingexpression/schema-info.cfg
+++ b/config-model/src/test/derived/rankingexpression/schema-info.cfg
@@ -148,96 +148,125 @@ schema[].summaryclass[].fields[].dynamic false
schema[].rankprofile[].name "default"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures true
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "unranked"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "static"
schema[].rankprofile[].hasSummaryFeatures true
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "overflow"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "duplicates"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "whitespace1"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "whitespace2"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "macros"
schema[].rankprofile[].hasSummaryFeatures true
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "macros2"
schema[].rankprofile[].hasSummaryFeatures true
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "macros3"
schema[].rankprofile[].hasSummaryFeatures true
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "macros3-inherited"
schema[].rankprofile[].hasSummaryFeatures true
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "macros-inherited"
schema[].rankprofile[].hasSummaryFeatures true
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "macros-inherited2"
schema[].rankprofile[].hasSummaryFeatures true
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "macros-inherited3"
schema[].rankprofile[].hasSummaryFeatures true
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "macros-refering-macros"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "macros-refering-macros-inherited"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "macros-refering-macros-inherited2"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "macros-refering-macros-inherited-two-levels"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "withmf"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "withboolean"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "withglobalphase"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "layered"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].input[].name "query(v)"
schema[].rankprofile[].input[].type "tensor(v[3])"
schema[].rankprofile[].name "withtfl"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].input[].name "query(v)"
schema[].rankprofile[].input[].type "tensor(v[3])"
schema[].rankprofile[].name "withtfl2"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].input[].name "query(v)"
schema[].rankprofile[].input[].type "tensor(v[3])"
schema[].rankprofile[].name "withnorm"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "withfusion"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "function-with-arg-as-summary-feature"
schema[].rankprofile[].hasSummaryFeatures true
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "function-with-arg-in-global-phase"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "withstringcompare"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].input[].name "query(myquerystring)"
schema[].rankprofile[].input[].type "string"
schema[].rankprofile[].input[].name "query(mybadlong)"
diff --git a/config-model/src/test/derived/rankprofilemodularity/schema-info.cfg b/config-model/src/test/derived/rankprofilemodularity/schema-info.cfg
index 377c10d3293..68892737e63 100644
--- a/config-model/src/test/derived/rankprofilemodularity/schema-info.cfg
+++ b/config-model/src/test/derived/rankprofilemodularity/schema-info.cfg
@@ -18,24 +18,32 @@ schema[].summaryclass[].fields[].dynamic false
schema[].rankprofile[].name "default"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "unranked"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "in_schema0"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "in_schema1"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "in_schema2"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "in_schema3"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "outside_schema1"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "outside_schema2"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
diff --git a/config-model/src/test/derived/ranktypes/ilscripts.cfg b/config-model/src/test/derived/ranktypes/ilscripts.cfg
index adcd2f70c70..22526d1aa23 100644
--- a/config-model/src/test/derived/ranktypes/ilscripts.cfg
+++ b/config-model/src/test/derived/ranktypes/ilscripts.cfg
@@ -1,4 +1,5 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "ranktypes"
ilscript[].docfield[] "title"
diff --git a/config-model/src/test/derived/schemainheritance/ilscripts.cfg b/config-model/src/test/derived/schemainheritance/ilscripts.cfg
index f7324920fe7..b1ba947f1dc 100644
--- a/config-model/src/test/derived/schemainheritance/ilscripts.cfg
+++ b/config-model/src/test/derived/schemainheritance/ilscripts.cfg
@@ -1,4 +1,5 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "child"
ilscript[].docfield[] "pf1"
diff --git a/config-model/src/test/derived/schemainheritance/schema-info.cfg b/config-model/src/test/derived/schemainheritance/schema-info.cfg
index 9fe71780c7a..466e66ad0bb 100644
--- a/config-model/src/test/derived/schemainheritance/schema-info.cfg
+++ b/config-model/src/test/derived/schemainheritance/schema-info.cfg
@@ -116,12 +116,16 @@ schema[].summaryclass[].fields[].dynamic false
schema[].rankprofile[].name "default"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "unranked"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "child_profile"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
schema[].rankprofile[].name "parent_profile"
schema[].rankprofile[].hasSummaryFeatures false
schema[].rankprofile[].hasRankFeatures false
+schema[].rankprofile[].significance.useModel false
diff --git a/config-model/src/test/derived/structanyorder/ilscripts.cfg b/config-model/src/test/derived/structanyorder/ilscripts.cfg
index c07f04b3021..a806bc1b712 100644
--- a/config-model/src/test/derived/structanyorder/ilscripts.cfg
+++ b/config-model/src/test/derived/structanyorder/ilscripts.cfg
@@ -1,4 +1,5 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "annotationsimplicitstruct"
ilscript[].docfield[] "structfield"
diff --git a/config-model/src/test/derived/tensor/index-info.cfg b/config-model/src/test/derived/tensor/index-info.cfg
index c9ce2433e17..2402f074837 100644
--- a/config-model/src/test/derived/tensor/index-info.cfg
+++ b/config-model/src/test/derived/tensor/index-info.cfg
@@ -9,26 +9,18 @@ indexinfo[].command[].indexname "f2"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "f2"
indexinfo[].command[].command "type tensor<float>(x[2],y[1])"
-indexinfo[].command[].indexname "f2"
-indexinfo[].command[].command "word"
indexinfo[].command[].indexname "f3"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "f3"
indexinfo[].command[].command "type tensor(x{})"
-indexinfo[].command[].indexname "f3"
-indexinfo[].command[].command "word"
indexinfo[].command[].indexname "f4"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "f4"
indexinfo[].command[].command "type tensor(x[10],y[10])"
-indexinfo[].command[].indexname "f4"
-indexinfo[].command[].command "word"
indexinfo[].command[].indexname "f5"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "f5"
indexinfo[].command[].command "type tensor<float>(x[10])"
-indexinfo[].command[].indexname "f5"
-indexinfo[].command[].command "word"
indexinfo[].command[].indexname "f6"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "f6"
@@ -39,5 +31,3 @@ indexinfo[].command[].indexname "f7"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "f7"
indexinfo[].command[].command "type tensor<int8>(p{},x[5])"
-indexinfo[].command[].indexname "f7"
-indexinfo[].command[].command "word"
diff --git a/config-model/src/test/derived/tokenization/ilscripts.cfg b/config-model/src/test/derived/tokenization/ilscripts.cfg
index c08b6a54c83..cad8ec81879 100644
--- a/config-model/src/test/derived/tokenization/ilscripts.cfg
+++ b/config-model/src/test/derived/tokenization/ilscripts.cfg
@@ -1,4 +1,5 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "tokenization"
ilscript[].docfield[] "text"
diff --git a/config-model/src/test/derived/types/ilscripts.cfg b/config-model/src/test/derived/types/ilscripts.cfg
index 17bed90deb4..73befb221ce 100644
--- a/config-model/src/test/derived/types/ilscripts.cfg
+++ b/config-model/src/test/derived/types/ilscripts.cfg
@@ -1,4 +1,5 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "types"
ilscript[].docfield[] "abyte"
diff --git a/config-model/src/test/derived/uri_array/ilscripts.cfg b/config-model/src/test/derived/uri_array/ilscripts.cfg
index 3dd97e5c11f..0dc87b513ce 100644
--- a/config-model/src/test/derived/uri_array/ilscripts.cfg
+++ b/config-model/src/test/derived/uri_array/ilscripts.cfg
@@ -1,4 +1,5 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "uri_array"
ilscript[].docfield[] "my_uri"
diff --git a/config-model/src/test/derived/uri_wset/ilscripts.cfg b/config-model/src/test/derived/uri_wset/ilscripts.cfg
index 48e07ef9959..cc45ee5ad8f 100644
--- a/config-model/src/test/derived/uri_wset/ilscripts.cfg
+++ b/config-model/src/test/derived/uri_wset/ilscripts.cfg
@@ -1,4 +1,5 @@
maxtermoccurrences 10000
+maxtokenlength 1000
fieldmatchmaxlength 1000000
ilscript[].doctype "uri_wset"
ilscript[].docfield[] "my_uri"
diff --git a/config-model/src/test/java/com/yahoo/schema/parser/SchemaParserTestCase.java b/config-model/src/test/java/com/yahoo/schema/parser/SchemaParserTestCase.java
index 34ca6c30a61..4186e352388 100644
--- a/config-model/src/test/java/com/yahoo/schema/parser/SchemaParserTestCase.java
+++ b/config-model/src/test/java/com/yahoo/schema/parser/SchemaParserTestCase.java
@@ -170,6 +170,23 @@ public class SchemaParserTestCase {
assertEquals(11, field.matchSettings().getMaxTermOccurrences().get());
}
+ @Test
+ void maxTokenLengthCanBeParsed() throws Exception {
+ String input = joinLines
+ ("schema foo {",
+ " document foo {",
+ " field bar type string {",
+ " indexing: summary | index",
+ " match { max-token-length: 11 }",
+ " }",
+ " }",
+ "}");
+ ParsedSchema schema = parseString(input);
+ var field = schema.getDocument().getFields().get(0);
+ assertEquals("bar", field.name());
+ assertEquals(11, field.matchSettings().getMaxTokenLength().get());
+ }
+
void checkFileParses(String fileName) throws Exception {
var schema = parseFile(fileName);
assertNotNull(schema);
diff --git a/config-model/src/test/java/com/yahoo/schema/processing/IndexingScriptRewriterTestCase.java b/config-model/src/test/java/com/yahoo/schema/processing/IndexingScriptRewriterTestCase.java
index de99d46b9ca..355a810f5ff 100644
--- a/config-model/src/test/java/com/yahoo/schema/processing/IndexingScriptRewriterTestCase.java
+++ b/config-model/src/test/java/com/yahoo/schema/processing/IndexingScriptRewriterTestCase.java
@@ -10,6 +10,7 @@ import com.yahoo.schema.Schema;
import com.yahoo.schema.ApplicationBuilder;
import com.yahoo.schema.AbstractSchemaTestCase;
import com.yahoo.schema.document.BooleanIndexDefinition;
+import com.yahoo.schema.document.MatchType;
import com.yahoo.schema.document.SDDocumentType;
import com.yahoo.schema.document.SDField;
import com.yahoo.vespa.documentmodel.SummaryField;
@@ -155,6 +156,24 @@ public class IndexingScriptRewriterTestCase extends AbstractSchemaTestCase {
field);
}
+ @Test
+ void requireThatMaxTokenLengthIsPropagated() {
+ var field = new SDField("test", DataType.STRING);
+ field.getMatching().maxTokenLength(10);
+ field.parseIndexingScript("test", "{ summary | index }");
+ assertIndexingScript("{ input test | tokenize normalize stem:\"BEST\" max-token-length:10 | summary test | index test; }",
+ field);
+ }
+
+ @Test
+ void requireThatMaxTokenLengthIsPropagatedForWordMatch() {
+ var field = new SDField("test", DataType.STRING);
+ field.getMatching().maxTokenLength(10).setType(MatchType.WORD);
+ field.parseIndexingScript("test", "{ summary | index }");
+ assertIndexingScript("{ input test | exact max-token-length:10 | summary test | index test; }",
+ field);
+ }
+
private static void assertIndexingScript(String expectedScript, SDField unprocessedField) {
assertEquals(expectedScript,
processField(unprocessedField).toString());
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ComplexFieldsValidatorTestCase.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ComplexFieldsValidatorTestCase.java
index ae1db366c9f..2e51a425f6d 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ComplexFieldsValidatorTestCase.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ComplexFieldsValidatorTestCase.java
@@ -143,6 +143,32 @@ public class ComplexFieldsValidatorTestCase {
}
@Test
+ void logs_warning_when_complex_fields_have_struct_fields_with_index_and_exact_match() throws IOException, SAXException {
+ var logger = new MyLogger();
+ createModelAndValidate(joinLines(
+ "schema test {",
+ " document test {",
+ " field nesteds type array<nested> {",
+ " struct-field foo {",
+ " indexing: attribute | index",
+ " match {",
+ " exact",
+ " exact-terminator: '@@'",
+ " }",
+ " }",
+ " }",
+ " struct nested {",
+ " field foo type string {}",
+ " }",
+ " }",
+ "}"), logger);
+ assertTrue(logger.message.toString().contains("For cluster 'mycluster', schema 'test': " +
+ "The following complex fields have struct fields with 'indexing: index' which is " +
+ "not supported and has no effect: nesteds (nesteds.foo). " +
+ "Remove setting or change to 'indexing: attribute' if needed for matching."));
+ }
+
+ @Test
void validation_passes_when_only_supported_struct_field_attributes_are_used() throws IOException, SAXException {
createModelAndValidate(joinLines("search test {",
" document test {",
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/RestartOnDeployForLocalLLMValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/RestartOnDeployForLocalLLMValidatorTest.java
new file mode 100644
index 00000000000..13e91f60712
--- /dev/null
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/RestartOnDeployForLocalLLMValidatorTest.java
@@ -0,0 +1,79 @@
+// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.model.application.validation.change;
+
+import com.yahoo.config.model.api.ConfigChangeAction;
+import com.yahoo.config.model.deploy.DeployState;
+import com.yahoo.config.model.deploy.TestProperties;
+import com.yahoo.vespa.model.VespaModel;
+import com.yahoo.vespa.model.application.validation.ValidationTester;
+import com.yahoo.vespa.model.test.utils.VespaModelCreatorWithMockPkg;
+import org.junit.jupiter.api.Test;
+
+import java.util.List;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+/**
+ * @author lesters
+ */
+public class RestartOnDeployForLocalLLMValidatorTest {
+
+ private static final String LOCAL_LLM_COMPONENT = RestartOnDeployForLocalLLMValidator.LOCAL_LLM_COMPONENT;
+
+ @Test
+ void validate_no_restart_on_deploy() {
+ VespaModel current = createModel();
+ VespaModel next = createModel(withComponent(LOCAL_LLM_COMPONENT));
+ List<ConfigChangeAction> result = validateModel(current, next);
+ assertEquals(0, result.size());
+ }
+
+ @Test
+ void validate_restart_on_deploy() {
+ VespaModel current = createModel(withComponent(LOCAL_LLM_COMPONENT));
+ VespaModel next = createModel(withComponent(LOCAL_LLM_COMPONENT));
+ List<ConfigChangeAction> result = validateModel(current, next);
+ assertEquals(1, result.size());
+ assertTrue(result.get(0).validationId().isEmpty());
+ assertEquals("Need to restart services in cluster 'cluster1' due to use of local LLM", result.get(0).getMessage());
+ }
+
+ private static List<ConfigChangeAction> validateModel(VespaModel current, VespaModel next) {
+ return ValidationTester.validateChanges(new RestartOnDeployForLocalLLMValidator(),
+ next,
+ deployStateBuilder().previousModel(current).build());
+ }
+
+ private static VespaModel createModel(String component) {
+ var xml = """
+ <services version='1.0'>
+ <container id='cluster1' version='1.0'>
+ <http>
+ <server id='server1' port='8080'/>
+ </http>
+ %s
+ </container>
+ </services>
+ """.formatted(component);
+ DeployState.Builder builder = deployStateBuilder();
+ return new VespaModelCreatorWithMockPkg(null, xml).create(builder);
+ }
+
+ private static VespaModel createModel() {
+ return createModel("");
+ }
+
+ private static String withComponent(String componentClass) {
+ return "<component id='llm' class='%s' />".formatted(componentClass);
+ }
+
+ private static DeployState.Builder deployStateBuilder() {
+ return new DeployState.Builder().properties(new TestProperties());
+ }
+
+ private static void assertStartsWith(String expected, List<ConfigChangeAction> result) {
+ assertTrue(result.get(0).getMessage().startsWith(expected));
+ }
+
+}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/ContentClusterFixture.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/ContentClusterFixture.java
index 8778f0c26c0..0677cabafb0 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/ContentClusterFixture.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/ContentClusterFixture.java
@@ -29,11 +29,18 @@ public abstract class ContentClusterFixture {
nextCluster = createCluster(nextSd);
}
+ protected ContentClusterFixture(ContentCluster currentCluster, ContentCluster nextCluster) {
+ this.currentCluster = currentCluster;
+ this.nextCluster = nextCluster;
+ }
+
public ContentClusterFixture(String entireSd) throws Exception {
- currentCluster = new ContentClusterBuilder().build(
- ContentClusterUtils.createMockRoot(List.of(entireSd)));
- nextCluster = new ContentClusterBuilder().build(
- ContentClusterUtils.createMockRoot(List.of(entireSd)));
+ currentCluster = createClusterFromEntireSd(entireSd);
+ nextCluster = createClusterFromEntireSd(entireSd);
+ }
+
+ protected static ContentCluster createClusterFromEntireSd(String sdContent) throws Exception {
+ return new ContentClusterBuilder().build(ContentClusterUtils.createMockRoot(List.of(sdContent)));
}
private static ContentCluster createCluster(String sdContent) throws Exception {
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidatorTest.java
index cd54a20523f..247f01068fa 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidatorTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidatorTest.java
@@ -39,6 +39,21 @@ public class IndexingScriptChangeValidatorTest {
}
}
+ private static class ComplexFixture extends ContentClusterFixture {
+ IndexingScriptChangeValidator validator;
+ public ComplexFixture(String currentSd, String nextSd) throws Exception {
+ super(createClusterFromEntireSd(currentSd), createClusterFromEntireSd(nextSd));
+ validator = new IndexingScriptChangeValidator(ClusterSpec.Id.from("test"),
+ currentDb().getDerivedConfiguration().getSchema(),
+ nextDb().getDerivedConfiguration().getSchema());
+ }
+
+ @Override
+ public List<VespaConfigChangeAction> validate() {
+ return validator.validate();
+ }
+ }
+
private static class ScriptFixture {
private final ScriptExpression currentScript;
@@ -56,6 +71,9 @@ public class IndexingScriptChangeValidatorTest {
private static final String FIELD = "field f1 type string";
private static final String FIELD_F2 = "field f2 type string";
+ private static final String TENSOR_FIELD_F1 = "field f1 type tensor(x[2])";
+ private static final String TENSOR_FIELD_F2 = "field f2 type tensor(x[2])";
+ private static final String TENSOR_FIELD_F3 = "field f3 type tensor(x[2])";
private static VespaConfigChangeAction expectedReindexingAction(String changedMsg, String fromScript, String toScript) {
return expectedReindexingAction("f1", changedMsg, fromScript, toScript);
@@ -115,6 +133,28 @@ public class IndexingScriptChangeValidatorTest {
}
@Test
+ void requireThatAddingIndexAspectForExtraTensorFieldWithChangedInputRequireReindexing() throws Exception {
+ new ComplexFixture(joinLines("schema test {",
+ " document test {",
+ " " + TENSOR_FIELD_F1 + " { }",
+ " " + TENSOR_FIELD_F2 + " { }",
+ " }",
+ " " + TENSOR_FIELD_F3 + " { indexing: input f1 | attribute }",
+ "}"),
+ joinLines("schema test {",
+ " document test {",
+ " " + TENSOR_FIELD_F1 + " { }",
+ " " + TENSOR_FIELD_F2 + " { }",
+ " }",
+ " " + TENSOR_FIELD_F3 + " { indexing: input f2 | index | attribute }",
+ "}")).
+ assertValidation(List.of(expectedReindexingAction("f3", "add index aspect",
+ "{ input f1 | attribute f3; }",
+ "{ input f2 | index f3 | attribute f3; }")));
+ }
+
+
+ @Test
void requireThatSettingDynamicSummaryIsOk() throws Exception {
new Fixture(FIELD + " { indexing: summary }",
FIELD + " { indexing: summary \n summary: dynamic }").
diff --git a/configd/src/apps/sentinel/manager.cpp b/configd/src/apps/sentinel/manager.cpp
index 36bdef0dd8a..9ee259bb892 100644
--- a/configd/src/apps/sentinel/manager.cpp
+++ b/configd/src/apps/sentinel/manager.cpp
@@ -170,16 +170,17 @@ Manager::handleChildDeaths()
}
void
-Manager::updateActiveFdset(fd_set *fds, int *maxNum)
+Manager::updateActiveFdset(std::vector<pollfd> &fds)
{
- // ### _Possibly put an assert here if fd is > 1023???
- for (OutputConnection *c : _outputConnections) {
+ fds.clear();
+ for (const OutputConnection *c : _outputConnections) {
int fd = c->fd();
if (fd >= 0) {
- FD_SET(fd, fds);
- if (fd >= *maxNum) {
- *maxNum = fd + 1;
- }
+ fds.emplace_back();
+ auto &ev = fds.back();
+ ev.fd = fd;
+ ev.events = POLLIN;
+ ev.revents = 0;
}
}
}
diff --git a/configd/src/apps/sentinel/manager.h b/configd/src/apps/sentinel/manager.h
index 765803b5da6..6967e078dd9 100644
--- a/configd/src/apps/sentinel/manager.h
+++ b/configd/src/apps/sentinel/manager.h
@@ -9,6 +9,7 @@
#include "state-api.h"
#include <vespa/config-sentinel.h>
#include <vespa/vespalib/net/http/state_server.h>
+#include <poll.h>
#include <sys/types.h>
#include <sys/select.h>
@@ -54,7 +55,7 @@ public:
virtual ~Manager();
bool terminate();
bool doWork();
- void updateActiveFdset(fd_set *fds, int *maxNum);
+ void updateActiveFdset(std::vector<pollfd> &fds);
};
}
diff --git a/configd/src/apps/sentinel/sentinel.cpp b/configd/src/apps/sentinel/sentinel.cpp
index 4f1d6019065..db9f73ea76d 100644
--- a/configd/src/apps/sentinel/sentinel.cpp
+++ b/configd/src/apps/sentinel/sentinel.cpp
@@ -10,7 +10,6 @@
#include <clocale>
#include <string>
#include <unistd.h>
-#include <sys/time.h>
#include <vespa/log/log.h>
LOG_SETUP("sentinel.config-sentinel");
@@ -84,6 +83,7 @@ main(int argc, char **argv)
}
sentinel::Manager manager(environment);
+ std::vector<pollfd> fds;
vespalib::steady_time lastTime = vespalib::steady_clock::now();
while (!stop()) {
try {
@@ -103,16 +103,10 @@ main(int argc, char **argv)
if (vespalib::SignalHandler::CHLD.check()) {
continue;
}
- int maxNum = 0;
- fd_set fds;
- FD_ZERO(&fds);
- manager.updateActiveFdset(&fds, &maxNum);
+ manager.updateActiveFdset(fds);
+ constexpr int poll_timeout_ms = 100;
- struct timeval tv;
- tv.tv_sec = 0;
- tv.tv_usec = 100000; //0.1s
-
- select(maxNum, &fds, nullptr, nullptr, &tv);
+ poll(fds.data(), fds.size(), poll_timeout_ms);
vespalib::steady_time now = vespalib::steady_clock::now();
if ((now - lastTime) < 10ms) {
diff --git a/configdefinitions/src/vespa/CMakeLists.txt b/configdefinitions/src/vespa/CMakeLists.txt
index 81e587fcace..0ab12932880 100644
--- a/configdefinitions/src/vespa/CMakeLists.txt
+++ b/configdefinitions/src/vespa/CMakeLists.txt
@@ -6,8 +6,6 @@ vespa_add_library(configdefinitions
)
vespa_generate_config(configdefinitions application-id.def)
install_config_definition(application-id.def cloud.config.application-id.def)
-vespa_generate_config(configdefinitions athenz-provider-service.def)
-install_config_definition(athenz-provider-service.def vespa.hosted.athenz.instanceproviderservice.config.athenz-provider-service.def)
vespa_generate_config(configdefinitions attributes.def)
install_config_definition(attributes.def vespa.config.search.attributes.def)
vespa_generate_config(configdefinitions cluster-info.def)
diff --git a/configdefinitions/src/vespa/athenz-provider-service.def b/configdefinitions/src/vespa/athenz-provider-service.def
deleted file mode 100644
index 5ee9be323e8..00000000000
--- a/configdefinitions/src/vespa/athenz-provider-service.def
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-namespace=vespa.hosted.athenz.instanceproviderservice.config
-
-# Athenz domain
-domain string
-
-# Athenz service name
-serviceName string
-
-# Secret name of private Key
-secretName string
-
-# Secret version
-secretVersion int
-
-# Temporary resources
-sisSecretName string default=""
-sisSecretVersion int default=0
-sisUrl string default = ""
-
-# Secret name of CA certificate
-caCertSecretName string
-
-# Certificate DNS suffix
-certDnsSuffix string default=""
-
-# Athenz ZTS server url
-ztsUrl string default=""
-
-# Path to Athenz CA JKS trust store
-athenzCaTrustStore string default=""
-
-# Period between certificate updates
-updatePeriodDays int default=1
-
-# Tenant Service id
-tenantService string default=vespa.vespa.tenant
diff --git a/configdefinitions/src/vespa/ilscripts.def b/configdefinitions/src/vespa/ilscripts.def
index acb06abb755..7a286773564 100644
--- a/configdefinitions/src/vespa/ilscripts.def
+++ b/configdefinitions/src/vespa/ilscripts.def
@@ -3,6 +3,8 @@ namespace=vespa.configdefinition
## The maximum number of occurrences of a given term to index per field
maxtermoccurrences int default=10000
+## The maximum number of characters for a token
+maxtokenlength int default=1000
fieldmatchmaxlength int default=1000000
ilscript[].doctype string
diff --git a/configserver/src/main/resources/configserver-app/services.xml b/configserver/src/main/resources/configserver-app/services.xml
index fb6516ce6cf..d71840a95c2 100644
--- a/configserver/src/main/resources/configserver-app/services.xml
+++ b/configserver/src/main/resources/configserver-app/services.xml
@@ -45,13 +45,11 @@
</components>
<preprocess:include file='config-models.xml' required='false' />
- <preprocess:include file='routing-status.xml' required='false' />
<preprocess:include file='model-integration.xml' required='true' />
<component id="com.yahoo.vespa.configserver.flags.ConfigServerFlagSource" bundle="configserver-flags"/>
<component id="com.yahoo.vespa.configserver.flags.db.FlagsDbImpl" bundle="configserver-flags"/>
- <preprocess:include file='metrics-packets.xml' required='false' />
<component id="com.yahoo.vespa.service.slobrok.SlobrokMonitorManagerImpl" bundle="service-monitor" />
<component id="com.yahoo.vespa.service.health.HealthMonitorManager" bundle="service-monitor" />
<component id="com.yahoo.vespa.service.manager.UnionMonitorManager" bundle="service-monitor" />
@@ -153,13 +151,9 @@
<preprocess:include file='http-server.xml' required='false' />
</http>
- <preprocess:include file='athenz-identity-provider.xml' required='false' />
-
<preprocess:include file='configserver-config.xml' required='false' />
<preprocess:include file='configserver-components.xml' required='false' />
- <preprocess:include file='zookeeper-server-config.xml' required='false' />
-
</container>
</services>
diff --git a/container-core/src/main/java/com/yahoo/processing/request/CompoundName.java b/container-core/src/main/java/com/yahoo/processing/request/CompoundName.java
index b4536a1c56b..440df4f9be9 100644
--- a/container-core/src/main/java/com/yahoo/processing/request/CompoundName.java
+++ b/container-core/src/main/java/com/yahoo/processing/request/CompoundName.java
@@ -8,6 +8,7 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import static com.yahoo.text.Lowercase.toLowerCase;
@@ -74,41 +75,52 @@ public final class CompoundName {
* @param compounds the compounds of this name
*/
private CompoundName(String name, String [] compounds, boolean useCache) {
- if (name == null) throw new NullPointerException("Name can not be null");
-
- this.name = name;
+ this.name = Objects.requireNonNull(name, "Name can not be null");
this.lowerCasedName = toLowerCase(name);
- if (compounds.length == 1 && compounds[0].isEmpty()) {
- this.compounds = List.of();
- this.hashCode = 0;
- rest = this;
- first = this;
+ if (compounds.length == 1) {
+ if (compounds[0].isEmpty()) {
+ this.compounds = List.of();
+ this.hashCode = 0;
+ rest = first = this;
+ return;
+ }
+ this.compounds = new ImmutableArrayList(compounds);
+ this.hashCode = this.compounds.hashCode();
+ rest = first = empty;
return;
}
- this.compounds = new ImmutableArrayList(compounds);
- this.hashCode = this.compounds.hashCode();
-
- if (compounds.length > 1) {
- String restName = name.substring(compounds[0].length()+1);
- if (useCache) {
- rest = cache.computeIfAbsent(restName, (key) -> new CompoundName(key, Arrays.copyOfRange(compounds, 1, compounds.length), useCache));
- } else {
- rest = new CompoundName(restName, Arrays.copyOfRange(compounds, 1, compounds.length), useCache);
+ CompoundName[] children = new CompoundName[compounds.length];
+ for (int i = 0; i + 1 < children.length; i++) {
+ int start = 0, end = i == 0 ? -1 : children[0].name.length();
+ for (int j = 0; j + i < children.length; j++) {
+ end += compounds[j + i].length() + 1;
+ if (end == start) throw new IllegalArgumentException("'" + name + "' is not a legal compound name. " +
+ "Consecutive, leading or trailing dots are not allowed.");
+ String subName = this.name.substring(start, end);
+ CompoundName cached = cache.get(subName);
+ children[j] = cached != null ? cached
+ : new CompoundName(subName,
+ this.lowerCasedName.substring(start, end),
+ Arrays.copyOfRange(compounds, j, j + i + 1),
+ i == 0 ? empty : children[j + 1],
+ i == 0 ? empty : children[j]);
+ if (useCache && cached == null) cache.put(subName, children[j]);
+ start += compounds[j].length() + 1;
}
- } else {
- rest = empty;
}
+ this.compounds = new ImmutableArrayList(compounds);
+ this.hashCode = this.compounds.hashCode();
+ this.rest = children[1];
+ this.first = children[0];
+ }
- if (compounds.length > 1) {
- String firstName = name.substring(0, name.length() - (compounds[compounds.length-1].length()+1));
- if (useCache) {
- first = cache.computeIfAbsent(firstName, (key) -> new CompoundName(key, Arrays.copyOfRange(compounds, 0, compounds.length-1), useCache));
- } else {
- first = new CompoundName(firstName, Arrays.copyOfRange(compounds, 0, compounds.length-1), useCache);
- }
- } else {
- first = empty;
- }
+ private CompoundName(String name, String lowerCasedName, String[] compounds, CompoundName rest, CompoundName first) {
+ this.name = name;
+ this.lowerCasedName = lowerCasedName;
+ this.compounds = new ImmutableArrayList(compounds);
+ this.hashCode = this.compounds.hashCode();
+ this.rest = rest;
+ this.first = first;
}
private static List<String> parse(String s) {
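For reference, a minimal self-contained usage sketch (illustration only, not part of this patch) of the public behaviour the rewritten constructor preserves; every call used here appears in the CompoundNameTestCase changes below, including the new illegal-name rejection:

    // Sketch of CompoundName usage as asserted by the updated test case.
    import com.yahoo.processing.request.CompoundName;
    import java.util.List;

    public class CompoundNameSketch {

        public static void main(String[] args) {
            CompoundName name = CompoundName.from("one.two.three");

            List<String> parts = name.asList();                              // [one, two, three]
            String head = name.get(0);                                       // "one"
            boolean prefixed = name.hasPrefix(CompoundName.from("one.two")); // true

            // The rewritten constructor rejects malformed names eagerly:
            try {
                CompoundName.from("one..three");
            } catch (IllegalArgumentException e) {
                // "'one..three' is not a legal compound name. Consecutive,
                //  leading or trailing dots are not allowed."
            }

            System.out.println(parts + " " + head + " " + prefixed);
        }
    }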
diff --git a/container-core/src/test/java/com/yahoo/processing/request/CompoundNameTestCase.java b/container-core/src/test/java/com/yahoo/processing/request/CompoundNameTestCase.java
index b5143f89c78..7523a68501f 100644
--- a/container-core/src/test/java/com/yahoo/processing/request/CompoundNameTestCase.java
+++ b/container-core/src/test/java/com/yahoo/processing/request/CompoundNameTestCase.java
@@ -13,7 +13,7 @@ import static org.junit.jupiter.api.Assertions.*;
/**
* Module local test of the basic property name building block.
*
- * @author <a href="mailto:steinar@yahoo-inc.com">Steinar Knutsen</a>
+ * @author Steinar Knutsen
*/
public class CompoundNameTestCase {
@@ -30,22 +30,22 @@ public class CompoundNameTestCase {
}
@Test
- final void testLast() {
+ void testLast() {
assertEquals(NAME.substring(NAME.lastIndexOf('.') + 1), C_NAME.last());
}
@Test
- final void testFirst() {
+ void testFirst() {
assertEquals(NAME.substring(0, NAME.indexOf('.')), C_NAME.first());
}
@Test
- final void testRest() {
+ void testRest() {
verifyStrict(NAME.substring(NAME.indexOf('.') + 1), C_NAME.rest());
}
@Test
- final void testRestN() {
+ void testRestN() {
verifyStrict("a.b.c.d.e", C_abcde.rest(0));
verifyStrict("b.c.d.e", C_abcde.rest(1));
verifyStrict("c.d.e", C_abcde.rest(2));
@@ -53,8 +53,9 @@ public class CompoundNameTestCase {
verifyStrict("e", C_abcde.rest(4));
verifyStrict(CompoundName.empty, C_abcde.rest(5));
}
+
@Test
- final void testFirstN() {
+ void testFirstN() {
verifyStrict("a.b.c.d.e", C_abcde.first(5));
verifyStrict("a.b.c.d", C_abcde.first(4));
verifyStrict("a.b.c", C_abcde.first(3));
@@ -64,15 +65,32 @@ public class CompoundNameTestCase {
}
@Test
- final void testPrefix() {
- CompoundName abc = CompoundName.from("a.b.c");
- assertTrue(abc.hasPrefix(CompoundName.empty));
- assertTrue(abc.hasPrefix(CompoundName.from("a")));
- assertTrue(abc.hasPrefix(CompoundName.from("a.b")));
- assertTrue(abc.hasPrefix(CompoundName.from("a.b.c")));
+ void testPrefix() {
+ CompoundName abcc = CompoundName.from("a.b.cc");
+ assertTrue(abcc.hasPrefix(CompoundName.empty));
+ assertTrue(abcc.hasPrefix(CompoundName.from("a")));
+ assertTrue(abcc.hasPrefix(CompoundName.from("a.b")));
+ assertTrue(abcc.hasPrefix(CompoundName.from("a.b.cc")));
- assertFalse(abc.hasPrefix(CompoundName.from("a.b.c.d")));
- assertFalse(abc.hasPrefix(CompoundName.from("a.b.d")));
+ assertFalse(abcc.hasPrefix(CompoundName.from("a.b.c")));
+ assertFalse(abcc.hasPrefix(CompoundName.from("a.b.c.d")));
+ assertFalse(abcc.hasPrefix(CompoundName.from("a.b.d")));
+ }
+
+ @Test
+ void testIllegalCompound() {
+ assertEquals("'a.' is not a legal compound name. Names can not end with a dot.",
+ assertThrows(IllegalArgumentException.class,
+ () -> CompoundName.from("a."))
+ .getMessage());
+ assertEquals("'.b' is not a legal compound name. Consecutive, leading or trailing dots are not allowed.",
+ assertThrows(IllegalArgumentException.class,
+ () -> CompoundName.from(".b"))
+ .getMessage());
+ assertEquals("'a..b' is not a legal compound name. Consecutive, leading or trailing dots are not allowed.",
+ assertThrows(IllegalArgumentException.class,
+ () -> CompoundName.from("a..b"))
+ .getMessage());
}
@Test
@@ -82,7 +100,7 @@ public class CompoundNameTestCase {
}
@Test
- final void testSize() {
+ void testSize() {
Splitter s = Splitter.on('.');
Iterable<String> i = s.split(NAME);
int n = 0;
@@ -93,23 +111,23 @@ public class CompoundNameTestCase {
}
@Test
- final void testGet() {
+ void testGet() {
String s = C_NAME.get(0);
assertEquals(NAME.substring(0, NAME.indexOf('.')), s);
}
@Test
- final void testIsCompound() {
+ void testIsCompound() {
assertTrue(C_NAME.isCompound());
}
@Test
- final void testIsEmpty() {
+ void testIsEmpty() {
assertFalse(C_NAME.isEmpty());
}
@Test
- final void testAsList() {
+ void testAsList() {
List<String> l = C_NAME.asList();
Splitter peoplesFront = Splitter.on('.');
Iterable<String> answer = peoplesFront.split(NAME);
@@ -121,7 +139,7 @@ public class CompoundNameTestCase {
}
@Test
- final void testEqualsObject() {
+ void testEqualsObject() {
assertNotEquals(C_NAME, NAME);
assertNotEquals(C_NAME, null);
verifyStrict(C_NAME, C_NAME);
@@ -129,7 +147,7 @@ public class CompoundNameTestCase {
}
@Test
- final void testEmptyNonEmpty() {
+ void testEmptyNonEmpty() {
assertTrue(CompoundName.empty.isEmpty());
assertEquals(0, CompoundName.empty.size());
assertFalse(CompoundName.from("a").isEmpty());
@@ -140,7 +158,7 @@ public class CompoundNameTestCase {
}
@Test
- final void testGetLowerCasedName() {
+ void testGetLowerCasedName() {
assertEquals(Lowercase.toLowerCase(NAME), C_NAME.getLowerCasedName());
}
@@ -223,4 +241,5 @@ public class CompoundNameTestCase {
assertEquals("[one]", CompoundName.from("one").asList().toString());
assertEquals("[one, two, three]", CompoundName.from("one.two.three").asList().toString());
}
+
}
diff --git a/container-search/abi-spec.json b/container-search/abi-spec.json
index d85f1844b18..1c6c773afd9 100644
--- a/container-search/abi-spec.json
+++ b/container-search/abi-spec.json
@@ -8539,6 +8539,7 @@
"public com.yahoo.search.schema.RankProfile$Builder setHasSummaryFeatures(boolean)",
"public com.yahoo.search.schema.RankProfile$Builder setHasRankFeatures(boolean)",
"public com.yahoo.search.schema.RankProfile$Builder addInput(java.lang.String, com.yahoo.search.schema.RankProfile$InputType)",
+ "public com.yahoo.search.schema.RankProfile$Builder setUseSignificanceModel(boolean)",
"public com.yahoo.search.schema.RankProfile build()"
],
"fields" : [ ]
@@ -8573,6 +8574,7 @@
"public com.yahoo.search.schema.Schema schema()",
"public boolean hasSummaryFeatures()",
"public boolean hasRankFeatures()",
+ "public boolean useSignificanceModel()",
"public java.util.Map inputs()",
"public boolean equals(java.lang.Object)",
"public int hashCode()",
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/parser/SimpleParser.java b/container-search/src/main/java/com/yahoo/prelude/query/parser/SimpleParser.java
index ea0cd2312a6..d3e6241a6e5 100644
--- a/container-search/src/main/java/com/yahoo/prelude/query/parser/SimpleParser.java
+++ b/container-search/src/main/java/com/yahoo/prelude/query/parser/SimpleParser.java
@@ -134,10 +134,8 @@ abstract class SimpleParser extends StructuredParser {
if (topLevelItem != null && topLevelItem != not) {
// => neutral rank items becomes implicit positives
not.addPositiveItem(getItemAsPositiveItem(topLevelItem, not));
- return not;
- } else {
- return not;
}
+ return not;
}
if (topLevelItem != null) {
return topLevelItem;
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/InvokerFactory.java b/container-search/src/main/java/com/yahoo/search/dispatch/InvokerFactory.java
index d7fad148c8c..bfcf0af325d 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/InvokerFactory.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/InvokerFactory.java
@@ -79,7 +79,7 @@ public abstract class InvokerFactory {
success.add(node);
}
}
- if ( ! cluster.isPartialGroupCoverageSufficient(success) && !acceptIncompleteCoverage) {
+ if ( ! cluster.isPartialGroupCoverageSufficient(group.hasSufficientCoverage(), success) && !acceptIncompleteCoverage) {
return Optional.empty();
}
if (invokers.isEmpty()) {
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/Group.java b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/Group.java
index 965ce4aeb94..c7af37b3a26 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/Group.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/Group.java
@@ -23,7 +23,7 @@ public class Group {
// Using volatile to ensure visibility for reader.
// All updates are done in a single writer thread
- private volatile boolean hasSufficientCoverage = true;
+ private volatile boolean hasSufficientCoverage = false;
private volatile boolean hasFullCoverage = true;
private volatile long activeDocuments = 0;
private volatile long targetActiveDocuments = 0;
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java
index 56545a32831..8f83d8ef5ce 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java
@@ -226,17 +226,20 @@ public class SearchCluster implements NodeManager<Node> {
// With just one group sufficient coverage may not be the same as full coverage, as the
// group will always be marked sufficient for use.
updateSufficientCoverage(group, true);
- boolean sufficientCoverage = groups.isGroupCoverageSufficient(group.activeDocuments(), group.activeDocuments());
- trackGroupCoverageChanges(group, sufficientCoverage, group.activeDocuments());
+ boolean sufficientCoverage = groups.isGroupCoverageSufficient(group.hasSufficientCoverage(),
+ group.activeDocuments(), group.activeDocuments(), group.activeDocuments());
+ trackGroupCoverageChanges(group, sufficientCoverage, group.activeDocuments(), group.activeDocuments());
}
private void pingIterationCompletedMultipleGroups(SearchGroupsImpl groups) {
groups.groups().forEach(Group::aggregateNodeValues);
- long medianDocuments = groups.medianDocumentsPerGroup();
+ long medianDocuments = groups.medianDocumentCount();
+ long maxDocuments = groups.maxDocumentCount();
for (Group group : groups.groups()) {
- boolean sufficientCoverage = groups.isGroupCoverageSufficient(group.activeDocuments(), medianDocuments);
+ boolean sufficientCoverage = groups.isGroupCoverageSufficient(group.hasSufficientCoverage(),
+ group.activeDocuments(), medianDocuments, maxDocuments);
updateSufficientCoverage(group, sufficientCoverage);
- trackGroupCoverageChanges(group, sufficientCoverage, medianDocuments);
+ trackGroupCoverageChanges(group, sufficientCoverage, medianDocuments, maxDocuments);
}
}
@@ -261,7 +264,7 @@ public class SearchCluster implements NodeManager<Node> {
/**
* Calculate whether a subset of nodes in a group has enough coverage
*/
- private void trackGroupCoverageChanges(Group group, boolean fullCoverage, long medianDocuments) {
+ private void trackGroupCoverageChanges(Group group, boolean fullCoverage, long medianDocuments, long maxDocuments) {
if ( ! hasInformationAboutAllNodes()) return; // Be silent until we know what we are talking about.
boolean changed = group.fullCoverageStatusChanged(fullCoverage);
if (changed || (!fullCoverage && System.currentTimeMillis() > nextLogTime)) {
@@ -278,7 +281,7 @@ public class SearchCluster implements NodeManager<Node> {
unresponsive.append('\n').append(node);
}
String message = "Cluster " + clusterId + ": " + group + " has reduced coverage: " +
- "Active documents: " + group.activeDocuments() + "/" + medianDocuments + ", " +
+ "Active documents: " + group.activeDocuments() + "/" + maxDocuments + ", " +
"Target active documents: " + group.targetActiveDocuments() + ", " +
"working nodes: " + group.workingNodes() + "/" + group.nodes().size() +
", unresponsive nodes: " + (unresponsive.toString().isEmpty() ? " none" : unresponsive);
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchGroups.java b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchGroups.java
index 85063b8ef57..0bb694f610e 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchGroups.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchGroups.java
@@ -13,21 +13,30 @@ import static java.util.stream.Collectors.toCollection;
import static java.util.stream.Collectors.toSet;
/**
- * Simple interface for groups and their nodes in the content cluster
+ * Simple interface for groups and their nodes in the content cluster.
+ *
* @author baldersheim
*/
public interface SearchGroups {
+
Group get(int id);
+
Set<Integer> keys();
+
Collection<Group> groups();
+
default boolean isEmpty() {
return size() == 0;
}
+
default Set<Node> nodes() {
return groups().stream().flatMap(group -> group.nodes().stream())
.sorted(comparingInt(Node::key))
.collect(toCollection(LinkedHashSet::new));
}
+
int size();
- boolean isPartialGroupCoverageSufficient(Collection<Node> nodes);
+
+ boolean isPartialGroupCoverageSufficient(boolean currentCoverageSufficient, Collection<Node> nodes);
+
}
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchGroupsImpl.java b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchGroupsImpl.java
index c49a140804c..6528c5d2ae4 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchGroupsImpl.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchGroupsImpl.java
@@ -7,14 +7,17 @@ import java.util.Collection;
import java.util.Map;
import java.util.Set;
+/**
+ * @author baldersheim
+ */
public class SearchGroupsImpl implements SearchGroups {
private final Map<Integer, Group> groups;
- private final double minActivedocsPercentage;
+ private final double minActiveDocsPercentage;
- public SearchGroupsImpl(Map<Integer, Group> groups, double minActivedocsPercentage) {
+ public SearchGroupsImpl(Map<Integer, Group> groups, double minActiveDocsPercentage) {
this.groups = Map.copyOf(groups);
- this.minActivedocsPercentage = minActivedocsPercentage;
+ this.minActiveDocsPercentage = minActiveDocsPercentage;
}
@Override public Group get(int id) { return groups.get(id); }
@@ -23,23 +26,38 @@ public class SearchGroupsImpl implements SearchGroups {
@Override public int size() { return groups.size(); }
@Override
- public boolean isPartialGroupCoverageSufficient(Collection<Node> nodes) {
- if (size() == 1)
- return true;
- long activeDocuments = nodes.stream().mapToLong(Node::getActiveDocuments).sum();
- return isGroupCoverageSufficient(activeDocuments, medianDocumentsPerGroup());
+ public boolean isPartialGroupCoverageSufficient(boolean currentIsGroupCoverageSufficient, Collection<Node> nodes) {
+ if (size() == 1) return true;
+ long groupDocumentCount = nodes.stream().mapToLong(Node::getActiveDocuments).sum();
+ return isGroupCoverageSufficient(currentIsGroupCoverageSufficient,
+ groupDocumentCount, medianDocumentCount(), maxDocumentCount());
}
- public boolean isGroupCoverageSufficient(long activeDocuments, long medianDocuments) {
- if (medianDocuments <= 0) return true;
- double documentCoverage = 100.0 * (double) activeDocuments / medianDocuments;
- return documentCoverage >= minActivedocsPercentage;
+ public boolean isGroupCoverageSufficient(boolean currentIsGroupCoverageSufficient,
+ long groupDocumentCount, long medianDocumentCount, long maxDocumentCount) {
+ if (medianDocumentCount <= 0) return true;
+ if (currentIsGroupCoverageSufficient) {
+ // To take a group *out of* rotation, require that it has less active documents than the median.
+ // This avoids scenarios where incorrect accounting in a single group takes all other groups offline.
+ double documentCoverage = 100.0 * (double) groupDocumentCount / medianDocumentCount;
+ return documentCoverage >= minActiveDocsPercentage;
+ }
+ else {
+ // to put a group *in* rotation, require that it has as many documents as the largest group,
+ // to avoid taking groups in too early when the majority of the groups have just been added.
+ double documentCoverage = 100.0 * (double) groupDocumentCount / maxDocumentCount;
+ return documentCoverage >= minActiveDocsPercentage;
+ }
}
- public long medianDocumentsPerGroup() {
+ public long medianDocumentCount() {
if (isEmpty()) return 0;
double[] activeDocuments = groups().stream().mapToDouble(Group::activeDocuments).toArray();
return (long) Quantiles.median().computeInPlace(activeDocuments);
}
+ public long maxDocumentCount() {
+ return (long)groups().stream().mapToDouble(Group::activeDocuments).max().orElse(0);
+ }
+
}
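The new two-argument coverage check is asymmetric: a group already in rotation is measured against the median document count, while a group currently out of rotation must match the largest group before it is taken back in. A standalone sketch of that rule (hypothetical helper class, illustration only, not Vespa code) follows:

    // Standalone restatement of the coverage decision introduced in SearchGroupsImpl above.
    public class CoverageRuleSketch {

        /**
         * @param inRotation    whether the group currently has sufficient coverage
         * @param groupDocs     active documents in this group
         * @param medianDocs    median active documents across groups
         * @param maxDocs       maximum active documents across groups
         * @param minPercentage required coverage percentage, e.g. 97.0
         */
        static boolean hasSufficientCoverage(boolean inRotation, long groupDocs,
                                             long medianDocs, long maxDocs, double minPercentage) {
            if (medianDocs <= 0) return true;
            // Groups in rotation are compared to the median, so one group with
            // inflated counts cannot push all others out; groups out of rotation
            // are compared to the largest group, so they are not taken in too early.
            long reference = inRotation ? medianDocs : maxDocs;
            return 100.0 * groupDocs / reference >= minPercentage;
        }

        public static void main(String[] args) {
            // A freshly added group at 80% of the largest group stays out ...
            System.out.println(hasSufficientCoverage(false, 800, 900, 1000, 97.0)); // false
            // ... while an established group at 98% of the median stays in.
            System.out.println(hasSufficientCoverage(true, 882, 900, 1000, 97.0));  // true
        }
    }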
diff --git a/container-search/src/main/java/com/yahoo/search/handler/Json2SingleLevelMap.java b/container-search/src/main/java/com/yahoo/search/handler/Json2SingleLevelMap.java
index 01167be6b8b..fdedbdc2fd9 100644
--- a/container-search/src/main/java/com/yahoo/search/handler/Json2SingleLevelMap.java
+++ b/container-search/src/main/java/com/yahoo/search/handler/Json2SingleLevelMap.java
@@ -64,8 +64,8 @@ class Json2SingleLevelMap {
}
void parse(Map<String, String> map, String parent) throws IOException {
- for (parser.nextToken(); parser.getCurrentToken() != JsonToken.END_OBJECT; parser.nextToken()) {
- String fieldName = parent + parser.getCurrentName();
+ for (parser.nextToken(); parser.currentToken() != JsonToken.END_OBJECT; parser.nextToken()) {
+ String fieldName = parent + parser.currentName();
JsonToken token = parser.nextToken();
if ((token == JsonToken.VALUE_STRING) ||
(token == JsonToken.VALUE_NUMBER_FLOAT) ||
@@ -89,9 +89,9 @@ class Json2SingleLevelMap {
}
private String skipChildren(JsonParser parser, byte [] input) throws IOException {
- JsonLocation start = parser.getCurrentLocation();
+ JsonLocation start = parser.currentLocation();
parser.skipChildren();
- JsonLocation end = parser.getCurrentLocation();
+ JsonLocation end = parser.currentLocation();
int offset = (int)start.getByteOffset() - 1;
return new String(input, offset, (int)(end.getByteOffset() - offset), StandardCharsets.UTF_8);
}
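
The Json2SingleLevelMap change above, like the similar changes in document/ and metrics-proxy/ further down, only swaps Jackson's deprecated getCurrentToken()/getCurrentName()/getCurrentLocation() accessors for their current equivalents, matching the jackson2 bump to 2.17.1 in dependency-versions/pom.xml. A minimal, self-contained sketch of the same iteration pattern over a flat JSON object (illustrative only):

    import com.fasterxml.jackson.core.JsonFactory;
    import com.fasterxml.jackson.core.JsonParser;
    import com.fasterxml.jackson.core.JsonToken;

    import java.io.IOException;
    import java.util.LinkedHashMap;
    import java.util.Map;

    class FlatJsonSketch {

        // Reads e.g. {"a":"1","b":"2"} into a map using the non-deprecated accessors.
        static Map<String, String> parse(String json) throws IOException {
            Map<String, String> map = new LinkedHashMap<>();
            try (JsonParser parser = new JsonFactory().createParser(json)) {
                if (parser.nextToken() != JsonToken.START_OBJECT)
                    throw new IOException("Expected start of object, got " + parser.currentToken());
                for (parser.nextToken(); parser.currentToken() != JsonToken.END_OBJECT; parser.nextToken()) {
                    String fieldName = parser.currentName(); // was getCurrentName()
                    parser.nextToken();                      // advance to the value
                    map.put(fieldName, parser.getText());
                }
            }
            return map;
        }
    }
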
diff --git a/container-search/src/main/java/com/yahoo/search/schema/RankProfile.java b/container-search/src/main/java/com/yahoo/search/schema/RankProfile.java
index a5b8d328a7a..9583e9885e7 100644
--- a/container-search/src/main/java/com/yahoo/search/schema/RankProfile.java
+++ b/container-search/src/main/java/com/yahoo/search/schema/RankProfile.java
@@ -36,6 +36,7 @@ public class RankProfile {
private final String name;
private final boolean hasSummaryFeatures;
private final boolean hasRankFeatures;
+ private final boolean useSignificanceModel;
private final Map<String, InputType> inputs;
// Assigned when this is added to a schema
@@ -45,6 +46,7 @@ public class RankProfile {
this.name = builder.name;
this.hasSummaryFeatures = builder.hasSummaryFeatures;
this.hasRankFeatures = builder.hasRankFeatures;
+ this.useSignificanceModel = builder.useSignificanceModel;
this.inputs = Collections.unmodifiableMap(builder.inputs);
}
@@ -66,6 +68,9 @@ public class RankProfile {
/** Returns true if this rank profile has rank features. */
public boolean hasRankFeatures() { return hasRankFeatures; }
+ /** Returns true if this rank profile should use significance models. */
+ public boolean useSignificanceModel() { return useSignificanceModel; }
+
/** Returns the inputs explicitly declared in this rank profile. */
public Map<String, InputType> inputs() { return inputs; }
@@ -76,13 +81,14 @@ public class RankProfile {
if ( ! other.name.equals(this.name)) return false;
if ( other.hasSummaryFeatures != this.hasSummaryFeatures) return false;
if ( other.hasRankFeatures != this.hasRankFeatures) return false;
+ if ( other.useSignificanceModel != this.useSignificanceModel) return false;
if ( ! other.inputs.equals(this.inputs)) return false;
return true;
}
@Override
public int hashCode() {
- return Objects.hash(name, hasSummaryFeatures, hasRankFeatures, inputs);
+ return Objects.hash(name, hasSummaryFeatures, hasRankFeatures, useSignificanceModel, inputs);
}
@Override
@@ -95,6 +101,7 @@ public class RankProfile {
private final String name;
private boolean hasSummaryFeatures = true;
private boolean hasRankFeatures = true;
+ private boolean useSignificanceModel = false;
private final Map<String, InputType> inputs = new LinkedHashMap<>();
public Builder(String name) {
@@ -116,6 +123,8 @@ public class RankProfile {
return this;
}
+ public Builder setUseSignificanceModel(boolean use) { this.useSignificanceModel = use; return this; }
+
public RankProfile build() {
return new RankProfile(this);
}
diff --git a/container-search/src/main/java/com/yahoo/search/schema/SchemaInfoConfigurer.java b/container-search/src/main/java/com/yahoo/search/schema/SchemaInfoConfigurer.java
index d28c2db2b9e..77f27d3d411 100644
--- a/container-search/src/main/java/com/yahoo/search/schema/SchemaInfoConfigurer.java
+++ b/container-search/src/main/java/com/yahoo/search/schema/SchemaInfoConfigurer.java
@@ -22,9 +22,10 @@ class SchemaInfoConfigurer {
Schema.Builder builder = new Schema.Builder(schemaInfoConfig.name());
for (var profileConfig : schemaInfoConfig.rankprofile()) {
- RankProfile.Builder profileBuilder = new RankProfile.Builder(profileConfig.name());
- profileBuilder.setHasSummaryFeatures(profileConfig.hasSummaryFeatures());
- profileBuilder.setHasRankFeatures(profileConfig.hasRankFeatures());
+ RankProfile.Builder profileBuilder = new RankProfile.Builder(profileConfig.name())
+ .setHasSummaryFeatures(profileConfig.hasSummaryFeatures())
+ .setHasRankFeatures(profileConfig.hasRankFeatures())
+ .setUseSignificanceModel(profileConfig.significance().useModel());
for (var inputConfig : profileConfig.input())
profileBuilder.addInput(inputConfig.name(), RankProfile.InputType.fromSpec(inputConfig.type()));
builder.add(profileBuilder.build());
diff --git a/container-search/src/main/java/com/yahoo/search/significance/SignificanceSearcher.java b/container-search/src/main/java/com/yahoo/search/significance/SignificanceSearcher.java
index da0f98c50f5..e3a559da8f9 100644
--- a/container-search/src/main/java/com/yahoo/search/significance/SignificanceSearcher.java
+++ b/container-search/src/main/java/com/yahoo/search/significance/SignificanceSearcher.java
@@ -14,12 +14,16 @@ import com.yahoo.prelude.query.WordItem;
import com.yahoo.search.Query;
import com.yahoo.search.Result;
import com.yahoo.search.Searcher;
-import com.yahoo.search.query.Ranking;
+import com.yahoo.search.result.ErrorMessage;
+import com.yahoo.search.schema.RankProfile;
+import com.yahoo.search.schema.Schema;
+import com.yahoo.search.schema.SchemaInfo;
import com.yahoo.search.searchchain.Execution;
-import com.yahoo.vespa.config.search.RankProfilesConfig;
-import java.util.HashMap;
+import java.util.HashSet;
import java.util.Optional;
+import java.util.logging.Logger;
+import java.util.stream.Collectors;
import static com.yahoo.prelude.querytransform.StemmingSearcher.STEMMING;
@@ -33,29 +37,48 @@ import static com.yahoo.prelude.querytransform.StemmingSearcher.STEMMING;
public class SignificanceSearcher extends Searcher {
public final static String SIGNIFICANCE = "Significance";
- private final SignificanceModelRegistry significanceModelRegistry;
- private final RankProfilesConfig rankProfilesConfig;
- private final HashMap<String, Boolean> useModel = new HashMap<>();
+ private static final Logger log = Logger.getLogger(SignificanceSearcher.class.getName());
+
+ private final SignificanceModelRegistry significanceModelRegistry;
+ private final SchemaInfo schemaInfo;
@Inject
- public SignificanceSearcher(SignificanceModelRegistry significanceModelRegistry, RankProfilesConfig rankProfilesConfig) {
+ public SignificanceSearcher(SignificanceModelRegistry significanceModelRegistry, SchemaInfo schemaInfo) {
this.significanceModelRegistry = significanceModelRegistry;
- this.rankProfilesConfig = rankProfilesConfig;
-
- for (RankProfilesConfig.Rankprofile profile : rankProfilesConfig.rankprofile()) {
- for (RankProfilesConfig.Rankprofile.Fef.Property property : profile.fef().property()) {
- if (property.name().equals("vespa.significance.use_model")) {
- useModel.put(profile.name(), Boolean.parseBoolean(property.value()));
- }
- }
- }
+ this.schemaInfo = schemaInfo;
}
@Override
public Result search(Query query, Execution execution) {
- Ranking ranking = query.getRanking();
- if (!useModel.containsKey(ranking.getProfile()) || !useModel.get(ranking.getProfile())) return execution.search(query);
+ var rankProfileName = query.getRanking().getProfile();
+
+ // Determine significance setup per schema for the given rank profile
+ var perSchemaSetup = schemaInfo.newSession(query).schemas().stream()
+ .collect(Collectors.toMap(Schema::name, schema ->
+ // Fall back to disabled if the rank profile is not found in the schema
+ // This will result in a failure later (in a "backend searcher") anyway.
+ Optional.ofNullable(schema.rankProfiles().get(rankProfileName))
+ .map(RankProfile::useSignificanceModel).orElse(false)));
+ var uniqueSetups = new HashSet<>(perSchemaSetup.values());
+
+ // Fail if the significance setup for the selected schemas is conflicting
+ if (uniqueSetups.size() > 1) {
+ var result = new Result(query);
+ result.hits().addError(
+ ErrorMessage.createIllegalQuery(
+ ("Inconsistent 'significance' configuration for the rank profile '%s' in the schemas %s. " +
+ "Use 'restrict' to limit the query to a subset of schemas " +
+ "(https://docs.vespa.ai/en/schemas.html#multiple-schemas). " +
+ "Specify the same 'significance' configuration for all selected schemas " +
+ "(https://docs.vespa.ai/en/reference/schema-reference.html#significance).")
+ .formatted(rankProfileName, perSchemaSetup.keySet())));
+ return result;
+ }
+
+ if (perSchemaSetup.isEmpty()) return execution.search(query);
+ var useSignificanceModel = uniqueSetups.iterator().next();
+ if (!useSignificanceModel) return execution.search(query);
Language language = query.getModel().getParsingLanguage();
Optional<SignificanceModel> model = significanceModelRegistry.getModel(language);
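
The decision made by the rewritten SignificanceSearcher boils down to: resolve the rank profile's use-model setting per selected schema, fail the query when the settings disagree, and otherwise enable the significance model only when they all agree on true. A small standalone sketch of just that decision (hypothetical helper, not part of the patch):

    import java.util.HashSet;
    import java.util.Map;
    import java.util.Optional;
    import java.util.Set;

    class SignificanceConsistencySketch {

        // Returns empty on a conflicting setup (caller should return an error hit),
        // otherwise whether the significance model should be applied.
        static Optional<Boolean> resolve(Map<String, Boolean> useModelBySchema) {
            Set<Boolean> uniqueSetups = new HashSet<>(useModelBySchema.values());
            if (uniqueSetups.size() > 1) return Optional.empty();
            if (uniqueSetups.isEmpty()) return Optional.of(false); // no schemas selected: pass the query through
            return Optional.of(uniqueSetups.iterator().next());
        }

        public static void main(String[] args) {
            System.out.println(resolve(Map.of("music", true, "album", true)));  // Optional[true]
            System.out.println(resolve(Map.of("music", true, "album", false))); // Optional.empty -> error
        }
    }
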
diff --git a/container-search/src/main/resources/configdefinitions/container.search.schema-info.def b/container-search/src/main/resources/configdefinitions/container.search.schema-info.def
index 989fbb16973..086b47f5ae5 100644
--- a/container-search/src/main/resources/configdefinitions/container.search.schema-info.def
+++ b/container-search/src/main/resources/configdefinitions/container.search.schema-info.def
@@ -28,6 +28,7 @@ schema[].summaryclass[].fields[].dynamic bool default=false
schema[].rankprofile[].name string
schema[].rankprofile[].hasSummaryFeatures bool default=true
schema[].rankprofile[].hasRankFeatures bool default=true
+schema[].rankprofile[].significance.useModel bool default=false
# The name of an input (query rank feature) accepted by this profile
schema[].rankprofile[].input[].name string
# The tensor type of an input (query rank feature) accepted by this profile
diff --git a/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterCoverageTest.java b/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterCoverageTest.java
index 2a9eaa86674..e7085b093f3 100644
--- a/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterCoverageTest.java
+++ b/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterCoverageTest.java
@@ -48,6 +48,19 @@ public class SearchClusterCoverageTest {
}
@Test
+ void three_groups_of_which_two_were_just_added() {
+ var tester = new SearchClusterTester(3, 3);
+
+ tester.setDocsPerNode(100, 0);
+ tester.setDocsPerNode(80, 1);
+ tester.setDocsPerNode(80, 2);
+ tester.pingIterationCompleted();
+ assertTrue(tester.group(0).hasSufficientCoverage());
+ assertFalse(tester.group(1).hasSufficientCoverage());
+ assertFalse(tester.group(2).hasSufficientCoverage());
+ }
+
+ @Test
void three_groups_one_missing_docs_but_too_few() {
var tester = new SearchClusterTester(3, 3);
@@ -65,6 +78,10 @@ public class SearchClusterCoverageTest {
var tester = new SearchClusterTester(3, 3);
tester.setDocsPerNode(100, 0);
+ tester.setDocsPerNode(100, 1);
+ tester.setDocsPerNode(100, 2);
+ tester.pingIterationCompleted();
+ tester.setDocsPerNode(100, 0);
tester.setDocsPerNode(150, 1);
tester.setDocsPerNode(100, 2);
tester.pingIterationCompleted();
diff --git a/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterTest.java b/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterTest.java
index 1b36c2b8151..8ac4f067876 100644
--- a/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterTest.java
+++ b/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterTest.java
@@ -200,8 +200,6 @@ public class SearchClusterTest {
@Test
void requireThatVipStatusIsDefaultDownWithLocalDispatch() {
try (State test = new State("cluster.1", 1, HostName.getLocalhost(), "b")) {
- assertTrue(test.searchCluster.localCorpusDispatchTarget().isPresent());
-
assertFalse(test.vipStatus.isInRotation());
test.waitOneFullPingRound();
assertTrue(test.vipStatus.isInRotation());
@@ -211,8 +209,6 @@ public class SearchClusterTest {
@Test
void requireThatVipStatusStaysUpWithLocalDispatchAndClusterSize1() {
try (State test = new State("cluster.1", 1, HostName.getLocalhost())) {
- assertTrue(test.searchCluster.localCorpusDispatchTarget().isPresent());
-
assertFalse(test.vipStatus.isInRotation());
test.waitOneFullPingRound();
assertTrue(test.vipStatus.isInRotation());
@@ -225,8 +221,6 @@ public class SearchClusterTest {
@Test
void requireThatVipStatusIsDefaultDownWithLocalDispatchAndClusterSize2() {
try (State test = new State("cluster.1", 1, HostName.getLocalhost(), "otherhost")) {
- assertTrue(test.searchCluster.localCorpusDispatchTarget().isPresent());
-
assertFalse(test.vipStatus.isInRotation());
test.waitOneFullPingRound();
assertTrue(test.vipStatus.isInRotation());
diff --git a/container-search/src/test/java/com/yahoo/search/rendering/JsonRendererTestCase.java b/container-search/src/test/java/com/yahoo/search/rendering/JsonRendererTestCase.java
index ffa6c82e941..611df6ad284 100644
--- a/container-search/src/test/java/com/yahoo/search/rendering/JsonRendererTestCase.java
+++ b/container-search/src/test/java/com/yahoo/search/rendering/JsonRendererTestCase.java
@@ -1532,7 +1532,7 @@ public class JsonRendererTestCase {
+ "}";
assertEquals(
"Unexpected character ('a' (code 97)): was expecting comma to separate Object entries\n" +
- " at [Source: (String)\"{ \"root\": { \"invalidvalue\": 1adsf, }}\"; line: 1, column: 41]",
+ " at [Source: (String)\"{ \"root\": { \"invalidvalue\": 1adsf, }}\"; line: 1, column: 40]",
validateJSON(json));
}
diff --git a/container-search/src/test/java/com/yahoo/search/schema/SchemaInfoTester.java b/container-search/src/test/java/com/yahoo/search/schema/SchemaInfoTester.java
index 3e98b911fc8..2ba399cf42d 100644
--- a/container-search/src/test/java/com/yahoo/search/schema/SchemaInfoTester.java
+++ b/container-search/src/test/java/com/yahoo/search/schema/SchemaInfoTester.java
@@ -77,6 +77,7 @@ public class SchemaInfoTester {
.addInput("query(myTensor1)", InputType.fromSpec("tensor(x[10])"))
.build())
.add(new RankProfile.Builder("bOnly")
+ .setUseSignificanceModel(true)
.addInput("query(myTensor1)", InputType.fromSpec("tensor(a{},b{})"))
.build())
.build());
@@ -129,7 +130,8 @@ public class SchemaInfoTester {
rankProfileInconsistentB.input(new SchemaInfoConfig.Schema.Rankprofile.Input.Builder().name("query(myTensor1)").type("tensor(x[10])"));
schemaB.rankprofile(rankProfileInconsistentB);
var rankProfileBOnly = new SchemaInfoConfig.Schema.Rankprofile.Builder();
- rankProfileBOnly.name("bOnly");
+ rankProfileBOnly.name("bOnly")
+ .significance(new SchemaInfoConfig.Schema.Rankprofile.Significance.Builder().useModel(true));
rankProfileBOnly.input(new SchemaInfoConfig.Schema.Rankprofile.Input.Builder().name("query(myTensor1)").type("tensor(a{},b{})"));
schemaB.rankprofile(rankProfileBOnly);
diff --git a/container-search/src/test/java/com/yahoo/search/significance/test/SignificanceSearcherTest.java b/container-search/src/test/java/com/yahoo/search/significance/test/SignificanceSearcherTest.java
index ed67798b4b1..cb5722074ff 100644
--- a/container-search/src/test/java/com/yahoo/search/significance/test/SignificanceSearcherTest.java
+++ b/container-search/src/test/java/com/yahoo/search/significance/test/SignificanceSearcherTest.java
@@ -11,6 +11,10 @@ import com.yahoo.prelude.query.AndItem;
import com.yahoo.prelude.query.WordItem;
import com.yahoo.search.Query;
import com.yahoo.search.Result;
+import com.yahoo.search.schema.DocumentSummary;
+import com.yahoo.search.schema.RankProfile;
+import com.yahoo.search.schema.Schema;
+import com.yahoo.search.schema.SchemaInfo;
import com.yahoo.search.searchchain.Execution;
import com.yahoo.search.significance.SignificanceSearcher;
import com.yahoo.vespa.config.search.RankProfilesConfig;
@@ -33,24 +37,18 @@ public class SignificanceSearcherTest {
SignificanceModelRegistry significanceModelRegistry;
SignificanceSearcher searcher;
- private static final String CONFIG_DIR = "src/test/resources/config/";
public SignificanceSearcherTest() {
List<Path> models = new ArrayList<>();
models.add( Path.of("src/test/java/com/yahoo/search/significance/model/en.json"));
- RankProfilesConfig rpCfg = readConfig("with_significance");
-
- assertEquals(1, rpCfg.rankprofile().size());
-
+ var schema = new Schema.Builder("music")
+ .add(new DocumentSummary.Builder("default").build())
+ .add(new RankProfile.Builder("significance-ranking")
+ .setUseSignificanceModel(true)
+ .build());
significanceModelRegistry = new DefaultSignificanceModelRegistry(models);
- searcher = new SignificanceSearcher(significanceModelRegistry, rpCfg);
- }
-
- @SuppressWarnings("deprecation")
- private RankProfilesConfig readConfig(String subDir) {
- String cfgId = "file:" + CONFIG_DIR + subDir + "/rank-profiles.cfg";
- return ConfigGetter.getConfig(RankProfilesConfig.class, cfgId);
+ searcher = new SignificanceSearcher(significanceModelRegistry, new SchemaInfo(List.of(schema.build()), List.of()));
}
private Execution createExecution(SignificanceSearcher searcher) {
@@ -168,4 +166,36 @@ public class SignificanceSearcherTest {
assertEquals(w0_1.getSignificance(), w1.getSignificance());
}
+
+ @Test
+ public void failsOnConflictingSignificanceConfiguration() {
+ var musicSchema = new Schema.Builder("music")
+ .add(new DocumentSummary.Builder("default").build())
+ .add(new RankProfile.Builder("significance-ranking")
+ .setUseSignificanceModel(true)
+ .build())
+ .build();
+ var albumSchema = new Schema.Builder("album")
+ .add(new DocumentSummary.Builder("default").build())
+ .add(new RankProfile.Builder("significance-ranking")
+ .setUseSignificanceModel(false)
+ .build())
+ .build();
+ var searcher = new SignificanceSearcher(
+ significanceModelRegistry, new SchemaInfo(List.of(musicSchema, albumSchema), List.of()));
+
+ var query = new Query();
+ query.getRanking().setProfile("significance-ranking");
+
+ var result = createExecution(searcher).search(query);
+ assertEquals(1, result.hits().getErrorHit().errors().size());
+
+ var errorMessage = result.hits().getError();
+ assertEquals("Inconsistent 'significance' configuration for the rank profile 'significance-ranking' in the schemas [music, album]. " +
+ "Use 'restrict' to limit the query to a subset of schemas " +
+ "(https://docs.vespa.ai/en/schemas.html#multiple-schemas). " +
+ "Specify the same 'significance' configuration for all selected schemas " +
+ "(https://docs.vespa.ai/en/reference/schema-reference.html#significance).",
+ errorMessage.getDetailedMessage());
+ }
}
diff --git a/container-search/src/test/java/com/yahoo/vespa/streamingvisitors/StreamingSearcherTestCase.java b/container-search/src/test/java/com/yahoo/vespa/streamingvisitors/StreamingSearcherTestCase.java
index cd9ef708920..25b54267242 100644
--- a/container-search/src/test/java/com/yahoo/vespa/streamingvisitors/StreamingSearcherTestCase.java
+++ b/container-search/src/test/java/com/yahoo/vespa/streamingvisitors/StreamingSearcherTestCase.java
@@ -167,6 +167,7 @@ public class StreamingSearcherTestCase {
Query[] queries = new Query[4]; // Increase coverage
for (int i = 0; i<queries.length; i++) {
Query query = new Query(queryString);
+ query.setTimeout(1000);
if (i == 0) {
} else if (i == 1) {
query.getPresentation().setSummary("summary");
diff --git a/container-search/src/test/resources/config/with_significance/rank-profiles.cfg b/container-search/src/test/resources/config/with_significance/rank-profiles.cfg
deleted file mode 100644
index 1dc1be62862..00000000000
--- a/container-search/src/test/resources/config/with_significance/rank-profiles.cfg
+++ /dev/null
@@ -1,3 +0,0 @@
-rankprofile[0].name "significance-ranking"
-rankprofile[0].fef.property[0].name "vespa.significance.use_model"
-rankprofile[0].fef.property[0].value "true" \ No newline at end of file
diff --git a/dependency-versions/pom.xml b/dependency-versions/pom.xml
index 9c21a9809b1..02c3f663f98 100644
--- a/dependency-versions/pom.xml
+++ b/dependency-versions/pom.xml
@@ -37,7 +37,7 @@
<guava.vespa.version>33.2.0-jre</guava.vespa.version>
<guice.vespa.version>6.0.0</guice.vespa.version>
<j2objc-annotations.vespa.version>3.0.0</j2objc-annotations.vespa.version>
- <jackson2.vespa.version>2.16.2</jackson2.vespa.version>
+ <jackson2.vespa.version>2.17.1</jackson2.vespa.version>
<jackson-databind.vespa.version>${jackson2.vespa.version}</jackson-databind.vespa.version>
<jakarta.inject.vespa.version>2.0.1</jakarta.inject.vespa.version>
<javax.activation-api.vespa.version>1.2.0</javax.activation-api.vespa.version>
@@ -68,8 +68,8 @@
<assertj.vespa.version>3.25.3</assertj.vespa.version>
<!-- Athenz dependencies. Make sure these dependencies match those in Vespa's internal repositories -->
- <aws-sdk.vespa.version>1.12.715</aws-sdk.vespa.version>
- <athenz.vespa.version>1.11.57</athenz.vespa.version>
+ <aws-sdk.vespa.version>1.12.720</aws-sdk.vespa.version>
+ <athenz.vespa.version>1.11.58</athenz.vespa.version>
<!-- Athenz END -->
<!-- WARNING: If you change curator version, you also need to update
@@ -79,7 +79,7 @@
xargs perl -pi -e 's/major = [0-9]+, minor = [0-9]+, micro = [0-9]+/major = 5, minor = 3, micro = 0/g'
-->
<bouncycastle.vespa.version>1.78.1</bouncycastle.vespa.version>
- <byte-buddy.vespa.version>1.14.14</byte-buddy.vespa.version>
+ <byte-buddy.vespa.version>1.14.15</byte-buddy.vespa.version>
<checker-qual.vespa.version>3.38.0</checker-qual.vespa.version>
<commons-beanutils.vespa.version>1.9.4</commons-beanutils.vespa.version>
<commons-codec.vespa.version>1.17.0</commons-codec.vespa.version>
@@ -102,7 +102,7 @@
<felix.log.vespa.version>1.3.0</felix.log.vespa.version>
<findbugs.vespa.version>3.0.2</findbugs.vespa.version> <!-- Should be kept in sync with guava -->
<hamcrest.vespa.version>2.2</hamcrest.vespa.version>
- <hdrhistogram.vespa.version>2.1.12</hdrhistogram.vespa.version>
+ <hdrhistogram.vespa.version>2.2.1</hdrhistogram.vespa.version>
<huggingface.vespa.version>0.27.0</huggingface.vespa.version>
<icu4j.vespa.version>75.1</icu4j.vespa.version>
<java-jjwt.vespa.version>0.11.5</java-jjwt.vespa.version>
@@ -117,14 +117,14 @@
<junit.vespa.version>5.10.2</junit.vespa.version>
<junit.platform.vespa.version>1.10.2</junit.platform.vespa.version>
<junit4.vespa.version>4.13.2</junit4.vespa.version>
- <kherud.llama.vespa.version>3.0.1</kherud.llama.vespa.version>
+ <kherud.llama.vespa.version>3.0.2</kherud.llama.vespa.version>
<luben.zstd.vespa.version>1.5.6-3</luben.zstd.vespa.version>
<lucene.vespa.version>9.10.0</lucene.vespa.version>
<maven-archiver.vespa.version>3.6.2</maven-archiver.vespa.version>
<maven-wagon.vespa.version>3.5.3</maven-wagon.vespa.version>
<maven-xml-impl.vespa.version>4.0.0-alpha-13</maven-xml-impl.vespa.version>
<mimepull.vespa.version>1.10.0</mimepull.vespa.version>
- <mockito.vespa.version>5.11.0</mockito.vespa.version>
+ <mockito.vespa.version>5.12.0</mockito.vespa.version>
<mojo-executor.vespa.version>2.4.0</mojo-executor.vespa.version>
<netty.vespa.version>4.1.109.Final</netty.vespa.version>
<netty-tcnative.vespa.version>2.0.65.Final</netty-tcnative.vespa.version>
@@ -179,7 +179,7 @@
<maven-jar-plugin.vespa.version>3.4.1</maven-jar-plugin.vespa.version>
<maven-javadoc-plugin.vespa.version>3.6.3</maven-javadoc-plugin.vespa.version>
<maven-plugin-api.vespa.version>${maven-core.vespa.version}</maven-plugin-api.vespa.version>
- <maven-plugin-tools.vespa.version>3.12.0</maven-plugin-tools.vespa.version>
+ <maven-plugin-tools.vespa.version>3.13.0</maven-plugin-tools.vespa.version>
<maven-resources-plugin.vespa.version>3.3.1</maven-resources-plugin.vespa.version>
<maven-resolver.vespa.version>1.9.20</maven-resolver.vespa.version>
<maven-shade-plugin.vespa.version>3.5.3</maven-shade-plugin.vespa.version>
diff --git a/docprocs/src/main/java/com/yahoo/docprocs/indexing/ScriptManager.java b/docprocs/src/main/java/com/yahoo/docprocs/indexing/ScriptManager.java
index 86b0a2e78ad..3088083912b 100644
--- a/docprocs/src/main/java/com/yahoo/docprocs/indexing/ScriptManager.java
+++ b/docprocs/src/main/java/com/yahoo/docprocs/indexing/ScriptManager.java
@@ -72,7 +72,7 @@ public class ScriptManager {
Map<String, Map<String, DocumentScript>> documentFieldScripts = new HashMap<>(config.ilscript().size());
ScriptParserContext parserContext = new ScriptParserContext(linguistics, embedders);
parserContext.getAnnotatorConfig().setMaxTermOccurrences(config.maxtermoccurrences());
- parserContext.getAnnotatorConfig().setMaxTokenLength(config.fieldmatchmaxlength());
+ parserContext.getAnnotatorConfig().setMaxTokenizeLength(config.fieldmatchmaxlength());
for (IlscriptsConfig.Ilscript ilscript : config.ilscript()) {
DocumentType documentType = docTypeMgr.getDocumentType(ilscript.doctype());
diff --git a/document/src/main/java/com/yahoo/document/json/JsonReader.java b/document/src/main/java/com/yahoo/document/json/JsonReader.java
index 358c0cb65e4..9c621c033bd 100644
--- a/document/src/main/java/com/yahoo/document/json/JsonReader.java
+++ b/document/src/main/java/com/yahoo/document/json/JsonReader.java
@@ -105,7 +105,7 @@ public class JsonReader {
String condition = null;
ParsedDocumentOperation operation = null;
while (JsonToken.END_OBJECT != parser.nextValue()) {
- switch (parser.getCurrentName()) {
+ switch (parser.currentName()) {
case FIELDS -> {
documentParseInfo.fieldsBuffer = new LazyTokenBuffer(parser);
VespaJsonDocumentReader vespaJsonDocumentReader = new VespaJsonDocumentReader(typeManager.getIgnoreUndefinedFields());
@@ -177,7 +177,7 @@ public class JsonReader {
state = END_OF_FEED;
throw new IllegalArgumentException(r);
}
- if ( ! documentParseInfo.isPresent()) {
+ if (documentParseInfo.isEmpty()) {
state = END_OF_FEED;
return null;
}
diff --git a/document/src/main/java/com/yahoo/document/json/LazyTokenBuffer.java b/document/src/main/java/com/yahoo/document/json/LazyTokenBuffer.java
index 0fbdd0b28c7..53ddacf6cc3 100644
--- a/document/src/main/java/com/yahoo/document/json/LazyTokenBuffer.java
+++ b/document/src/main/java/com/yahoo/document/json/LazyTokenBuffer.java
@@ -33,7 +33,7 @@ public class LazyTokenBuffer extends TokenBuffer {
public Supplier<Token> lookahead() {
return new Supplier<>() {
int localNesting = nesting();
- Supplier<Token> buffered = LazyTokenBuffer.super.lookahead();
+ final Supplier<Token> buffered = LazyTokenBuffer.super.lookahead();
@Override public Token get() {
if (localNesting == 0)
return null;
@@ -54,7 +54,7 @@ public class LazyTokenBuffer extends TokenBuffer {
JsonToken token = parser.nextValue();
if (token == null)
throw new IllegalStateException("no more JSON tokens");
- return new Token(token, parser.getCurrentName(), parser.getText());
+ return new Token(token, parser.currentName(), parser.getText());
}
catch (IOException e) {
throw new IllegalArgumentException("failed reading document JSON", e);
diff --git a/document/src/main/java/com/yahoo/document/json/TokenBuffer.java b/document/src/main/java/com/yahoo/document/json/TokenBuffer.java
index 3a48f71c4cd..c5c022370bf 100644
--- a/document/src/main/java/com/yahoo/document/json/TokenBuffer.java
+++ b/document/src/main/java/com/yahoo/document/json/TokenBuffer.java
@@ -99,7 +99,7 @@ public class TokenBuffer {
}
int addFromParser(JsonParser tokens) throws IOException {
- add(tokens.currentToken(), tokens.getCurrentName(), tokens.getText());
+ add(tokens.currentToken(), tokens.currentName(), tokens.getText());
return nestingOffset(tokens.currentToken());
}
diff --git a/document/src/main/java/com/yahoo/document/json/document/DocumentParser.java b/document/src/main/java/com/yahoo/document/json/document/DocumentParser.java
index 77e11dcf2a8..c5bcd356c94 100644
--- a/document/src/main/java/com/yahoo/document/json/document/DocumentParser.java
+++ b/document/src/main/java/com/yahoo/document/json/document/DocumentParser.java
@@ -61,7 +61,7 @@ public class DocumentParser {
private boolean parseOneItem(DocumentParseInfo documentParseInfo, boolean docIdAndOperationIsSetExternally) throws IOException {
parser.nextValue();
processIndent();
- if (parser.getCurrentName() == null) return false;
+ if (parser.currentName() == null) return false;
if (indentLevel == 1L) {
handleIdentLevelOne(documentParseInfo, docIdAndOperationIsSetExternally);
} else if (indentLevel == 2L) {
@@ -85,17 +85,18 @@ public class DocumentParser {
private void handleIdentLevelOne(DocumentParseInfo documentParseInfo, boolean docIdAndOperationIsSetExternally)
throws IOException {
- JsonToken currentToken = parser.getCurrentToken();
+ JsonToken currentToken = parser.currentToken();
+ String currentName = parser.currentName();
if ((currentToken == JsonToken.VALUE_TRUE || currentToken == JsonToken.VALUE_FALSE) &&
- CREATE_IF_NON_EXISTENT.equals(parser.getCurrentName())) {
+ CREATE_IF_NON_EXISTENT.equals(currentName)) {
documentParseInfo.create = Optional.of(currentToken == JsonToken.VALUE_TRUE);
- } else if (currentToken == JsonToken.VALUE_STRING && CONDITION.equals(parser.getCurrentName())) {
+ } else if (currentToken == JsonToken.VALUE_STRING && CONDITION.equals(currentName)) {
documentParseInfo.condition = Optional.of(parser.getText());
} else if (currentToken == JsonToken.VALUE_STRING) {
// Value is expected to be set in the header not in the document. Ignore any unknown field
// as well.
if (! docIdAndOperationIsSetExternally) {
- documentParseInfo.operationType = operationNameToOperationType(parser.getCurrentName());
+ documentParseInfo.operationType = operationNameToOperationType(currentName);
documentParseInfo.documentId = new DocumentId(parser.getText());
}
}
@@ -104,7 +105,7 @@ public class DocumentParser {
private void handleIdentLevelTwo(DocumentParseInfo documentParseInfo) {
try {
// "fields" opens a dictionary and is therefore on level two which might be surprising.
- if (parser.currentToken() == JsonToken.START_OBJECT && FIELDS.equals(parser.getCurrentName())) {
+ if (parser.currentToken() == JsonToken.START_OBJECT && FIELDS.equals(parser.currentName())) {
documentParseInfo.fieldsBuffer.bufferObject(parser);
processIndent();
}
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
index 7ebe9555296..a577bbe74df 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
@@ -48,6 +48,14 @@ public class Flags {
private static volatile TreeMap<FlagId, FlagDefinition> flags = new TreeMap<>();
+ public static final UnboundBooleanFlag ATHENZ_SERVICE_ACCOUNTS = defineFeatureFlag(
+ "athenz-service-accounts", false,
+ List.of("hakonhall"), "2024-05-06", "2024-07-06",
+ "Whether to provision new GCP VM instances with a service account that is independent " +
+ "of the zone and aligned with the Athenz service names (configserver and tenant-host).",
+ "Takes effect when provisioning new VM instances",
+ APPLICATION, INSTANCE_ID);
+
public static final UnboundDoubleFlag DEFAULT_TERM_WISE_LIMIT = defineDoubleFlag(
"default-term-wise-limit", 1.0,
List.of("baldersheim"), "2020-12-02", "2024-12-31",
@@ -84,18 +92,6 @@ public class Flags {
"Takes effect at redeployment",
INSTANCE_ID);
- public static final UnboundBooleanFlag NEW_RESOURCES_FORMULA = defineFeatureFlag(
- "new-resources-formula", true,
- List.of("hakonhall"), "2024-04-25", "2024-05-25",
- "Use an easier to understand formula for calculating the memory and disk resources",
- "Takes effect on next deployment of an applications.");
-
- public static final UnboundBooleanFlag FIX_CONFIG_SERVER_HEAP = defineFeatureFlag(
- "fix-config-server-heap", true,
- List.of("hakonhall"), "2024-04-23", "2024-05-23",
- "Base the calculation of the config server JVM heap size on the amount of memory available to the container.",
- "Takes effect on start of config server Podman container");
-
public static final UnboundStringFlag RESPONSE_SEQUENCER_TYPE = defineStringFlag(
"response-sequencer-type", "ADAPTIVE",
List.of("baldersheim"), "2020-12-02", "2024-12-31",
@@ -455,6 +451,18 @@ public class Flags {
"Whether to encrypt disk when provisioning new hosts",
"Will be read only on boot.");
+ public static final UnboundBooleanFlag HUBSPOT_SYNC_TENANTS = defineFeatureFlag(
+ "hubspot-sync-tenants", false,
+ List.of("bjorncs"), "2024-05-07", "2025-01-01",
+ "Whether to sync tenants to HubSpot",
+ "Takes effect immediately");
+
+ public static final UnboundBooleanFlag REMOVE_ORPHANED_DNS_RECORDS = defineFeatureFlag(
+ "remove-orphaned-dns-records", false,
+ List.of("mpolden"), "2024-05-07", "2024-10-01",
+ "Whether EndpointDnsMaintainer should remove orphaned records instead of logging them",
+ "Takes effect on next maintenance run");
+
/** WARNING: public for testing: All flags should be defined in {@link Flags}. */
public static UnboundBooleanFlag defineFeatureFlag(String flagId, boolean defaultValue, List<String> owners,
String createdAt, String expiresAt, String description,
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java b/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
index 3b73d9d6013..2a667930add 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
@@ -18,12 +18,12 @@ import static com.yahoo.vespa.flags.Dimension.ARCHITECTURE;
import static com.yahoo.vespa.flags.Dimension.CERTIFICATE_PROVIDER;
import static com.yahoo.vespa.flags.Dimension.CLAVE;
import static com.yahoo.vespa.flags.Dimension.CLOUD_ACCOUNT;
-import static com.yahoo.vespa.flags.Dimension.FLAVOR;
-import static com.yahoo.vespa.flags.Dimension.INSTANCE_ID;
import static com.yahoo.vespa.flags.Dimension.CLUSTER_ID;
import static com.yahoo.vespa.flags.Dimension.CLUSTER_TYPE;
import static com.yahoo.vespa.flags.Dimension.CONSOLE_USER_EMAIL;
+import static com.yahoo.vespa.flags.Dimension.FLAVOR;
import static com.yahoo.vespa.flags.Dimension.HOSTNAME;
+import static com.yahoo.vespa.flags.Dimension.INSTANCE_ID;
import static com.yahoo.vespa.flags.Dimension.NODE_TYPE;
import static com.yahoo.vespa.flags.Dimension.TENANT_ID;
import static com.yahoo.vespa.flags.Dimension.VESPA_VERSION;
@@ -178,7 +178,7 @@ public class PermanentFlags {
HOSTNAME, NODE_TYPE, TENANT_ID, INSTANCE_ID, CLUSTER_TYPE, CLUSTER_ID, VESPA_VERSION);
public static final UnboundStringFlag ZOOKEEPER_SERVER_VERSION = defineStringFlag(
- "zookeeper-server-version", "3.9.1",
+ "zookeeper-server-version", "3.9.2",
"ZooKeeper server version, a jar file zookeeper-server-<ZOOKEEPER_SERVER_VERSION>-jar-with-dependencies.jar must exist",
"Takes effect on restart of Docker container",
NODE_TYPE, INSTANCE_ID, HOSTNAME);
diff --git a/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/ExactExpression.java b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/ExactExpression.java
index 855430f45fc..7481363b737 100644
--- a/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/ExactExpression.java
+++ b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/ExactExpression.java
@@ -12,6 +12,9 @@ import com.yahoo.document.annotation.SpanTrees;
import com.yahoo.document.datatypes.IntegerFieldValue;
import com.yahoo.document.datatypes.StringFieldValue;
import com.yahoo.language.process.TokenType;
+import com.yahoo.vespa.indexinglanguage.linguistics.AnnotatorConfig;
+
+import java.util.OptionalInt;
import static com.yahoo.language.LinguisticsCase.toLowerCase;
@@ -20,8 +23,19 @@ import static com.yahoo.language.LinguisticsCase.toLowerCase;
*/
public final class ExactExpression extends Expression {
- public ExactExpression() {
+ private int maxTokenLength;
+
+ private ExactExpression(OptionalInt maxTokenLength) {
super(DataType.STRING);
+ this.maxTokenLength = maxTokenLength.isPresent() ? maxTokenLength.getAsInt() : AnnotatorConfig.getDefaultMaxTokenLength();
+ }
+
+ public ExactExpression() {
+ this(OptionalInt.empty());
+ }
+
+ public ExactExpression(int maxTokenLength) {
+ this(OptionalInt.of(maxTokenLength));
}
@Override
@@ -36,6 +50,12 @@ public final class ExactExpression extends Expression {
String next = toLowerCase(prev);
SpanTree tree = output.getSpanTree(SpanTrees.LINGUISTICS);
+ if (next.length() > maxTokenLength) {
+ if (tree != null) {
+ output.removeSpanTree(SpanTrees.LINGUISTICS);
+ }
+ return;
+ }
SpanList root;
if (tree == null) {
root = new SpanList();
@@ -64,8 +84,14 @@ public final class ExactExpression extends Expression {
}
@Override
- public String toString() {
- return "exact";
+ public String toString() {
+ StringBuilder ret = new StringBuilder();
+ ret.append("exact");
+ if (maxTokenLength != AnnotatorConfig.getDefaultMaxTokenLength()) {
+ ret.append(" max-token-length:" + maxTokenLength);
+ }
+ return ret.toString();
}
@Override
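
Usage sketch for the new max-token-length cap on exact, mirroring the ExactTestCase addition further down (values are hypothetical and the import paths for the test helpers are assumed): a token longer than the cap is not annotated, so no linguistics span tree is attached.

    import com.yahoo.document.annotation.SpanTrees;
    import com.yahoo.document.datatypes.StringFieldValue;
    import com.yahoo.vespa.indexinglanguage.SimpleTestAdapter;
    import com.yahoo.vespa.indexinglanguage.expressions.ExactExpression;
    import com.yahoo.vespa.indexinglanguage.expressions.ExecutionContext;

    class ExactMaxTokenLengthSketch {
        public static void main(String[] args) {
            ExecutionContext ctx = new ExecutionContext(new SimpleTestAdapter());
            ctx.setValue(new StringFieldValue("unreasonablylongtoken"));
            new ExactExpression(8).execute(ctx); // cap of 8 characters; the value above exceeds it
            StringFieldValue out = (StringFieldValue) ctx.getValue();
            System.out.println(out.getSpanTree(SpanTrees.LINGUISTICS) == null); // true: too long, not annotated
        }
    }
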
diff --git a/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/TokenizeExpression.java b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/TokenizeExpression.java
index b807ad4cb65..a3c404e50c3 100644
--- a/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/TokenizeExpression.java
+++ b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/TokenizeExpression.java
@@ -66,9 +66,12 @@ public final class TokenizeExpression extends Expression {
if (config.getStemMode() != StemMode.NONE) {
ret.append(" stem:\""+config.getStemMode()+"\"");
}
- if (config.hasNonDefaultMaxTokenLength()) {
+ if (config.hasNonDefaultMaxTokenizeLength()) {
ret.append(" max-length:" + config.getMaxTokenizeLength());
}
+ if (config.hasNonDefaultMaxTokenLength()) {
+ ret.append(" max-token-length:" + config.getMaxTokenLength());
+ }
if (config.hasNonDefaultMaxTermOccurrences()) {
ret.append(" max-occurrences:" + config.getMaxTermOccurrences());
}
diff --git a/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/linguistics/AnnotatorConfig.java b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/linguistics/AnnotatorConfig.java
index 7b6f350d831..6522e284fc8 100644
--- a/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/linguistics/AnnotatorConfig.java
+++ b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/linguistics/AnnotatorConfig.java
@@ -14,14 +14,17 @@ public class AnnotatorConfig implements Cloneable {
private StemMode stemMode;
private boolean removeAccents;
private int maxTermOccurrences;
+ private int maxTokenLength;
private int maxTokenizeLength;
public static final int DEFAULT_MAX_TERM_OCCURRENCES;
+ private static final int DEFAULT_MAX_TOKEN_LENGTH;
private static final int DEFAULT_MAX_TOKENIZE_LENGTH;
static {
IlscriptsConfig defaults = new IlscriptsConfig(new IlscriptsConfig.Builder());
DEFAULT_MAX_TERM_OCCURRENCES = defaults.maxtermoccurrences();
+ DEFAULT_MAX_TOKEN_LENGTH = defaults.maxtokenlength();
DEFAULT_MAX_TOKENIZE_LENGTH = defaults.fieldmatchmaxlength();
}
@@ -30,6 +33,7 @@ public class AnnotatorConfig implements Cloneable {
stemMode = StemMode.NONE;
removeAccents = false;
maxTermOccurrences = DEFAULT_MAX_TERM_OCCURRENCES;
+ maxTokenLength = DEFAULT_MAX_TOKEN_LENGTH;
maxTokenizeLength = DEFAULT_MAX_TOKENIZE_LENGTH;
}
@@ -38,6 +42,7 @@ public class AnnotatorConfig implements Cloneable {
stemMode = rhs.stemMode;
removeAccents = rhs.removeAccents;
maxTermOccurrences = rhs.maxTermOccurrences;
+ maxTokenLength = rhs.maxTokenLength;
maxTokenizeLength = rhs.maxTokenizeLength;
}
@@ -82,7 +87,18 @@ public class AnnotatorConfig implements Cloneable {
return this;
}
- public AnnotatorConfig setMaxTokenLength(int maxTokenizeLength) {
+ public AnnotatorConfig setMaxTokenLength(int maxTokenLength) {
+ this.maxTokenLength = maxTokenLength;
+ return this;
+ }
+
+ public int getMaxTokenLength() {
+ return maxTokenLength;
+ }
+
+ public static int getDefaultMaxTokenLength() { return DEFAULT_MAX_TOKEN_LENGTH; }
+
+ public AnnotatorConfig setMaxTokenizeLength(int maxTokenizeLength) {
this.maxTokenizeLength = maxTokenizeLength;
return this;
}
@@ -92,6 +108,10 @@ public class AnnotatorConfig implements Cloneable {
}
public boolean hasNonDefaultMaxTokenLength() {
+ return maxTokenLength != DEFAULT_MAX_TOKEN_LENGTH;
+ }
+
+ public boolean hasNonDefaultMaxTokenizeLength() {
return maxTokenizeLength != DEFAULT_MAX_TOKENIZE_LENGTH;
}
@@ -116,6 +136,9 @@ public class AnnotatorConfig implements Cloneable {
if (maxTermOccurrences != rhs.maxTermOccurrences) {
return false;
}
+ if (maxTokenLength != rhs.maxTokenLength) {
+ return false;
+ }
if (maxTokenizeLength != rhs.maxTokenizeLength) {
return false;
}
@@ -125,7 +148,7 @@ public class AnnotatorConfig implements Cloneable {
@Override
public int hashCode() {
return getClass().hashCode() + language.hashCode() + stemMode.hashCode() +
- Boolean.valueOf(removeAccents).hashCode() + maxTermOccurrences + maxTokenizeLength;
+ Boolean.valueOf(removeAccents).hashCode() + maxTermOccurrences + maxTokenLength + maxTokenizeLength;
}
}
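
Note the distinction this introduces: setMaxTokenizeLength keeps the old field-length semantics (the setter previously mis-named setMaxTokenLength), while the new setMaxTokenLength caps the length of individual tokens that get annotated. A minimal configuration sketch (values hypothetical):

    import com.yahoo.vespa.indexinglanguage.linguistics.AnnotatorConfig;

    class AnnotatorConfigSketch {
        public static void main(String[] args) {
            AnnotatorConfig config = new AnnotatorConfig()
                    .setMaxTokenizeLength(10_000) // cap on how much of the field value is tokenized at all
                    .setMaxTokenLength(64);       // cap on a single token; longer tokens are skipped
            System.out.println(config.hasNonDefaultMaxTokenizeLength()); // true, assuming 10_000 differs from the config default
            System.out.println(config.hasNonDefaultMaxTokenLength());    // true, assuming 64 differs from the config default
            System.out.println(config.getMaxTokenLength());              // 64
        }
    }
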
diff --git a/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotator.java b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotator.java
index 86d4e91a567..913b874c6f6 100644
--- a/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotator.java
+++ b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotator.java
@@ -78,7 +78,8 @@ public class LinguisticsAnnotator {
TermOccurrences termOccurrences = new TermOccurrences(config.getMaxTermOccurrences());
SpanTree tree = new SpanTree(SpanTrees.LINGUISTICS);
for (Token token : tokens)
- addAnnotationSpan(text.getString(), tree.spanList(), token, config.getStemMode(), termOccurrences);
+ addAnnotationSpan(text.getString(), tree.spanList(), token, config.getStemMode(), termOccurrences,
+ config.getMaxTokenLength());
if (tree.numAnnotations() == 0) return false;
text.setSpanTree(tree);
@@ -100,17 +101,22 @@ public class LinguisticsAnnotator {
return new Annotation(AnnotationTypes.TERM, new StringFieldValue(term));
}
- private static void addAnnotation(Span here, String term, String orig, TermOccurrences termOccurrences) {
+ private static void addAnnotation(Span here, String term, String orig, TermOccurrences termOccurrences,
+ int maxTokenLength) {
+ if (term.length() > maxTokenLength) {
+ return;
+ }
if (termOccurrences.termCountBelowLimit(term)) {
here.annotate(termAnnotation(term, orig));
}
}
- private static void addAnnotationSpan(String input, SpanList parent, Token token, StemMode mode, TermOccurrences termOccurrences) {
+ private static void addAnnotationSpan(String input, SpanList parent, Token token, StemMode mode,
+ TermOccurrences termOccurrences, int maxTokenLength) {
if ( ! token.isSpecialToken()) {
if (token.getNumComponents() > 0) {
for (int i = 0; i < token.getNumComponents(); ++i) {
- addAnnotationSpan(input, parent, token.getComponent(i), mode, termOccurrences);
+ addAnnotationSpan(input, parent, token.getComponent(i), mode, termOccurrences, maxTokenLength);
}
return;
}
@@ -130,18 +136,21 @@ public class LinguisticsAnnotator {
String lowercasedOrig = toLowerCase(token.getOrig());
String term = token.getTokenString();
if (term != null) {
- addAnnotation(where, term, token.getOrig(), termOccurrences);
+ addAnnotation(where, term, token.getOrig(), termOccurrences, maxTokenLength);
if ( ! term.equals(lowercasedOrig))
- addAnnotation(where, lowercasedOrig, token.getOrig(), termOccurrences);
+ addAnnotation(where, lowercasedOrig, token.getOrig(), termOccurrences, maxTokenLength);
}
for (int i = 0; i < token.getNumStems(); i++) {
String stem = token.getStem(i);
if (! (stem.equals(lowercasedOrig) || stem.equals(term)))
- addAnnotation(where, stem, token.getOrig(), termOccurrences);
+ addAnnotation(where, stem, token.getOrig(), termOccurrences, maxTokenLength);
}
} else {
String term = token.getTokenString();
if (term == null || term.trim().isEmpty()) return;
+ if (term.length() > maxTokenLength) {
+ return;
+ }
if (termOccurrences.termCountBelowLimit(term)) {
parent.span((int)token.getOffset(), token.getOrig().length()).annotate(termAnnotation(term, token.getOrig()));
}
diff --git a/indexinglanguage/src/main/javacc/IndexingParser.jj b/indexinglanguage/src/main/javacc/IndexingParser.jj
index 469d96ead60..29ca5270db8 100644
--- a/indexinglanguage/src/main/javacc/IndexingParser.jj
+++ b/indexinglanguage/src/main/javacc/IndexingParser.jj
@@ -174,6 +174,7 @@ TOKEN :
<LOWER_CASE: "lowercase"> |
<MAX_LENGTH: "max-length"> |
<MAX_OCCURRENCES: "max-occurrences"> |
+ <MAX_TOKEN_LENGTH: "max-token-length"> |
<NGRAM: "ngram"> |
<NORMALIZE: "normalize"> |
<NOW: "now"> |
@@ -407,10 +408,13 @@ Expression embedExp() :
{ return new EmbedExpression(embedders, embedderId, embedderArguments); }
}
-Expression exactExp() : { }
+Expression exactExp() :
{
- ( <EXACT> )
- { return new ExactExpression(); }
+ int maxTokenLength = annotatorCfg.getMaxTokenLength();
+}
+{
+ ( <EXACT> [ <MAX_TOKEN_LENGTH> <COLON> maxTokenLength = integer() ] )
+ { return new ExactExpression(maxTokenLength); }
}
Expression flattenExp() : { }
@@ -686,11 +690,13 @@ AnnotatorConfig tokenizeCfg() :
String str = "SHORTEST";
Integer maxLength;
Integer maxTermOccurrences;
+ Integer maxTokenLength;
}
{
( <STEM> ( <COLON> str = string() ) ? { val.setStemMode(str); } |
- <MAX_LENGTH> <COLON> maxLength = integer() { val.setMaxTokenLength(maxLength); } |
+ <MAX_LENGTH> <COLON> maxLength = integer() { val.setMaxTokenizeLength(maxLength); } |
<MAX_OCCURRENCES> <COLON> maxTermOccurrences = integer() { val.setMaxTermOccurrences(maxTermOccurrences); } |
+ <MAX_TOKEN_LENGTH> <COLON> maxTokenLength = integer() { val.setMaxTokenLength(maxTokenLength); } |
<NORMALIZE> { val.setRemoveAccents(true); } )+
{ return val; }
}
diff --git a/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/ExactTestCase.java b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/ExactTestCase.java
index 403d1820f70..b338c45f7a4 100644
--- a/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/ExactTestCase.java
+++ b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/ExactTestCase.java
@@ -63,6 +63,15 @@ public class ExactTestCase {
}
@Test
+ public void requireThatLongStringsAreNotAnnotated() {
+ ExecutionContext ctx = new ExecutionContext(new SimpleTestAdapter());
+ ctx.setValue(new StringFieldValue("foo"));
+ new ExactExpression(2).execute(ctx);
+
+ assertNull(((StringFieldValue)ctx.getValue()).getSpanTree(SpanTrees.LINGUISTICS));
+ }
+
+ @Test
public void requireThatEmptyStringsAreNotAnnotated() {
ExecutionContext ctx = new ExecutionContext(new SimpleTestAdapter());
ctx.setValue(new StringFieldValue(""));
diff --git a/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/TokenizeTestCase.java b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/TokenizeTestCase.java
index 01ffbe359f3..7ed3ab410a3 100644
--- a/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/TokenizeTestCase.java
+++ b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/TokenizeTestCase.java
@@ -62,4 +62,15 @@ public class TokenizeTestCase {
assertTrue(val instanceof StringFieldValue);
assertNotNull(((StringFieldValue)val).getSpanTree(SpanTrees.LINGUISTICS));
}
+
+ @Test
+ public void requireThatLongWordIsDropped() {
+ ExecutionContext ctx = new ExecutionContext(new SimpleTestAdapter());
+ ctx.setValue(new StringFieldValue("foo"));
+ new TokenizeExpression(new SimpleLinguistics(), new AnnotatorConfig().setMaxTokenLength(2)).execute(ctx);
+
+ FieldValue val = ctx.getValue();
+ assertTrue(val instanceof StringFieldValue);
+ assertNull(((StringFieldValue)val).getSpanTree(SpanTrees.LINGUISTICS));
+ }
}
diff --git a/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/linguistics/AnnotatorConfigTestCase.java b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/linguistics/AnnotatorConfigTestCase.java
index 0d34d2841fd..c3131e28906 100644
--- a/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/linguistics/AnnotatorConfigTestCase.java
+++ b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/linguistics/AnnotatorConfigTestCase.java
@@ -27,6 +27,8 @@ public class AnnotatorConfigTestCase {
assertTrue(config.getRemoveAccents());
config.setRemoveAccents(false);
assertFalse(config.getRemoveAccents());
+ config.setMaxTokenLength(10);
+ assertEquals(10, config.getMaxTokenLength());
}
@Test
@@ -35,11 +37,13 @@ public class AnnotatorConfigTestCase {
config.setLanguage(Language.ARABIC);
config.setStemMode(StemMode.SHORTEST);
config.setRemoveAccents(!config.getRemoveAccents());
+ config.setMaxTokenLength(11);
AnnotatorConfig other = new AnnotatorConfig(config);
assertEquals(config.getLanguage(), other.getLanguage());
assertEquals(config.getStemMode(), other.getStemMode());
assertEquals(config.getRemoveAccents(), other.getRemoveAccents());
+ assertEquals(config.getMaxTokenLength(), other.getMaxTokenLength());
}
@Test
@@ -49,6 +53,7 @@ public class AnnotatorConfigTestCase {
assertFalse(config.equals(newConfig(Language.SPANISH, StemMode.SHORTEST, false)));
assertFalse(config.equals(newConfig(Language.DUTCH, StemMode.SHORTEST, false)));
assertFalse(config.equals(newConfig(Language.DUTCH, StemMode.NONE, false)));
+ assertNotEquals(config, newConfig(Language.DUTCH, StemMode.NONE, true).setMaxTokenLength(10));
assertEquals(config, newConfig(Language.DUTCH, StemMode.NONE, true));
assertEquals(config.hashCode(), newConfig(Language.DUTCH, StemMode.NONE, true).hashCode());
}
diff --git a/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotatorTestCase.java b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotatorTestCase.java
index 136e71564d8..461c915acef 100644
--- a/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotatorTestCase.java
+++ b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotatorTestCase.java
@@ -194,7 +194,7 @@ public class LinguisticsAnnotatorTestCase {
Linguistics linguistics = new SimpleLinguistics();
- LinguisticsAnnotator annotator = new LinguisticsAnnotator(linguistics, new AnnotatorConfig().setMaxTokenLength(12));
+ LinguisticsAnnotator annotator = new LinguisticsAnnotator(linguistics, new AnnotatorConfig().setMaxTokenizeLength(12));
assertTrue(annotator.annotate(shortValue));
assertEquals(spanTree, shortValue.getSpanTree(SpanTrees.LINGUISTICS));
diff --git a/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/parser/ExpressionTestCase.java b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/parser/ExpressionTestCase.java
index a7ed7ae3e72..1b7c6973f1e 100644
--- a/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/parser/ExpressionTestCase.java
+++ b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/parser/ExpressionTestCase.java
@@ -27,6 +27,7 @@ public class ExpressionTestCase {
assertExpression(ClearStateExpression.class, "clear_state");
assertExpression(EchoExpression.class, "echo");
assertExpression(ExactExpression.class, "exact");
+ assertExpression(ExactExpression.class, "exact max-token-length: 10", Optional.of("exact max-token-length:10"));
assertExpression(FlattenExpression.class, "flatten");
assertExpression(ForEachExpression.class, "for_each { 1 }");
assertExpression(GetFieldExpression.class, "get_field field1");
@@ -73,6 +74,7 @@ public class ExpressionTestCase {
assertExpression(TokenizeExpression.class, "tokenize stem:\"ALL\"");
assertExpression(TokenizeExpression.class, "tokenize normalize");
assertExpression(TokenizeExpression.class, "tokenize max-occurrences: 15", Optional.of("tokenize max-occurrences:15"));
+ assertExpression(TokenizeExpression.class, "tokenize max-token-length: 15", Optional.of("tokenize max-token-length:15"));
assertExpression(ToLongExpression.class, "to_long");
assertExpression(ToPositionExpression.class, "to_pos");
assertExpression(ToStringExpression.class, "to_string");
diff --git a/messagebus/src/main/java/com/yahoo/messagebus/DynamicThrottlePolicy.java b/messagebus/src/main/java/com/yahoo/messagebus/DynamicThrottlePolicy.java
index 97f681404e9..1a42b688437 100644
--- a/messagebus/src/main/java/com/yahoo/messagebus/DynamicThrottlePolicy.java
+++ b/messagebus/src/main/java/com/yahoo/messagebus/DynamicThrottlePolicy.java
@@ -167,10 +167,10 @@ public class DynamicThrottlePolicy extends StaticThrottlePolicy {
}
/**
- * Determines where on each latency level the attractor sits. 2 is at the very end, and makes this to *boom*.
+ * Determines where on each latency level the attractor sits. 2 is at the very end, and makes this go *boom*.
* 0.2 is at the very start, and makes the algorithm more conservative. Probably fine to stay away from this.
*/
- // Original javadoc is non-sense, but kept for historical reasons.
+ // Original javadoc is nonsense, but kept for historical reasons.
/*
* Sets the lower efficiency threshold at which the algorithm should perform window size back off. Efficiency is
* the correlation between throughput and window size. The algorithm will increase the window size until efficiency
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/MetricsParser.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/MetricsParser.java
index 0e33d7dbf2f..052b8425a45 100644
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/MetricsParser.java
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/MetricsParser.java
@@ -53,8 +53,8 @@ public class MetricsParser {
throw new IOException("Expected start of object, got " + parser.currentToken());
}
- for (parser.nextToken(); parser.getCurrentToken() != JsonToken.END_OBJECT; parser.nextToken()) {
- String fieldName = parser.getCurrentName();
+ for (parser.nextToken(); parser.currentToken() != JsonToken.END_OBJECT; parser.nextToken()) {
+ String fieldName = parser.currentName();
JsonToken token = parser.nextToken();
if (fieldName.equals("metrics")) {
parseMetrics(parser, consumer);
@@ -67,12 +67,12 @@ public class MetricsParser {
}
static private Instant parseSnapshot(JsonParser parser) throws IOException {
- if (parser.getCurrentToken() != JsonToken.START_OBJECT) {
+ if (parser.currentToken() != JsonToken.START_OBJECT) {
throw new IOException("Expected start of 'snapshot' object, got " + parser.currentToken());
}
Instant timestamp = Instant.now();
- for (parser.nextToken(); parser.getCurrentToken() != JsonToken.END_OBJECT; parser.nextToken()) {
- String fieldName = parser.getCurrentName();
+ for (parser.nextToken(); parser.currentToken() != JsonToken.END_OBJECT; parser.nextToken()) {
+ String fieldName = parser.currentName();
JsonToken token = parser.nextToken();
if (fieldName.equals("to")) {
timestamp = Instant.ofEpochSecond(parser.getLongValue());
@@ -88,12 +88,12 @@ public class MetricsParser {
// 'metrics' object with 'snapshot' and 'values' arrays
static private void parseMetrics(JsonParser parser, Collector consumer) throws IOException {
- if (parser.getCurrentToken() != JsonToken.START_OBJECT) {
+ if (parser.currentToken() != JsonToken.START_OBJECT) {
throw new IOException("Expected start of 'metrics' object, got " + parser.currentToken());
}
Instant timestamp = Instant.now();
- for (parser.nextToken(); parser.getCurrentToken() != JsonToken.END_OBJECT; parser.nextToken()) {
- String fieldName = parser.getCurrentName();
+ for (parser.nextToken(); parser.currentToken() != JsonToken.END_OBJECT; parser.nextToken()) {
+ String fieldName = parser.currentName();
JsonToken token = parser.nextToken();
if (fieldName.equals("snapshot")) {
timestamp = parseSnapshot(parser);
@@ -109,7 +109,7 @@ public class MetricsParser {
// 'values' array
static private void parseMetricValues(JsonParser parser, Instant timestamp, Collector consumer) throws IOException {
- if (parser.getCurrentToken() != JsonToken.START_ARRAY) {
+ if (parser.currentToken() != JsonToken.START_ARRAY) {
throw new IOException("Expected start of 'metrics:values' array, got " + parser.currentToken());
}
@@ -126,8 +126,8 @@ public class MetricsParser {
String description = "";
Map<DimensionId, String> dim = Map.of();
List<Map.Entry<String, Number>> values = List.of();
- for (parser.nextToken(); parser.getCurrentToken() != JsonToken.END_OBJECT; parser.nextToken()) {
- String fieldName = parser.getCurrentName();
+ for (parser.nextToken(); parser.currentToken() != JsonToken.END_OBJECT; parser.nextToken()) {
+ String fieldName = parser.currentName();
JsonToken token = parser.nextToken();
switch (fieldName) {
case "name" -> name = parser.getText();
@@ -154,8 +154,8 @@ public class MetricsParser {
Set<Dimension> dimensions = new HashSet<>();
- for (parser.nextToken(); parser.getCurrentToken() != JsonToken.END_OBJECT; parser.nextToken()) {
- String fieldName = parser.getCurrentName();
+ for (parser.nextToken(); parser.currentToken() != JsonToken.END_OBJECT; parser.nextToken()) {
+ String fieldName = parser.currentName();
JsonToken token = parser.nextToken();
if (token == JsonToken.VALUE_STRING){
@@ -180,17 +180,16 @@ public class MetricsParser {
private static List<Map.Entry<String, Number>> parseValues(JsonParser parser) throws IOException {
List<Map.Entry<String, Number>> metrics = new ArrayList<>();
- for (parser.nextToken(); parser.getCurrentToken() != JsonToken.END_OBJECT; parser.nextToken()) {
- String fieldName = parser.getCurrentName();
+ for (parser.nextToken(); parser.currentToken() != JsonToken.END_OBJECT; parser.nextToken()) {
+ String metricName = parser.currentName();
JsonToken token = parser.nextToken();
- String metricName = fieldName;
if (token == JsonToken.VALUE_NUMBER_INT) {
metrics.add(Map.entry(metricName, parser.getLongValue()));
} else if (token == JsonToken.VALUE_NUMBER_FLOAT) {
double value = parser.getValueAsDouble();
metrics.add(Map.entry(metricName, value == ZERO_DOUBLE ? ZERO_DOUBLE : value));
} else {
- throw new IllegalArgumentException("Value for aggregator '" + fieldName + "' is not a number");
+ throw new IllegalArgumentException("Value for aggregator '" + metricName + "' is not a number");
}
}
return metrics;
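
The MetricsParser change above only swaps Jackson's deprecated accessors (getCurrentToken()/getCurrentName()) for their current equivalents (currentToken()/currentName()); the streaming-parse loop itself is unchanged. As a minimal, self-contained sketch of that loop shape — illustrative payload and class name, not the metrics-proxy format, assuming jackson-core on the classpath:

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;

public class StreamingParseSketch {
    public static void main(String[] args) throws Exception {
        String json = "{\"values\":{\"latency\":42,\"qps\":3.5}}";   // illustrative payload only
        try (JsonParser parser = new JsonFactory().createParser(json)) {
            parser.nextToken();                                       // START_OBJECT
            for (parser.nextToken(); parser.currentToken() != JsonToken.END_OBJECT; parser.nextToken()) {
                String fieldName = parser.currentName();              // replaces deprecated getCurrentName()
                JsonToken token = parser.nextToken();
                if ("values".equals(fieldName) && token == JsonToken.START_OBJECT) {
                    for (parser.nextToken(); parser.currentToken() != JsonToken.END_OBJECT; parser.nextToken()) {
                        String metric = parser.currentName();
                        parser.nextToken();                           // the numeric value token
                        System.out.println(metric + " = " + parser.getNumberValue());
                    }
                }
            }
        }
    }
}
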
diff --git a/metrics/src/main/java/ai/vespa/metrics/set/Vespa9VespaMetricSet.java b/metrics/src/main/java/ai/vespa/metrics/set/Vespa9VespaMetricSet.java
index 5b5126b23f0..957eaf8304f 100644
--- a/metrics/src/main/java/ai/vespa/metrics/set/Vespa9VespaMetricSet.java
+++ b/metrics/src/main/java/ai/vespa/metrics/set/Vespa9VespaMetricSet.java
@@ -211,6 +211,10 @@ public class Vespa9VespaMetricSet {
addMetric(metrics, ContainerMetrics.JDISC_TLS_CAPABILITY_CHECKS_SUCCEEDED.rate());
addMetric(metrics, ContainerMetrics.JDISC_TLS_CAPABILITY_CHECKS_FAILED.rate());
+ // Embedders
+ addMetric(metrics, ContainerMetrics.EMBEDDER_LATENCY, EnumSet.of(max, sum, count));
+ addMetric(metrics, ContainerMetrics.EMBEDDER_SEQUENCE_LENGTH, EnumSet.of(max, sum, count));
+
return metrics;
}
diff --git a/metrics/src/main/java/ai/vespa/metrics/set/VespaMetricSet.java b/metrics/src/main/java/ai/vespa/metrics/set/VespaMetricSet.java
index e703011ef5f..778d6963e19 100644
--- a/metrics/src/main/java/ai/vespa/metrics/set/VespaMetricSet.java
+++ b/metrics/src/main/java/ai/vespa/metrics/set/VespaMetricSet.java
@@ -116,10 +116,6 @@ public class VespaMetricSet {
// Routing layer metrics
addMetric(metrics, RoutingLayerMetrics.WORKER_CONNECTIONS.max()); // Hosted Vespa only (routing layer)
- // Embedders
- addMetric(metrics, ContainerMetrics.EMBEDDER_LATENCY, EnumSet.of(max, sum, count));
- addMetric(metrics, ContainerMetrics.EMBEDDER_SEQUENCE_LENGTH, EnumSet.of(max, sum, count));
-
return metrics;
}
@@ -237,6 +233,10 @@ public class VespaMetricSet {
addMetric(metrics, ContainerMetrics.FEED_LATENCY, EnumSet.of(sum, count, max));
addMetric(metrics, ContainerMetrics.FEED_HTTP_REQUESTS, EnumSet.of(count, rate));
+ // Embedders
+ addMetric(metrics, ContainerMetrics.EMBEDDER_LATENCY, EnumSet.of(max, sum, count));
+ addMetric(metrics, ContainerMetrics.EMBEDDER_SEQUENCE_LENGTH, EnumSet.of(max, sum, count));
+
// Deprecated metrics. TODO: Remove on Vespa 9.
addMetric(metrics, ContainerMetrics.SERVER_REJECTED_REQUESTS, EnumSet.of(rate, count));
addMetric(metrics, ContainerMetrics.SERVER_THREAD_POOL_SIZE, EnumSet.of(max, last));
diff --git a/metrics/src/tests/CMakeLists.txt b/metrics/src/tests/CMakeLists.txt
index 043dd7f736d..779b799cc75 100644
--- a/metrics/src/tests/CMakeLists.txt
+++ b/metrics/src/tests/CMakeLists.txt
@@ -9,7 +9,6 @@ vespa_add_executable(metrics_gtest_runner_app TEST
metricsettest.cpp
metrictest.cpp
snapshottest.cpp
- stresstest.cpp
summetrictest.cpp
valuemetrictest.cpp
gtest_runner.cpp
diff --git a/metrics/src/tests/stresstest.cpp b/metrics/src/tests/stresstest.cpp
deleted file mode 100644
index a5213ba8b2d..00000000000
--- a/metrics/src/tests/stresstest.cpp
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/metrics/metricmanager.h>
-#include <vespa/metrics/metrics.h>
-#include <vespa/metrics/summetric.hpp>
-#include <vespa/vespalib/util/time.h>
-#include <vespa/vespalib/util/size_literals.h>
-#include <thread>
-#include <vespa/vespalib/gtest/gtest.h>
-
-#include <vespa/log/log.h>
-LOG_SETUP(".metrics.test.stress");
-
-namespace metrics {
-
-namespace {
-struct InnerMetricSet : public MetricSet {
- LongCountMetric _count;
- LongAverageMetric _value1;
- LongAverageMetric _value2;
- SumMetric<LongAverageMetric> _valueSum;
-
- InnerMetricSet(const char* name, MetricSet* owner = 0);
- ~InnerMetricSet();
-
- MetricSet* clone(std::vector<Metric::UP> &ownerList, CopyType copyType,
- MetricSet* owner, bool includeUnused) const override;
-};
-
-InnerMetricSet::InnerMetricSet(const char* name, MetricSet* owner)
- : MetricSet(name, {}, "", owner),
- _count("count", {}, "", this),
- _value1("value1", {}, "", this),
- _value2("value2", {}, "", this),
- _valueSum("valuesum", {}, "", this)
-{
- _valueSum.addMetricToSum(_value1);
- _valueSum.addMetricToSum(_value2);
-}
-InnerMetricSet::~InnerMetricSet() = default;
-
-MetricSet*
-InnerMetricSet::clone(std::vector<Metric::UP> &ownerList, CopyType copyType,
- MetricSet* owner, bool includeUnused) const
-{
- if (copyType != CLONE) {
- return MetricSet::clone(ownerList, copyType, owner, includeUnused);
-}
- InnerMetricSet * myset = new InnerMetricSet(getName().c_str(), owner);
- myset->assignValues(*this);
- return myset;
-}
-
-struct OuterMetricSet : public MetricSet {
- InnerMetricSet _inner1;
- InnerMetricSet _inner2;
- SumMetric<InnerMetricSet> _innerSum;
- InnerMetricSet _tmp;
-
- OuterMetricSet(MetricSet* owner = 0);
- ~OuterMetricSet();
-};
-
-OuterMetricSet::OuterMetricSet(MetricSet* owner)
- : MetricSet("outer", {}, "", owner),
- _inner1("inner1", this),
- _inner2("inner2", this),
- _innerSum("innersum", {}, "", this),
- _tmp("innertmp", 0)
-{
- _innerSum.addMetricToSum(_inner1);
- _innerSum.addMetricToSum(_inner2);
-}
-
-OuterMetricSet::~OuterMetricSet() = default;
-
-struct Hammer {
- using UP = std::unique_ptr<Hammer>;
-
- OuterMetricSet& _metrics;
- std::atomic<bool> _stop_requested;
- std::thread _thread;
-
- Hammer(OuterMetricSet& metrics)
- : _metrics(metrics),
- _stop_requested(false),
- _thread()
- {
- _thread = std::thread([this](){run();});
- }
- ~Hammer() {
- _stop_requested = true;
- _thread.join();
- //std::cerr << "Loadgiver thread joined\n";
- }
-
- void run() {
- uint64_t i = 0;
- while (!_stop_requested.load(std::memory_order_relaxed)) {
- ++i;
- setMetrics(i, _metrics._inner1);
- setMetrics(i + 3, _metrics._inner2);
- }
- }
-
- void setMetrics(uint64_t val, InnerMetricSet& set) {
- set._count.inc(val);
- set._value1.addValue(val);
- set._value2.addValue(val + 10);
- }
-};
-
-}
-
-
-TEST(StressTest, test_stress)
-{
- OuterMetricSet metrics;
-
- LOG(info, "Starting load givers");
- std::vector<Hammer::UP> hammers;
- for (uint32_t i=0; i<10; ++i) {
- hammers.push_back(std::make_unique<Hammer>(metrics));
- }
- LOG(info, "Waiting to let loadgivers hammer a while");
- std::this_thread::sleep_for(5s);
-
- LOG(info, "Removing loadgivers");
- hammers.clear();
-
- LOG(info, "Printing end state");
- std::ostringstream ost;
- metrics.print(ost, true, "", 5);
- // std::cerr << ost.str() << "\n";
-}
-
-}
diff --git a/model-integration/abi-spec.json b/model-integration/abi-spec.json
index e7130d9c777..31f2b64d728 100644
--- a/model-integration/abi-spec.json
+++ b/model-integration/abi-spec.json
@@ -94,6 +94,7 @@
"public ai.vespa.llm.clients.LlmLocalClientConfig$Builder model(com.yahoo.config.ModelReference)",
"public ai.vespa.llm.clients.LlmLocalClientConfig$Builder parallelRequests(int)",
"public ai.vespa.llm.clients.LlmLocalClientConfig$Builder maxQueueSize(int)",
+ "public ai.vespa.llm.clients.LlmLocalClientConfig$Builder maxQueueWait(int)",
"public ai.vespa.llm.clients.LlmLocalClientConfig$Builder useGpu(boolean)",
"public ai.vespa.llm.clients.LlmLocalClientConfig$Builder gpuLayers(int)",
"public ai.vespa.llm.clients.LlmLocalClientConfig$Builder threads(int)",
@@ -139,6 +140,7 @@
"public java.nio.file.Path model()",
"public int parallelRequests()",
"public int maxQueueSize()",
+ "public int maxQueueWait()",
"public boolean useGpu()",
"public int gpuLayers()",
"public int threads()",
diff --git a/model-integration/src/main/java/ai/vespa/llm/clients/LocalLLM.java b/model-integration/src/main/java/ai/vespa/llm/clients/LocalLLM.java
index aa7c071b93a..b6409b5466d 100644
--- a/model-integration/src/main/java/ai/vespa/llm/clients/LocalLLM.java
+++ b/model-integration/src/main/java/ai/vespa/llm/clients/LocalLLM.java
@@ -3,6 +3,7 @@ package ai.vespa.llm.clients;
import ai.vespa.llm.InferenceParameters;
import ai.vespa.llm.LanguageModel;
+import ai.vespa.llm.LanguageModelException;
import ai.vespa.llm.completion.Completion;
import ai.vespa.llm.completion.Prompt;
import com.yahoo.component.AbstractComponent;
@@ -14,10 +15,14 @@ import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;
import java.util.logging.Logger;
@@ -29,14 +34,19 @@ import java.util.logging.Logger;
public class LocalLLM extends AbstractComponent implements LanguageModel {
private final static Logger logger = Logger.getLogger(LocalLLM.class.getName());
+
+ private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
+
private final LlamaModel model;
private final ThreadPoolExecutor executor;
+ private final long queueTimeoutMilliseconds;
private final int contextSize;
private final int maxTokens;
@Inject
public LocalLLM(LlmLocalClientConfig config) {
executor = createExecutor(config);
+ queueTimeoutMilliseconds = config.maxQueueWait();
// Maximum number of tokens to generate - need this since some models can just generate infinitely
maxTokens = config.maxTokens();
@@ -74,6 +84,7 @@ public class LocalLLM extends AbstractComponent implements LanguageModel {
logger.info("Closing LLM model...");
model.close();
executor.shutdownNow();
+ scheduler.shutdownNow();
}
@Override
@@ -104,22 +115,39 @@ public class LocalLLM extends AbstractComponent implements LanguageModel {
// Todo: more options?
var completionFuture = new CompletableFuture<Completion.FinishReason>();
+ var hasStarted = new AtomicBoolean(false);
try {
- executor.submit(() -> {
+ Future<?> future = executor.submit(() -> {
+ hasStarted.set(true);
for (LlamaModel.Output output : model.generate(inferParams)) {
consumer.accept(Completion.from(output.text, Completion.FinishReason.none));
}
completionFuture.complete(Completion.FinishReason.stop);
});
+
+ if (queueTimeoutMilliseconds > 0) {
+ scheduler.schedule(() -> {
+ if ( ! hasStarted.get()) {
+ future.cancel(false);
+ String error = rejectedExecutionReason("Rejected completion due to timeout waiting to start");
+ completionFuture.completeExceptionally(new LanguageModelException(504, error));
+ }
+ }, queueTimeoutMilliseconds, TimeUnit.MILLISECONDS);
+ }
+
} catch (RejectedExecutionException e) {
// If we have too many requests (active + any waiting in queue), we reject the completion
- int activeCount = executor.getActiveCount();
- int queueSize = executor.getQueue().size();
- String error = String.format("Rejected completion due to too many requests, " +
- "%d active, %d in queue", activeCount, queueSize);
+ String error = rejectedExecutionReason("Rejected completion due to too many requests");
throw new RejectedExecutionException(error);
}
return completionFuture;
}
+ private String rejectedExecutionReason(String prepend) {
+ int activeCount = executor.getActiveCount();
+ int queueSize = executor.getQueue().size();
+ return String.format("%s, %d active, %d in queue", prepend, activeCount, queueSize);
+ }
+
+
}
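
The LocalLLM change above combines a bounded executor queue with a scheduled timeout: an AtomicBoolean records whether the queued task has started, and if it has not started within maxQueueWait milliseconds the scheduler cancels it and fails the completion future with a 504. A minimal standalone sketch of that pattern follows — names and values are illustrative stand-ins, not the Vespa class itself:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;

public class QueueWaitTimeoutSketch {
    public static void main(String[] args) throws Exception {
        // One worker thread and a bounded queue, mirroring parallelRequests/maxQueueSize.
        ThreadPoolExecutor executor =
                new ThreadPoolExecutor(1, 1, 0, TimeUnit.SECONDS, new ArrayBlockingQueue<>(100));
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        long queueTimeoutMs = 10;   // illustrative stand-in for maxQueueWait

        AtomicBoolean hasStarted = new AtomicBoolean(false);
        CompletableFuture<String> completion = new CompletableFuture<>();
        Future<?> work = executor.submit(() -> {
            hasStarted.set(true);            // the task left the queue and is now running
            completion.complete("done");
        });
        if (queueTimeoutMs > 0) {
            scheduler.schedule(() -> {
                if (!hasStarted.get()) {     // still waiting in the queue: give up on it
                    work.cancel(false);      // false: never interrupt a task that already started
                    completion.completeExceptionally(new TimeoutException("timed out waiting in queue"));
                }
            }, queueTimeoutMs, TimeUnit.MILLISECONDS);
        }

        System.out.println(completion.handle((ok, err) -> ok != null ? ok : "rejected: " + err).get());
        executor.shutdownNow();
        scheduler.shutdownNow();
    }
}

There is a benign race if the task starts between the hasStarted check and the cancel; completing an already-completed future is a no-op, so the sketch (like the patch) tolerates it.
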
diff --git a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/lightgbm/LightGBMImporter.java b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/lightgbm/LightGBMImporter.java
index e1d2f8802a6..6a1e2f2562a 100644
--- a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/lightgbm/LightGBMImporter.java
+++ b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/lightgbm/LightGBMImporter.java
@@ -34,9 +34,9 @@ public class LightGBMImporter extends ModelImporter {
private boolean probe(File modelFile) {
try (JsonParser parser = Jackson.mapper().createParser(modelFile)) {
while (parser.nextToken() != null) {
- JsonToken token = parser.getCurrentToken();
+ JsonToken token = parser.currentToken();
if (token == JsonToken.FIELD_NAME) {
- if ("tree_info".equals(parser.getCurrentName())) return true;
+ if ("tree_info".equals(parser.currentName())) return true;
}
}
return false;
diff --git a/model-integration/src/main/resources/configdefinitions/llm-local-client.def b/model-integration/src/main/resources/configdefinitions/llm-local-client.def
index 4823a53ec46..6b83ffd0751 100755
--- a/model-integration/src/main/resources/configdefinitions/llm-local-client.def
+++ b/model-integration/src/main/resources/configdefinitions/llm-local-client.def
@@ -8,7 +8,10 @@ model model
parallelRequests int default=1
# Additional number of requests to put in queue for processing before starting to reject new requests
-maxQueueSize int default=10
+maxQueueSize int default=100
+
+# Max number of milliseconds to wait in the queue before rejecting a request
+maxQueueWait int default=10000
# Use GPU
useGpu bool default=true
@@ -24,6 +27,6 @@ threads int default=-1
# Context is divided between parallel requests. So for 10 parallel requests, each "slot" gets 1/10 of the context
contextSize int default=4096
-# Maximum number of tokens to process in one request - overriden by inference parameters
+# Maximum number of tokens to process in one request - overridden by inference parameters
maxTokens int default=512
diff --git a/model-integration/src/test/java/ai/vespa/llm/clients/LocalLLMTest.java b/model-integration/src/test/java/ai/vespa/llm/clients/LocalLLMTest.java
index a3b260f3fb5..4db1140d171 100644
--- a/model-integration/src/test/java/ai/vespa/llm/clients/LocalLLMTest.java
+++ b/model-integration/src/test/java/ai/vespa/llm/clients/LocalLLMTest.java
@@ -2,6 +2,7 @@
package ai.vespa.llm.clients;
import ai.vespa.llm.InferenceParameters;
+import ai.vespa.llm.LanguageModelException;
import ai.vespa.llm.completion.Completion;
import ai.vespa.llm.completion.Prompt;
import ai.vespa.llm.completion.StringPrompt;
@@ -96,7 +97,6 @@ public class LocalLLMTest {
try {
for (int i = 0; i < promptsToUse; i++) {
final var seq = i;
-
completions.set(seq, new StringBuilder());
futures.set(seq, llm.completeAsync(StringPrompt.from(prompts.get(seq)), defaultOptions(), completion -> {
completions.get(seq).append(completion.text());
@@ -122,8 +122,9 @@ public class LocalLLMTest {
var prompts = testPrompts();
var promptsToUse = prompts.size();
var parallelRequests = 2;
- var additionalQueue = 1;
- // 7 should be rejected
+ var additionalQueue = 100;
+ var queueWaitTime = 10;
+ // 8 should be rejected due to queue wait time
var futures = new ArrayList<CompletableFuture<Completion.FinishReason>>(Collections.nCopies(promptsToUse, null));
var completions = new ArrayList<StringBuilder>(Collections.nCopies(promptsToUse, null));
@@ -131,10 +132,12 @@ public class LocalLLMTest {
var config = new LlmLocalClientConfig.Builder()
.parallelRequests(parallelRequests)
.maxQueueSize(additionalQueue)
+ .maxQueueWait(queueWaitTime)
.model(ModelReference.valueOf(model));
var llm = new LocalLLM(config.build());
var rejected = new AtomicInteger(0);
+ var timedOut = new AtomicInteger(0);
try {
for (int i = 0; i < promptsToUse; i++) {
final var seq = i;
@@ -143,7 +146,14 @@ public class LocalLLMTest {
try {
var future = llm.completeAsync(StringPrompt.from(prompts.get(seq)), defaultOptions(), completion -> {
completions.get(seq).append(completion.text());
- }).exceptionally(exception -> Completion.FinishReason.error);
+ }).exceptionally(exception -> {
+ if (exception instanceof LanguageModelException lme) {
+ if (lme.code() == 504) {
+ timedOut.incrementAndGet();
+ }
+ }
+ return Completion.FinishReason.error;
+ });
futures.set(seq, future);
} catch (RejectedExecutionException e) {
rejected.incrementAndGet();
@@ -151,13 +161,14 @@ public class LocalLLMTest {
}
for (int i = 0; i < promptsToUse; i++) {
if (futures.get(i) != null) {
- assertNotEquals(futures.get(i).join(), Completion.FinishReason.error);
+ futures.get(i).join();
}
}
} finally {
llm.deconstruct();
}
- assertEquals(7, rejected.get());
+ assertEquals(0, rejected.get());
+ assertEquals(8, timedOut.get());
}
private static InferenceParameters defaultOptions() {
diff --git a/parent/pom.xml b/parent/pom.xml
index 3299a9fb871..45259e567ca 100644
--- a/parent/pom.xml
+++ b/parent/pom.xml
@@ -317,7 +317,7 @@
-->
<groupId>org.openrewrite.maven</groupId>
<artifactId>rewrite-maven-plugin</artifactId>
- <version>5.29.0</version>
+ <version>5.30.0</version>
<configuration>
<activeRecipes>
<recipe>org.openrewrite.java.testing.junit5.JUnit5BestPractices</recipe>
@@ -327,7 +327,7 @@
<dependency>
<groupId>org.openrewrite.recipe</groupId>
<artifactId>rewrite-testing-frameworks</artifactId>
- <version>2.7.0</version>
+ <version>2.8.0</version>
</dependency>
</dependencies>
</plugin>
@@ -1199,7 +1199,7 @@
See pluginManagement of rewrite-maven-plugin for more details -->
<groupId>org.openrewrite.recipe</groupId>
<artifactId>rewrite-recipe-bom</artifactId>
- <version>2.10.0</version>
+ <version>2.11.0</version>
<type>pom</type>
<scope>import</scope>
</dependency>
diff --git a/predicate-search-core/src/main/java/com/yahoo/search/predicate/PredicateQueryParser.java b/predicate-search-core/src/main/java/com/yahoo/search/predicate/PredicateQueryParser.java
index 09487506ffe..42b6195549e 100644
--- a/predicate-search-core/src/main/java/com/yahoo/search/predicate/PredicateQueryParser.java
+++ b/predicate-search-core/src/main/java/com/yahoo/search/predicate/PredicateQueryParser.java
@@ -10,7 +10,6 @@ import java.util.Arrays;
/**
* Parses predicate queries from JSON.
- *
* Input JSON is assumed to have the following format:
* {
* "features": [
@@ -46,7 +45,7 @@ public class PredicateQueryParser {
try (JsonParser parser = factory.createParser(json)) {
skipToken(parser, JsonToken.START_OBJECT);
while (parser.nextToken() != JsonToken.END_OBJECT) {
- String fieldName = parser.getCurrentName();
+ String fieldName = parser.currentName();
switch (fieldName) {
case "features":
parseFeatures(parser, JsonParser::getText, featureHandler);
@@ -82,7 +81,7 @@ public class PredicateQueryParser {
long subqueryBitmap = SubqueryBitmap.DEFAULT_VALUE; // Specifying subquery bitmap is optional.
while (parser.nextToken() != JsonToken.END_OBJECT) {
- String fieldName = parser.getCurrentName();
+ String fieldName = parser.currentName();
skipToken(parser, JsonToken.VALUE_STRING, JsonToken.VALUE_NUMBER_INT);
switch (fieldName) {
case "k":
@@ -100,11 +99,11 @@ public class PredicateQueryParser {
}
if (key == null) {
throw new IllegalArgumentException(
- String.format("Feature key is missing! (%s)", parser.getCurrentLocation()));
+ String.format("Feature key is missing! (%s)", parser.currentLocation()));
}
if (value == null) {
throw new IllegalArgumentException(
- String.format("Feature value is missing! (%s)", parser.getCurrentLocation()));
+ String.format("Feature value is missing! (%s)", parser.currentLocation()));
}
featureHandler.accept(key, value, subqueryBitmap);
}
@@ -114,7 +113,7 @@ public class PredicateQueryParser {
if (Arrays.stream(expected).noneMatch(e -> e.equals(actual))) {
throw new IllegalArgumentException(
String.format("Expected a token in %s, got %s (%s).",
- Arrays.toString(expected), actual, parser.getTokenLocation()));
+ Arrays.toString(expected), actual, parser.currentTokenLocation()));
}
}
diff --git a/searchcore/src/vespa/searchcore/proton/documentmetastore/lid_allocator.cpp b/searchcore/src/vespa/searchcore/proton/documentmetastore/lid_allocator.cpp
index 0c986422be6..758d1336399 100644
--- a/searchcore/src/vespa/searchcore/proton/documentmetastore/lid_allocator.cpp
+++ b/searchcore/src/vespa/searchcore/proton/documentmetastore/lid_allocator.cpp
@@ -210,7 +210,8 @@ private:
}
FlowStats calculate_flow_stats(uint32_t docid_limit) const override {
double rel_est = abs_to_rel_est(_activeLids.size(), docid_limit);
- return {rel_est, bitvector_cost(), bitvector_strict_cost(rel_est)};
+ double do_not_make_me_strict = 1000.0;
+ return {rel_est, bitvector_cost(), do_not_make_me_strict * bitvector_strict_cost(rel_est)};
}
SearchIterator::UP
createLeafSearch(const TermFieldMatchDataArray &tfmda) const override
diff --git a/searchcore/src/vespa/searchcore/proton/server/rpc_hooks.cpp b/searchcore/src/vespa/searchcore/proton/server/rpc_hooks.cpp
index b9794bf6a75..d4ae635d760 100644
--- a/searchcore/src/vespa/searchcore/proton/server/rpc_hooks.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/rpc_hooks.cpp
@@ -241,10 +241,16 @@ RPCHooksBase::getProtonStatus(FRT_RPCRequest *req)
}
void
-RPCHooksBase::rpc_die(FRT_RPCRequest *)
+RPCHooksBase::rpc_die(FRT_RPCRequest * req)
{
LOG(debug, "RPCHooksBase::rpc_die");
- _exit(0);
+ req->Detach();
+ letProtonDo(makeLambdaTask([req]() {
+ LOG(debug, "Nap for 10ms and then quickly exit.");
+ req->Return();
+ std::this_thread::sleep_for(10ms);
+ std::quick_exit(0);
+ }));
}
void
diff --git a/searchlib/src/tests/queryeval/iterator_benchmark/intermediate_blueprint_factory.cpp b/searchlib/src/tests/queryeval/iterator_benchmark/intermediate_blueprint_factory.cpp
index 8591ec1415d..51177850155 100644
--- a/searchlib/src/tests/queryeval/iterator_benchmark/intermediate_blueprint_factory.cpp
+++ b/searchlib/src/tests/queryeval/iterator_benchmark/intermediate_blueprint_factory.cpp
@@ -2,14 +2,14 @@
#include "intermediate_blueprint_factory.h"
#include <vespa/searchlib/queryeval/intermediate_blueprints.h>
+#include <vespa/searchlib/attribute/singlenumericattribute.h>
#include <iomanip>
#include <sstream>
namespace search::queryeval::test {
-template <typename BlueprintType>
char
-IntermediateBlueprintFactory<BlueprintType>::child_name(void* blueprint) const
+IntermediateBlueprintFactory::child_name(void* blueprint) const
{
auto itr = _child_names.find(blueprint);
if (itr != _child_names.end()) {
@@ -18,35 +18,33 @@ IntermediateBlueprintFactory<BlueprintType>::child_name(void* blueprint) const
return '?';
}
-template <typename BlueprintType>
-IntermediateBlueprintFactory<BlueprintType>::IntermediateBlueprintFactory(vespalib::stringref name)
+IntermediateBlueprintFactory::IntermediateBlueprintFactory(vespalib::stringref name)
: _name(name),
_children(),
_child_names()
{
}
-template <typename BlueprintType>
-IntermediateBlueprintFactory<BlueprintType>::~IntermediateBlueprintFactory() = default;
+IntermediateBlueprintFactory::~IntermediateBlueprintFactory() = default;
-template <typename BlueprintType>
std::unique_ptr<Blueprint>
-IntermediateBlueprintFactory<BlueprintType>::make_blueprint()
+IntermediateBlueprintFactory::make_blueprint()
{
- auto res = std::make_unique<BlueprintType>();
+ auto res = make_self();
_child_names.clear();
char name = 'A';
+ uint32_t source = 1;
for (const auto& factory : _children) {
auto child = factory->make_blueprint();
_child_names[child.get()] = name++;
+ child->setSourceId(source++); // ignored by non-source-blender blueprints
res->addChild(std::move(child));
}
return res;
}
-template <typename BlueprintType>
vespalib::string
-IntermediateBlueprintFactory<BlueprintType>::get_name(Blueprint& blueprint) const
+IntermediateBlueprintFactory::get_name(Blueprint& blueprint) const
{
auto* intermediate = blueprint.asIntermediate();
if (intermediate != nullptr) {
@@ -69,11 +67,29 @@ IntermediateBlueprintFactory<BlueprintType>::get_name(Blueprint& blueprint) cons
return get_class_name(blueprint);
}
-template class IntermediateBlueprintFactory<AndBlueprint>;
+//-----------------------------------------------------------------------------
AndBlueprintFactory::AndBlueprintFactory()
- : IntermediateBlueprintFactory<AndBlueprint>("AND")
+ : IntermediateBlueprintFactory("AND")
{}
+std::unique_ptr<IntermediateBlueprint>
+AndBlueprintFactory::make_self() const
+{
+ return std::make_unique<AndBlueprint>();
+}
+
+//-----------------------------------------------------------------------------
+
+SourceBlenderBlueprintFactory::SourceBlenderBlueprintFactory()
+ : IntermediateBlueprintFactory("SB"),
+ _selector(250, "my_source_blender", 1000)
+{}
+
+std::unique_ptr<IntermediateBlueprint>
+SourceBlenderBlueprintFactory::make_self() const
+{
+ return std::make_unique<SourceBlenderBlueprint>(_selector);
}
+}
diff --git a/searchlib/src/tests/queryeval/iterator_benchmark/intermediate_blueprint_factory.h b/searchlib/src/tests/queryeval/iterator_benchmark/intermediate_blueprint_factory.h
index 6f7fe4f9ee7..c791d866612 100644
--- a/searchlib/src/tests/queryeval/iterator_benchmark/intermediate_blueprint_factory.h
+++ b/searchlib/src/tests/queryeval/iterator_benchmark/intermediate_blueprint_factory.h
@@ -4,6 +4,7 @@
#include "benchmark_blueprint_factory.h"
#include <vespa/searchlib/queryeval/intermediate_blueprints.h>
+#include <vespa/searchlib/attribute/fixedsourceselector.h>
#include <unordered_map>
namespace search::queryeval::test {
@@ -11,7 +12,6 @@ namespace search::queryeval::test {
/**
* Factory that creates an IntermediateBlueprint (of the given type) with children created by the given factories.
*/
-template <typename BlueprintType>
class IntermediateBlueprintFactory : public BenchmarkBlueprintFactory {
private:
vespalib::string _name;
@@ -19,7 +19,8 @@ private:
std::unordered_map<void*, char> _child_names;
char child_name(void* blueprint) const;
-
+protected:
+ virtual std::unique_ptr<IntermediateBlueprint> make_self() const = 0;
public:
IntermediateBlueprintFactory(vespalib::stringref name);
~IntermediateBlueprintFactory();
@@ -30,10 +31,26 @@ public:
vespalib::string get_name(Blueprint& blueprint) const override;
};
-class AndBlueprintFactory : public IntermediateBlueprintFactory<AndBlueprint> {
+class AndBlueprintFactory : public IntermediateBlueprintFactory {
+protected:
+ std::unique_ptr<IntermediateBlueprint> make_self() const override;
public:
AndBlueprintFactory();
};
-}
+class SourceBlenderBlueprintFactory : public IntermediateBlueprintFactory
+{
+private:
+ FixedSourceSelector _selector;
+protected:
+ std::unique_ptr<IntermediateBlueprint> make_self() const override;
+public:
+ SourceBlenderBlueprintFactory();
+ void init_selector(auto f, uint32_t limit) {
+ for (uint32_t i = 0; i < limit; ++i) {
+ _selector.setSource(i, f(i));
+ }
+ }
+};
+}
diff --git a/searchlib/src/tests/queryeval/iterator_benchmark/iterator_benchmark_test.cpp b/searchlib/src/tests/queryeval/iterator_benchmark/iterator_benchmark_test.cpp
index 2977664f6ad..96472200952 100644
--- a/searchlib/src/tests/queryeval/iterator_benchmark/iterator_benchmark_test.cpp
+++ b/searchlib/src/tests/queryeval/iterator_benchmark/iterator_benchmark_test.cpp
@@ -292,10 +292,6 @@ benchmark_search(BenchmarkBlueprintFactory& factory, uint32_t docid_limit, bool
}
}
-
-
-
-
//-----------------------------------------------------------------------------
double est_forced_strict_cost(double estimate, double strict_cost, double rate) {
@@ -430,15 +426,29 @@ to_string(bool val)
void
print_result_header()
{
- std::cout << "| chn | f_ratio | o_ratio | a_ratio | f.est | f.cost | f.act_cost | f.scost | f.act_scost | hits | seeks | time_ms | act_cost | ns_per_seek | ms_per_act_cost | iterator | blueprint |" << std::endl;
+ std::cout << "| in_flow | chn | o_ratio | a_ratio | f.est | f.cost | f.act_cost | f.scost | f.act_scost | hits | seeks | time_ms | act_cost | ns_per_seek | ms_per_act_cost | iterator | blueprint |" << std::endl;
+}
+
+std::ostream &operator<<(std::ostream &dst, InFlow in_flow) {
+ auto old_w = dst.width();
+ auto old_p = dst.precision();
+ dst << std::setw(7) << std::setprecision(5);
+ if (in_flow.strict()) {
+ dst << " STRICT";
+ } else {
+ dst << in_flow.rate();
+ }
+ dst << std::setw(old_w);
+ dst << std::setprecision(old_p);
+ return dst;
}
void
print_result(const BenchmarkResult& res, uint32_t children, double op_hit_ratio, InFlow in_flow, uint32_t num_docs)
{
std::cout << std::fixed << std::setprecision(5)
- << "| " << std::setw(5) << children
- << " | " << std::setw(7) << in_flow.rate()
+ << "| " << in_flow
+ << " | " << std::setw(5) << children
<< " | " << std::setw(7) << op_hit_ratio
<< " | " << std::setw(7) << ((double) res.hits / (double) num_docs)
<< " | " << std::setw(6) << res.flow.estimate
@@ -684,23 +694,25 @@ run_benchmarks(const BenchmarkSetup& setup)
void
print_intermediate_blueprint_result_header(size_t children)
{
+ std::cout << "| in_flow";
// This matches the naming scheme in IntermediateBlueprintFactory.
char name = 'A';
for (size_t i = 0; i < children; ++i) {
- std::cout << "| " << name++ << ".ratio ";
+ std::cout << " | " << name++ << ".ratio";
}
- std::cout << "| flow.cost | flow.scost | flow.est | ratio | hits | seeks | ms_per_cost | time_ms | algo | blueprint |" << std::endl;
+ std::cout << " | flow.cost | flow.scost | flow.est | ratio | hits | seeks | ms_per_cost | time_ms | algo | blueprint |" << std::endl;
}
void
-print_intermediate_blueprint_result(const BenchmarkResult& res, const std::vector<double>& children_ratios, PlanningAlgo algo, uint32_t num_docs)
+print_intermediate_blueprint_result(const BenchmarkResult& res, const std::vector<double>& children_ratios, PlanningAlgo algo, InFlow in_flow, uint32_t num_docs)
{
- std::cout << std::fixed << std::setprecision(5);
+ std::cout << std::fixed << std::setprecision(5)
+ << "| " << in_flow;
for (auto ratio : children_ratios) {
- std::cout << "| " << std::setw(7) << ratio << " ";
+ std::cout << " | " << std::setw(7) << ratio;
}
std::cout << std::setprecision(5)
- << "| " << std::setw(10) << res.flow.cost
+ << " | " << std::setw(10) << res.flow.cost
<< " | " << std::setw(10) << res.flow.strict_cost
<< " | " << std::setw(8) << res.flow.estimate
<< " | " << std::setw(7) << ((double) res.hits / (double) num_docs)
@@ -748,9 +760,8 @@ struct BlueprintFactorySetup {
BlueprintFactorySetup::~BlueprintFactorySetup() = default;
-template <typename IntermediateBlueprintFactoryType>
void
-run_intermediate_blueprint_benchmark(const BlueprintFactorySetup& a, const BlueprintFactorySetup& b, size_t num_docs)
+run_intermediate_blueprint_benchmark(auto factory_factory, std::vector<InFlow> in_flows, const BlueprintFactorySetup& a, const BlueprintFactorySetup& b, size_t num_docs)
{
print_intermediate_blueprint_result_header(2);
double max_speedup = 0.0;
@@ -758,26 +769,28 @@ run_intermediate_blueprint_benchmark(const BlueprintFactorySetup& a, const Bluep
for (double b_hit_ratio: b.op_hit_ratios) {
auto b_factory = b.make_factory_shared(num_docs, b_hit_ratio);
for (double a_hit_ratio : a.op_hit_ratios) {
- IntermediateBlueprintFactoryType factory;
- factory.add_child(a.make_factory(num_docs, a_hit_ratio));
- factory.add_child(b_factory);
+ auto factory = factory_factory();
+ factory->add_child(a.make_factory(num_docs, a_hit_ratio));
+ factory->add_child(b_factory);
double time_ms_esti = 0.0;
- for (auto algo: {PlanningAlgo::Order, PlanningAlgo::Estimate, PlanningAlgo::Cost,
- PlanningAlgo::CostForceStrict}) {
- auto res = benchmark_search(factory, num_docs + 1, true, false, false, 1.0, algo);
- print_intermediate_blueprint_result(res, {a_hit_ratio, b_hit_ratio}, algo, num_docs);
- if (algo == PlanningAlgo::Estimate) {
- time_ms_esti = res.time_ms;
- }
- if (algo == PlanningAlgo::CostForceStrict) {
- double speedup = time_ms_esti / res.time_ms;
- if (speedup > max_speedup) {
- max_speedup = speedup;
+ for (InFlow in_flow: in_flows) {
+ for (auto algo: {PlanningAlgo::Order, PlanningAlgo::Estimate, PlanningAlgo::Cost,
+ PlanningAlgo::CostForceStrict}) {
+ auto res = benchmark_search(*factory, num_docs + 1, in_flow.strict(), false, false, in_flow.rate(), algo);
+ print_intermediate_blueprint_result(res, {a_hit_ratio, b_hit_ratio}, algo, in_flow, num_docs);
+ if (algo == PlanningAlgo::Estimate) {
+ time_ms_esti = res.time_ms;
}
- if (speedup < min_speedup) {
- min_speedup = speedup;
+ if (algo == PlanningAlgo::CostForceStrict) {
+ double speedup = time_ms_esti / res.time_ms;
+ if (speedup > max_speedup) {
+ max_speedup = speedup;
+ }
+ if (speedup < min_speedup) {
+ min_speedup = speedup;
+ }
+ std::cout << "speedup (esti/forc)=" << std::setprecision(4) << speedup << std::endl;
}
- std::cout << "speedup (esti/forc)=" << std::setprecision(4) << speedup << std::endl;
}
}
}
@@ -789,7 +802,19 @@ void
run_and_benchmark(const BlueprintFactorySetup& a, const BlueprintFactorySetup& b, size_t num_docs)
{
std::cout << "AND[A={" << a.to_string() << "},B={" << b.to_string() << "}]" << std::endl;
- run_intermediate_blueprint_benchmark<AndBlueprintFactory>(a, b, num_docs);
+ run_intermediate_blueprint_benchmark([](){ return std::make_unique<AndBlueprintFactory>(); }, {true}, a, b, num_docs);
+}
+
+void
+run_source_blender_benchmark(const BlueprintFactorySetup& a, const BlueprintFactorySetup& b, size_t num_docs)
+{
+ std::cout << "SB[A={" << a.to_string() << "},B={" << b.to_string() << "}]" << std::endl;
+ auto factory_factory = [&](){
+ auto factory = std::make_unique<SourceBlenderBlueprintFactory>();
+ factory->init_selector([](uint32_t i){ return (i%10 == 0) ? 1 : 2; }, num_docs + 1);
+ return factory;
+ };
+ run_intermediate_blueprint_benchmark(factory_factory, {true, 0.75, 0.5, 0.25, 0.1, 0.01, 0.001}, a, b, num_docs);
}
//-------------------------------------------------------------------------------------
@@ -973,6 +998,15 @@ TEST(IteratorBenchmark, analyze_AND_bitvector_vs_IN)
}
}
+TEST(IteratorBenchmark, analyze_strict_SOURCEBLENDER_memory_and_disk)
+{
+ for (double small_ratio: {0.001, 0.005, 0.01, 0.05}) {
+ run_source_blender_benchmark({str_fs, QueryOperator::Term, {small_ratio}},
+ {str_index, QueryOperator::Term, {small_ratio * 10}},
+ num_docs);
+ }
+}
+
TEST(IteratorBenchmark, analyze_OR_non_strict_fs)
{
for (auto or_hit_ratio : {0.01, 0.1, 0.5}) {
diff --git a/searchlib/src/tests/util/token_extractor/token_extractor_test.cpp b/searchlib/src/tests/util/token_extractor/token_extractor_test.cpp
index e6944e257e9..5eb42bb8ac4 100644
--- a/searchlib/src/tests/util/token_extractor/token_extractor_test.cpp
+++ b/searchlib/src/tests/util/token_extractor/token_extractor_test.cpp
@@ -118,7 +118,7 @@ TEST_F(TokenExtractorTest, empty_string)
TEST_F(TokenExtractorTest, plain_string)
{
- EXPECT_EQ((Words{"Plain string"}), process(StringFieldValue("Plain string")));
+ EXPECT_EQ((Words{}), process(StringFieldValue("Plain string")));
}
TEST_F(TokenExtractorTest, normal_string)
diff --git a/searchlib/src/vespa/searchlib/attribute/attribute_blueprint_factory.cpp b/searchlib/src/vespa/searchlib/attribute/attribute_blueprint_factory.cpp
index 5b17b491a20..70b86bf22a1 100644
--- a/searchlib/src/vespa/searchlib/attribute/attribute_blueprint_factory.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/attribute_blueprint_factory.cpp
@@ -94,6 +94,7 @@ using search::queryeval::StrictHeapOrSearch;
using search::queryeval::WeightedSetTermBlueprint;
using search::queryeval::flow::btree_cost;
using search::queryeval::flow::btree_strict_cost;
+using search::queryeval::flow::estimate_when_unknown;
using search::queryeval::flow::get_num_indirections;
using search::queryeval::flow::lookup_cost;
using search::queryeval::flow::lookup_strict_cost;
@@ -150,10 +151,9 @@ public:
search::queryeval::FlowStats calculate_flow_stats(uint32_t docid_limit) const override {
if (_hit_estimate.is_unknown()) {
// E.g. attributes without fast-search are not able to provide a hit estimate.
- // In this case we just assume matching half of the document corpus.
// In addition, matching is lookup based, and we are not able to skip documents efficiently when being strict.
size_t indirections = get_num_indirections(_attr.getBasicType(), _attr.getCollectionType());
- return {0.5, lookup_cost(indirections), lookup_strict_cost(indirections)};
+ return {estimate_when_unknown(), lookup_cost(indirections), lookup_strict_cost(indirections)};
} else {
double rel_est = abs_to_rel_est(_hit_estimate.est_hits(), docid_limit);
return {rel_est, btree_cost(rel_est), btree_strict_cost(rel_est)};
diff --git a/searchlib/src/vespa/searchlib/fef/test/matchdatabuilder.cpp b/searchlib/src/vespa/searchlib/fef/test/matchdatabuilder.cpp
index 3645496e4fb..41551ac1062 100644
--- a/searchlib/src/vespa/searchlib/fef/test/matchdatabuilder.cpp
+++ b/searchlib/src/vespa/searchlib/fef/test/matchdatabuilder.cpp
@@ -10,11 +10,11 @@ LOG_SETUP(".fef.matchdatabuilder");
namespace search::fef::test {
-MatchDataBuilder::MatchDataBuilder(QueryEnvironment &queryEnv, MatchData &data) :
- _queryEnv(queryEnv),
- _data(data),
- _index(),
- _match()
+MatchDataBuilder::MatchDataBuilder(QueryEnvironment &queryEnv, MatchData &data)
+ : _queryEnv(queryEnv),
+ _data(data),
+ _index(),
+ _match()
{
// reset all match data objects.
for (TermFieldHandle handle = 0; handle < _data.getNumTermFields(); ++handle) {
@@ -22,7 +22,7 @@ MatchDataBuilder::MatchDataBuilder(QueryEnvironment &queryEnv, MatchData &data)
}
}
-MatchDataBuilder::~MatchDataBuilder() {}
+MatchDataBuilder::~MatchDataBuilder() = default;
TermFieldMatchData *
MatchDataBuilder::getTermFieldMatchData(uint32_t termId, uint32_t fieldId)
@@ -59,7 +59,7 @@ MatchDataBuilder::addElement(const vespalib::string &fieldName, int32_t weight,
LOG(error, "Field '%s' does not exist.", fieldName.c_str());
return false;
}
- _index[info->id()].elements.push_back(MyElement(weight, length));
+ _index[info->id()].elements.emplace_back(weight, length);
return true;
}
@@ -77,8 +77,7 @@ MatchDataBuilder::addOccurence(const vespalib::string &fieldName, uint32_t termI
}
const ITermFieldData *tfd = _queryEnv.getTerm(termId)->lookupField(info->id());
if (tfd == nullptr) {
- LOG(error, "Field '%s' is not searched by the given term.",
- fieldName.c_str());
+ LOG(error, "Field '%s' is not searched by the given term.", fieldName.c_str());
return false;
}
_match[termId][info->id()].insert(Position(pos, element));
@@ -99,14 +98,13 @@ MatchDataBuilder::setWeight(const vespalib::string &fieldName, uint32_t termId,
}
const ITermFieldData *tfd = _queryEnv.getTerm(termId)->lookupField(info->id());
if (tfd == nullptr) {
- LOG(error, "Field '%s' is not searched by the given term.",
- fieldName.c_str());
+ LOG(error, "Field '%s' is not searched by the given term.", fieldName.c_str());
return false;
}
uint32_t eid = _index[info->id()].elements.size();
_match[termId][info->id()].clear();
_match[termId][info->id()].insert(Position(0, eid));
- _index[info->id()].elements.push_back(MyElement(weight, 1));
+ _index[info->id()].elements.emplace_back(weight, 1);
return true;
}
@@ -142,19 +140,13 @@ MatchDataBuilder::apply(uint32_t docId)
// For each occurence of that term, in that field, do
for (const auto& occ : field_elem.second) {
// Append a term match position to the term match data.
- match->appendPosition(TermFieldMatchDataPosition(
- occ.eid,
- occ.pos,
- field.getWeight(occ.eid),
- field.getLength(occ.eid)));
- LOG(debug,
- "Added occurence of term '%u' in field '%s'"
- " at position '%u'.",
+ match->appendPosition(TermFieldMatchDataPosition(occ.eid, occ.pos,
+ field.getWeight(occ.eid),
+ field.getLength(occ.eid)));
+ LOG(debug, "Added occurrence of term '%u' in field '%s' at position '%u'.",
termId, name.c_str(), occ.pos);
if (occ.pos >= field.getLength(occ.eid)) {
- LOG(warning,
- "Added occurence of term '%u' in field '%s'"
- " at position '%u' >= fieldLen '%u'.",
+ LOG(warning, "Added occurrence of term '%u' in field '%s' at position '%u' >= fieldLen '%u'.",
termId, name.c_str(), occ.pos, field.getLength(occ.eid));
}
}
diff --git a/searchlib/src/vespa/searchlib/fef/test/matchdatabuilder.h b/searchlib/src/vespa/searchlib/fef/test/matchdatabuilder.h
index 0e5025efd37..753e1596520 100644
--- a/searchlib/src/vespa/searchlib/fef/test/matchdatabuilder.h
+++ b/searchlib/src/vespa/searchlib/fef/test/matchdatabuilder.h
@@ -13,7 +13,7 @@ public:
struct MyElement {
int32_t weight;
uint32_t length;
- MyElement(int32_t w, uint32_t l) : weight(w), length(l) {}
+ MyElement(int32_t w, uint32_t l) noexcept : weight(w), length(l) {}
};
struct MyField {
uint32_t fieldLength;
@@ -21,7 +21,7 @@ public:
MyField() : fieldLength(0), elements() {}
MyElement &getElement(uint32_t eid) {
while (elements.size() <= eid) {
- elements.push_back(MyElement(0, 0));
+ elements.emplace_back(0, 0);
}
return elements[eid];
}
@@ -68,6 +68,8 @@ public:
* @param data The match data to build in.
*/
MatchDataBuilder(QueryEnvironment &queryEnv, MatchData &data);
+ MatchDataBuilder(const MatchDataBuilder &) = delete;
+ MatchDataBuilder & operator=(const MatchDataBuilder &) = delete;
~MatchDataBuilder();
/**
@@ -133,10 +135,6 @@ public:
bool apply(uint32_t docId);
private:
- MatchDataBuilder(const MatchDataBuilder &); // hide
- MatchDataBuilder & operator=(const MatchDataBuilder &); // hide
-
-private:
QueryEnvironment &_queryEnv;
MatchData &_data;
IndexData _index;
diff --git a/searchlib/src/vespa/searchlib/memoryindex/field_index.cpp b/searchlib/src/vespa/searchlib/memoryindex/field_index.cpp
index 2bc94073c92..49a0f0621d2 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/field_index.cpp
+++ b/searchlib/src/vespa/searchlib/memoryindex/field_index.cpp
@@ -213,6 +213,17 @@ FieldIndex<interleaved_features>::getMemoryUsage() const
}
template <bool interleaved_features>
+void
+FieldIndex<interleaved_features>::commit()
+{
+ _remover.flush();
+ freeze();
+ assign_generation();
+ incGeneration();
+ reclaim_memory();
+}
+
+template <bool interleaved_features>
queryeval::SearchIterator::UP
FieldIndex<interleaved_features>::make_search_iterator(const vespalib::string& term,
uint32_t field_id,
@@ -248,7 +259,7 @@ public:
: SimpleLeafBlueprint(field),
_guard(),
_field(field),
- _posting_itr(posting_itr),
+ _posting_itr(std::move(posting_itr)),
_feature_store(feature_store),
_field_id(field_id),
_query_term(query_term),
@@ -302,7 +313,7 @@ FieldIndex<interleaved_features>::make_term_blueprint(const vespalib::string& te
auto posting_itr = findFrozen(term);
bool use_bit_vector = field.isFilter();
return std::make_unique<MemoryTermBlueprint<interleaved_features>>
- (std::move(guard), posting_itr, getFeatureStore(), field, field_id, term, use_bit_vector);
+ (std::move(guard), std::move(posting_itr), getFeatureStore(), field, field_id, term, use_bit_vector);
}
template class FieldIndex<false>;
diff --git a/searchlib/src/vespa/searchlib/memoryindex/field_index.h b/searchlib/src/vespa/searchlib/memoryindex/field_index.h
index 0b245300a7b..18e60cf2194 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/field_index.h
+++ b/searchlib/src/vespa/searchlib/memoryindex/field_index.h
@@ -87,13 +87,7 @@ public:
vespalib::MemoryUsage getMemoryUsage() const override;
PostingListStore &getPostingListStore() { return _postingListStore; }
- void commit() override {
- _remover.flush();
- freeze();
- assign_generation();
- incGeneration();
- reclaim_memory();
- }
+ void commit() override;
/**
* Should only by used by unit tests.
diff --git a/searchlib/src/vespa/searchlib/queryeval/blueprint.cpp b/searchlib/src/vespa/searchlib/queryeval/blueprint.cpp
index 7334db4b716..cfa165be067 100644
--- a/searchlib/src/vespa/searchlib/queryeval/blueprint.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/blueprint.cpp
@@ -1,14 +1,15 @@
// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "blueprint.h"
-#include "leaf_blueprints.h"
+#include "andnotsearch.h"
+#include "andsearch.h"
#include "emptysearch.h"
-#include "full_search.h"
#include "field_spec.hpp"
-#include "andsearch.h"
-#include "orsearch.h"
-#include "andnotsearch.h"
+#include "flow_tuning.h"
+#include "full_search.h"
+#include "leaf_blueprints.h"
#include "matching_elements_search.h"
+#include "orsearch.h"
#include <vespa/searchlib/fef/termfieldmatchdataarray.h>
#include <vespa/vespalib/objects/visit.hpp>
#include <vespa/vespalib/objects/objectdumper.h>
@@ -238,7 +239,7 @@ Blueprint::default_flow_stats(uint32_t docid_limit, uint32_t abs_est, size_t chi
FlowStats
Blueprint::default_flow_stats(size_t child_cnt)
{
- return {0.5, 1.0 + child_cnt, 1.0 + child_cnt};
+ return {flow::estimate_when_unknown(), 1.0 + child_cnt, 1.0 + child_cnt};
}
std::unique_ptr<MatchingElementsSearch>
diff --git a/searchlib/src/vespa/searchlib/queryeval/flow_tuning.h b/searchlib/src/vespa/searchlib/queryeval/flow_tuning.h
index 22faa920bc0..5ed61ef9fc8 100644
--- a/searchlib/src/vespa/searchlib/queryeval/flow_tuning.h
+++ b/searchlib/src/vespa/searchlib/queryeval/flow_tuning.h
@@ -60,6 +60,12 @@ inline size_t get_num_indirections(const attribute::BasicType& basic_type,
return res;
}
+// Some blueprints are not able to provide a hit estimate (e.g. attributes without fast-search).
+// In such cases the following estimate is used instead. In most cases this is an overestimate.
+inline double estimate_when_unknown() {
+ return 0.1;
+}
+
// Non-strict cost of lookup based matching in an attribute (not fast-search).
// Test used: IteratorBenchmark::analyze_term_search_in_attributes_non_strict
inline double lookup_cost(size_t num_indirections) {
diff --git a/searchlib/src/vespa/searchlib/queryeval/simple_phrase_search.cpp b/searchlib/src/vespa/searchlib/queryeval/simple_phrase_search.cpp
index 2b25aa29747..c5435b557b0 100644
--- a/searchlib/src/vespa/searchlib/queryeval/simple_phrase_search.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/simple_phrase_search.cpp
@@ -191,16 +191,14 @@ SimplePhraseSearch::doSeek(uint32_t doc_id) {
void
SimplePhraseSearch::doStrictSeek(uint32_t doc_id) {
uint32_t next_candidate = doc_id;
- while (getDocId() < doc_id || getDocId() == beginId()) {
- getChildren()[0]->seek(next_candidate + 1);
- next_candidate = getChildren()[0]->getDocId();
+ auto &best_child = *getChildren()[_eval_order[0]];
+ while (getDocId() < doc_id) {
+ best_child.seek(next_candidate + 1);
+ next_candidate = best_child.getDocId();
if (isAtEnd(next_candidate)) {
setAtEnd();
return;
}
- // child must behave as strict.
- assert(next_candidate > doc_id && next_candidate != beginId());
-
phraseSeek(next_candidate);
}
}
diff --git a/searchlib/src/vespa/searchlib/tensor/euclidean_distance.cpp b/searchlib/src/vespa/searchlib/tensor/euclidean_distance.cpp
index 3ab3a1123eb..441ade27d1f 100644
--- a/searchlib/src/vespa/searchlib/tensor/euclidean_distance.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/euclidean_distance.cpp
@@ -45,10 +45,9 @@ public:
return score;
}
double calc_with_limit(TypedCells rhs, double limit) const noexcept override {
- vespalib::ConstArrayRef<AttributeCellType> rhs_vector = rhs.typify<AttributeCellType>();
+ vespalib::ConstArrayRef<AttributeCellType> rhs_vector = rhs.unsafe_typify<AttributeCellType>();
double sum = 0.0;
size_t sz = _lhs_vector.size();
- assert(sz == rhs_vector.size());
for (size_t i = 0; i < sz && sum <= limit; ++i) {
double diff = _lhs_vector[i] - rhs_vector[i];
sum += diff*diff;
diff --git a/searchlib/src/vespa/searchlib/util/token_extractor.cpp b/searchlib/src/vespa/searchlib/util/token_extractor.cpp
index a78f30afe21..6e1573c4551 100644
--- a/searchlib/src/vespa/searchlib/util/token_extractor.cpp
+++ b/searchlib/src/vespa/searchlib/util/token_extractor.cpp
@@ -143,8 +143,6 @@ TokenExtractor::extract(std::vector<SpanTerm>& terms, const document::StringFiel
{
auto tree = StringFieldValue::findTree(trees, SPANTREE_NAME);
if (tree == nullptr) {
- /* field might not be annotated if match type is exact */
- consider_word(terms, text, Span(0, text.size()), nullptr, doc);
return;
}
for (const Annotation & annotation : *tree) {
diff --git a/searchsummary/src/tests/docsummary/tokens_converter/tokens_converter_test.cpp b/searchsummary/src/tests/docsummary/tokens_converter/tokens_converter_test.cpp
index 493cbe0ecba..3d92cee601a 100644
--- a/searchsummary/src/tests/docsummary/tokens_converter/tokens_converter_test.cpp
+++ b/searchsummary/src/tests/docsummary/tokens_converter/tokens_converter_test.cpp
@@ -149,7 +149,7 @@ TEST_F(TokensConverterTest, convert_empty_string)
TEST_F(TokensConverterTest, convert_plain_string)
{
- vespalib::string exp(R"(["Foo Bar Baz"])");
+ vespalib::string exp(R"([])");
StringFieldValue plain_string("Foo Bar Baz");
EXPECT_EQ(exp, convert(plain_string));
}
diff --git a/service-monitor/src/main/java/com/yahoo/vespa/service/health/StateV1HealthMonitor.java b/service-monitor/src/main/java/com/yahoo/vespa/service/health/StateV1HealthMonitor.java
index a4216ee1e41..0d94f719824 100644
--- a/service-monitor/src/main/java/com/yahoo/vespa/service/health/StateV1HealthMonitor.java
+++ b/service-monitor/src/main/java/com/yahoo/vespa/service/health/StateV1HealthMonitor.java
@@ -30,6 +30,7 @@ class StateV1HealthMonitor implements HealthMonitor {
@Override
public void close() {
periodicExecution.cancel();
+ updater.close();
}
}
diff --git a/storage/src/tests/common/testhelper.cpp b/storage/src/tests/common/testhelper.cpp
index 4ca935b7904..f295d955ab2 100644
--- a/storage/src/tests/common/testhelper.cpp
+++ b/storage/src/tests/common/testhelper.cpp
@@ -36,18 +36,6 @@ vdstestlib::DirConfig getStandardConfig(bool storagenode, const std::string & ro
std::string clusterName("storage");
vdstestlib::DirConfig dc;
vdstestlib::DirConfig::Config* config;
- config = &dc.addConfig("fleetcontroller");
- config->set("cluster_name", clusterName);
- config->set("index", "0");
- config->set("zookeeper_server", "\"\"");
- config->set("total_distributor_count", "10");
- config->set("total_storage_count", "10");
- config = &dc.addConfig("upgrading");
- config = &dc.addConfig("load-type");
- config = &dc.addConfig("bucket");
- config = &dc.addConfig("messagebus");
- config = &dc.addConfig("stor-prioritymapping");
- config = &dc.addConfig("stor-bucketdbupdater");
config = &dc.addConfig("metricsmanager");
config->set("consumer[2]");
config->set("consumer[0].name", "\"status\"");
@@ -71,16 +59,8 @@ vdstestlib::DirConfig getStandardConfig(bool storagenode, const std::string & ro
// Easier to see what goes wrong with only 1 thread per disk.
config->set("num_threads", "1");
config->set("num_response_threads", "1");
- config->set("maximum_versions_of_single_document_stored", "0");
- config->set("keep_remove_time_period", "2000000000");
- config->set("revert_time_period", "2000000000");
- // Don't want test to call exit()
- config->set("fail_disk_after_error_count", "0");
- config = &dc.addConfig("stor-bouncer");
config = &dc.addConfig("stor-server");
config->set("cluster_name", clusterName);
- config->set("enable_dead_lock_detector", "false");
- config->set("enable_dead_lock_detector_warnings", "false");
config->set("max_merges_per_node", "25");
config->set("max_merge_queue_size", "20");
config->set("resource_exhaustion_merge_back_pressure_duration_secs", "15.0");
diff --git a/storage/src/tests/common/teststorageapp.cpp b/storage/src/tests/common/teststorageapp.cpp
index e2e2de10702..9e421051184 100644
--- a/storage/src/tests/common/teststorageapp.cpp
+++ b/storage/src/tests/common/teststorageapp.cpp
@@ -35,17 +35,16 @@ TestStorageApp::TestStorageApp(StorageComponentRegisterImpl::UP compReg,
_node_identity("test_cluster", type, index),
_initialized(false)
{
- // Use config to adjust values
+ // Use config to adjust values
vespalib::string clusterName = "mycluster";
uint32_t redundancy = 2;
uint32_t nodeCount = 10;
if (!configId.empty()) {
config::ConfigUri uri(configId);
- std::unique_ptr<vespa::config::content::core::StorServerConfig> serverConfig = config::ConfigGetter<vespa::config::content::core::StorServerConfig>::getConfig(uri.getConfigId(), uri.getContext());
+ auto serverConfig = config::ConfigGetter<vespa::config::content::core::StorServerConfig>::getConfig(uri.getConfigId(), uri.getContext());
clusterName = serverConfig->clusterName;
if (index == 0xffff) index = serverConfig->nodeIndex;
redundancy = config::ConfigGetter<vespa::config::content::StorDistributionConfig>::getConfig(uri.getConfigId(), uri.getContext())->redundancy;
- nodeCount = config::ConfigGetter<vespa::config::content::FleetcontrollerConfig>::getConfig(uri.getConfigId(), uri.getContext())->totalStorageCount;
} else {
if (index == 0xffff) index = 0;
}
diff --git a/storage/src/tests/persistence/filestorage/filestormanagertest.cpp b/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
index f12b85eb2ea..2680eacf49c 100644
--- a/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
+++ b/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
@@ -93,8 +93,6 @@ struct FileStorTestBase : Test {
enum {LONG_WAITTIME=60};
unique_ptr<TestServiceLayerApp> _node;
std::unique_ptr<vdstestlib::DirConfig> config;
- std::unique_ptr<vdstestlib::DirConfig> config2;
- std::unique_ptr<vdstestlib::DirConfig> smallConfig;
const int32_t _waitTime;
const document::DocumentType* _testdoctype1;
@@ -167,27 +165,8 @@ struct FileStorTestBase : Test {
std::string rootOfRoot = "filestormanagertest";
config = std::make_unique<vdstestlib::DirConfig>(getStandardConfig(true, rootOfRoot));
- config2 = std::make_unique<vdstestlib::DirConfig>(*config);
- config2->getConfig("stor-server").set("root_folder", rootOfRoot + "-vdsroot.2");
- config2->getConfig("stor-devices").set("root_folder", rootOfRoot + "-vdsroot.2");
- config2->getConfig("stor-server").set("node_index", "1");
-
- smallConfig = std::make_unique<vdstestlib::DirConfig>(*config);
- vdstestlib::DirConfig::Config& c(smallConfig->getConfig("stor-filestor", true));
- c.set("initial_index_read", "128");
- c.set("use_direct_io", "false");
- c.set("maximum_gap_to_read_through", "64");
-
- assert(system(vespalib::make_string("rm -rf %s", getRootFolder(*config).c_str()).c_str()) == 0);
- assert(system(vespalib::make_string("rm -rf %s", getRootFolder(*config2).c_str()).c_str()) == 0);
- assert(system(vespalib::make_string("mkdir -p %s/disks/d0", getRootFolder(*config).c_str()).c_str()) == 0);
- assert(system(vespalib::make_string("mkdir -p %s/disks/d0", getRootFolder(*config2).c_str()).c_str()) == 0);
- try {
- _node = std::make_unique<TestServiceLayerApp>(NodeIndex(0), config->getConfigId());
- _node->setupDummyPersistence();
- } catch (config::InvalidConfigException& e) {
- fprintf(stderr, "%s\n", e.what());
- }
+ _node = std::make_unique<TestServiceLayerApp>(NodeIndex(0), config->getConfigId());
+ _node->setupDummyPersistence();
_testdoctype1 = _node->getTypeRepo()->getDocumentType("testdoctype1");
}
@@ -227,10 +206,10 @@ struct TestFileStorComponents {
DummyStorageLink top;
FileStorManager* manager;
- explicit TestFileStorComponents(FileStorTestBase& test, bool use_small_config = false)
+ explicit TestFileStorComponents(FileStorTestBase& test)
: manager(nullptr)
{
- auto config_uri = config::ConfigUri((use_small_config ? test.smallConfig : test.config)->getConfigId());
+ auto config_uri = config::ConfigUri(test.config->getConfigId());
auto config = config_from<StorFilestorConfig>(config_uri);
auto fsm = std::make_unique<FileStorManager>(*config, test._node->getPersistenceProvider(),
test._node->getComponentRegister(), *test._node, test._node->get_host_info());
@@ -1255,7 +1234,7 @@ createIterator(DummyStorageLink& link,
}
TEST_F(FileStorManagerTest, visiting) {
- TestFileStorComponents c(*this, true);
+ TestFileStorComponents c(*this);
auto& top = c.top;
// Adding documents to two buckets which we are going to visit
// We want one bucket in one slotfile, and one bucket with a file split
diff --git a/storage/src/tests/persistence/mergehandlertest.cpp b/storage/src/tests/persistence/mergehandlertest.cpp
index 4bd0570efa8..e865c87e15e 100644
--- a/storage/src/tests/persistence/mergehandlertest.cpp
+++ b/storage/src/tests/persistence/mergehandlertest.cpp
@@ -220,11 +220,11 @@ void
MergeHandlerTest::setUpChain(ChainPos pos) {
_nodes.clear();
if (pos != FRONT) {
- _nodes.push_back(api::MergeBucketCommand::Node(2, false));
+ _nodes.emplace_back(2, false);
}
- _nodes.push_back(api::MergeBucketCommand::Node(0, false));
+ _nodes.emplace_back(0, false);
if (pos != BACK) {
- _nodes.push_back(api::MergeBucketCommand::Node(1, false));
+ _nodes.emplace_back(1, false);
}
}
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zts/bindings/AccessTokenResponseEntity.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zts/bindings/AccessTokenResponseEntity.java
index a3063524b93..785c215df18 100644
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zts/bindings/AccessTokenResponseEntity.java
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zts/bindings/AccessTokenResponseEntity.java
@@ -9,6 +9,7 @@ import com.yahoo.vespa.athenz.api.AthenzRole;
import java.time.Instant;
import java.util.List;
+import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;
@@ -19,6 +20,7 @@ import java.util.stream.Stream;
public class AccessTokenResponseEntity {
private final AthenzAccessToken accessToken;
private final Instant expiryTime;
+ // roles can be null (not set in the json response)
private final List<AthenzRole> roles;
public AccessTokenResponseEntity(
@@ -29,7 +31,8 @@ public class AccessTokenResponseEntity {
this.accessToken = new AthenzAccessToken(accessToken);
// We do not know from when, so best we can do is assume now ...
this.expiryTime = Instant.now().plusSeconds(expiresIn);
- this.roles = Stream.of(roles.split(" "))
+ this.roles = Optional.ofNullable(roles).stream()
+ .flatMap(r -> Stream.of(r.split(" ")))
.map(AthenzResourceName::fromString)
.map(AthenzRole::fromResourceName)
.toList();
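
Editor's note: the rewritten stream above avoids the NullPointerException the old roles.split(" ") call would throw when the response carries no roles, matching the new comment that roles can be null. A minimal, self-contained sketch of the same null-safe split, using plain Strings instead of the Athenz types (which are outside this hunk):

import java.util.List;
import java.util.Optional;
import java.util.stream.Stream;

class NullSafeRoleSplit {
    // Splits a space-separated role string into a list, yielding an empty list when the input is null.
    static List<String> parseRoles(String roles) {
        return Optional.ofNullable(roles).stream()         // empty Stream when roles == null
                       .flatMap(r -> Stream.of(r.split(" ")))
                       .toList();
    }

    public static void main(String[] args) {
        System.out.println(parseRoles("reader writer")); // [reader, writer]
        System.out.println(parseRoles(null));            // []
    }
}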
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identity/SiaIdentityProvider.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identity/SiaIdentityProvider.java
index 2c8908a89a6..2f344004780 100644
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identity/SiaIdentityProvider.java
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identity/SiaIdentityProvider.java
@@ -86,6 +86,10 @@ public class SiaIdentityProvider extends AbstractComponent implements ServiceIde
return createIdentitySslContext(keyManager, trustStoreFile, false);
}
+ public SSLContext createIdentitySslContextWithTrustStore(Path trustStoreFile, boolean includeDefaultTruststore) {
+ return createIdentitySslContext(keyManager, trustStoreFile, includeDefaultTruststore);
+ }
+
/**
* Create an SSL context with the given trust store and the key manager from this provider.
* If {@code includeDefaultTruststore} is true, the default trust store will be included.
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/utils/SiaUtils.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/utils/SiaUtils.java
index af0da93edc3..56e64b2261d 100644
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/utils/SiaUtils.java
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/utils/SiaUtils.java
@@ -1,10 +1,10 @@
// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.athenz.utils;
-import com.yahoo.vespa.athenz.api.AthenzIdentity;
-import com.yahoo.vespa.athenz.api.AthenzService;
import com.yahoo.security.KeyUtils;
import com.yahoo.security.X509CertificateUtils;
+import com.yahoo.vespa.athenz.api.AthenzIdentity;
+import com.yahoo.vespa.athenz.api.AthenzService;
import java.io.IOException;
import java.io.UncheckedIOException;
@@ -132,7 +132,7 @@ public class SiaUtils {
try (DirectoryStream<Path> directoryStream = Files.newDirectoryStream(keysDirectory)) {
return StreamSupport.stream(directoryStream.spliterator(), false)
.map(path -> path.getFileName().toString())
- .filter(fileName -> fileName.endsWith(keyFileSuffix))
+ .filter(fileName -> fileName.endsWith(keyFileSuffix) && ! fileName.contains(":role."))
.map(fileName -> fileName.substring(0, fileName.length() - keyFileSuffix.length()))
.map(AthenzService::new)
.collect(toList());
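
Editor's note: the added filter above drops role-certificate keys (file names containing ":role.") so only service identities are reported. A hedged sketch of the same filtering over hypothetical file names mirroring the test case added below:

import java.util.List;

class KeyFileFilterSketch {
    private static final String keyFileSuffix = ".key.pem";

    // Keeps only service key files, dropping role-certificate keys (names containing ":role.").
    static List<String> serviceNames(List<String> fileNames) {
        return fileNames.stream()
                        .filter(name -> name.endsWith(keyFileSuffix) && ! name.contains(":role."))
                        .map(name -> name.substring(0, name.length() - keyFileSuffix.length()))
                        .toList();
    }

    public static void main(String[] args) {
        System.out.println(serviceNames(List.of("my.domain.foo.key.pem",
                                                "my.domain.foo:role.my-role.key.pem",
                                                "my.domain.bar.key.pem")));
        // -> [my.domain.foo, my.domain.bar]
    }
}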
diff --git a/vespa-athenz/src/test/java/com/yahoo/vespa/athenz/utils/SiaUtilsTest.java b/vespa-athenz/src/test/java/com/yahoo/vespa/athenz/utils/SiaUtilsTest.java
index 9ff59236c0c..8274fe7f7a6 100644
--- a/vespa-athenz/src/test/java/com/yahoo/vespa/athenz/utils/SiaUtilsTest.java
+++ b/vespa-athenz/src/test/java/com/yahoo/vespa/athenz/utils/SiaUtilsTest.java
@@ -32,6 +32,7 @@ public class SiaUtilsTest {
Files.createFile(SiaUtils.getPrivateKeyFile(siaRoot, fooService));
AthenzService barService = new AthenzService("my.domain.bar");
Files.createFile(SiaUtils.getPrivateKeyFile(siaRoot, barService));
+ Files.createFile(siaRoot.resolve("keys/my.domain.foo:role.my-role.key.pem"));
List<AthenzIdentity> siaIdentities = SiaUtils.findSiaServices(siaRoot);
assertEquals(2, siaIdentities.size());
diff --git a/vespa-dependencies-enforcer/allowed-maven-dependencies.txt b/vespa-dependencies-enforcer/allowed-maven-dependencies.txt
index f8966d4fc68..7519f3a5211 100644
--- a/vespa-dependencies-enforcer/allowed-maven-dependencies.txt
+++ b/vespa-dependencies-enforcer/allowed-maven-dependencies.txt
@@ -123,9 +123,7 @@ org.apache.opennlp:opennlp-tools:${opennlp.vespa.version}
org.apache.velocity:velocity-engine-core:${velocity.vespa.version}
org.apache.yetus:audience-annotations:0.12.0
org.apache.zookeeper:zookeeper-jute:${zookeeper.client.vespa.version}
-org.apache.zookeeper:zookeeper-jute:3.9.1
org.apache.zookeeper:zookeeper:${zookeeper.client.vespa.version}
-org.apache.zookeeper:zookeeper:3.9.1
org.apiguardian:apiguardian-api:${apiguardian.vespa.version}
org.assertj:assertj-core:${assertj.vespa.version}
org.bouncycastle:bcpkix-jdk18on:${bouncycastle.vespa.version}
diff --git a/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/JsonFeeder.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/JsonFeeder.java
index 11fb6526210..3111815b332 100644
--- a/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/JsonFeeder.java
+++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/JsonFeeder.java
@@ -414,7 +414,7 @@ public class JsonFeeder implements Closeable {
abstract String getDocumentJson(long start, long end);
OperationParseException parseException(String error) {
- JsonLocation location = parser.getTokenLocation();
+ JsonLocation location = parser.currentLocation();
return new OperationParseException(error + " at offset " + location.getByteOffset() +
" (line " + location.getLineNr() + ", column " + location.getColumnNr() + ")");
}
@@ -444,13 +444,13 @@ public class JsonFeeder implements Closeable {
case "create": parameters = parameters.createIfNonExistent(readBoolean()); break;
case "fields": {
expect(START_OBJECT);
- start = parser.getTokenLocation().getByteOffset();
+ start = parser.currentTokenLocation().getByteOffset();
int depth = 1;
while (depth > 0) switch (parser.nextToken()) {
case START_OBJECT: ++depth; break;
case END_OBJECT: --depth; break;
}
- end = parser.getTokenLocation().getByteOffset() + 1;
+ end = parser.currentTokenLocation().getByteOffset() + 1;
break;
}
default: throw parseException("Unexpected field name '" + parser.getText() + "'");
@@ -470,7 +470,7 @@ public class JsonFeeder implements Closeable {
if (end >= start)
throw parseException("Illegal 'fields' object for remove operation");
else
- start = end = parser.getTokenLocation().getByteOffset(); // getDocumentJson advances buffer overwrite head.
+ start = end = parser.currentTokenLocation().getByteOffset(); // getDocumentJson advances buffer overwrite head.
}
else if (end < start)
throw parseException("No 'fields' object for document");
@@ -486,14 +486,14 @@ public class JsonFeeder implements Closeable {
private void expect(JsonToken token) throws IOException {
if (parser.nextToken() != token)
- throw new OperationParseException("Expected '" + token + "' at offset " + parser.getTokenLocation().getByteOffset() +
+ throw new OperationParseException("Expected '" + token + "' at offset " + parser.currentTokenLocation().getByteOffset() +
", but found '" + parser.currentToken() + "' (" + parser.getText() + ")");
}
private String readString() throws IOException {
String value = parser.nextTextValue();
if (value == null)
- throw new OperationParseException("Expected '" + JsonToken.VALUE_STRING + "' at offset " + parser.getTokenLocation().getByteOffset() +
+ throw new OperationParseException("Expected '" + JsonToken.VALUE_STRING + "' at offset " + parser.currentTokenLocation().getByteOffset() +
", but found '" + parser.currentToken() + "' (" + parser.getText() + ")");
return value;
@@ -502,7 +502,7 @@ public class JsonFeeder implements Closeable {
private boolean readBoolean() throws IOException {
Boolean value = parser.nextBooleanValue();
if (value == null)
- throw new OperationParseException("Expected '" + JsonToken.VALUE_FALSE + "' or '" + JsonToken.VALUE_TRUE + "' at offset " + parser.getTokenLocation().getByteOffset() +
+ throw new OperationParseException("Expected '" + JsonToken.VALUE_FALSE + "' or '" + JsonToken.VALUE_TRUE + "' at offset " + parser.currentTokenLocation().getByteOffset() +
", but found '" + parser.currentToken() + "' (" + parser.getText() + ")");
return value;
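
Editor's note: the repeated substitution above replaces Jackson's deprecated getTokenLocation()/getCurrentLocation() with currentTokenLocation()/currentLocation() (available since jackson-core 2.13); the byte-offset arithmetic is unchanged. A small sketch of the two accessors, assuming jackson-core is on the classpath:

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import java.nio.charset.StandardCharsets;

class TokenLocationSketch {
    public static void main(String[] args) throws Exception {
        byte[] json = "{\"fields\":{\"title\":\"x\"}}".getBytes(StandardCharsets.UTF_8);
        try (JsonParser parser = new JsonFactory().createParser(json)) {
            parser.nextToken(); // START_OBJECT
            parser.nextToken(); // FIELD_NAME "fields"
            // currentTokenLocation(): where the current token starts;
            // currentLocation(): where the parser is right now (after the token).
            System.out.println(parser.currentTokenLocation().getByteOffset());
            System.out.println(parser.currentLocation().getByteOffset());
        }
    }
}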
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/DynamicThrottler.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/DynamicThrottler.java
index 951a1776b6f..567788b8501 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/DynamicThrottler.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/DynamicThrottler.java
@@ -28,12 +28,12 @@ public class DynamicThrottler extends StaticThrottler {
public DynamicThrottler(FeedClientBuilderImpl builder) {
super(builder);
- targetInflight = new AtomicLong(8 * minInflight);
+ targetInflight = new AtomicLong(minInflight);
}
@Override
public void sent(long __, CompletableFuture<HttpResponse> ___) {
- double currentInflight = targetInflight.get();
+ double currentInflight = targetInflight();
if (++sent * sent * sent < 1e2 * currentInflight * currentInflight)
return;
@@ -43,22 +43,36 @@ public class DynamicThrottler extends StaticThrottler {
// Use buckets for throughput over inflight, along the log-scale, in [minInflight, maxInflight).
int index = (int) (throughputs.length * log(max(1, min(255, currentInflight / minInflight)))
- / log(256)); // 4096 (server max streams per connection) / 16 (our min per connection)
+ / log(256)); // 512 (server max streams per connection) / 2 (our min per connection)
throughputs[index] = currentThroughput;
// Loop over throughput measurements and pick the one which optimises throughput and latency.
- double choice = currentInflight;
+ double best = currentInflight;
double max = -1;
- for (int i = throughputs.length; i-- > 0; ) {
+ int j = -1, k = -1, choice = 0;
+ double s = 0;
+ for (int i = 0; i < throughputs.length; i++) {
if (throughputs[i] == 0) continue; // Skip unknown values.
double inflight = minInflight * pow(256, (i + 0.5) / throughputs.length);
double objective = throughputs[i] * pow(inflight, (weight - 1)); // Optimise throughput (weight), but also latency (1 - weight).
if (objective > max) {
max = objective;
- choice = inflight;
+ best = inflight;
+ choice = i;
}
+ // Additionally, smooth the throughput values, to reduce the impact of noise, and reduce jumpiness.
+ if (j != -1) {
+ double t = throughputs[j];
+ if (k != -1) throughputs[j] = (2 * t + throughputs[i] + s) / 4;
+ s = t;
+ }
+ k = j;
+ j = i;
}
- long target = (long) ((random() * 0.20 + 0.92) * choice); // Random walk, skewed towards increase.
+ long target = (long) ((random() * 0.40 + 0.84) * best + random() * 4 - 1); // Random step, skewed towards increase.
+ // If the best inflight is at the high end of the known, we override the random walk to speed up upwards exploration.
+ if (choice == j && choice + 1 < throughputs.length)
+ target = (long) (1 + minInflight * pow(256, (choice + 1.5) / throughputs.length));
targetInflight.set(max(minInflight, min(maxInflight, target)));
}
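
Editor's note: the updated comment above reflects the new bounds (512 max streams per connection, 2 per connection minimum, hence the factor 256), and the index expression maps the current inflight count onto a log-scaled histogram of throughput samples. A worked sketch of just that index computation; the bucket count and minInflight values here are assumptions, since the real fields are private to DynamicThrottler:

class ThroughputBucketSketch {
    // Maps inflight in [minInflight, 256 * minInflight) onto a log-scaled bucket index.
    static int bucketIndex(double inflight, long minInflight, int buckets) {
        return (int) (buckets * Math.log(Math.max(1, Math.min(255, inflight / minInflight)))
                              / Math.log(256));
    }

    public static void main(String[] args) {
        long minInflight = 16; // assumed, e.g. one endpoint with 8 connections
        int buckets = 128;     // assumed histogram size
        System.out.println(bucketIndex(16, minInflight, buckets));   // 0   (at the minimum)
        System.out.println(bucketIndex(256, minInflight, buckets));  // 64  (midway on the log scale)
        System.out.println(bucketIndex(4000, minInflight, buckets)); // 127 (near the top bucket)
    }
}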
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpFeedClient.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpFeedClient.java
index a30cfd5ec39..9dd11113c0b 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpFeedClient.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpFeedClient.java
@@ -219,13 +219,13 @@ class HttpFeedClient implements FeedClient {
throw new ResultParseException(documentId,
"Expected 'trace' to be an array, but got '" + parser.currentToken() + "' in: " +
new String(json, UTF_8));
- int start = (int) parser.getTokenLocation().getByteOffset();
+ int start = (int) parser.currentTokenLocation().getByteOffset();
int depth = 1;
while (depth > 0) switch (parser.nextToken()) {
case START_ARRAY: ++depth; break;
case END_ARRAY: --depth; break;
}
- int end = (int) parser.getTokenLocation().getByteOffset() + 1;
+ int end = (int) parser.currentTokenLocation().getByteOffset() + 1;
trace = new String(json, start, end - start, UTF_8);
break;
default:
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/StaticThrottler.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/StaticThrottler.java
index 9010b0a7ad8..f0ee434e87c 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/StaticThrottler.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/StaticThrottler.java
@@ -22,7 +22,7 @@ public class StaticThrottler implements Throttler {
public StaticThrottler(FeedClientBuilderImpl builder) {
minInflight = 2L * builder.connectionsPerEndpoint * builder.endpoints.size();
- maxInflight = 256 * minInflight; // 4096 max streams per connection on the server side.
+ maxInflight = 256 * minInflight; // 512 max streams per connection on the server side.
targetX10 = new AtomicLong(10 * maxInflight); // 10x the actual value to allow for smaller updates.
}
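
Editor's note: the comment update above keeps the arithmetic unchanged: minInflight is twice the total number of connections across endpoints, and maxInflight is 256 times that. A small worked example under assumed builder values (one endpoint, 8 connections per endpoint; the actual defaults are not shown in this diff):

class InflightBoundsSketch {
    public static void main(String[] args) {
        int connectionsPerEndpoint = 8; // assumed
        int endpoints = 1;              // assumed
        long minInflight = 2L * connectionsPerEndpoint * endpoints; // 16
        long maxInflight = 256 * minInflight;                       // 4096
        System.out.println(minInflight + " .. " + maxInflight);
    }
}

With these assumed values the initial target equals minInflight, which is consistent with the 16 asserted in the new DynamicThrottlerTest below.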
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/DynamicThrottlerTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/DynamicThrottlerTest.java
new file mode 100644
index 00000000000..7e07fc6e116
--- /dev/null
+++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/DynamicThrottlerTest.java
@@ -0,0 +1,30 @@
+package ai.vespa.feed.client.impl;
+
+import org.junit.jupiter.api.Test;
+
+import java.net.URI;
+import java.util.List;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+/**
+ * @author jonmv
+ */
+class DynamicThrottlerTest {
+
+ @Test
+ void testThrottler() {
+ DynamicThrottler throttler = new DynamicThrottler(new FeedClientBuilderImpl(List.of(URI.create("http://localhost:8080"))));
+ assertEquals(16, throttler.targetInflight());
+
+ for (int i = 0; i < 30; i++) {
+ throttler.sent(1, null);
+ throttler.success();
+ }
+ assertEquals(18, throttler.targetInflight());
+
+ throttler.throttled(34);
+ assertEquals(17, throttler.targetInflight());
+ }
+
+}
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/HttpRequestStrategyTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/HttpRequestStrategyTest.java
index 54fab9b859b..b1a04ac9ed4 100644
--- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/HttpRequestStrategyTest.java
+++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/HttpRequestStrategyTest.java
@@ -33,6 +33,7 @@ import static ai.vespa.feed.client.FeedClient.CircuitBreaker.State.OPEN;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -105,7 +106,7 @@ class HttpRequestStrategyTest {
cluster.expect((__, vessel) -> vessel.completeExceptionally(new RuntimeException("boom")));
ExecutionException expected = assertThrows(ExecutionException.class,
() -> strategy.enqueue(id1, request).get());
- assertTrue(expected.getCause() instanceof FeedException);
+ assertInstanceOf(FeedException.class, expected.getCause());
assertEquals("java.lang.RuntimeException: boom", expected.getCause().getMessage());
assertEquals(1, strategy.stats().requests());
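
Editor's note: the assertion rewrite above uses JUnit 5's assertInstanceOf, which both checks the type and returns the value already cast, so follow-up assertions need no explicit cast. A brief sketch with a hypothetical exception cause:

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;

class AssertInstanceOfSketch {
    void example() {
        Throwable cause = new IllegalStateException("boom"); // hypothetical cause
        // Fails with a descriptive message if the type does not match, and returns the cast value.
        IllegalStateException typed = assertInstanceOf(IllegalStateException.class, cause);
        assertEquals("boom", typed.getMessage());
    }
}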
@@ -200,7 +201,7 @@ class HttpRequestStrategyTest {
@Override public int retries() { return 1; }
})
.setCircuitBreaker(breaker)
- .setConnectionsPerEndpoint(1),
+ .setConnectionsPerEndpoint(3), // Must be >= 0.5x test ops.
cluster);
DocumentId id1 = DocumentId.of("ns", "type", "1");
diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java
index b483d6977d6..6e07661235e 100644
--- a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java
+++ b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java
@@ -1397,6 +1397,7 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
Phaser phaser = new Phaser(2); // Synchronize this thread (dispatch) with the visitor callback thread.
AtomicReference<String> error = new AtomicReference<>(); // Set if error occurs during processing of visited documents.
callback.onStart(response, fullyApplied);
+ final AtomicLong locallyReceivedDocCount = new AtomicLong(0);
VisitorControlHandler controller = new VisitorControlHandler() {
final ScheduledFuture<?> abort = streaming ? visitDispatcher.schedule(this::abort, visitTimeout(request), MILLISECONDS) : null;
final AtomicReference<VisitorSession> session = new AtomicReference<>();
@@ -1410,7 +1411,10 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
try (response) {
callback.onEnd(response);
- response.writeDocumentCount(getVisitorStatistics() == null ? 0 : getVisitorStatistics().getDocumentsVisited());
+ // Locally tracked document count is only correct if we have a local data handler.
+ // Otherwise, we have to report the statistics received transitively from the content nodes.
+ long statsDocCount = (getVisitorStatistics() != null ? getVisitorStatistics().getDocumentsVisited() : 0);
+ response.writeDocumentCount(parameters.getLocalDataHandler() != null ? locallyReceivedDocCount.get() : statsDocCount);
if (session.get() != null)
response.writeTrace(session.get().getTrace());
@@ -1456,6 +1460,7 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
if (m instanceof PutDocumentMessage put) document = put.getDocumentPut().getDocument();
else if (parameters.visitRemoves() && m instanceof RemoveDocumentMessage remove) removeId = remove.getDocumentId();
else throw new UnsupportedOperationException("Got unsupported message type: " + m.getClass().getName());
+ locallyReceivedDocCount.getAndAdd(1);
callback.onDocument(response,
document,
removeId,
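
Editor's note: the change above counts documents and removes as they are delivered to the local data handler and prefers that counter over the visitor statistics, which, per the added comment, are only authoritative when there is no local handler. A minimal sketch of that selection logic, with simplified types standing in for the visitor API:

import java.util.concurrent.atomic.AtomicLong;

class DocumentCountSketch {
    private final AtomicLong locallyReceived = new AtomicLong(0);

    // Called once per locally handled put/remove message.
    void onLocalEntry() { locallyReceived.getAndAdd(1); }

    // Chooses which count to report when the visit ends.
    long documentCount(boolean hasLocalDataHandler, Long statsDocumentsVisited) {
        long statsCount = statsDocumentsVisited != null ? statsDocumentsVisited : 0;
        return hasLocalDataHandler ? locallyReceived.get() : statsCount;
    }
}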
diff --git a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java
index 58cf34712aa..b2c0b1b2ce8 100644
--- a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java
+++ b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java
@@ -290,7 +290,7 @@ public class DocumentV1ApiTest {
parameters.getLocalDataHandler().onMessage(new RemoveDocumentMessage(new DocumentId("id:space:music::t-square-truth")), tokens.get(3));
VisitorStatistics statistics = new VisitorStatistics();
statistics.setBucketsVisited(1);
- statistics.setDocumentsVisited(3);
+ statistics.setDocumentsVisited(123); // Ignored in favor of tracking actually emitted entries
parameters.getControlHandler().onVisitorStatistics(statistics);
parameters.getControlHandler().onDone(VisitorControlHandler.CompletionCode.TIMEOUT, "timeout is OK");
});
@@ -323,7 +323,7 @@ public class DocumentV1ApiTest {
"remove": "id:space:music::t-square-truth"
}
],
- "documentCount": 3,
+ "documentCount": 4,
"trace": [
{ "message": "Tracy Chapman" },
{
@@ -441,13 +441,18 @@ public class DocumentV1ApiTest {
assertEquals("[Content:cluster=content]", parameters.getRemoteDataHandler());
assertEquals("[document]", parameters.fieldSet());
assertEquals(60_000L, parameters.getSessionTimeoutMs());
+ VisitorStatistics statistics = new VisitorStatistics();
+ statistics.setBucketsVisited(1);
+ statistics.setDocumentsVisited(2);
+ // Visiting with remote data handlers should report the remotely aggregated statistics
+ parameters.getControlHandler().onVisitorStatistics(statistics);
parameters.getControlHandler().onDone(VisitorControlHandler.CompletionCode.SUCCESS, "We made it!");
});
response = driver.sendRequest("http://localhost/document/v1/space/music/docid?destinationCluster=content&selection=true&cluster=content&timeout=60", POST);
assertSameJson("""
{
"pathId": "/document/v1/space/music/docid",
- "documentCount": 0
+ "documentCount": 2
}""",
response.readAll());
assertEquals(200, response.getStatus());
@@ -488,7 +493,7 @@ public class DocumentV1ApiTest {
assertSameJson("""
{
"pathId": "/document/v1/space/music/docid",
- "documentCount": 0
+ "documentCount": 1
}""",
response.readAll());
assertEquals(200, response.getStatus());
@@ -542,7 +547,7 @@ public class DocumentV1ApiTest {
assertSameJson("""
{
"pathId": "/document/v1/space/music/docid",
- "documentCount": 0,
+ "documentCount": 1,
"message": "boom"
}""",
response.readAll());
diff --git a/vespajlib/src/main/java/com/yahoo/concurrent/maintenance/Maintainer.java b/vespajlib/src/main/java/com/yahoo/concurrent/maintenance/Maintainer.java
index 15bec762119..f3f0a0684a0 100644
--- a/vespajlib/src/main/java/com/yahoo/concurrent/maintenance/Maintainer.java
+++ b/vespajlib/src/main/java/com/yahoo/concurrent/maintenance/Maintainer.java
@@ -109,7 +109,7 @@ public abstract class Maintainer implements Runnable {
/** Convenience methods to convert attempts and failures into a success factor deviation from the baseline, and return */
protected final double asSuccessFactorDeviation(int attempts, int failures) {
double factor = attempts == 0 ? 1.0 : 1 - (double) failures / attempts;
- return new BigDecimal(factor - successFactorBaseline).setScale(2, RoundingMode.HALF_UP).doubleValue();
+ return new BigDecimal(factor - successFactorBaseline).setScale(5, RoundingMode.HALF_UP).doubleValue();
}
/** Returns the interval at which this job is set to run */
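
Editor's note: the scale change above matters because success-factor deviations are usually tiny; with two decimal places a small deviation rounds away entirely, while five decimals preserve it. A worked example with assumed inputs and an assumed successFactorBaseline of 1.0:

import java.math.BigDecimal;
import java.math.RoundingMode;

class SuccessFactorSketch {
    // Mirrors asSuccessFactorDeviation, with the baseline as an assumption.
    static double deviation(int attempts, int failures, int scale) {
        double successFactorBaseline = 1.0; // assumed; the real baseline is configured on the Maintainer
        double factor = attempts == 0 ? 1.0 : 1 - (double) failures / attempts;
        return new BigDecimal(factor - successFactorBaseline).setScale(scale, RoundingMode.HALF_UP).doubleValue();
    }

    public static void main(String[] args) {
        System.out.println(deviation(1000, 3, 2)); // 0.0    -- the small deviation is rounded away at scale 2
        System.out.println(deviation(1000, 3, 5)); // -0.003 -- preserved at scale 5
    }
}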
diff --git a/vespajlib/src/test/java/com/yahoo/slime/JsonBenchmark.java b/vespajlib/src/test/java/com/yahoo/slime/JsonBenchmark.java
index ee755a44010..cccc9667e11 100644
--- a/vespajlib/src/test/java/com/yahoo/slime/JsonBenchmark.java
+++ b/vespajlib/src/test/java/com/yahoo/slime/JsonBenchmark.java
@@ -43,7 +43,7 @@ public class JsonBenchmark {
try (JsonParser jsonParser = jsonFactory.createParser(json)) {
JsonToken array = jsonParser.nextToken();
for (JsonToken token = jsonParser.nextToken(); !JsonToken.END_ARRAY.equals(token); token = jsonParser.nextToken()) {
- if (JsonToken.FIELD_NAME.equals(token) && "weight".equals(jsonParser.getCurrentName())) {
+ if (JsonToken.FIELD_NAME.equals(token) && "weight".equals(jsonParser.currentName())) {
token = jsonParser.nextToken();
count += jsonParser.getLongValue();
}
diff --git a/vespalog/src/logger/runserver.cpp b/vespalog/src/logger/runserver.cpp
index 9a0a499cd54..4e0141f88dc 100644
--- a/vespalog/src/logger/runserver.cpp
+++ b/vespalog/src/logger/runserver.cpp
@@ -6,7 +6,7 @@
#include <cerrno>
#include <unistd.h>
#include <csignal>
-
+#include <poll.h>
#include <sys/select.h>
#include <sys/types.h>
#include <sys/wait.h>
@@ -18,6 +18,7 @@
#include "llreader.h"
#include <vespa/log/log.h>
#include <chrono>
+#include <array>
LOG_SETUP("runserver");
@@ -179,8 +180,6 @@ int loop(const char *svc, char * const * run)
pstdout[0], pstdout[1],
pstderr[0], pstderr[1]);
- int high = 1 + pstdout[0] + pstderr[0];
-
pid_t child = fork();
if (child == 0) {
@@ -237,24 +236,24 @@ int loop(const char *svc, char * const * run)
bool outeof = false;
bool erreof = false;
-
+ constexpr int stdout_idx = 0, stderr_idx = 1;
+ std::array<pollfd, 2> fds{};
int wstat = 0;
while (child || !outeof || !erreof) {
- struct timeval timeout;
-
- timeout.tv_sec = 0;
- timeout.tv_usec = 100000; // == 100 ms == 1/10 s
-
- fd_set pipes;
-
- FD_ZERO(&pipes);
- if (!outeof) FD_SET(pstdout[0], &pipes);
- if (!erreof) FD_SET(pstderr[0], &pipes);
-
- int n = select(high, &pipes, NULL, NULL, &timeout);
+ // Entries with negative fds are entirely ignored by the kernel.
+ fds[stdout_idx].fd = !outeof ? pstdout[0] : -1;
+ fds[stdout_idx].events = POLLIN;
+ fds[stdout_idx].revents = 0;
+ fds[stderr_idx].fd = !erreof ? pstderr[0] : -2;
+ fds[stderr_idx].events = POLLIN;
+ fds[stderr_idx].revents = 0;
+
+ constexpr int poll_timeout_ms = 100;
+ int n = poll(fds.data(), fds.size(), poll_timeout_ms);
if (n > 0) {
- if (FD_ISSET(pstdout[0], &pipes)) {
+ constexpr short ev_mask = POLLIN | POLLERR | POLLHUP;
+ if ((fds[stdout_idx].revents & ev_mask) != 0) {
LOG(debug, "out reader has input");
if (outReader.blockRead()) {
while (outReader.hasInput()) {
@@ -267,7 +266,7 @@ int loop(const char *svc, char * const * run)
close(pstdout[0]);
}
}
- if (FD_ISSET(pstderr[0], &pipes)) {
+ if ((fds[stderr_idx].revents & ev_mask) != 0) {
LOG(debug, "err reader has input");
if (errReader.blockRead()) {
while (errReader.hasInput()) {
diff --git a/vespamalloc/src/vespamalloc/malloc/mmappool.cpp b/vespamalloc/src/vespamalloc/malloc/mmappool.cpp
index cee709ed0ed..ba330d14125 100644
--- a/vespamalloc/src/vespamalloc/malloc/mmappool.cpp
+++ b/vespamalloc/src/vespamalloc/malloc/mmappool.cpp
@@ -58,9 +58,9 @@ MMapPool::mmap(size_t sz) {
}
buf = ::mmap(nullptr, sz, prot, flags, -1, 0);
if (buf == MAP_FAILED) {
- fprintf(_G_logFile, "Failed mmaping anonymous of size %ld errno(%d) from : ", sz, errno);
+ fprintf(_G_logFile, "Will exit due to: Failed mmaping anonymous of size %ld errno(%d) from : ", sz, errno);
logStackTrace();
- abort();
+ std::quick_exit(66);
}
} else {
if (_has_hugepage_failure_just_happened) {
diff --git a/vespamalloc/src/vespamalloc/util/callstack.cpp b/vespamalloc/src/vespamalloc/util/callstack.cpp
index b8449c89a72..56b634bca33 100644
--- a/vespamalloc/src/vespamalloc/util/callstack.cpp
+++ b/vespamalloc/src/vespamalloc/util/callstack.cpp
@@ -1,39 +1,59 @@
// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <dlfcn.h>
-#include <ctype.h>
+#include <cctype>
#include <vespamalloc/util/callstack.h>
+#include <string>
+#include <cxxabi.h>
namespace vespamalloc {
-const char *
-dlAddr(const void * func) {
- static const char * _unknown = "UNKNOWN";
- const char * funcName = _unknown;
+namespace {
+
+std::string
+demangle(const char *native) {
+ int status = 0;
+ size_t size = 0;
+ char *unmangled = abi::__cxa_demangle(native, nullptr, &size, &status);
+ if (unmangled == nullptr) {
+ return ""; // Demangling failed for some reason. TODO return `native` instead?
+ }
+ std::string result(unmangled);
+ free(unmangled);
+ return result;
+}
+
+
+std::string
+dlAddr(const void *func) {
+ static std::string _unknown = "UNKNOWN";
Dl_info info;
int ret = dladdr(func, &info);
if (ret != 0) {
- funcName = info.dli_sname;
+ return demangle(info.dli_sname);
}
- return funcName;
+ return _unknown;
+}
+
}
namespace {
void
verifyAndCopy(const void *addr, char *v, size_t sz) {
size_t pos(0);
- const char *sym = dlAddr(addr);
- for (; sym && (sym[pos] != '\0') && (pos < sz - 1); pos++) {
+ std::string sym = dlAddr(addr);
+ for (; (pos < sym.size()) && (pos < sz - 1); pos++) {
char c(sym[pos]);
v[pos] = isprint(c) ? c : '.';
}
v[pos] = '\0';
}
+
}
void
StackReturnEntry::info(FILE * os) const
{
- static char tmp[0x400];
+ char tmp[0x400];
verifyAndCopy(_return, tmp, sizeof(tmp));
fprintf(os, "%s(%p)", tmp, _return);
}
@@ -41,8 +61,8 @@ StackReturnEntry::info(FILE * os) const
asciistream &
operator << (asciistream & os, const StackReturnEntry & v)
{
- static char tmp[0x100];
- static char t[0x200];
+ char tmp[0x100];
+ char t[0x200];
verifyAndCopy(v._return, tmp, sizeof(tmp));
snprintf(t, sizeof(t), "%s(%p)", tmp, v._return);
return os << t;
diff --git a/vespamalloc/src/vespamalloc/util/callstack.h b/vespamalloc/src/vespamalloc/util/callstack.h
index 3773d3c08b2..f3b177ea5f6 100644
--- a/vespamalloc/src/vespamalloc/util/callstack.h
+++ b/vespamalloc/src/vespamalloc/util/callstack.h
@@ -9,8 +9,6 @@
namespace vespamalloc {
-const char * dlAddr(const void * addr);
-
class StackReturnEntry {
public:
StackReturnEntry(const void * returnAddress = nullptr,
diff --git a/zkfacade/src/main/java/com/yahoo/vespa/curator/stats/LatencyMetrics.java b/zkfacade/src/main/java/com/yahoo/vespa/curator/stats/LatencyMetrics.java
index 01db658594f..fd6008ac4f9 100644
--- a/zkfacade/src/main/java/com/yahoo/vespa/curator/stats/LatencyMetrics.java
+++ b/zkfacade/src/main/java/com/yahoo/vespa/curator/stats/LatencyMetrics.java
@@ -59,7 +59,7 @@ public class LatencyMetrics {
/** Returns the average number of intervals that ended in the period per second. */
public double endHz() { return roundTo3DecimalPlaces(endHz); }
- /** Returns the average load of the implied time periond, for each thread with non-zero load, with 3 decimal places precision. */
+ /** Returns the average load of the implied time period, for each thread with non-zero load, with 3 decimal places precision. */
public Map<String, Double> loadByThread() {
Map<String, Double> result = new TreeMap<>();
loadByThread.forEach((name, load) -> result.put(name, roundTo3DecimalPlaces(load)));
diff --git a/zookeeper-server/CMakeLists.txt b/zookeeper-server/CMakeLists.txt
index 5e40f1e2246..0fc2eeec46a 100644
--- a/zookeeper-server/CMakeLists.txt
+++ b/zookeeper-server/CMakeLists.txt
@@ -1,4 +1,4 @@
# Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
add_subdirectory(zookeeper-server-common)
add_subdirectory(zookeeper-server)
-add_subdirectory(zookeeper-server-3.9.2)
+
diff --git a/zookeeper-server/pom.xml b/zookeeper-server/pom.xml
index 4b7f4be7a7f..e0838a9eefa 100644
--- a/zookeeper-server/pom.xml
+++ b/zookeeper-server/pom.xml
@@ -14,7 +14,6 @@
<modules>
<module>zookeeper-server-common</module>
<module>zookeeper-server</module>
- <module>zookeeper-server-3.9.2</module>
</modules>
<dependencies>
<dependency>
diff --git a/zookeeper-server/zookeeper-server-3.9.2/CMakeLists.txt b/zookeeper-server/zookeeper-server-3.9.2/CMakeLists.txt
deleted file mode 100644
index de5780610d9..00000000000
--- a/zookeeper-server/zookeeper-server-3.9.2/CMakeLists.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-# Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-install_jar(zookeeper-server-3.9.2-jar-with-dependencies.jar)
diff --git a/zookeeper-server/zookeeper-server-3.9.2/pom.xml b/zookeeper-server/zookeeper-server-3.9.2/pom.xml
deleted file mode 100644
index 791c026234a..00000000000
--- a/zookeeper-server/zookeeper-server-3.9.2/pom.xml
+++ /dev/null
@@ -1,99 +0,0 @@
-<?xml version="1.0"?>
-<!-- Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>zookeeper-server-parent</artifactId>
- <version>8-SNAPSHOT</version>
- <relativePath>../pom.xml</relativePath>
- </parent>
- <artifactId>zookeeper-server-3.9.2</artifactId>
- <packaging>container-plugin</packaging>
- <version>8-SNAPSHOT</version>
- <properties>
- <zookeeper.version>3.9.2</zookeeper.version>
- </properties>
- <dependencies>
- <dependency>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>zookeeper-server-common</artifactId>
- <version>${project.version}</version>
- </dependency>
- <dependency>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>zookeeper-client-common</artifactId>
- <version>${project.version}</version>
- <exclusions>
- <exclusion>
- <!-- Don't use ZK version from zookeeper-client-common -->
- <groupId>org.apache.zookeeper</groupId>
- <artifactId>zookeeper</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
- <dependency>
- <groupId>org.apache.zookeeper</groupId>
- <artifactId>zookeeper</artifactId>
- <version>${zookeeper.version}</version>
- <exclusions>
- <!--
- Container provides wiring for all common log libraries
- Duplicate embedding results in various warnings being printed to stderr
- -->
- <exclusion>
- <groupId>org.slf4j</groupId>
- <artifactId>slf4j-api</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
- <!-- snappy-java and metrics-core are included here
- to be able to work with ZooKeeper 3.7.0 due to
- class loading issues -->
- <dependency>
- <groupId>io.dropwizard.metrics</groupId>
- <artifactId>metrics-core</artifactId>
- <scope>compile</scope>
- <exclusions>
- <exclusion>
- <groupId>org.slf4j</groupId>
- <artifactId>slf4j-api</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
- <dependency>
- <groupId>org.xerial.snappy</groupId>
- <artifactId>snappy-java</artifactId>
- <scope>compile</scope>
- </dependency>
- </dependencies>
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-compiler-plugin</artifactId>
- <configuration>
- <compilerArgs>
- <!-- Turn off classfile warnings where spotbugs is pulled in transitively. -->
- <arg>-Xlint:all</arg>
- <arg>-Xlint:-classfile</arg>
- <arg>-Werror</arg>
- </compilerArgs>
- </configuration>
- </plugin>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-install-plugin</artifactId>
- </plugin>
- <plugin>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>bundle-plugin</artifactId>
- <extensions>true</extensions>
- <configuration>
- <importPackage>com.sun.management</importPackage>
- <bundleSymbolicName>zookeeper-server</bundleSymbolicName>
- </configuration>
- </plugin>
- </plugins>
- </build>
-</project>
diff --git a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/com/yahoo/vespa/zookeeper/ConfigServerZooKeeperServer.java b/zookeeper-server/zookeeper-server-3.9.2/src/main/java/com/yahoo/vespa/zookeeper/ConfigServerZooKeeperServer.java
deleted file mode 100644
index a7cd14c415f..00000000000
--- a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/com/yahoo/vespa/zookeeper/ConfigServerZooKeeperServer.java
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.zookeeper;
-
-import com.yahoo.cloud.config.ZookeeperServerConfig;
-import com.yahoo.component.AbstractComponent;
-import com.yahoo.component.annotation.Inject;
-import com.yahoo.vespa.zookeeper.server.VespaZooKeeperServer;
-
-import java.nio.file.Path;
-
-/**
- *
- * Server used for starting config server, needed to be able to have different behavior for hosted and
- * self-hosted Vespa (controlled by zookeeperServerConfig.dynamicReconfiguration).
- *
- * @author Harald Musum
- */
-public class ConfigServerZooKeeperServer extends AbstractComponent implements VespaZooKeeperServer {
-
- private final VespaZooKeeperServer zooKeeperServer;
-
- @Inject
- public ConfigServerZooKeeperServer(ZookeeperServerConfig zookeeperServerConfig) {
- this.zooKeeperServer = zookeeperServerConfig.dynamicReconfiguration()
- ? new ReconfigurableVespaZooKeeperServer(new Reconfigurer(new VespaZooKeeperAdminImpl()), zookeeperServerConfig)
- : new VespaZooKeeperServerImpl(zookeeperServerConfig);
- }
-
- @Override
- public void deconstruct() { zooKeeperServer.shutdown(); }
-
- @Override
- public void shutdown() {
- zooKeeperServer.shutdown();
- }
-
- @Override
- public void start(Path configFilePath) {
- zooKeeperServer.start(configFilePath);
- }
-
- @Override
- public boolean reconfigurable() { return zooKeeperServer.reconfigurable(); }
-
-}
diff --git a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/com/yahoo/vespa/zookeeper/ReconfigurableVespaZooKeeperServer.java b/zookeeper-server/zookeeper-server-3.9.2/src/main/java/com/yahoo/vespa/zookeeper/ReconfigurableVespaZooKeeperServer.java
deleted file mode 100644
index d869cbb6938..00000000000
--- a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/com/yahoo/vespa/zookeeper/ReconfigurableVespaZooKeeperServer.java
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.zookeeper;
-
-import ai.vespa.validation.Validation;
-import com.yahoo.cloud.config.ZookeeperServerConfig;
-import com.yahoo.component.AbstractComponent;
-import com.yahoo.component.annotation.Inject;
-import com.yahoo.vespa.zookeeper.server.VespaZooKeeperServer;
-
-import java.nio.file.Path;
-import java.time.Duration;
-
-/**
- * Starts or reconfigures zookeeper cluster.
- * The QuorumPeer conditionally created here is owned by the Reconfigurer;
- * when it already has a peer, that peer is used here in case start or shutdown is required.
- * Guarantees that server is up by writing a node to ZooKeeper successfully before
- * returning from constructor.
- *
- * @author hmusum
- */
-public class ReconfigurableVespaZooKeeperServer extends AbstractComponent implements VespaZooKeeperServer {
-
- private QuorumPeer peer;
-
- @Inject
- public ReconfigurableVespaZooKeeperServer(Reconfigurer reconfigurer, ZookeeperServerConfig zookeeperServerConfig) {
- Validation.require(zookeeperServerConfig.dynamicReconfiguration(),
- zookeeperServerConfig.dynamicReconfiguration(),
- "dynamicReconfiguration must be true");
- peer = reconfigurer.startOrReconfigure(zookeeperServerConfig, this, () -> peer = new VespaQuorumPeer());
- }
-
- @Override
- public void shutdown() {
- peer.shutdown(Duration.ofMinutes(1));
- }
-
- @Override
- public void start(Path configFilePath) {
- peer.start(configFilePath);
- }
-
- @Override
- public boolean reconfigurable() {
- return true;
- }
-
-}
diff --git a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/com/yahoo/vespa/zookeeper/VespaMtlsAuthenticationProvider.java b/zookeeper-server/zookeeper-server-3.9.2/src/main/java/com/yahoo/vespa/zookeeper/VespaMtlsAuthenticationProvider.java
deleted file mode 100644
index 90554910293..00000000000
--- a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/com/yahoo/vespa/zookeeper/VespaMtlsAuthenticationProvider.java
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.zookeeper;
-
-import com.yahoo.security.X509SslContext;
-import com.yahoo.security.tls.TlsContext;
-import com.yahoo.security.tls.TransportSecurityUtils;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.common.ClientX509Util;
-import org.apache.zookeeper.common.X509Exception;
-import org.apache.zookeeper.data.Id;
-import org.apache.zookeeper.server.ServerCnxn;
-import org.apache.zookeeper.server.auth.AuthenticationProvider;
-import org.apache.zookeeper.server.auth.X509AuthenticationProvider;
-
-import javax.net.ssl.KeyManager;
-import javax.net.ssl.X509KeyManager;
-import javax.net.ssl.X509TrustManager;
-import java.security.cert.X509Certificate;
-import java.util.logging.Logger;
-
-/**
- * A {@link AuthenticationProvider} to be used in combination with Vespa mTLS.
- *
- * @author bjorncs
- */
-public class VespaMtlsAuthenticationProvider extends X509AuthenticationProvider {
-
- private static final Logger log = Logger.getLogger(VespaMtlsAuthenticationProvider.class.getName());
-
- public VespaMtlsAuthenticationProvider() {
- super(null, null);
- }
-
- @Override
- public KeeperException.Code handleAuthentication(ServerCnxn cnxn, byte[] authData) {
- // Vespa's mTLS peer authorization rules are performed by the underlying trust manager implementation.
- // The client is authorized once the SSL handshake has completed.
- X509Certificate[] certificateChain = (X509Certificate[]) cnxn.getClientCertificateChain();
- if (certificateChain == null || certificateChain.length == 0) {
- log.warning("Client not authenticated - should not be possible with clientAuth=NEED");
- return KeeperException.Code.AUTHFAILED;
- }
- X509Certificate certificate = certificateChain[0];
- cnxn.addAuthInfo(new Id(getScheme(), certificate.getSubjectX500Principal().getName()));
- return KeeperException.Code.OK;
- }
-
-}
diff --git a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/com/yahoo/vespa/zookeeper/VespaQuorumPeer.java b/zookeeper-server/zookeeper-server-3.9.2/src/main/java/com/yahoo/vespa/zookeeper/VespaQuorumPeer.java
deleted file mode 100644
index dd5ac4e252b..00000000000
--- a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/com/yahoo/vespa/zookeeper/VespaQuorumPeer.java
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.zookeeper;
-
-import com.yahoo.protect.Process;
-import org.apache.zookeeper.server.admin.AdminServer;
-import org.apache.zookeeper.server.quorum.QuorumPeerConfig;
-import org.apache.zookeeper.server.quorum.QuorumPeerMain;
-
-import java.io.IOException;
-import java.nio.file.Path;
-import java.time.Duration;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-
-/**
- * Starts or stops a ZooKeeper server. Extends QuorumPeerMain to be able to call initializeAndRun() and wraps
- * exceptions so that it can be used by code that does not depend on ZooKeeper.
- *
- * @author hmusum
- */
-class VespaQuorumPeer extends QuorumPeerMain implements QuorumPeer {
-
- private static final Logger log = java.util.logging.Logger.getLogger(VespaQuorumPeer.class.getName());
-
- @Override
- public void start(Path path) {
- initializeAndRun(new String[]{ path.toFile().getAbsolutePath()});
- }
-
- @Override
- public void shutdown(Duration timeout) {
- if (quorumPeer != null) {
- log.log(Level.FINE, "Shutting down ZooKeeper server");
- try {
- quorumPeer.shutdown();
- quorumPeer.join(timeout.toMillis()); // Wait for shutdown to complete
- if (quorumPeer.isAlive())
- throw new IllegalStateException("Peer still alive after " + timeout);
- } catch (RuntimeException | InterruptedException e) {
- // If shutdown fails, we have no other option than forcing the JVM to stop and letting it be restarted.
- //
- // When a VespaZooKeeperServer component receives a new config, the container will try to start a new
- // server with the new config, this will fail until the old server is deconstructed. If the old server
- // fails to deconstruct/shutdown, the new one will never start and if that happens forcing a restart is
- // the better option.
- Process.logAndDie("Failed to shut down ZooKeeper server properly, forcing shutdown", e);
- }
- }
- }
-
- @Override
- protected void initializeAndRun(String[] args) {
- try {
- super.initializeAndRun(args);
- } catch (QuorumPeerConfig.ConfigException | IOException | AdminServer.AdminServerException e) {
- throw new RuntimeException("Exception when initializing or running ZooKeeper server", e);
- }
- }
-
-}
diff --git a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/com/yahoo/vespa/zookeeper/VespaZooKeeperAdminImpl.java b/zookeeper-server/zookeeper-server-3.9.2/src/main/java/com/yahoo/vespa/zookeeper/VespaZooKeeperAdminImpl.java
deleted file mode 100644
index c74a020bcf4..00000000000
--- a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/com/yahoo/vespa/zookeeper/VespaZooKeeperAdminImpl.java
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.zookeeper;
-
-import com.yahoo.cloud.config.ZookeeperServerConfig;
-import com.yahoo.net.HostName;
-import com.yahoo.vespa.zookeeper.client.ZkClientConfigBuilder;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.ZooDefs;
-import org.apache.zookeeper.admin.ZooKeeperAdmin;
-import org.apache.zookeeper.data.ACL;
-import java.nio.charset.StandardCharsets;
-import java.time.Duration;
-import java.time.Instant;
-import java.util.List;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-
-import static com.yahoo.yolean.Exceptions.uncheck;
-
-/**
- * @author hmusum
- */
-@SuppressWarnings("unused") // Created by injection
-public class VespaZooKeeperAdminImpl implements VespaZooKeeperAdmin {
-
- private static final Logger log = java.util.logging.Logger.getLogger(VespaZooKeeperAdminImpl.class.getName());
-
-
- @SuppressWarnings("try")
- @Override
- public void reconfigure(String connectionSpec, String servers) throws ReconfigException {
- try (ZooKeeperAdmin zooKeeperAdmin = createAdmin(connectionSpec)) {
- long fromConfig = -1;
- // Using string parameters because the List variant of reconfigure fails to join empty lists (observed on 3.5.6, fixed in 3.7.0).
- log.log(Level.INFO, "Applying ZooKeeper config: " + servers);
- byte[] appliedConfig = zooKeeperAdmin.reconfigure(null, null, servers, fromConfig, null);
- log.log(Level.INFO, "Applied ZooKeeper config: " + new String(appliedConfig, StandardCharsets.UTF_8));
-
- // Verify by issuing a write operation; this is only accepted once new quorum is obtained.
- List<ACL> acl = ZooDefs.Ids.OPEN_ACL_UNSAFE;
- String node = zooKeeperAdmin.create("/reconfigure-dummy-node", new byte[0], acl, CreateMode.EPHEMERAL_SEQUENTIAL);
- zooKeeperAdmin.delete(node, -1);
-
- log.log(Level.INFO, "Verified ZooKeeper config: " + new String(appliedConfig, StandardCharsets.UTF_8));
- }
- catch ( KeeperException.ReconfigInProgress
- | KeeperException.ConnectionLossException
- | KeeperException.NewConfigNoQuorum e) {
- throw new ReconfigException(e);
- }
- catch (KeeperException | InterruptedException e) {
- throw new RuntimeException(e);
- }
- }
-
- private ZooKeeperAdmin createAdmin(String connectionSpec) {
- return uncheck(() -> new ZooKeeperAdmin(connectionSpec, (int) sessionTimeout().toMillis(),
- (event) -> log.log(Level.FINE, event.toString()), new ZkClientConfigBuilder().toConfig()));
- }
-
- /** Creates a node in zookeeper, with hostname as part of node name, this ensures that server is up and working before returning */
- @SuppressWarnings("try")
- void createDummyNode(ZookeeperServerConfig zookeeperServerConfig) {
- int sleepTime = 2_000;
- try (ZooKeeperAdmin zooKeeperAdmin = createAdmin(localConnectionSpec(zookeeperServerConfig))) {
- Instant end = Instant.now().plus(Duration.ofMinutes(5));
- Exception exception = null;
- do {
- try {
- zooKeeperAdmin.create("/dummy-node-" + HostName.getLocalhost(), new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
- return;
- } catch (KeeperException e) {
- if (e instanceof KeeperException.NodeExistsException) {
- try {
- zooKeeperAdmin.setData("/dummy-node-" + HostName.getLocalhost(), new byte[0], -1);
- return;
- } catch (KeeperException ex) {
- log.log(Level.FINE, e.getMessage());
- Thread.sleep(sleepTime);
- continue;
- }
- }
- log.log(Level.FINE, e.getMessage());
- exception = e;
- Thread.sleep(sleepTime);
- }
- } while (Instant.now().isBefore(end));
- throw new RuntimeException("Unable to create dummy node: ", exception);
- } catch (InterruptedException e) {
- throw new RuntimeException(e);
- }
- }
-
-}
-
diff --git a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/com/yahoo/vespa/zookeeper/VespaZooKeeperServerImpl.java b/zookeeper-server/zookeeper-server-3.9.2/src/main/java/com/yahoo/vespa/zookeeper/VespaZooKeeperServerImpl.java
deleted file mode 100644
index 4f93eb0efa5..00000000000
--- a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/com/yahoo/vespa/zookeeper/VespaZooKeeperServerImpl.java
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.zookeeper;
-
-import ai.vespa.validation.Validation;
-import com.yahoo.cloud.config.ZookeeperServerConfig;
-import com.yahoo.component.AbstractComponent;
-import com.yahoo.component.annotation.Inject;
-import com.yahoo.vespa.zookeeper.server.VespaZooKeeperServer;
-
-import java.nio.file.Path;
-import java.time.Duration;
-
-/**
- * ZooKeeper server. Guarantees that the server is up by writing a node to ZooKeeper successfully before
- * returning from constructor.
- *
- * @author Ulf Lilleengen
- * @author Harald Musum
- */
-public class VespaZooKeeperServerImpl extends AbstractComponent implements VespaZooKeeperServer {
-
- private final VespaQuorumPeer peer;
- private final ZooKeeperRunner runner;
-
- @Inject
- public VespaZooKeeperServerImpl(ZookeeperServerConfig zookeeperServerConfig) {
- Validation.require(! zookeeperServerConfig.dynamicReconfiguration(),
- ! zookeeperServerConfig.dynamicReconfiguration(),
- "dynamicReconfiguration must be false");
- this.peer = new VespaQuorumPeer();
- this.runner = new ZooKeeperRunner(zookeeperServerConfig, this);
- new VespaZooKeeperAdminImpl().createDummyNode(zookeeperServerConfig);
- }
-
- @Override
- public void deconstruct() {
- runner.shutdown();
- super.deconstruct();
- }
-
- @Override
- public void shutdown() {
- peer.shutdown(Duration.ofMinutes(1));
- }
-
- @Override
- public void start(Path configFilePath) {
- peer.start(configFilePath);
- }
-
- @Override
- public boolean reconfigurable() {
- return false;
- }
-
-}
diff --git a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/common/ClientX509Util.java b/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/common/ClientX509Util.java
deleted file mode 100644
index f6dfb0fa4d9..00000000000
--- a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/common/ClientX509Util.java
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.zookeeper.common;
-
-import com.yahoo.vespa.zookeeper.tls.VespaZookeeperTlsContextUtils;
-import io.netty.handler.ssl.DelegatingSslContext;
-import io.netty.handler.ssl.SslContext;
-import io.netty.handler.ssl.SslContextBuilder;
-import io.netty.handler.ssl.SslProvider;
-import javax.net.ssl.KeyManager;
-import javax.net.ssl.SSLEngine;
-import javax.net.ssl.SSLException;
-import javax.net.ssl.SSLParameters;
-import javax.net.ssl.TrustManager;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-
-/**
- * X509 utilities specific for client-server communication framework.
- * <p>
- * <em>Modified to use Vespa's TLS context, whenever it is available, instead of the file-based key and trust stores of ZK 3.9.
- * Based on https://github.com/apache/zookeeper/blob/branch-3.9/zookeeper-server/src/main/java/org/apache/zookeeper/common/ClientX509Util.java</em>
- *
- * @author jonmv
- */
-public class ClientX509Util extends X509Util {
-
- private static final Logger LOG = LoggerFactory.getLogger(ClientX509Util.class);
-
- private final String sslAuthProviderProperty = getConfigPrefix() + "authProvider";
- private final String sslProviderProperty = getConfigPrefix() + "sslProvider";
-
- @Override
- protected String getConfigPrefix() {
- return "zookeeper.ssl.";
- }
-
- @Override
- protected boolean shouldVerifyClientHostname() {
- return false;
- }
-
- public String getSslAuthProviderProperty() {
- return sslAuthProviderProperty;
- }
-
- public String getSslProviderProperty() {
- return sslProviderProperty;
- }
-
- public SslContext createNettySslContextForClient(ZKConfig config)
- throws X509Exception.KeyManagerException, X509Exception.TrustManagerException, SSLException {
- SslContextBuilder sslContextBuilder = SslContextBuilder.forClient();
- KeyManager km;
- TrustManager tm;
- if (VespaZookeeperTlsContextUtils.tlsContext().isPresent()) {
- km = VespaZookeeperTlsContextUtils.tlsContext().get().sslContext().keyManager();
- tm = VespaZookeeperTlsContextUtils.tlsContext().get().sslContext().trustManager();
- }
- else {
- String keyStoreLocation = config.getProperty(getSslKeystoreLocationProperty(), "");
- String keyStorePassword = getPasswordFromConfigPropertyOrFile(config, getSslKeystorePasswdProperty(),
- getSslKeystorePasswdPathProperty());
- String keyStoreType = config.getProperty(getSslKeystoreTypeProperty());
-
- if (keyStoreLocation.isEmpty()) {
- LOG.warn("{} not specified", getSslKeystoreLocationProperty());
- km = null;
- }
- else {
- km = createKeyManager(keyStoreLocation, keyStorePassword, keyStoreType);
- }
-
- tm = getTrustManager(config);
- }
-
- if (km != null) {
- sslContextBuilder.keyManager(km);
- }
- if (tm != null) {
- sslContextBuilder.trustManager(tm);
- }
-
- sslContextBuilder.enableOcsp(config.getBoolean(getSslOcspEnabledProperty()));
- sslContextBuilder.protocols(getEnabledProtocols(config));
- Iterable<String> enabledCiphers = getCipherSuites(config);
- if (enabledCiphers != null) {
- sslContextBuilder.ciphers(enabledCiphers);
- }
- sslContextBuilder.sslProvider(getSslProvider(config));
-
- SslContext sslContext1 = sslContextBuilder.build();
-
- if (getFipsMode(config) && isServerHostnameVerificationEnabled(config)) {
- return addHostnameVerification(sslContext1, "Server");
- } else {
- return sslContext1;
- }
- }
-
- public SslContext createNettySslContextForServer(ZKConfig config)
- throws X509Exception.SSLContextException, X509Exception.KeyManagerException, X509Exception.TrustManagerException, SSLException {
- KeyManager km;
- TrustManager tm;
- if (VespaZookeeperTlsContextUtils.tlsContext().isPresent()) {
- km = VespaZookeeperTlsContextUtils.tlsContext().get().sslContext().keyManager();
- tm = VespaZookeeperTlsContextUtils.tlsContext().get().sslContext().trustManager();
- }
- else {
- String keyStoreLocation = config.getProperty(getSslKeystoreLocationProperty(), "");
- String keyStorePassword = getPasswordFromConfigPropertyOrFile(config, getSslKeystorePasswdProperty(),
- getSslKeystorePasswdPathProperty());
- String keyStoreType = config.getProperty(getSslKeystoreTypeProperty());
-
- if (keyStoreLocation.isEmpty()) {
- throw new X509Exception.SSLContextException(
- "Keystore is required for SSL server: " + getSslKeystoreLocationProperty());
- }
- km = createKeyManager(keyStoreLocation, keyStorePassword, keyStoreType);
- tm = getTrustManager(config);
- }
- return createNettySslContextForServer(config, km, tm);
- }
-
- public SslContext createNettySslContextForServer(ZKConfig config, KeyManager keyManager, TrustManager trustManager) throws SSLException {
- SslContextBuilder sslContextBuilder = SslContextBuilder.forServer(keyManager);
-
- if (trustManager != null) {
- sslContextBuilder.trustManager(trustManager);
- }
-
- sslContextBuilder.enableOcsp(config.getBoolean(getSslOcspEnabledProperty()));
- sslContextBuilder.protocols(getEnabledProtocols(config));
- sslContextBuilder.clientAuth(getClientAuth(config).toNettyClientAuth());
- Iterable<String> enabledCiphers = getCipherSuites(config);
- if (enabledCiphers != null) {
- sslContextBuilder.ciphers(enabledCiphers);
- }
- sslContextBuilder.sslProvider(getSslProvider(config));
-
- SslContext sslContext1 = sslContextBuilder.build();
-
- if (getFipsMode(config) && isClientHostnameVerificationEnabled(config)) {
- return addHostnameVerification(sslContext1, "Client");
- } else {
- return sslContext1;
- }
- }
-
- private SslContext addHostnameVerification(SslContext sslContext, String clientOrServer) {
- return new DelegatingSslContext(sslContext) {
- @Override
- protected void initEngine(SSLEngine sslEngine) {
- SSLParameters sslParameters = sslEngine.getSSLParameters();
- sslParameters.setEndpointIdentificationAlgorithm("HTTPS");
- sslEngine.setSSLParameters(sslParameters);
- if (LOG.isDebugEnabled()) {
- LOG.debug("{} hostname verification: enabled HTTPS style endpoint identification algorithm", clientOrServer);
- }
- }
- };
- }
-
- private String[] getEnabledProtocols(final ZKConfig config) {
- String enabledProtocolsInput = config.getProperty(getSslEnabledProtocolsProperty());
- if (enabledProtocolsInput == null) {
- return new String[]{ config.getProperty(getSslProtocolProperty(), DEFAULT_PROTOCOL) };
- }
- return enabledProtocolsInput.split(",");
- }
-
- private X509Util.ClientAuth getClientAuth(final ZKConfig config) {
- return X509Util.ClientAuth.fromPropertyValue(config.getProperty(getSslClientAuthProperty()));
- }
-
- private Iterable<String> getCipherSuites(final ZKConfig config) {
- String cipherSuitesInput = config.getProperty(getSslCipherSuitesProperty());
- if (cipherSuitesInput == null) {
- if (getSslProvider(config) != SslProvider.JDK) {
- return null;
- }
- return List.of(X509Util.getDefaultCipherSuites());
- } else {
- return List.of(cipherSuitesInput.split(","));
- }
- }
-
- public SslProvider getSslProvider(ZKConfig config) {
- return SslProvider.valueOf(config.getProperty(getSslProviderProperty(), "JDK"));
- }
-
- private TrustManager getTrustManager(ZKConfig config) throws X509Exception.TrustManagerException {
- String trustStoreLocation = config.getProperty(getSslTruststoreLocationProperty(), "");
- String trustStorePassword = getPasswordFromConfigPropertyOrFile(config, getSslTruststorePasswdProperty(),
- getSslTruststorePasswdPathProperty());
- String trustStoreType = config.getProperty(getSslTruststoreTypeProperty());
-
- boolean sslCrlEnabled = config.getBoolean(getSslCrlEnabledProperty());
- boolean sslOcspEnabled = config.getBoolean(getSslOcspEnabledProperty());
- boolean sslServerHostnameVerificationEnabled = isServerHostnameVerificationEnabled(config);
- boolean sslClientHostnameVerificationEnabled = isClientHostnameVerificationEnabled(config);
-
- if (trustStoreLocation.isEmpty()) {
- LOG.warn("{} not specified", getSslTruststoreLocationProperty());
- return null;
- } else {
- return createTrustManager(trustStoreLocation, trustStorePassword, trustStoreType,
- sslCrlEnabled, sslOcspEnabled, sslServerHostnameVerificationEnabled,
- sslClientHostnameVerificationEnabled, getFipsMode(config));
- }
- }
-}
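
The addHostnameVerification helper above shows the key trick for enabling hostname verification with Netty: endpoint identification has to be switched on per SSLEngine. A minimal sketch of the same idea against the plain JDK API (the class and method names below are illustrative, not part of the deleted code):

import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;
import javax.net.ssl.SSLParameters;

// Minimal sketch: enable HTTPS-style endpoint identification on an SSLEngine,
// mirroring what addHostnameVerification() does through DelegatingSslContext.
public final class HostnameVerificationSketch {

    public static SSLEngine newVerifyingEngine(SSLContext context, String peerHost, int peerPort) {
        SSLEngine engine = context.createSSLEngine(peerHost, peerPort);
        SSLParameters parameters = engine.getSSLParameters();
        // Verify the peer certificate against peerHost during the handshake
        parameters.setEndpointIdentificationAlgorithm("HTTPS");
        engine.setSSLParameters(parameters);
        return engine;
    }
}
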
diff --git a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/common/NetUtils.java b/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/common/NetUtils.java
deleted file mode 100644
index baa69f12968..00000000000
--- a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/common/NetUtils.java
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.zookeeper.common;
-
-import java.net.Inet6Address;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-
-/**
- * This class contains common networking utilities, such as printing IPv6 literals correctly.
- */
-public class NetUtils {
-
- // Note: Changed from original to use hostname from InetSocketAddress if there exists one
- public static String formatInetAddr(InetSocketAddress addr) {
- String hostName = addr.getHostName();
- if (hostName != null) {
- return String.format("%s:%s", hostName, addr.getPort());
- }
-
- InetAddress ia = addr.getAddress();
-
- if (ia == null) {
- return String.format("%s:%s", addr.getHostString(), addr.getPort());
- }
- if (ia instanceof Inet6Address) {
- return String.format("[%s]:%s", ia.getHostAddress(), addr.getPort());
- } else {
- return String.format("%s:%s", ia.getHostAddress(), addr.getPort());
- }
- }
-
- /**
- * Separates the host and port from the given host port string if the host is enclosed
- * within square brackets.
- *
- * @param hostPort host port string
- * @return String[]{host, port} if the host port string is [host]:port
- * or String[]{host, port:port} if the host port string is [host]:port:port
- * or String[]{host} if the host port string is [host]
- * or String[]{} if it is not an IPv6 host port string.
- */
- public static String[] getIPV6HostAndPort(String hostPort) {
- if (hostPort.startsWith("[")) {
- int i = hostPort.lastIndexOf(']');
- if (i < 0) {
- throw new IllegalArgumentException(
- hostPort + " starts with '[' but has no matching ']'");
- }
- String host = hostPort.substring(1, i);
- if (host.isEmpty()) {
- throw new IllegalArgumentException(host + " is empty.");
- }
- if (hostPort.length() > i + 1) {
- return getHostPort(hostPort, i, host);
- }
- return new String[] { host };
- } else {
- //Not an IPV6 host port string
- return new String[] {};
- }
- }
-
- private static String[] getHostPort(String hostPort, int indexOfClosingBracket, String host) {
- // [127::1]:2181 , check that the ':' separator exists
- if (hostPort.charAt(indexOfClosingBracket + 1) != ':') {
- throw new IllegalArgumentException(hostPort + " does not have : after ]");
- }
- // [127::1]: scenario
- if (indexOfClosingBracket + 2 == hostPort.length()) {
- throw new IllegalArgumentException(hostPort + " doesn't have a port after colon.");
- }
- // do not include the ']' and the ':' separator
- String port = hostPort.substring(indexOfClosingBracket + 2);
- return new String[] { host, port };
- }
-}
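
The return contract of getIPV6HostAndPort above is easiest to see with a small usage sketch; the expected values follow directly from the deleted code (the wrapper class below is hypothetical):

import org.apache.zookeeper.common.NetUtils;

// Hypothetical usage sketch for the deleted NetUtils.getIPV6HostAndPort helper.
public class NetUtilsSketch {

    public static void main(String[] args) {
        // { "2001:db8::1", "2181" } -- bracketed IPv6 literal with a single port
        String[] hostAndPort = NetUtils.getIPV6HostAndPort("[2001:db8::1]:2181");
        // { "2001:db8::1", "2888:3888" } -- server-config style host:port:port
        String[] hostAndPorts = NetUtils.getIPV6HostAndPort("[2001:db8::1]:2888:3888");
        // { "2001:db8::1" } -- bracketed host without a port
        String[] hostOnly = NetUtils.getIPV6HostAndPort("[2001:db8::1]");
        // { } -- not bracketed, so not treated as an IPv6 host:port string
        String[] notIpv6 = NetUtils.getIPV6HostAndPort("localhost:2181");

        System.out.println(hostAndPort.length + " " + hostAndPorts.length
                + " " + hostOnly.length + " " + notIpv6.length); // prints "2 2 1 0"
    }
}
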
diff --git a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/SyncRequestProcessor.java b/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/SyncRequestProcessor.java
deleted file mode 100644
index cf7f4c44015..00000000000
--- a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/SyncRequestProcessor.java
+++ /dev/null
@@ -1,353 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.zookeeper.server;
-
-import java.io.Flushable;
-import java.io.IOException;
-import java.util.ArrayDeque;
-import java.util.Objects;
-import java.util.Queue;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.Semaphore;
-import java.util.concurrent.ThreadLocalRandom;
-import java.util.concurrent.TimeUnit;
-import org.apache.zookeeper.common.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This RequestProcessor logs requests to disk. It batches the requests to do
- * the I/O efficiently. A request is not passed to the next RequestProcessor
- * until its log has been synced to disk.
- *
- * SyncRequestProcessor is used in 3 different cases:
- * 1. Leader - Syncs the request to disk and forwards it to AckRequestProcessor,
- * which sends an ack back to itself.
- * 2. Follower - Syncs the request to disk and forwards it to
- * SendAckRequestProcessor, which sends the packets to the leader.
- * SendAckRequestProcessor is flushable, which allows us to force
- * pushing packets to the leader.
- * 3. Observer - Syncs the committed request to disk (received as an INFORM
- * packet). It never sends an ack back to the leader, so the nextProcessor
- * will be null. This changes the semantics of the txnlog on the observer,
- * since it only contains committed txns.
- */
-public class SyncRequestProcessor extends ZooKeeperCriticalThread implements RequestProcessor {
-
- private static final Logger LOG = LoggerFactory.getLogger(SyncRequestProcessor.class);
-
- private static final Request REQUEST_OF_DEATH = Request.requestOfDeath;
-
- private static class FlushRequest extends Request {
- private final CountDownLatch latch = new CountDownLatch(1);
- public FlushRequest() {
- super(null, 0, 0, 0, null, null);
- }
- }
-
- private static final Request TURN_FORWARDING_DELAY_ON_REQUEST = new Request(null, 0, 0, 0, null, null);
- private static final Request TURN_FORWARDING_DELAY_OFF_REQUEST = new Request(null, 0, 0, 0, null, null);
-
- private static class DelayingProcessor implements RequestProcessor, Flushable {
- private final RequestProcessor next;
- private Queue<Request> delayed = null;
- private DelayingProcessor(RequestProcessor next) {
- this.next = next;
- }
- @Override
- public void flush() throws IOException {
- if (delayed == null && next instanceof Flushable) {
- ((Flushable) next).flush();
- }
- }
- @Override
- public void processRequest(Request request) throws RequestProcessorException {
- if (delayed == null) {
- next.processRequest(request);
- } else {
- delayed.add(request);
- }
- }
- @Override
- public void shutdown() {
- next.shutdown();
- }
- private void startDelaying() {
- if (delayed == null) {
- delayed = new ArrayDeque<>();
- }
- }
- private void flushAndStopDelaying() throws RequestProcessorException {
- if (delayed != null) {
- for (Request request : delayed) {
- next.processRequest(request);
- }
- delayed = null;
- }
- }
- }
-
- /** The number of log entries to log before starting a snapshot */
- private static int snapCount = ZooKeeperServer.getSnapCount();
-
- /**
- * The total size of log entries before starting a snapshot
- */
- private static long snapSizeInBytes = ZooKeeperServer.getSnapSizeInBytes();
-
- /**
- * Random numbers used to vary snapshot timing
- */
- private int randRoll;
- private long randSize;
-
- private final BlockingQueue<Request> queuedRequests = new LinkedBlockingQueue<>();
-
- private final Semaphore snapThreadMutex = new Semaphore(1);
-
- private final ZooKeeperServer zks;
-
- private final DelayingProcessor nextProcessor;
-
- /**
- * Transactions that have been written and are waiting to be flushed to
- * disk. Basically this is the list of SyncItems whose callbacks will be
- * invoked after flush returns successfully.
- */
- private final Queue<Request> toFlush;
- private long lastFlushTime;
-
- public SyncRequestProcessor(ZooKeeperServer zks, RequestProcessor nextProcessor) {
- super("SyncThread:" + zks.getServerId(), zks.getZooKeeperServerListener());
- this.zks = zks;
- this.nextProcessor = nextProcessor == null ? null : new DelayingProcessor(nextProcessor);
- this.toFlush = new ArrayDeque<>(zks.getMaxBatchSize());
- }
-
- /**
- * used by tests to check for changing
- * snapcounts
- * @param count
- */
- public static void setSnapCount(int count) {
- snapCount = count;
- }
-
- /**
- * used by tests to get the snapcount
- * @return the snapcount
- */
- public static int getSnapCount() {
- return snapCount;
- }
-
- private long getRemainingDelay() {
- long flushDelay = zks.getFlushDelay();
- long duration = Time.currentElapsedTime() - lastFlushTime;
- if (duration < flushDelay) {
- return flushDelay - duration;
- }
- return 0;
- }
-
- /** If both flushDelay and maxBatchSize are set (bigger than 0), flush
- * whenever either condition is hit. If only one or the other is
- * set, flush only when the relevant condition is hit.
- */
- private boolean shouldFlush() {
- long flushDelay = zks.getFlushDelay();
- long maxBatchSize = zks.getMaxBatchSize();
- if ((flushDelay > 0) && (getRemainingDelay() == 0)) {
- return true;
- }
- return (maxBatchSize > 0) && (toFlush.size() >= maxBatchSize);
- }
-
- /**
- * used by tests to change the
- * snap size in bytes
- * @param size
- */
- public static void setSnapSizeInBytes(long size) {
- snapSizeInBytes = size;
- }
-
- private boolean shouldSnapshot() {
- int logCount = zks.getZKDatabase().getTxnCount();
- long logSize = zks.getZKDatabase().getTxnSize();
- return (logCount > (snapCount / 2 + randRoll))
- || (snapSizeInBytes > 0 && logSize > (snapSizeInBytes / 2 + randSize));
- }
-
- private void resetSnapshotStats() {
- randRoll = ThreadLocalRandom.current().nextInt(snapCount / 2);
- randSize = Math.abs(ThreadLocalRandom.current().nextLong() % (snapSizeInBytes / 2));
- }
-
- @Override
- public void run() {
- try {
- // we do this in an attempt to ensure that not all of the servers
- // in the ensemble take a snapshot at the same time
- resetSnapshotStats();
- lastFlushTime = Time.currentElapsedTime();
- while (true) {
- ServerMetrics.getMetrics().SYNC_PROCESSOR_QUEUE_SIZE.add(queuedRequests.size());
-
- long pollTime = Math.min(zks.getMaxWriteQueuePollTime(), getRemainingDelay());
- Request si = queuedRequests.poll(pollTime, TimeUnit.MILLISECONDS);
- if (si == null) {
- /* We timed out looking for more writes to batch, go ahead and flush immediately */
- flush();
- si = queuedRequests.take();
- }
-
- if (si == REQUEST_OF_DEATH) {
- break;
- }
-
- if (si == TURN_FORWARDING_DELAY_ON_REQUEST) {
- nextProcessor.startDelaying();
- continue;
- }
- if (si == TURN_FORWARDING_DELAY_OFF_REQUEST) {
- nextProcessor.flushAndStopDelaying();
- continue;
- }
-
- if (si instanceof FlushRequest) {
- flush();
- ((FlushRequest) si).latch.countDown();
- continue;
- }
-
- long startProcessTime = Time.currentElapsedTime();
- ServerMetrics.getMetrics().SYNC_PROCESSOR_QUEUE_TIME.add(startProcessTime - si.syncQueueStartTime);
-
- // track the number of records written to the log
- if (!si.isThrottled() && zks.getZKDatabase().append(si)) {
- if (shouldSnapshot()) {
- resetSnapshotStats();
- // roll the log
- zks.getZKDatabase().rollLog();
- // take a snapshot
- if (!snapThreadMutex.tryAcquire()) {
- LOG.warn("Too busy to snap, skipping");
- } else {
- new ZooKeeperThread("Snapshot Thread") {
- public void run() {
- try {
- zks.takeSnapshot();
- } catch (Exception e) {
- LOG.warn("Unexpected exception", e);
- } finally {
- snapThreadMutex.release();
- }
- }
- }.start();
- }
- }
- } else if (toFlush.isEmpty()) {
- // optimization for read heavy workloads
- // iff this is a read or a throttled request (which doesn't need to be written to the disk),
- // and there are no pending flushes (writes), then just pass this to the next processor
- if (nextProcessor != null) {
- nextProcessor.processRequest(si);
- nextProcessor.flush();
- }
- continue;
- }
- toFlush.add(si);
- if (shouldFlush()) {
- flush();
- }
- ServerMetrics.getMetrics().SYNC_PROCESS_TIME.add(Time.currentElapsedTime() - startProcessTime);
- }
- } catch (Throwable t) {
- handleException(this.getName(), t);
- }
- LOG.info("SyncRequestProcessor exited!");
- }
-
- /** Flushes all pending writes, and waits for this to complete. */
- public void syncFlush() throws InterruptedException {
- FlushRequest marker = new FlushRequest();
- queuedRequests.add(marker);
- marker.latch.await();
- }
-
- public void setDelayForwarding(boolean delayForwarding) {
- queuedRequests.add(delayForwarding ? TURN_FORWARDING_DELAY_ON_REQUEST : TURN_FORWARDING_DELAY_OFF_REQUEST);
- }
-
- private void flush() throws IOException, RequestProcessorException {
- if (this.toFlush.isEmpty()) {
- return;
- }
-
- ServerMetrics.getMetrics().BATCH_SIZE.add(toFlush.size());
-
- long flushStartTime = Time.currentElapsedTime();
- zks.getZKDatabase().commit();
- ServerMetrics.getMetrics().SYNC_PROCESSOR_FLUSH_TIME.add(Time.currentElapsedTime() - flushStartTime);
-
- if (this.nextProcessor == null) {
- this.toFlush.clear();
- } else {
- while (!this.toFlush.isEmpty()) {
- final Request i = this.toFlush.remove();
- long latency = Time.currentElapsedTime() - i.syncQueueStartTime;
- ServerMetrics.getMetrics().SYNC_PROCESSOR_QUEUE_AND_FLUSH_TIME.add(latency);
- this.nextProcessor.processRequest(i);
- }
- nextProcessor.flush();
- }
- lastFlushTime = Time.currentElapsedTime();
- }
-
- public void shutdown() {
- LOG.info("Shutting down");
- queuedRequests.add(REQUEST_OF_DEATH);
- try {
- this.join();
- this.flush();
- } catch (InterruptedException e) {
- LOG.warn("Interrupted while wating for {} to finish", this);
- Thread.currentThread().interrupt();
- } catch (IOException e) {
- LOG.warn("Got IO exception during shutdown");
- } catch (RequestProcessorException e) {
- LOG.warn("Got request processor exception during shutdown");
- }
- if (nextProcessor != null) {
- nextProcessor.shutdown();
- }
- }
-
- public void processRequest(final Request request) {
- Objects.requireNonNull(request, "Request cannot be null");
-
- request.syncQueueStartTime = Time.currentElapsedTime();
- queuedRequests.add(request);
- ServerMetrics.getMetrics().SYNC_PROCESSOR_QUEUED.add(1);
- }
-
-}
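
The batching behaviour of shouldFlush() and getRemainingDelay() above reduces to two bounds: a time bound (flushDelay) and a size bound (maxBatchSize), where a value of 0 disables the corresponding bound. A self-contained sketch of that decision logic (the class and parameter names are illustrative):

// Minimal sketch of the flush policy used by SyncRequestProcessor:
// flush when the time since the last flush reaches flushDelay, or when the
// number of pending requests reaches maxBatchSize; 0 disables either bound.
final class FlushPolicySketch {

    private final long flushDelayMillis;
    private final int maxBatchSize;

    FlushPolicySketch(long flushDelayMillis, int maxBatchSize) {
        this.flushDelayMillis = flushDelayMillis;
        this.maxBatchSize = maxBatchSize;
    }

    boolean shouldFlush(long millisSinceLastFlush, int pendingRequests) {
        if (flushDelayMillis > 0 && millisSinceLastFlush >= flushDelayMillis) {
            return true;
        }
        return maxBatchSize > 0 && pendingRequests >= maxBatchSize;
    }
}
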
diff --git a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/VespaNettyServerCnxnFactory.java b/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/VespaNettyServerCnxnFactory.java
deleted file mode 100644
index 114d2987fe2..00000000000
--- a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/VespaNettyServerCnxnFactory.java
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package org.apache.zookeeper.server;
-
-import com.yahoo.vespa.zookeeper.Configurator;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.logging.Logger;
-
-/**
- * Overrides secure setting with value from {@link Configurator}.
- * Workaround for incorrect handling of clientSecurePort in combination with ZooKeeper Dynamic Reconfiguration in 3.6.2
- * See https://issues.apache.org/jira/browse/ZOOKEEPER-3577.
- *
- * Using package {@link org.apache.zookeeper.server} as {@link NettyServerCnxnFactory#NettyServerCnxnFactory()} is package-private.
- *
- * @author bjorncs
- */
-public class VespaNettyServerCnxnFactory extends NettyServerCnxnFactory {
-
- private static final Logger log = Logger.getLogger(VespaNettyServerCnxnFactory.class.getName());
-
- private final boolean isSecure;
-
- public VespaNettyServerCnxnFactory() {
- super();
- this.isSecure = Configurator.VespaNettyServerCnxnFactory_isSecure;
- boolean portUnificationEnabled = Boolean.getBoolean(NettyServerCnxnFactory.PORT_UNIFICATION_KEY);
- log.info(String.format("For %h: isSecure=%b, portUnification=%b", this, isSecure, portUnificationEnabled));
- }
-
- @Override
- public void configure(InetSocketAddress addr, int maxClientCnxns, int backlog, boolean secure) throws IOException {
- log.info(String.format("For %h: configured() invoked with parameter 'secure'=%b, overridden to %b", this, secure, isSecure));
- super.configure(addr, maxClientCnxns, backlog, isSecure);
- }
-}
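
For context, a connection factory like the one above is selected through ZooKeeper's zookeeper.serverCnxnFactory system property; a hedged sketch of wiring it up before server start (the property name is the one upstream ZooKeeper reads, the call site is illustrative):

// Hypothetical sketch: select the custom connection factory before the ZooKeeper
// server is started, so that ServerCnxnFactory.createFactory() instantiates it.
public class CnxnFactorySelectionSketch {

    public static void main(String[] args) {
        System.setProperty("zookeeper.serverCnxnFactory",
                "org.apache.zookeeper.server.VespaNettyServerCnxnFactory");
    }
}
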
diff --git a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/ZooKeeperServer.java b/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/ZooKeeperServer.java
deleted file mode 100644
index 00af31b46d4..00000000000
--- a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/ZooKeeperServer.java
+++ /dev/null
@@ -1,2412 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.zookeeper.server;
-
-import java.io.BufferedInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.PrintWriter;
-import java.nio.ByteBuffer;
-import java.util.ArrayDeque;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Deque;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Random;
-import java.util.Set;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.function.BiConsumer;
-import java.util.zip.Adler32;
-import java.util.zip.CheckedInputStream;
-import javax.security.sasl.SaslException;
-import org.apache.jute.BinaryInputArchive;
-import org.apache.jute.BinaryOutputArchive;
-import org.apache.jute.InputArchive;
-import org.apache.jute.Record;
-import org.apache.zookeeper.Environment;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.KeeperException.Code;
-import org.apache.zookeeper.KeeperException.SessionExpiredException;
-import org.apache.zookeeper.Quotas;
-import org.apache.zookeeper.StatsTrack;
-import org.apache.zookeeper.Version;
-import org.apache.zookeeper.ZooDefs;
-import org.apache.zookeeper.ZooDefs.OpCode;
-import org.apache.zookeeper.ZookeeperBanner;
-import org.apache.zookeeper.common.PathUtils;
-import org.apache.zookeeper.common.StringUtils;
-import org.apache.zookeeper.common.Time;
-import org.apache.zookeeper.data.ACL;
-import org.apache.zookeeper.data.Id;
-import org.apache.zookeeper.data.StatPersisted;
-import org.apache.zookeeper.jmx.MBeanRegistry;
-import org.apache.zookeeper.metrics.MetricsContext;
-import org.apache.zookeeper.proto.AuthPacket;
-import org.apache.zookeeper.proto.ConnectRequest;
-import org.apache.zookeeper.proto.ConnectResponse;
-import org.apache.zookeeper.proto.CreateRequest;
-import org.apache.zookeeper.proto.DeleteRequest;
-import org.apache.zookeeper.proto.GetSASLRequest;
-import org.apache.zookeeper.proto.ReplyHeader;
-import org.apache.zookeeper.proto.RequestHeader;
-import org.apache.zookeeper.proto.SetACLRequest;
-import org.apache.zookeeper.proto.SetDataRequest;
-import org.apache.zookeeper.proto.SetSASLResponse;
-import org.apache.zookeeper.server.DataTree.ProcessTxnResult;
-import org.apache.zookeeper.server.RequestProcessor.RequestProcessorException;
-import org.apache.zookeeper.server.ServerCnxn.CloseRequestException;
-import org.apache.zookeeper.server.SessionTracker.Session;
-import org.apache.zookeeper.server.SessionTracker.SessionExpirer;
-import org.apache.zookeeper.server.auth.ProviderRegistry;
-import org.apache.zookeeper.server.auth.ServerAuthenticationProvider;
-import org.apache.zookeeper.server.persistence.FileTxnSnapLog;
-import org.apache.zookeeper.server.quorum.QuorumPeerConfig;
-import org.apache.zookeeper.server.quorum.ReadOnlyZooKeeperServer;
-import org.apache.zookeeper.server.util.JvmPauseMonitor;
-import org.apache.zookeeper.server.util.OSMXBean;
-import org.apache.zookeeper.server.util.QuotaMetricsUtils;
-import org.apache.zookeeper.server.util.RequestPathMetricsCollector;
-import org.apache.zookeeper.txn.CreateSessionTxn;
-import org.apache.zookeeper.txn.TxnDigest;
-import org.apache.zookeeper.txn.TxnHeader;
-import org.apache.zookeeper.util.ServiceUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class implements a simple standalone ZooKeeperServer. It sets up the
- * following chain of RequestProcessors to process requests:
- * PrepRequestProcessor -&gt; SyncRequestProcessor -&gt; FinalRequestProcessor
- */
-public class ZooKeeperServer implements SessionExpirer, ServerStats.Provider {
-
- protected static final Logger LOG;
- private static final RateLogger RATE_LOGGER;
-
- public static final String GLOBAL_OUTSTANDING_LIMIT = "zookeeper.globalOutstandingLimit";
-
- public static final String ENABLE_EAGER_ACL_CHECK = "zookeeper.enableEagerACLCheck";
- public static final String SKIP_ACL = "zookeeper.skipACL";
- public static final String ENFORCE_QUOTA = "zookeeper.enforceQuota";
-
- // When enabled, the ACL constraints pertaining to the requests are checked first,
- // before the requests are sent to the quorum.
- static boolean enableEagerACLCheck;
-
- static final boolean skipACL;
-
- public static final boolean enforceQuota;
-
- public static final String SASL_SUPER_USER = "zookeeper.superUser";
-
- public static final String ALLOW_SASL_FAILED_CLIENTS = "zookeeper.allowSaslFailedClients";
- public static final String ZOOKEEPER_DIGEST_ENABLED = "zookeeper.digest.enabled";
- private static boolean digestEnabled;
-
- public static final String ZOOKEEPER_SERIALIZE_LAST_PROCESSED_ZXID_ENABLED = "zookeeper.serializeLastProcessedZxid.enabled";
- private static boolean serializeLastProcessedZxidEnabled;
-
- // Add an enable/disable option for now; we should remove this one when
- // this feature is confirmed to be stable
- public static final String CLOSE_SESSION_TXN_ENABLED = "zookeeper.closeSessionTxn.enabled";
- private static boolean closeSessionTxnEnabled = true;
- private volatile CountDownLatch restoreLatch;
-
- static {
- LOG = LoggerFactory.getLogger(ZooKeeperServer.class);
-
- RATE_LOGGER = new RateLogger(LOG);
-
- ZookeeperBanner.printBanner(LOG);
-
- Environment.logEnv("Server environment:", LOG);
-
- enableEagerACLCheck = Boolean.getBoolean(ENABLE_EAGER_ACL_CHECK);
- LOG.info("{} = {}", ENABLE_EAGER_ACL_CHECK, enableEagerACLCheck);
-
- skipACL = System.getProperty(SKIP_ACL, "no").equals("yes");
- if (skipACL) {
- LOG.info("{}==\"yes\", ACL checks will be skipped", SKIP_ACL);
- }
-
- enforceQuota = Boolean.parseBoolean(System.getProperty(ENFORCE_QUOTA, "false"));
- if (enforceQuota) {
- LOG.info("{} = {}, Quota Enforce enables", ENFORCE_QUOTA, enforceQuota);
- }
-
- digestEnabled = Boolean.parseBoolean(System.getProperty(ZOOKEEPER_DIGEST_ENABLED, "true"));
- LOG.info("{} = {}", ZOOKEEPER_DIGEST_ENABLED, digestEnabled);
-
- closeSessionTxnEnabled = Boolean.parseBoolean(
- System.getProperty(CLOSE_SESSION_TXN_ENABLED, "true"));
- LOG.info("{} = {}", CLOSE_SESSION_TXN_ENABLED, closeSessionTxnEnabled);
-
- setSerializeLastProcessedZxidEnabled(Boolean.parseBoolean(
- System.getProperty(ZOOKEEPER_SERIALIZE_LAST_PROCESSED_ZXID_ENABLED, "true")));
- }
-
- // @VisibleForTesting
- public static boolean isEnableEagerACLCheck() {
- return enableEagerACLCheck;
- }
-
- // @VisibleForTesting
- public static void setEnableEagerACLCheck(boolean enabled) {
- ZooKeeperServer.enableEagerACLCheck = enabled;
- LOG.info("Update {} to {}", ENABLE_EAGER_ACL_CHECK, enabled);
- }
-
- public static boolean isCloseSessionTxnEnabled() {
- return closeSessionTxnEnabled;
- }
-
- public static void setCloseSessionTxnEnabled(boolean enabled) {
- ZooKeeperServer.closeSessionTxnEnabled = enabled;
- LOG.info("Update {} to {}", CLOSE_SESSION_TXN_ENABLED,
- ZooKeeperServer.closeSessionTxnEnabled);
- }
-
- protected ZooKeeperServerBean jmxServerBean;
- protected DataTreeBean jmxDataTreeBean;
-
- public static final int DEFAULT_TICK_TIME = 3000;
- protected int tickTime = DEFAULT_TICK_TIME;
- public static final int DEFAULT_THROTTLED_OP_WAIT_TIME = 0; // disabled
- protected static volatile int throttledOpWaitTime =
- Integer.getInteger("zookeeper.throttled_op_wait_time", DEFAULT_THROTTLED_OP_WAIT_TIME);
- /** value of -1 indicates unset, use default */
- protected int minSessionTimeout = -1;
- /** value of -1 indicates unset, use default */
- protected int maxSessionTimeout = -1;
- /** Socket listen backlog. Value of -1 indicates unset */
- protected int listenBacklog = -1;
- protected SessionTracker sessionTracker;
- private FileTxnSnapLog txnLogFactory = null;
- private ZKDatabase zkDb;
- private ResponseCache readResponseCache;
- private ResponseCache getChildrenResponseCache;
- private final AtomicLong hzxid = new AtomicLong(0);
- public static final Exception ok = new Exception("No prob");
- protected RequestProcessor firstProcessor;
- protected JvmPauseMonitor jvmPauseMonitor;
- protected volatile State state = State.INITIAL;
- private boolean isResponseCachingEnabled = true;
- /* contains the configuration file content read at startup */
- protected String initialConfig;
- protected boolean reconfigEnabled;
- private final RequestPathMetricsCollector requestPathMetricsCollector;
- private static final int DEFAULT_SNAP_COUNT = 100000;
- private static final int DEFAULT_GLOBAL_OUTSTANDING_LIMIT = 1000;
-
- private boolean localSessionEnabled = false;
- protected enum State {
- INITIAL,
- RUNNING,
- SHUTDOWN,
- ERROR
- }
-
- /**
- * This is the secret that we use to generate passwords. For the moment,
- * it's more of a checksum that's used in reconnection, which carries no
- * security weight, and is treated internally as if it carries no
- * security weight.
- */
- private static final long superSecret = 0XB3415C00L;
-
- private final AtomicInteger requestsInProcess = new AtomicInteger(0);
- final Deque<ChangeRecord> outstandingChanges = new ArrayDeque<>();
- // this data structure must be accessed under the outstandingChanges lock
- final Map<String, ChangeRecord> outstandingChangesForPath = new HashMap<>();
-
- protected ServerCnxnFactory serverCnxnFactory;
- protected ServerCnxnFactory secureServerCnxnFactory;
-
- private final ServerStats serverStats;
- private final ZooKeeperServerListener listener;
- private ZooKeeperServerShutdownHandler zkShutdownHandler;
- private volatile int createSessionTrackerServerId = 1;
-
- private static final String FLUSH_DELAY = "zookeeper.flushDelay";
- private static volatile long flushDelay;
- private static final String MAX_WRITE_QUEUE_POLL_SIZE = "zookeeper.maxWriteQueuePollTime";
- private static volatile long maxWriteQueuePollTime;
- private static final String MAX_BATCH_SIZE = "zookeeper.maxBatchSize";
- private static volatile int maxBatchSize;
-
- /**
- * Starting size of read and write byte array output buffers. The default is 1024 bytes; values below 32 are rejected.
- * Flag not used for small transfers like connectResponses.
- */
- public static final String INT_BUFFER_STARTING_SIZE_BYTES = "zookeeper.intBufferStartingSizeBytes";
- public static final int DEFAULT_STARTING_BUFFER_SIZE = 1024;
- public static final int intBufferStartingSizeBytes;
-
- public static final String GET_DATA_RESPONSE_CACHE_SIZE = "zookeeper.maxResponseCacheSize";
- public static final String GET_CHILDREN_RESPONSE_CACHE_SIZE = "zookeeper.maxGetChildrenResponseCacheSize";
-
- static {
- long configuredFlushDelay = Long.getLong(FLUSH_DELAY, 0);
- setFlushDelay(configuredFlushDelay);
- setMaxWriteQueuePollTime(Long.getLong(MAX_WRITE_QUEUE_POLL_SIZE, configuredFlushDelay / 3));
- setMaxBatchSize(Integer.getInteger(MAX_BATCH_SIZE, 1000));
-
- intBufferStartingSizeBytes = Integer.getInteger(INT_BUFFER_STARTING_SIZE_BYTES, DEFAULT_STARTING_BUFFER_SIZE);
-
- if (intBufferStartingSizeBytes < 32) {
- String msg = "Buffer starting size (" + intBufferStartingSizeBytes + ") must be greater than or equal to 32. "
- + "Configure with \"-Dzookeeper.intBufferStartingSizeBytes=<size>\" ";
- LOG.error(msg);
- throw new IllegalArgumentException(msg);
- }
-
- LOG.info("{} = {}", INT_BUFFER_STARTING_SIZE_BYTES, intBufferStartingSizeBytes);
- }
-
- // Connection throttling
- private final BlueThrottle connThrottle = new BlueThrottle();
-
- private RequestThrottler requestThrottler;
- public static final String SNAP_COUNT = "zookeeper.snapCount";
-
- /**
- * This setting sets a limit on the total number of large requests that
- * can be inflight and is designed to prevent ZooKeeper from accepting
- * too many large requests such that the JVM runs out of usable heap and
- * ultimately crashes.
- *
- * The limit is enforced by the {@link #checkRequestSizeWhenReceivingMessage(int)}
- * method which is called by the connection layer ({@link NIOServerCnxn},
- * {@link NettyServerCnxn}) before allocating a byte buffer and pulling
- * data off the TCP socket. The limit is then checked again by the
- * ZooKeeper server in {@link #processPacket(ServerCnxn, RequestHeader, RequestRecord)} which
- * also atomically updates {@link #currentLargeRequestBytes}. The request is
- * then marked as a large request, with the request size stored in the Request
- * object so that it can later be decremented from {@link #currentLargeRequestBytes}.
- *
- * When a request is completed or dropped, the relevant code path calls the
- * {@link #requestFinished(Request)} method which performs the decrement if
- * needed.
- */
- private volatile int largeRequestMaxBytes = 100 * 1024 * 1024;
-
- /**
- * The size threshold after which a request is considered a large request
- * and is checked against the large request byte limit.
- */
- private volatile int largeRequestThreshold = -1;
-
- private final AtomicInteger currentLargeRequestBytes = new AtomicInteger(0);
-
- private final AuthenticationHelper authHelper = new AuthenticationHelper();
-
- void removeCnxn(ServerCnxn cnxn) {
- zkDb.removeCnxn(cnxn);
- }
-
- /**
- * Creates a ZooKeeperServer instance. Nothing is set up; use the setX
- * methods to prepare the instance (e.g. datadir, datalogdir, ticktime,
- * builder, etc.).
- *
- */
- public ZooKeeperServer() {
- listener = new ZooKeeperServerListenerImpl(this);
- serverStats = new ServerStats(this);
- this.requestPathMetricsCollector = new RequestPathMetricsCollector();
- }
-
- /**
- * Keeping this constructor for backward compatibility
- */
- public ZooKeeperServer(FileTxnSnapLog txnLogFactory, int tickTime, int minSessionTimeout, int maxSessionTimeout, int clientPortListenBacklog, ZKDatabase zkDb, String initialConfig) {
- this(txnLogFactory, tickTime, minSessionTimeout, maxSessionTimeout, clientPortListenBacklog, zkDb, initialConfig, QuorumPeerConfig.isReconfigEnabled());
- }
-
- /**
- * Creates a ZooKeeperServer instance. It sets everything up, but doesn't
- * actually start listening for clients until run() is invoked.
- *
- */
- public ZooKeeperServer(FileTxnSnapLog txnLogFactory, int tickTime, int minSessionTimeout, int maxSessionTimeout, int clientPortListenBacklog, ZKDatabase zkDb, String initialConfig, boolean reconfigEnabled) {
- serverStats = new ServerStats(this);
- this.txnLogFactory = txnLogFactory;
- this.txnLogFactory.setServerStats(this.serverStats);
- this.zkDb = zkDb;
- this.tickTime = tickTime;
- setMinSessionTimeout(minSessionTimeout);
- setMaxSessionTimeout(maxSessionTimeout);
- this.listenBacklog = clientPortListenBacklog;
- this.reconfigEnabled = reconfigEnabled;
-
- listener = new ZooKeeperServerListenerImpl(this);
-
- readResponseCache = new ResponseCache(Integer.getInteger(
- GET_DATA_RESPONSE_CACHE_SIZE,
- ResponseCache.DEFAULT_RESPONSE_CACHE_SIZE), "getData");
-
- getChildrenResponseCache = new ResponseCache(Integer.getInteger(
- GET_CHILDREN_RESPONSE_CACHE_SIZE,
- ResponseCache.DEFAULT_RESPONSE_CACHE_SIZE), "getChildren");
-
- this.initialConfig = initialConfig;
-
- this.requestPathMetricsCollector = new RequestPathMetricsCollector();
-
- this.initLargeRequestThrottlingSettings();
-
- LOG.info(
- "Created server with"
- + " tickTime {} ms"
- + " minSessionTimeout {} ms"
- + " maxSessionTimeout {} ms"
- + " clientPortListenBacklog {}"
- + " dataLogdir {}"
- + " snapdir {}",
- tickTime,
- getMinSessionTimeout(),
- getMaxSessionTimeout(),
- getClientPortListenBacklog(),
- txnLogFactory.getDataLogDir(),
- txnLogFactory.getSnapDir());
- }
-
- public String getInitialConfig() {
- return initialConfig;
- }
-
- /**
- * Adds JvmPauseMonitor and calls
- * {@link #ZooKeeperServer(FileTxnSnapLog, int, int, int, int, ZKDatabase, String)}
- *
- */
- public ZooKeeperServer(JvmPauseMonitor jvmPauseMonitor, FileTxnSnapLog txnLogFactory, int tickTime, int minSessionTimeout, int maxSessionTimeout, int clientPortListenBacklog, ZKDatabase zkDb, String initialConfig) {
- this(txnLogFactory, tickTime, minSessionTimeout, maxSessionTimeout, clientPortListenBacklog, zkDb, initialConfig, QuorumPeerConfig.isReconfigEnabled());
- this.jvmPauseMonitor = jvmPauseMonitor;
- if (jvmPauseMonitor != null) {
- LOG.info("Added JvmPauseMonitor to server");
- }
- }
-
- /**
- * Creates a ZooKeeperServer instance.
- * @param txnLogFactory the file transaction snapshot logging class
- * @param tickTime the ticktime for the server
- */
- public ZooKeeperServer(FileTxnSnapLog txnLogFactory, int tickTime, String initialConfig) {
- this(txnLogFactory, tickTime, -1, -1, -1, new ZKDatabase(txnLogFactory), initialConfig, QuorumPeerConfig.isReconfigEnabled());
- }
-
- public ServerStats serverStats() {
- return serverStats;
- }
-
- public RequestPathMetricsCollector getRequestPathMetricsCollector() {
- return requestPathMetricsCollector;
- }
-
- public BlueThrottle connThrottle() {
- return connThrottle;
- }
-
- public void dumpConf(PrintWriter pwriter) {
- pwriter.print("clientPort=");
- pwriter.println(getClientPort());
- pwriter.print("secureClientPort=");
- pwriter.println(getSecureClientPort());
- pwriter.print("dataDir=");
- pwriter.println(zkDb.snapLog.getSnapDir().getAbsolutePath());
- pwriter.print("dataDirSize=");
- pwriter.println(getDataDirSize());
- pwriter.print("dataLogDir=");
- pwriter.println(zkDb.snapLog.getDataLogDir().getAbsolutePath());
- pwriter.print("dataLogSize=");
- pwriter.println(getLogDirSize());
- pwriter.print("tickTime=");
- pwriter.println(getTickTime());
- pwriter.print("maxClientCnxns=");
- pwriter.println(getMaxClientCnxnsPerHost());
- pwriter.print("minSessionTimeout=");
- pwriter.println(getMinSessionTimeout());
- pwriter.print("maxSessionTimeout=");
- pwriter.println(getMaxSessionTimeout());
- pwriter.print("clientPortListenBacklog=");
- pwriter.println(getClientPortListenBacklog());
-
- pwriter.print("serverId=");
- pwriter.println(getServerId());
- }
-
- public ZooKeeperServerConf getConf() {
- return new ZooKeeperServerConf(
- getClientPort(),
- zkDb.snapLog.getSnapDir().getAbsolutePath(),
- zkDb.snapLog.getDataLogDir().getAbsolutePath(),
- getTickTime(),
- getMaxClientCnxnsPerHost(),
- getMinSessionTimeout(),
- getMaxSessionTimeout(),
- getServerId(),
- getClientPortListenBacklog());
- }
-
- /**
- * This constructor is for backward compatibility with the existing unit
- * test code.
- * It defaults to FileLogProvider persistence provider.
- */
- public ZooKeeperServer(File snapDir, File logDir, int tickTime) throws IOException {
- this(new FileTxnSnapLog(snapDir, logDir), tickTime, "");
- }
-
- /**
- * Default constructor, relies on the config for its argument values
- *
- * @throws IOException
- */
- public ZooKeeperServer(FileTxnSnapLog txnLogFactory) throws IOException {
- this(txnLogFactory, DEFAULT_TICK_TIME, -1, -1, -1, new ZKDatabase(txnLogFactory), "", QuorumPeerConfig.isReconfigEnabled());
- }
-
- /**
- * get the zookeeper database for this server
- * @return the zookeeper database for this server
- */
- public ZKDatabase getZKDatabase() {
- return this.zkDb;
- }
-
- /**
- * set the zkdatabase for this zookeeper server
- * @param zkDb
- */
- public void setZKDatabase(ZKDatabase zkDb) {
- this.zkDb = zkDb;
- }
-
- /**
- * Restore sessions and data
- */
- public void loadData() throws IOException, InterruptedException {
- /*
- * When a new leader starts executing Leader#lead, it
- * invokes this method. The database, however, has been
- * initialized before running leader election so that
- * the server could pick its zxid for its initial vote.
- * It does it by invoking QuorumPeer#getLastLoggedZxid.
- * Consequently, we don't need to initialize it once more
- * and avoid the penalty of loading it a second time. Not
- * reloading it is particularly important for applications
- * that host a large database.
- *
- * The following if block checks whether the database has
- * been initialized or not. Note that this method is
- * invoked by at least one other method:
- * ZooKeeperServer#startdata.
- *
- * See ZOOKEEPER-1642 for more detail.
- */
- if (zkDb.isInitialized()) {
- setZxid(zkDb.getDataTreeLastProcessedZxid());
- } else {
- setZxid(zkDb.loadDataBase());
- }
-
- // Clean up dead sessions
- zkDb.getSessions().stream()
- .filter(session -> zkDb.getSessionWithTimeOuts().get(session) == null)
- .forEach(session -> killSession(session, zkDb.getDataTreeLastProcessedZxid()));
-
- // Make a clean snapshot
- takeSnapshot();
- }
-
- public File takeSnapshot() throws IOException {
- return takeSnapshot(false);
- }
-
- public File takeSnapshot(boolean syncSnap) throws IOException {
- return takeSnapshot(syncSnap, true, false);
- }
-
- /**
- * Takes a snapshot on the server.
- *
- * @param syncSnap whether to sync the snapshot immediately after it is written
- * @param isSevere if true the process exits on failure, otherwise an IOException is thrown
- * @param fastForwardFromEdits whether to fast-forward the database to the latest recorded transactions first
- *
- * @return the snapshot file object
- * @throws IOException
- */
- public synchronized File takeSnapshot(boolean syncSnap, boolean isSevere, boolean fastForwardFromEdits) throws IOException {
- long start = Time.currentElapsedTime();
- File snapFile = null;
- try {
- if (fastForwardFromEdits) {
- zkDb.fastForwardDataBase();
- }
- snapFile = txnLogFactory.save(zkDb.getDataTree(), zkDb.getSessionWithTimeOuts(), syncSnap);
- } catch (IOException e) {
- if (isSevere) {
- LOG.error("Severe unrecoverable error, exiting", e);
- // This is a severe error that we cannot recover from,
- // so we need to exit
- ServiceUtils.requestSystemExit(ExitCode.TXNLOG_ERROR_TAKING_SNAPSHOT.getValue());
- } else {
- throw e;
- }
- }
- long elapsed = Time.currentElapsedTime() - start;
- LOG.info("Snapshot taken in {} ms", elapsed);
- ServerMetrics.getMetrics().SNAPSHOT_TIME.add(elapsed);
- return snapFile;
- }
-
- /**
- * Restores database from a snapshot. It is used by the restore admin server command.
- *
- * @param inputStream input stream of snapshot
- * @return last processed zxid
- */
- public synchronized long restoreFromSnapshot(final InputStream inputStream) throws IOException {
- if (inputStream == null) {
- throw new IllegalArgumentException("InputStream can not be null when restoring from snapshot");
- }
-
- long start = Time.currentElapsedTime();
- LOG.info("Before restore database. lastProcessedZxid={}, nodeCount={},sessionCount={}",
- getZKDatabase().getDataTreeLastProcessedZxid(),
- getZKDatabase().dataTree.getNodeCount(),
- getZKDatabase().getSessionCount());
-
- // restore to a new zkDatabase
- final ZKDatabase newZKDatabase = new ZKDatabase(this.txnLogFactory);
- final CheckedInputStream cis = new CheckedInputStream(new BufferedInputStream(inputStream), new Adler32());
- final InputArchive ia = BinaryInputArchive.getArchive(cis);
- newZKDatabase.deserializeSnapshot(ia, cis);
- LOG.info("Restored to a new database. lastProcessedZxid={}, nodeCount={}, sessionCount={}",
- newZKDatabase.getDataTreeLastProcessedZxid(),
- newZKDatabase.dataTree.getNodeCount(),
- newZKDatabase.getSessionCount());
-
- // create a CountDownLatch
- restoreLatch = new CountDownLatch(1);
-
- try {
- // set to the new zkDatabase
- setZKDatabase(newZKDatabase);
-
- // re-create SessionTrack
- createSessionTracker();
- } finally {
- // unblock request submission
- restoreLatch.countDown();
- restoreLatch = null;
- }
-
- LOG.info("After restore database. lastProcessedZxid={}, nodeCount={}, sessionCount={}",
- getZKDatabase().getDataTreeLastProcessedZxid(),
- getZKDatabase().dataTree.getNodeCount(),
- getZKDatabase().getSessionCount());
-
- long elapsed = Time.currentElapsedTime() - start;
- LOG.info("Restore taken in {} ms", elapsed);
- ServerMetrics.getMetrics().RESTORE_TIME.add(elapsed);
-
- return getLastProcessedZxid();
- }
-
- public boolean shouldForceWriteInitialSnapshotAfterLeaderElection() {
- return txnLogFactory.shouldForceWriteInitialSnapshotAfterLeaderElection();
- }
-
- @Override
- public long getDataDirSize() {
- if (zkDb == null) {
- return 0L;
- }
- File path = zkDb.snapLog.getSnapDir();
- return getDirSize(path);
- }
-
- @Override
- public long getLogDirSize() {
- if (zkDb == null) {
- return 0L;
- }
- File path = zkDb.snapLog.getDataLogDir();
- return getDirSize(path);
- }
-
- private long getDirSize(File file) {
- long size = 0L;
- if (file.isDirectory()) {
- File[] files = file.listFiles();
- if (files != null) {
- for (File f : files) {
- size += getDirSize(f);
- }
- }
- } else {
- size = file.length();
- }
- return size;
- }
-
- public long getZxid() {
- return hzxid.get();
- }
-
- public SessionTracker getSessionTracker() {
- return sessionTracker;
- }
-
- long getNextZxid() {
- return hzxid.incrementAndGet();
- }
-
- public void setZxid(long zxid) {
- hzxid.set(zxid);
- }
-
- private void close(long sessionId) {
- Request si = new Request(null, sessionId, 0, OpCode.closeSession, null, null);
- submitRequest(si);
- }
-
- public void closeSession(long sessionId) {
- LOG.info("Closing session 0x{}", Long.toHexString(sessionId));
-
- // we do not want to wait for a session close. send it as soon as we
- // detect it!
- close(sessionId);
- }
-
- protected void killSession(long sessionId, long zxid) {
- zkDb.killSession(sessionId, zxid);
- if (LOG.isTraceEnabled()) {
- ZooTrace.logTraceMessage(
- LOG,
- ZooTrace.SESSION_TRACE_MASK,
- "ZooKeeperServer --- killSession: 0x" + Long.toHexString(sessionId));
- }
- if (sessionTracker != null) {
- sessionTracker.removeSession(sessionId);
- }
- }
-
- public void expire(Session session) {
- long sessionId = session.getSessionId();
- LOG.info(
- "Expiring session 0x{}, timeout of {}ms exceeded",
- Long.toHexString(sessionId),
- session.getTimeout());
- close(sessionId);
- }
-
- public void expire(long sessionId) {
- LOG.info("forcibly expiring session 0x{}", Long.toHexString(sessionId));
-
- close(sessionId);
- }
-
- public static class MissingSessionException extends IOException {
-
- private static final long serialVersionUID = 7467414635467261007L;
-
- public MissingSessionException(String msg) {
- super(msg);
- }
-
- }
-
- void touch(ServerCnxn cnxn) throws MissingSessionException {
- if (cnxn == null) {
- return;
- }
- long id = cnxn.getSessionId();
- int to = cnxn.getSessionTimeout();
- if (!sessionTracker.touchSession(id, to)) {
- throw new MissingSessionException("No session with sessionid 0x"
- + Long.toHexString(id)
- + " exists, probably expired and removed");
- }
- }
-
- protected void registerJMX() {
- // register with JMX
- try {
- jmxServerBean = new ZooKeeperServerBean(this);
- MBeanRegistry.getInstance().register(jmxServerBean, null);
-
- try {
- jmxDataTreeBean = new DataTreeBean(zkDb.getDataTree());
- MBeanRegistry.getInstance().register(jmxDataTreeBean, jmxServerBean);
- } catch (Exception e) {
- LOG.warn("Failed to register with JMX", e);
- jmxDataTreeBean = null;
- }
- } catch (Exception e) {
- LOG.warn("Failed to register with JMX", e);
- jmxServerBean = null;
- }
- }
-
- public void startdata() throws IOException, InterruptedException {
- //check to see if zkDb is not null
- if (zkDb == null) {
- zkDb = new ZKDatabase(this.txnLogFactory);
- }
- if (!zkDb.isInitialized()) {
- loadData();
- }
- }
-
- public synchronized void startup() {
- startupWithServerState(State.RUNNING);
- }
-
- public synchronized void startupWithoutServing() {
- startupWithServerState(State.INITIAL);
- }
-
- public synchronized void startServing() {
- setState(State.RUNNING);
- notifyAll();
- }
-
- private void startupWithServerState(State state) {
- if (sessionTracker == null) {
- createSessionTracker();
- }
- startSessionTracker();
- setupRequestProcessors();
-
- startRequestThrottler();
-
- registerJMX();
-
- startJvmPauseMonitor();
-
- registerMetrics();
-
- setState(state);
-
- requestPathMetricsCollector.start();
-
- localSessionEnabled = sessionTracker.isLocalSessionsEnabled();
-
- notifyAll();
- }
-
- protected void startJvmPauseMonitor() {
- if (this.jvmPauseMonitor != null) {
- this.jvmPauseMonitor.serviceStart();
- }
- }
-
- protected void startRequestThrottler() {
- requestThrottler = createRequestThrottler();
- requestThrottler.start();
- }
-
- protected RequestThrottler createRequestThrottler() {
- return new RequestThrottler(this);
- }
-
- protected void setupRequestProcessors() {
- RequestProcessor finalProcessor = new FinalRequestProcessor(this);
- RequestProcessor syncProcessor = new SyncRequestProcessor(this, finalProcessor);
- ((SyncRequestProcessor) syncProcessor).start();
- firstProcessor = new PrepRequestProcessor(this, syncProcessor);
- ((PrepRequestProcessor) firstProcessor).start();
- }
-
- public ZooKeeperServerListener getZooKeeperServerListener() {
- return listener;
- }
-
- /**
- * Change the server ID used by {@link #createSessionTracker()}. Must be called prior to
- * {@link #startup()} being called
- *
- * @param newId ID to use
- */
- public void setCreateSessionTrackerServerId(int newId) {
- createSessionTrackerServerId = newId;
- }
-
- protected void createSessionTracker() {
- sessionTracker = new SessionTrackerImpl(this, zkDb.getSessionWithTimeOuts(), tickTime, createSessionTrackerServerId, getZooKeeperServerListener());
- }
-
- protected void startSessionTracker() {
- ((SessionTrackerImpl) sessionTracker).start();
- }
-
- /**
- * Sets the state of the ZooKeeper server. After changing the state, it notifies
- * the registered shutdown handler, if any, of the state change.
- * <p>
- * The following are the server state transitions:
- * <ul><li>During startup the server will be in the INITIAL state.</li>
- * <li>After successfully starting, the server sets the state to RUNNING.
- * </li>
- * <li>The server transitions to the ERROR state if it hits an internal
- * error. {@link ZooKeeperServerListenerImpl} notifies any critical resource
- * error events, e.g., SyncRequestProcessor not being able to write a txn to
- * disk.</li>
- * <li>During shutdown the server sets the state to SHUTDOWN, which
- * corresponds to the server not running.</li>
- *
- * <li>During maintenance (e.g. restore) the server sets the state to MAINTENANCE
- * </li></ul>
- *
- * @param state new server state.
- */
- protected void setState(State state) {
- this.state = state;
- // Notify server state changes to the registered shutdown handler, if any.
- if (zkShutdownHandler != null) {
- zkShutdownHandler.handle(state);
- } else {
- LOG.debug(
- "ZKShutdownHandler is not registered, so ZooKeeper server"
- + " won't take any action on ERROR or SHUTDOWN server state changes");
- }
- }
-
- /**
- * This can be used while shutting down the server to see whether the server
- * is already shut down or not.
- *
- * @return true if the server is running or has hit an error, false
- * otherwise.
- */
- protected boolean canShutdown() {
- return state == State.RUNNING || state == State.ERROR;
- }
-
- /**
- * @return true if the server is running, false otherwise.
- */
- public boolean isRunning() {
- return state == State.RUNNING;
- }
-
- public void shutdown() {
- shutdown(false);
- }
-
- /**
- * Shut down the server instance
- * @param fullyShutDown true if another server using the same database will not replace this one in the same process
- */
- public synchronized void shutdown(boolean fullyShutDown) {
- if (!canShutdown()) {
- if (fullyShutDown && zkDb != null) {
- zkDb.clear();
- }
- LOG.debug("ZooKeeper server is not running, so not proceeding to shutdown!");
- return;
- }
- LOG.info("shutting down");
-
- // new RuntimeException("Calling shutdown").printStackTrace();
- setState(State.SHUTDOWN);
-
- // unregister all metrics that are keeping a strong reference to this object
- // subclasses will do their specific clean up
- unregisterMetrics();
-
- if (requestThrottler != null) {
- requestThrottler.shutdown();
- }
-
- // Since sessionTracker and syncThreads poll we just have to
- // set running to false and they will detect it during the poll
- // interval.
- if (sessionTracker != null) {
- sessionTracker.shutdown();
- }
- if (firstProcessor != null) {
- firstProcessor.shutdown();
- }
- if (jvmPauseMonitor != null) {
- jvmPauseMonitor.serviceStop();
- }
-
- if (zkDb != null) {
- if (fullyShutDown) {
- zkDb.clear();
- } else {
- // else there is no need to clear the database
- // * When a new quorum is established we can still apply the diff
- // on top of the same zkDb data
- // * If we fetch a new snapshot from leader, the zkDb will be
- // cleared anyway before loading the snapshot
- try {
- // This will fast-forward the database to the latest recorded transactions
- zkDb.fastForwardDataBase();
- } catch (IOException e) {
- LOG.error("Error updating DB", e);
- zkDb.clear();
- }
- }
- }
-
- requestPathMetricsCollector.shutdown();
- unregisterJMX();
- }
-
- protected void unregisterJMX() {
- // unregister from JMX
- try {
- if (jmxDataTreeBean != null) {
- MBeanRegistry.getInstance().unregister(jmxDataTreeBean);
- }
- } catch (Exception e) {
- LOG.warn("Failed to unregister with JMX", e);
- }
- try {
- if (jmxServerBean != null) {
- MBeanRegistry.getInstance().unregister(jmxServerBean);
- }
- } catch (Exception e) {
- LOG.warn("Failed to unregister with JMX", e);
- }
- jmxServerBean = null;
- jmxDataTreeBean = null;
- }
-
- public void incInProcess() {
- requestsInProcess.incrementAndGet();
- }
-
- public void decInProcess() {
- requestsInProcess.decrementAndGet();
- if (requestThrottler != null) {
- requestThrottler.throttleWake();
- }
- }
-
- public int getInProcess() {
- return requestsInProcess.get();
- }
-
- public int getInflight() {
- return requestThrottleInflight();
- }
-
- private int requestThrottleInflight() {
- if (requestThrottler != null) {
- return requestThrottler.getInflight();
- }
- return 0;
- }
-
- static class PrecalculatedDigest {
- final long nodeDigest;
- final long treeDigest;
-
- PrecalculatedDigest(long nodeDigest, long treeDigest) {
- this.nodeDigest = nodeDigest;
- this.treeDigest = treeDigest;
- }
- }
-
-
- /**
- * This structure is used to facilitate information sharing between PrepRP
- * and FinalRP.
- */
- static class ChangeRecord {
- PrecalculatedDigest precalculatedDigest;
- byte[] data;
-
- ChangeRecord(long zxid, String path, StatPersisted stat, int childCount, List<ACL> acl) {
- this.zxid = zxid;
- this.path = path;
- this.stat = stat;
- this.childCount = childCount;
- this.acl = acl;
- }
-
- long zxid;
-
- String path;
-
- StatPersisted stat; /* Make sure to create a new object when changing */
-
- int childCount;
-
- List<ACL> acl; /* Make sure to create a new object when changing */
-
- ChangeRecord duplicate(long zxid) {
- StatPersisted stat = new StatPersisted();
- if (this.stat != null) {
- DataTree.copyStatPersisted(this.stat, stat);
- }
- ChangeRecord changeRecord = new ChangeRecord(zxid, path, stat, childCount,
- acl == null ? new ArrayList<>() : new ArrayList<>(acl));
- changeRecord.precalculatedDigest = precalculatedDigest;
- changeRecord.data = data;
- return changeRecord;
- }
-
- }
-
- byte[] generatePasswd(long id) {
- Random r = new Random(id ^ superSecret);
- byte[] p = new byte[16];
- r.nextBytes(p);
- return p;
- }
-
- protected boolean checkPasswd(long sessionId, byte[] passwd) {
- return sessionId != 0 && Arrays.equals(passwd, generatePasswd(sessionId));
- }
-
- long createSession(ServerCnxn cnxn, byte[] passwd, int timeout) {
- if (passwd == null) {
- // Possible since it's just deserialized from a packet on the wire.
- passwd = new byte[0];
- }
- long sessionId = sessionTracker.createSession(timeout);
- Random r = new Random(sessionId ^ superSecret);
- r.nextBytes(passwd);
- CreateSessionTxn txn = new CreateSessionTxn(timeout);
- cnxn.setSessionId(sessionId);
- Request si = new Request(cnxn, sessionId, 0, OpCode.createSession, RequestRecord.fromRecord(txn), null);
- submitRequest(si);
- return sessionId;
- }
-
- /**
- * Sets the owner of the given session.
- * @param id the session id
- * @param owner the owner of the session
- * @throws SessionExpiredException
- */
- public void setOwner(long id, Object owner) throws SessionExpiredException {
- sessionTracker.setOwner(id, owner);
- }
-
- protected void revalidateSession(ServerCnxn cnxn, long sessionId, int sessionTimeout) throws IOException {
- boolean rc = sessionTracker.touchSession(sessionId, sessionTimeout);
- if (LOG.isTraceEnabled()) {
- ZooTrace.logTraceMessage(
- LOG,
- ZooTrace.SESSION_TRACE_MASK,
- "Session 0x" + Long.toHexString(sessionId) + " is valid: " + rc);
- }
- finishSessionInit(cnxn, rc);
- }
-
- public void reopenSession(ServerCnxn cnxn, long sessionId, byte[] passwd, int sessionTimeout) throws IOException {
- if (checkPasswd(sessionId, passwd)) {
- revalidateSession(cnxn, sessionId, sessionTimeout);
- } else {
- LOG.warn(
- "Incorrect password from {} for session 0x{}",
- cnxn.getRemoteSocketAddress(),
- Long.toHexString(sessionId));
- finishSessionInit(cnxn, false);
- }
- }
-
- public void finishSessionInit(ServerCnxn cnxn, boolean valid) {
- // register with JMX
- try {
- if (valid) {
- if (serverCnxnFactory != null && serverCnxnFactory.cnxns.contains(cnxn)) {
- serverCnxnFactory.registerConnection(cnxn);
- } else if (secureServerCnxnFactory != null && secureServerCnxnFactory.cnxns.contains(cnxn)) {
- secureServerCnxnFactory.registerConnection(cnxn);
- }
- }
- } catch (Exception e) {
- LOG.warn("Failed to register with JMX", e);
- }
-
- try {
- ConnectResponse rsp = new ConnectResponse(
- 0,
- valid ? cnxn.getSessionTimeout() : 0,
- valid ? cnxn.getSessionId() : 0, // send 0 if session is no
- // longer valid
- valid ? generatePasswd(cnxn.getSessionId()) : new byte[16],
- this instanceof ReadOnlyZooKeeperServer);
- ByteArrayOutputStream baos = new ByteArrayOutputStream();
- BinaryOutputArchive bos = BinaryOutputArchive.getArchive(baos);
- bos.writeInt(-1, "len");
- rsp.serialize(bos, "connect");
- baos.close();
- ByteBuffer bb = ByteBuffer.wrap(baos.toByteArray());
- bb.putInt(bb.remaining() - 4).rewind();
- cnxn.sendBuffer(bb);
-
- if (valid) {
- LOG.debug(
- "Established session 0x{} with negotiated timeout {} for client {}",
- Long.toHexString(cnxn.getSessionId()),
- cnxn.getSessionTimeout(),
- cnxn.getRemoteSocketAddress());
- cnxn.enableRecv();
- } else {
-
- LOG.info(
- "Invalid session 0x{} for client {}, probably expired",
- Long.toHexString(cnxn.getSessionId()),
- cnxn.getRemoteSocketAddress());
- cnxn.sendBuffer(ServerCnxnFactory.closeConn);
- }
-
- } catch (Exception e) {
- LOG.warn("Exception while establishing session, closing", e);
- cnxn.close(ServerCnxn.DisconnectReason.IO_EXCEPTION_IN_SESSION_INIT);
- }
- }
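finishSessionInit() frames the ConnectResponse with a length prefix: a placeholder length is written first, and once the payload size is known the first four bytes are patched via putInt(remaining - 4) before rewinding the buffer. A minimal sketch of that framing step with a plain byte payload (buffer handling only; no jute serialization):

import java.nio.ByteBuffer;

class LengthPrefixSketch {

    // Frame a payload as [4-byte length][payload], mirroring the placeholder-then-patch
    // pattern used when sending the ConnectResponse.
    static ByteBuffer frame(byte[] payload) {
        ByteBuffer bb = ByteBuffer.allocate(4 + payload.length);
        bb.putInt(-1);                 // placeholder, like writeInt(-1, "len")
        bb.put(payload);
        bb.flip();
        bb.putInt(bb.remaining() - 4); // patch the real payload length at position 0
        bb.rewind();                   // ready to be handed to a send call
        return bb;
    }

    public static void main(String[] args) {
        ByteBuffer bb = frame("hello".getBytes());
        System.out.println(bb.getInt()); // 5
    }
}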
-
- public void closeSession(ServerCnxn cnxn, RequestHeader requestHeader) {
- closeSession(cnxn.getSessionId());
- }
-
- public long getServerId() {
- return 0;
- }
-
-    /**
-     * If the underlying ZooKeeper server supports local sessions, this method
-     * sets isLocalSession to true when a request is associated with
-     * a local session.
-     *
-     * @param si the request to inspect
-     */
- protected void setLocalSessionFlag(Request si) {
- }
-
- public void submitRequest(Request si) {
- if (restoreLatch != null) {
- try {
- LOG.info("Blocking request submission while restore is in progress");
- restoreLatch.await();
- } catch (final InterruptedException e) {
- LOG.warn("Unexpected interruption", e);
- }
- }
- enqueueRequest(si);
- }
-
- public void enqueueRequest(Request si) {
- if (requestThrottler == null) {
- synchronized (this) {
- try {
-                    // Since all requests are passed to the request
-                    // processor, we should wait until the request processor
-                    // chain is set up. The state will be updated to RUNNING
-                    // after the setup.
- while (state == State.INITIAL) {
- wait(1000);
- }
- } catch (InterruptedException e) {
- LOG.warn("Unexpected interruption", e);
- }
- if (requestThrottler == null) {
- throw new RuntimeException("Not started");
- }
- }
- }
- requestThrottler.submitRequest(si);
- }
-
- public void submitRequestNow(Request si) {
- if (firstProcessor == null) {
- synchronized (this) {
- try {
-                    // Since all requests are passed to the request
-                    // processor, we should wait until the request processor
-                    // chain is set up. The state will be updated to RUNNING
-                    // after the setup.
- while (state == State.INITIAL) {
- wait(1000);
- }
- } catch (InterruptedException e) {
- LOG.warn("Unexpected interruption", e);
- }
- if (firstProcessor == null || state != State.RUNNING) {
- throw new RuntimeException("Not started");
- }
- }
- }
- try {
- touch(si.cnxn);
- boolean validpacket = Request.isValid(si.type);
- if (validpacket) {
- setLocalSessionFlag(si);
- firstProcessor.processRequest(si);
- if (si.cnxn != null) {
- incInProcess();
- }
- } else {
- LOG.warn("Received packet at server of unknown type {}", si.type);
- // Update request accounting/throttling limits
- requestFinished(si);
- new UnimplementedRequestProcessor().processRequest(si);
- }
- } catch (MissingSessionException e) {
- LOG.debug("Dropping request.", e);
- // Update request accounting/throttling limits
- requestFinished(si);
- } catch (RequestProcessorException e) {
- LOG.error("Unable to process request", e);
- // Update request accounting/throttling limits
- requestFinished(si);
- }
- }
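The guard at the top of enqueueRequest() and submitRequestNow() is a standard wait-until-started pattern: callers block with a bounded wait until the processor chain is wired up and the state leaves INITIAL. A minimal, self-contained sketch of that pattern (class and state names here are illustrative, not the server's own):

class StartupGateSketch {

    enum State { INITIAL, RUNNING, SHUTDOWN }

    private State state = State.INITIAL;

    synchronized void markRunning() {
        state = State.RUNNING;
        notifyAll(); // wake any caller blocked in awaitRunning()
    }

    synchronized void awaitRunning() throws InterruptedException {
        while (state == State.INITIAL) {
            wait(1000); // bounded wait; re-check the condition in a loop
        }
        if (state != State.RUNNING) {
            throw new RuntimeException("Not started");
        }
    }

    public static void main(String[] args) throws InterruptedException {
        StartupGateSketch gate = new StartupGateSketch();
        new Thread(() -> {
            try {
                Thread.sleep(100); // simulate the processor-chain setup
            } catch (InterruptedException ignored) {
            }
            gate.markRunning();
        }).start();
        gate.awaitRunning();
        System.out.println("request submission may proceed");
    }
}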
-
- public static int getSnapCount() {
- int snapCount = Integer.getInteger(SNAP_COUNT, DEFAULT_SNAP_COUNT);
- // snapCount must be 2 or more. See org.apache.zookeeper.server.SyncRequestProcessor
- if (snapCount < 2) {
-            LOG.warn("snapCount must be 2 or more; resetting it to 2");
- snapCount = 2;
- }
- return snapCount;
- }
-
- public int getGlobalOutstandingLimit() {
- return Integer.getInteger(GLOBAL_OUTSTANDING_LIMIT, DEFAULT_GLOBAL_OUTSTANDING_LIMIT);
- }
-
- public static long getSnapSizeInBytes() {
- long size = Long.getLong("zookeeper.snapSizeLimitInKb", 4194304L); // 4GB by default
- if (size <= 0) {
- LOG.info("zookeeper.snapSizeLimitInKb set to a non-positive value {}; disabling feature", size);
- }
- return size * 1024; // Convert to bytes
- }
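getSnapSizeInBytes() reads a KB-denominated system property and converts it to bytes; 4194304 KB is the 4 GB default noted in the comment, and non-positive values disable the check. A small sketch of the same lookup-and-convert step (property name taken from the code above; the override value is only an example):

class SnapSizeSketch {

    static long snapSizeLimitBytes() {
        long kb = Long.getLong("zookeeper.snapSizeLimitInKb", 4194304L); // 4 GB default
        return kb * 1024; // convert KB to bytes
    }

    public static void main(String[] args) {
        System.setProperty("zookeeper.snapSizeLimitInKb", "1024");
        System.out.println(snapSizeLimitBytes()); // 1048576 bytes (1 MB)
    }
}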
-
- public void setServerCnxnFactory(ServerCnxnFactory factory) {
- serverCnxnFactory = factory;
- }
-
- public ServerCnxnFactory getServerCnxnFactory() {
- return serverCnxnFactory;
- }
-
- public ServerCnxnFactory getSecureServerCnxnFactory() {
- return secureServerCnxnFactory;
- }
-
- public void setSecureServerCnxnFactory(ServerCnxnFactory factory) {
- secureServerCnxnFactory = factory;
- }
-
- /**
- * return the last processed id from the
- * datatree
- */
- public long getLastProcessedZxid() {
- return zkDb.getDataTreeLastProcessedZxid();
- }
-
- /**
- * return the outstanding requests
- * in the queue, which haven't been
- * processed yet
- */
- public long getOutstandingRequests() {
- return getInProcess();
- }
-
- /**
- * return the total number of client connections that are alive
- * to this server
- */
- public int getNumAliveConnections() {
- int numAliveConnections = 0;
-
- if (serverCnxnFactory != null) {
- numAliveConnections += serverCnxnFactory.getNumAliveConnections();
- }
-
- if (secureServerCnxnFactory != null) {
- numAliveConnections += secureServerCnxnFactory.getNumAliveConnections();
- }
-
- return numAliveConnections;
- }
-
-    /**
-     * Truncate the log to get back in sync with the rest of the quorum.
-     *
-     * @param zxid the zxid up to which the log should be truncated
-     * @throws IOException
-     */
- public void truncateLog(long zxid) throws IOException {
- this.zkDb.truncateLog(zxid);
- }
-
- public int getTickTime() {
- return tickTime;
- }
-
- public void setTickTime(int tickTime) {
- LOG.info("tickTime set to {} ms", tickTime);
- this.tickTime = tickTime;
- }
-
- public static int getThrottledOpWaitTime() {
- return throttledOpWaitTime;
- }
-
- public static void setThrottledOpWaitTime(int time) {
- LOG.info("throttledOpWaitTime set to {} ms", time);
- throttledOpWaitTime = time;
- }
-
- public int getMinSessionTimeout() {
- return minSessionTimeout;
- }
-
- public void setMinSessionTimeout(int min) {
- this.minSessionTimeout = min == -1 ? tickTime * 2 : min;
- LOG.info("minSessionTimeout set to {} ms", this.minSessionTimeout);
- }
-
- public int getMaxSessionTimeout() {
- return maxSessionTimeout;
- }
-
- public void setMaxSessionTimeout(int max) {
- this.maxSessionTimeout = max == -1 ? tickTime * 20 : max;
- LOG.info("maxSessionTimeout set to {} ms", this.maxSessionTimeout);
- }
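The two setters above derive defaults from tickTime whenever -1 is passed: minSessionTimeout becomes 2 * tickTime and maxSessionTimeout becomes 20 * tickTime. A small worked sketch (the 2000 ms tickTime is only an example value):

class SessionTimeoutDefaultsSketch {

    static int effectiveMin(int configuredMin, int tickTime) {
        return configuredMin == -1 ? tickTime * 2 : configuredMin;
    }

    static int effectiveMax(int configuredMax, int tickTime) {
        return configuredMax == -1 ? tickTime * 20 : configuredMax;
    }

    public static void main(String[] args) {
        int tickTime = 2000; // ms
        System.out.println(effectiveMin(-1, tickTime)); // 4000 ms
        System.out.println(effectiveMax(-1, tickTime)); // 40000 ms
    }
}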
-
- public int getClientPortListenBacklog() {
- return listenBacklog;
- }
-
- public void setClientPortListenBacklog(int backlog) {
- this.listenBacklog = backlog;
- LOG.info("clientPortListenBacklog set to {}", backlog);
- }
-
- public int getClientPort() {
- return serverCnxnFactory != null ? serverCnxnFactory.getLocalPort() : -1;
- }
-
- public int getSecureClientPort() {
- return secureServerCnxnFactory != null ? secureServerCnxnFactory.getLocalPort() : -1;
- }
-
- /** Maximum number of connections allowed from particular host (ip) */
- public int getMaxClientCnxnsPerHost() {
- if (serverCnxnFactory != null) {
- return serverCnxnFactory.getMaxClientCnxnsPerHost();
- }
- if (secureServerCnxnFactory != null) {
- return secureServerCnxnFactory.getMaxClientCnxnsPerHost();
- }
- return -1;
- }
-
- public void setTxnLogFactory(FileTxnSnapLog txnLog) {
- this.txnLogFactory = txnLog;
- }
-
- public FileTxnSnapLog getTxnLogFactory() {
- return this.txnLogFactory;
- }
-
-    /**
-     * Returns the elapsed sync time of the transaction log in milliseconds.
-     */
- public long getTxnLogElapsedSyncTime() {
- return txnLogFactory.getTxnLogElapsedSyncTime();
- }
-
- public String getState() {
- return "standalone";
- }
-
- public void dumpEphemerals(PrintWriter pwriter) {
- zkDb.dumpEphemerals(pwriter);
- }
-
- public Map<Long, Set<String>> getEphemerals() {
- return zkDb.getEphemerals();
- }
-
- public double getConnectionDropChance() {
- return connThrottle.getDropChance();
- }
-
- public void processConnectRequest(ServerCnxn cnxn, ConnectRequest request) throws IOException, ClientCnxnLimitException {
- LOG.debug(
- "Session establishment request from client {} client's lastZxid is 0x{}",
- cnxn.getRemoteSocketAddress(),
- Long.toHexString(request.getLastZxidSeen()));
-
- long sessionId = request.getSessionId();
- int tokensNeeded = 1;
- if (connThrottle.isConnectionWeightEnabled()) {
- if (sessionId == 0) {
- if (localSessionEnabled) {
- tokensNeeded = connThrottle.getRequiredTokensForLocal();
- } else {
- tokensNeeded = connThrottle.getRequiredTokensForGlobal();
- }
- } else {
- tokensNeeded = connThrottle.getRequiredTokensForRenew();
- }
- }
-
- if (!connThrottle.checkLimit(tokensNeeded)) {
- throw new ClientCnxnLimitException();
- }
- ServerMetrics.getMetrics().CONNECTION_TOKEN_DEFICIT.add(connThrottle.getDeficit());
- ServerMetrics.getMetrics().CONNECTION_REQUEST_COUNT.add(1);
-
- if (!cnxn.protocolManager.isReadonlyAvailable()) {
- LOG.warn(
- "Connection request from old client {}; will be dropped if server is in r-o mode",
- cnxn.getRemoteSocketAddress());
- }
-
- if (!request.getReadOnly() && this instanceof ReadOnlyZooKeeperServer) {
- String msg = "Refusing session request for not-read-only client " + cnxn.getRemoteSocketAddress();
- LOG.info(msg);
- throw new CloseRequestException(msg, ServerCnxn.DisconnectReason.NOT_READ_ONLY_CLIENT);
- }
- if (request.getLastZxidSeen() > zkDb.dataTree.lastProcessedZxid) {
- String msg = "Refusing session(0x"
- + Long.toHexString(sessionId)
- + ") request for client "
- + cnxn.getRemoteSocketAddress()
- + " as it has seen zxid 0x"
- + Long.toHexString(request.getLastZxidSeen())
- + " our last zxid is 0x"
- + Long.toHexString(getZKDatabase().getDataTreeLastProcessedZxid())
- + " client must try another server";
-
- LOG.info(msg);
- throw new CloseRequestException(msg, ServerCnxn.DisconnectReason.CLIENT_ZXID_AHEAD);
- }
- int sessionTimeout = request.getTimeOut();
- byte[] passwd = request.getPasswd();
- int minSessionTimeout = getMinSessionTimeout();
- if (sessionTimeout < minSessionTimeout) {
- sessionTimeout = minSessionTimeout;
- }
- int maxSessionTimeout = getMaxSessionTimeout();
- if (sessionTimeout > maxSessionTimeout) {
- sessionTimeout = maxSessionTimeout;
- }
- cnxn.setSessionTimeout(sessionTimeout);
- // We don't want to receive any packets until we are sure that the
- // session is setup
- cnxn.disableRecv();
- if (sessionId == 0) {
- long id = createSession(cnxn, passwd, sessionTimeout);
- LOG.debug(
- "Client attempting to establish new session: session = 0x{}, zxid = 0x{}, timeout = {}, address = {}",
- Long.toHexString(id),
- Long.toHexString(request.getLastZxidSeen()),
- request.getTimeOut(),
- cnxn.getRemoteSocketAddress());
- } else {
- validateSession(cnxn, sessionId);
- LOG.debug(
- "Client attempting to renew session: session = 0x{}, zxid = 0x{}, timeout = {}, address = {}",
- Long.toHexString(sessionId),
- Long.toHexString(request.getLastZxidSeen()),
- request.getTimeOut(),
- cnxn.getRemoteSocketAddress());
- if (serverCnxnFactory != null) {
- serverCnxnFactory.closeSession(sessionId, ServerCnxn.DisconnectReason.CLIENT_RECONNECT);
- }
- if (secureServerCnxnFactory != null) {
- secureServerCnxnFactory.closeSession(sessionId, ServerCnxn.DisconnectReason.CLIENT_RECONNECT);
- }
- cnxn.setSessionId(sessionId);
- reopenSession(cnxn, sessionId, passwd, sessionTimeout);
- ServerMetrics.getMetrics().CONNECTION_REVALIDATE_COUNT.add(1);
-
- }
- }
-
- /**
- * Validate if a particular session can be reestablished.
- *
- * @param cnxn
- * @param sessionId
- */
- protected void validateSession(ServerCnxn cnxn, long sessionId)
- throws IOException {
- // do nothing
- }
-
- public boolean shouldThrottle(long outStandingCount) {
- int globalOutstandingLimit = getGlobalOutstandingLimit();
- if (globalOutstandingLimit < getInflight() || globalOutstandingLimit < getInProcess()) {
- return outStandingCount > 0;
- }
- return false;
- }
-
- long getFlushDelay() {
- return flushDelay;
- }
-
- static void setFlushDelay(long delay) {
- LOG.info("{} = {} ms", FLUSH_DELAY, delay);
- flushDelay = delay;
- }
-
- long getMaxWriteQueuePollTime() {
- return maxWriteQueuePollTime;
- }
-
- static void setMaxWriteQueuePollTime(long maxTime) {
- LOG.info("{} = {} ms", MAX_WRITE_QUEUE_POLL_SIZE, maxTime);
- maxWriteQueuePollTime = maxTime;
- }
-
- int getMaxBatchSize() {
- return maxBatchSize;
- }
-
- static void setMaxBatchSize(int size) {
- LOG.info("{}={}", MAX_BATCH_SIZE, size);
- maxBatchSize = size;
- }
-
- private void initLargeRequestThrottlingSettings() {
- setLargeRequestMaxBytes(Integer.getInteger("zookeeper.largeRequestMaxBytes", largeRequestMaxBytes));
- setLargeRequestThreshold(Integer.getInteger("zookeeper.largeRequestThreshold", -1));
- }
-
- public int getLargeRequestMaxBytes() {
- return largeRequestMaxBytes;
- }
-
- public void setLargeRequestMaxBytes(int bytes) {
- if (bytes <= 0) {
- LOG.warn("Invalid max bytes for all large requests {}. It should be a positive number.", bytes);
- LOG.warn("Will not change the setting. The max bytes stay at {}", largeRequestMaxBytes);
- } else {
- largeRequestMaxBytes = bytes;
- LOG.info("The max bytes for all large requests are set to {}", largeRequestMaxBytes);
- }
- }
-
- public int getLargeRequestThreshold() {
- return largeRequestThreshold;
- }
-
- public void setLargeRequestThreshold(int threshold) {
- if (threshold == 0 || threshold < -1) {
- LOG.warn("Invalid large request threshold {}. It should be -1 or positive. Setting to -1 ", threshold);
- largeRequestThreshold = -1;
- } else {
- largeRequestThreshold = threshold;
- LOG.info("The large request threshold is set to {}", largeRequestThreshold);
- }
- }
-
- public int getLargeRequestBytes() {
- return currentLargeRequestBytes.get();
- }
-
- private boolean isLargeRequest(int length) {
- // The large request limit is disabled when threshold is -1
- if (largeRequestThreshold == -1) {
- return false;
- }
- return length > largeRequestThreshold;
- }
-
- public boolean checkRequestSizeWhenReceivingMessage(int length) throws IOException {
- if (!isLargeRequest(length)) {
- return true;
- }
- if (currentLargeRequestBytes.get() + length <= largeRequestMaxBytes) {
- return true;
- } else {
- ServerMetrics.getMetrics().LARGE_REQUESTS_REJECTED.add(1);
- throw new IOException("Rejecting large request");
- }
-
- }
-
- private boolean checkRequestSizeWhenMessageReceived(int length) throws IOException {
- if (!isLargeRequest(length)) {
- return true;
- }
-
- int bytes = currentLargeRequestBytes.addAndGet(length);
- if (bytes > largeRequestMaxBytes) {
- currentLargeRequestBytes.addAndGet(-length);
- ServerMetrics.getMetrics().LARGE_REQUESTS_REJECTED.add(1);
- throw new IOException("Rejecting large request");
- }
- return true;
- }
-
- public void requestFinished(Request request) {
- int largeRequestLength = request.getLargeRequestSize();
- if (largeRequestLength != -1) {
- currentLargeRequestBytes.addAndGet(-largeRequestLength);
- }
- }
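checkRequestSizeWhenMessageReceived() and requestFinished() together form an optimistic byte budget for large requests: reserve the bytes with addAndGet, roll the reservation back if the budget would be blown, and release it when the request completes. A self-contained sketch of that accounting (the class name and the 100 KiB / 1 KiB numbers are illustrative):

import java.util.concurrent.atomic.AtomicInteger;

class LargeRequestBudgetSketch {

    private final AtomicInteger currentBytes = new AtomicInteger();
    private final int maxBytes;
    private final int threshold; // -1 disables the check

    LargeRequestBudgetSketch(int maxBytes, int threshold) {
        this.maxBytes = maxBytes;
        this.threshold = threshold;
    }

    // Optimistically reserve the bytes, then roll back if the budget is exceeded.
    boolean tryAdmit(int length) {
        if (threshold == -1 || length <= threshold) {
            return true; // not a "large" request, no accounting needed
        }
        int bytes = currentBytes.addAndGet(length);
        if (bytes > maxBytes) {
            currentBytes.addAndGet(-length); // roll back the reservation
            return false;
        }
        return true;
    }

    // Release the reservation once the admitted large request finishes.
    void release(int length) {
        currentBytes.addAndGet(-length);
    }

    public static void main(String[] args) {
        LargeRequestBudgetSketch budget = new LargeRequestBudgetSketch(100 * 1024, 1024);
        System.out.println(budget.tryAdmit(512));       // true: below the threshold
        System.out.println(budget.tryAdmit(60 * 1024)); // true: fits in the budget
        System.out.println(budget.tryAdmit(60 * 1024)); // false: would exceed 100 KiB
        budget.release(60 * 1024);
        System.out.println(budget.tryAdmit(60 * 1024)); // true again after release
    }
}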
-
- public void processPacket(ServerCnxn cnxn, RequestHeader h, RequestRecord request) throws IOException {
-        // Need to increase the outstanding request count first, otherwise
-        // there is a race condition where recv is enabled after processing
-        // the request and then disabled when checking throttling.
-        //
-        // Be aware that we're actually checking the global outstanding
-        // request count as it was before this request.
-        //
-        // It's fine if an IOException is thrown before we decrease the count
-        // in cnxn, since that will close the cnxn anyway.
- cnxn.incrOutstandingAndCheckThrottle(h);
-
- if (h.getType() == OpCode.auth) {
- LOG.info("got auth packet {}", cnxn.getRemoteSocketAddress());
- AuthPacket authPacket = request.readRecord(AuthPacket::new);
- String scheme = authPacket.getScheme();
- ServerAuthenticationProvider ap = ProviderRegistry.getServerProvider(scheme);
- Code authReturn = KeeperException.Code.AUTHFAILED;
- if (ap != null) {
- try {
- // handleAuthentication may close the connection, to allow the client to choose
- // a different server to connect to.
- authReturn = ap.handleAuthentication(
- new ServerAuthenticationProvider.ServerObjs(this, cnxn),
- authPacket.getAuth());
- } catch (RuntimeException e) {
- LOG.warn("Caught runtime exception from AuthenticationProvider: {}", scheme, e);
- authReturn = KeeperException.Code.AUTHFAILED;
- }
- }
- if (authReturn == KeeperException.Code.OK) {
- LOG.info("Session 0x{}: auth success for scheme {} and address {}",
- Long.toHexString(cnxn.getSessionId()), scheme,
- cnxn.getRemoteSocketAddress());
- ReplyHeader rh = new ReplyHeader(h.getXid(), 0, KeeperException.Code.OK.intValue());
- cnxn.sendResponse(rh, null, null);
- } else {
- if (ap == null) {
- LOG.warn(
- "No authentication provider for scheme: {} has {}",
- scheme,
- ProviderRegistry.listProviders());
- } else {
- LOG.warn("Authentication failed for scheme: {}", scheme);
- }
- // send a response...
- ReplyHeader rh = new ReplyHeader(h.getXid(), 0, KeeperException.Code.AUTHFAILED.intValue());
- cnxn.sendResponse(rh, null, null);
- // ... and close connection
- cnxn.sendBuffer(ServerCnxnFactory.closeConn);
- cnxn.disableRecv();
- }
- return;
- } else if (h.getType() == OpCode.sasl) {
- processSasl(request, cnxn, h);
- } else {
- if (!authHelper.enforceAuthentication(cnxn, h.getXid())) {
- // Authentication enforcement is failed
- // Already sent response to user about failure and closed the session, lets return
- return;
- } else {
- Request si = new Request(cnxn, cnxn.getSessionId(), h.getXid(), h.getType(), request, cnxn.getAuthInfo());
- int length = request.limit();
- if (isLargeRequest(length)) {
- // checkRequestSize will throw IOException if request is rejected
- checkRequestSizeWhenMessageReceived(length);
- si.setLargeRequestSize(length);
- }
- si.setOwner(ServerCnxn.me);
- submitRequest(si);
- }
- }
- }
-
- private static boolean isSaslSuperUser(String id) {
- if (id == null || id.isEmpty()) {
- return false;
- }
-
- Properties properties = System.getProperties();
- int prefixLen = SASL_SUPER_USER.length();
-
- for (String k : properties.stringPropertyNames()) {
- if (k.startsWith(SASL_SUPER_USER)
- && (k.length() == prefixLen || k.charAt(prefixLen) == '.')) {
- String value = properties.getProperty(k);
-
- if (value != null && value.equals(id)) {
- return true;
- }
- }
- }
-
- return false;
- }
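isSaslSuperUser() treats both the bare property and any dot-suffixed variant as naming a super user. A sketch of the same matching rule against an explicit Properties object (the prefix is assumed to be "zookeeper.superUser", as the constant's name suggests; the user names are examples):

import java.util.Properties;

class SaslSuperUserSketch {

    static final String SASL_SUPER_USER = "zookeeper.superUser"; // assumed prefix

    // A property named either "<prefix>" or "<prefix>.<suffix>" whose value equals
    // the authorization id marks that id as a super user.
    static boolean isSaslSuperUser(String id, Properties properties) {
        if (id == null || id.isEmpty()) {
            return false;
        }
        int prefixLen = SASL_SUPER_USER.length();
        for (String k : properties.stringPropertyNames()) {
            if (k.startsWith(SASL_SUPER_USER)
                && (k.length() == prefixLen || k.charAt(prefixLen) == '.')
                && id.equals(properties.getProperty(k))) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("zookeeper.superUser.1", "alice");
        System.out.println(isSaslSuperUser("alice", props)); // true
        System.out.println(isSaslSuperUser("bob", props));   // false
    }
}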
-
- private static boolean shouldAllowSaslFailedClientsConnect() {
- return Boolean.getBoolean(ALLOW_SASL_FAILED_CLIENTS);
- }
-
- private void processSasl(RequestRecord request, ServerCnxn cnxn, RequestHeader requestHeader) throws IOException {
- LOG.debug("Responding to client SASL token.");
- GetSASLRequest clientTokenRecord = request.readRecord(GetSASLRequest::new);
- byte[] clientToken = clientTokenRecord.getToken();
- LOG.debug("Size of client SASL token: {}", clientToken.length);
- byte[] responseToken = null;
- try {
- ZooKeeperSaslServer saslServer = cnxn.zooKeeperSaslServer;
- try {
- // note that clientToken might be empty (clientToken.length == 0):
- // if using the DIGEST-MD5 mechanism, clientToken will be empty at the beginning of the
- // SASL negotiation process.
- responseToken = saslServer.evaluateResponse(clientToken);
- if (saslServer.isComplete()) {
- String authorizationID = saslServer.getAuthorizationID();
- LOG.info("Session 0x{}: adding SASL authorization for authorizationID: {}",
- Long.toHexString(cnxn.getSessionId()), authorizationID);
- cnxn.addAuthInfo(new Id("sasl", authorizationID));
-
- if (isSaslSuperUser(authorizationID)) {
- cnxn.addAuthInfo(new Id("super", ""));
- LOG.info(
- "Session 0x{}: Authenticated Id '{}' as super user",
- Long.toHexString(cnxn.getSessionId()),
- authorizationID);
- }
- }
- } catch (SaslException e) {
- LOG.warn("Client {} failed to SASL authenticate: {}", cnxn.getRemoteSocketAddress(), e);
- if (shouldAllowSaslFailedClientsConnect() && !authHelper.isSaslAuthRequired()) {
- LOG.warn("Maintaining client connection despite SASL authentication failure.");
- } else {
- int error;
- if (authHelper.isSaslAuthRequired()) {
-                        LOG.warn(
-                            "Closing client connection because the server requires client SASL authentication, "
-                                + "but client SASL authentication has failed or the client is not configured "
-                                + "with SASL authentication.");
- error = Code.SESSIONCLOSEDREQUIRESASLAUTH.intValue();
- } else {
- LOG.warn("Closing client connection due to SASL authentication failure.");
- error = Code.AUTHFAILED.intValue();
- }
-
- ReplyHeader replyHeader = new ReplyHeader(requestHeader.getXid(), 0, error);
- cnxn.sendResponse(replyHeader, new SetSASLResponse(null), "response");
- cnxn.sendCloseSession();
- cnxn.disableRecv();
- return;
- }
- }
- } catch (NullPointerException e) {
- LOG.error("cnxn.saslServer is null: cnxn object did not initialize its saslServer properly.");
- }
- if (responseToken != null) {
- LOG.debug("Size of server SASL response: {}", responseToken.length);
- }
-
- ReplyHeader replyHeader = new ReplyHeader(requestHeader.getXid(), 0, Code.OK.intValue());
- Record record = new SetSASLResponse(responseToken);
- cnxn.sendResponse(replyHeader, record, "response");
- }
-
- // entry point for quorum/Learner.java
- public ProcessTxnResult processTxn(TxnHeader hdr, Record txn) {
- processTxnForSessionEvents(null, hdr, txn);
- return processTxnInDB(hdr, txn, null);
- }
-
- // entry point for FinalRequestProcessor.java
- public ProcessTxnResult processTxn(Request request) {
- TxnHeader hdr = request.getHdr();
- processTxnForSessionEvents(request, hdr, request.getTxn());
-
- final boolean writeRequest = (hdr != null);
- final boolean quorumRequest = request.isQuorum();
-
- // return fast w/o synchronization when we get a read
- if (!writeRequest && !quorumRequest) {
- return new ProcessTxnResult();
- }
- synchronized (outstandingChanges) {
- ProcessTxnResult rc = processTxnInDB(hdr, request.getTxn(), request.getTxnDigest());
-
- // request.hdr is set for write requests, which are the only ones
- // that add to outstandingChanges.
- if (writeRequest) {
- long zxid = hdr.getZxid();
- while (!outstandingChanges.isEmpty()
- && outstandingChanges.peek().zxid <= zxid) {
- ChangeRecord cr = outstandingChanges.remove();
- ServerMetrics.getMetrics().OUTSTANDING_CHANGES_REMOVED.add(1);
- if (cr.zxid < zxid) {
- LOG.warn(
- "Zxid outstanding 0x{} is less than current 0x{}",
- Long.toHexString(cr.zxid),
- Long.toHexString(zxid));
- }
- if (outstandingChangesForPath.get(cr.path) == cr) {
- outstandingChangesForPath.remove(cr.path);
- }
- }
- }
-
- // do not add non quorum packets to the queue.
- if (quorumRequest) {
- getZKDatabase().addCommittedProposal(request);
- }
- return rc;
- }
- }
-
- private void processTxnForSessionEvents(Request request, TxnHeader hdr, Record txn) {
- int opCode = (request == null) ? hdr.getType() : request.type;
- long sessionId = (request == null) ? hdr.getClientId() : request.sessionId;
-
- if (opCode == OpCode.createSession) {
- if (hdr != null && txn instanceof CreateSessionTxn) {
- CreateSessionTxn cst = (CreateSessionTxn) txn;
- sessionTracker.commitSession(sessionId, cst.getTimeOut());
- } else if (request == null || !request.isLocalSession()) {
- LOG.warn("*****>>>>> Got {} {}", txn.getClass(), txn.toString());
- }
- } else if (opCode == OpCode.closeSession) {
- sessionTracker.removeSession(sessionId);
- }
- }
-
- private ProcessTxnResult processTxnInDB(TxnHeader hdr, Record txn, TxnDigest digest) {
- if (hdr == null) {
- return new ProcessTxnResult();
- } else {
- return getZKDatabase().processTxn(hdr, txn, digest);
- }
- }
-
- public Map<Long, Set<Long>> getSessionExpiryMap() {
- return sessionTracker.getSessionExpiryMap();
- }
-
-    /**
-     * Registers the ZooKeeperServerShutdownHandler to receive notifications of the
-     * server's error or shutdown state changes.
-     * {@link ZooKeeperServerShutdownHandler#handle(State)} will be called for
-     * every server state change made via {@link #setState(State)}.
-     *
-     * @param zkShutdownHandler shutdown handler
-     */
- void registerServerShutdownHandler(ZooKeeperServerShutdownHandler zkShutdownHandler) {
- this.zkShutdownHandler = zkShutdownHandler;
- }
-
- public boolean isResponseCachingEnabled() {
- return isResponseCachingEnabled;
- }
-
- public void setResponseCachingEnabled(boolean isEnabled) {
- isResponseCachingEnabled = isEnabled;
- }
-
- public ResponseCache getReadResponseCache() {
- return isResponseCachingEnabled ? readResponseCache : null;
- }
-
- public ResponseCache getGetChildrenResponseCache() {
- return isResponseCachingEnabled ? getChildrenResponseCache : null;
- }
-
- protected void registerMetrics() {
- MetricsContext rootContext = ServerMetrics.getMetrics().getMetricsProvider().getRootContext();
-
- final ZKDatabase zkdb = this.getZKDatabase();
- final ServerStats stats = this.serverStats();
-
- rootContext.registerGauge("avg_latency", stats::getAvgLatency);
-
- rootContext.registerGauge("max_latency", stats::getMaxLatency);
- rootContext.registerGauge("min_latency", stats::getMinLatency);
-
- rootContext.registerGauge("packets_received", stats::getPacketsReceived);
- rootContext.registerGauge("packets_sent", stats::getPacketsSent);
- rootContext.registerGauge("num_alive_connections", stats::getNumAliveClientConnections);
-
- rootContext.registerGauge("outstanding_requests", stats::getOutstandingRequests);
- rootContext.registerGauge("uptime", stats::getUptime);
-
- rootContext.registerGauge("znode_count", zkdb::getNodeCount);
-
- rootContext.registerGauge("watch_count", zkdb.getDataTree()::getWatchCount);
- rootContext.registerGauge("ephemerals_count", zkdb.getDataTree()::getEphemeralsCount);
-
- rootContext.registerGauge("approximate_data_size", zkdb.getDataTree()::cachedApproximateDataSize);
-
- rootContext.registerGauge("global_sessions", zkdb::getSessionCount);
- rootContext.registerGauge("local_sessions", this.getSessionTracker()::getLocalSessionCount);
-
- OSMXBean osMbean = new OSMXBean();
- rootContext.registerGauge("open_file_descriptor_count", osMbean::getOpenFileDescriptorCount);
- rootContext.registerGauge("max_file_descriptor_count", osMbean::getMaxFileDescriptorCount);
- rootContext.registerGauge("connection_drop_probability", this::getConnectionDropChance);
-
- rootContext.registerGauge("last_client_response_size", stats.getClientResponseStats()::getLastBufferSize);
- rootContext.registerGauge("max_client_response_size", stats.getClientResponseStats()::getMaxBufferSize);
- rootContext.registerGauge("min_client_response_size", stats.getClientResponseStats()::getMinBufferSize);
-
- rootContext.registerGauge("outstanding_tls_handshake", this::getOutstandingHandshakeNum);
- rootContext.registerGauge("auth_failed_count", stats::getAuthFailedCount);
- rootContext.registerGauge("non_mtls_remote_conn_count", stats::getNonMTLSRemoteConnCount);
- rootContext.registerGauge("non_mtls_local_conn_count", stats::getNonMTLSLocalConnCount);
-
- rootContext.registerGaugeSet(QuotaMetricsUtils.QUOTA_COUNT_LIMIT_PER_NAMESPACE,
- () -> QuotaMetricsUtils.getQuotaCountLimit(zkDb.getDataTree()));
- rootContext.registerGaugeSet(QuotaMetricsUtils.QUOTA_BYTES_LIMIT_PER_NAMESPACE,
- () -> QuotaMetricsUtils.getQuotaBytesLimit(zkDb.getDataTree()));
- rootContext.registerGaugeSet(QuotaMetricsUtils.QUOTA_COUNT_USAGE_PER_NAMESPACE,
- () -> QuotaMetricsUtils.getQuotaCountUsage(zkDb.getDataTree()));
- rootContext.registerGaugeSet(QuotaMetricsUtils.QUOTA_BYTES_USAGE_PER_NAMESPACE,
- () -> QuotaMetricsUtils.getQuotaBytesUsage(zkDb.getDataTree()));
- }
-
- protected void unregisterMetrics() {
-
- MetricsContext rootContext = ServerMetrics.getMetrics().getMetricsProvider().getRootContext();
-
- rootContext.unregisterGauge("avg_latency");
-
- rootContext.unregisterGauge("max_latency");
- rootContext.unregisterGauge("min_latency");
-
- rootContext.unregisterGauge("packets_received");
- rootContext.unregisterGauge("packets_sent");
- rootContext.unregisterGauge("num_alive_connections");
-
- rootContext.unregisterGauge("outstanding_requests");
- rootContext.unregisterGauge("uptime");
-
- rootContext.unregisterGauge("znode_count");
-
- rootContext.unregisterGauge("watch_count");
- rootContext.unregisterGauge("ephemerals_count");
- rootContext.unregisterGauge("approximate_data_size");
-
- rootContext.unregisterGauge("global_sessions");
- rootContext.unregisterGauge("local_sessions");
-
- rootContext.unregisterGauge("open_file_descriptor_count");
- rootContext.unregisterGauge("max_file_descriptor_count");
- rootContext.unregisterGauge("connection_drop_probability");
-
- rootContext.unregisterGauge("last_client_response_size");
- rootContext.unregisterGauge("max_client_response_size");
- rootContext.unregisterGauge("min_client_response_size");
-
- rootContext.unregisterGauge("auth_failed_count");
- rootContext.unregisterGauge("non_mtls_remote_conn_count");
- rootContext.unregisterGauge("non_mtls_local_conn_count");
-
- rootContext.unregisterGaugeSet(QuotaMetricsUtils.QUOTA_COUNT_LIMIT_PER_NAMESPACE);
- rootContext.unregisterGaugeSet(QuotaMetricsUtils.QUOTA_BYTES_LIMIT_PER_NAMESPACE);
- rootContext.unregisterGaugeSet(QuotaMetricsUtils.QUOTA_COUNT_USAGE_PER_NAMESPACE);
- rootContext.unregisterGaugeSet(QuotaMetricsUtils.QUOTA_BYTES_USAGE_PER_NAMESPACE);
- }
-
-    /**
-     * Hook into the admin server, useful for exposing additional data
-     * that does not represent metrics.
-     *
-     * @param response a sink which collects the data.
-     */
- public void dumpMonitorValues(BiConsumer<String, Object> response) {
- ServerStats stats = serverStats();
- response.accept("version", Version.getFullVersion());
- response.accept("server_state", stats.getServerState());
- }
-
- /**
- * Grant or deny authorization to an operation on a node as a function of:
- * @param cnxn : the server connection or null for admin server commands
- * @param acl : set of ACLs for the node
- * @param perm : the permission that the client is requesting
- * @param ids : the credentials supplied by the client
- * @param path : the ZNode path
- * @param setAcls : for set ACL operations, the list of ACLs being set. Otherwise null.
- */
- public void checkACL(ServerCnxn cnxn, List<ACL> acl, int perm, List<Id> ids, String path, List<ACL> setAcls) throws KeeperException.NoAuthException {
- if (skipACL) {
- return;
- }
-
- LOG.debug("Permission requested: {} ", perm);
- LOG.debug("ACLs for node: {}", acl);
- LOG.debug("Client credentials: {}", ids);
-
- if (acl == null || acl.size() == 0) {
- return;
- }
- for (Id authId : ids) {
- if (authId.getScheme().equals("super")) {
- return;
- }
- }
- for (ACL a : acl) {
- Id id = a.getId();
- if ((a.getPerms() & perm) != 0) {
- if (id.getScheme().equals("world") && id.getId().equals("anyone")) {
- return;
- }
- ServerAuthenticationProvider ap = ProviderRegistry.getServerProvider(id.getScheme());
- if (ap != null) {
- for (Id authId : ids) {
- if (authId.getScheme().equals(id.getScheme())
- && ap.matches(
- new ServerAuthenticationProvider.ServerObjs(this, cnxn),
- new ServerAuthenticationProvider.MatchValues(path, authId.getId(), id.getId(), perm, setAcls))) {
- return;
- }
- }
- }
- }
- }
- throw new KeeperException.NoAuthException();
- }
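The heart of checkACL() is a bitmask test: an ACL entry applies only when the requested permission bit is present in its permission mask, after which the entry's scheme/id is matched against the caller's credentials. A minimal sketch of the bitmask part using the public ZooKeeper data classes (the world/anyone example values are illustrative):

import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Id;

class AclBitmaskSketch {

    // An ACL entry grants the requested operation when the requested permission bit
    // is set in its permission mask.
    static boolean grants(ACL a, int requestedPerm) {
        return (a.getPerms() & requestedPerm) != 0;
    }

    public static void main(String[] args) {
        ACL worldReadWrite = new ACL(ZooDefs.Perms.READ | ZooDefs.Perms.WRITE,
                                     new Id("world", "anyone"));
        System.out.println(grants(worldReadWrite, ZooDefs.Perms.WRITE));  // true
        System.out.println(grants(worldReadWrite, ZooDefs.Perms.DELETE)); // false
    }
}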
-
-    /**
-     * Check whether an operation on a path would exceed its quota.
-     *
-     * @param path
-     *            the path of the node, used for the quota prefix check
-     * @param lastData
-     *            the current node data, {@code null} for none
-     * @param data
-     *            the data to be set, or {@code null} for none
-     * @param type
-     *            the operation type; currently only create and setData are quota-checked
-     */
- public void checkQuota(String path, byte[] lastData, byte[] data, int type) throws KeeperException.QuotaExceededException {
- if (!enforceQuota) {
- return;
- }
- long dataBytes = (data == null) ? 0 : data.length;
- ZKDatabase zkDatabase = getZKDatabase();
- String lastPrefix = zkDatabase.getDataTree().getMaxPrefixWithQuota(path);
- if (StringUtils.isEmpty(lastPrefix)) {
- return;
- }
-
- final String namespace = PathUtils.getTopNamespace(path);
- switch (type) {
- case OpCode.create:
- checkQuota(lastPrefix, dataBytes, 1, namespace);
- break;
- case OpCode.setData:
- checkQuota(lastPrefix, dataBytes - (lastData == null ? 0 : lastData.length), 0, namespace);
- break;
- default:
- throw new IllegalArgumentException("Unsupported OpCode for checkQuota: " + type);
- }
- }
-
-    /**
-     * Check whether applying the given diffs to a quota-tracked path would exceed its quota.
-     *
-     * @param lastPrefix
-     *            the path of the node which has a quota
-     * @param bytesDiff
-     *            the diff to be added to the number of bytes
-     * @param countDiff
-     *            the diff to be added to the count
-     * @param namespace
-     *            the namespace for collecting quota-exceeded errors
-     */
- private void checkQuota(String lastPrefix, long bytesDiff, long countDiff, String namespace)
- throws KeeperException.QuotaExceededException {
- LOG.debug("checkQuota: lastPrefix={}, bytesDiff={}, countDiff={}", lastPrefix, bytesDiff, countDiff);
-
- // now check the quota we set
- String limitNode = Quotas.limitPath(lastPrefix);
- DataNode node = getZKDatabase().getNode(limitNode);
- StatsTrack limitStats;
- if (node == null) {
- // should not happen
- LOG.error("Missing limit node for quota {}", limitNode);
- return;
- }
- synchronized (node) {
- limitStats = new StatsTrack(node.data);
- }
- //check the quota
- boolean checkCountQuota = countDiff != 0 && (limitStats.getCount() > -1 || limitStats.getCountHardLimit() > -1);
- boolean checkByteQuota = bytesDiff != 0 && (limitStats.getBytes() > -1 || limitStats.getByteHardLimit() > -1);
-
- if (!checkCountQuota && !checkByteQuota) {
- return;
- }
-
- //check the statPath quota
- String statNode = Quotas.statPath(lastPrefix);
- node = getZKDatabase().getNode(statNode);
-
- StatsTrack currentStats;
- if (node == null) {
- // should not happen
- LOG.error("Missing node for stat {}", statNode);
- return;
- }
- synchronized (node) {
- currentStats = new StatsTrack(node.data);
- }
-
- //check the Count Quota
- if (checkCountQuota) {
- long newCount = currentStats.getCount() + countDiff;
- boolean isCountHardLimit = limitStats.getCountHardLimit() > -1;
- long countLimit = isCountHardLimit ? limitStats.getCountHardLimit() : limitStats.getCount();
-
- if (newCount > countLimit) {
- String msg = "Quota exceeded: " + lastPrefix + " [current count=" + newCount + ", " + (isCountHardLimit ? "hard" : "soft") + "CountLimit=" + countLimit + "]";
- RATE_LOGGER.rateLimitLog(msg);
- if (isCountHardLimit) {
- updateQuotaExceededMetrics(namespace);
- throw new KeeperException.QuotaExceededException(lastPrefix);
- }
- }
- }
-
- //check the Byte Quota
- if (checkByteQuota) {
- long newBytes = currentStats.getBytes() + bytesDiff;
- boolean isByteHardLimit = limitStats.getByteHardLimit() > -1;
- long byteLimit = isByteHardLimit ? limitStats.getByteHardLimit() : limitStats.getBytes();
- if (newBytes > byteLimit) {
- String msg = "Quota exceeded: " + lastPrefix + " [current bytes=" + newBytes + ", " + (isByteHardLimit ? "hard" : "soft") + "ByteLimit=" + byteLimit + "]";
- RATE_LOGGER.rateLimitLog(msg);
- if (isByteHardLimit) {
- updateQuotaExceededMetrics(namespace);
- throw new KeeperException.QuotaExceededException(lastPrefix);
- }
- }
- }
- }
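In the private checkQuota() above, exceeding a soft limit only produces a rate-limited log line, while exceeding a hard limit rejects the operation. A small sketch of that decision for the count branch (all numbers are illustrative; -1 means the corresponding limit is unset):

class QuotaCheckSketch {

    // Returns true when the operation should be rejected: only a hard limit rejects,
    // a soft limit merely logs.
    static boolean wouldReject(long currentCount, long countDiff, long softLimit, long hardLimit) {
        if (countDiff == 0 || (softLimit <= -1 && hardLimit <= -1)) {
            return false; // nothing to check
        }
        boolean isHard = hardLimit > -1;
        long limit = isHard ? hardLimit : softLimit;
        long newCount = currentCount + countDiff;
        if (newCount > limit) {
            System.out.println("Quota exceeded: count=" + newCount + ", "
                    + (isHard ? "hard" : "soft") + "CountLimit=" + limit);
            return isHard;
        }
        return false;
    }

    public static void main(String[] args) {
        System.out.println(wouldReject(99, 1, 100, -1));  // false: soft limit not exceeded
        System.out.println(wouldReject(100, 1, 100, -1)); // false: soft limit only logs
        System.out.println(wouldReject(100, 1, -1, 100)); // true: hard limit rejects
    }
}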
-
- public static boolean isDigestEnabled() {
- return digestEnabled;
- }
-
- public static void setDigestEnabled(boolean digestEnabled) {
- LOG.info("{} = {}", ZOOKEEPER_DIGEST_ENABLED, digestEnabled);
- ZooKeeperServer.digestEnabled = digestEnabled;
- }
-
- public static boolean isSerializeLastProcessedZxidEnabled() {
- return serializeLastProcessedZxidEnabled;
- }
-
- public static void setSerializeLastProcessedZxidEnabled(boolean serializeLastZxidEnabled) {
- serializeLastProcessedZxidEnabled = serializeLastZxidEnabled;
- LOG.info("{} = {}", ZOOKEEPER_SERIALIZE_LAST_PROCESSED_ZXID_ENABLED, serializeLastZxidEnabled);
- }
-
-    /**
-     * Trim a path to get its parent (immediate predecessor).
-     *
-     * @param path the path to trim
-     * @return the parent path, or "/" for a top-level node
-     * @throws KeeperException.BadArgumentsException if the path is malformed or special
-     */
- private String parentPath(String path) throws KeeperException.BadArgumentsException {
- int lastSlash = path.lastIndexOf('/');
- if (lastSlash == -1 || path.indexOf('\0') != -1 || getZKDatabase().isSpecialPath(path)) {
- throw new KeeperException.BadArgumentsException(path);
- }
- return lastSlash == 0 ? "/" : path.substring(0, lastSlash);
- }
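parentPath() simply trims at the last '/' and treats a single leading slash as the root. A tiny sketch of the same trimming, without the special-path validation done above:

class ParentPathSketch {

    static String parentPath(String path) {
        int lastSlash = path.lastIndexOf('/');
        if (lastSlash == -1 || path.indexOf('\0') != -1) {
            throw new IllegalArgumentException(path);
        }
        return lastSlash == 0 ? "/" : path.substring(0, lastSlash);
    }

    public static void main(String[] args) {
        System.out.println(parentPath("/a/b/c")); // /a/b
        System.out.println(parentPath("/a"));     // /
    }
}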
-
- private String effectiveACLPath(Request request) throws KeeperException.BadArgumentsException, KeeperException.InvalidACLException {
- boolean mustCheckACL = false;
- String path = null;
- List<ACL> acl = null;
-
- switch (request.type) {
- case OpCode.create:
- case OpCode.create2: {
- CreateRequest req = request.readRequestRecordNoException(CreateRequest::new);
- if (req != null) {
- mustCheckACL = true;
- acl = req.getAcl();
- path = parentPath(req.getPath());
- }
- break;
- }
- case OpCode.delete: {
- DeleteRequest req = request.readRequestRecordNoException(DeleteRequest::new);
- if (req != null) {
- path = parentPath(req.getPath());
- }
- break;
- }
- case OpCode.setData: {
- SetDataRequest req = request.readRequestRecordNoException(SetDataRequest::new);
- if (req != null) {
- path = req.getPath();
- }
- break;
- }
- case OpCode.setACL: {
- SetACLRequest req = request.readRequestRecordNoException(SetACLRequest::new);
- if (req != null) {
- mustCheckACL = true;
- acl = req.getAcl();
- path = req.getPath();
- }
- break;
- }
- }
-
- if (mustCheckACL) {
- /* we ignore the extrapolated ACL returned by fixupACL because
- * we only care about it being well-formed (and if it isn't, an
- * exception will be raised).
- */
- PrepRequestProcessor.fixupACL(path, request.authInfo, acl);
- }
-
- return path;
- }
-
- private int effectiveACLPerms(Request request) {
- switch (request.type) {
- case OpCode.create:
- case OpCode.create2:
- return ZooDefs.Perms.CREATE;
- case OpCode.delete:
- return ZooDefs.Perms.DELETE;
- case OpCode.setData:
- return ZooDefs.Perms.WRITE;
- case OpCode.setACL:
- return ZooDefs.Perms.ADMIN;
- default:
- return ZooDefs.Perms.ALL;
- }
- }
-
- /**
- * Check Write Requests for Potential Access Restrictions
- * <p>
- * Before a request is being proposed to the quorum, lets check it
- * against local ACLs. Non-write requests (read, session, etc.)
- * are passed along. Invalid requests are sent a response.
- * <p>
- * While we are at it, if the request will set an ACL: make sure it's
- * a valid one.
- *
- * @param request
- * @return true if request is permitted, false if not.
- */
- public boolean authWriteRequest(Request request) {
- int err;
- String pathToCheck;
-
- if (!enableEagerACLCheck) {
- return true;
- }
-
- err = KeeperException.Code.OK.intValue();
-
- try {
- pathToCheck = effectiveACLPath(request);
- if (pathToCheck != null) {
- checkACL(request.cnxn, zkDb.getACL(pathToCheck, null), effectiveACLPerms(request), request.authInfo, pathToCheck, null);
- }
- } catch (KeeperException.NoAuthException e) {
- LOG.debug("Request failed ACL check", e);
- err = e.code().intValue();
- } catch (KeeperException.InvalidACLException e) {
- LOG.debug("Request has an invalid ACL check", e);
- err = e.code().intValue();
- } catch (KeeperException.NoNodeException e) {
- LOG.debug("ACL check against non-existent node: {}", e.getMessage());
- } catch (KeeperException.BadArgumentsException e) {
- LOG.debug("ACL check against illegal node path: {}", e.getMessage());
- } catch (Throwable t) {
- LOG.error("Uncaught exception in authWriteRequest with: ", t);
- throw t;
- } finally {
- if (err != KeeperException.Code.OK.intValue()) {
- /* This request has a bad ACL, so we are dismissing it early. */
- decInProcess();
- ReplyHeader rh = new ReplyHeader(request.cxid, 0, err);
- try {
- request.cnxn.sendResponse(rh, null, null);
- } catch (IOException e) {
- LOG.error("IOException : {}", e);
- }
- }
- }
-
- return err == KeeperException.Code.OK.intValue();
- }
-
- public int getOutstandingHandshakeNum() {
- if (serverCnxnFactory instanceof NettyServerCnxnFactory) {
- return ((NettyServerCnxnFactory) serverCnxnFactory).getOutstandingHandshakeNum();
- } else {
- return 0;
- }
- }
-
- public boolean isReconfigEnabled() {
- return this.reconfigEnabled;
- }
-
- public ZooKeeperServerShutdownHandler getZkShutdownHandler() {
- return zkShutdownHandler;
- }
-
- static void updateQuotaExceededMetrics(final String namespace) {
- if (namespace == null) {
- return;
- }
- ServerMetrics.getMetrics().QUOTA_EXCEEDED_ERROR_PER_NAMESPACE.add(namespace, 1);
- }
-}
-
diff --git a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/quorum/LeaderZooKeeperServer.java b/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/quorum/LeaderZooKeeperServer.java
deleted file mode 100644
index 1f629bed73d..00000000000
--- a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/quorum/LeaderZooKeeperServer.java
+++ /dev/null
@@ -1,309 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.zookeeper.server.quorum;
-
-import java.io.IOException;
-import java.util.concurrent.TimeUnit;
-import java.util.function.Function;
-import javax.management.JMException;
-import org.apache.zookeeper.KeeperException.SessionExpiredException;
-import org.apache.zookeeper.jmx.MBeanRegistry;
-import org.apache.zookeeper.metrics.MetricsContext;
-import org.apache.zookeeper.server.ContainerManager;
-import org.apache.zookeeper.server.DataTreeBean;
-import org.apache.zookeeper.server.FinalRequestProcessor;
-import org.apache.zookeeper.server.PrepRequestProcessor;
-import org.apache.zookeeper.server.Request;
-import org.apache.zookeeper.server.RequestProcessor;
-import org.apache.zookeeper.server.ServerCnxn;
-import org.apache.zookeeper.server.ServerMetrics;
-import org.apache.zookeeper.server.ZKDatabase;
-import org.apache.zookeeper.server.persistence.FileTxnSnapLog;
-
-/**
- *
- * Just like the standard ZooKeeperServer. We just replace the request
- * processors: PrepRequestProcessor -&gt; ProposalRequestProcessor -&gt;
- * CommitProcessor -&gt; Leader.ToBeAppliedRequestProcessor -&gt;
- * FinalRequestProcessor
- */
-public class LeaderZooKeeperServer extends QuorumZooKeeperServer {
-
- private ContainerManager containerManager; // guarded by sync
-
- CommitProcessor commitProcessor;
-
- PrepRequestProcessor prepRequestProcessor;
-
- /**
- * @throws IOException
- */
- public LeaderZooKeeperServer(FileTxnSnapLog logFactory, QuorumPeer self, ZKDatabase zkDb) throws IOException {
- super(logFactory, self.tickTime, self.minSessionTimeout, self.maxSessionTimeout, self.clientPortListenBacklog, zkDb, self);
- }
-
- public Leader getLeader() {
- return self.leader;
- }
-
- @Override
- protected void setupRequestProcessors() {
- RequestProcessor finalProcessor = new FinalRequestProcessor(this);
- RequestProcessor toBeAppliedProcessor = new Leader.ToBeAppliedRequestProcessor(finalProcessor, getLeader());
- commitProcessor = new CommitProcessor(toBeAppliedProcessor, Long.toString(getServerId()), false, getZooKeeperServerListener());
- commitProcessor.start();
- ProposalRequestProcessor proposalProcessor = new ProposalRequestProcessor(this, commitProcessor);
- proposalProcessor.initialize();
- prepRequestProcessor = new PrepRequestProcessor(this, proposalProcessor);
- prepRequestProcessor.start();
- firstProcessor = new LeaderRequestProcessor(this, prepRequestProcessor);
-
- setupContainerManager();
- }
-
- private synchronized void setupContainerManager() {
- containerManager = new ContainerManager(
- getZKDatabase(),
- prepRequestProcessor,
- Integer.getInteger("znode.container.checkIntervalMs", (int) TimeUnit.MINUTES.toMillis(1)),
- Integer.getInteger("znode.container.maxPerMinute", 10000),
- Long.getLong("znode.container.maxNeverUsedIntervalMs", 0)
- );
- }
-
- @Override
- public synchronized void startup() {
- super.startup();
- if (containerManager != null) {
- containerManager.start();
- }
- }
-
- @Override
- protected void registerMetrics() {
- super.registerMetrics();
-
- MetricsContext rootContext = ServerMetrics.getMetrics().getMetricsProvider().getRootContext();
- rootContext.registerGauge("learners", gaugeWithLeader(
- (leader) -> leader.getLearners().size())
- );
- rootContext.registerGauge("synced_followers", gaugeWithLeader(
- (leader) -> leader.getForwardingFollowers().size()
- ));
- rootContext.registerGauge("synced_non_voting_followers", gaugeWithLeader(
- (leader) -> leader.getNonVotingFollowers().size()
- ));
- rootContext.registerGauge("synced_observers", self::getSynced_observers_metric);
- rootContext.registerGauge("pending_syncs", gaugeWithLeader(
- (leader) -> leader.getNumPendingSyncs()
- ));
- rootContext.registerGauge("leader_uptime", gaugeWithLeader(
- (leader) -> leader.getUptime()
- ));
- rootContext.registerGauge("last_proposal_size", gaugeWithLeader(
- (leader) -> leader.getProposalStats().getLastBufferSize()
- ));
- rootContext.registerGauge("max_proposal_size", gaugeWithLeader(
- (leader) -> leader.getProposalStats().getMaxBufferSize()
- ));
- rootContext.registerGauge("min_proposal_size", gaugeWithLeader(
- (leader) -> leader.getProposalStats().getMinBufferSize()
- ));
- }
-
- private org.apache.zookeeper.metrics.Gauge gaugeWithLeader(Function<Leader, Number> supplier) {
- return () -> {
- final Leader leader = getLeader();
- if (leader == null) {
- return null;
- }
- return supplier.apply(leader);
- };
- }
-
- @Override
- protected void unregisterMetrics() {
- super.unregisterMetrics();
-
- MetricsContext rootContext = ServerMetrics.getMetrics().getMetricsProvider().getRootContext();
- rootContext.unregisterGauge("learners");
- rootContext.unregisterGauge("synced_followers");
- rootContext.unregisterGauge("synced_non_voting_followers");
- rootContext.unregisterGauge("synced_observers");
- rootContext.unregisterGauge("pending_syncs");
- rootContext.unregisterGauge("leader_uptime");
-
- rootContext.unregisterGauge("last_proposal_size");
- rootContext.unregisterGauge("max_proposal_size");
- rootContext.unregisterGauge("min_proposal_size");
- }
-
- @Override
- public synchronized void shutdown(boolean fullyShutDown) {
- if (containerManager != null) {
- containerManager.stop();
- }
- super.shutdown(fullyShutDown);
- }
-
- @Override
- public int getGlobalOutstandingLimit() {
- int divisor = self.getQuorumSize() > 2 ? self.getQuorumSize() - 1 : 1;
- int globalOutstandingLimit = super.getGlobalOutstandingLimit() / divisor;
- return globalOutstandingLimit;
- }
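The override above divides the configured global outstanding limit by the number of other quorum members, so the ensemble as a whole stays near the configured ceiling. A small worked sketch (the 1000-request limit and quorum sizes are example values only):

class OutstandingLimitSketch {

    // Mirrors the leader's divisor logic: quorums larger than 2 split the limit
    // across the other members; smaller quorums keep the full limit.
    static int leaderLimit(int configuredLimit, int quorumSize) {
        int divisor = quorumSize > 2 ? quorumSize - 1 : 1;
        return configuredLimit / divisor;
    }

    public static void main(String[] args) {
        System.out.println(leaderLimit(1000, 5)); // 250
        System.out.println(leaderLimit(1000, 1)); // 1000
    }
}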
-
- @Override
- public void createSessionTracker() {
- sessionTracker = new LeaderSessionTracker(
- this,
- getZKDatabase().getSessionWithTimeOuts(),
- tickTime,
- self.getMyId(),
- self.areLocalSessionsEnabled(),
- getZooKeeperServerListener());
- }
-
- public boolean touch(long sess, int to) {
- return sessionTracker.touchSession(sess, to);
- }
-
- public boolean checkIfValidGlobalSession(long sess, int to) {
- if (self.areLocalSessionsEnabled() && !upgradeableSessionTracker.isGlobalSession(sess)) {
- return false;
- }
- return sessionTracker.touchSession(sess, to);
- }
-
- /**
- * Requests coming from the learner should go directly to
- * PrepRequestProcessor
- *
- * @param request
- */
- public void submitLearnerRequest(Request request) {
-        /*
-         * Requests coming from the learner should have gone through
-         * submitRequest() on each server, which already performs some request
-         * validation, so we don't need to do it again.
-         *
-         * Additionally, LearnerHandler should start submitting requests into
-         * the leader's pipeline only when the leader's server is started, so we
-         * can submit the request directly into PrepRequestProcessor.
-         *
-         * This is done so that requests from learners won't go through
-         * LeaderRequestProcessor, which performs local session upgrades.
-         */
- prepRequestProcessor.processRequest(request);
- }
-
- @Override
- protected void registerJMX() {
- // register with JMX
- try {
- jmxDataTreeBean = new DataTreeBean(getZKDatabase().getDataTree());
- MBeanRegistry.getInstance().register(jmxDataTreeBean, jmxServerBean);
- } catch (Exception e) {
- LOG.warn("Failed to register with JMX", e);
- jmxDataTreeBean = null;
- }
- }
-
- public void registerJMX(LeaderBean leaderBean, LocalPeerBean localPeerBean) {
- // register with JMX
- if (self.jmxLeaderElectionBean != null) {
- try {
- MBeanRegistry.getInstance().unregister(self.jmxLeaderElectionBean);
- } catch (Exception e) {
- LOG.warn("Failed to register with JMX", e);
- }
- self.jmxLeaderElectionBean = null;
- }
-
- try {
- jmxServerBean = leaderBean;
- MBeanRegistry.getInstance().register(leaderBean, localPeerBean);
- } catch (Exception e) {
- LOG.warn("Failed to register with JMX", e);
- jmxServerBean = null;
- }
- }
-
- boolean registerJMX(LearnerHandlerBean handlerBean) {
- try {
- MBeanRegistry.getInstance().register(handlerBean, jmxServerBean);
- return true;
- } catch (JMException e) {
- LOG.warn("Could not register connection", e);
- }
- return false;
- }
-
- @Override
- protected void unregisterJMX() {
- // unregister from JMX
- try {
- if (jmxDataTreeBean != null) {
- MBeanRegistry.getInstance().unregister(jmxDataTreeBean);
- }
- } catch (Exception e) {
- LOG.warn("Failed to unregister with JMX", e);
- }
- jmxDataTreeBean = null;
- }
-
- protected void unregisterJMX(Leader leader) {
- // unregister from JMX
- try {
- if (jmxServerBean != null) {
- MBeanRegistry.getInstance().unregister(jmxServerBean);
- }
- } catch (Exception e) {
- LOG.warn("Failed to unregister with JMX", e);
- }
- jmxServerBean = null;
- }
-
- @Override
- public String getState() {
- return "leader";
- }
-
-    /**
-     * Returns the id of the associated QuorumPeer, which also serves as a
-     * unique id for this server.
-     */
- @Override
- public long getServerId() {
- return self.getMyId();
- }
-
- @Override
- protected void revalidateSession(ServerCnxn cnxn, long sessionId, int sessionTimeout) throws IOException {
- super.revalidateSession(cnxn, sessionId, sessionTimeout);
- try {
- // setowner as the leader itself, unless updated
- // via the follower handlers
- setOwner(sessionId, ServerCnxn.me);
- } catch (SessionExpiredException e) {
- // this is ok, it just means that the session revalidation failed.
- }
- }
-
-}
diff --git a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/quorum/Learner.java b/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/quorum/Learner.java
deleted file mode 100644
index 3c7b2148400..00000000000
--- a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/quorum/Learner.java
+++ /dev/null
@@ -1,928 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.zookeeper.server.quorum;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import java.io.BufferedInputStream;
-import java.io.BufferedOutputStream;
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.net.Socket;
-import java.nio.ByteBuffer;
-import java.util.ArrayDeque;
-import java.util.Deque;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicReference;
-import javax.net.ssl.SSLSocket;
-import org.apache.jute.BinaryInputArchive;
-import org.apache.jute.BinaryOutputArchive;
-import org.apache.jute.InputArchive;
-import org.apache.jute.OutputArchive;
-import org.apache.jute.Record;
-import org.apache.zookeeper.ZooDefs.OpCode;
-import org.apache.zookeeper.common.Time;
-import org.apache.zookeeper.common.X509Exception;
-import org.apache.zookeeper.server.ExitCode;
-import org.apache.zookeeper.server.Request;
-import org.apache.zookeeper.server.ServerCnxn;
-import org.apache.zookeeper.server.ServerMetrics;
-import org.apache.zookeeper.server.TxnLogEntry;
-import org.apache.zookeeper.server.ZooTrace;
-import org.apache.zookeeper.server.quorum.QuorumPeer.QuorumServer;
-import org.apache.zookeeper.server.quorum.flexible.QuorumVerifier;
-import org.apache.zookeeper.server.util.ConfigUtils;
-import org.apache.zookeeper.server.util.MessageTracker;
-import org.apache.zookeeper.server.util.SerializeUtils;
-import org.apache.zookeeper.server.util.ZxidUtils;
-import org.apache.zookeeper.txn.SetDataTxn;
-import org.apache.zookeeper.txn.TxnDigest;
-import org.apache.zookeeper.txn.TxnHeader;
-import org.apache.zookeeper.util.ServiceUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class is the superclass of two of the three main actors in a ZK
- * ensemble: Followers and Observers. Both Followers and Observers share
- * a good deal of code which is moved into Peer to avoid duplication.
- */
-public class Learner {
-
- static class PacketInFlight {
-
- TxnHeader hdr;
- Record rec;
- TxnDigest digest;
-
- }
-
- QuorumPeer self;
- LearnerZooKeeperServer zk;
-
- protected BufferedOutputStream bufferedOutput;
-
- protected Socket sock;
- protected MultipleAddresses leaderAddr;
- protected AtomicBoolean sockBeingClosed = new AtomicBoolean(false);
-
- /**
- * Socket getter
- */
- public Socket getSocket() {
- return sock;
- }
-
- LearnerSender sender = null;
- protected InputArchive leaderIs;
- protected OutputArchive leaderOs;
- /** the protocol version of the leader */
- protected int leaderProtocolVersion = 0x01;
-
- private static final int BUFFERED_MESSAGE_SIZE = 10;
- protected final MessageTracker messageTracker = new MessageTracker(BUFFERED_MESSAGE_SIZE);
-
- protected static final Logger LOG = LoggerFactory.getLogger(Learner.class);
-
- /**
- * Time to wait after connection attempt with the Leader or LearnerMaster before this
- * Learner tries to connect again.
- */
- private static final int leaderConnectDelayDuringRetryMs = Integer.getInteger("zookeeper.leaderConnectDelayDuringRetryMs", 100);
-
- private static final boolean nodelay = System.getProperty("follower.nodelay", "true").equals("true");
-
- public static final String LEARNER_ASYNC_SENDING = "zookeeper.learner.asyncSending";
- private static boolean asyncSending =
- Boolean.parseBoolean(ConfigUtils.getPropertyBackwardCompatibleWay(LEARNER_ASYNC_SENDING));
- public static final String LEARNER_CLOSE_SOCKET_ASYNC = "zookeeper.learner.closeSocketAsync";
- public static final boolean closeSocketAsync = Boolean
- .parseBoolean(ConfigUtils.getPropertyBackwardCompatibleWay(LEARNER_CLOSE_SOCKET_ASYNC));
-
- static {
- LOG.info("leaderConnectDelayDuringRetryMs: {}", leaderConnectDelayDuringRetryMs);
- LOG.info("TCP NoDelay set to: {}", nodelay);
- LOG.info("{} = {}", LEARNER_ASYNC_SENDING, asyncSending);
- LOG.info("{} = {}", LEARNER_CLOSE_SOCKET_ASYNC, closeSocketAsync);
- }
-
- final ConcurrentHashMap<Long, ServerCnxn> pendingRevalidations = new ConcurrentHashMap<>();
-
- public int getPendingRevalidationsCount() {
- return pendingRevalidations.size();
- }
-
- // for testing
-    protected static void setAsyncSending(boolean newMode) {
-        asyncSending = newMode;
-        LOG.info("{} = {}", LEARNER_ASYNC_SENDING, asyncSending);
-    }
-
-    protected static boolean getAsyncSending() {
-        return asyncSending;
-    }
-    /**
- * validate a session for a client
- *
- * @param clientId
- * the client to be revalidated
- * @param timeout
- * the timeout for which the session is valid
- * @throws IOException
- */
- void validateSession(ServerCnxn cnxn, long clientId, int timeout) throws IOException {
- LOG.info("Revalidating client: 0x{}", Long.toHexString(clientId));
- ByteArrayOutputStream baos = new ByteArrayOutputStream();
- DataOutputStream dos = new DataOutputStream(baos);
- dos.writeLong(clientId);
- dos.writeInt(timeout);
- dos.close();
- QuorumPacket qp = new QuorumPacket(Leader.REVALIDATE, -1, baos.toByteArray(), null);
- pendingRevalidations.put(clientId, cnxn);
- if (LOG.isTraceEnabled()) {
- ZooTrace.logTraceMessage(
- LOG,
- ZooTrace.SESSION_TRACE_MASK,
- "To validate session 0x" + Long.toHexString(clientId));
- }
- writePacket(qp, true);
- }
-
- /**
- * write a packet to the leader.
- *
- * This method is called by multiple threads. We need to make sure that only one thread is writing to leaderOs at a time.
- * When packets are sent synchronously, writing is done within a synchronization block.
- * When packets are sent asynchronously, sender.queuePacket() is called, which writes to a BlockingQueue, which is thread-safe.
-     * Only the learner sender thread reads from this BlockingQueue and writes to leaderOs.
- * So we have only one thread writing to leaderOs at a time in either case.
- *
- * @param pp
- * the proposal packet to be sent to the leader
- * @throws IOException
- */
- void writePacket(QuorumPacket pp, boolean flush) throws IOException {
- if (asyncSending) {
- sender.queuePacket(pp);
- } else {
- writePacketNow(pp, flush);
- }
- }
-
- void writePacketNow(QuorumPacket pp, boolean flush) throws IOException {
- synchronized (leaderOs) {
- if (pp != null) {
- messageTracker.trackSent(pp.getType());
- leaderOs.writeRecord(pp, "packet");
- }
- if (flush) {
- bufferedOutput.flush();
- }
- }
- }
-
- /**
- * Start thread that will forward any packet in the queue to the leader
- */
- protected void startSendingThread() {
- sender = new LearnerSender(this);
- sender.start();
- }
-
- /**
- * read a packet from the leader
- *
- * @param pp
- * the packet to be instantiated
- * @throws IOException
- */
- void readPacket(QuorumPacket pp) throws IOException {
- synchronized (leaderIs) {
- leaderIs.readRecord(pp, "packet");
- messageTracker.trackReceived(pp.getType());
- }
- if (LOG.isTraceEnabled()) {
- final long traceMask =
- (pp.getType() == Leader.PING) ? ZooTrace.SERVER_PING_TRACE_MASK
- : ZooTrace.SERVER_PACKET_TRACE_MASK;
-
- ZooTrace.logQuorumPacket(LOG, traceMask, 'i', pp);
- }
- }
-
- /**
- * send a request packet to the leader
- *
- * @param request
- * the request from the client
- * @throws IOException
- */
- void request(Request request) throws IOException {
- if (request.isThrottled()) {
- LOG.error("Throttled request sent to leader: {}. Exiting", request);
- ServiceUtils.requestSystemExit(ExitCode.UNEXPECTED_ERROR.getValue());
- }
- ByteArrayOutputStream baos = new ByteArrayOutputStream();
- DataOutputStream oa = new DataOutputStream(baos);
- oa.writeLong(request.sessionId);
- oa.writeInt(request.cxid);
- oa.writeInt(request.type);
- byte[] payload = request.readRequestBytes();
- if (payload != null) {
- oa.write(payload);
- }
- oa.close();
- QuorumPacket qp = new QuorumPacket(Leader.REQUEST, -1, baos.toByteArray(), request.authInfo);
- writePacket(qp, true);
- }
-
- /**
- * Returns the address of the node we think is the leader.
- */
- protected QuorumServer findLeader() {
- QuorumServer leaderServer = null;
- // Find the leader by id
- Vote current = self.getCurrentVote();
- for (QuorumServer s : self.getView().values()) {
- if (s.id == current.getId()) {
- // Ensure we have the leader's correct IP address before
- // attempting to connect.
- s.recreateSocketAddresses();
- leaderServer = s;
- break;
- }
- }
- if (leaderServer == null) {
- LOG.warn("Couldn't find the leader with id = {}", current.getId());
- }
- return leaderServer;
- }
-
- /**
-     * Overridable helper method to return System.nanoTime().
-     * This method behaves identically to System.nanoTime().
- */
- protected long nanoTime() {
- return System.nanoTime();
- }
-
- /**
- * Overridable helper method to simply call sock.connect(). This can be
-     * overridden in tests to fake connection success/failure for connectToLeader.
- */
- protected void sockConnect(Socket sock, InetSocketAddress addr, int timeout) throws IOException {
- sock.connect(addr, timeout);
- }
-
- /**
- * Establish a connection with the LearnerMaster found by findLearnerMaster.
-     * Followers only connect to Leaders; Observers can connect to any active LearnerMaster.
-     * Retries until either initLimit time has elapsed or 5 tries have happened.
-     * @param multiAddr - the address of the Peer to connect to.
-     * @throws IOException - if the socket connection fails on the 5th attempt,
-     * or if there is an authentication failure while connecting to the leader
- */
- protected void connectToLeader(MultipleAddresses multiAddr, String hostname) throws IOException {
-
- this.leaderAddr = multiAddr;
- Set<InetSocketAddress> addresses;
- if (self.isMultiAddressReachabilityCheckEnabled()) {
- // even if none of the addresses are reachable, we want to try to establish connection
- // see ZOOKEEPER-3758
- addresses = multiAddr.getAllReachableAddressesOrAll();
- } else {
- addresses = multiAddr.getAllAddresses();
- }
- ExecutorService executor = Executors.newFixedThreadPool(addresses.size());
- CountDownLatch latch = new CountDownLatch(addresses.size());
- AtomicReference<Socket> socket = new AtomicReference<>(null);
- addresses.stream().map(address -> new LeaderConnector(address, socket, latch)).forEach(executor::submit);
-
- try {
- latch.await();
- } catch (InterruptedException e) {
- LOG.warn("Interrupted while trying to connect to Leader", e);
- } finally {
- executor.shutdown();
- try {
- if (!executor.awaitTermination(1, TimeUnit.SECONDS)) {
- LOG.error("not all the LeaderConnector terminated properly");
- }
- } catch (InterruptedException ie) {
- LOG.error("Interrupted while terminating LeaderConnector executor.", ie);
- }
- }
-
- if (socket.get() == null) {
- throw new IOException("Failed connect to " + multiAddr);
- } else {
- sock = socket.get();
- sockBeingClosed.set(false);
- }
-
- self.authLearner.authenticate(sock, hostname);
-
- leaderIs = BinaryInputArchive.getArchive(new BufferedInputStream(sock.getInputStream()));
- bufferedOutput = new BufferedOutputStream(sock.getOutputStream());
- leaderOs = BinaryOutputArchive.getArchive(bufferedOutput);
- if (asyncSending) {
- startSendingThread();
- }
- }
-
- class LeaderConnector implements Runnable {
-
- private AtomicReference<Socket> socket;
- private InetSocketAddress address;
- private CountDownLatch latch;
-
- LeaderConnector(InetSocketAddress address, AtomicReference<Socket> socket, CountDownLatch latch) {
- this.address = address;
- this.socket = socket;
- this.latch = latch;
- }
-
- @Override
- public void run() {
- try {
- Thread.currentThread().setName("LeaderConnector-" + address);
- Socket sock = connectToLeader();
-
- if (sock != null && sock.isConnected()) {
- if (socket.compareAndSet(null, sock)) {
- LOG.info("Successfully connected to leader, using address: {}", address);
- } else {
- LOG.info("Connection to the leader is already established, close the redundant connection");
- sock.close();
- }
- }
-
- } catch (Exception e) {
- LOG.error("Failed connect to {}", address, e);
- } finally {
- latch.countDown();
- }
- }
-
- private Socket connectToLeader() throws IOException, X509Exception, InterruptedException {
- Socket sock = createSocket();
-
- // leader connection timeout defaults to tickTime * initLimit
- int connectTimeout = self.tickTime * self.initLimit;
-
- // but if connectToLearnerMasterLimit is specified, use that value to calculate
- // timeout instead of using the initLimit value
- if (self.connectToLearnerMasterLimit > 0) {
- connectTimeout = self.tickTime * self.connectToLearnerMasterLimit;
- }
-
- int remainingTimeout;
- long startNanoTime = nanoTime();
-
- for (int tries = 0; tries < 5 && socket.get() == null; tries++) {
- try {
-                    // recalculate the remaining timeout, since time is also spent sleeping between retries
- remainingTimeout = connectTimeout - (int) ((nanoTime() - startNanoTime) / 1_000_000);
- if (remainingTimeout <= 0) {
- LOG.error("connectToLeader exceeded on retries.");
- throw new IOException("connectToLeader exceeded on retries.");
- }
-
- sockConnect(sock, address, Math.min(connectTimeout, remainingTimeout));
- if (self.isSslQuorum()) {
- ((SSLSocket) sock).startHandshake();
- }
- sock.setTcpNoDelay(nodelay);
- break;
- } catch (IOException e) {
- remainingTimeout = connectTimeout - (int) ((nanoTime() - startNanoTime) / 1_000_000);
-
- if (remainingTimeout <= leaderConnectDelayDuringRetryMs) {
- LOG.error(
- "Unexpected exception, connectToLeader exceeded. tries={}, remaining init limit={}, connecting to {}",
- tries,
- remainingTimeout,
- address,
- e);
- throw e;
- } else if (tries >= 4) {
- LOG.error(
- "Unexpected exception, retries exceeded. tries={}, remaining init limit={}, connecting to {}",
- tries,
- remainingTimeout,
- address,
- e);
- throw e;
- } else {
- LOG.warn(
- "Unexpected exception, tries={}, remaining init limit={}, connecting to {}",
- tries,
- remainingTimeout,
- address,
- e);
- sock = createSocket();
- }
- }
- Thread.sleep(leaderConnectDelayDuringRetryMs);
- }
-
- return sock;
- }
- }
-
- /**
-     * Creates either a plain or an SSL socket.
- * This can be overridden in tests to fake already connected sockets for connectToLeader.
- */
- protected Socket createSocket() throws X509Exception, IOException {
- Socket sock;
- if (self.isSslQuorum()) {
- sock = self.getX509Util().createSSLSocket();
- } else {
- sock = new Socket();
- }
- sock.setSoTimeout(self.tickTime * self.initLimit);
- return sock;
- }
-
- /**
- * Once connected to the leader or learner master, perform the handshake
- * protocol to establish a following / observing connection.
- * @param pktType
- * @return the zxid the Leader sends for synchronization purposes.
- * @throws IOException
- */
- protected long registerWithLeader(int pktType) throws IOException {
- /*
- * Send follower info, including last zxid and sid
- */
- long lastLoggedZxid = self.getLastLoggedZxid();
- QuorumPacket qp = new QuorumPacket();
- qp.setType(pktType);
- qp.setZxid(ZxidUtils.makeZxid(self.getAcceptedEpoch(), 0));
-
- /*
- * Add sid to payload
- */
- LearnerInfo li = new LearnerInfo(self.getMyId(), 0x10000, self.getQuorumVerifier().getVersion());
- ByteArrayOutputStream bsid = new ByteArrayOutputStream();
- BinaryOutputArchive boa = BinaryOutputArchive.getArchive(bsid);
- boa.writeRecord(li, "LearnerInfo");
- qp.setData(bsid.toByteArray());
-
- writePacket(qp, true);
- readPacket(qp);
- final long newEpoch = ZxidUtils.getEpochFromZxid(qp.getZxid());
- if (qp.getType() == Leader.LEADERINFO) {
- // we are connected to a 1.0 server so accept the new epoch and read the next packet
- leaderProtocolVersion = ByteBuffer.wrap(qp.getData()).getInt();
- byte[] epochBytes = new byte[4];
- final ByteBuffer wrappedEpochBytes = ByteBuffer.wrap(epochBytes);
- if (newEpoch > self.getAcceptedEpoch()) {
- wrappedEpochBytes.putInt((int) self.getCurrentEpoch());
- self.setAcceptedEpoch(newEpoch);
- } else if (newEpoch == self.getAcceptedEpoch()) {
- // since we have already acked an epoch equal to the leaders, we cannot ack
- // again, but we still need to send our lastZxid to the leader so that we can
- // sync with it if it does assume leadership of the epoch.
- // the -1 indicates that this reply should not count as an ack for the new epoch
- wrappedEpochBytes.putInt(-1);
- } else {
- throw new IOException("Leaders epoch, "
- + newEpoch
- + " is less than accepted epoch, "
- + self.getAcceptedEpoch());
- }
- QuorumPacket ackNewEpoch = new QuorumPacket(Leader.ACKEPOCH, lastLoggedZxid, epochBytes, null);
- writePacket(ackNewEpoch, true);
- return ZxidUtils.makeZxid(newEpoch, 0);
- } else {
- if (newEpoch > self.getAcceptedEpoch()) {
- self.setAcceptedEpoch(newEpoch);
- }
- if (qp.getType() != Leader.NEWLEADER) {
- LOG.error("First packet should have been NEWLEADER");
- throw new IOException("First packet should have been NEWLEADER");
- }
- return qp.getZxid();
- }
- }
-
- /**
- * Finally, synchronize our history with the Leader (if Follower)
- * or the LearnerMaster (if Observer).
- * @param newLeaderZxid
- * @throws IOException
- * @throws InterruptedException
- */
- protected void syncWithLeader(long newLeaderZxid) throws Exception {
- long newEpoch = ZxidUtils.getEpochFromZxid(newLeaderZxid);
- QuorumVerifier newLeaderQV = null;
-
- class SyncHelper {
-
- // In the DIFF case we don't need to do a snapshot because the transactions will sync on top of any existing snapshot.
- // For SNAP and TRUNC the snapshot is needed to save that history.
- boolean willSnapshot = true;
- boolean syncSnapshot = false;
-
- // PROPOSALs received during sync, for matching up with COMMITs.
- Deque<PacketInFlight> proposals = new ArrayDeque<>();
-
- // PROPOSALs we delay forwarding to the ZK server until sync is done.
- Deque<PacketInFlight> delayedProposals = new ArrayDeque<>();
-
- // COMMITs we delay forwarding to the ZK server until sync is done.
- Deque<Long> delayedCommits = new ArrayDeque<>();
-
- void syncSnapshot() {
- syncSnapshot = true;
- }
-
- void noSnapshot() {
- willSnapshot = false;
- }
-
- void propose(PacketInFlight pif) {
- proposals.add(pif);
- delayedProposals.add(pif);
- }
-
- PacketInFlight nextProposal() {
- return proposals.peekFirst();
- }
-
- void commit() {
- PacketInFlight packet = proposals.remove();
- if (willSnapshot) {
- zk.processTxn(packet.hdr, packet.rec);
- delayedProposals.remove();
- } else {
- delayedCommits.add(packet.hdr.getZxid());
- }
- }
-
- void writeState() throws IOException, InterruptedException {
- // Ensure all received transaction PROPOSALs are written before we ACK the NEWLEADER,
- // since this allows the leader to apply those transactions to its served state:
- if (willSnapshot) {
- zk.takeSnapshot(syncSnapshot); // either, the snapshot contains the transactions,
- willSnapshot = false; // but anything after this needs to go to the transaction log; or
- }
-
- sock.setSoTimeout(self.tickTime * self.syncLimit);
- self.setSyncMode(QuorumPeer.SyncMode.NONE);
- zk.startupWithoutServing();
-
- // if we're a follower, we need to ensure the transactions are safely logged before ACK'ing.
- if (zk instanceof FollowerZooKeeperServer) {
- FollowerZooKeeperServer fzk = (FollowerZooKeeperServer) zk;
-                    // The leader expects the NEWLEADER ACK to precede all the PROPOSAL ACKs, so we only log the proposals here and delay forwarding their ACKs.
- fzk.syncProcessor.setDelayForwarding(true);
- for (PacketInFlight p : delayedProposals) {
- fzk.logRequest(p.hdr, p.rec, p.digest);
- }
- delayedProposals.clear();
- fzk.syncProcessor.syncFlush();
- }
-
- self.setCurrentEpoch(newEpoch);
- }
-
- void flushAcks() throws InterruptedException {
- if (zk instanceof FollowerZooKeeperServer) {
- // The NEWLEADER is ACK'ed, and we can now ACK the PROPOSALs we wrote in writeState.
- FollowerZooKeeperServer fzk = (FollowerZooKeeperServer) zk;
- fzk.syncProcessor.setDelayForwarding(false);
- fzk.syncProcessor.syncFlush(); // Ensure these are all ACK'ed before the UPTODATE ACK.
- }
- }
-
- void applyDelayedPackets() {
- // Any delayed packets must now be applied: all PROPOSALs first, then any COMMITs.
- if (zk instanceof FollowerZooKeeperServer) {
- FollowerZooKeeperServer fzk = (FollowerZooKeeperServer) zk;
- for (PacketInFlight p : delayedProposals) {
- fzk.logRequest(p.hdr, p.rec, p.digest);
- }
- for (Long zxid : delayedCommits) {
- fzk.commit(zxid);
- }
- } else if (zk instanceof ObserverZooKeeperServer) {
- ObserverZooKeeperServer ozk = (ObserverZooKeeperServer) zk;
- for (PacketInFlight p : delayedProposals) {
- Long zxid = delayedCommits.peekFirst();
- if (p.hdr.getZxid() != zxid) {
-                        // Log a warning message if there is no matching commit:
-                        // the old leader sent an outstanding proposal to the observer.
- LOG.warn(
- "Committing 0x{}, but next proposal is 0x{}",
- Long.toHexString(zxid),
- Long.toHexString(p.hdr.getZxid()));
- continue;
- }
- delayedCommits.remove();
- Request request = new Request(p.hdr.getClientId(), p.hdr.getCxid(), p.hdr.getType(), p.hdr, p.rec, -1);
- request.setTxnDigest(p.digest);
- ozk.commitRequest(request);
- }
- } else {
-                // A new server type needs to handle in-flight packets
- throw new UnsupportedOperationException("Unknown server type");
- }
- }
-
- }
-
- SyncHelper helper = new SyncHelper();
- QuorumPacket qp = new QuorumPacket();
- readPacket(qp);
- synchronized (zk) {
- if (qp.getType() == Leader.DIFF) {
- LOG.info("Getting a diff from the leader 0x{}", Long.toHexString(qp.getZxid()));
- self.setSyncMode(QuorumPeer.SyncMode.DIFF);
- if (zk.shouldForceWriteInitialSnapshotAfterLeaderElection()) {
- LOG.info("Forcing a snapshot write as part of upgrading from an older Zookeeper. This should only happen while upgrading.");
- helper.syncSnapshot();
- } else {
- helper.noSnapshot();
- }
- } else if (qp.getType() == Leader.SNAP) {
- self.setSyncMode(QuorumPeer.SyncMode.SNAP);
- LOG.info("Getting a snapshot from leader 0x{}", Long.toHexString(qp.getZxid()));
- // The leader is going to dump the database
- zk.getZKDatabase().deserializeSnapshot(leaderIs);
- // ZOOKEEPER-2819: overwrite config node content extracted
- // from leader snapshot with local config, to avoid potential
- // inconsistency of config node content during rolling restart.
- if (!self.isReconfigEnabled()) {
- LOG.debug("Reset config node content from local config after deserialization of snapshot.");
- zk.getZKDatabase().initConfigInZKDatabase(self.getQuorumVerifier());
- }
- String signature = leaderIs.readString("signature");
- if (!signature.equals("BenWasHere")) {
- LOG.error("Missing signature. Got {}", signature);
- throw new IOException("Missing signature");
- }
- zk.getZKDatabase().setlastProcessedZxid(qp.getZxid());
-
- // Immediately persist the latest snapshot when there is txn log gap
- helper.syncSnapshot();
- } else if (qp.getType() == Leader.TRUNC) {
- // We need to truncate the log to the lastZxid of the leader
- self.setSyncMode(QuorumPeer.SyncMode.TRUNC);
- LOG.warn("Truncating log to get in sync with the leader 0x{}", Long.toHexString(qp.getZxid()));
- boolean truncated = zk.getZKDatabase().truncateLog(qp.getZxid());
- if (!truncated) {
- LOG.error("Not able to truncate the log 0x{}", Long.toHexString(qp.getZxid()));
- ServiceUtils.requestSystemExit(ExitCode.QUORUM_PACKET_ERROR.getValue());
- }
- zk.getZKDatabase().setlastProcessedZxid(qp.getZxid());
- } else {
- LOG.error("Got unexpected packet from leader: {}, exiting ... ", LearnerHandler.packetToString(qp));
- ServiceUtils.requestSystemExit(ExitCode.QUORUM_PACKET_ERROR.getValue());
- }
- zk.getZKDatabase().initConfigInZKDatabase(self.getQuorumVerifier());
- zk.createSessionTracker();
-
-
- // we are now going to start getting transactions to apply followed by an UPTODATE
- long lastQueued = 0;
- TxnLogEntry logEntry;
- outerLoop:
- while (self.isRunning()) {
- readPacket(qp);
- switch (qp.getType()) {
- case Leader.PROPOSAL:
- PacketInFlight pif = new PacketInFlight();
- logEntry = SerializeUtils.deserializeTxn(qp.getData());
- pif.hdr = logEntry.getHeader();
- pif.rec = logEntry.getTxn();
- pif.digest = logEntry.getDigest();
- if (pif.hdr.getZxid() != lastQueued + 1) {
- LOG.warn(
- "Got zxid 0x{} expected 0x{}",
- Long.toHexString(pif.hdr.getZxid()),
- Long.toHexString(lastQueued + 1));
- }
- lastQueued = pif.hdr.getZxid();
-
- if (pif.hdr.getType() == OpCode.reconfig) {
- SetDataTxn setDataTxn = (SetDataTxn) pif.rec;
- QuorumVerifier qv = self.configFromString(new String(setDataTxn.getData(), UTF_8));
- self.setLastSeenQuorumVerifier(qv, true);
- }
- helper.propose(pif);
- break;
- case Leader.COMMIT:
- case Leader.COMMITANDACTIVATE:
- pif = helper.nextProposal();
- if (pif.hdr.getZxid() != qp.getZxid()) {
- LOG.warn(
- "Committing 0x{}, but next proposal is 0x{}",
- Long.toHexString(qp.getZxid()),
- Long.toHexString(pif.hdr.getZxid()));
- } else {
- if (qp.getType() == Leader.COMMITANDACTIVATE) {
- tryReconfig(pif, ByteBuffer.wrap(qp.getData()).getLong(), qp.getZxid());
- }
- helper.commit();
- }
- break;
- case Leader.INFORM:
- case Leader.INFORMANDACTIVATE:
- PacketInFlight packet = new PacketInFlight();
- if (qp.getType() == Leader.INFORMANDACTIVATE) {
- ByteBuffer buffer = ByteBuffer.wrap(qp.getData());
- long suggestedLeaderId = buffer.getLong();
- byte[] remainingData = new byte[buffer.remaining()];
- buffer.get(remainingData);
- logEntry = SerializeUtils.deserializeTxn(remainingData);
- packet.hdr = logEntry.getHeader();
- packet.rec = logEntry.getTxn();
- packet.digest = logEntry.getDigest();
- tryReconfig(packet, suggestedLeaderId, qp.getZxid());
- } else {
- logEntry = SerializeUtils.deserializeTxn(qp.getData());
- packet.rec = logEntry.getTxn();
- packet.hdr = logEntry.getHeader();
- packet.digest = logEntry.getDigest();
- // Log warning message if txn comes out-of-order
- if (packet.hdr.getZxid() != lastQueued + 1) {
- LOG.warn(
- "Got zxid 0x{} expected 0x{}",
- Long.toHexString(packet.hdr.getZxid()),
- Long.toHexString(lastQueued + 1));
- }
- lastQueued = packet.hdr.getZxid();
- }
- helper.propose(packet);
- helper.commit();
- break;
- case Leader.UPTODATE:
- LOG.info("Learner received UPTODATE message");
- if (newLeaderQV != null) {
- boolean majorChange = self.processReconfig(newLeaderQV, null, null, true);
- if (majorChange) {
- throw new Exception("changes proposed in reconfig");
- }
- }
- helper.flushAcks();
- self.setZooKeeperServer(zk);
- self.adminServer.setZooKeeperServer(zk);
- break outerLoop;
- case Leader.NEWLEADER: // Getting NEWLEADER here instead of in discovery
- LOG.info("Learner received NEWLEADER message");
- if (qp.getData() != null && qp.getData().length > 1) {
- try {
- QuorumVerifier qv = self.configFromString(new String(qp.getData(), UTF_8));
- self.setLastSeenQuorumVerifier(qv, true);
- newLeaderQV = qv;
- } catch (Exception e) {
- e.printStackTrace();
- }
- }
-
- helper.writeState();
- writePacket(new QuorumPacket(Leader.ACK, newLeaderZxid, null, null), true);
- break;
- }
- }
- }
- QuorumPacket ack = new QuorumPacket(Leader.ACK, 0, null, null);
- ack.setZxid(ZxidUtils.makeZxid(newEpoch, 0));
- writePacket(ack, true);
- zk.startServing();
- /*
- * Update the election vote here to ensure that all members of the
- * ensemble report the same vote to new servers that start up and
- * send leader election notifications to the ensemble.
- *
- * @see https://issues.apache.org/jira/browse/ZOOKEEPER-1732
- */
- self.updateElectionVote(newEpoch);
-
- helper.applyDelayedPackets();
- }
-
- private void tryReconfig(PacketInFlight pif, long newLeader, long zxid) throws Exception {
- QuorumVerifier qv = self.configFromString(new String(((SetDataTxn) pif.rec).getData(), UTF_8));
- boolean majorChange = self.processReconfig(qv, newLeader, zxid, true);
- if (majorChange) {
- throw new Exception("changes proposed in reconfig");
- }
- }
-
- protected void revalidate(QuorumPacket qp) throws IOException {
- ByteArrayInputStream bis = new ByteArrayInputStream(qp.getData());
- DataInputStream dis = new DataInputStream(bis);
- long sessionId = dis.readLong();
- boolean valid = dis.readBoolean();
- ServerCnxn cnxn = pendingRevalidations.remove(sessionId);
- if (cnxn == null) {
- LOG.warn("Missing session 0x{} for validation", Long.toHexString(sessionId));
- } else {
- zk.finishSessionInit(cnxn, valid);
- }
- if (LOG.isTraceEnabled()) {
- ZooTrace.logTraceMessage(
- LOG,
- ZooTrace.SESSION_TRACE_MASK,
- "Session 0x" + Long.toHexString(sessionId) + " is valid: " + valid);
- }
- }
-
- protected void ping(QuorumPacket qp) throws IOException {
- // Send back the ping with our session data
- ByteArrayOutputStream bos = new ByteArrayOutputStream();
- DataOutputStream dos = new DataOutputStream(bos);
- Map<Long, Integer> touchTable = zk.getTouchSnapshot();
- for (Entry<Long, Integer> entry : touchTable.entrySet()) {
- dos.writeLong(entry.getKey());
- dos.writeInt(entry.getValue());
- }
-
- QuorumPacket pingReply = new QuorumPacket(qp.getType(), qp.getZxid(), bos.toByteArray(), qp.getAuthinfo());
- writePacket(pingReply, true);
- }
-
- /**
- * Shutdown the Peer
- */
- public void shutdown() {
- self.setZooKeeperServer(null);
- self.closeAllConnections();
- self.adminServer.setZooKeeperServer(null);
-
- if (sender != null) {
- sender.shutdown();
- }
-
- closeSocket();
- // shutdown previous zookeeper
- if (zk != null) {
- // If we haven't finished SNAP sync, force fully shutdown
- // to avoid potential inconsistency
- zk.shutdown(self.getSyncMode().equals(QuorumPeer.SyncMode.SNAP));
- }
- }
-
- boolean isRunning() {
- return self.isRunning() && zk.isRunning();
- }
-
- void closeSocket() {
- if (sock != null) {
- if (sockBeingClosed.compareAndSet(false, true)) {
- if (closeSocketAsync) {
-                    final Thread closingThread = new Thread(() -> closeSockSync(), "CloseSocketThread(sid:" + zk.getServerId() + ")");
- closingThread.setDaemon(true);
- closingThread.start();
- } else {
- closeSockSync();
- }
- }
- }
- }
-
- void closeSockSync() {
- try {
- long startTime = Time.currentElapsedTime();
- if (sock != null) {
- sock.close();
- sock = null;
- }
- ServerMetrics.getMetrics().SOCKET_CLOSING_TIME.add(Time.currentElapsedTime() - startTime);
- } catch (IOException e) {
- LOG.warn("Ignoring error closing connection to leader", e);
- }
- }
-
-}
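
Aside on the connection logic above: LeaderConnector.connectToLeader retries the socket
connection up to five times, recomputing the remaining budget (tickTime * initLimit, or
tickTime * connectToLearnerMasterLimit when set) before every attempt and capping each
attempt's connect timeout by what is left. A minimal, self-contained sketch of that
retry-with-shrinking-budget pattern; the address, budget and retry parameters below are
illustrative assumptions, not ZooKeeper configuration:

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;

class ConnectRetrySketch {
    // Sketch only: mirrors the shape of Learner's retry loop, not its exact behaviour.
    static Socket connectWithBudget(InetSocketAddress addr, int budgetMs, int maxTries, int retryDelayMs)
            throws IOException, InterruptedException {
        long start = System.nanoTime();
        IOException last = null;
        for (int tries = 0; tries < maxTries; tries++) {
            int remaining = budgetMs - (int) ((System.nanoTime() - start) / 1_000_000);
            if (remaining <= 0) {
                throw new IOException("connect budget exhausted after " + tries + " tries", last);
            }
            Socket sock = new Socket();
            try {
                sock.connect(addr, Math.min(budgetMs, remaining)); // cap each attempt by the time left
                return sock;
            } catch (IOException e) {
                last = e;
                sock.close();
                Thread.sleep(retryDelayMs); // plays the role of leaderConnectDelayDuringRetryMs
            }
        }
        throw new IOException("retries exceeded", last);
    }
}
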
diff --git a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/quorum/LearnerZooKeeperServer.java b/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/quorum/LearnerZooKeeperServer.java
deleted file mode 100644
index 99c4ae16dce..00000000000
--- a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/quorum/LearnerZooKeeperServer.java
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.zookeeper.server.quorum;
-
-import java.io.IOException;
-import java.util.Map;
-import org.apache.zookeeper.jmx.MBeanRegistry;
-import org.apache.zookeeper.server.DataTreeBean;
-import org.apache.zookeeper.server.ServerCnxn;
-import org.apache.zookeeper.server.SyncRequestProcessor;
-import org.apache.zookeeper.server.ZKDatabase;
-import org.apache.zookeeper.server.ZooKeeperServerBean;
-import org.apache.zookeeper.server.persistence.FileTxnSnapLog;
-
-/**
- * Parent class for all ZooKeeperServers for Learners
- */
-public abstract class LearnerZooKeeperServer extends QuorumZooKeeperServer {
-
- /*
- * Request processors
- */
- protected CommitProcessor commitProcessor;
- protected SyncRequestProcessor syncProcessor;
-
- public LearnerZooKeeperServer(FileTxnSnapLog logFactory, int tickTime, int minSessionTimeout, int maxSessionTimeout, int listenBacklog, ZKDatabase zkDb, QuorumPeer self) throws IOException {
- super(logFactory, tickTime, minSessionTimeout, maxSessionTimeout, listenBacklog, zkDb, self);
- }
-
- /**
- * Abstract method to return the learner associated with this server.
- * Since the Learner may change under our feet (when QuorumPeer reassigns
- * it) we can't simply take a reference here. Instead, we need the
- * subclasses to implement this.
- */
- public abstract Learner getLearner();
-
- /**
- * Returns the current state of the session tracker. This is only currently
- * used by a Learner to build a ping response packet.
- *
- */
- protected Map<Long, Integer> getTouchSnapshot() {
- if (sessionTracker != null) {
- return ((LearnerSessionTracker) sessionTracker).snapshot();
- }
- Map<Long, Integer> map = Map.of();
- return map;
- }
-
- /**
- * Returns the id of the associated QuorumPeer, which will do for a unique
- * id of this server.
- */
- @Override
- public long getServerId() {
- return self.getMyId();
- }
-
- @Override
- public void createSessionTracker() {
- sessionTracker = new LearnerSessionTracker(
- this,
- getZKDatabase().getSessionWithTimeOuts(),
- this.tickTime,
- self.getMyId(),
- self.areLocalSessionsEnabled(),
- getZooKeeperServerListener());
- }
-
- @Override
- protected void revalidateSession(ServerCnxn cnxn, long sessionId, int sessionTimeout) throws IOException {
- if (upgradeableSessionTracker.isLocalSession(sessionId)) {
- super.revalidateSession(cnxn, sessionId, sessionTimeout);
- } else {
- getLearner().validateSession(cnxn, sessionId, sessionTimeout);
- }
- }
-
- @Override
- protected void registerJMX() {
- // register with JMX
- try {
- jmxDataTreeBean = new DataTreeBean(getZKDatabase().getDataTree());
- MBeanRegistry.getInstance().register(jmxDataTreeBean, jmxServerBean);
- } catch (Exception e) {
- LOG.warn("Failed to register with JMX", e);
- jmxDataTreeBean = null;
- }
- }
-
- public void registerJMX(ZooKeeperServerBean serverBean, LocalPeerBean localPeerBean) {
- // register with JMX
- if (self.jmxLeaderElectionBean != null) {
- try {
- MBeanRegistry.getInstance().unregister(self.jmxLeaderElectionBean);
- } catch (Exception e) {
- LOG.warn("Failed to register with JMX", e);
- }
- self.jmxLeaderElectionBean = null;
- }
-
- try {
- jmxServerBean = serverBean;
- MBeanRegistry.getInstance().register(serverBean, localPeerBean);
- } catch (Exception e) {
- LOG.warn("Failed to register with JMX", e);
- jmxServerBean = null;
- }
- }
-
- @Override
- protected void unregisterJMX() {
- // unregister from JMX
- try {
- if (jmxDataTreeBean != null) {
- MBeanRegistry.getInstance().unregister(jmxDataTreeBean);
- }
- } catch (Exception e) {
- LOG.warn("Failed to unregister with JMX", e);
- }
- jmxDataTreeBean = null;
- }
-
- protected void unregisterJMX(Learner peer) {
- // unregister from JMX
- try {
- if (jmxServerBean != null) {
- MBeanRegistry.getInstance().unregister(jmxServerBean);
- }
- } catch (Exception e) {
- LOG.warn("Failed to unregister with JMX", e);
- }
- jmxServerBean = null;
- }
-
- @Override
- public synchronized void shutdown(boolean fullyShutDown) {
- if (!canShutdown()) {
- LOG.debug("ZooKeeper server is not running, so not proceeding to shutdown!");
- } else {
- LOG.info("Shutting down");
- try {
- if (syncProcessor != null) {
- // Shutting down the syncProcessor here, first, ensures queued transactions here are written to
- // permanent storage, which ensures that crash recovery data is consistent with what is used for a
- // leader election immediately following shutdown, because of the old leader going down; and also
- // that any state on its way to being written is also loaded in the potential call to
- // fast-forward-from-edits, in super.shutdown(...), so we avoid getting a DIFF from the new leader
- // that contains entries we have already written to our transaction log.
- syncProcessor.shutdown();
- }
- } catch (Exception e) {
- LOG.warn("Ignoring unexpected exception in syncprocessor shutdown", e);
- }
- }
- try {
- super.shutdown(fullyShutDown);
- } catch (Exception e) {
- LOG.warn("Ignoring unexpected exception during shutdown", e);
- }
- }
-
-}
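
The getTouchSnapshot method above is what Learner.ping (earlier in this diff) serializes when
answering the leader's ping: each (sessionId, timeout) pair is written as a long followed by
an int on a DataOutputStream. A small self-contained sketch of that encode/decode round trip,
using plain java.io and an invented map rather than ZooKeeper's session tracker:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.LinkedHashMap;
import java.util.Map;

class TouchSnapshotCodecSketch {
    // Encode (sessionId -> timeout) pairs in the same order and layout Learner.ping writes them.
    static byte[] encode(Map<Long, Integer> touchTable) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        DataOutputStream dos = new DataOutputStream(bos);
        for (Map.Entry<Long, Integer> e : touchTable.entrySet()) {
            dos.writeLong(e.getKey());   // session id
            dos.writeInt(e.getValue());  // session timeout in milliseconds
        }
        dos.flush();
        return bos.toByteArray();
    }

    // Decode until the buffer is exhausted; the receiver relies on the fixed record layout.
    static Map<Long, Integer> decode(byte[] data) throws IOException {
        Map<Long, Integer> result = new LinkedHashMap<>();
        DataInputStream dis = new DataInputStream(new ByteArrayInputStream(data));
        while (dis.available() >= Long.BYTES + Integer.BYTES) {
            result.put(dis.readLong(), dis.readInt());
        }
        return result;
    }
}
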
diff --git a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/quorum/ObserverZooKeeperServer.java b/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/quorum/ObserverZooKeeperServer.java
deleted file mode 100644
index 1a44a98e6e7..00000000000
--- a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/quorum/ObserverZooKeeperServer.java
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.zookeeper.server.quorum;
-
-import java.io.IOException;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.function.BiConsumer;
-import org.apache.zookeeper.server.FinalRequestProcessor;
-import org.apache.zookeeper.server.Request;
-import org.apache.zookeeper.server.RequestProcessor;
-import org.apache.zookeeper.server.SyncRequestProcessor;
-import org.apache.zookeeper.server.ZKDatabase;
-import org.apache.zookeeper.server.persistence.FileTxnSnapLog;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * A ZooKeeperServer for the Observer node type. Not much is different, but
- * we anticipate specializing the request processors in the future.
- *
- */
-public class ObserverZooKeeperServer extends LearnerZooKeeperServer {
-
- private static final Logger LOG = LoggerFactory.getLogger(ObserverZooKeeperServer.class);
-
- /**
-     * Enables the sync request processor, which writes the txnlog to disk and
-     * takes periodic snapshots. Default is ON.
- */
-
- private boolean syncRequestProcessorEnabled = this.self.getSyncEnabled();
-
- /*
- * Pending sync requests
- */ ConcurrentLinkedQueue<Request> pendingSyncs = new ConcurrentLinkedQueue<>();
-
- ObserverZooKeeperServer(FileTxnSnapLog logFactory, QuorumPeer self, ZKDatabase zkDb) throws IOException {
- super(logFactory, self.tickTime, self.minSessionTimeout, self.maxSessionTimeout, self.clientPortListenBacklog, zkDb, self);
- LOG.info("syncEnabled ={}", syncRequestProcessorEnabled);
- }
-
- public Observer getObserver() {
- return self.observer;
- }
-
- @Override
- public Learner getLearner() {
- return self.observer;
- }
-
- /**
- * Unlike a Follower, which sees a full request only during the PROPOSAL
- * phase, Observers get all the data required with the INFORM packet.
-     * This method commits a request that has been unpacked from an INFORM
- * received from the Leader.
- *
- * @param request
- */
- public void commitRequest(Request request) {
- if (syncProcessor != null) {
- // Write to txnlog and take periodic snapshot
- syncProcessor.processRequest(request);
- }
- commitProcessor.commit(request);
- }
-
- /**
- * Set up the request processors for an Observer:
-     * firstProcessor-&gt;commitProcessor-&gt;finalProcessor
- */
- @Override
- protected void setupRequestProcessors() {
- // We might consider changing the processor behaviour of
- // Observers to, for example, remove the disk sync requirements.
- // Currently, they behave almost exactly the same as followers.
- RequestProcessor finalProcessor = new FinalRequestProcessor(this);
- commitProcessor = new CommitProcessor(finalProcessor, Long.toString(getServerId()), true, getZooKeeperServerListener());
- commitProcessor.start();
- firstProcessor = new ObserverRequestProcessor(this, commitProcessor);
- ((ObserverRequestProcessor) firstProcessor).start();
-
- /*
-         * The Observer should write to disk, so that it won't request
-         * too old a txn from the leader, which may lead to getting an entire
-         * snapshot.
-         *
-         * However, this may degrade performance as it has to write to disk
-         * and take periodic snapshots, which may double the memory requirements.
- */
- if (syncRequestProcessorEnabled) {
- syncProcessor = new SyncRequestProcessor(this, null);
- syncProcessor.start();
- }
- }
-
- /*
- * Process a sync request
- */
- public synchronized void sync() {
- if (pendingSyncs.size() == 0) {
- LOG.warn("Not expecting a sync.");
- return;
- }
-
- Request r = pendingSyncs.remove();
- commitProcessor.commit(r);
- }
-
- @Override
- public String getState() {
- return "observer";
- }
-
- @Override
- public void dumpMonitorValues(BiConsumer<String, Object> response) {
- super.dumpMonitorValues(response);
- response.accept("observer_master_id", getObserver().getLearnerMasterId());
- }
-
-}
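
commitRequest above is the Observer's whole write path: because the INFORM packet already
carries the full transaction, the Observer just persists it through the sync processor
(when enabled) and hands it to the commit processor. A toy sketch of that two-step shape;
the types and names below are invented stand-ins, not ZooKeeper's request processors:

import java.util.ArrayList;
import java.util.List;

class ObserverCommitSketch {
    // Stand-ins for the two processors commitRequest touches; names are invented.
    interface DurabilityStage { void persist(String request); }   // role of SyncRequestProcessor
    interface CommitStage { void commit(String request); }        // role of CommitProcessor

    static class InMemoryLog implements DurabilityStage {
        final List<String> txnLog = new ArrayList<>();
        public void persist(String request) { txnLog.add(request); }
    }

    static class ApplyToState implements CommitStage {
        public void commit(String request) { System.out.println("committed: " + request); }
    }

    // Mirrors the shape of ObserverZooKeeperServer.commitRequest: persist first when
    // the sync path is enabled, then commit; INFORM packets already carry the full data.
    static void commitRequest(String request, boolean syncEnabled,
                              DurabilityStage sync, CommitStage commit) {
        if (syncEnabled) {
            sync.persist(request);
        }
        commit.commit(request);
    }

    public static void main(String[] args) {
        commitRequest("setData /example", true, new InMemoryLog(), new ApplyToState());
    }
}
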
diff --git a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/quorum/QuorumPeer.java b/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/quorum/QuorumPeer.java
deleted file mode 100644
index f6fc87d7716..00000000000
--- a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/quorum/QuorumPeer.java
+++ /dev/null
@@ -1,2711 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.zookeeper.server.quorum;
-
-import static org.apache.zookeeper.common.NetUtils.formatInetAddr;
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FileReader;
-import java.io.IOException;
-import java.io.StringReader;
-import java.io.StringWriter;
-import java.io.Writer;
-import java.net.DatagramPacket;
-import java.net.DatagramSocket;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Properties;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.function.Function;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
-import javax.security.sasl.SaslException;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.zookeeper.KeeperException.BadArgumentsException;
-import org.apache.zookeeper.common.AtomicFileOutputStream;
-import org.apache.zookeeper.common.AtomicFileWritingIdiom;
-import org.apache.zookeeper.common.AtomicFileWritingIdiom.WriterStatement;
-import org.apache.zookeeper.common.QuorumX509Util;
-import org.apache.zookeeper.common.Time;
-import org.apache.zookeeper.common.X509Exception;
-import org.apache.zookeeper.jmx.MBeanRegistry;
-import org.apache.zookeeper.jmx.ZKMBeanInfo;
-import org.apache.zookeeper.server.ServerCnxn;
-import org.apache.zookeeper.server.ServerCnxnFactory;
-import org.apache.zookeeper.server.ServerMetrics;
-import org.apache.zookeeper.server.ZKDatabase;
-import org.apache.zookeeper.server.ZooKeeperServer;
-import org.apache.zookeeper.server.ZooKeeperThread;
-import org.apache.zookeeper.server.admin.AdminServer;
-import org.apache.zookeeper.server.admin.AdminServer.AdminServerException;
-import org.apache.zookeeper.server.admin.AdminServerFactory;
-import org.apache.zookeeper.server.persistence.FileTxnSnapLog;
-import org.apache.zookeeper.server.quorum.QuorumPeerConfig.ConfigException;
-import org.apache.zookeeper.server.quorum.auth.NullQuorumAuthLearner;
-import org.apache.zookeeper.server.quorum.auth.NullQuorumAuthServer;
-import org.apache.zookeeper.server.quorum.auth.QuorumAuth;
-import org.apache.zookeeper.server.quorum.auth.QuorumAuthLearner;
-import org.apache.zookeeper.server.quorum.auth.QuorumAuthServer;
-import org.apache.zookeeper.server.quorum.auth.SaslQuorumAuthLearner;
-import org.apache.zookeeper.server.quorum.auth.SaslQuorumAuthServer;
-import org.apache.zookeeper.server.quorum.flexible.QuorumMaj;
-import org.apache.zookeeper.server.quorum.flexible.QuorumOracleMaj;
-import org.apache.zookeeper.server.quorum.flexible.QuorumVerifier;
-import org.apache.zookeeper.server.util.ConfigUtils;
-import org.apache.zookeeper.server.util.JvmPauseMonitor;
-import org.apache.zookeeper.server.util.ZxidUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class manages the quorum protocol. There are three states this server
- * can be in:
- * <ol>
- * <li>Leader election - each server will elect a leader (proposing itself as a
- * leader initially).</li>
- * <li>Follower - the server will synchronize with the leader and replicate any
- * transactions.</li>
- * <li>Leader - the server will process requests and forward them to followers.
- * A majority of followers must log the request before it can be accepted.</li>
- * </ol>
- *
- * This class will set up a datagram socket that will always respond with its
- * view of the current leader. The response will take the form of:
- *
- * <pre>
- * int xid;
- *
- * long myid;
- *
- * long leader_id;
- *
- * long leader_zxid;
- * </pre>
- *
- * The request for the current leader will consist solely of an xid: int xid;
- */
-public class QuorumPeer extends ZooKeeperThread implements QuorumStats.Provider {
-
- private static final Logger LOG = LoggerFactory.getLogger(QuorumPeer.class);
-
- public static final String CONFIG_KEY_KERBEROS_CANONICALIZE_HOST_NAMES = "zookeeper.kerberos.canonicalizeHostNames";
- public static final String CONFIG_DEFAULT_KERBEROS_CANONICALIZE_HOST_NAMES = "false";
-
- private QuorumBean jmxQuorumBean;
- LocalPeerBean jmxLocalPeerBean;
- private Map<Long, RemotePeerBean> jmxRemotePeerBean;
- LeaderElectionBean jmxLeaderElectionBean;
-
- // The QuorumCnxManager is held through an AtomicReference to ensure cross-thread visibility
- // of updates; see the implementation comment at setLastSeenQuorumVerifier().
- private AtomicReference<QuorumCnxManager> qcmRef = new AtomicReference<>();
-
- QuorumAuthServer authServer;
- QuorumAuthLearner authLearner;
-
- /**
-     * ZKDatabase is a top-level member of QuorumPeer
-     * which will be used in all the ZooKeeperServers
-     * instantiated later. Also, it is created once on
-     * bootup and only thrown away in case of a truncate
-     * message from the leader.
- */
- private ZKDatabase zkDb;
-
- private JvmPauseMonitor jvmPauseMonitor;
-
- private final AtomicBoolean suspended = new AtomicBoolean(false);
-
- public static final class AddressTuple {
-
- public final MultipleAddresses quorumAddr;
- public final MultipleAddresses electionAddr;
- public final InetSocketAddress clientAddr;
-
- public AddressTuple(MultipleAddresses quorumAddr, MultipleAddresses electionAddr, InetSocketAddress clientAddr) {
- this.quorumAddr = quorumAddr;
- this.electionAddr = electionAddr;
- this.clientAddr = clientAddr;
- }
-
- }
-
- private int observerMasterPort;
-
- public int getObserverMasterPort() {
- return observerMasterPort;
- }
-
- public void setObserverMasterPort(int observerMasterPort) {
- this.observerMasterPort = observerMasterPort;
- }
-
- public static final String CONFIG_KEY_MULTI_ADDRESS_ENABLED = "zookeeper.multiAddress.enabled";
- public static final String CONFIG_DEFAULT_MULTI_ADDRESS_ENABLED = "false";
-
- private boolean multiAddressEnabled = true;
- public boolean isMultiAddressEnabled() {
- return multiAddressEnabled;
- }
-
- public void setMultiAddressEnabled(boolean multiAddressEnabled) {
- this.multiAddressEnabled = multiAddressEnabled;
- LOG.info("multiAddress.enabled set to {}", multiAddressEnabled);
- }
-
- public static final String CONFIG_KEY_MULTI_ADDRESS_REACHABILITY_CHECK_TIMEOUT_MS = "zookeeper.multiAddress.reachabilityCheckTimeoutMs";
-
- private int multiAddressReachabilityCheckTimeoutMs = (int) MultipleAddresses.DEFAULT_TIMEOUT.toMillis();
- public int getMultiAddressReachabilityCheckTimeoutMs() {
- return multiAddressReachabilityCheckTimeoutMs;
- }
-
- public void setMultiAddressReachabilityCheckTimeoutMs(int multiAddressReachabilityCheckTimeoutMs) {
- this.multiAddressReachabilityCheckTimeoutMs = multiAddressReachabilityCheckTimeoutMs;
- LOG.info("multiAddress.reachabilityCheckTimeoutMs set to {}", multiAddressReachabilityCheckTimeoutMs);
- }
-
- public static final String CONFIG_KEY_MULTI_ADDRESS_REACHABILITY_CHECK_ENABLED = "zookeeper.multiAddress.reachabilityCheckEnabled";
-
- private boolean multiAddressReachabilityCheckEnabled = true;
-
- public boolean isMultiAddressReachabilityCheckEnabled() {
- return multiAddressReachabilityCheckEnabled;
- }
-
- public void setMultiAddressReachabilityCheckEnabled(boolean multiAddressReachabilityCheckEnabled) {
- this.multiAddressReachabilityCheckEnabled = multiAddressReachabilityCheckEnabled;
- LOG.info("multiAddress.reachabilityCheckEnabled set to {}", multiAddressReachabilityCheckEnabled);
- }
-
- public static class QuorumServer {
-
- public MultipleAddresses addr = new MultipleAddresses();
-
- public MultipleAddresses electionAddr = new MultipleAddresses();
-
- public InetSocketAddress clientAddr = null;
-
- public long id;
-
- public String hostname;
-
- public LearnerType type = LearnerType.PARTICIPANT;
-
- public boolean isClientAddrFromStatic = false;
-
- private List<InetSocketAddress> myAddrs;
-
- public QuorumServer(long id, InetSocketAddress addr, InetSocketAddress electionAddr, InetSocketAddress clientAddr) {
- this(id, addr, electionAddr, clientAddr, LearnerType.PARTICIPANT);
- }
-
- public QuorumServer(long id, InetSocketAddress addr, InetSocketAddress electionAddr) {
- this(id, addr, electionAddr, null, LearnerType.PARTICIPANT);
- }
-
- // VisibleForTesting
- public QuorumServer(long id, InetSocketAddress addr) {
- this(id, addr, null, null, LearnerType.PARTICIPANT);
- }
-
- public long getId() {
- return id;
- }
-
- /**
- * Performs a DNS lookup for server address and election address.
- *
- * If the DNS lookup fails, this.addr and electionAddr remain
- * unmodified.
- */
- public void recreateSocketAddresses() {
- if (this.addr.isEmpty()) {
- LOG.warn("Server address has not been initialized");
- return;
- }
- if (this.electionAddr.isEmpty()) {
- LOG.warn("Election address has not been initialized");
- return;
- }
- this.addr.recreateSocketAddresses();
- this.electionAddr.recreateSocketAddresses();
- }
-
- private LearnerType getType(String s) throws ConfigException {
- switch (s.trim().toLowerCase()) {
- case "observer":
- return LearnerType.OBSERVER;
- case "participant":
- return LearnerType.PARTICIPANT;
- default:
- throw new ConfigException("Unrecognised peertype: " + s);
- }
- }
-
- public QuorumServer(long sid, String addressStr) throws ConfigException {
- this(sid, addressStr, QuorumServer::getInetAddress);
- }
-
- QuorumServer(long sid, String addressStr, Function<InetSocketAddress, InetAddress> getInetAddress) throws ConfigException {
- this.id = sid;
- initializeWithAddressString(addressStr, getInetAddress);
- }
-
- public QuorumServer(long id, InetSocketAddress addr, InetSocketAddress electionAddr, LearnerType type) {
- this(id, addr, electionAddr, null, type);
- }
-
- public QuorumServer(long id, InetSocketAddress addr, InetSocketAddress electionAddr, InetSocketAddress clientAddr, LearnerType type) {
- this.id = id;
- if (addr != null) {
- this.addr.addAddress(addr);
- }
- if (electionAddr != null) {
- this.electionAddr.addAddress(electionAddr);
- }
- this.type = type;
- this.clientAddr = clientAddr;
-
- setMyAddrs();
- }
-
- private static final String wrongFormat =
- " does not have the form server_config or server_config;client_config"
- + " where server_config is the pipe separated list of host:port:port or host:port:port:type"
- + " and client_config is port or host:port";
-
- private void initializeWithAddressString(String addressStr, Function<InetSocketAddress, InetAddress> getInetAddress) throws ConfigException {
- LearnerType newType = null;
- String[] serverClientParts = addressStr.split(";");
- String[] serverAddresses = serverClientParts[0].split("\\|");
-
- if (serverClientParts.length == 2) {
- String[] clientParts = ConfigUtils.getHostAndPort(serverClientParts[1]);
- if (clientParts.length > 2) {
- throw new ConfigException(addressStr + wrongFormat);
- }
-
- // is client_config a host:port or just a port
- String clientHostName = (clientParts.length == 2) ? clientParts[0] : "0.0.0.0";
- try {
- clientAddr = new InetSocketAddress(clientHostName, Integer.parseInt(clientParts[clientParts.length - 1]));
- } catch (NumberFormatException e) {
- throw new ConfigException("Address unresolved: " + hostname + ":" + clientParts[clientParts.length - 1]);
- }
- }
-
- boolean multiAddressEnabled = Boolean.parseBoolean(
- System.getProperty(QuorumPeer.CONFIG_KEY_MULTI_ADDRESS_ENABLED, QuorumPeer.CONFIG_DEFAULT_MULTI_ADDRESS_ENABLED));
- if (!multiAddressEnabled && serverAddresses.length > 1) {
- throw new ConfigException("Multiple address feature is disabled, but multiple addresses were specified for sid " + this.id);
- }
-
- boolean canonicalize = Boolean.parseBoolean(
- System.getProperty(
- CONFIG_KEY_KERBEROS_CANONICALIZE_HOST_NAMES,
- CONFIG_DEFAULT_KERBEROS_CANONICALIZE_HOST_NAMES));
-
- for (String serverAddress : serverAddresses) {
- String serverParts[] = ConfigUtils.getHostAndPort(serverAddress);
- if ((serverClientParts.length > 2) || (serverParts.length < 3)
- || (serverParts.length > 4)) {
- throw new ConfigException(addressStr + wrongFormat);
- }
-
- String serverHostName = serverParts[0];
-
- // server_config should be either host:port:port or host:port:port:type
- InetSocketAddress tempAddress;
- InetSocketAddress tempElectionAddress;
- try {
- tempAddress = new InetSocketAddress(serverHostName, Integer.parseInt(serverParts[1]));
- addr.addAddress(tempAddress);
- } catch (NumberFormatException e) {
- throw new ConfigException("Address unresolved: " + serverHostName + ":" + serverParts[1]);
- }
- try {
- tempElectionAddress = new InetSocketAddress(serverHostName, Integer.parseInt(serverParts[2]));
- electionAddr.addAddress(tempElectionAddress);
- } catch (NumberFormatException e) {
- throw new ConfigException("Address unresolved: " + serverHostName + ":" + serverParts[2]);
- }
-
- if (tempAddress.getPort() == tempElectionAddress.getPort()) {
- throw new ConfigException("Client and election port must be different! Please update the "
- + "configuration file on server." + this.id);
- }
-
- if (canonicalize) {
- InetAddress ia = getInetAddress.apply(tempAddress);
- if (ia == null) {
- throw new ConfigException("Unable to canonicalize address " + serverHostName + " because it's not resolvable");
- }
-
- String canonicalHostName = ia.getCanonicalHostName();
-
- if (!canonicalHostName.equals(serverHostName)
- // Avoid using literal IP address when
- // security check fails
- && !canonicalHostName.equals(ia.getHostAddress())) {
- LOG.info("Host name for quorum server {} "
- + "canonicalized from {} to {}",
- this.id, serverHostName, canonicalHostName);
- serverHostName = canonicalHostName;
- }
- }
-
- if (serverParts.length == 4) {
- LearnerType tempType = getType(serverParts[3]);
- if (newType == null) {
- newType = tempType;
- }
-
- if (newType != tempType) {
- throw new ConfigException("Multiple addresses should have similar roles: " + type + " vs " + tempType);
- }
- }
-
- this.hostname = serverHostName;
- }
-
- if (newType != null) {
- type = newType;
- }
-
- setMyAddrs();
- }
-
- private static InetAddress getInetAddress(InetSocketAddress addr) {
- return addr.getAddress();
- }
-
- private void setMyAddrs() {
- this.myAddrs = new ArrayList<>();
- this.myAddrs.addAll(this.addr.getAllAddresses());
- this.myAddrs.add(this.clientAddr);
- this.myAddrs.addAll(this.electionAddr.getAllAddresses());
- this.myAddrs = excludedSpecialAddresses(this.myAddrs);
- }
-
- public static String delimitedHostString(InetSocketAddress addr) {
- String host = addr.getHostString();
- if (host.contains(":")) {
- return "[" + host + "]";
- } else {
- return host;
- }
- }
-
- public String toString() {
- StringWriter sw = new StringWriter();
-
- List<InetSocketAddress> addrList = new LinkedList<>(addr.getAllAddresses());
- List<InetSocketAddress> electionAddrList = new LinkedList<>(electionAddr.getAllAddresses());
-
- if (addrList.size() > 0 && electionAddrList.size() > 0) {
- addrList.sort(Comparator.comparing(InetSocketAddress::getHostString));
- electionAddrList.sort(Comparator.comparing(InetSocketAddress::getHostString));
- sw.append(IntStream.range(0, addrList.size()).mapToObj(i -> String.format("%s:%d:%d",
- delimitedHostString(addrList.get(i)), addrList.get(i).getPort(), electionAddrList.get(i).getPort()))
- .collect(Collectors.joining("|")));
- }
-
- if (type == LearnerType.OBSERVER) {
- sw.append(":observer");
- } else if (type == LearnerType.PARTICIPANT) {
- sw.append(":participant");
- }
-
- if (clientAddr != null && !isClientAddrFromStatic) {
- sw.append(";");
- sw.append(delimitedHostString(clientAddr));
- sw.append(":");
- sw.append(String.valueOf(clientAddr.getPort()));
- }
-
- return sw.toString();
- }
-
- public int hashCode() {
- assert false : "hashCode not designed";
- return 42; // any arbitrary constant will do
- }
-
- private boolean checkAddressesEqual(InetSocketAddress addr1, InetSocketAddress addr2) {
- return (addr1 != null || addr2 == null)
- && (addr1 == null || addr2 != null)
- && (addr1 == null || addr2 == null || addr1.equals(addr2));
- }
-
- public boolean equals(Object o) {
- if (!(o instanceof QuorumServer)) {
- return false;
- }
- QuorumServer qs = (QuorumServer) o;
- if ((qs.id != id) || (qs.type != type)) {
- return false;
- }
- if (!addr.equals(qs.addr)) {
- return false;
- }
- if (!electionAddr.equals(qs.electionAddr)) {
- return false;
- }
- return checkAddressesEqual(clientAddr, qs.clientAddr);
- }
-
- public void checkAddressDuplicate(QuorumServer s) throws BadArgumentsException {
- List<InetSocketAddress> otherAddrs = new ArrayList<>(s.addr.getAllAddresses());
- otherAddrs.add(s.clientAddr);
- otherAddrs.addAll(s.electionAddr.getAllAddresses());
- otherAddrs = excludedSpecialAddresses(otherAddrs);
-
- for (InetSocketAddress my : this.myAddrs) {
-
- for (InetSocketAddress other : otherAddrs) {
- if (my.equals(other)) {
- String error = String.format("%s of server.%d conflicts %s of server.%d", my, this.id, other, s.id);
- throw new BadArgumentsException(error);
- }
- }
- }
- }
-
- private List<InetSocketAddress> excludedSpecialAddresses(List<InetSocketAddress> addrs) {
- List<InetSocketAddress> included = new ArrayList<>();
-
- for (InetSocketAddress addr : addrs) {
- if (addr == null) {
- continue;
- }
- InetAddress inetaddr = addr.getAddress();
-
- if (inetaddr == null || inetaddr.isAnyLocalAddress() // wildCard addresses (0.0.0.0 or [::])
- || inetaddr.isLoopbackAddress()) { // loopback address(localhost/127.0.0.1)
- continue;
- }
- included.add(addr);
- }
- return included;
- }
-
- }
-
- public enum ServerState {
- LOOKING,
- FOLLOWING,
- LEADING,
- OBSERVING
- }
-
- /**
- * (Used for monitoring) Shows the current phase of
- * the Zab protocol that the peer is running.
- */
- public enum ZabState {
- ELECTION,
- DISCOVERY,
- SYNCHRONIZATION,
- BROADCAST
- }
-
- /**
- * (Used for monitoring) When the peer is in the synchronization phase, this shows
- * which synchronization mechanism is being used.
- */
- public enum SyncMode {
- NONE,
- DIFF,
- SNAP,
- TRUNC
- }
-
- /*
- * A peer can either be participating, which means it is willing both to vote
- * in instances of consensus and to elect or become a Leader, or it may be
- * observing, in which case it is not.
- *
- * We need this distinction to decide which ServerState to move to when
- * conditions change (e.g. which state to become after LOOKING).
- */
- public enum LearnerType {
- PARTICIPANT,
- OBSERVER
- }
-
- /*
- * To enable observers to have no identifier, we need a generic identifier
- * at least for QuorumCnxManager. We use the following constant as the
- * value of such a generic identifier.
- */
-
- static final long OBSERVER_ID = Long.MAX_VALUE;
-
- /*
- * Record leader election time
- */
- public long start_fle, end_fle; // fle = fast leader election
- public static final String FLE_TIME_UNIT = "MS";
- private long unavailableStartTime;
-
- /*
- * Default value of peer is participant
- */
- private LearnerType learnerType = LearnerType.PARTICIPANT;
-
- public LearnerType getLearnerType() {
- return learnerType;
- }
-
- /**
- * Sets the LearnerType
- */
- public void setLearnerType(LearnerType p) {
- learnerType = p;
- }
-
- protected synchronized void setConfigFileName(String s) {
- configFilename = s;
- }
-
- private String configFilename = null;
-
- public int getQuorumSize() {
- return getVotingView().size();
- }
-
- public void setJvmPauseMonitor(JvmPauseMonitor jvmPauseMonitor) {
- this.jvmPauseMonitor = jvmPauseMonitor;
- }
-
- /**
- * QuorumVerifier implementation; default (majority).
- */
-
- //last committed quorum verifier
- private QuorumVerifier quorumVerifier;
-
- //last proposed quorum verifier
- private QuorumVerifier lastSeenQuorumVerifier = null;
-
- // Lock object that guard access to quorumVerifier and lastSeenQuorumVerifier.
- final Object QV_LOCK = new Object();
-
- /**
- * My id
- */
- private long myid;
-
- /**
- * get the id of this quorum peer.
- */
- public long getMyId() {
- return myid;
- }
-
- // VisibleForTesting
- void setId(long id) {
- this.myid = id;
- }
-
- private boolean sslQuorum;
- private boolean shouldUsePortUnification;
-
- public boolean isSslQuorum() {
- return sslQuorum;
- }
-
- public boolean shouldUsePortUnification() {
- return shouldUsePortUnification;
- }
-
- private final QuorumX509Util x509Util;
-
- QuorumX509Util getX509Util() {
- return x509Util;
- }
-
- /**
- * This is who I think the leader currently is.
- */
- private volatile Vote currentVote;
-
- public synchronized Vote getCurrentVote() {
- return currentVote;
- }
-
- public synchronized void setCurrentVote(Vote v) {
- currentVote = v;
- }
-
- private volatile boolean running = true;
-
- private String initialConfig;
-
- /**
- * The number of milliseconds of each tick
- */
- protected int tickTime;
-
- /**
- * Whether learners in this quorum should create new sessions as local.
- * False by default to preserve existing behavior.
- */
- protected boolean localSessionsEnabled = false;
-
- /**
- * Whether learners in this quorum should upgrade local sessions to
- * global. Only matters if local sessions are enabled.
- */
- protected boolean localSessionsUpgradingEnabled = true;
-
- /**
- * Minimum number of milliseconds to allow for session timeout.
- * A value of -1 indicates unset, use default.
- */
- protected int minSessionTimeout = -1;
-
- /**
- * Maximum number of milliseconds to allow for session timeout.
- * A value of -1 indicates unset, use default.
- */
- protected int maxSessionTimeout = -1;
-
- /**
- * The ZooKeeper server's socket backlog length. The number of connections
- * that will be queued to be read before new connections are dropped. A
- * value of -1 indicates that the default backlog will be used.
- */
- protected int clientPortListenBacklog = -1;
-
- /**
- * The number of ticks that the initial synchronization phase can take
- */
- protected volatile int initLimit;
-
- /**
- * The number of ticks that can pass between sending a request and getting
- * an acknowledgment
- */
- protected volatile int syncLimit;
-
- /**
- * The number of ticks that can pass before retrying to connect to learner master
- */
- protected volatile int connectToLearnerMasterLimit;
-
- /**
- * Enables/disables the sync request processor. This option is enabled
- * by default and is meant to be used with observers.
- */
- protected boolean syncEnabled = true;
-
- /**
- * The current tick
- */
- protected AtomicInteger tick = new AtomicInteger();
-
- /**
- * Whether or not to listen on all IPs for the two quorum ports
- * (broadcast and fast leader election).
- */
- protected boolean quorumListenOnAllIPs = false;
-
- /**
- * Time taken for leader election, in milliseconds. The value is set
- * only after leader election has completed.
- */
- private long electionTimeTaken = -1;
-
- /**
- * Enables/disables quorum authentication using SASL. Defaults to false.
- */
- protected boolean quorumSaslEnableAuth;
-
- /**
- * If this is false, the quorum peer server will accept connections from another
- * quorum peer client even if authentication did not succeed. This can be used while
- * upgrading the ZooKeeper server. Defaults to false (authentication not required).
- */
- protected boolean quorumServerSaslAuthRequired;
-
- /**
- * If this is false, the quorum peer learner will talk to the quorum peer server
- * without authentication. This can be used while upgrading the ZooKeeper
- * server. Defaults to false (authentication not required).
- */
- protected boolean quorumLearnerSaslAuthRequired;
-
- /**
- * Kerberos quorum service principal. Defaults to 'zkquorum/localhost'.
- */
- protected String quorumServicePrincipal;
-
- /**
- * Quorum learner login context name in the JAAS config file used to read the
- * Kerberos security details. Defaults to 'QuorumLearner'.
- */
- protected String quorumLearnerLoginContext;
-
- /**
- * Quorum server login context name in the JAAS config file used to read the
- * Kerberos security details. Defaults to 'QuorumServer'.
- */
- protected String quorumServerLoginContext;
-
- // TODO: need to tune the default value of thread size
- private static final int QUORUM_CNXN_THREADS_SIZE_DEFAULT_VALUE = 20;
- /**
- * The maximum number of threads to allow in the connectionExecutors thread
- * pool which will be used to initiate quorum server connections.
- */
- protected int quorumCnxnThreadsSize = QUORUM_CNXN_THREADS_SIZE_DEFAULT_VALUE;
-
- public static final String QUORUM_CNXN_TIMEOUT_MS = "zookeeper.quorumCnxnTimeoutMs";
- private static int quorumCnxnTimeoutMs;
-
- static {
- quorumCnxnTimeoutMs = Integer.getInteger(QUORUM_CNXN_TIMEOUT_MS, -1);
- LOG.info("{}={}", QUORUM_CNXN_TIMEOUT_MS, quorumCnxnTimeoutMs);
- }
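-
- // Minimal usage sketch (illustrative; the value shown is an assumption): the timeout is
- // read once, at class-load time, from a JVM system property, e.g.
- //   -Dzookeeper.quorumCnxnTimeoutMs=5000
- // on the server's command line. A value of -1 (the default) simply means the property
- // was not set.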
-
- /**
- * @deprecated As of release 3.4.0, this class has been deprecated, since
- * it is used with one of the udp-based versions of leader election, which
- * we are also deprecating.
- *
- * This class simply responds to requests for the current leader of this
- * node.
- * <p>
- * The request contains just an xid generated by the requestor.
- * <p>
- * The response has the xid, the id of this server, the id of the leader,
- * and the zxid of the leader.
- */
- @Deprecated
- class ResponderThread extends ZooKeeperThread {
-
- ResponderThread() {
- super("ResponderThread");
- }
-
- volatile boolean running = true;
-
- @Override
- public void run() {
- try {
- byte[] b = new byte[36];
- ByteBuffer responseBuffer = ByteBuffer.wrap(b);
- DatagramPacket packet = new DatagramPacket(b, b.length);
- while (running) {
- udpSocket.receive(packet);
- if (packet.getLength() != 4) {
- LOG.warn("Got more than just an xid! Len = {}", packet.getLength());
- } else {
- responseBuffer.clear();
- responseBuffer.getInt(); // Skip the xid
- responseBuffer.putLong(myid);
- Vote current = getCurrentVote();
- switch (getPeerState()) {
- case LOOKING:
- responseBuffer.putLong(current.getId());
- responseBuffer.putLong(current.getZxid());
- break;
- case LEADING:
- responseBuffer.putLong(myid);
- try {
- long proposed;
- synchronized (leader) {
- proposed = leader.lastProposed;
- }
- responseBuffer.putLong(proposed);
- } catch (NullPointerException npe) {
- // This can happen in state transitions,
- // just ignore the request
- }
- break;
- case FOLLOWING:
- responseBuffer.putLong(current.getId());
- try {
- responseBuffer.putLong(follower.getZxid());
- } catch (NullPointerException npe) {
- // This can happen in state transitions,
- // just ignore the request
- }
- break;
- case OBSERVING:
- // Do nothing, Observers keep themselves to
- // themselves.
- break;
- }
- packet.setData(b);
- udpSocket.send(packet);
- }
- packet.setLength(b.length);
- }
- } catch (RuntimeException e) {
- LOG.warn("Unexpected runtime exception in ResponderThread", e);
- } catch (IOException e) {
- LOG.warn("Unexpected IO exception in ResponderThread", e);
- } finally {
- LOG.warn("QuorumPeer responder thread exited");
- }
- }
-
- }
-
- private ServerState state = ServerState.LOOKING;
-
- private AtomicReference<ZabState> zabState = new AtomicReference<>(ZabState.ELECTION);
- private AtomicReference<SyncMode> syncMode = new AtomicReference<>(SyncMode.NONE);
- private AtomicReference<String> leaderAddress = new AtomicReference<>("");
- private AtomicLong leaderId = new AtomicLong(-1);
-
- private boolean reconfigFlag = false; // indicates that a reconfig just committed
-
- public synchronized void setPeerState(ServerState newState) {
- state = newState;
- if (newState == ServerState.LOOKING) {
- setLeaderAddressAndId(null, -1);
- setZabState(ZabState.ELECTION);
- } else {
- LOG.info("Peer state changed: {}", getDetailedPeerState());
- }
- }
-
- public void setZabState(ZabState zabState) {
- if ((zabState == ZabState.BROADCAST) && (unavailableStartTime != 0)) {
- long unavailableTime = Time.currentElapsedTime() - unavailableStartTime;
- ServerMetrics.getMetrics().UNAVAILABLE_TIME.add(unavailableTime);
- if (getPeerState() == ServerState.LEADING) {
- ServerMetrics.getMetrics().LEADER_UNAVAILABLE_TIME.add(unavailableTime);
- }
- unavailableStartTime = 0;
- }
- this.zabState.set(zabState);
- LOG.info("Peer state changed: {}", getDetailedPeerState());
- }
-
- public void setSyncMode(SyncMode syncMode) {
- this.syncMode.set(syncMode);
- LOG.info("Peer state changed: {}", getDetailedPeerState());
- }
-
- public ZabState getZabState() {
- return zabState.get();
- }
-
- public SyncMode getSyncMode() {
- return syncMode.get();
- }
-
- public void setLeaderAddressAndId(MultipleAddresses addr, long newId) {
- if (addr != null) {
- leaderAddress.set(String.join("|", addr.getAllHostStrings()));
- } else {
- leaderAddress.set(null);
- }
- leaderId.set(newId);
- }
-
- public String getLeaderAddress() {
- return leaderAddress.get();
- }
-
- public long getLeaderId() {
- return leaderId.get();
- }
-
- public String getDetailedPeerState() {
- final StringBuilder sb = new StringBuilder(getPeerState().toString().toLowerCase());
- final ZabState zabState = getZabState();
- if (!ZabState.ELECTION.equals(zabState)) {
- sb.append(" - ").append(zabState.toString().toLowerCase());
- }
- final SyncMode syncMode = getSyncMode();
- if (!SyncMode.NONE.equals(syncMode)) {
- sb.append(" - ").append(syncMode.toString().toLowerCase());
- }
- return sb.toString();
- }
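-
- // Illustrative examples (not from the original source) of the strings this produces:
- //   "looking"                               (ELECTION phase, no sync mode appended)
- //   "following - synchronization - diff"
- //   "leading - broadcast"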
-
- public synchronized void reconfigFlagSet() {
- reconfigFlag = true;
- }
- public synchronized void reconfigFlagClear() {
- reconfigFlag = false;
- }
- public synchronized boolean isReconfigStateChange() {
- return reconfigFlag;
- }
- public synchronized ServerState getPeerState() {
- return state;
- }
-
- DatagramSocket udpSocket;
-
- private final AtomicReference<AddressTuple> myAddrs = new AtomicReference<>();
-
- /**
- * Resolves hostname for a given server ID.
- *
- * This method resolves the hostname for a given server ID in both quorumVerifier
- * and lastSeenQuorumVerifier. If the server ID matches the local server ID,
- * it also updates myAddrs.
- */
- public void recreateSocketAddresses(long id) {
- QuorumVerifier qv = getQuorumVerifier();
- if (qv != null) {
- QuorumServer qs = qv.getAllMembers().get(id);
- if (qs != null) {
- qs.recreateSocketAddresses();
- if (id == getMyId()) {
- setAddrs(qs.addr, qs.electionAddr, qs.clientAddr);
- }
- }
- }
- qv = getLastSeenQuorumVerifier();
- if (qv != null) {
- QuorumServer qs = qv.getAllMembers().get(id);
- if (qs != null) {
- qs.recreateSocketAddresses();
- }
- }
- }
-
- private AddressTuple getAddrs() {
- AddressTuple addrs = myAddrs.get();
- if (addrs != null) {
- return addrs;
- }
- try {
- synchronized (QV_LOCK) {
- addrs = myAddrs.get();
- while (addrs == null) {
- QV_LOCK.wait();
- addrs = myAddrs.get();
- }
- return addrs;
- }
- } catch (InterruptedException e) {
- Thread.currentThread().interrupt();
- throw new RuntimeException(e);
- }
- }
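-
- // Descriptive note (added for clarity): getAddrs() blocks on QV_LOCK until setAddrs()
- // publishes an AddressTuple and calls QV_LOCK.notifyAll(), which happens when this
- // server's own entry is installed via setQuorumVerifier() or recreateSocketAddresses().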
-
- public MultipleAddresses getQuorumAddress() {
- return getAddrs().quorumAddr;
- }
-
- public MultipleAddresses getElectionAddress() {
- return getAddrs().electionAddr;
- }
-
- public InetSocketAddress getClientAddress() {
- final AddressTuple addrs = myAddrs.get();
- return (addrs == null) ? null : addrs.clientAddr;
- }
-
- private void setAddrs(MultipleAddresses quorumAddr, MultipleAddresses electionAddr, InetSocketAddress clientAddr) {
- synchronized (QV_LOCK) {
- myAddrs.set(new AddressTuple(quorumAddr, electionAddr, clientAddr));
- QV_LOCK.notifyAll();
- }
- }
-
- private int electionType;
-
- Election electionAlg;
-
- ServerCnxnFactory cnxnFactory;
- ServerCnxnFactory secureCnxnFactory;
-
- private FileTxnSnapLog logFactory = null;
-
- private final QuorumStats quorumStats;
-
- AdminServer adminServer;
-
- private final boolean reconfigEnabled;
-
- public static QuorumPeer testingQuorumPeer() throws SaslException {
- return new QuorumPeer();
- }
-
- public QuorumPeer() throws SaslException {
- super("QuorumPeer");
- quorumStats = new QuorumStats(this);
- jmxRemotePeerBean = new HashMap<>();
- adminServer = AdminServerFactory.createAdminServer();
- x509Util = createX509Util();
- initialize();
- reconfigEnabled = QuorumPeerConfig.isReconfigEnabled();
- }
-
- // VisibleForTesting
- QuorumX509Util createX509Util() {
- return new QuorumX509Util();
- }
-
- /**
- * For backward compatibility purposes, we instantiate QuorumMaj by default.
- */
-
- public QuorumPeer(Map<Long, QuorumServer> quorumPeers, File dataDir, File dataLogDir, int electionType, long myid, int tickTime, int initLimit, int syncLimit, int connectToLearnerMasterLimit, ServerCnxnFactory cnxnFactory) throws IOException {
- this(quorumPeers, dataDir, dataLogDir, electionType, myid, tickTime, initLimit, syncLimit, connectToLearnerMasterLimit, false, cnxnFactory, new QuorumMaj(quorumPeers));
- }
-
- public QuorumPeer(Map<Long, QuorumServer> quorumPeers, File dataDir, File dataLogDir, int electionType, long myid, int tickTime, int initLimit, int syncLimit, int connectToLearnerMasterLimit, boolean quorumListenOnAllIPs, ServerCnxnFactory cnxnFactory, QuorumVerifier quorumConfig) throws IOException {
- this();
- this.cnxnFactory = cnxnFactory;
- this.electionType = electionType;
- this.myid = myid;
- this.tickTime = tickTime;
- this.initLimit = initLimit;
- this.syncLimit = syncLimit;
- this.connectToLearnerMasterLimit = connectToLearnerMasterLimit;
- this.quorumListenOnAllIPs = quorumListenOnAllIPs;
- this.logFactory = new FileTxnSnapLog(dataLogDir, dataDir);
- this.zkDb = new ZKDatabase(this.logFactory);
- if (quorumConfig == null) {
- quorumConfig = new QuorumMaj(quorumPeers);
- }
- setQuorumVerifier(quorumConfig, false);
- adminServer = AdminServerFactory.createAdminServer();
- }
-
- public void initialize() throws SaslException {
- // init quorum auth server & learner
- if (isQuorumSaslAuthEnabled()) {
- Set<String> authzHosts = new HashSet<>();
- for (QuorumServer qs : getView().values()) {
- authzHosts.add(qs.hostname);
- }
- authServer = new SaslQuorumAuthServer(isQuorumServerSaslAuthRequired(), quorumServerLoginContext, authzHosts);
- authLearner = new SaslQuorumAuthLearner(isQuorumLearnerSaslAuthRequired(), quorumServicePrincipal, quorumLearnerLoginContext);
- } else {
- authServer = new NullQuorumAuthServer();
- authLearner = new NullQuorumAuthLearner();
- }
- }
-
- QuorumStats quorumStats() {
- return quorumStats;
- }
-
- @Override
- public synchronized void start() {
- if (!getView().containsKey(myid)) {
- throw new RuntimeException("My id " + myid + " not in the peer list");
- }
- loadDataBase();
- startServerCnxnFactory();
- try {
- adminServer.start();
- } catch (AdminServerException e) {
- LOG.warn("Problem starting AdminServer", e);
- }
- startLeaderElection();
- startJvmPauseMonitor();
- super.start();
- }
-
- private void loadDataBase() {
- try {
- zkDb.loadDataBase();
-
- // load the epochs
- long lastProcessedZxid = zkDb.getDataTree().lastProcessedZxid;
- long epochOfZxid = ZxidUtils.getEpochFromZxid(lastProcessedZxid);
- try {
- currentEpoch = readLongFromFile(CURRENT_EPOCH_FILENAME);
- } catch (FileNotFoundException e) {
- // pick a reasonable epoch number
- // this should only happen once when moving to a
- // new code version
- currentEpoch = epochOfZxid;
- LOG.info(
- "{} not found! Creating with a reasonable default of {}. "
- + "This should only happen when you are upgrading your installation",
- CURRENT_EPOCH_FILENAME,
- currentEpoch);
- writeLongToFile(CURRENT_EPOCH_FILENAME, currentEpoch);
- }
- if (epochOfZxid > currentEpoch) {
- // currentEpoch.tmp file in snapshot directory
- File currentTmp = new File(getTxnFactory().getSnapDir(),
- CURRENT_EPOCH_FILENAME + AtomicFileOutputStream.TMP_EXTENSION);
- if (currentTmp.exists()) {
- long epochOfTmp = readLongFromFile(currentTmp.getName());
- LOG.info("{} found. Setting current epoch to {}.", currentTmp, epochOfTmp);
- setCurrentEpoch(epochOfTmp);
- } else {
- throw new IOException(
- "The current epoch, " + ZxidUtils.zxidToString(currentEpoch)
- + ", is older than the last zxid, " + lastProcessedZxid);
- }
- }
- try {
- acceptedEpoch = readLongFromFile(ACCEPTED_EPOCH_FILENAME);
- } catch (FileNotFoundException e) {
- // pick a reasonable epoch number
- // this should only happen once when moving to a
- // new code version
- acceptedEpoch = epochOfZxid;
- LOG.info(
- "{} not found! Creating with a reasonable default of {}. "
- + "This should only happen when you are upgrading your installation",
- ACCEPTED_EPOCH_FILENAME,
- acceptedEpoch);
- writeLongToFile(ACCEPTED_EPOCH_FILENAME, acceptedEpoch);
- }
- if (acceptedEpoch < currentEpoch) {
- throw new IOException("The accepted epoch, "
- + ZxidUtils.zxidToString(acceptedEpoch)
- + " is less than the current epoch, "
- + ZxidUtils.zxidToString(currentEpoch));
- }
- } catch (IOException ie) {
- LOG.error("Unable to load database on disk", ie);
- throw new RuntimeException("Unable to run quorum server ", ie);
- }
- }
-
- ResponderThread responder;
-
- public synchronized void stopLeaderElection() {
- responder.running = false;
- responder.interrupt();
- }
- public synchronized void startLeaderElection() {
- try {
- if (getPeerState() == ServerState.LOOKING) {
- currentVote = new Vote(myid, getLastLoggedZxid(), getCurrentEpoch());
- }
- } catch (IOException e) {
- RuntimeException re = new RuntimeException(e.getMessage());
- re.setStackTrace(e.getStackTrace());
- throw re;
- }
-
- this.electionAlg = createElectionAlgorithm(electionType);
- }
-
- private void startJvmPauseMonitor() {
- if (this.jvmPauseMonitor != null) {
- this.jvmPauseMonitor.serviceStart();
- }
- }
-
- /**
- * Count the number of nodes in the map that could be followers.
- * @param peers the map of quorum servers
- * @return the number of participants in the map
- */
- protected static int countParticipants(Map<Long, QuorumServer> peers) {
- int count = 0;
- for (QuorumServer q : peers.values()) {
- if (q.type == LearnerType.PARTICIPANT) {
- count++;
- }
- }
- return count;
- }
-
- /**
- * This constructor is only used by the existing unit test code.
- * It defaults to FileLogProvider persistence provider.
- */
- public QuorumPeer(Map<Long, QuorumServer> quorumPeers, File snapDir, File logDir, int clientPort, int electionAlg, long myid, int tickTime, int initLimit, int syncLimit, int connectToLearnerMasterLimit) throws IOException {
- this(
- quorumPeers,
- snapDir,
- logDir,
- electionAlg,
- myid,
- tickTime,
- initLimit,
- syncLimit,
- connectToLearnerMasterLimit,
- false,
- ServerCnxnFactory.createFactory(getClientAddress(quorumPeers, myid, clientPort), -1),
- new QuorumMaj(quorumPeers));
- }
-
- public QuorumPeer(Map<Long, QuorumServer> quorumPeers, File snapDir, File logDir, int clientPort, int electionAlg, long myid, int tickTime, int initLimit, int syncLimit, int connectToLearnerMasterLimit, String oraclePath) throws IOException {
- this(
- quorumPeers,
- snapDir,
- logDir,
- electionAlg,
- myid,
- tickTime,
- initLimit,
- syncLimit,
- connectToLearnerMasterLimit,
- false,
- ServerCnxnFactory.createFactory(getClientAddress(quorumPeers, myid, clientPort), -1),
- new QuorumOracleMaj(quorumPeers, oraclePath));
- }
-
- /**
- * This constructor is only used by the existing unit test code.
- * It defaults to FileLogProvider persistence provider.
- */
- public QuorumPeer(Map<Long, QuorumServer> quorumPeers, File snapDir, File logDir, int clientPort, int electionAlg, long myid, int tickTime, int initLimit, int syncLimit, int connectToLearnerMasterLimit, QuorumVerifier quorumConfig) throws IOException {
- this(
- quorumPeers,
- snapDir,
- logDir,
- electionAlg,
- myid,
- tickTime,
- initLimit,
- syncLimit,
- connectToLearnerMasterLimit,
- false,
- ServerCnxnFactory.createFactory(getClientAddress(quorumPeers, myid, clientPort), -1),
- quorumConfig);
- }
-
- private static InetSocketAddress getClientAddress(Map<Long, QuorumServer> quorumPeers, long myid, int clientPort) throws IOException {
- QuorumServer quorumServer = quorumPeers.get(myid);
- if (null == quorumServer) {
- throw new IOException("No QuorumServer corresponding to myid " + myid);
- }
- if (null == quorumServer.clientAddr) {
- return new InetSocketAddress(clientPort);
- }
- if (quorumServer.clientAddr.getPort() != clientPort) {
- throw new IOException("QuorumServer port "
- + quorumServer.clientAddr.getPort()
- + " does not match with given port "
- + clientPort);
- }
- return quorumServer.clientAddr;
- }
-
- /**
- * returns the highest zxid that this host has seen
- *
- * @return the highest zxid for this host
- */
- public long getLastLoggedZxid() {
- if (!zkDb.isInitialized()) {
- loadDataBase();
- }
- return zkDb.getDataTreeLastProcessedZxid();
- }
-
- public Follower follower;
- public Leader leader;
- public Observer observer;
-
- protected Follower makeFollower(FileTxnSnapLog logFactory) throws IOException {
- return new Follower(this, new FollowerZooKeeperServer(logFactory, this, this.zkDb));
- }
-
- protected Leader makeLeader(FileTxnSnapLog logFactory) throws IOException, X509Exception {
- return new Leader(this, new LeaderZooKeeperServer(logFactory, this, this.zkDb));
- }
-
- protected Observer makeObserver(FileTxnSnapLog logFactory) throws IOException {
- return new Observer(this, new ObserverZooKeeperServer(logFactory, this, this.zkDb));
- }
-
- @SuppressWarnings("deprecation")
- protected Election createElectionAlgorithm(int electionAlgorithm) {
- Election le = null;
-
- //TODO: use a factory rather than a switch
- switch (electionAlgorithm) {
- case 1:
- throw new UnsupportedOperationException("Election Algorithm 1 is not supported.");
- case 2:
- throw new UnsupportedOperationException("Election Algorithm 2 is not supported.");
- case 3:
- QuorumCnxManager qcm = createCnxnManager();
- QuorumCnxManager oldQcm = qcmRef.getAndSet(qcm);
- if (oldQcm != null) {
- LOG.warn("Clobbering already-set QuorumCnxManager (restarting leader election?)");
- oldQcm.halt();
- }
- QuorumCnxManager.Listener listener = qcm.listener;
- if (listener != null) {
- listener.start();
- FastLeaderElection fle = new FastLeaderElection(this, qcm);
- fle.start();
- le = fle;
- } else {
- LOG.error("Null listener when initializing cnx manager");
- }
- break;
- default:
- assert false;
- }
- return le;
- }
-
- @SuppressWarnings("deprecation")
- protected Election makeLEStrategy() {
- LOG.debug("Initializing leader election protocol...");
- return electionAlg;
- }
-
- protected synchronized void setLeader(Leader newLeader) {
- leader = newLeader;
- }
-
- protected synchronized void setFollower(Follower newFollower) {
- follower = newFollower;
- }
-
- protected synchronized void setObserver(Observer newObserver) {
- observer = newObserver;
- }
-
- public synchronized ZooKeeperServer getActiveServer() {
- if (leader != null) {
- return leader.zk;
- } else if (follower != null) {
- return follower.zk;
- } else if (observer != null) {
- return observer.zk;
- }
- return null;
- }
-
- boolean shuttingDownLE = false;
-
- public void setSuspended(boolean suspended) {
- this.suspended.set(suspended);
- }
- private void checkSuspended() {
- try {
- while (suspended.get()) {
- Thread.sleep(10);
- }
- } catch (InterruptedException err) {
- Thread.currentThread().interrupt();
- }
- }
-
- @Override
- public void run() {
- updateThreadName();
-
- LOG.debug("Starting quorum peer");
- try {
- jmxQuorumBean = new QuorumBean(this);
- MBeanRegistry.getInstance().register(jmxQuorumBean, null);
- for (QuorumServer s : getView().values()) {
- ZKMBeanInfo p;
- if (getMyId() == s.id) {
- p = jmxLocalPeerBean = new LocalPeerBean(this);
- try {
- MBeanRegistry.getInstance().register(p, jmxQuorumBean);
- } catch (Exception e) {
- LOG.warn("Failed to register with JMX", e);
- jmxLocalPeerBean = null;
- }
- } else {
- RemotePeerBean rBean = new RemotePeerBean(this, s);
- try {
- MBeanRegistry.getInstance().register(rBean, jmxQuorumBean);
- jmxRemotePeerBean.put(s.id, rBean);
- } catch (Exception e) {
- LOG.warn("Failed to register with JMX", e);
- }
- }
- }
- } catch (Exception e) {
- LOG.warn("Failed to register with JMX", e);
- jmxQuorumBean = null;
- }
-
- try {
- /*
- * Main loop
- */
- while (running) {
- if (unavailableStartTime == 0) {
- unavailableStartTime = Time.currentElapsedTime();
- }
-
- switch (getPeerState()) {
- case LOOKING:
- LOG.info("LOOKING");
- ServerMetrics.getMetrics().LOOKING_COUNT.add(1);
-
- if (Boolean.getBoolean("readonlymode.enabled")) {
- LOG.info("Attempting to start ReadOnlyZooKeeperServer");
-
- // Create read-only server but don't start it immediately
- final ReadOnlyZooKeeperServer roZk = new ReadOnlyZooKeeperServer(logFactory, this, this.zkDb);
-
- // Instead of starting roZk immediately, wait some grace
- // period before we decide we're partitioned.
- //
- // A Thread is used here because otherwise it would require
- // changes in each of the election strategy classes, which is
- // unnecessary code coupling.
- Thread roZkMgr = new Thread() {
- public void run() {
- try {
- // lower-bound grace period to 2 secs
- sleep(Math.max(2000, tickTime));
- if (ServerState.LOOKING.equals(getPeerState())) {
- roZk.startup();
- }
- } catch (InterruptedException e) {
- LOG.info("Interrupted while attempting to start ReadOnlyZooKeeperServer, not started");
- } catch (Exception e) {
- LOG.error("FAILED to start ReadOnlyZooKeeperServer", e);
- }
- }
- };
- try {
- roZkMgr.start();
- reconfigFlagClear();
- if (shuttingDownLE) {
- shuttingDownLE = false;
- startLeaderElection();
- }
- setCurrentVote(makeLEStrategy().lookForLeader());
- checkSuspended();
- } catch (Exception e) {
- LOG.warn("Unexpected exception", e);
- setPeerState(ServerState.LOOKING);
- } finally {
- // If the thread is in the grace period, interrupt
- // it to come out of waiting.
- roZkMgr.interrupt();
- roZk.shutdown();
- }
- } else {
- try {
- reconfigFlagClear();
- if (shuttingDownLE) {
- shuttingDownLE = false;
- startLeaderElection();
- }
- setCurrentVote(makeLEStrategy().lookForLeader());
- } catch (Exception e) {
- LOG.warn("Unexpected exception", e);
- setPeerState(ServerState.LOOKING);
- }
- }
- break;
- case OBSERVING:
- try {
- LOG.info("OBSERVING");
- setObserver(makeObserver(logFactory));
- observer.observeLeader();
- } catch (Exception e) {
- LOG.warn("Unexpected exception", e);
- } finally {
- observer.shutdown();
- setObserver(null);
- updateServerState();
-
- // Add delay jitter before we switch to the LOOKING
- // state, to reduce the load on the ObserverMaster
- if (isRunning()) {
- Observer.waitForObserverElectionDelay();
- }
- }
- break;
- case FOLLOWING:
- try {
- LOG.info("FOLLOWING");
- setFollower(makeFollower(logFactory));
- follower.followLeader();
- } catch (Exception e) {
- LOG.warn("Unexpected exception", e);
- } finally {
- follower.shutdown();
- setFollower(null);
- updateServerState();
- }
- break;
- case LEADING:
- LOG.info("LEADING");
- try {
- setLeader(makeLeader(logFactory));
- leader.lead();
- setLeader(null);
- } catch (Exception e) {
- LOG.warn("Unexpected exception", e);
- } finally {
- if (leader != null) {
- leader.shutdown("Forcing shutdown");
- setLeader(null);
- }
- updateServerState();
- }
- break;
- }
- }
- } finally {
- LOG.warn("QuorumPeer main thread exited");
- MBeanRegistry instance = MBeanRegistry.getInstance();
- instance.unregister(jmxQuorumBean);
- instance.unregister(jmxLocalPeerBean);
-
- for (RemotePeerBean remotePeerBean : jmxRemotePeerBean.values()) {
- instance.unregister(remotePeerBean);
- }
-
- jmxQuorumBean = null;
- jmxLocalPeerBean = null;
- jmxRemotePeerBean = null;
- }
- }
-
- private synchronized void updateServerState() {
- if (!reconfigFlag) {
- setPeerState(ServerState.LOOKING);
- LOG.warn("PeerState set to LOOKING");
- return;
- }
-
- if (getMyId() == getCurrentVote().getId()) {
- setPeerState(ServerState.LEADING);
- LOG.debug("PeerState set to LEADING");
- } else if (getLearnerType() == LearnerType.PARTICIPANT) {
- setPeerState(ServerState.FOLLOWING);
- LOG.debug("PeerState set to FOLLOWING");
- } else if (getLearnerType() == LearnerType.OBSERVER) {
- setPeerState(ServerState.OBSERVING);
- LOG.debug("PeerState set to OBSERVER");
- } else { // currently shouldn't happen since there are only 2 learner types
- setPeerState(ServerState.LOOKING);
- LOG.debug("Should not be here");
- }
- reconfigFlag = false;
- }
-
- public void shutdown() {
- running = false;
- x509Util.close();
- if (leader != null) {
- leader.shutdown("quorum Peer shutdown");
- }
- if (follower != null) {
- follower.shutdown();
- }
- shutdownServerCnxnFactory();
- if (udpSocket != null) {
- udpSocket.close();
- }
- if (jvmPauseMonitor != null) {
- jvmPauseMonitor.serviceStop();
- }
-
- try {
- adminServer.shutdown();
- } catch (AdminServerException e) {
- LOG.warn("Problem stopping AdminServer", e);
- }
-
- if (getElectionAlg() != null) {
- this.interrupt();
- getElectionAlg().shutdown();
- }
- try {
- zkDb.close();
- } catch (IOException ie) {
- LOG.warn("Error closing logs ", ie);
- }
- }
-
- /**
- * A 'view' is a node's current opinion of the membership of the entire
- * ensemble.
- */
- public Map<Long, QuorumPeer.QuorumServer> getView() {
- return Collections.unmodifiableMap(getQuorumVerifier().getAllMembers());
- }
-
- /**
- * Observers are not contained in this view, only nodes with
- * PeerType=PARTICIPANT.
- */
- public Map<Long, QuorumPeer.QuorumServer> getVotingView() {
- return getQuorumVerifier().getVotingMembers();
- }
-
- /**
- * Returns only observers, no followers.
- */
- public Map<Long, QuorumPeer.QuorumServer> getObservingView() {
- return getQuorumVerifier().getObservingMembers();
- }
-
- public synchronized Set<Long> getCurrentAndNextConfigVoters() {
- Set<Long> voterIds = new HashSet<>(getQuorumVerifier().getVotingMembers().keySet());
- if (getLastSeenQuorumVerifier() != null) {
- voterIds.addAll(getLastSeenQuorumVerifier().getVotingMembers().keySet());
- }
- return voterIds;
- }
-
- /**
- * Check if a node is in the current view. With static membership, the
- * result of this check will never change; only when dynamic membership
- * is introduced will this be more useful.
- */
- public boolean viewContains(Long sid) {
- return this.getView().containsKey(sid);
- }
-
- /**
- * Only used by QuorumStats at the moment
- */
- public String[] getQuorumPeers() {
- List<String> l = new ArrayList<>();
- synchronized (this) {
- if (leader != null) {
- for (LearnerHandler fh : leader.getLearners()) {
- if (fh.getSocket() != null) {
- String s = formatInetAddr((InetSocketAddress) fh.getSocket().getRemoteSocketAddress());
- if (leader.isLearnerSynced(fh)) {
- s += "*";
- }
- l.add(s);
- }
- }
- } else if (follower != null) {
- l.add(formatInetAddr((InetSocketAddress) follower.sock.getRemoteSocketAddress()));
- }
- }
- return l.toArray(new String[0]);
- }
-
- public String getServerState() {
- switch (getPeerState()) {
- case LOOKING:
- return QuorumStats.Provider.LOOKING_STATE;
- case LEADING:
- return QuorumStats.Provider.LEADING_STATE;
- case FOLLOWING:
- return QuorumStats.Provider.FOLLOWING_STATE;
- case OBSERVING:
- return QuorumStats.Provider.OBSERVING_STATE;
- }
- return QuorumStats.Provider.UNKNOWN_STATE;
- }
-
- /**
- * set the id of this quorum peer.
- */
- public void setMyid(long myid) {
- this.myid = myid;
- }
-
- public void setInitialConfig(String initialConfig) {
- this.initialConfig = initialConfig;
- }
-
- public String getInitialConfig() {
- return initialConfig;
- }
-
- /**
- * Get the number of milliseconds of each tick
- */
- public int getTickTime() {
- return tickTime;
- }
-
- /**
- * Set the number of milliseconds of each tick
- */
- public void setTickTime(int tickTime) {
- LOG.info("tickTime set to {}", tickTime);
- this.tickTime = tickTime;
- }
-
- /** Maximum number of connections allowed from particular host (ip) */
- public int getMaxClientCnxnsPerHost() {
- if (cnxnFactory != null) {
- return cnxnFactory.getMaxClientCnxnsPerHost();
- }
- if (secureCnxnFactory != null) {
- return secureCnxnFactory.getMaxClientCnxnsPerHost();
- }
- return -1;
- }
-
- /** Whether local sessions are enabled */
- public boolean areLocalSessionsEnabled() {
- return localSessionsEnabled;
- }
-
- /** Whether to enable local sessions */
- public void enableLocalSessions(boolean flag) {
- LOG.info("Local sessions {}", (flag ? "enabled" : "disabled"));
- localSessionsEnabled = flag;
- }
-
- /** Whether local sessions are allowed to upgrade to global sessions */
- public boolean isLocalSessionsUpgradingEnabled() {
- return localSessionsUpgradingEnabled;
- }
-
- /** Whether to allow local sessions to upgrade to global sessions */
- public void enableLocalSessionsUpgrading(boolean flag) {
- LOG.info("Local session upgrading {}", (flag ? "enabled" : "disabled"));
- localSessionsUpgradingEnabled = flag;
- }
-
- /** minimum session timeout in milliseconds */
- public int getMinSessionTimeout() {
- return minSessionTimeout;
- }
-
- /** minimum session timeout in milliseconds */
- public void setMinSessionTimeout(int min) {
- LOG.info("minSessionTimeout set to {}", min);
- this.minSessionTimeout = min;
- }
-
- /** maximum session timeout in milliseconds */
- public int getMaxSessionTimeout() {
- return maxSessionTimeout;
- }
-
- /** maximum session timeout in milliseconds */
- public void setMaxSessionTimeout(int max) {
- LOG.info("maxSessionTimeout set to {}", max);
- this.maxSessionTimeout = max;
- }
-
- /** The server socket's listen backlog length */
- public int getClientPortListenBacklog() {
- return this.clientPortListenBacklog;
- }
-
- /** Sets the server socket's listen backlog length. */
- public void setClientPortListenBacklog(int backlog) {
- this.clientPortListenBacklog = backlog;
- }
-
- /**
- * Get the number of ticks that the initial synchronization phase can take
- */
- public int getInitLimit() {
- return initLimit;
- }
-
- /**
- * Set the number of ticks that the initial synchronization phase can take
- */
- public void setInitLimit(int initLimit) {
- LOG.info("initLimit set to {}", initLimit);
- this.initLimit = initLimit;
- }
-
- /**
- * Get the current tick
- */
- public int getTick() {
- return tick.get();
- }
-
- public QuorumVerifier configFromString(String s) throws IOException, ConfigException {
- Properties props = new Properties();
- props.load(new StringReader(s));
- return QuorumPeerConfig.parseDynamicConfig(props, electionType, false, false, getQuorumVerifier().getOraclePath());
- }
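-
- // Sketch of the expected input (illustrative; the concrete servers and ports are
- // assumptions): the string is parsed as java.util.Properties, one "server.<id>" entry
- // per peer in the same "host:quorumPort:electionPort[:role][;clientPort]" form that
- // QuorumServer.toString() emits, typically followed by a "version" line, e.g.
- //   server.1=127.0.0.1:2888:3888:participant;2181
- //   server.2=127.0.0.1:2889:3889:participant;2182
- //   version=100000000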
-
- /**
- * Return QuorumVerifier object for the last committed configuration.
- */
- public QuorumVerifier getQuorumVerifier() {
- synchronized (QV_LOCK) {
- return quorumVerifier;
- }
- }
-
- /**
- * Return QuorumVerifier object for the last proposed configuration.
- */
- public QuorumVerifier getLastSeenQuorumVerifier() {
- synchronized (QV_LOCK) {
- return lastSeenQuorumVerifier;
- }
- }
-
- public synchronized void restartLeaderElection(QuorumVerifier qvOLD, QuorumVerifier qvNEW) {
- if (qvOLD == null || !qvOLD.equals(qvNEW)) {
- LOG.warn("Restarting Leader Election");
- getElectionAlg().shutdown();
- shuttingDownLE = false;
- startLeaderElection();
- }
- }
-
- public String getNextDynamicConfigFilename() {
- if (configFilename == null) {
- LOG.warn("configFilename is null! This should only happen in tests.");
- return null;
- }
- return configFilename + QuorumPeerConfig.nextDynamicConfigFileSuffix;
- }
-
- // On entry to this method, qcm must be non-null and the locks on both qcm and QV_LOCK
- // must be held. We don't want quorumVerifier/lastSeenQuorumVerifier to change out from
- // under us, so we have to hold QV_LOCK; and since the call to qcm.connectOne() will take
- // the lock on qcm (and take QV_LOCK again inside that), the caller needs to have taken
- // qcm outside QV_LOCK to avoid a deadlock against other callers of qcm.connectOne().
- private void connectNewPeers(QuorumCnxManager qcm) {
- if (quorumVerifier != null && lastSeenQuorumVerifier != null) {
- Map<Long, QuorumServer> committedView = quorumVerifier.getAllMembers();
- for (Entry<Long, QuorumServer> e : lastSeenQuorumVerifier.getAllMembers().entrySet()) {
- if (e.getKey() != getMyId() && !committedView.containsKey(e.getKey())) {
- qcm.connectOne(e.getKey());
- }
- }
- }
- }
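-
- // Lock-ordering sketch (illustrative only), matching the comment above: callers take the
- // QuorumCnxManager monitor first and QV_LOCK second, the same order connectOne() uses
- // internally, so no thread ever holds QV_LOCK while waiting for qcm:
- //   synchronized (qcm) {
- //       synchronized (QV_LOCK) {
- //           connectNewPeers(qcm);   // may call qcm.connectOne(sid)
- //       }
- //   }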
-
- public void setLastSeenQuorumVerifier(QuorumVerifier qv, boolean writeToDisk) {
- if (!isReconfigEnabled()) {
- LOG.info("Dynamic reconfig is disabled, we don't store the last seen config.");
- return;
- }
-
- // If qcm is non-null, we may call qcm.connectOne(), which will take the lock on qcm
- // and then take QV_LOCK. Take the locks in the same order to ensure that we don't
- // deadlock against other callers of connectOne(). If qcmRef gets set in another
- // thread while we're inside the synchronized block, that does no harm; if we didn't
- // take a lock on qcm (because it was null when we sampled it), we won't call
- // connectOne() on it. (Use of an AtomicReference is enough to guarantee visibility
- // of updates that provably happen in another thread before entering this method.)
- QuorumCnxManager qcm = qcmRef.get();
- Object outerLockObject = (qcm != null) ? qcm : QV_LOCK;
- synchronized (outerLockObject) {
- synchronized (QV_LOCK) {
- if (lastSeenQuorumVerifier != null && lastSeenQuorumVerifier.getVersion() > qv.getVersion()) {
- LOG.error("setLastSeenQuorumVerifier called with stale config "
- + qv.getVersion()
- + ". Current version: "
- + quorumVerifier.getVersion());
- }
- // Assuming that a version uniquely identifies a configuration, if the
- // version is the same there is nothing to do here.
- if (lastSeenQuorumVerifier != null && lastSeenQuorumVerifier.getVersion() == qv.getVersion()) {
- return;
- }
- lastSeenQuorumVerifier = qv;
- if (qcm != null) {
- connectNewPeers(qcm);
- }
-
- if (writeToDisk) {
- try {
- String fileName = getNextDynamicConfigFilename();
- if (fileName != null) {
- QuorumPeerConfig.writeDynamicConfig(fileName, qv, true);
- }
- } catch (IOException e) {
- LOG.error("Error writing next dynamic config file to disk", e);
- }
- }
- }
- }
- }
-
- public QuorumVerifier setQuorumVerifier(QuorumVerifier qv, boolean writeToDisk) {
- synchronized (QV_LOCK) {
- if ((quorumVerifier != null) && (quorumVerifier.getVersion() >= qv.getVersion())) {
- // This is normal. For example, the server found out about the new config through FastLeaderElection gossiping
- // and then got the same config in an UPTODATE message, so it is already known.
- LOG.debug(
- "{} setQuorumVerifier called with known or old config {}. Current version: {}",
- getMyId(),
- qv.getVersion(),
- quorumVerifier.getVersion());
- return quorumVerifier;
- }
- QuorumVerifier prevQV = quorumVerifier;
- quorumVerifier = qv;
- if (lastSeenQuorumVerifier == null || (qv.getVersion() > lastSeenQuorumVerifier.getVersion())) {
- lastSeenQuorumVerifier = qv;
- }
-
- if (writeToDisk) {
- // some tests initialize QuorumPeer without a static config file
- if (configFilename != null) {
- try {
- String dynamicConfigFilename = makeDynamicConfigFilename(qv.getVersion());
- QuorumPeerConfig.writeDynamicConfig(dynamicConfigFilename, qv, false);
- QuorumPeerConfig.editStaticConfig(configFilename, dynamicConfigFilename, needEraseClientInfoFromStaticConfig());
- } catch (IOException e) {
- LOG.error("Error closing file", e);
- }
- } else {
- LOG.info("writeToDisk == true but configFilename == null");
- }
- }
-
- if (qv.getVersion() == lastSeenQuorumVerifier.getVersion()) {
- QuorumPeerConfig.deleteFile(getNextDynamicConfigFilename());
- }
- QuorumServer qs = qv.getAllMembers().get(getMyId());
- if (qs != null) {
- setAddrs(qs.addr, qs.electionAddr, qs.clientAddr);
- }
- updateObserverMasterList();
- return prevQV;
- }
- }
-
- private String makeDynamicConfigFilename(long version) {
- return configFilename + ".dynamic." + Long.toHexString(version);
- }
-
- private boolean needEraseClientInfoFromStaticConfig() {
- QuorumServer server = quorumVerifier.getAllMembers().get(getMyId());
- return (server != null && server.clientAddr != null && !server.isClientAddrFromStatic);
- }
-
- /**
- * Get an instance of LeaderElection
- */
- public Election getElectionAlg() {
- return electionAlg;
- }
-
- /**
- * Get the synclimit
- */
- public int getSyncLimit() {
- return syncLimit;
- }
-
- /**
- * Set the synclimit
- */
- public void setSyncLimit(int syncLimit) {
- LOG.info("syncLimit set to {}", syncLimit);
- this.syncLimit = syncLimit;
- }
-
- /**
- * Get the connectToLearnerMasterLimit
- */
- public int getConnectToLearnerMasterLimit() {
- return connectToLearnerMasterLimit;
- }
-
- /**
- * Set the connectToLearnerMasterLimit
- */
- public void setConnectToLearnerMasterLimit(int connectToLearnerMasterLimit) {
- LOG.info("connectToLearnerMasterLimit set to {}", connectToLearnerMasterLimit);
- this.connectToLearnerMasterLimit = connectToLearnerMasterLimit;
- }
-
- /**
- * The syncEnabled can also be set via a system property.
- */
- public static final String SYNC_ENABLED = "zookeeper.observer.syncEnabled";
-
- /**
- * Return syncEnabled.
- */
- public boolean getSyncEnabled() {
- if (System.getProperty(SYNC_ENABLED) != null) {
- LOG.info("{}={}", SYNC_ENABLED, Boolean.getBoolean(SYNC_ENABLED));
- return Boolean.getBoolean(SYNC_ENABLED);
- } else {
- return syncEnabled;
- }
- }
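-
- // Usage sketch (illustrative): when the system property is set it overrides whatever was
- // configured on this peer, e.g.
- //   -Dzookeeper.observer.syncEnabled=false
- // makes getSyncEnabled() return false even after setSyncEnabled(true).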
-
- /**
- * Set syncEnabled.
- *
- * @param syncEnabled
- */
- public void setSyncEnabled(boolean syncEnabled) {
- this.syncEnabled = syncEnabled;
- }
-
- /**
- * Gets the election type
- */
- public int getElectionType() {
- return electionType;
- }
-
- /**
- * Sets the election type
- */
- public void setElectionType(int electionType) {
- this.electionType = electionType;
- }
-
- public boolean getQuorumListenOnAllIPs() {
- return quorumListenOnAllIPs;
- }
-
- public void setQuorumListenOnAllIPs(boolean quorumListenOnAllIPs) {
- this.quorumListenOnAllIPs = quorumListenOnAllIPs;
- }
-
- public void setCnxnFactory(ServerCnxnFactory cnxnFactory) {
- this.cnxnFactory = cnxnFactory;
- }
-
- public void setSecureCnxnFactory(ServerCnxnFactory secureCnxnFactory) {
- this.secureCnxnFactory = secureCnxnFactory;
- }
-
- public void setSslQuorum(boolean sslQuorum) {
- if (sslQuorum) {
- LOG.info("Using TLS encrypted quorum communication");
- } else {
- LOG.info("Using insecure (non-TLS) quorum communication");
- }
- this.sslQuorum = sslQuorum;
- }
-
- public void setUsePortUnification(boolean shouldUsePortUnification) {
- LOG.info("Port unification {}", shouldUsePortUnification ? "enabled" : "disabled");
- this.shouldUsePortUnification = shouldUsePortUnification;
- }
-
- private void startServerCnxnFactory() {
- if (cnxnFactory != null) {
- cnxnFactory.start();
- }
- if (secureCnxnFactory != null) {
- secureCnxnFactory.start();
- }
- }
-
- private void shutdownServerCnxnFactory() {
- if (cnxnFactory != null) {
- cnxnFactory.shutdown();
- }
- if (secureCnxnFactory != null) {
- secureCnxnFactory.shutdown();
- }
- }
-
- // Leader and learner will control the zookeeper server and pass it into QuorumPeer.
- public void setZooKeeperServer(ZooKeeperServer zks) {
- if (cnxnFactory != null) {
- cnxnFactory.setZooKeeperServer(zks);
- }
- if (secureCnxnFactory != null) {
- secureCnxnFactory.setZooKeeperServer(zks);
- }
- }
-
- public void closeAllConnections() {
- if (cnxnFactory != null) {
- cnxnFactory.closeAll(ServerCnxn.DisconnectReason.SERVER_SHUTDOWN);
- }
- if (secureCnxnFactory != null) {
- secureCnxnFactory.closeAll(ServerCnxn.DisconnectReason.SERVER_SHUTDOWN);
- }
- }
-
- public int getClientPort() {
- if (cnxnFactory != null) {
- return cnxnFactory.getLocalPort();
- }
- return -1;
- }
-
- public int getSecureClientPort() {
- if (secureCnxnFactory != null) {
- return secureCnxnFactory.getLocalPort();
- }
- return -1;
- }
-
- public void setTxnFactory(FileTxnSnapLog factory) {
- this.logFactory = factory;
- }
-
- public FileTxnSnapLog getTxnFactory() {
- return this.logFactory;
- }
-
- /**
- * set zk database for this node
- * @param database
- */
- public void setZKDatabase(ZKDatabase database) {
- this.zkDb = database;
- }
-
- protected ZKDatabase getZkDb() {
- return zkDb;
- }
-
- public synchronized void initConfigInZKDatabase() {
- if (zkDb != null) {
- zkDb.initConfigInZKDatabase(getQuorumVerifier());
- }
- }
-
- public boolean isRunning() {
- return running;
- }
-
- /**
- * get reference to QuorumCnxManager
- */
- public QuorumCnxManager getQuorumCnxManager() {
- return qcmRef.get();
- }
- private long readLongFromFile(String name) throws IOException {
- File file = new File(logFactory.getSnapDir(), name);
- BufferedReader br = new BufferedReader(new FileReader(file));
- String line = "";
- try {
- line = br.readLine();
- return Long.parseLong(line);
- } catch (NumberFormatException e) {
- throw new IOException("Found " + line + " in " + file);
- } finally {
- br.close();
- }
- }
-
- private long acceptedEpoch = -1;
- private long currentEpoch = -1;
-
- public static final String CURRENT_EPOCH_FILENAME = "currentEpoch";
-
- public static final String ACCEPTED_EPOCH_FILENAME = "acceptedEpoch";
-
- /**
- * Write a long value to disk atomically. Either succeeds or an exception
- * is thrown.
- * @param name file name to write the long to
- * @param value the long value to write to the named file
- * @throws IOException if the file cannot be written atomically
- */
- // visibleForTest
- void writeLongToFile(String name, final long value) throws IOException {
- File file = new File(logFactory.getSnapDir(), name);
- new AtomicFileWritingIdiom(file, new WriterStatement() {
- @Override
- public void write(Writer bw) throws IOException {
- bw.write(Long.toString(value));
- }
- });
- }
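-
- // Illustrative on-disk format: each epoch file (currentEpoch / acceptedEpoch) in the
- // snapshot directory holds a single decimal long on one line, e.g. a file containing
- // "5"; readLongFromFile() parses it back with Long.parseLong.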
-
- public long getCurrentEpoch() throws IOException {
- if (currentEpoch == -1) {
- currentEpoch = readLongFromFile(CURRENT_EPOCH_FILENAME);
- }
- return currentEpoch;
- }
-
- public long getAcceptedEpoch() throws IOException {
- if (acceptedEpoch == -1) {
- acceptedEpoch = readLongFromFile(ACCEPTED_EPOCH_FILENAME);
- }
- return acceptedEpoch;
- }
-
- public void setCurrentEpoch(long e) throws IOException {
- writeLongToFile(CURRENT_EPOCH_FILENAME, e);
- currentEpoch = e;
- }
-
- public void setAcceptedEpoch(long e) throws IOException {
- writeLongToFile(ACCEPTED_EPOCH_FILENAME, e);
- acceptedEpoch = e;
- }
-
- public boolean processReconfig(QuorumVerifier qv, Long suggestedLeaderId, Long zxid, boolean restartLE) {
- if (!isReconfigEnabled()) {
- LOG.debug("Reconfig feature is disabled, skip reconfig processing.");
- return false;
- }
-
- InetSocketAddress oldClientAddr = getClientAddress();
-
- // update last committed quorum verifier, write the new config to disk
- // and restart leader election if config changed.
- QuorumVerifier prevQV = setQuorumVerifier(qv, true);
-
- // There is no log record for the initial config, thus after syncing
- // with the leader /zookeeper/config is empty! It is also possible that
- // the last committed config is propagated during leader election
- // without propagating the corresponding log records,
- // so we should do this explicitly (this is not necessary when we're
- // already a Follower/Observer, only
- // for a Learner):
- initConfigInZKDatabase();
-
- if (prevQV.getVersion() < qv.getVersion() && !prevQV.equals(qv)) {
- Map<Long, QuorumServer> newMembers = qv.getAllMembers();
- updateRemotePeerMXBeans(newMembers);
- if (restartLE) {
- restartLeaderElection(prevQV, qv);
- }
-
- QuorumServer myNewQS = newMembers.get(getMyId());
- if (myNewQS != null && myNewQS.clientAddr != null && !myNewQS.clientAddr.equals(oldClientAddr)) {
- cnxnFactory.reconfigure(myNewQS.clientAddr);
- updateThreadName();
- }
-
- boolean roleChange = updateLearnerType(qv);
- boolean leaderChange = false;
- if (suggestedLeaderId != null) {
- // zxid should be non-null too
- leaderChange = updateVote(suggestedLeaderId, zxid);
- } else {
- long currentLeaderId = getCurrentVote().getId();
- QuorumServer myleaderInCurQV = prevQV.getVotingMembers().get(currentLeaderId);
- QuorumServer myleaderInNewQV = qv.getVotingMembers().get(currentLeaderId);
- leaderChange = (myleaderInCurQV == null
- || myleaderInCurQV.addr == null
- || myleaderInNewQV == null
- || !myleaderInCurQV.addr.equals(myleaderInNewQV.addr));
- // we don't have a designated leader - need to go into leader
- // election
- reconfigFlagClear();
- }
-
- return roleChange || leaderChange;
- }
- return false;
-
- }
-
- private void updateRemotePeerMXBeans(Map<Long, QuorumServer> newMembers) {
- Set<Long> existingMembers = new HashSet<>(newMembers.keySet());
- existingMembers.retainAll(jmxRemotePeerBean.keySet());
- for (Long id : existingMembers) {
- RemotePeerBean rBean = jmxRemotePeerBean.get(id);
- rBean.setQuorumServer(newMembers.get(id));
- }
-
- Set<Long> joiningMembers = new HashSet<>(newMembers.keySet());
- joiningMembers.removeAll(jmxRemotePeerBean.keySet());
- joiningMembers.remove(getMyId()); // remove self as it is local bean
- for (Long id : joiningMembers) {
- QuorumServer qs = newMembers.get(id);
- RemotePeerBean rBean = new RemotePeerBean(this, qs);
- try {
- MBeanRegistry.getInstance().register(rBean, jmxQuorumBean);
- jmxRemotePeerBean.put(qs.id, rBean);
- } catch (Exception e) {
- LOG.warn("Failed to register with JMX", e);
- }
- }
-
- Set<Long> leavingMembers = new HashSet<>(jmxRemotePeerBean.keySet());
- leavingMembers.removeAll(newMembers.keySet());
- for (Long id : leavingMembers) {
- RemotePeerBean rBean = jmxRemotePeerBean.remove(id);
- try {
- MBeanRegistry.getInstance().unregister(rBean);
- } catch (Exception e) {
- LOG.warn("Failed to unregister with JMX", e);
- }
- }
- }
-
- private ArrayList<QuorumServer> observerMasters = new ArrayList<>();
- private void updateObserverMasterList() {
- if (observerMasterPort <= 0) {
- return; // observer masters not enabled
- }
- observerMasters.clear();
- StringBuilder sb = new StringBuilder();
- for (QuorumServer server : quorumVerifier.getVotingMembers().values()) {
- InetAddress address = server.addr.getReachableOrOne().getAddress();
- InetSocketAddress addr = new InetSocketAddress(address, observerMasterPort);
- observerMasters.add(new QuorumServer(server.id, addr));
- sb.append(addr).append(",");
- }
- LOG.info("Updated learner master list to be {}", sb.toString());
- Collections.shuffle(observerMasters);
- // Reset the internal index of the observerMaster when
- // the observerMaster List is refreshed
- nextObserverMaster = 0;
- }
-
- private boolean useObserverMasters() {
- return getLearnerType() == LearnerType.OBSERVER && observerMasters.size() > 0;
- }
-
- private int nextObserverMaster = 0;
- private QuorumServer nextObserverMaster() {
- if (nextObserverMaster >= observerMasters.size()) {
- nextObserverMaster = 0;
- // Add a reconnect delay only after the observer
- // has exhausted its attempts to connect to all the masters
- // in the observerMasterList
- if (isRunning()) {
- Observer.waitForReconnectDelay();
- }
- }
- return observerMasters.get(nextObserverMaster++);
- }
-
- QuorumServer findLearnerMaster(QuorumServer leader) {
- if (useObserverMasters()) {
- return nextObserverMaster();
- } else {
- // Add delay jitter to reduce the load on the leader
- if (isRunning()) {
- Observer.waitForReconnectDelay();
- }
- return leader;
- }
- }
-
- /**
- * Vet a given learner master's information.
- * Allows specification by server id, IP only, or IP and port.
- */
- QuorumServer validateLearnerMaster(String desiredMaster) {
- if (useObserverMasters()) {
- Long sid;
- try {
- sid = Long.parseLong(desiredMaster);
- } catch (NumberFormatException e) {
- sid = null;
- }
- for (QuorumServer server : observerMasters) {
- if (sid == null) {
- for (InetSocketAddress address : server.addr.getAllAddresses()) {
- String serverAddr = address.getAddress().getHostAddress() + ':' + address.getPort();
- if (serverAddr.startsWith(desiredMaster)) {
- return server;
- }
- }
- } else {
- if (sid.equals(server.id)) {
- return server;
- }
- }
- }
- if (sid == null) {
- LOG.info("could not find learner master address={}", desiredMaster);
- } else {
- LOG.warn("could not find learner master sid={}", sid);
- }
- } else {
- LOG.info("cannot validate request, observer masters not enabled");
- }
- return null;
- }
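-
- // Illustrative inputs (assumed values): desiredMaster may be a server id such as "2",
- // an address such as "10.0.0.5" (matched as a prefix of "host:port"), or a full
- // "10.0.0.5:2191" pair; anything that matches no configured observer master returns null.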
-
- private boolean updateLearnerType(QuorumVerifier newQV) {
- //check if I'm an observer in new config
- if (newQV.getObservingMembers().containsKey(getMyId())) {
- if (getLearnerType() != LearnerType.OBSERVER) {
- setLearnerType(LearnerType.OBSERVER);
- LOG.info("Becoming an observer");
- reconfigFlagSet();
- return true;
- } else {
- return false;
- }
- } else if (newQV.getVotingMembers().containsKey(getMyId())) {
- if (getLearnerType() != LearnerType.PARTICIPANT) {
- setLearnerType(LearnerType.PARTICIPANT);
- LOG.info("Becoming a voting participant");
- reconfigFlagSet();
- return true;
- } else {
- return false;
- }
- }
- // I'm not in the view
- if (getLearnerType() != LearnerType.PARTICIPANT) {
- setLearnerType(LearnerType.PARTICIPANT);
- LOG.info("Becoming a non-voting participant");
- reconfigFlagSet();
- return true;
- }
- return false;
- }
-
- private boolean updateVote(long designatedLeader, long zxid) {
- Vote currentVote = getCurrentVote();
- if (currentVote != null && designatedLeader != currentVote.getId()) {
- setCurrentVote(new Vote(designatedLeader, zxid));
- reconfigFlagSet();
- LOG.warn("Suggested leader: {}", designatedLeader);
- return true;
- }
- return false;
- }
-
- /**
- * Updates leader election info to avoid inconsistencies when
- * a new server tries to join the ensemble.
- *
- * Here is the inconsistency scenario we try to solve by updating the peer
- * epoch after following the leader:
- *
- * Let's say we have an ensemble with 3 servers z1, z2 and z3.
- *
- * 1. z1 and z2 were following z3 with peerEpoch 0xb8; the new epoch is
- * 0xb9, aka the current accepted epoch on disk.
- * 2. z2 gets restarted, and will use 0xb9 as its peer epoch when loading
- * the current accepted epoch from disk.
- * 3. z2 receives notifications from z1 and z3, which are following z3 with
- * epoch 0xb8, so it starts following z3 again with peer epoch 0xb8.
- * 4. Before z2 successfully connects to z3, z3 gets restarted with the new
- * epoch 0xb9.
- * 5. z2 will retry for a few rounds (default 5s) before giving up;
- * meanwhile it will report z3 as leader.
- * 6. z1 restarts and starts looking with peer epoch 0xb9.
- * 7. z1 votes for z3, and z3 is elected leader again with peer epoch 0xb9.
- * 8. z2 successfully connects to z3 before giving up, but with peer
- * epoch 0xb8.
- * 9. z1 gets restarted, looking for a leader with peer epoch 0xba, but cannot
- * join, because z2 is reporting peer epoch 0xb8 while z3 is reporting
- * 0xb9.
- *
- * By updating the election vote after actually following the leader, we can
- * avoid this kind of stuck state.
- *
- * The zxid and electionEpoch could be inconsistent for the same reason; it
- * would be better to update these as well after syncing with the leader, but
- * that requires a protocol change, which is non-trivial. This problem is worked
- * around by skipping the comparison of zxid and electionEpoch when counting
- * votes from out-of-election servers while looking for a leader.
- *
- * See https://issues.apache.org/jira/browse/ZOOKEEPER-1732
- */
- protected void updateElectionVote(long newEpoch) {
- Vote currentVote = getCurrentVote();
- if (currentVote != null) {
- setCurrentVote(new Vote(currentVote.getId(), currentVote.getZxid(),
- currentVote.getElectionEpoch(), newEpoch, currentVote.getState()));
- }
- }
-
- private void updateThreadName() {
- String plain = cnxnFactory != null
- ? cnxnFactory.getLocalAddress() != null
- ? formatInetAddr(cnxnFactory.getLocalAddress())
- : "disabled"
- : "disabled";
- String secure = secureCnxnFactory != null ? formatInetAddr(secureCnxnFactory.getLocalAddress()) : "disabled";
- setName(String.format("QuorumPeer[myid=%d](plain=%s)(secure=%s)", getMyId(), plain, secure));
- }
-
- /**
- * Sets the time taken for leader election in milliseconds.
- *
- * @param electionTimeTaken time taken for leader election
- */
- void setElectionTimeTaken(long electionTimeTaken) {
- this.electionTimeTaken = electionTimeTaken;
- }
-
- /**
- * @return the time taken for leader election in milliseconds.
- */
- long getElectionTimeTaken() {
- return electionTimeTaken;
- }
-
- void setQuorumServerSaslRequired(boolean serverSaslRequired) {
- quorumServerSaslAuthRequired = serverSaslRequired;
- LOG.info("{} set to {}", QuorumAuth.QUORUM_SERVER_SASL_AUTH_REQUIRED, serverSaslRequired);
- }
-
- void setQuorumLearnerSaslRequired(boolean learnerSaslRequired) {
- quorumLearnerSaslAuthRequired = learnerSaslRequired;
- LOG.info("{} set to {}", QuorumAuth.QUORUM_LEARNER_SASL_AUTH_REQUIRED, learnerSaslRequired);
- }
-
- void setQuorumSaslEnabled(boolean enableAuth) {
- quorumSaslEnableAuth = enableAuth;
- if (!quorumSaslEnableAuth) {
- LOG.info("QuorumPeer communication is not secured! (SASL auth disabled)");
- } else {
- LOG.info("{} set to {}", QuorumAuth.QUORUM_SASL_AUTH_ENABLED, enableAuth);
- }
- }
-
- void setQuorumServicePrincipal(String servicePrincipal) {
- quorumServicePrincipal = servicePrincipal;
- LOG.info("{} set to {}", QuorumAuth.QUORUM_KERBEROS_SERVICE_PRINCIPAL, quorumServicePrincipal);
- }
-
- void setQuorumLearnerLoginContext(String learnerContext) {
- quorumLearnerLoginContext = learnerContext;
- LOG.info("{} set to {}", QuorumAuth.QUORUM_LEARNER_SASL_LOGIN_CONTEXT, quorumLearnerLoginContext);
- }
-
- void setQuorumServerLoginContext(String serverContext) {
- quorumServerLoginContext = serverContext;
- LOG.info("{} set to {}", QuorumAuth.QUORUM_SERVER_SASL_LOGIN_CONTEXT, quorumServerLoginContext);
- }
-
- void setQuorumCnxnThreadsSize(int qCnxnThreadsSize) {
- if (qCnxnThreadsSize > QUORUM_CNXN_THREADS_SIZE_DEFAULT_VALUE) {
- quorumCnxnThreadsSize = qCnxnThreadsSize;
- }
- LOG.info("quorum.cnxn.threads.size set to {}", quorumCnxnThreadsSize);
- }
-
- boolean isQuorumSaslAuthEnabled() {
- return quorumSaslEnableAuth;
- }
-
- private boolean isQuorumServerSaslAuthRequired() {
- return quorumServerSaslAuthRequired;
- }
-
- private boolean isQuorumLearnerSaslAuthRequired() {
- return quorumLearnerSaslAuthRequired;
- }
-
- public QuorumCnxManager createCnxnManager() {
- int timeout = quorumCnxnTimeoutMs > 0 ? quorumCnxnTimeoutMs : this.tickTime * this.syncLimit;
- LOG.info("Using {}ms as the quorum cnxn socket timeout", timeout);
- return new QuorumCnxManager(
- this,
- this.getMyId(),
- this.getView(),
- this.authServer,
- this.authLearner,
- timeout,
- this.getQuorumListenOnAllIPs(),
- this.quorumCnxnThreadsSize,
- this.isQuorumSaslAuthEnabled());
- }
-
- boolean isLeader(long id) {
- Vote vote = getCurrentVote();
- return vote != null && id == vote.getId();
- }
-
- public boolean isReconfigEnabled() {
- return reconfigEnabled;
- }
-
- /**
- * This is a metric that depends on the status of the peer.
- */
- @InterfaceAudience.Private
- public Integer getSynced_observers_metric() {
- if (leader != null) {
- return leader.getObservingLearners().size();
- } else if (follower != null) {
- return follower.getSyncedObserverSize();
- } else {
- return null;
- }
- }
-
- /**
- * Create a new QuorumPeer and apply all the values per the already-parsed config.
- *
- * @param config The parsed quorum peer config.
- * @return A QuorumPeer instantiated with specified peer config. Note this peer
- * is not fully initialized; caller should finish initialization through
- * additional configurations (connection factory settings, etc).
- *
- * @throws IOException
- */
- public static QuorumPeer createFromConfig(QuorumPeerConfig config) throws IOException {
- QuorumPeer quorumPeer = new QuorumPeer();
- quorumPeer.setTxnFactory(new FileTxnSnapLog(config.getDataLogDir(), config.getDataDir()));
- quorumPeer.enableLocalSessions(config.areLocalSessionsEnabled());
- quorumPeer.enableLocalSessionsUpgrading(config.isLocalSessionsUpgradingEnabled());
- quorumPeer.setElectionType(config.getElectionAlg());
- quorumPeer.setMyid(config.getServerId());
- quorumPeer.setTickTime(config.getTickTime());
- quorumPeer.setMinSessionTimeout(config.getMinSessionTimeout());
- quorumPeer.setMaxSessionTimeout(config.getMaxSessionTimeout());
- quorumPeer.setInitLimit(config.getInitLimit());
- quorumPeer.setSyncLimit(config.getSyncLimit());
- quorumPeer.setConnectToLearnerMasterLimit(config.getConnectToLearnerMasterLimit());
- quorumPeer.setObserverMasterPort(config.getObserverMasterPort());
- quorumPeer.setConfigFileName(config.getConfigFilename());
- quorumPeer.setClientPortListenBacklog(config.getClientPortListenBacklog());
- quorumPeer.setZKDatabase(new ZKDatabase(quorumPeer.getTxnFactory()));
- quorumPeer.setQuorumVerifier(config.getQuorumVerifier(), false);
- if (config.getLastSeenQuorumVerifier() != null) {
- quorumPeer.setLastSeenQuorumVerifier(config.getLastSeenQuorumVerifier(), false);
- }
- quorumPeer.initConfigInZKDatabase();
- quorumPeer.setSslQuorum(config.isSslQuorum());
- quorumPeer.setUsePortUnification(config.shouldUsePortUnification());
- quorumPeer.setLearnerType(config.getPeerType());
- quorumPeer.setSyncEnabled(config.getSyncEnabled());
- quorumPeer.setQuorumListenOnAllIPs(config.getQuorumListenOnAllIPs());
- if (config.sslQuorumReloadCertFiles) {
- quorumPeer.getX509Util().enableCertFileReloading();
- }
- quorumPeer.setMultiAddressEnabled(config.isMultiAddressEnabled());
- quorumPeer.setMultiAddressReachabilityCheckEnabled(config.isMultiAddressReachabilityCheckEnabled());
- quorumPeer.setMultiAddressReachabilityCheckTimeoutMs(config.getMultiAddressReachabilityCheckTimeoutMs());
-
- // sets quorum sasl authentication configurations
- quorumPeer.setQuorumSaslEnabled(config.quorumEnableSasl);
- if (quorumPeer.isQuorumSaslAuthEnabled()) {
- quorumPeer.setQuorumServerSaslRequired(config.quorumServerRequireSasl);
- quorumPeer.setQuorumLearnerSaslRequired(config.quorumLearnerRequireSasl);
- quorumPeer.setQuorumServicePrincipal(config.quorumServicePrincipal);
- quorumPeer.setQuorumServerLoginContext(config.quorumServerLoginContext);
- quorumPeer.setQuorumLearnerLoginContext(config.quorumLearnerLoginContext);
- }
- quorumPeer.setQuorumCnxnThreadsSize(config.quorumCnxnThreadsSize);
-
- if (config.jvmPauseMonitorToRun) {
- quorumPeer.setJvmPauseMonitor(new JvmPauseMonitor(config));
- }
-
- return quorumPeer;
- }
-
-}
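
The createFromConfig factory deleted above is the usual entry point for standing up a quorum peer from an already-parsed config. A minimal sketch of how a caller might finish the initialization the javadoc mentions, assuming a parsed QuorumPeerConfig; the EmbeddedQuorumPeer class name is invented for illustration, and the two-argument ServerCnxnFactory.configure overload used here is an assumption, not taken from this diff:

    import org.apache.zookeeper.server.ServerCnxnFactory;
    import org.apache.zookeeper.server.quorum.QuorumPeer;
    import org.apache.zookeeper.server.quorum.QuorumPeerConfig;

    // Illustrative sketch only: createFromConfig applies txn-log, election, timeout
    // and SASL settings, but leaves the connection factory to the caller.
    public final class EmbeddedQuorumPeer {

        public static QuorumPeer start(QuorumPeerConfig config) throws Exception {
            QuorumPeer peer = QuorumPeer.createFromConfig(config);
            // Assumed overload: client port address plus max client connections per host.
            ServerCnxnFactory cnxnFactory = ServerCnxnFactory.createFactory();
            cnxnFactory.configure(config.getClientPortAddress(), config.getMaxClientCnxns());
            peer.setCnxnFactory(cnxnFactory);
            peer.start(); // loads the database, starts leader election and joins the ensemble
            return peer;
        }
    }
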
diff --git a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/quorum/ReadOnlyZooKeeperServer.java b/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/quorum/ReadOnlyZooKeeperServer.java
deleted file mode 100644
index a96a395b03b..00000000000
--- a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/quorum/ReadOnlyZooKeeperServer.java
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.zookeeper.server.quorum;
-
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.util.Objects;
-import java.util.stream.Collectors;
-import org.apache.zookeeper.ZooDefs.OpCode;
-import org.apache.zookeeper.jmx.MBeanRegistry;
-import org.apache.zookeeper.server.DataTreeBean;
-import org.apache.zookeeper.server.FinalRequestProcessor;
-import org.apache.zookeeper.server.PrepRequestProcessor;
-import org.apache.zookeeper.server.Request;
-import org.apache.zookeeper.server.RequestProcessor;
-import org.apache.zookeeper.server.ServerCnxn;
-import org.apache.zookeeper.server.ZKDatabase;
-import org.apache.zookeeper.server.ZooKeeperServer;
-import org.apache.zookeeper.server.ZooKeeperServerBean;
-import org.apache.zookeeper.server.persistence.FileTxnSnapLog;
-
-/**
- * A ZooKeeperServer which comes into play when the peer is partitioned from the
- * majority. It handles read-only clients but drops connections from clients that
- * are not read-only.
- * <p>
- * The very first processor in the chain of request processors is a
- * ReadOnlyRequestProcessor which drops state-changing requests.
- */
-public class ReadOnlyZooKeeperServer extends ZooKeeperServer {
-
- protected final QuorumPeer self;
- private volatile boolean shutdown = false;
-
- ReadOnlyZooKeeperServer(FileTxnSnapLog logFactory, QuorumPeer self, ZKDatabase zkDb) {
- super(
- logFactory,
- self.tickTime,
- self.minSessionTimeout,
- self.maxSessionTimeout,
- self.clientPortListenBacklog,
- zkDb,
- self.getInitialConfig(),
- self.isReconfigEnabled());
- this.self = self;
- }
-
- @Override
- protected void setupRequestProcessors() {
- RequestProcessor finalProcessor = new FinalRequestProcessor(this);
- RequestProcessor prepProcessor = new PrepRequestProcessor(this, finalProcessor);
- ((PrepRequestProcessor) prepProcessor).start();
- firstProcessor = new ReadOnlyRequestProcessor(this, prepProcessor);
- ((ReadOnlyRequestProcessor) firstProcessor).start();
- }
-
- @Override
- public synchronized void startup() {
- // avoid starting up after a shutdown has already been requested
- if (shutdown) {
- LOG.warn("Not starting Read-only server as startup follows shutdown!");
- return;
- }
- registerJMX(new ReadOnlyBean(this), self.jmxLocalPeerBean);
- super.startup();
- self.setZooKeeperServer(this);
- self.adminServer.setZooKeeperServer(this);
- LOG.info("Read-only server started");
- }
-
- @Override
- public void createSessionTracker() {
- sessionTracker = new LearnerSessionTracker(
- this, getZKDatabase().getSessionWithTimeOuts(),
- this.tickTime, self.getMyId(), self.areLocalSessionsEnabled(),
- getZooKeeperServerListener());
- }
-
- @Override
- protected void startSessionTracker() {
- ((LearnerSessionTracker) sessionTracker).start();
- }
-
- @Override
- protected void setLocalSessionFlag(Request si) {
- switch (si.type) {
- case OpCode.createSession:
- if (self.areLocalSessionsEnabled()) {
- si.setLocalSession(true);
- }
- break;
- case OpCode.closeSession:
- if (((UpgradeableSessionTracker) sessionTracker).isLocalSession(si.sessionId)) {
- si.setLocalSession(true);
- } else {
- LOG.warn("Submitting global closeSession request for session 0x{} in ReadOnly mode",
- Long.toHexString(si.sessionId));
- }
- break;
- default:
- break;
- }
- }
-
- @Override
- protected void validateSession(ServerCnxn cnxn, long sessionId) throws IOException {
- if (((LearnerSessionTracker) sessionTracker).isGlobalSession(sessionId)) {
- String msg = "Refusing global session reconnection in RO mode " + cnxn.getRemoteSocketAddress();
- LOG.info(msg);
- throw new ServerCnxn.CloseRequestException(msg, ServerCnxn.DisconnectReason.RENEW_GLOBAL_SESSION_IN_RO_MODE);
- }
- }
-
- @Override
- protected void registerJMX() {
- // register with JMX
- try {
- jmxDataTreeBean = new DataTreeBean(getZKDatabase().getDataTree());
- MBeanRegistry.getInstance().register(jmxDataTreeBean, jmxServerBean);
- } catch (Exception e) {
- LOG.warn("Failed to register with JMX", e);
- jmxDataTreeBean = null;
- }
- }
-
- public void registerJMX(ZooKeeperServerBean serverBean, LocalPeerBean localPeerBean) {
- // register with JMX
- try {
- jmxServerBean = serverBean;
- MBeanRegistry.getInstance().register(serverBean, localPeerBean);
- } catch (Exception e) {
- LOG.warn("Failed to register with JMX", e);
- jmxServerBean = null;
- }
- }
-
- @Override
- protected void unregisterJMX() {
- // unregister from JMX
- try {
- if (jmxDataTreeBean != null) {
- MBeanRegistry.getInstance().unregister(jmxDataTreeBean);
- }
- } catch (Exception e) {
- LOG.warn("Failed to unregister with JMX", e);
- }
- jmxDataTreeBean = null;
- }
-
- protected void unregisterJMX(ZooKeeperServer zks) {
- // unregister from JMX
- try {
- if (jmxServerBean != null) {
- MBeanRegistry.getInstance().unregister(jmxServerBean);
- }
- } catch (Exception e) {
- LOG.warn("Failed to unregister with JMX", e);
- }
- jmxServerBean = null;
- }
-
- @Override
- public String getState() {
- return "read-only";
- }
-
- /**
- * Returns the id of the associated QuorumPeer, which serves as the unique
- * id of this server.
- */
- @Override
- public long getServerId() {
- return self.getMyId();
- }
-
- @Override
- public synchronized void shutdown(boolean fullyShutDown) {
- if (!canShutdown()) {
- LOG.debug("ZooKeeper server is not running, so not proceeding to shutdown!");
- } else {
- shutdown = true;
- unregisterJMX(this);
-
- // set peer's server to null
- self.setZooKeeperServer(null);
- // clear all the connections
- self.closeAllConnections();
-
- self.adminServer.setZooKeeperServer(null);
- }
- // shutdown the server itself
- super.shutdown(fullyShutDown);
- }
-
- @Override
- public void dumpConf(PrintWriter pwriter) {
- super.dumpConf(pwriter);
-
- pwriter.print("initLimit=");
- pwriter.println(self.getInitLimit());
- pwriter.print("syncLimit=");
- pwriter.println(self.getSyncLimit());
- pwriter.print("electionAlg=");
- pwriter.println(self.getElectionType());
- pwriter.print("electionPort=");
- pwriter.println(self.getElectionAddress().getAllPorts()
- .stream().map(Objects::toString).collect(Collectors.joining("|")));
- pwriter.print("quorumPort=");
- pwriter.println(self.getQuorumAddress().getAllPorts()
- .stream().map(Objects::toString).collect(Collectors.joining("|")));
- pwriter.print("peerType=");
- pwriter.println(self.getLearnerType().ordinal());
- }
-
- @Override
- protected void setState(State state) {
- this.state = state;
- }
-
-}
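
ReadOnlyZooKeeperServer only accepts sessions from clients that declared themselves read-only capable; other connections are dropped, and global session renewals are refused as validateSession above shows. A hedged client-side sketch of opting into that mode follows; host names and the znode path are placeholders, and the server side must additionally have read-only mode enabled (the readonlymode.enabled property):

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooKeeper;

    // Illustrative sketch: the trailing 'true' flag marks the session as read-only
    // capable, so a partitioned peer running ReadOnlyZooKeeperServer will accept it.
    public final class ReadOnlyClientSketch {

        public static void main(String[] args) throws Exception {
            ZooKeeper zk = new ZooKeeper("zk1:2181,zk2:2181,zk3:2181", 15000, event -> { }, true);
            byte[] data = zk.getData("/example/node", false, null); // reads still succeed
            try {
                zk.setData("/example/node", data, -1);              // writes are rejected in RO mode
            } catch (KeeperException.NotReadOnlyException e) {
                System.err.println("connected to a read-only server: " + e.getMessage());
            } finally {
                zk.close();
            }
        }
    }
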
diff --git a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/quorum/SendAckRequestProcessor.java b/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/quorum/SendAckRequestProcessor.java
deleted file mode 100644
index d65ead216f0..00000000000
--- a/zookeeper-server/zookeeper-server-3.9.2/src/main/java/org/apache/zookeeper/server/quorum/SendAckRequestProcessor.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.zookeeper.server.quorum;
-
-import java.io.Flushable;
-import java.io.IOException;
-import java.net.Socket;
-import org.apache.zookeeper.ZooDefs.OpCode;
-import org.apache.zookeeper.server.Request;
-import org.apache.zookeeper.server.RequestProcessor;
-import org.apache.zookeeper.server.ServerMetrics;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class SendAckRequestProcessor implements RequestProcessor, Flushable {
-
- private static final Logger LOG = LoggerFactory.getLogger(SendAckRequestProcessor.class);
-
- final Learner learner;
-
- SendAckRequestProcessor(Learner peer) {
- this.learner = peer;
- }
-
- public void processRequest(Request si) {
- if (si.type != OpCode.sync) {
- QuorumPacket qp = new QuorumPacket(Leader.ACK, si.getHdr().getZxid(), null, null);
- try {
- si.logLatency(ServerMetrics.getMetrics().PROPOSAL_ACK_CREATION_LATENCY);
-
- learner.writePacket(qp, false);
- } catch (IOException e) {
- LOG.warn("Closing connection to leader, exception during packet send", e);
- try {
- if (!learner.sock.isClosed()) {
- learner.sock.close();
- }
- } catch (IOException e1) {
- // Nothing to do, we are shutting things down, so an exception here is irrelevant
- LOG.debug("Ignoring error closing the connection", e1);
- }
- }
- }
- }
-
- public void flush() throws IOException {
- try {
- learner.writePacket(null, true);
- } catch (IOException e) {
- LOG.warn("Closing connection to leader, exception during packet send", e);
- try {
- Socket socket = learner.sock;
- if (socket != null && !socket.isClosed()) {
- learner.sock.close();
- }
- } catch (IOException e1) {
- // Nothing to do, we are shutting things down, so an exception here is irrelevant
- LOG.debug("Ignoring error closing the connection", e1);
- }
- }
- }
-
- public void shutdown() {
- // Nothing needed
- }
-
-}
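
SendAckRequestProcessor sits at the tail of a learner's sync pipeline: each logged proposal reaching it is turned into a Leader.ACK packet back to the leader, and the Flushable implementation lets the pipeline batch those writes. As context for that contract, here is a minimal, purely illustrative processor showing the RequestProcessor/Flushable shape it implements; the ForwardingRequestProcessor name is invented and not part of ZooKeeper:

    import java.io.Flushable;
    import java.io.IOException;
    import org.apache.zookeeper.server.Request;
    import org.apache.zookeeper.server.RequestProcessor;

    // Minimal sketch of the processor contract: forward each request downstream
    // and propagate flush/shutdown, mirroring how an ack-sending processor is
    // used at the end of a chain.
    public class ForwardingRequestProcessor implements RequestProcessor, Flushable {

        private final RequestProcessor next;

        public ForwardingRequestProcessor(RequestProcessor next) {
            this.next = next;
        }

        @Override
        public void processRequest(Request request) throws RequestProcessorException {
            next.processRequest(request);
        }

        @Override
        public void flush() throws IOException {
            if (next instanceof Flushable) {
                ((Flushable) next).flush();
            }
        }

        @Override
        public void shutdown() {
            next.shutdown();
        }
    }
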
diff --git a/zookeeper-server/zookeeper-server/CMakeLists.txt b/zookeeper-server/zookeeper-server/CMakeLists.txt
index c7e9679ed24..30ed1ed4404 100644
--- a/zookeeper-server/zookeeper-server/CMakeLists.txt
+++ b/zookeeper-server/zookeeper-server/CMakeLists.txt
@@ -1,4 +1,4 @@
# Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-install_jar(zookeeper-server-3.9.1-jar-with-dependencies.jar)
+install_jar(zookeeper-server-3.9.2-jar-with-dependencies.jar)
# Make symlink so that we have a default version, should be done only in zookeeper-server module
-install_symlink(lib/jars/zookeeper-server-3.9.1-jar-with-dependencies.jar lib/jars/zookeeper-server-jar-with-dependencies.jar)
+install_symlink(lib/jars/zookeeper-server-3.9.2-jar-with-dependencies.jar lib/jars/zookeeper-server-jar-with-dependencies.jar)
diff --git a/zookeeper-server/zookeeper-server/pom.xml b/zookeeper-server/zookeeper-server/pom.xml
index f1b33dd0ae7..791c026234a 100644
--- a/zookeeper-server/zookeeper-server/pom.xml
+++ b/zookeeper-server/zookeeper-server/pom.xml
@@ -8,11 +8,11 @@
<version>8-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
- <artifactId>zookeeper-server-3.9.1</artifactId>
+ <artifactId>zookeeper-server-3.9.2</artifactId>
<packaging>container-plugin</packaging>
<version>8-SNAPSHOT</version>
<properties>
- <zookeeper.version>3.9.1</zookeeper.version>
+ <zookeeper.version>3.9.2</zookeeper.version>
</properties>
<dependencies>
<dependency>
diff --git a/zookeeper-server/zookeeper-server/src/main/java/com/yahoo/vespa/zookeeper/VespaZooKeeperAdminImpl.java b/zookeeper-server/zookeeper-server/src/main/java/com/yahoo/vespa/zookeeper/VespaZooKeeperAdminImpl.java
index 891a35582b3..c74a020bcf4 100644
--- a/zookeeper-server/zookeeper-server/src/main/java/com/yahoo/vespa/zookeeper/VespaZooKeeperAdminImpl.java
+++ b/zookeeper-server/zookeeper-server/src/main/java/com/yahoo/vespa/zookeeper/VespaZooKeeperAdminImpl.java
@@ -26,6 +26,7 @@ public class VespaZooKeeperAdminImpl implements VespaZooKeeperAdmin {
private static final Logger log = java.util.logging.Logger.getLogger(VespaZooKeeperAdminImpl.class.getName());
+
@SuppressWarnings("try")
@Override
public void reconfigure(String connectionSpec, String servers) throws ReconfigException {
diff --git a/zookeeper-server/zookeeper-server/src/main/java/org/apache/zookeeper/server/ZooKeeperServer.java b/zookeeper-server/zookeeper-server/src/main/java/org/apache/zookeeper/server/ZooKeeperServer.java
index 895bbeffa5f..00af31b46d4 100644
--- a/zookeeper-server/zookeeper-server/src/main/java/org/apache/zookeeper/server/ZooKeeperServer.java
+++ b/zookeeper-server/zookeeper-server/src/main/java/org/apache/zookeeper/server/ZooKeeperServer.java
@@ -384,13 +384,13 @@ public class ZooKeeperServer implements SessionExpirer, ServerStats.Provider {
+ " minSessionTimeout {} ms"
+ " maxSessionTimeout {} ms"
+ " clientPortListenBacklog {}"
- + " datadir {}"
+ + " dataLogdir {}"
+ " snapdir {}",
tickTime,
getMinSessionTimeout(),
getMaxSessionTimeout(),
getClientPortListenBacklog(),
- txnLogFactory.getDataDir(),
+ txnLogFactory.getDataLogDir(),
txnLogFactory.getSnapDir());
}
@@ -442,7 +442,7 @@ public class ZooKeeperServer implements SessionExpirer, ServerStats.Provider {
pwriter.print("dataDirSize=");
pwriter.println(getDataDirSize());
pwriter.print("dataLogDir=");
- pwriter.println(zkDb.snapLog.getDataDir().getAbsolutePath());
+ pwriter.println(zkDb.snapLog.getDataLogDir().getAbsolutePath());
pwriter.print("dataLogSize=");
pwriter.println(getLogDirSize());
pwriter.print("tickTime=");
@@ -464,7 +464,7 @@ public class ZooKeeperServer implements SessionExpirer, ServerStats.Provider {
return new ZooKeeperServerConf(
getClientPort(),
zkDb.snapLog.getSnapDir().getAbsolutePath(),
- zkDb.snapLog.getDataDir().getAbsolutePath(),
+ zkDb.snapLog.getDataLogDir().getAbsolutePath(),
getTickTime(),
getMaxClientCnxnsPerHost(),
getMinSessionTimeout(),
@@ -649,7 +649,7 @@ public class ZooKeeperServer implements SessionExpirer, ServerStats.Provider {
if (zkDb == null) {
return 0L;
}
- File path = zkDb.snapLog.getDataDir();
+ File path = zkDb.snapLog.getSnapDir();
return getDirSize(path);
}
@@ -658,7 +658,7 @@ public class ZooKeeperServer implements SessionExpirer, ServerStats.Provider {
if (zkDb == null) {
return 0L;
}
- File path = zkDb.snapLog.getSnapDir();
+ File path = zkDb.snapLog.getDataLogDir();
return getDirSize(path);
}
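
The two hunks just above fix swapped metrics: getDataDirSize now measures the snapshot directory (the dataDir setting) and getLogDirSize the transaction-log directory (dataLogDir). Both rely on a recursive directory walk; a hedged sketch of that helper, mirroring the behaviour implied here rather than quoting the upstream implementation:

    import java.io.File;

    // Illustrative recursive size walk: sums regular files and descends into
    // subdirectories such as version-2 under the snapshot or txn-log dir.
    final class DirSizeSketch {

        static long getDirSize(File file) {
            if (!file.isDirectory()) {
                return file.length();
            }
            long size = 0L;
            File[] children = file.listFiles();
            if (children != null) {
                for (File child : children) {
                    size += getDirSize(child);
                }
            }
            return size;
        }
    }
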
@@ -1506,7 +1506,9 @@ public class ZooKeeperServer implements SessionExpirer, ServerStats.Provider {
throw new CloseRequestException(msg, ServerCnxn.DisconnectReason.NOT_READ_ONLY_CLIENT);
}
if (request.getLastZxidSeen() > zkDb.dataTree.lastProcessedZxid) {
- String msg = "Refusing session request for client "
+ String msg = "Refusing session(0x"
+ + Long.toHexString(sessionId)
+ + ") request for client "
+ cnxn.getRemoteSocketAddress()
+ " as it has seen zxid 0x"
+ Long.toHexString(request.getLastZxidSeen())
diff --git a/zookeeper-server/zookeeper-server/src/main/java/org/apache/zookeeper/server/quorum/Learner.java b/zookeeper-server/zookeeper-server/src/main/java/org/apache/zookeeper/server/quorum/Learner.java
index 8d8b6dabce8..3c7b2148400 100644
--- a/zookeeper-server/zookeeper-server/src/main/java/org/apache/zookeeper/server/quorum/Learner.java
+++ b/zookeeper-server/zookeeper-server/src/main/java/org/apache/zookeeper/server/quorum/Learner.java
@@ -597,7 +597,6 @@ public class Learner {
willSnapshot = false; // but anything after this needs to go to the transaction log; or
}
- self.setCurrentEpoch(newEpoch);
sock.setSoTimeout(self.tickTime * self.syncLimit);
self.setSyncMode(QuorumPeer.SyncMode.NONE);
zk.startupWithoutServing();
@@ -613,6 +612,8 @@ public class Learner {
delayedProposals.clear();
fzk.syncProcessor.syncFlush();
}
+
+ self.setCurrentEpoch(newEpoch);
}
void flushAcks() throws InterruptedException {