summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--CMakeLists.txt1
-rw-r--r--airlift-zstd/CMakeLists.txt2
-rw-r--r--airlift-zstd/OWNERS2
-rw-r--r--airlift-zstd/README.md4
-rw-r--r--airlift-zstd/pom.xml47
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/compress/Compressor.java28
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/compress/Decompressor.java28
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/compress/IncompatibleJvmException.java23
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/compress/MalformedInputException.java36
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/BitInputStream.java207
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/BitOutputStream.java90
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/BlockCompressionState.java60
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/BlockCompressor.java21
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/CompressionContext.java46
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/CompressionParameters.java306
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/Constants.java85
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/DoubleFastBlockCompressor.java261
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/FiniteStateEntropy.java551
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/FrameHeader.java70
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/FseCompressionTable.java158
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/FseTableReader.java169
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/Histogram.java65
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/Huffman.java323
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/HuffmanCompressionContext.java61
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/HuffmanCompressionTable.java437
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/HuffmanCompressionTableWorkspace.java33
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/HuffmanCompressor.java137
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/HuffmanTableWriterWorkspace.java29
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/NodeTable.java48
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/RepeatedOffsets.java49
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/SequenceEncoder.java351
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/SequenceEncodingContext.java30
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/SequenceStore.java160
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/UnsafeUtil.java64
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/Util.java94
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/XxHash64.java286
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/ZstdBlockDecompressor.java810
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/ZstdCompressor.java126
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/ZstdDecompressor.java119
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/ZstdFrameCompressor.java438
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/ZstdFrameDecompressor.java212
-rw-r--r--airlift-zstd/src/main/java/ai/vespa/airlift/zstd/ZstdInputStream.java471
-rw-r--r--application/pom.xml6
-rw-r--r--client/go/cmd/document.go2
-rw-r--r--client/go/jvm/env.go5
-rw-r--r--client/go/vespa/document.go5
-rw-r--r--client/js/app/yarn.lock12
-rw-r--r--cloud-tenant-base-dependencies-enforcer/pom.xml13
-rw-r--r--clustercontroller-apps/src/main/java/com/yahoo/vespa/clustercontroller/apps/clustercontroller/ClusterController.java5
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java16
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeLookup.java7
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RpcServer.java17
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/SlobrokClient.java5
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/StatusHandler.java9
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/statuspage/StatusPageServerInterface.java12
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFeedBlockTest.java4
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DatabaseTest.java18
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DummyCommunicator.java4
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DummyVdsNode.java34
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerTest.java18
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/RpcServerTest.java2
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeTest.java4
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java2
-rw-r--r--config-model-fat/pom.xml2
-rw-r--r--config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java7
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java7
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/application/validation/CloudDataPlaneFilterValidator.java1
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/component/AccessLogComponent.java2
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java34
-rw-r--r--config-model/src/main/resources/schema/common.rnc4
-rw-r--r--config-model/src/main/resources/schema/container-include.rnc1
-rw-r--r--config-model/src/main/resources/schema/containercluster.rnc49
-rw-r--r--config-model/src/main/resources/schema/docproc-standalone.rnc6
-rw-r--r--config-model/src/main/resources/schema/docproc.rnc98
-rw-r--r--config-model/src/main/resources/schema/services.rnc1
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/application/validation/CloudDataPlaneFilterValidatorTest.java3
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/container/xml/CloudDataPlaneFilterTest.java43
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/container/xml/JvmOptionsTest.java91
-rw-r--r--config-model/src/test/schema-test-files/services.xml18
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java7
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java3
-rw-r--r--container-apache-http-client-bundle/pom.xml8
-rw-r--r--container-dev/pom.xml4
-rw-r--r--container-messagebus/src/main/java/com/yahoo/container/jdisc/messagebus/SessionCache.java21
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/statistics/StatisticsSearcher.java3
-rw-r--r--container-test/pom.xml10
-rw-r--r--container/pom.xml4
-rw-r--r--controller-api/pom.xml22
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/BillingController.java1
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/MockBillingController.java5
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Cluster.java159
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Load.java12
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/AutoscalingData.java44
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/ClusterData.java32
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/ClusterResourcesData.java1
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/ClusterUtilizationData.java37
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/tenant/ArchiveAccess.java24
-rw-r--r--controller-api/src/test/java/com/yahoo/vespa/hosted/controller/tenant/ArchiveAccessTest.java45
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java83
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java11
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/Run.java1
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/Notifier.java35
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java50
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandler.java6
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandlerV2.java7
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/filter/AthenzRoleFilter.java8
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/routing/RoutingApiHandler.java2
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/response/application.json47
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java4
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java89
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java20
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-clusters.json47
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandlerTest.java4
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/routing/RoutingApiTest.java6
-rw-r--r--controller-server/src/test/resources/mail/notification.txt2
-rw-r--r--dist/vespa.spec1
-rw-r--r--document/src/main/java/com/yahoo/document/DocumentUtil.java1
-rw-r--r--flags/src/main/java/com/yahoo/vespa/flags/Flags.java35
-rw-r--r--fnet/src/tests/frt/parallel_rpc/parallel_rpc_test.cpp7
-rw-r--r--fnet/src/tests/frt/parallel_rpc/tls_rpc_bench.cpp7
-rw-r--r--fnet/src/tests/frt/rpc/CMakeLists.txt1
-rw-r--r--fnet/src/tests/frt/rpc/my_crypto_engine.hpp5
-rw-r--r--jrt/src/com/yahoo/jrt/XorCryptoEngine.java20
-rw-r--r--jrt/src/com/yahoo/jrt/XorCryptoSocket.java126
-rw-r--r--jrt/tests/com/yahoo/jrt/EchoTest.java4
-rw-r--r--jrt/tests/com/yahoo/jrt/LatencyTest.java8
-rw-r--r--messagebus/src/main/java/com/yahoo/messagebus/MessageBus.java21
-rwxr-xr-xmessagebus/src/main/java/com/yahoo/messagebus/MessageBusParams.java8
-rwxr-xr-xmessagebus/src/test/java/com/yahoo/messagebus/ChokeTestCase.java52
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/orchestrator/OrchestratorException.java2
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/sync/SyncFileInfo.java4
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/sync/SyncFileInfoTest.java13
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java8
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Applications.java6
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/AutoscalingStatus.java67
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java62
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java76
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaling.java107
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java57
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancers.java6
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java24
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InfrastructureVersions.java8
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java4
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainer.java24
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java6
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsVersions.java4
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializer.java107
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CachingCurator.java (renamed from node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabase.java)26
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java (renamed from node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java)55
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/JobControlFlags.java4
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java34
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java5
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/ArchiveUris.java6
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/FirmwareChecks.java6
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java7
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java57
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java6
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java20
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingIntegrationTest.java1
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java31
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java15
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java6
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java34
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializerTest.java31
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CachingCuratorTest.java (renamed from node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseTest.java)16
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDbTest.java (renamed from node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClientTest.java)4
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application1.json80
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application2.json27
-rw-r--r--parent/pom.xml17
-rw-r--r--pom.xml1
-rwxr-xr-xscrewdriver/release-container-image.sh2
-rw-r--r--searchcore/src/vespa/searchcore/proton/common/scheduled_forward_executor.cpp34
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_sampler.cpp2
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp1
-rw-r--r--searchlib/src/tests/docstore/document_store/document_store_test.cpp2
-rw-r--r--searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp3
-rw-r--r--searchlib/src/vespa/searchlib/docstore/chunk.cpp4
-rw-r--r--searchlib/src/vespa/searchlib/docstore/chunk.h4
-rw-r--r--searchlib/src/vespa/searchlib/docstore/chunkformat.cpp4
-rw-r--r--searchlib/src/vespa/searchlib/docstore/chunkformat.h4
-rw-r--r--searchlib/src/vespa/searchlib/docstore/compacter.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/docstore/compacter.h2
-rw-r--r--searchlib/src/vespa/searchlib/docstore/documentstore.cpp36
-rw-r--r--searchlib/src/vespa/searchlib/docstore/documentstore.h22
-rw-r--r--searchlib/src/vespa/searchlib/docstore/logdatastore.h2
-rw-r--r--searchlib/src/vespa/searchlib/docstore/storebybucket.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/docstore/storebybucket.h2
-rw-r--r--searchlib/src/vespa/searchlib/docstore/value.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/docstore/value.h2
-rw-r--r--searchlib/src/vespa/searchlib/docstore/visitcache.cpp12
-rw-r--r--searchlib/src/vespa/searchlib/docstore/visitcache.h13
-rw-r--r--searchlib/src/vespa/searchlib/docstore/writeablefilechunk.h4
-rwxr-xr-xstandalone-container/src/main/sh/standalone-container.sh2
-rw-r--r--vespa-dependencies-enforcer/allowed-maven-dependencies.txt31
-rw-r--r--vespa-feed-client-api/abi-spec.json18
-rw-r--r--vespa-feed-client-api/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java5
-rw-r--r--vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/impl/CliArguments.java19
-rw-r--r--vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/impl/CliClient.java5
-rw-r--r--vespa-feed-client-cli/src/test/java/ai/vespa/feed/client/impl/CliArgumentsTest.java41
-rw-r--r--vespa-feed-client-cli/src/test/resources/help.txt3
-rw-r--r--vespa-feed-client/pom.xml5
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/ApacheCluster.java29
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/FeedClientBuilderImpl.java8
-rw-r--r--vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/ApacheClusterTest.java74
-rw-r--r--vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/WireMockExtension.java42
-rwxr-xr-xvespaclient-java/src/main/java/com/yahoo/dummyreceiver/DummyReceiver.java1
-rw-r--r--vespaclient-java/src/main/java/com/yahoo/vespa/security/tool/crypto/CipherUtils.java24
-rw-r--r--vespaclient-java/src/main/java/com/yahoo/vespa/security/tool/crypto/DecryptTool.java10
-rw-r--r--vespaclient-java/src/main/java/com/yahoo/vespa/security/tool/crypto/EncryptTool.java16
-rw-r--r--vespaclient-java/src/test/java/com/yahoo/vespa/security/tool/CryptoToolsTest.java44
-rw-r--r--vespaclient-java/src/test/resources/expected-decrypt-help-output.txt2
-rw-r--r--vespaclient-java/src/test/resources/expected-encrypt-help-output.txt2
-rw-r--r--vespaclient/CMakeLists.txt1
-rw-r--r--vespaclient/src/vespa/vespaclient/vdsstates/.gitignore5
-rw-r--r--vespaclient/src/vespa/vespaclient/vdsstates/CMakeLists.txt8
-rw-r--r--vespaclient/src/vespa/vespaclient/vdsstates/statesapp.cpp463
-rw-r--r--vespajlib/pom.xml5
-rw-r--r--vespajlib/src/main/java/com/yahoo/compress/ZstdCompressor.java6
-rw-r--r--vespalib/src/tests/compression/compression_test.cpp5
-rw-r--r--vespalib/src/tests/net/crypto_socket/crypto_socket_test.cpp6
-rw-r--r--vespalib/src/tests/net/sync_crypto_socket/sync_crypto_socket_test.cpp6
-rw-r--r--vespalib/src/tests/portal/portal_test.cpp2
-rw-r--r--vespalib/src/vespa/vespalib/coro/async_io.cpp154
-rw-r--r--vespalib/src/vespa/vespalib/geo/zcurve.cpp15
-rw-r--r--vespalib/src/vespa/vespalib/net/crypto_engine.cpp168
-rw-r--r--vespalib/src/vespa/vespalib/net/crypto_engine.h12
-rw-r--r--vespalib/src/vespa/vespalib/util/compressionconfig.h36
-rw-r--r--vespalib/src/vespa/vespalib/util/compressor.cpp15
-rw-r--r--vespalib/src/vespa/vespalib/util/compressor.h23
-rw-r--r--vespalib/src/vespa/vespalib/util/lz4compressor.cpp2
-rw-r--r--vespalib/src/vespa/vespalib/util/lz4compressor.h2
-rw-r--r--vespalib/src/vespa/vespalib/util/mmap_file_allocator.cpp5
-rw-r--r--vespalib/src/vespa/vespalib/util/zstdcompressor.cpp2
-rw-r--r--vespalib/src/vespa/vespalib/util/zstdcompressor.h2
-rw-r--r--zkfacade/src/main/java/com/yahoo/vespa/curator/SingletonManager.java16
235 files changed, 8471 insertions, 2369 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 6cf91c10822..6d432994d1b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -63,6 +63,7 @@ vespa_install_data(tsan-suppressions.txt etc/vespa)
# Include vespa config definitions in every target
include_directories(BEFORE ${CMAKE_BINARY_DIR}/configdefinitions/src)
+add_subdirectory(airlift-zstd)
add_subdirectory(ann_benchmark)
add_subdirectory(application-model)
add_subdirectory(athenz-identity-provider-service)
diff --git a/airlift-zstd/CMakeLists.txt b/airlift-zstd/CMakeLists.txt
new file mode 100644
index 00000000000..c9be5ff262a
--- /dev/null
+++ b/airlift-zstd/CMakeLists.txt
@@ -0,0 +1,2 @@
+# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_jar(airlift-zstd.jar)
diff --git a/airlift-zstd/OWNERS b/airlift-zstd/OWNERS
new file mode 100644
index 00000000000..122e4c0e31e
--- /dev/null
+++ b/airlift-zstd/OWNERS
@@ -0,0 +1,2 @@
+arnej
+gjoranv
diff --git a/airlift-zstd/README.md b/airlift-zstd/README.md
new file mode 100644
index 00000000000..6f3a9b0027c
--- /dev/null
+++ b/airlift-zstd/README.md
@@ -0,0 +1,4 @@
+<!-- Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
+# airlift-zstd
+
+Fork of https://github.com/airlift/aircompressor (zstd only).
diff --git a/airlift-zstd/pom.xml b/airlift-zstd/pom.xml
new file mode 100644
index 00000000000..2d2f83daed9
--- /dev/null
+++ b/airlift-zstd/pom.xml
@@ -0,0 +1,47 @@
+<?xml version="1.0"?>
+<!-- Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>parent</artifactId>
+ <version>8-SNAPSHOT</version>
+ <relativePath>../parent/pom.xml</relativePath>
+ </parent>
+ <artifactId>airlift-zstd</artifactId>
+ <packaging>jar</packaging>
+ <version>8-SNAPSHOT</version>
+ <description>
+ Fork of https://github.com/airlift/aircompressor (zstd only).
+ This module is temporary until we get an official release that includes the
+ ZstdInputStream API (which is already implemented by two different people
+ but neither PR shows any progress).
+ </description>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-deploy-plugin</artifactId>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <configuration>
+ <compilerArgs>
+ <arg>-Xlint:all</arg>
+ <arg>-Xlint:-serial</arg>
+ <arg>-Xlint:-try</arg>
+ <arg>-Xlint:-processing</arg>
+ </compilerArgs>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-install-plugin</artifactId>
+ <configuration>
+ <updateReleaseInfo>true</updateReleaseInfo>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/compress/Compressor.java b/airlift-zstd/src/main/java/ai/vespa/airlift/compress/Compressor.java
new file mode 100644
index 00000000000..ba0530c985f
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/compress/Compressor.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.compress;
+
+import java.nio.ByteBuffer;
+
+public interface Compressor
+{
+ int maxCompressedLength(int uncompressedSize);
+
+ /**
+ * @return number of bytes written to the output
+ */
+ int compress(byte[] input, int inputOffset, int inputLength, byte[] output, int outputOffset, int maxOutputLength);
+
+ void compress(ByteBuffer input, ByteBuffer output);
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/compress/Decompressor.java b/airlift-zstd/src/main/java/ai/vespa/airlift/compress/Decompressor.java
new file mode 100644
index 00000000000..256df93e7c7
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/compress/Decompressor.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.compress;
+
+import java.nio.ByteBuffer;
+
+public interface Decompressor
+{
+ /**
+ * @return number of bytes written to the output
+ */
+ int decompress(byte[] input, int inputOffset, int inputLength, byte[] output, int outputOffset, int maxOutputLength)
+ throws MalformedInputException;
+
+ void decompress(ByteBuffer input, ByteBuffer output)
+ throws MalformedInputException;
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/compress/IncompatibleJvmException.java b/airlift-zstd/src/main/java/ai/vespa/airlift/compress/IncompatibleJvmException.java
new file mode 100644
index 00000000000..3c65f2c9cda
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/compress/IncompatibleJvmException.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.compress;
+
+public class IncompatibleJvmException
+ extends RuntimeException
+{
+ public IncompatibleJvmException(String message)
+ {
+ super(message);
+ }
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/compress/MalformedInputException.java b/airlift-zstd/src/main/java/ai/vespa/airlift/compress/MalformedInputException.java
new file mode 100644
index 00000000000..82e14e8ab19
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/compress/MalformedInputException.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.compress;
+
+public class MalformedInputException
+ extends RuntimeException
+{
+ private final long offset;
+
+ public MalformedInputException(long offset)
+ {
+ this(offset, "Malformed input");
+ }
+
+ public MalformedInputException(long offset, String reason)
+ {
+ super(reason + ": offset=" + offset);
+ this.offset = offset;
+ }
+
+ public long getOffset()
+ {
+ return offset;
+ }
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/BitInputStream.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/BitInputStream.java
new file mode 100644
index 00000000000..5b7234594f9
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/BitInputStream.java
@@ -0,0 +1,207 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+import static ai.vespa.airlift.zstd.Constants.SIZE_OF_LONG;
+import static ai.vespa.airlift.zstd.UnsafeUtil.UNSAFE;
+import static ai.vespa.airlift.zstd.Util.highestBit;
+import static ai.vespa.airlift.zstd.Util.verify;
+
+/**
+ * Bit streams are encoded as a byte-aligned little-endian stream. Thus, bits are laid out
+ * in the following manner, and the stream is read from right to left.
+ * <p>
+ * <p>
+ * ... [16 17 18 19 20 21 22 23] [8 9 10 11 12 13 14 15] [0 1 2 3 4 5 6 7]
+ */
+class BitInputStream
+{
+ private BitInputStream()
+ {
+ }
+
+ public static boolean isEndOfStream(long startAddress, long currentAddress, int bitsConsumed)
+ {
+ return startAddress == currentAddress && bitsConsumed == Long.SIZE;
+ }
+
+ @SuppressWarnings("fallthrough")
+ static long readTail(Object inputBase, long inputAddress, int inputSize)
+ {
+ long bits = UNSAFE.getByte(inputBase, inputAddress) & 0xFF;
+
+ switch (inputSize) {
+ case 7:
+ bits |= (UNSAFE.getByte(inputBase, inputAddress + 6) & 0xFFL) << 48;
+ case 6:
+ bits |= (UNSAFE.getByte(inputBase, inputAddress + 5) & 0xFFL) << 40;
+ case 5:
+ bits |= (UNSAFE.getByte(inputBase, inputAddress + 4) & 0xFFL) << 32;
+ case 4:
+ bits |= (UNSAFE.getByte(inputBase, inputAddress + 3) & 0xFFL) << 24;
+ case 3:
+ bits |= (UNSAFE.getByte(inputBase, inputAddress + 2) & 0xFFL) << 16;
+ case 2:
+ bits |= (UNSAFE.getByte(inputBase, inputAddress + 1) & 0xFFL) << 8;
+ }
+
+ return bits;
+ }
+
+ /**
+ * @return numberOfBits in the low order bits of a long
+ */
+ public static long peekBits(int bitsConsumed, long bitContainer, int numberOfBits)
+ {
+ return (((bitContainer << bitsConsumed) >>> 1) >>> (63 - numberOfBits));
+ }
+
+ /**
+ * numberOfBits must be > 0
+ *
+ * @return numberOfBits in the low order bits of a long
+ */
+ public static long peekBitsFast(int bitsConsumed, long bitContainer, int numberOfBits)
+ {
+ return ((bitContainer << bitsConsumed) >>> (64 - numberOfBits));
+ }
+
+ static class Initializer
+ {
+ private final Object inputBase;
+ private final long startAddress;
+ private final long endAddress;
+ private long bits;
+ private long currentAddress;
+ private int bitsConsumed;
+
+ public Initializer(Object inputBase, long startAddress, long endAddress)
+ {
+ this.inputBase = inputBase;
+ this.startAddress = startAddress;
+ this.endAddress = endAddress;
+ }
+
+ public long getBits()
+ {
+ return bits;
+ }
+
+ public long getCurrentAddress()
+ {
+ return currentAddress;
+ }
+
+ public int getBitsConsumed()
+ {
+ return bitsConsumed;
+ }
+
+ public void initialize()
+ {
+ verify(endAddress - startAddress >= 1, startAddress, "Bitstream is empty");
+
+ int lastByte = UNSAFE.getByte(inputBase, endAddress - 1) & 0xFF;
+ verify(lastByte != 0, endAddress, "Bitstream end mark not present");
+
+ bitsConsumed = SIZE_OF_LONG - highestBit(lastByte);
+
+ int inputSize = (int) (endAddress - startAddress);
+ if (inputSize >= SIZE_OF_LONG) { /* normal case */
+ currentAddress = endAddress - SIZE_OF_LONG;
+ bits = UNSAFE.getLong(inputBase, currentAddress);
+ }
+ else {
+ currentAddress = startAddress;
+ bits = readTail(inputBase, startAddress, inputSize);
+
+ bitsConsumed += (SIZE_OF_LONG - inputSize) * 8;
+ }
+ }
+ }
+
+ static final class Loader
+ {
+ private final Object inputBase;
+ private final long startAddress;
+ private long bits;
+ private long currentAddress;
+ private int bitsConsumed;
+ private boolean overflow;
+
+ public Loader(Object inputBase, long startAddress, long currentAddress, long bits, int bitsConsumed)
+ {
+ this.inputBase = inputBase;
+ this.startAddress = startAddress;
+ this.bits = bits;
+ this.currentAddress = currentAddress;
+ this.bitsConsumed = bitsConsumed;
+ }
+
+ public long getBits()
+ {
+ return bits;
+ }
+
+ public long getCurrentAddress()
+ {
+ return currentAddress;
+ }
+
+ public int getBitsConsumed()
+ {
+ return bitsConsumed;
+ }
+
+ public boolean isOverflow()
+ {
+ return overflow;
+ }
+
+ public boolean load()
+ {
+ if (bitsConsumed > 64) {
+ overflow = true;
+ return true;
+ }
+
+ else if (currentAddress == startAddress) {
+ return true;
+ }
+
+ int bytes = bitsConsumed >>> 3; // divide by 8
+ if (currentAddress >= startAddress + SIZE_OF_LONG) {
+ if (bytes > 0) {
+ currentAddress -= bytes;
+ bits = UNSAFE.getLong(inputBase, currentAddress);
+ }
+ bitsConsumed &= 0b111;
+ }
+ else if (currentAddress - bytes < startAddress) {
+ bytes = (int) (currentAddress - startAddress);
+ currentAddress = startAddress;
+ bitsConsumed -= bytes * SIZE_OF_LONG;
+ bits = UNSAFE.getLong(inputBase, startAddress);
+ return true;
+ }
+ else {
+ currentAddress -= bytes;
+ bitsConsumed -= bytes * SIZE_OF_LONG;
+ bits = UNSAFE.getLong(inputBase, currentAddress);
+ }
+
+ return false;
+ }
+ }
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/BitOutputStream.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/BitOutputStream.java
new file mode 100644
index 00000000000..29dd168fca2
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/BitOutputStream.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+import static ai.vespa.airlift.zstd.Constants.SIZE_OF_LONG;
+import static ai.vespa.airlift.zstd.UnsafeUtil.UNSAFE;
+import static ai.vespa.airlift.zstd.Util.checkArgument;
+
+class BitOutputStream
+{
+ private static final long[] BIT_MASK = {
+ 0x0, 0x1, 0x3, 0x7, 0xF, 0x1F,
+ 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
+ 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF,
+ 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF,
+ 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF,
+ 0x3FFFFFFF, 0x7FFFFFFF}; // up to 31 bits
+
+ private final Object outputBase;
+ private final long outputAddress;
+ private final long outputLimit;
+
+ private long container;
+ private int bitCount;
+ private long currentAddress;
+
+ public BitOutputStream(Object outputBase, long outputAddress, int outputSize)
+ {
+ checkArgument(outputSize >= SIZE_OF_LONG, "Output buffer too small");
+
+ this.outputBase = outputBase;
+ this.outputAddress = outputAddress;
+ outputLimit = this.outputAddress + outputSize - SIZE_OF_LONG;
+
+ currentAddress = this.outputAddress;
+ }
+
+ public void addBits(int value, int bits)
+ {
+ container |= (value & BIT_MASK[bits]) << bitCount;
+ bitCount += bits;
+ }
+
+ /**
+ * Note: leading bits of value must be 0
+ */
+ public void addBitsFast(int value, int bits)
+ {
+ container |= ((long) value) << bitCount;
+ bitCount += bits;
+ }
+
+ public void flush()
+ {
+ int bytes = bitCount >>> 3;
+
+ UNSAFE.putLong(outputBase, currentAddress, container);
+ currentAddress += bytes;
+
+ if (currentAddress > outputLimit) {
+ currentAddress = outputLimit;
+ }
+
+ bitCount &= 7;
+ container >>>= bytes * 8;
+ }
+
+ public int close()
+ {
+ addBitsFast(1, 1); // end mark
+ flush();
+
+ if (currentAddress >= outputLimit) {
+ return 0;
+ }
+
+ return (int) ((currentAddress - outputAddress) + (bitCount > 0 ? 1 : 0));
+ }
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/BlockCompressionState.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/BlockCompressionState.java
new file mode 100644
index 00000000000..e5d15cc6a58
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/BlockCompressionState.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+import java.util.Arrays;
+
+class BlockCompressionState
+{
+ public final int[] hashTable;
+ public final int[] chainTable;
+
+ private final long baseAddress;
+
+ // starting point of the window with respect to baseAddress
+ private int windowBaseOffset;
+
+ public BlockCompressionState(CompressionParameters parameters, long baseAddress)
+ {
+ this.baseAddress = baseAddress;
+ hashTable = new int[1 << parameters.getHashLog()];
+ chainTable = new int[1 << parameters.getChainLog()]; // TODO: chain table not used by Strategy.FAST
+ }
+
+ public void reset()
+ {
+ Arrays.fill(hashTable, 0);
+ Arrays.fill(chainTable, 0);
+ }
+
+ public void enforceMaxDistance(long inputLimit, int maxDistance)
+ {
+ int distance = (int) (inputLimit - baseAddress);
+
+ int newOffset = distance - maxDistance;
+ if (windowBaseOffset < newOffset) {
+ windowBaseOffset = newOffset;
+ }
+ }
+
+ public long getBaseAddress()
+ {
+ return baseAddress;
+ }
+
+ public int getWindowBaseOffset()
+ {
+ return windowBaseOffset;
+ }
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/BlockCompressor.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/BlockCompressor.java
new file mode 100644
index 00000000000..a23fd0ae9a9
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/BlockCompressor.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
/**
 * Strategy interface for compressing one block of input into a {@link SequenceStore}
 * of literals and (offset, literal-length, match-length) sequences.
 */
interface BlockCompressor
{
    /** Placeholder for strategies not yet ported; invoking it always throws. */
    BlockCompressor UNSUPPORTED = (inputBase, inputAddress, inputSize, sequenceStore, blockCompressionState, offsets, parameters) -> { throw new UnsupportedOperationException(); };

    /**
     * Compresses {@code inputSize} bytes at {@code inputAddress}, appending the
     * resulting sequences to {@code output} and updating {@code state}/{@code offsets}.
     *
     * @return the number of trailing bytes left unmatched (to be emitted as the
     *         block's last literals) — TODO confirm against callers, not visible here
     */
    int compressBlock(Object inputBase, long inputAddress, int inputSize, SequenceStore output, BlockCompressionState state, RepeatedOffsets offsets, CompressionParameters parameters);
}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/CompressionContext.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/CompressionContext.java
new file mode 100644
index 00000000000..fd4b393c758
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/CompressionContext.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+import static ai.vespa.airlift.zstd.Constants.MAX_BLOCK_SIZE;
+
+class CompressionContext
+{
+ public final RepeatedOffsets offsets = new RepeatedOffsets();
+ public final BlockCompressionState blockCompressionState;
+ public final SequenceStore sequenceStore;
+
+ public final SequenceEncodingContext sequenceEncodingContext = new SequenceEncodingContext();
+
+ public final HuffmanCompressionContext huffmanContext = new HuffmanCompressionContext();
+
+ public CompressionContext(CompressionParameters parameters, long baseAddress, int inputSize)
+ {
+ int windowSize = Math.max(1, Math.min(1 << parameters.getWindowLog(), inputSize));
+ int blockSize = Math.min(MAX_BLOCK_SIZE, windowSize);
+ int divider = (parameters.getSearchLength() == 3) ? 3 : 4;
+
+ int maxSequences = blockSize / divider;
+
+ sequenceStore = new SequenceStore(blockSize, maxSequences);
+
+ blockCompressionState = new BlockCompressionState(parameters, baseAddress);
+ }
+
+ public void commit()
+ {
+ offsets.commit();
+ huffmanContext.saveChanges();
+ }
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/CompressionParameters.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/CompressionParameters.java
new file mode 100644
index 00000000000..586a07a8cb2
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/CompressionParameters.java
@@ -0,0 +1,306 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+import static ai.vespa.airlift.zstd.Constants.MAX_WINDOW_LOG;
+import static ai.vespa.airlift.zstd.Constants.MIN_WINDOW_LOG;
+import static ai.vespa.airlift.zstd.Util.cycleLog;
+import static ai.vespa.airlift.zstd.Util.highestBit;
+
/**
 * Advanced compression parameters for one compression job, mirroring
 * {@code ZSTD_compressionParameters} in upstream zstd. The level tables below are
 * ported verbatim from zstd's built-in tables (empirically tuned — do not edit
 * individual entries), and {@link #compute} applies the same small-input clamping
 * rules as upstream {@code ZSTD_adjustCParams}.
 */
class CompressionParameters
{
    private static final int MIN_HASH_LOG = 6;

    public static final int DEFAULT_COMPRESSION_LEVEL = 3;
    private static final int MAX_COMPRESSION_LEVEL = 22;

    private final int windowLog; // largest match distance : larger == more compression, more memory needed during decompression
    private final int chainLog; // fully searched segment : larger == more compression, slower, more memory (useless for fast)
    private final int hashLog; // dispatch table : larger == faster, more memory
    private final int searchLog; // nb of searches : larger == more compression, slower
    private final int searchLength; // match length searched : larger == faster decompression, sometimes less compression
    private final int targetLength; // acceptable match size for optimal parser (only) : larger == more compression, slower
    private final Strategy strategy;

    // Row = compression level (0 = base for negative levels); outer index picks a
    // table by estimated input size (see getDefaultParameters).
    private static final CompressionParameters[][] DEFAULT_COMPRESSION_PARAMETERS = new CompressionParameters[][] {
            {
                    // default
                    new CompressionParameters(19, 12, 13, 1, 6, 1, Strategy.FAST), /* base for negative levels */
                    new CompressionParameters(19, 13, 14, 1, 7, 0, Strategy.FAST), /* level 1 */
                    new CompressionParameters(19, 15, 16, 1, 6, 0, Strategy.FAST), /* level 2 */
                    new CompressionParameters(20, 16, 17, 1, 5, 1, Strategy.DFAST), /* level 3 */
                    new CompressionParameters(20, 18, 18, 1, 5, 1, Strategy.DFAST), /* level 4 */
                    new CompressionParameters(20, 18, 18, 2, 5, 2, Strategy.GREEDY), /* level 5 */
                    new CompressionParameters(21, 18, 19, 2, 5, 4, Strategy.LAZY), /* level 6 */
                    new CompressionParameters(21, 18, 19, 3, 5, 8, Strategy.LAZY2), /* level 7 */
                    new CompressionParameters(21, 19, 19, 3, 5, 16, Strategy.LAZY2), /* level 8 */
                    new CompressionParameters(21, 19, 20, 4, 5, 16, Strategy.LAZY2), /* level 9 */
                    new CompressionParameters(21, 20, 21, 4, 5, 16, Strategy.LAZY2), /* level 10 */
                    new CompressionParameters(21, 21, 22, 4, 5, 16, Strategy.LAZY2), /* level 11 */
                    new CompressionParameters(22, 20, 22, 5, 5, 16, Strategy.LAZY2), /* level 12 */
                    new CompressionParameters(22, 21, 22, 4, 5, 32, Strategy.BTLAZY2), /* level 13 */
                    new CompressionParameters(22, 21, 22, 5, 5, 32, Strategy.BTLAZY2), /* level 14 */
                    new CompressionParameters(22, 22, 22, 6, 5, 32, Strategy.BTLAZY2), /* level 15 */
                    new CompressionParameters(22, 21, 22, 4, 5, 48, Strategy.BTOPT), /* level 16 */
                    new CompressionParameters(23, 22, 22, 4, 4, 64, Strategy.BTOPT), /* level 17 */
                    new CompressionParameters(23, 23, 22, 6, 3, 256, Strategy.BTOPT), /* level 18 */
                    new CompressionParameters(23, 24, 22, 7, 3, 256, Strategy.BTULTRA), /* level 19 */
                    new CompressionParameters(25, 25, 23, 7, 3, 256, Strategy.BTULTRA), /* level 20 */
                    new CompressionParameters(26, 26, 24, 7, 3, 512, Strategy.BTULTRA), /* level 21 */
                    new CompressionParameters(27, 27, 25, 9, 3, 999, Strategy.BTULTRA) /* level 22 */
            },
            {
                    // for size <= 256 KB
                    new CompressionParameters(18, 12, 13, 1, 5, 1, Strategy.FAST), /* base for negative levels */
                    new CompressionParameters(18, 13, 14, 1, 6, 0, Strategy.FAST), /* level 1 */
                    new CompressionParameters(18, 14, 14, 1, 5, 1, Strategy.DFAST), /* level 2 */
                    new CompressionParameters(18, 16, 16, 1, 4, 1, Strategy.DFAST), /* level 3 */
                    new CompressionParameters(18, 16, 17, 2, 5, 2, Strategy.GREEDY), /* level 4.*/
                    new CompressionParameters(18, 18, 18, 3, 5, 2, Strategy.GREEDY), /* level 5.*/
                    new CompressionParameters(18, 18, 19, 3, 5, 4, Strategy.LAZY), /* level 6.*/
                    new CompressionParameters(18, 18, 19, 4, 4, 4, Strategy.LAZY), /* level 7 */
                    new CompressionParameters(18, 18, 19, 4, 4, 8, Strategy.LAZY2), /* level 8 */
                    new CompressionParameters(18, 18, 19, 5, 4, 8, Strategy.LAZY2), /* level 9 */
                    new CompressionParameters(18, 18, 19, 6, 4, 8, Strategy.LAZY2), /* level 10 */
                    new CompressionParameters(18, 18, 19, 5, 4, 16, Strategy.BTLAZY2), /* level 11.*/
                    new CompressionParameters(18, 19, 19, 6, 4, 16, Strategy.BTLAZY2), /* level 12.*/
                    new CompressionParameters(18, 19, 19, 8, 4, 16, Strategy.BTLAZY2), /* level 13 */
                    new CompressionParameters(18, 18, 19, 4, 4, 24, Strategy.BTOPT), /* level 14.*/
                    new CompressionParameters(18, 18, 19, 4, 3, 24, Strategy.BTOPT), /* level 15.*/
                    new CompressionParameters(18, 19, 19, 6, 3, 64, Strategy.BTOPT), /* level 16.*/
                    new CompressionParameters(18, 19, 19, 8, 3, 128, Strategy.BTOPT), /* level 17.*/
                    new CompressionParameters(18, 19, 19, 10, 3, 256, Strategy.BTOPT), /* level 18.*/
                    new CompressionParameters(18, 19, 19, 10, 3, 256, Strategy.BTULTRA), /* level 19.*/
                    new CompressionParameters(18, 19, 19, 11, 3, 512, Strategy.BTULTRA), /* level 20.*/
                    new CompressionParameters(18, 19, 19, 12, 3, 512, Strategy.BTULTRA), /* level 21.*/
                    new CompressionParameters(18, 19, 19, 13, 3, 999, Strategy.BTULTRA) /* level 22.*/
            },
            {
                    // for size <= 128 KB
                    new CompressionParameters(17, 12, 12, 1, 5, 1, Strategy.FAST), /* base for negative levels */
                    new CompressionParameters(17, 12, 13, 1, 6, 0, Strategy.FAST), /* level 1 */
                    new CompressionParameters(17, 13, 15, 1, 5, 0, Strategy.FAST), /* level 2 */
                    new CompressionParameters(17, 15, 16, 2, 5, 1, Strategy.DFAST), /* level 3 */
                    new CompressionParameters(17, 17, 17, 2, 4, 1, Strategy.DFAST), /* level 4 */
                    new CompressionParameters(17, 16, 17, 3, 4, 2, Strategy.GREEDY), /* level 5 */
                    new CompressionParameters(17, 17, 17, 3, 4, 4, Strategy.LAZY), /* level 6 */
                    new CompressionParameters(17, 17, 17, 3, 4, 8, Strategy.LAZY2), /* level 7 */
                    new CompressionParameters(17, 17, 17, 4, 4, 8, Strategy.LAZY2), /* level 8 */
                    new CompressionParameters(17, 17, 17, 5, 4, 8, Strategy.LAZY2), /* level 9 */
                    new CompressionParameters(17, 17, 17, 6, 4, 8, Strategy.LAZY2), /* level 10 */
                    new CompressionParameters(17, 17, 17, 7, 4, 8, Strategy.LAZY2), /* level 11 */
                    new CompressionParameters(17, 18, 17, 6, 4, 16, Strategy.BTLAZY2), /* level 12 */
                    new CompressionParameters(17, 18, 17, 8, 4, 16, Strategy.BTLAZY2), /* level 13.*/
                    new CompressionParameters(17, 18, 17, 4, 4, 32, Strategy.BTOPT), /* level 14.*/
                    new CompressionParameters(17, 18, 17, 6, 3, 64, Strategy.BTOPT), /* level 15.*/
                    new CompressionParameters(17, 18, 17, 7, 3, 128, Strategy.BTOPT), /* level 16.*/
                    new CompressionParameters(17, 18, 17, 7, 3, 256, Strategy.BTOPT), /* level 17.*/
                    new CompressionParameters(17, 18, 17, 8, 3, 256, Strategy.BTOPT), /* level 18.*/
                    new CompressionParameters(17, 18, 17, 8, 3, 256, Strategy.BTULTRA), /* level 19.*/
                    new CompressionParameters(17, 18, 17, 9, 3, 256, Strategy.BTULTRA), /* level 20.*/
                    new CompressionParameters(17, 18, 17, 10, 3, 256, Strategy.BTULTRA), /* level 21.*/
                    new CompressionParameters(17, 18, 17, 11, 3, 512, Strategy.BTULTRA) /* level 22.*/
            },
            {
                    // for size <= 16 KB
                    new CompressionParameters(14, 12, 13, 1, 5, 1, Strategy.FAST), /* base for negative levels */
                    new CompressionParameters(14, 14, 15, 1, 5, 0, Strategy.FAST), /* level 1 */
                    new CompressionParameters(14, 14, 15, 1, 4, 0, Strategy.FAST), /* level 2 */
                    new CompressionParameters(14, 14, 14, 2, 4, 1, Strategy.DFAST), /* level 3.*/
                    new CompressionParameters(14, 14, 14, 4, 4, 2, Strategy.GREEDY), /* level 4.*/
                    new CompressionParameters(14, 14, 14, 3, 4, 4, Strategy.LAZY), /* level 5.*/
                    new CompressionParameters(14, 14, 14, 4, 4, 8, Strategy.LAZY2), /* level 6 */
                    new CompressionParameters(14, 14, 14, 6, 4, 8, Strategy.LAZY2), /* level 7 */
                    new CompressionParameters(14, 14, 14, 8, 4, 8, Strategy.LAZY2), /* level 8.*/
                    new CompressionParameters(14, 15, 14, 5, 4, 8, Strategy.BTLAZY2), /* level 9.*/
                    new CompressionParameters(14, 15, 14, 9, 4, 8, Strategy.BTLAZY2), /* level 10.*/
                    new CompressionParameters(14, 15, 14, 3, 4, 12, Strategy.BTOPT), /* level 11.*/
                    new CompressionParameters(14, 15, 14, 6, 3, 16, Strategy.BTOPT), /* level 12.*/
                    new CompressionParameters(14, 15, 14, 6, 3, 24, Strategy.BTOPT), /* level 13.*/
                    new CompressionParameters(14, 15, 15, 6, 3, 48, Strategy.BTOPT), /* level 14.*/
                    new CompressionParameters(14, 15, 15, 6, 3, 64, Strategy.BTOPT), /* level 15.*/
                    new CompressionParameters(14, 15, 15, 6, 3, 96, Strategy.BTOPT), /* level 16.*/
                    new CompressionParameters(14, 15, 15, 6, 3, 128, Strategy.BTOPT), /* level 17.*/
                    new CompressionParameters(14, 15, 15, 8, 3, 256, Strategy.BTOPT), /* level 18.*/
                    new CompressionParameters(14, 15, 15, 6, 3, 256, Strategy.BTULTRA), /* level 19.*/
                    new CompressionParameters(14, 15, 15, 8, 3, 256, Strategy.BTULTRA), /* level 20.*/
                    new CompressionParameters(14, 15, 15, 9, 3, 256, Strategy.BTULTRA), /* level 21.*/
                    new CompressionParameters(14, 15, 15, 10, 3, 512, Strategy.BTULTRA) /* level 22.*/
            }
    };

    /**
     * Match-finding strategy, ordered from fastest/weakest to slowest/strongest.
     * Each value carries its {@link BlockCompressor}; only DFAST is implemented in
     * this port — the rest are {@link BlockCompressor#UNSUPPORTED} placeholders.
     * The "YC" notes are Yann Collet's descriptions of each strategy.
     */
    public enum Strategy
    {
        // from faster to stronger

        // YC: fast is a "single probe" strategy : at every position, we attempt to find a match, and give up if we don't find any. similar to lz4.
        FAST(BlockCompressor.UNSUPPORTED),

        // YC: double_fast is a 2 attempts strategies. They are not symmetrical by the way. One attempt is "normal" while the second one looks for "long matches". It was
        // empirically found that this was the best trade off. As can be guessed, it's slower than single-attempt, but find more and better matches, so compresses better.
        DFAST(new DoubleFastBlockCompressor()),

        // YC: greedy uses a hash chain strategy. Every position is hashed, and all positions with same hash are chained. The algorithm goes through all candidates. There are
        // diminishing returns in going deeper and deeper, so after a nb of attempts (which can be selected), it abandons the search. The best (longest) match wins. If there is
        // one winner, it's immediately encoded.
        GREEDY(BlockCompressor.UNSUPPORTED),

        // YC: lazy will do something similar to greedy, but will not encode immediately. It will search again at next position, in case it would find something better.
        // It's actually fairly common to have a small match at position p hiding a more worthy one at position p+1. This obviously increases the search workload. But the
        // resulting compressed stream generally contains larger matches, hence compresses better.
        LAZY(BlockCompressor.UNSUPPORTED),

        // YC: lazy2 is same as lazy, but deeper. It will search at P, P+1 and then P+2 in case it would find something even better. More workload. Better matches.
        LAZY2(BlockCompressor.UNSUPPORTED),

        // YC: btlazy2 is like lazy2, but trades the hash chain for a binary tree. This becomes necessary, as the nb of attempts becomes prohibitively expensive. The binary tree
        // complexity increases with log of search depth, instead of proportionally with search depth. So searching deeper in history quickly becomes the dominant operation.
        // btlazy2 cuts into that. But it costs 2x more memory. It's also relatively "slow", even when trying to cut its parameters to make it perform faster. So it's really
        // a high compression strategy.
        BTLAZY2(BlockCompressor.UNSUPPORTED),

        // YC: btopt is, well, a hell of lot more complex.
        // It will compute and find multiple matches per position, will dynamically compare every path from point P to P+N, reverse the graph to find cheapest path, iterate on
        // batches of overlapping matches, etc. It's much more expensive. But the compression ratio is also much better.
        BTOPT(BlockCompressor.UNSUPPORTED),

        // YC: btultra is about the same, but doesn't cut as many corners (btopt "abandons" more quickly unpromising little gains). Slower, stronger.
        BTULTRA(BlockCompressor.UNSUPPORTED);

        private final BlockCompressor compressor;

        Strategy(BlockCompressor compressor)
        {
            this.compressor = compressor;
        }

        public BlockCompressor getCompressor()
        {
            return compressor;
        }
    }

    public CompressionParameters(int windowLog, int chainLog, int hashLog, int searchLog, int searchLength, int targetLength, Strategy strategy)
    {
        this.windowLog = windowLog;
        this.chainLog = chainLog;
        this.hashLog = hashLog;
        this.searchLog = searchLog;
        this.searchLength = searchLength;
        this.targetLength = targetLength;
        this.strategy = strategy;
    }

    public int getWindowLog()
    {
        return windowLog;
    }

    public int getSearchLength()
    {
        return searchLength;
    }

    public int getChainLog()
    {
        return chainLog;
    }

    public int getHashLog()
    {
        return hashLog;
    }

    public int getSearchLog()
    {
        return searchLog;
    }

    public int getTargetLength()
    {
        return targetLength;
    }

    public Strategy getStrategy()
    {
        return strategy;
    }

    /**
     * Picks the table parameters for {@code compressionLevel} and then adjusts them
     * for the actual input size (analogue of upstream {@code ZSTD_adjustCParams}):
     * small inputs get a smaller window, hashLog/chainLog are kept consistent with
     * the window, and negative levels become acceleration factors.
     *
     * @param compressionLevel negative for "fast" levels, 0 for the library default
     * @param inputSize size of the input to compress, in bytes
     */
    public static CompressionParameters compute(int compressionLevel, int inputSize)
    {
        CompressionParameters defaultParameters = getDefaultParameters(compressionLevel, inputSize);

        int targetLength = defaultParameters.targetLength;
        int windowLog = defaultParameters.windowLog;
        int chainLog = defaultParameters.chainLog;
        int hashLog = defaultParameters.hashLog;
        int searchLog = defaultParameters.searchLog;
        int searchLength = defaultParameters.searchLength;
        Strategy strategy = defaultParameters.strategy;

        if (compressionLevel < 0) {
            targetLength = -compressionLevel; // acceleration factor
        }

        // resize windowLog if input is small enough, to use less memory
        long maxWindowResize = 1L << (MAX_WINDOW_LOG - 1);
        if (inputSize < maxWindowResize) {
            int hashSizeMin = 1 << MIN_HASH_LOG;
            // smallest power-of-two window that still covers the whole input
            int inputSizeLog = (inputSize < hashSizeMin) ? MIN_HASH_LOG : highestBit(inputSize - 1) + 1;
            if (windowLog > inputSizeLog) {
                windowLog = inputSizeLog;
            }
        }

        // a hash table larger than 2x the window is wasted memory
        if (hashLog > windowLog + 1) {
            hashLog = windowLog + 1;
        }

        // keep the chain/tree table no larger than the (possibly shrunk) window
        int cycleLog = cycleLog(chainLog, strategy);
        if (cycleLog > windowLog) {
            chainLog -= (cycleLog - windowLog);
        }

        if (windowLog < MIN_WINDOW_LOG) {
            windowLog = MIN_WINDOW_LOG;
        }

        return new CompressionParameters(windowLog, chainLog, hashLog, searchLog, searchLength, targetLength, strategy);
    }

    /**
     * Selects the raw table row: the size-bucketed table (unknown size == 0 uses the
     * default table) and the clamped level row (level 0 maps to the default level,
     * negative levels to row 0).
     */
    private static CompressionParameters getDefaultParameters(int compressionLevel, long estimatedInputSize)
    {
        int table = 0;

        if (estimatedInputSize != 0) {
            if (estimatedInputSize <= 16 * 1024) {
                table = 3;
            }
            else if (estimatedInputSize <= 128 * 1024) {
                table = 2;
            }
            else if (estimatedInputSize <= 256 * 1024) {
                table = 1;
            }
        }

        int row = DEFAULT_COMPRESSION_LEVEL;

        if (compressionLevel != 0) { // TODO: figure out better way to indicate default compression level
            row = Math.min(Math.max(0, compressionLevel), MAX_COMPRESSION_LEVEL);
        }

        return DEFAULT_COMPRESSION_PARAMETERS[table][row];
    }
}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/Constants.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/Constants.java
new file mode 100644
index 00000000000..8777487b8c2
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/Constants.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
/**
 * Shared constants of the Zstandard frame and block format (RFC 8878) plus the
 * predefined sequence-code bit tables. Values must match the format specification
 * exactly — do not edit.
 */
class Constants
{
    // primitive sizes in bytes, for unsafe address arithmetic
    public static final int SIZE_OF_BYTE = 1;
    public static final int SIZE_OF_SHORT = 2;
    public static final int SIZE_OF_INT = 4;
    public static final int SIZE_OF_LONG = 8;

    // zstd frame magic (stored little-endian at the start of every frame)
    public static final int MAGIC_NUMBER = 0xFD2FB528;
    // skippable frames use any magic in [MIN, MAX]; their content is ignored
    public static final int MAGIC_SKIPFRAME_MIN = 0x184D2A50;
    public static final int MAGIC_SKIPFRAME_MAX = 0x184D2A5F;

    public static final int MIN_WINDOW_LOG = 10;
    public static final int MAX_WINDOW_LOG = 31;

    public static final int SIZE_OF_BLOCK_HEADER = 3;

    public static final int MIN_SEQUENCES_SIZE = 1;
    public static final int MIN_BLOCK_SIZE = 1 // block type tag
            + 1 // min size of raw or rle length header
            + MIN_SEQUENCES_SIZE;
    public static final int MAX_BLOCK_SIZE = 128 * 1024;

    // number of repeated-offset history slots kept by the format
    public static final int REPEATED_OFFSET_COUNT = 3;

    // block types
    public static final int RAW_BLOCK = 0;
    public static final int RLE_BLOCK = 1;
    public static final int COMPRESSED_BLOCK = 2;

    // sequence encoding types (Symbol_Compression_Mode in the spec)
    public static final int SEQUENCE_ENCODING_BASIC = 0;
    public static final int SEQUENCE_ENCODING_RLE = 1;
    public static final int SEQUENCE_ENCODING_COMPRESSED = 2;
    public static final int SEQUENCE_ENCODING_REPEAT = 3;

    // highest valid code for each sequence field
    public static final int MAX_LITERALS_LENGTH_SYMBOL = 35;
    public static final int MAX_MATCH_LENGTH_SYMBOL = 52;
    public static final int MAX_OFFSET_CODE_SYMBOL = 31;
    public static final int DEFAULT_MAX_OFFSET_CODE_SYMBOL = 28;

    // FSE table accuracy logs used when encoding sequences
    public static final int LITERAL_LENGTH_TABLE_LOG = 9;
    public static final int MATCH_LENGTH_TABLE_LOG = 9;
    public static final int OFFSET_TABLE_LOG = 8;

    // literal block types
    public static final int RAW_LITERALS_BLOCK = 0;
    public static final int RLE_LITERALS_BLOCK = 1;
    public static final int COMPRESSED_LITERALS_BLOCK = 2;
    public static final int TREELESS_LITERALS_BLOCK = 3;

    // sequence counts >= this value need the long (3-byte) header encoding
    public static final int LONG_NUMBER_OF_SEQUENCES = 0x7F00;

    // extra bits carried by each literals-length code (spec's Literals_Length_Code table)
    public static final int[] LITERALS_LENGTH_BITS = {0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0,
            1, 1, 1, 1, 2, 2, 3, 3,
            4, 6, 7, 8, 9, 10, 11, 12,
            13, 14, 15, 16};

    // extra bits carried by each match-length code (spec's Match_Length_Code table)
    public static final int[] MATCH_LENGTH_BITS = {0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0,
            1, 1, 1, 1, 2, 2, 3, 3,
            4, 4, 5, 7, 8, 9, 10, 11,
            12, 13, 14, 15, 16};

    // static-only holder; never instantiated
    private Constants()
    {
    }
}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/DoubleFastBlockCompressor.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/DoubleFastBlockCompressor.java
new file mode 100644
index 00000000000..c2c6b4a936d
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/DoubleFastBlockCompressor.java
@@ -0,0 +1,261 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+import static ai.vespa.airlift.zstd.Constants.SIZE_OF_INT;
+import static ai.vespa.airlift.zstd.Constants.SIZE_OF_LONG;
+import static ai.vespa.airlift.zstd.UnsafeUtil.UNSAFE;
+
+class DoubleFastBlockCompressor
+ implements BlockCompressor // zstd "double fast" match strategy: probes a long (8-byte) hash table and a short hash table at each position
+{
+ private static final int MIN_MATCH = 3; // shortest match length zstd can encode in a sequence
+ private static final int SEARCH_STRENGTH = 8; // shift controlling how quickly the scan accelerates past non-matching data
+ private static final int REP_MOVE = Constants.REPEATED_OFFSET_COUNT - 1; // real offsets are biased by this so the smallest codes denote repeated offsets
+
+ public int compressBlock(Object inputBase, final long inputAddress, int inputSize, SequenceStore output, BlockCompressionState state, RepeatedOffsets offsets, CompressionParameters parameters) // returns the number of trailing literal bytes left unencoded
+ {
+ int matchSearchLength = Math.max(parameters.getSearchLength(), 4); // width (in bytes) hashed for the short table; at least 4
+
+ // Offsets in hash tables are relative to baseAddress. Hash tables can be reused across calls to compressBlock as long as
+ // baseAddress is kept constant.
+ // We don't want to generate sequences that point before the current window limit, so we "filter" out all results from looking up in the hash tables
+ // beyond that point.
+ final long baseAddress = state.getBaseAddress();
+ final long windowBaseAddress = baseAddress + state.getWindowBaseOffset();
+
+ int[] longHashTable = state.hashTable;
+ int longHashBits = parameters.getHashLog();
+
+ int[] shortHashTable = state.chainTable; // chain-table storage is reused as the short-hash table by this strategy
+ int shortHashBits = parameters.getChainLog();
+
+ final long inputEnd = inputAddress + inputSize;
+ final long inputLimit = inputEnd - SIZE_OF_LONG; // We read a long at a time for computing the hashes
+
+ long input = inputAddress;
+ long anchor = inputAddress; // start of the pending run of unemitted literals
+
+ int offset1 = offsets.getOffset0(); // repeated offsets carried over from the previous block
+ int offset2 = offsets.getOffset1();
+
+ int savedOffset = 0; // stashes an out-of-range repeat offset so it can be restored when saving state at the end
+
+ if (input - windowBaseAddress == 0) { // very first byte of the window: nothing earlier to match against
+ input++;
+ }
+ int maxRep = (int) (input - windowBaseAddress); // repeat offsets must not reach before the window base
+
+ if (offset2 > maxRep) {
+ savedOffset = offset2;
+ offset2 = 0;
+ }
+
+ if (offset1 > maxRep) {
+ savedOffset = offset1;
+ offset1 = 0;
+ }
+
+ while (input < inputLimit) { // < instead of <=, because repcode check at (input+1)
+ int shortHash = hash(inputBase, input, shortHashBits, matchSearchLength);
+ long shortMatchAddress = baseAddress + shortHashTable[shortHash];
+
+ int longHash = hash8(UNSAFE.getLong(inputBase, input), longHashBits);
+ long longMatchAddress = baseAddress + longHashTable[longHash];
+
+ // update hash tables
+ int current = (int) (input - baseAddress);
+ longHashTable[longHash] = current;
+ shortHashTable[shortHash] = current;
+
+ int matchLength;
+ int offset;
+
+ if (offset1 > 0 && UNSAFE.getInt(inputBase, input + 1 - offset1) == UNSAFE.getInt(inputBase, input + 1)) {
+ // found a repeated sequence of at least 4 bytes, separated by offset1
+ matchLength = count(inputBase, input + 1 + SIZE_OF_INT, inputEnd, input + 1 + SIZE_OF_INT - offset1) + SIZE_OF_INT;
+ input++;
+ output.storeSequence(inputBase, anchor, (int) (input - anchor), 0, matchLength - MIN_MATCH); // offset code 0 = "repeat offset1"
+ }
+ else {
+ // check prefix long match
+ if (longMatchAddress > windowBaseAddress && UNSAFE.getLong(inputBase, longMatchAddress) == UNSAFE.getLong(inputBase, input)) {
+ matchLength = count(inputBase, input + SIZE_OF_LONG, inputEnd, longMatchAddress + SIZE_OF_LONG) + SIZE_OF_LONG;
+ offset = (int) (input - longMatchAddress);
+ while (input > anchor && longMatchAddress > windowBaseAddress && UNSAFE.getByte(inputBase, input - 1) == UNSAFE.getByte(inputBase, longMatchAddress - 1)) { // extend the match backwards over preceding equal bytes
+ input--;
+ longMatchAddress--;
+ matchLength++;
+ }
+ }
+ else {
+ // check prefix short match
+ if (shortMatchAddress > windowBaseAddress && UNSAFE.getInt(inputBase, shortMatchAddress) == UNSAFE.getInt(inputBase, input)) {
+ int nextOffsetHash = hash8(UNSAFE.getLong(inputBase, input + 1), longHashBits);
+ long nextOffsetMatchAddress = baseAddress + longHashTable[nextOffsetHash];
+ longHashTable[nextOffsetHash] = current + 1;
+
+ // check prefix long +1 match
+ if (nextOffsetMatchAddress > windowBaseAddress && UNSAFE.getLong(inputBase, nextOffsetMatchAddress) == UNSAFE.getLong(inputBase, input + 1)) {
+ matchLength = count(inputBase, input + 1 + SIZE_OF_LONG, inputEnd, nextOffsetMatchAddress + SIZE_OF_LONG) + SIZE_OF_LONG;
+ input++;
+ offset = (int) (input - nextOffsetMatchAddress);
+ while (input > anchor && nextOffsetMatchAddress > windowBaseAddress && UNSAFE.getByte(inputBase, input - 1) == UNSAFE.getByte(inputBase, nextOffsetMatchAddress - 1)) { // backward extension, as above
+ input--;
+ nextOffsetMatchAddress--;
+ matchLength++;
+ }
+ }
+ else {
+ // if no long +1 match, explore the short match we found
+ matchLength = count(inputBase, input + SIZE_OF_INT, inputEnd, shortMatchAddress + SIZE_OF_INT) + SIZE_OF_INT;
+ offset = (int) (input - shortMatchAddress);
+ while (input > anchor && shortMatchAddress > windowBaseAddress && UNSAFE.getByte(inputBase, input - 1) == UNSAFE.getByte(inputBase, shortMatchAddress - 1)) { // backward extension, as above
+ input--;
+ shortMatchAddress--;
+ matchLength++;
+ }
+ }
+ }
+ else {
+ input += ((input - anchor) >> SEARCH_STRENGTH) + 1; // no match: skip ahead, accelerating with distance from the last match
+ continue;
+ }
+ }
+
+ offset2 = offset1; // found a real match: rotate the repeated-offset history
+ offset1 = offset;
+
+ output.storeSequence(inputBase, anchor, (int) (input - anchor), offset + REP_MOVE, matchLength - MIN_MATCH);
+ }
+
+ input += matchLength;
+ anchor = input;
+
+ if (input <= inputLimit) {
+ // Fill Table: register positions inside and just past the match so future searches can find them
+ longHashTable[hash8(UNSAFE.getLong(inputBase, baseAddress + current + 2), longHashBits)] = current + 2;
+ shortHashTable[hash(inputBase, baseAddress + current + 2, shortHashBits, matchSearchLength)] = current + 2;
+
+ longHashTable[hash8(UNSAFE.getLong(inputBase, input - 2), longHashBits)] = (int) (input - 2 - baseAddress);
+ shortHashTable[hash(inputBase, input - 2, shortHashBits, matchSearchLength)] = (int) (input - 2 - baseAddress);
+
+ while (input <= inputLimit && offset2 > 0 && UNSAFE.getInt(inputBase, input) == UNSAFE.getInt(inputBase, input - offset2)) { // greedily emit repeat matches immediately after the match
+ int repetitionLength = count(inputBase, input + SIZE_OF_INT, inputEnd, input + SIZE_OF_INT - offset2) + SIZE_OF_INT;
+
+ // swap offset2 <=> offset1
+ int temp = offset2;
+ offset2 = offset1;
+ offset1 = temp;
+
+ shortHashTable[hash(inputBase, input, shortHashBits, matchSearchLength)] = (int) (input - baseAddress);
+ longHashTable[hash8(UNSAFE.getLong(inputBase, input), longHashBits)] = (int) (input - baseAddress);
+
+ output.storeSequence(inputBase, anchor, 0, 0, repetitionLength - MIN_MATCH); // zero literals; offset code 0 = repeat
+
+ input += repetitionLength;
+ anchor = input;
+ }
+ }
+ }
+
+ // save reps for next block
+ offsets.saveOffset0(offset1 != 0 ? offset1 : savedOffset);
+ offsets.saveOffset1(offset2 != 0 ? offset2 : savedOffset);
+
+ // return the last literals size
+ return (int) (inputEnd - anchor);
+ }
+
+ // TODO: same as LZ4RawCompressor.count
+
+ /**
+ * Length of the common run of equal bytes at inputAddress and matchAddress, bounded by inputLimit. matchAddress must be < inputAddress
+ */
+ public static int count(Object inputBase, final long inputAddress, final long inputLimit, final long matchAddress)
+ {
+ long input = inputAddress;
+ long match = matchAddress;
+
+ int remaining = (int) (inputLimit - inputAddress);
+
+ // first, compare long at a time
+ int count = 0;
+ while (count < remaining - (SIZE_OF_LONG - 1)) {
+ long diff = UNSAFE.getLong(inputBase, match) ^ UNSAFE.getLong(inputBase, input); // XOR pinpoints the first differing byte
+ if (diff != 0) {
+ return count + (Long.numberOfTrailingZeros(diff) >> 3); // trailing zeros / 8 = equal leading bytes; assumes little-endian reads (presumably enforced at load via IncompatibleJvmException — verify)
+ }
+
+ count += SIZE_OF_LONG;
+ input += SIZE_OF_LONG;
+ match += SIZE_OF_LONG;
+ }
+
+ while (count < remaining && UNSAFE.getByte(inputBase, match) == UNSAFE.getByte(inputBase, input)) { // finish byte-by-byte near the end of the range
+ count++;
+ input++;
+ match++;
+ }
+
+ return count;
+ }
+
+ private static int hash(Object inputBase, long inputAddress, int bits, int matchSearchLength) // dispatch on configured hash width; falls back to a 4-byte hash
+ {
+ switch (matchSearchLength) {
+ case 8:
+ return hash8(UNSAFE.getLong(inputBase, inputAddress), bits);
+ case 7:
+ return hash7(UNSAFE.getLong(inputBase, inputAddress), bits);
+ case 6:
+ return hash6(UNSAFE.getLong(inputBase, inputAddress), bits);
+ case 5:
+ return hash5(UNSAFE.getLong(inputBase, inputAddress), bits);
+ default:
+ return hash4(UNSAFE.getInt(inputBase, inputAddress), bits);
+ }
+ }
+
+ private static final int PRIME_4_BYTES = 0x9E3779B1; // large odd constants for multiplicative hashing over 4..8 input bytes
+ private static final long PRIME_5_BYTES = 0xCF1BBCDCBBL;
+ private static final long PRIME_6_BYTES = 0xCF1BBCDCBF9BL;
+ private static final long PRIME_7_BYTES = 0xCF1BBCDCBFA563L;
+ private static final long PRIME_8_BYTES = 0xCF1BBCDCB7A56463L;
+
+ private static int hash4(int value, int bits)
+ {
+ return (value * PRIME_4_BYTES) >>> (Integer.SIZE - bits); // multiply-shift hash: keep the top 'bits' bits of the product
+ }
+
+ private static int hash5(long value, int bits)
+ {
+ return (int) (((value << (Long.SIZE - 40)) * PRIME_5_BYTES) >>> (Long.SIZE - bits)); // the shift keeps only the low 5 bytes of value
+ }
+
+ private static int hash6(long value, int bits)
+ {
+ return (int) (((value << (Long.SIZE - 48)) * PRIME_6_BYTES) >>> (Long.SIZE - bits)); // low 6 bytes
+ }
+
+ private static int hash7(long value, int bits)
+ {
+ return (int) (((value << (Long.SIZE - 56)) * PRIME_7_BYTES) >>> (Long.SIZE - bits)); // low 7 bytes
+ }
+
+ private static int hash8(long value, int bits)
+ {
+ return (int) ((value * PRIME_8_BYTES) >>> (Long.SIZE - bits)); // all 8 bytes
+ }
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/FiniteStateEntropy.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/FiniteStateEntropy.java
new file mode 100644
index 00000000000..5703f0200a3
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/FiniteStateEntropy.java
@@ -0,0 +1,551 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+import static ai.vespa.airlift.zstd.BitInputStream.peekBits;
+import static ai.vespa.airlift.zstd.Constants.SIZE_OF_INT;
+import static ai.vespa.airlift.zstd.Constants.SIZE_OF_LONG;
+import static ai.vespa.airlift.zstd.Constants.SIZE_OF_SHORT;
+import static ai.vespa.airlift.zstd.UnsafeUtil.UNSAFE;
+import static ai.vespa.airlift.zstd.Util.checkArgument;
+import static ai.vespa.airlift.zstd.Util.verify;
+import static sun.misc.Unsafe.ARRAY_BYTE_BASE_OFFSET;
+
+class FiniteStateEntropy // FSE (tANS) entropy coder: table-driven decompression, compression, and count normalization
+{
+ public static final int MAX_SYMBOL = 255; // symbols are unsigned bytes
+ public static final int MAX_TABLE_LOG = 12; // largest supported accuracy: 1 << 12 states
+ public static final int MIN_TABLE_LOG = 5;
+
+ private static final int[] REST_TO_BEAT = new int[] {0, 473195, 504333, 520860, 550000, 700000, 750000, 830000}; // fixed-point rounding thresholds used in normalizeCounts when deciding to bump a small probability up
+ private static final short UNASSIGNED = -2; // sentinel: symbol's normalized count not yet decided (normalizeCounts2)
+
+ private FiniteStateEntropy()
+ {
+ }
+
+ public static int decompress(FiniteStateEntropy.Table table, final Object inputBase, final long inputAddress, final long inputLimit, byte[] outputBuffer) // returns the number of bytes written to outputBuffer
+ {
+ final Object outputBase = outputBuffer;
+ final long outputAddress = ARRAY_BYTE_BASE_OFFSET;
+ final long outputLimit = outputAddress + outputBuffer.length;
+
+ long input = inputAddress;
+ long output = outputAddress;
+
+ // initialize bit stream
+ BitInputStream.Initializer initializer = new BitInputStream.Initializer(inputBase, input, inputLimit);
+ initializer.initialize();
+ int bitsConsumed = initializer.getBitsConsumed();
+ long currentAddress = initializer.getCurrentAddress();
+ long bits = initializer.getBits();
+
+ // initialize first FSE stream
+ int state1 = (int) peekBits(bitsConsumed, bits, table.log2Size);
+ bitsConsumed += table.log2Size;
+
+ BitInputStream.Loader loader = new BitInputStream.Loader(inputBase, input, currentAddress, bits, bitsConsumed);
+ loader.load();
+ bits = loader.getBits();
+ bitsConsumed = loader.getBitsConsumed();
+ currentAddress = loader.getCurrentAddress();
+
+ // initialize second FSE stream
+ int state2 = (int) peekBits(bitsConsumed, bits, table.log2Size);
+ bitsConsumed += table.log2Size;
+
+ loader = new BitInputStream.Loader(inputBase, input, currentAddress, bits, bitsConsumed);
+ loader.load();
+ bits = loader.getBits();
+ bitsConsumed = loader.getBitsConsumed();
+ currentAddress = loader.getCurrentAddress();
+
+ byte[] symbols = table.symbol;
+ byte[] numbersOfBits = table.numberOfBits;
+ int[] newStates = table.newState;
+
+ // decode 4 symbols per loop, alternating between the two interleaved states
+ while (output <= outputLimit - 4) {
+ int numberOfBits;
+
+ UNSAFE.putByte(outputBase, output, symbols[state1]);
+ numberOfBits = numbersOfBits[state1];
+ state1 = (int) (newStates[state1] + peekBits(bitsConsumed, bits, numberOfBits));
+ bitsConsumed += numberOfBits;
+
+ UNSAFE.putByte(outputBase, output + 1, symbols[state2]);
+ numberOfBits = numbersOfBits[state2];
+ state2 = (int) (newStates[state2] + peekBits(bitsConsumed, bits, numberOfBits));
+ bitsConsumed += numberOfBits;
+
+ UNSAFE.putByte(outputBase, output + 2, symbols[state1]);
+ numberOfBits = numbersOfBits[state1];
+ state1 = (int) (newStates[state1] + peekBits(bitsConsumed, bits, numberOfBits));
+ bitsConsumed += numberOfBits;
+
+ UNSAFE.putByte(outputBase, output + 3, symbols[state2]);
+ numberOfBits = numbersOfBits[state2];
+ state2 = (int) (newStates[state2] + peekBits(bitsConsumed, bits, numberOfBits));
+ bitsConsumed += numberOfBits;
+
+ output += SIZE_OF_INT;
+
+ loader = new BitInputStream.Loader(inputBase, input, currentAddress, bits, bitsConsumed);
+ boolean done = loader.load();
+ bitsConsumed = loader.getBitsConsumed();
+ bits = loader.getBits();
+ currentAddress = loader.getCurrentAddress();
+ if (done) {
+ break;
+ }
+ }
+
+ while (true) { // tail: decode one symbol at a time until the bitstream is exhausted
+ verify(output <= outputLimit - 2, input, "Output buffer is too small");
+ UNSAFE.putByte(outputBase, output++, symbols[state1]);
+ int numberOfBits = numbersOfBits[state1];
+ state1 = (int) (newStates[state1] + peekBits(bitsConsumed, bits, numberOfBits));
+ bitsConsumed += numberOfBits;
+
+ loader = new BitInputStream.Loader(inputBase, input, currentAddress, bits, bitsConsumed);
+ loader.load();
+ bitsConsumed = loader.getBitsConsumed();
+ bits = loader.getBits();
+ currentAddress = loader.getCurrentAddress();
+
+ if (loader.isOverflow()) { // no more bits: emit the other state's final symbol and finish
+ UNSAFE.putByte(outputBase, output++, symbols[state2]);
+ break;
+ }
+
+ verify(output <= outputLimit - 2, input, "Output buffer is too small");
+ UNSAFE.putByte(outputBase, output++, symbols[state2]);
+ int numberOfBits1 = numbersOfBits[state2];
+ state2 = (int) (newStates[state2] + peekBits(bitsConsumed, bits, numberOfBits1));
+ bitsConsumed += numberOfBits1;
+
+ loader = new BitInputStream.Loader(inputBase, input, currentAddress, bits, bitsConsumed);
+ loader.load();
+ bitsConsumed = loader.getBitsConsumed();
+ bits = loader.getBits();
+ currentAddress = loader.getCurrentAddress();
+
+ if (loader.isOverflow()) { // symmetric case: state1 emits the final symbol
+ UNSAFE.putByte(outputBase, output++, symbols[state1]);
+ break;
+ }
+ }
+
+ return (int) (output - outputAddress);
+ }
+
+ public static int compress(Object outputBase, long outputAddress, int outputSize, byte[] input, int inputSize, FseCompressionTable table) // convenience overload for byte[] input
+ {
+ return compress(outputBase, outputAddress, outputSize, input, ARRAY_BYTE_BASE_OFFSET, inputSize, table);
+ }
+
+ public static int compress(Object outputBase, long outputAddress, int outputSize, Object inputBase, long inputAddress, int inputSize, FseCompressionTable table) // returns compressed size; 0 means "not compressible, emit raw"
+ {
+ checkArgument(outputSize >= SIZE_OF_LONG, "Output buffer too small");
+
+ final long start = inputAddress;
+ final long inputLimit = start + inputSize;
+
+ long input = inputLimit; // FSE encodes backwards, from the last byte to the first
+
+ if (inputSize <= 2) { // too small to benefit; caller should emit raw/RLE instead
+ return 0;
+ }
+
+ BitOutputStream stream = new BitOutputStream(outputBase, outputAddress, outputSize);
+
+ int state1;
+ int state2;
+
+ if ((inputSize & 1) != 0) { // odd size: consume one extra symbol up front so the main loop processes pairs
+ input--;
+ state1 = table.begin(UNSAFE.getByte(inputBase, input));
+
+ input--;
+ state2 = table.begin(UNSAFE.getByte(inputBase, input));
+
+ input--;
+ state1 = table.encode(stream, state1, UNSAFE.getByte(inputBase, input));
+
+ stream.flush();
+ }
+ else {
+ input--;
+ state2 = table.begin(UNSAFE.getByte(inputBase, input));
+
+ input--;
+ state1 = table.begin(UNSAFE.getByte(inputBase, input));
+ }
+
+ // join to mod 4
+ inputSize -= 2;
+
+ if ((SIZE_OF_LONG * 8 > MAX_TABLE_LOG * 4 + 7) && (inputSize & 2) != 0) { /* test bit 2 */ // condition is constant-folded: wide bit container allows 4 encodes per flush
+ input--;
+ state2 = table.encode(stream, state2, UNSAFE.getByte(inputBase, input));
+
+ input--;
+ state1 = table.encode(stream, state1, UNSAFE.getByte(inputBase, input));
+
+ stream.flush();
+ }
+
+ // 2 or 4 encoding per loop
+ while (input > start) {
+ input--;
+ state2 = table.encode(stream, state2, UNSAFE.getByte(inputBase, input));
+
+ if (SIZE_OF_LONG * 8 < MAX_TABLE_LOG * 2 + 7) { // constant-folded: extra flush only needed on narrow bit containers
+ stream.flush();
+ }
+
+ input--;
+ state1 = table.encode(stream, state1, UNSAFE.getByte(inputBase, input));
+
+ if (SIZE_OF_LONG * 8 > MAX_TABLE_LOG * 4 + 7) { // constant-folded: 64-bit container fits 4 encodes between flushes
+ input--;
+ state2 = table.encode(stream, state2, UNSAFE.getByte(inputBase, input));
+
+ input--;
+ state1 = table.encode(stream, state1, UNSAFE.getByte(inputBase, input));
+ }
+
+ stream.flush();
+ }
+
+ table.finish(stream, state2); // flush the final states so the decoder can bootstrap
+ table.finish(stream, state1);
+
+ return stream.close();
+ }
+
+ public static int optimalTableLog(int maxTableLog, int inputSize, int maxSymbol) // picks an accuracy log balancing compression vs. table size
+ {
+ if (inputSize <= 1) {
+ throw new IllegalArgumentException(); // not supported. Use RLE instead
+ }
+
+ int result = maxTableLog;
+
+ result = Math.min(result, Util.highestBit((inputSize - 1)) - 2); // we may be able to reduce accuracy if input is small
+
+ // Need a minimum to safely represent all symbol values
+ result = Math.max(result, Util.minTableLog(inputSize, maxSymbol));
+
+ result = Math.max(result, MIN_TABLE_LOG);
+ result = Math.min(result, MAX_TABLE_LOG);
+
+ return result;
+ }
+
+ public static int normalizeCounts(short[] normalizedCounts, int tableLog, int[] counts, int total, int maxSymbol) // scales raw counts so they sum to exactly 1 << tableLog
+ {
+ checkArgument(tableLog >= MIN_TABLE_LOG, "Unsupported FSE table size");
+ checkArgument(tableLog <= MAX_TABLE_LOG, "FSE table size too large");
+ checkArgument(tableLog >= Util.minTableLog(total, maxSymbol), "FSE table size too small");
+
+ long scale = 62 - tableLog; // fixed-point: probabilities computed at 62-bit precision
+ long step = (1L << 62) / total;
+ long vstep = 1L << (scale - 20);
+
+ int stillToDistribute = 1 << tableLog; // table slots left to hand out; must end at exactly 0
+
+ int largest = 0;
+ short largestProbability = 0;
+ int lowThreshold = total >>> tableLog; // counts at or below this get the special "less than 1" probability
+
+ for (int symbol = 0; symbol <= maxSymbol; symbol++) {
+ if (counts[symbol] == total) {
+ throw new IllegalArgumentException(); // TODO: should have been RLE-compressed by upper layers
+ }
+ if (counts[symbol] == 0) {
+ normalizedCounts[symbol] = 0;
+ continue;
+ }
+ if (counts[symbol] <= lowThreshold) {
+ normalizedCounts[symbol] = -1; // -1 encodes "probability less than 1" but still occupies one table slot
+ stillToDistribute--;
+ }
+ else {
+ short probability = (short) ((counts[symbol] * step) >>> scale);
+ if (probability < 8) {
+ long restToBeat = vstep * REST_TO_BEAT[probability];
+ long delta = counts[symbol] * step - (((long) probability) << scale);
+ if (delta > restToBeat) { // remainder large enough: round this small probability up
+ probability++;
+ }
+ }
+ if (probability > largestProbability) {
+ largestProbability = probability;
+ largest = symbol;
+ }
+ normalizedCounts[symbol] = probability;
+ stillToDistribute -= probability;
+ }
+ }
+
+ if (-stillToDistribute >= (normalizedCounts[largest] >>> 1)) {
+ // corner case. Need another normalization method
+ // TODO size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue);
+ normalizeCounts2(normalizedCounts, tableLog, counts, total, maxSymbol);
+ }
+ else {
+ normalizedCounts[largest] += (short) stillToDistribute; // absorb the rounding residue into the most frequent symbol
+ }
+
+ return tableLog;
+ }
+
+ private static int normalizeCounts2(short[] normalizedCounts, int tableLog, int[] counts, int total, int maxSymbol) // slower corrective normalization used when the fast path over-rounds
+ {
+ int distributed = 0;
+
+ int lowThreshold = total >>> tableLog; // minimum count below which frequency in the normalized table is "too small" (~ < 1)
+ int lowOne = (total * 3) >>> (tableLog + 1); // 1.5 * lowThreshold. If count in (lowThreshold, lowOne] => assign frequency 1
+
+ for (int i = 0; i <= maxSymbol; i++) {
+ if (counts[i] == 0) {
+ normalizedCounts[i] = 0;
+ }
+ else if (counts[i] <= lowThreshold) {
+ normalizedCounts[i] = -1;
+ distributed++;
+ total -= counts[i];
+ }
+ else if (counts[i] <= lowOne) {
+ normalizedCounts[i] = 1;
+ distributed++;
+ total -= counts[i];
+ }
+ else {
+ normalizedCounts[i] = UNASSIGNED; // decide these in the proportional pass below
+ }
+ }
+
+ int normalizationFactor = 1 << tableLog;
+ int toDistribute = normalizationFactor - distributed;
+
+ if ((total / toDistribute) > lowOne) {
+ /* risk of rounding to zero */
+ lowOne = ((total * 3) / (toDistribute * 2));
+ for (int i = 0; i <= maxSymbol; i++) {
+ if ((normalizedCounts[i] == UNASSIGNED) && (counts[i] <= lowOne)) {
+ normalizedCounts[i] = 1;
+ distributed++;
+ total -= counts[i];
+ }
+ }
+ toDistribute = normalizationFactor - distributed;
+ }
+
+ if (distributed == maxSymbol + 1) {
+ // all values are pretty poor;
+ // probably incompressible data (should have already been detected);
+ // find max, then give all remaining points to max
+ int maxValue = 0;
+ int maxCount = 0;
+ for (int i = 0; i <= maxSymbol; i++) {
+ if (counts[i] > maxCount) {
+ maxValue = i;
+ maxCount = counts[i];
+ }
+ }
+ normalizedCounts[maxValue] += (short) toDistribute;
+ return 0;
+ }
+
+ if (total == 0) {
+ // all of the symbols were low enough for the lowOne or lowThreshold
+ for (int i = 0; toDistribute > 0; i = (i + 1) % (maxSymbol + 1)) { // round-robin the leftover slots over symbols that have some probability
+ if (normalizedCounts[i] > 0) {
+ toDistribute--;
+ normalizedCounts[i]++;
+ }
+ }
+ return 0;
+ }
+
+ // TODO: simplify/document this code
+ long vStepLog = 62 - tableLog;
+ long mid = (1L << (vStepLog - 1)) - 1;
+ long rStep = (((1L << vStepLog) * toDistribute) + mid) / total; /* scale on remaining */
+ long tmpTotal = mid;
+ for (int i = 0; i <= maxSymbol; i++) {
+ if (normalizedCounts[i] == UNASSIGNED) {
+ long end = tmpTotal + (counts[i] * rStep);
+ int sStart = (int) (tmpTotal >>> vStepLog);
+ int sEnd = (int) (end >>> vStepLog);
+ int weight = sEnd - sStart;
+
+ if (weight < 1) {
+ throw new AssertionError();
+ }
+ normalizedCounts[i] = (short) weight;
+ tmpTotal = end;
+ }
+ }
+
+ return 0;
+ }
+
+ public static int writeNormalizedCounts(Object outputBase, long outputAddress, int outputSize, short[] normalizedCounts, int maxSymbol, int tableLog) // serializes the table description per RFC 8478 section 4.1.1; returns bytes written
+ {
+ checkArgument(tableLog <= MAX_TABLE_LOG, "FSE table too large");
+ checkArgument(tableLog >= MIN_TABLE_LOG, "FSE table too small");
+
+ long output = outputAddress;
+ long outputLimit = outputAddress + outputSize;
+
+ int tableSize = 1 << tableLog;
+
+ int bitCount = 0;
+
+ // encode table size
+ int bitStream = (tableLog - MIN_TABLE_LOG);
+ bitCount += 4;
+
+ int remaining = tableSize + 1; // +1 for extra accuracy
+ int threshold = tableSize;
+ int tableBitCount = tableLog + 1;
+
+ int symbol = 0;
+
+ boolean previousIs0 = false;
+ while (remaining > 1) {
+ if (previousIs0) {
+ // From RFC 8478, section 4.1.1:
+ // When a symbol has a probability of zero, it is followed by a 2-bit
+ // repeat flag. This repeat flag tells how many probabilities of zeroes
+ // follow the current one. It provides a number ranging from 0 to 3.
+ // If it is a 3, another 2-bit repeat flag follows, and so on.
+ int start = symbol;
+
+ // find run of symbols with count 0
+ while (normalizedCounts[symbol] == 0) {
+ symbol++;
+ }
+
+ // encode in batches if 8 repeat sequences in one shot (representing 24 symbols total)
+ while (symbol >= start + 24) {
+ start += 24;
+ bitStream |= (0b11_11_11_11_11_11_11_11 << bitCount);
+ checkArgument(output + SIZE_OF_SHORT <= outputLimit, "Output buffer too small");
+
+ UNSAFE.putShort(outputBase, output, (short) bitStream);
+ output += SIZE_OF_SHORT;
+
+ // flush now, so no need to increase bitCount by 16
+ bitStream >>>= Short.SIZE;
+ }
+
+ // encode remaining in batches of 3 symbols
+ while (symbol >= start + 3) {
+ start += 3;
+ bitStream |= 0b11 << bitCount;
+ bitCount += 2;
+ }
+
+ // encode tail
+ bitStream |= (symbol - start) << bitCount;
+ bitCount += 2;
+
+ // flush bitstream if necessary
+ if (bitCount > 16) {
+ checkArgument(output + SIZE_OF_SHORT <= outputLimit, "Output buffer too small");
+
+ UNSAFE.putShort(outputBase, output, (short) bitStream);
+ output += SIZE_OF_SHORT;
+
+ bitStream >>>= Short.SIZE;
+ bitCount -= Short.SIZE;
+ }
+ }
+
+ int count = normalizedCounts[symbol++];
+ int max = (2 * threshold - 1) - remaining;
+ remaining -= count < 0 ? -count : count; // the special -1 ("less than 1") still consumes one unit of probability mass
+ count++; /* +1 for extra accuracy */
+ if (count >= threshold) {
+ count += max;
+ }
+ bitStream |= count << bitCount;
+ bitCount += tableBitCount;
+ bitCount -= (count < max ? 1 : 0); // small values use one bit fewer (variable-size encoding per RFC 8478)
+ previousIs0 = (count == 1); // encoded 1 corresponds to probability 0
+
+ if (remaining < 1) {
+ throw new AssertionError();
+ }
+
+ while (remaining < threshold) { // shrink field width as the remaining probability mass shrinks
+ tableBitCount--;
+ threshold >>= 1;
+ }
+
+ // flush bitstream if necessary
+ if (bitCount > 16) {
+ checkArgument(output + SIZE_OF_SHORT <= outputLimit, "Output buffer too small");
+
+ UNSAFE.putShort(outputBase, output, (short) bitStream);
+ output += SIZE_OF_SHORT;
+
+ bitStream >>>= Short.SIZE;
+ bitCount -= Short.SIZE;
+ }
+ }
+
+ // flush remaining bitstream
+ checkArgument(output + SIZE_OF_SHORT <= outputLimit, "Output buffer too small");
+ UNSAFE.putShort(outputBase, output, (short) bitStream);
+ output += (bitCount + 7) / 8; // advance by whole bytes actually used
+
+ checkArgument(symbol <= maxSymbol + 1, "Error"); // TODO
+
+ return (int) (output - outputAddress);
+ }
+
+ public static final class Table // decoding table: one entry per state
+ {
+ int log2Size; // accuracy log: the table has 1 << log2Size states
+ final int[] newState; // base of the next-state transition for each current state
+ final byte[] symbol; // symbol decoded at each state
+ final byte[] numberOfBits; // bits read from the stream to rebase the state
+
+ public Table(int log2Capacity)
+ {
+ int capacity = 1 << log2Capacity;
+ newState = new int[capacity];
+ symbol = new byte[capacity];
+ numberOfBits = new byte[capacity];
+ }
+
+ public Table(int log2Size, int[] newState, byte[] symbol, byte[] numberOfBits)
+ {
+ int size = 1 << log2Size;
+ if (newState.length != size || symbol.length != size || numberOfBits.length != size) {
+ throw new IllegalArgumentException("Expected arrays to match provided size");
+ }
+
+ this.log2Size = log2Size;
+ this.newState = newState;
+ this.symbol = symbol;
+ this.numberOfBits = numberOfBits;
+ }
+ }
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/FrameHeader.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/FrameHeader.java
new file mode 100644
index 00000000000..6495939cb38
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/FrameHeader.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+import java.util.Objects;
+import java.util.StringJoiner;
+
+class FrameHeader // immutable value object holding the fields parsed from a zstd frame header
+{
+ final long headerSize; // size, in bytes, of the encoded frame header
+ final int windowSize; // decoding window size declared by the frame — TODO confirm sentinel used when the field is absent
+ final long contentSize; // decompressed content size; optional in the format — TODO confirm sentinel used by the reader when missing
+ final long dictionaryId; // dictionary ID field; presumably 0 means "no dictionary" — verify against the frame reader
+ final boolean hasChecksum; // whether the frame carries a trailing content checksum
+
+ public FrameHeader(long headerSize, int windowSize, long contentSize, long dictionaryId, boolean hasChecksum)
+ {
+ this.headerSize = headerSize;
+ this.windowSize = windowSize;
+ this.contentSize = contentSize;
+ this.dictionaryId = dictionaryId;
+ this.hasChecksum = hasChecksum;
+ }
+
+ @Override
+ public boolean equals(Object o) // value equality over all five fields
+ {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ FrameHeader that = (FrameHeader) o;
+ return headerSize == that.headerSize &&
+ windowSize == that.windowSize &&
+ contentSize == that.contentSize &&
+ dictionaryId == that.dictionaryId &&
+ hasChecksum == that.hasChecksum;
+ }
+
+ @Override
+ public int hashCode() // consistent with equals: hashes the same five fields
+ {
+ return Objects.hash(headerSize, windowSize, contentSize, dictionaryId, hasChecksum);
+ }
+
+ @Override
+ public String toString()
+ {
+ return new StringJoiner(", ", FrameHeader.class.getSimpleName() + "[", "]")
+ .add("headerSize=" + headerSize)
+ .add("windowSize=" + windowSize)
+ .add("contentSize=" + contentSize)
+ .add("dictionaryId=" + dictionaryId)
+ .add("hasChecksum=" + hasChecksum)
+ .toString();
+ }
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/FseCompressionTable.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/FseCompressionTable.java
new file mode 100644
index 00000000000..e360c5ea5a6
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/FseCompressionTable.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+import static ai.vespa.airlift.zstd.FiniteStateEntropy.MAX_SYMBOL;
+
+class FseCompressionTable // encoder-side FSE table: precomputed state transitions and per-symbol bit-cost deltas
+{
+ private final short[] nextState; // state transitions grouped by symbol; entries are tableSize + position
+ private final int[] deltaNumberOfBits; // per-symbol packed term: (state + delta) >>> 16 yields the number of bits to emit
+ private final int[] deltaFindState; // per-symbol offset into nextState
+
+ private int log2Size; // accuracy log of the currently initialized table
+
+ public FseCompressionTable(int maxTableLog, int maxSymbol)
+ {
+ nextState = new short[1 << maxTableLog];
+ deltaNumberOfBits = new int[maxSymbol + 1];
+ deltaFindState = new int[maxSymbol + 1];
+ }
+
+ public static FseCompressionTable newInstance(short[] normalizedCounts, int maxSymbol, int tableLog) // convenience factory: allocate and initialize in one step
+ {
+ FseCompressionTable result = new FseCompressionTable(tableLog, maxSymbol);
+ result.initialize(normalizedCounts, maxSymbol, tableLog);
+
+ return result;
+ }
+
+ public void initializeRleTable(int symbol) // degenerate single-symbol table: every encode emits zero bits
+ {
+ log2Size = 0;
+
+ nextState[0] = 0;
+ nextState[1] = 0;
+
+ deltaFindState[symbol] = 0;
+ deltaNumberOfBits[symbol] = 0;
+ }
+
+ public void initialize(short[] normalizedCounts, int maxSymbol, int tableLog) // builds the transition tables from normalized counts (which must sum to 1 << tableLog)
+ {
+ int tableSize = 1 << tableLog;
+
+ byte[] table = new byte[tableSize]; // TODO: allocate in workspace
+ int highThreshold = tableSize - 1;
+
+ // TODO: make sure FseCompressionTable has enough size
+ log2Size = tableLog;
+
+ // For explanations on how to distribute symbol values over the table:
+ // http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html
+
+ // symbol start positions
+ int[] cumulative = new int[MAX_SYMBOL + 2]; // TODO: allocate in workspace
+ cumulative[0] = 0;
+ for (int i = 1; i <= maxSymbol + 1; i++) {
+ if (normalizedCounts[i - 1] == -1) { // Low probability symbol
+ cumulative[i] = cumulative[i - 1] + 1;
+ table[highThreshold--] = (byte) (i - 1); // low-probability symbols occupy the highest slots
+ }
+ else {
+ cumulative[i] = cumulative[i - 1] + normalizedCounts[i - 1];
+ }
+ }
+ cumulative[maxSymbol + 1] = tableSize + 1;
+
+ // Spread symbols
+ int position = spreadSymbols(normalizedCounts, maxSymbol, tableSize, highThreshold, table);
+
+ if (position != 0) { // the co-prime walk must return to slot 0 after filling every slot
+ throw new AssertionError("Spread symbols failed");
+ }
+
+ // Build table
+ for (int i = 0; i < tableSize; i++) {
+ byte symbol = table[i];
+ nextState[cumulative[symbol]++] = (short) (tableSize + i); /* TableU16 : sorted by symbol order; gives next state value */
+ }
+
+ // Build symbol transformation table
+ int total = 0;
+ for (int symbol = 0; symbol <= maxSymbol; symbol++) {
+ switch (normalizedCounts[symbol]) {
+ case 0:
+ deltaNumberOfBits[symbol] = ((tableLog + 1) << 16) - tableSize; // unused symbol; filled so the bit math stays harmless if ever hit
+ break;
+ case -1:
+ case 1:
+ deltaNumberOfBits[symbol] = (tableLog << 16) - tableSize; // probability 1: always emits tableLog bits
+ deltaFindState[symbol] = total - 1;
+ total++;
+ break;
+ default:
+ int maxBitsOut = tableLog - Util.highestBit(normalizedCounts[symbol] - 1);
+ int minStatePlus = normalizedCounts[symbol] << maxBitsOut;
+ deltaNumberOfBits[symbol] = (maxBitsOut << 16) - minStatePlus; // emits maxBitsOut or maxBitsOut-1 bits depending on the state
+ deltaFindState[symbol] = total - normalizedCounts[symbol];
+ total += normalizedCounts[symbol];
+ break;
+ }
+ }
+ }
+
+ public int begin(byte symbol) // starting state for the final symbol (FSE encodes back-to-front)
+ {
+ int outputBits = (deltaNumberOfBits[symbol] + (1 << 15)) >>> 16;
+ int base = ((outputBits << 16) - deltaNumberOfBits[symbol]) >>> outputBits;
+ return nextState[base + deltaFindState[symbol]];
+ }
+
+ public int encode(BitOutputStream stream, int state, int symbol) // emit the low bits of the state, then transition for this symbol
+ {
+ int outputBits = (state + deltaNumberOfBits[symbol]) >>> 16;
+ stream.addBits(state, outputBits);
+ return nextState[(state >>> outputBits) + deltaFindState[symbol]];
+ }
+
+ public void finish(BitOutputStream stream, int state) // flush the final state (log2Size bits) so the decoder can bootstrap
+ {
+ stream.addBits(state, log2Size);
+ stream.flush();
+ }
+
+ private static int calculateStep(int tableSize)
+ {
+ return (tableSize >>> 1) + (tableSize >>> 3) + 3; // odd step, co-prime with the power-of-two table size, so the walk visits every slot exactly once
+ }
+
+ public static int spreadSymbols(short[] normalizedCounters, int maxSymbolValue, int tableSize, int highThreshold, byte[] symbols) // scatters each symbol's slots across the table; returns the final walk position (0 on success)
+ {
+ int mask = tableSize - 1;
+ int step = calculateStep(tableSize);
+
+ int position = 0;
+ for (byte symbol = 0; symbol <= maxSymbolValue; symbol++) { // NOTE(review): byte counter would wrap past 127; callers presumably keep maxSymbol well below that — verify
+ for (int i = 0; i < normalizedCounters[symbol]; i++) {
+ symbols[position] = symbol;
+ do {
+ position = (position + step) & mask;
+ }
+ while (position > highThreshold); // slots above highThreshold are reserved for low-probability symbols
+ }
+ }
+ return position;
+ }
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/FseTableReader.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/FseTableReader.java
new file mode 100644
index 00000000000..0b8182dbc42
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/FseTableReader.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+import static ai.vespa.airlift.zstd.FiniteStateEntropy.MAX_SYMBOL;
+import static ai.vespa.airlift.zstd.FiniteStateEntropy.MIN_TABLE_LOG;
+import static ai.vespa.airlift.zstd.UnsafeUtil.UNSAFE;
+import static ai.vespa.airlift.zstd.Util.highestBit;
+import static ai.vespa.airlift.zstd.Util.verify;
+
+class FseTableReader
+{
+ private final short[] nextSymbol = new short[MAX_SYMBOL + 1];
+ private final short[] normalizedCounters = new short[MAX_SYMBOL + 1];
+
+ public int readFseTable(FiniteStateEntropy.Table table, Object inputBase, long inputAddress, long inputLimit, int maxSymbol, int maxTableLog)
+ {
+ // read table headers
+ long input = inputAddress;
+ verify(inputLimit - inputAddress >= 4, input, "Not enough input bytes");
+
+ int threshold;
+ int symbolNumber = 0;
+ boolean previousIsZero = false;
+
+ int bitStream = UNSAFE.getInt(inputBase, input);
+
+ int tableLog = (bitStream & 0xF) + MIN_TABLE_LOG;
+
+ int numberOfBits = tableLog + 1;
+ bitStream >>>= 4;
+ int bitCount = 4;
+
+ verify(tableLog <= maxTableLog, input, "FSE table size exceeds maximum allowed size");
+
+ int remaining = (1 << tableLog) + 1;
+ threshold = 1 << tableLog;
+
+ while (remaining > 1 && symbolNumber <= maxSymbol) {
+ if (previousIsZero) {
+ int n0 = symbolNumber;
+ while ((bitStream & 0xFFFF) == 0xFFFF) {
+ n0 += 24;
+ if (input < inputLimit - 5) {
+ input += 2;
+ bitStream = (UNSAFE.getInt(inputBase, input) >>> bitCount);
+ }
+ else {
+ // end of bit stream
+ bitStream >>>= 16;
+ bitCount += 16;
+ }
+ }
+ while ((bitStream & 3) == 3) {
+ n0 += 3;
+ bitStream >>>= 2;
+ bitCount += 2;
+ }
+ n0 += bitStream & 3;
+ bitCount += 2;
+
+ verify(n0 <= maxSymbol, input, "Symbol larger than max value");
+
+ while (symbolNumber < n0) {
+ normalizedCounters[symbolNumber++] = 0;
+ }
+ if ((input <= inputLimit - 7) || (input + (bitCount >>> 3) <= inputLimit - 4)) {
+ input += bitCount >>> 3;
+ bitCount &= 7;
+ bitStream = UNSAFE.getInt(inputBase, input) >>> bitCount;
+ }
+ else {
+ bitStream >>>= 2;
+ }
+ }
+
+ short max = (short) ((2 * threshold - 1) - remaining);
+ short count;
+
+ if ((bitStream & (threshold - 1)) < max) {
+ count = (short) (bitStream & (threshold - 1));
+ bitCount += numberOfBits - 1;
+ }
+ else {
+ count = (short) (bitStream & (2 * threshold - 1));
+ if (count >= threshold) {
+ count -= max;
+ }
+ bitCount += numberOfBits;
+ }
+ count--; // extra accuracy
+
+ remaining -= Math.abs(count);
+ normalizedCounters[symbolNumber++] = count;
+ previousIsZero = count == 0;
+ while (remaining < threshold) {
+ numberOfBits--;
+ threshold >>>= 1;
+ }
+
+ if ((input <= inputLimit - 7) || (input + (bitCount >> 3) <= inputLimit - 4)) {
+ input += bitCount >>> 3;
+ bitCount &= 7;
+ }
+ else {
+ bitCount -= (int) (8 * (inputLimit - 4 - input));
+ input = inputLimit - 4;
+ }
+ bitStream = UNSAFE.getInt(inputBase, input) >>> (bitCount & 31);
+ }
+
+ verify(remaining == 1 && bitCount <= 32, input, "Input is corrupted");
+
+ maxSymbol = symbolNumber - 1;
+ verify(maxSymbol <= MAX_SYMBOL, input, "Max symbol value too large (too many symbols for FSE)");
+
+ input += (bitCount + 7) >> 3;
+
+ // populate decoding table
+ int symbolCount = maxSymbol + 1;
+ int tableSize = 1 << tableLog;
+ int highThreshold = tableSize - 1;
+
+ table.log2Size = tableLog;
+
+ for (byte symbol = 0; symbol < symbolCount; symbol++) {
+ if (normalizedCounters[symbol] == -1) {
+ table.symbol[highThreshold--] = symbol;
+ nextSymbol[symbol] = 1;
+ }
+ else {
+ nextSymbol[symbol] = normalizedCounters[symbol];
+ }
+ }
+
+ int position = FseCompressionTable.spreadSymbols(normalizedCounters, maxSymbol, tableSize, highThreshold, table.symbol);
+
+ // position must reach all cells once, otherwise normalizedCounter is incorrect
+ verify(position == 0, input, "Input is corrupted");
+
+ for (int i = 0; i < tableSize; i++) {
+ byte symbol = table.symbol[i];
+ short nextState = nextSymbol[symbol]++;
+ table.numberOfBits[i] = (byte) (tableLog - highestBit(nextState));
+ table.newState[i] = (short) ((nextState << table.numberOfBits[i]) - tableSize);
+ }
+
+ return (int) (input - inputAddress);
+ }
+
+ public static void initializeRleTable(FiniteStateEntropy.Table table, byte value)
+ {
+ table.log2Size = 0;
+ table.symbol[0] = value;
+ table.newState[0] = 0;
+ table.numberOfBits[0] = 0;
+ }
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/Histogram.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/Histogram.java
new file mode 100644
index 00000000000..169de8b2cfa
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/Histogram.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+import java.util.Arrays;
+
+import static ai.vespa.airlift.zstd.UnsafeUtil.UNSAFE;
+import static sun.misc.Unsafe.ARRAY_BYTE_BASE_OFFSET;
+
+class Histogram
+{
+ private Histogram()
+ {
+ }
+
+ // TODO: count parallel heuristic for large inputs
+ private static void count(Object inputBase, long inputAddress, int inputSize, int[] counts)
+ {
+ long input = inputAddress;
+
+ Arrays.fill(counts, 0);
+
+ for (int i = 0; i < inputSize; i++) {
+ int symbol = UNSAFE.getByte(inputBase, input) & 0xFF;
+ input++;
+ counts[symbol]++;
+ }
+ }
+
+ public static int findLargestCount(int[] counts, int maxSymbol)
+ {
+ int max = 0;
+ for (int i = 0; i <= maxSymbol; i++) {
+ if (counts[i] > max) {
+ max = counts[i];
+ }
+ }
+
+ return max;
+ }
+
+ public static int findMaxSymbol(int[] counts, int maxSymbol)
+ {
+ while (counts[maxSymbol] == 0) {
+ maxSymbol--;
+ }
+ return maxSymbol;
+ }
+
+ public static void count(byte[] input, int length, int[] counts)
+ {
+ count(input, ARRAY_BYTE_BASE_OFFSET, length, counts);
+ }
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/Huffman.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/Huffman.java
new file mode 100644
index 00000000000..c8ed6a1f5f0
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/Huffman.java
@@ -0,0 +1,323 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+import java.util.Arrays;
+
+import static ai.vespa.airlift.zstd.BitInputStream.isEndOfStream;
+import static ai.vespa.airlift.zstd.BitInputStream.peekBitsFast;
+import static ai.vespa.airlift.zstd.Constants.SIZE_OF_INT;
+import static ai.vespa.airlift.zstd.Constants.SIZE_OF_SHORT;
+import static ai.vespa.airlift.zstd.UnsafeUtil.UNSAFE;
+import static ai.vespa.airlift.zstd.Util.isPowerOf2;
+import static ai.vespa.airlift.zstd.Util.verify;
+
+class Huffman
+{
+ public static final int MAX_SYMBOL = 255;
+ public static final int MAX_SYMBOL_COUNT = MAX_SYMBOL + 1;
+
+ public static final int MAX_TABLE_LOG = 12;
+ public static final int MIN_TABLE_LOG = 5;
+ public static final int MAX_FSE_TABLE_LOG = 6;
+
+ // stats
+ private final byte[] weights = new byte[MAX_SYMBOL + 1];
+ private final int[] ranks = new int[MAX_TABLE_LOG + 1];
+
+ // table
+ private int tableLog = -1;
+ private final byte[] symbols = new byte[1 << MAX_TABLE_LOG];
+ private final byte[] numbersOfBits = new byte[1 << MAX_TABLE_LOG];
+
+ private final FseTableReader reader = new FseTableReader();
+ private final FiniteStateEntropy.Table fseTable = new FiniteStateEntropy.Table(MAX_FSE_TABLE_LOG);
+
+ public boolean isLoaded()
+ {
+ return tableLog != -1;
+ }
+
+ public int readTable(final Object inputBase, final long inputAddress, final int size)
+ {
+ Arrays.fill(ranks, 0);
+ long input = inputAddress;
+
+ // read table header
+ verify(size > 0, input, "Not enough input bytes");
+ int inputSize = UNSAFE.getByte(inputBase, input++) & 0xFF;
+
+ int outputSize;
+ if (inputSize >= 128) {
+ outputSize = inputSize - 127;
+ inputSize = ((outputSize + 1) / 2);
+
+ verify(inputSize + 1 <= size, input, "Not enough input bytes");
+ verify(outputSize <= MAX_SYMBOL + 1, input, "Input is corrupted");
+
+ for (int i = 0; i < outputSize; i += 2) {
+ int value = UNSAFE.getByte(inputBase, input + i / 2) & 0xFF;
+ weights[i] = (byte) (value >>> 4);
+ weights[i + 1] = (byte) (value & 0b1111);
+ }
+ }
+ else {
+ verify(inputSize + 1 <= size, input, "Not enough input bytes");
+
+ long inputLimit = input + inputSize;
+ input += reader.readFseTable(fseTable, inputBase, input, inputLimit, FiniteStateEntropy.MAX_SYMBOL, MAX_FSE_TABLE_LOG);
+ outputSize = FiniteStateEntropy.decompress(fseTable, inputBase, input, inputLimit, weights);
+ }
+
+ int totalWeight = 0;
+ for (int i = 0; i < outputSize; i++) {
+ ranks[weights[i]]++;
+ totalWeight += (1 << weights[i]) >> 1; // TODO same as 1 << (weights[n] - 1)?
+ }
+ verify(totalWeight != 0, input, "Input is corrupted");
+
+ tableLog = Util.highestBit(totalWeight) + 1;
+ verify(tableLog <= MAX_TABLE_LOG, input, "Input is corrupted");
+
+ int total = 1 << tableLog;
+ int rest = total - totalWeight;
+ verify(isPowerOf2(rest), input, "Input is corrupted");
+
+ int lastWeight = Util.highestBit(rest) + 1;
+
+ weights[outputSize] = (byte) lastWeight;
+ ranks[lastWeight]++;
+
+ int numberOfSymbols = outputSize + 1;
+
+ // populate table
+ int nextRankStart = 0;
+ for (int i = 1; i < tableLog + 1; ++i) {
+ int current = nextRankStart;
+ nextRankStart += ranks[i] << (i - 1);
+ ranks[i] = current;
+ }
+
+ for (int n = 0; n < numberOfSymbols; n++) {
+ int weight = weights[n];
+ int length = (1 << weight) >> 1; // TODO: 1 << (weight - 1) ??
+
+ byte symbol = (byte) n;
+ byte numberOfBits = (byte) (tableLog + 1 - weight);
+ for (int i = ranks[weight]; i < ranks[weight] + length; i++) {
+ symbols[i] = symbol;
+ numbersOfBits[i] = numberOfBits;
+ }
+ ranks[weight] += length;
+ }
+
+ verify(ranks[1] >= 2 && (ranks[1] & 1) == 0, input, "Input is corrupted");
+
+ return inputSize + 1;
+ }
+
+ public void decodeSingleStream(final Object inputBase, final long inputAddress, final long inputLimit, final Object outputBase, final long outputAddress, final long outputLimit)
+ {
+ BitInputStream.Initializer initializer = new BitInputStream.Initializer(inputBase, inputAddress, inputLimit);
+ initializer.initialize();
+
+ long bits = initializer.getBits();
+ int bitsConsumed = initializer.getBitsConsumed();
+ long currentAddress = initializer.getCurrentAddress();
+
+ int tableLog = this.tableLog;
+ byte[] numbersOfBits = this.numbersOfBits;
+ byte[] symbols = this.symbols;
+
+ // 4 symbols at a time
+ long output = outputAddress;
+ long fastOutputLimit = outputLimit - 4;
+ while (output < fastOutputLimit) {
+ BitInputStream.Loader loader = new BitInputStream.Loader(inputBase, inputAddress, currentAddress, bits, bitsConsumed);
+ boolean done = loader.load();
+ bits = loader.getBits();
+ bitsConsumed = loader.getBitsConsumed();
+ currentAddress = loader.getCurrentAddress();
+ if (done) {
+ break;
+ }
+
+ bitsConsumed = decodeSymbol(outputBase, output, bits, bitsConsumed, tableLog, numbersOfBits, symbols);
+ bitsConsumed = decodeSymbol(outputBase, output + 1, bits, bitsConsumed, tableLog, numbersOfBits, symbols);
+ bitsConsumed = decodeSymbol(outputBase, output + 2, bits, bitsConsumed, tableLog, numbersOfBits, symbols);
+ bitsConsumed = decodeSymbol(outputBase, output + 3, bits, bitsConsumed, tableLog, numbersOfBits, symbols);
+ output += SIZE_OF_INT;
+ }
+
+ decodeTail(inputBase, inputAddress, currentAddress, bitsConsumed, bits, outputBase, output, outputLimit);
+ }
+
+ public void decode4Streams(final Object inputBase, final long inputAddress, final long inputLimit, final Object outputBase, final long outputAddress, final long outputLimit)
+ {
+ verify(inputLimit - inputAddress >= 10, inputAddress, "Input is corrupted"); // jump table + 1 byte per stream
+
+ long start1 = inputAddress + 3 * SIZE_OF_SHORT; // for the shorts we read below
+ long start2 = start1 + (UNSAFE.getShort(inputBase, inputAddress) & 0xFFFF);
+ long start3 = start2 + (UNSAFE.getShort(inputBase, inputAddress + 2) & 0xFFFF);
+ long start4 = start3 + (UNSAFE.getShort(inputBase, inputAddress + 4) & 0xFFFF);
+
+ BitInputStream.Initializer initializer = new BitInputStream.Initializer(inputBase, start1, start2);
+ initializer.initialize();
+ int stream1bitsConsumed = initializer.getBitsConsumed();
+ long stream1currentAddress = initializer.getCurrentAddress();
+ long stream1bits = initializer.getBits();
+
+ initializer = new BitInputStream.Initializer(inputBase, start2, start3);
+ initializer.initialize();
+ int stream2bitsConsumed = initializer.getBitsConsumed();
+ long stream2currentAddress = initializer.getCurrentAddress();
+ long stream2bits = initializer.getBits();
+
+ initializer = new BitInputStream.Initializer(inputBase, start3, start4);
+ initializer.initialize();
+ int stream3bitsConsumed = initializer.getBitsConsumed();
+ long stream3currentAddress = initializer.getCurrentAddress();
+ long stream3bits = initializer.getBits();
+
+ initializer = new BitInputStream.Initializer(inputBase, start4, inputLimit);
+ initializer.initialize();
+ int stream4bitsConsumed = initializer.getBitsConsumed();
+ long stream4currentAddress = initializer.getCurrentAddress();
+ long stream4bits = initializer.getBits();
+
+ int segmentSize = (int) ((outputLimit - outputAddress + 3) / 4);
+
+ long outputStart2 = outputAddress + segmentSize;
+ long outputStart3 = outputStart2 + segmentSize;
+ long outputStart4 = outputStart3 + segmentSize;
+
+ long output1 = outputAddress;
+ long output2 = outputStart2;
+ long output3 = outputStart3;
+ long output4 = outputStart4;
+
+ long fastOutputLimit = outputLimit - 7;
+ int tableLog = this.tableLog;
+ byte[] numbersOfBits = this.numbersOfBits;
+ byte[] symbols = this.symbols;
+
+ while (output4 < fastOutputLimit) {
+ stream1bitsConsumed = decodeSymbol(outputBase, output1, stream1bits, stream1bitsConsumed, tableLog, numbersOfBits, symbols);
+ stream2bitsConsumed = decodeSymbol(outputBase, output2, stream2bits, stream2bitsConsumed, tableLog, numbersOfBits, symbols);
+ stream3bitsConsumed = decodeSymbol(outputBase, output3, stream3bits, stream3bitsConsumed, tableLog, numbersOfBits, symbols);
+ stream4bitsConsumed = decodeSymbol(outputBase, output4, stream4bits, stream4bitsConsumed, tableLog, numbersOfBits, symbols);
+
+ stream1bitsConsumed = decodeSymbol(outputBase, output1 + 1, stream1bits, stream1bitsConsumed, tableLog, numbersOfBits, symbols);
+ stream2bitsConsumed = decodeSymbol(outputBase, output2 + 1, stream2bits, stream2bitsConsumed, tableLog, numbersOfBits, symbols);
+ stream3bitsConsumed = decodeSymbol(outputBase, output3 + 1, stream3bits, stream3bitsConsumed, tableLog, numbersOfBits, symbols);
+ stream4bitsConsumed = decodeSymbol(outputBase, output4 + 1, stream4bits, stream4bitsConsumed, tableLog, numbersOfBits, symbols);
+
+ stream1bitsConsumed = decodeSymbol(outputBase, output1 + 2, stream1bits, stream1bitsConsumed, tableLog, numbersOfBits, symbols);
+ stream2bitsConsumed = decodeSymbol(outputBase, output2 + 2, stream2bits, stream2bitsConsumed, tableLog, numbersOfBits, symbols);
+ stream3bitsConsumed = decodeSymbol(outputBase, output3 + 2, stream3bits, stream3bitsConsumed, tableLog, numbersOfBits, symbols);
+ stream4bitsConsumed = decodeSymbol(outputBase, output4 + 2, stream4bits, stream4bitsConsumed, tableLog, numbersOfBits, symbols);
+
+ stream1bitsConsumed = decodeSymbol(outputBase, output1 + 3, stream1bits, stream1bitsConsumed, tableLog, numbersOfBits, symbols);
+ stream2bitsConsumed = decodeSymbol(outputBase, output2 + 3, stream2bits, stream2bitsConsumed, tableLog, numbersOfBits, symbols);
+ stream3bitsConsumed = decodeSymbol(outputBase, output3 + 3, stream3bits, stream3bitsConsumed, tableLog, numbersOfBits, symbols);
+ stream4bitsConsumed = decodeSymbol(outputBase, output4 + 3, stream4bits, stream4bitsConsumed, tableLog, numbersOfBits, symbols);
+
+ output1 += SIZE_OF_INT;
+ output2 += SIZE_OF_INT;
+ output3 += SIZE_OF_INT;
+ output4 += SIZE_OF_INT;
+
+ BitInputStream.Loader loader = new BitInputStream.Loader(inputBase, start1, stream1currentAddress, stream1bits, stream1bitsConsumed);
+ boolean done = loader.load();
+ stream1bitsConsumed = loader.getBitsConsumed();
+ stream1bits = loader.getBits();
+ stream1currentAddress = loader.getCurrentAddress();
+
+ if (done) {
+ break;
+ }
+
+ loader = new BitInputStream.Loader(inputBase, start2, stream2currentAddress, stream2bits, stream2bitsConsumed);
+ done = loader.load();
+ stream2bitsConsumed = loader.getBitsConsumed();
+ stream2bits = loader.getBits();
+ stream2currentAddress = loader.getCurrentAddress();
+
+ if (done) {
+ break;
+ }
+
+ loader = new BitInputStream.Loader(inputBase, start3, stream3currentAddress, stream3bits, stream3bitsConsumed);
+ done = loader.load();
+ stream3bitsConsumed = loader.getBitsConsumed();
+ stream3bits = loader.getBits();
+ stream3currentAddress = loader.getCurrentAddress();
+ if (done) {
+ break;
+ }
+
+ loader = new BitInputStream.Loader(inputBase, start4, stream4currentAddress, stream4bits, stream4bitsConsumed);
+ done = loader.load();
+ stream4bitsConsumed = loader.getBitsConsumed();
+ stream4bits = loader.getBits();
+ stream4currentAddress = loader.getCurrentAddress();
+ if (done) {
+ break;
+ }
+ }
+
+ verify(output1 <= outputStart2 && output2 <= outputStart3 && output3 <= outputStart4, inputAddress, "Input is corrupted");
+
+ /// finish streams one by one
+ decodeTail(inputBase, start1, stream1currentAddress, stream1bitsConsumed, stream1bits, outputBase, output1, outputStart2);
+ decodeTail(inputBase, start2, stream2currentAddress, stream2bitsConsumed, stream2bits, outputBase, output2, outputStart3);
+ decodeTail(inputBase, start3, stream3currentAddress, stream3bitsConsumed, stream3bits, outputBase, output3, outputStart4);
+ decodeTail(inputBase, start4, stream4currentAddress, stream4bitsConsumed, stream4bits, outputBase, output4, outputLimit);
+ }
+
+ private void decodeTail(final Object inputBase, final long startAddress, long currentAddress, int bitsConsumed, long bits, final Object outputBase, long outputAddress, final long outputLimit)
+ {
+ int tableLog = this.tableLog;
+ byte[] numbersOfBits = this.numbersOfBits;
+ byte[] symbols = this.symbols;
+
+ // closer to the end
+ while (outputAddress < outputLimit) {
+ BitInputStream.Loader loader = new BitInputStream.Loader(inputBase, startAddress, currentAddress, bits, bitsConsumed);
+ boolean done = loader.load();
+ bitsConsumed = loader.getBitsConsumed();
+ bits = loader.getBits();
+ currentAddress = loader.getCurrentAddress();
+ if (done) {
+ break;
+ }
+
+ bitsConsumed = decodeSymbol(outputBase, outputAddress++, bits, bitsConsumed, tableLog, numbersOfBits, symbols);
+ }
+
+ // not more data in bit stream, so no need to reload
+ while (outputAddress < outputLimit) {
+ bitsConsumed = decodeSymbol(outputBase, outputAddress++, bits, bitsConsumed, tableLog, numbersOfBits, symbols);
+ }
+
+ verify(isEndOfStream(startAddress, currentAddress, bitsConsumed), startAddress, "Bit stream is not fully consumed");
+ }
+
+ private static int decodeSymbol(Object outputBase, long outputAddress, long bitContainer, int bitsConsumed, int tableLog, byte[] numbersOfBits, byte[] symbols)
+ {
+ int value = (int) peekBitsFast(bitsConsumed, bitContainer, tableLog);
+ UNSAFE.putByte(outputBase, outputAddress, symbols[value]);
+ return bitsConsumed + numbersOfBits[value];
+ }
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/HuffmanCompressionContext.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/HuffmanCompressionContext.java
new file mode 100644
index 00000000000..a651ea2a625
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/HuffmanCompressionContext.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+class HuffmanCompressionContext
+{
+ private final HuffmanTableWriterWorkspace tableWriterWorkspace = new HuffmanTableWriterWorkspace();
+ private final HuffmanCompressionTableWorkspace compressionTableWorkspace = new HuffmanCompressionTableWorkspace();
+
+ private HuffmanCompressionTable previousTable = new HuffmanCompressionTable(Huffman.MAX_SYMBOL_COUNT);
+ private HuffmanCompressionTable temporaryTable = new HuffmanCompressionTable(Huffman.MAX_SYMBOL_COUNT);
+
+ private HuffmanCompressionTable previousCandidate = previousTable;
+ private HuffmanCompressionTable temporaryCandidate = temporaryTable;
+
+ public HuffmanCompressionTable getPreviousTable()
+ {
+ return previousTable;
+ }
+
+ public HuffmanCompressionTable borrowTemporaryTable()
+ {
+ previousCandidate = temporaryTable;
+ temporaryCandidate = previousTable;
+
+ return temporaryTable;
+ }
+
+ public void discardTemporaryTable()
+ {
+ previousCandidate = previousTable;
+ temporaryCandidate = temporaryTable;
+ }
+
+ public void saveChanges()
+ {
+ temporaryTable = temporaryCandidate;
+ previousTable = previousCandidate;
+ }
+
+ public HuffmanCompressionTableWorkspace getCompressionTableWorkspace()
+ {
+ return compressionTableWorkspace;
+ }
+
+ public HuffmanTableWriterWorkspace getTableWriterWorkspace()
+ {
+ return tableWriterWorkspace;
+ }
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/HuffmanCompressionTable.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/HuffmanCompressionTable.java
new file mode 100644
index 00000000000..a18d7343b52
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/HuffmanCompressionTable.java
@@ -0,0 +1,437 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+import java.util.Arrays;
+
+import static ai.vespa.airlift.zstd.Huffman.MAX_FSE_TABLE_LOG;
+import static ai.vespa.airlift.zstd.Huffman.MAX_SYMBOL;
+import static ai.vespa.airlift.zstd.Huffman.MAX_SYMBOL_COUNT;
+import static ai.vespa.airlift.zstd.Huffman.MAX_TABLE_LOG;
+import static ai.vespa.airlift.zstd.Huffman.MIN_TABLE_LOG;
+import static ai.vespa.airlift.zstd.UnsafeUtil.UNSAFE;
+import static ai.vespa.airlift.zstd.Util.checkArgument;
+import static ai.vespa.airlift.zstd.Util.minTableLog;
+
+final class HuffmanCompressionTable
+{
+ private final short[] values;
+ private final byte[] numberOfBits;
+
+ private int maxSymbol;
+ private int maxNumberOfBits;
+
+ public HuffmanCompressionTable(int capacity)
+ {
+ this.values = new short[capacity];
+ this.numberOfBits = new byte[capacity];
+ }
+
+ public static int optimalNumberOfBits(int maxNumberOfBits, int inputSize, int maxSymbol)
+ {
+ if (inputSize <= 1) {
+ throw new IllegalArgumentException(); // not supported. Use RLE instead
+ }
+
+ int result = maxNumberOfBits;
+
+ result = Math.min(result, Util.highestBit((inputSize - 1)) - 1); // we may be able to reduce accuracy if input is small
+
+ // Need a minimum to safely represent all symbol values
+ result = Math.max(result, minTableLog(inputSize, maxSymbol));
+
+ result = Math.max(result, MIN_TABLE_LOG); // absolute minimum for Huffman
+ result = Math.min(result, MAX_TABLE_LOG); // absolute maximum for Huffman
+
+ return result;
+ }
+
+ public void initialize(int[] counts, int maxSymbol, int maxNumberOfBits, HuffmanCompressionTableWorkspace workspace)
+ {
+ checkArgument(maxSymbol <= MAX_SYMBOL, "Max symbol value too large");
+
+ workspace.reset();
+
+ NodeTable nodeTable = workspace.nodeTable;
+ nodeTable.reset();
+
+ int lastNonZero = buildTree(counts, maxSymbol, nodeTable);
+
+ // enforce max table log
+ maxNumberOfBits = setMaxHeight(nodeTable, lastNonZero, maxNumberOfBits, workspace);
+ checkArgument(maxNumberOfBits <= MAX_TABLE_LOG, "Max number of bits larger than max table size");
+
+ // populate table
+ int symbolCount = maxSymbol + 1;
+ for (int node = 0; node < symbolCount; node++) {
+ int symbol = nodeTable.symbols[node];
+ numberOfBits[symbol] = nodeTable.numberOfBits[node];
+ }
+
+ short[] entriesPerRank = workspace.entriesPerRank;
+ short[] valuesPerRank = workspace.valuesPerRank;
+
+ for (int n = 0; n <= lastNonZero; n++) {
+ entriesPerRank[nodeTable.numberOfBits[n]]++;
+ }
+
+ // determine starting value per rank
+ short startingValue = 0;
+ for (int rank = maxNumberOfBits; rank > 0; rank--) {
+ valuesPerRank[rank] = startingValue; // get starting value within each rank
+ startingValue += entriesPerRank[rank];
+ startingValue >>>= 1;
+ }
+
+ for (int n = 0; n <= maxSymbol; n++) {
+ values[n] = valuesPerRank[numberOfBits[n]]++; // assign value within rank, symbol order
+ }
+
+ this.maxSymbol = maxSymbol;
+ this.maxNumberOfBits = maxNumberOfBits;
+ }
+
+ private int buildTree(int[] counts, int maxSymbol, NodeTable nodeTable)
+ {
+ // populate the leaves of the node table from the histogram of counts
+ // in descending order by count, ascending by symbol value.
+ short current = 0;
+
+ for (int symbol = 0; symbol <= maxSymbol; symbol++) {
+ int count = counts[symbol];
+
+ // simple insertion sort
+ int position = current;
+ while (position > 1 && count > nodeTable.count[position - 1]) {
+ nodeTable.copyNode(position - 1, position);
+ position--;
+ }
+
+ nodeTable.count[position] = count;
+ nodeTable.symbols[position] = symbol;
+
+ current++;
+ }
+
+ int lastNonZero = maxSymbol;
+ while (nodeTable.count[lastNonZero] == 0) {
+ lastNonZero--;
+ }
+
+ // populate the non-leaf nodes
+ short nonLeafStart = MAX_SYMBOL_COUNT;
+ current = nonLeafStart;
+
+ int currentLeaf = lastNonZero;
+
+ // combine the two smallest leaves to create the first intermediate node
+ int currentNonLeaf = current;
+ nodeTable.count[current] = nodeTable.count[currentLeaf] + nodeTable.count[currentLeaf - 1];
+ nodeTable.parents[currentLeaf] = current;
+ nodeTable.parents[currentLeaf - 1] = current;
+ current++;
+ currentLeaf -= 2;
+
+ int root = MAX_SYMBOL_COUNT + lastNonZero - 1;
+
+ // fill in sentinels
+ for (int n = current; n <= root; n++) {
+ nodeTable.count[n] = 1 << 30;
+ }
+
+ // create parents
+ while (current <= root) {
+ int child1;
+ if (currentLeaf >= 0 && nodeTable.count[currentLeaf] < nodeTable.count[currentNonLeaf]) {
+ child1 = currentLeaf--;
+ }
+ else {
+ child1 = currentNonLeaf++;
+ }
+
+ int child2;
+ if (currentLeaf >= 0 && nodeTable.count[currentLeaf] < nodeTable.count[currentNonLeaf]) {
+ child2 = currentLeaf--;
+ }
+ else {
+ child2 = currentNonLeaf++;
+ }
+
+ nodeTable.count[current] = nodeTable.count[child1] + nodeTable.count[child2];
+ nodeTable.parents[child1] = current;
+ nodeTable.parents[child2] = current;
+ current++;
+ }
+
+ // distribute weights
+ nodeTable.numberOfBits[root] = 0;
+ for (int n = root - 1; n >= nonLeafStart; n--) {
+ short parent = nodeTable.parents[n];
+ nodeTable.numberOfBits[n] = (byte) (nodeTable.numberOfBits[parent] + 1);
+ }
+
+ for (int n = 0; n <= lastNonZero; n++) {
+ short parent = nodeTable.parents[n];
+ nodeTable.numberOfBits[n] = (byte) (nodeTable.numberOfBits[parent] + 1);
+ }
+
+ return lastNonZero;
+ }
+
+ // TODO: consider encoding 2 symbols at a time
+ // - need a table with 256x256 entries with
+ // - the concatenated bits for the corresponding pair of symbols
+ // - the sum of bits for the corresponding pair of symbols
+ // - read 2 symbols at a time from the input
+ public void encodeSymbol(BitOutputStream output, int symbol)
+ {
+ output.addBitsFast(values[symbol], numberOfBits[symbol]);
+ }
+
+ public int write(Object outputBase, long outputAddress, int outputSize, HuffmanTableWriterWorkspace workspace)
+ {
+ byte[] weights = workspace.weights;
+
+ long output = outputAddress;
+
+ int maxNumberOfBits = this.maxNumberOfBits;
+ int maxSymbol = this.maxSymbol;
+
+ // convert to weights per RFC 8478 section 4.2.1
+ for (int symbol = 0; symbol < maxSymbol; symbol++) {
+ int bits = numberOfBits[symbol];
+
+ if (bits == 0) {
+ weights[symbol] = 0;
+ }
+ else {
+ weights[symbol] = (byte) (maxNumberOfBits + 1 - bits);
+ }
+ }
+
+ // attempt weights compression by FSE
+ int size = compressWeights(outputBase, output + 1, outputSize - 1, weights, maxSymbol, workspace);
+
+ if (maxSymbol > 127 && size > 127) {
+ // This should never happen. Since weights are in the range [0, 12], they can be compressed optimally to ~3.7 bits per symbol for a uniform distribution.
+ // Since maxSymbol has to be <= MAX_SYMBOL (255), this is 119 bytes + FSE headers.
+ throw new AssertionError();
+ }
+
+ if (size != 0 && size != 1 && size < maxSymbol / 2) {
+ // Go with FSE only if:
+ // - the weights are compressible
+ // - the compressed size is better than what we'd get with the raw encoding below
+ // - the compressed size is <= 127 bytes, which is the most that the encoding can hold for FSE-compressed weights (see RFC 8478 section 4.2.1.1). This is implied
+ // by the maxSymbol / 2 check, since maxSymbol must be <= 255
+ UNSAFE.putByte(outputBase, output, (byte) size);
+ return size + 1; // header + size
+ }
+ else {
+ // Use raw encoding (4 bits per entry)
+
+ // #entries = #symbols - 1 since last symbol is implicit. Thus, #entries = (maxSymbol + 1) - 1 = maxSymbol
+ int entryCount = maxSymbol;
+
+ size = (entryCount + 1) / 2; // ceil(#entries / 2)
+ checkArgument(size + 1 /* header */ <= outputSize, "Output size too small"); // 2 entries per byte
+
+ // encode number of symbols
+ // header = #entries + 127 per RFC
+ UNSAFE.putByte(outputBase, output, (byte) (127 + entryCount));
+ output++;
+
+ weights[maxSymbol] = 0; // last weight is implicit, so set to 0 so that it doesn't get encoded below
+ for (int i = 0; i < entryCount; i += 2) {
+ UNSAFE.putByte(outputBase, output, (byte) ((weights[i] << 4) + weights[i + 1]));
+ output++;
+ }
+
+ return (int) (output - outputAddress);
+ }
+ }
+
+ /**
+ * Can this table encode all symbols with non-zero count?
+ */
+ public boolean isValid(int[] counts, int maxSymbol)
+ {
+ if (maxSymbol > this.maxSymbol) {
+ // some non-zero count symbols cannot be encoded by the current table
+ return false;
+ }
+
+ for (int symbol = 0; symbol <= maxSymbol; ++symbol) {
+ if (counts[symbol] != 0 && numberOfBits[symbol] == 0) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public int estimateCompressedSize(int[] counts, int maxSymbol)
+ {
+ int numberOfBits = 0;
+ for (int symbol = 0; symbol <= Math.min(maxSymbol, this.maxSymbol); symbol++) {
+ numberOfBits += this.numberOfBits[symbol] * counts[symbol];
+ }
+
+ return numberOfBits >>> 3; // convert to bytes
+ }
+
+ // http://fastcompression.blogspot.com/2015/07/huffman-revisited-part-3-depth-limited.html
+ private static int setMaxHeight(NodeTable nodeTable, int lastNonZero, int maxNumberOfBits, HuffmanCompressionTableWorkspace workspace)
+ {
+ int largestBits = nodeTable.numberOfBits[lastNonZero];
+
+ if (largestBits <= maxNumberOfBits) {
+ return largestBits; // early exit: no elements > maxNumberOfBits
+ }
+
+ // there are several too large elements (at least >= 2)
+ int totalCost = 0;
+ int baseCost = 1 << (largestBits - maxNumberOfBits);
+ int n = lastNonZero;
+
+ while (nodeTable.numberOfBits[n] > maxNumberOfBits) {
+ totalCost += baseCost - (1 << (largestBits - nodeTable.numberOfBits[n]));
+ nodeTable.numberOfBits[n ] = (byte) maxNumberOfBits;
+ n--;
+ } // n stops at nodeTable.numberOfBits[n + offset] <= maxNumberOfBits
+
+ while (nodeTable.numberOfBits[n] == maxNumberOfBits) {
+ n--; // n ends at index of smallest symbol using < maxNumberOfBits
+ }
+
+ // renormalize totalCost
+ totalCost >>>= (largestBits - maxNumberOfBits); // note: totalCost is necessarily a multiple of baseCost
+
+ // repay normalized cost
+ int noSymbol = 0xF0F0F0F0;
+ int[] rankLast = workspace.rankLast;
+ Arrays.fill(rankLast, noSymbol);
+
+ // Get pos of last (smallest) symbol per rank
+ int currentNbBits = maxNumberOfBits;
+ for (int pos = n; pos >= 0; pos--) {
+ if (nodeTable.numberOfBits[pos] >= currentNbBits) {
+ continue;
+ }
+ currentNbBits = nodeTable.numberOfBits[pos]; // < maxNumberOfBits
+ rankLast[maxNumberOfBits - currentNbBits] = pos;
+ }
+
+ while (totalCost > 0) {
+ int numberOfBitsToDecrease = Util.highestBit(totalCost) + 1;
+ for (; numberOfBitsToDecrease > 1; numberOfBitsToDecrease--) {
+ int highPosition = rankLast[numberOfBitsToDecrease];
+ int lowPosition = rankLast[numberOfBitsToDecrease - 1];
+ if (highPosition == noSymbol) {
+ continue;
+ }
+ if (lowPosition == noSymbol) {
+ break;
+ }
+ int highTotal = nodeTable.count[highPosition];
+ int lowTotal = 2 * nodeTable.count[lowPosition];
+ if (highTotal <= lowTotal) {
+ break;
+ }
+ }
+
+ // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !)
+ // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary
+ while ((numberOfBitsToDecrease <= MAX_TABLE_LOG) && (rankLast[numberOfBitsToDecrease] == noSymbol)) {
+ numberOfBitsToDecrease++;
+ }
+ totalCost -= 1 << (numberOfBitsToDecrease - 1);
+ if (rankLast[numberOfBitsToDecrease - 1] == noSymbol) {
+ rankLast[numberOfBitsToDecrease - 1] = rankLast[numberOfBitsToDecrease]; // this rank is no longer empty
+ }
+ nodeTable.numberOfBits[rankLast[numberOfBitsToDecrease]]++;
+ if (rankLast[numberOfBitsToDecrease] == 0) { /* special case, reached largest symbol */
+ rankLast[numberOfBitsToDecrease] = noSymbol;
+ }
+ else {
+ rankLast[numberOfBitsToDecrease]--;
+ if (nodeTable.numberOfBits[rankLast[numberOfBitsToDecrease]] != maxNumberOfBits - numberOfBitsToDecrease) {
+ rankLast[numberOfBitsToDecrease] = noSymbol; // this rank is now empty
+ }
+ }
+ }
+
+ while (totalCost < 0) { // Sometimes, cost correction overshoot
+ if (rankLast[1] == noSymbol) { /* special case : no rank 1 symbol (using maxNumberOfBits-1); let's create one from largest rank 0 (using maxNumberOfBits) */
+ while (nodeTable.numberOfBits[n] == maxNumberOfBits) {
+ n--;
+ }
+ nodeTable.numberOfBits[n + 1]--;
+ rankLast[1] = n + 1;
+ totalCost++;
+ continue;
+ }
+ nodeTable.numberOfBits[rankLast[1] + 1]--;
+ rankLast[1]++;
+ totalCost++;
+ }
+
+ return maxNumberOfBits;
+ }
+
+ /**
+ * All elements within weightTable must be <= Huffman.MAX_TABLE_LOG
+ */
+ private static int compressWeights(Object outputBase, long outputAddress, int outputSize, byte[] weights, int weightsLength, HuffmanTableWriterWorkspace workspace)
+ {
+ if (weightsLength <= 1) {
+ return 0; // Not compressible
+ }
+
+ // Scan input and build symbol stats
+ int[] counts = workspace.counts;
+ Histogram.count(weights, weightsLength, counts);
+ int maxSymbol = Histogram.findMaxSymbol(counts, MAX_TABLE_LOG);
+ int maxCount = Histogram.findLargestCount(counts, maxSymbol);
+
+ if (maxCount == weightsLength) {
+ return 1; // only a single symbol in source
+ }
+ if (maxCount == 1) {
+ return 0; // each symbol present maximum once => not compressible
+ }
+
+ short[] normalizedCounts = workspace.normalizedCounts;
+
+ int tableLog = FiniteStateEntropy.optimalTableLog(MAX_FSE_TABLE_LOG, weightsLength, maxSymbol);
+ FiniteStateEntropy.normalizeCounts(normalizedCounts, tableLog, counts, weightsLength, maxSymbol);
+
+ long output = outputAddress;
+ long outputLimit = outputAddress + outputSize;
+
+ // Write table description header
+ int headerSize = FiniteStateEntropy.writeNormalizedCounts(outputBase, output, outputSize, normalizedCounts, maxSymbol, tableLog);
+ output += headerSize;
+
+ // Compress
+ FseCompressionTable compressionTable = workspace.fseTable;
+ compressionTable.initialize(normalizedCounts, maxSymbol, tableLog);
+ int compressedSize = FiniteStateEntropy.compress(outputBase, output, (int) (outputLimit - output), weights, weightsLength, compressionTable);
+ if (compressedSize == 0) {
+ return 0;
+ }
+ output += compressedSize;
+
+ return (int) (output - outputAddress);
+ }
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/HuffmanCompressionTableWorkspace.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/HuffmanCompressionTableWorkspace.java
new file mode 100644
index 00000000000..b6ad2adaec7
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/HuffmanCompressionTableWorkspace.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+import java.util.Arrays;
+
+class HuffmanCompressionTableWorkspace
+{
+ public final NodeTable nodeTable = new NodeTable((2 * Huffman.MAX_SYMBOL_COUNT - 1)); // number of nodes in binary tree with MAX_SYMBOL_COUNT leaves
+
+ public final short[] entriesPerRank = new short[Huffman.MAX_TABLE_LOG + 1];
+ public final short[] valuesPerRank = new short[Huffman.MAX_TABLE_LOG + 1];
+
+ // for setMaxHeight
+ public final int[] rankLast = new int[Huffman.MAX_TABLE_LOG + 2];
+
+ public void reset()
+ {
+ Arrays.fill(entriesPerRank, (short) 0);
+ Arrays.fill(valuesPerRank, (short) 0);
+ }
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/HuffmanCompressor.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/HuffmanCompressor.java
new file mode 100644
index 00000000000..6c94181a88f
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/HuffmanCompressor.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+import static ai.vespa.airlift.zstd.Constants.SIZE_OF_LONG;
+import static ai.vespa.airlift.zstd.Constants.SIZE_OF_SHORT;
+import static ai.vespa.airlift.zstd.UnsafeUtil.UNSAFE;
+
+class HuffmanCompressor
+{
+ private HuffmanCompressor()
+ {
+ }
+
+ public static int compress4streams(Object outputBase, long outputAddress, int outputSize, Object inputBase, long inputAddress, int inputSize, HuffmanCompressionTable table)
+ {
+ long input = inputAddress;
+ long inputLimit = inputAddress + inputSize;
+ long output = outputAddress;
+ long outputLimit = outputAddress + outputSize;
+
+ int segmentSize = (inputSize + 3) / 4;
+
+ if (outputSize < 6 /* jump table */ + 1 /* first stream */ + 1 /* second stream */ + 1 /* third stream */ + 8 /* 8 bytes minimum needed by the bitstream encoder */) {
+ return 0; // minimum space to compress successfully
+ }
+
+ if (inputSize <= 6 + 1 + 1 + 1) { // jump table + one byte per stream
+ return 0; // no saving possible: input too small
+ }
+
+ output += SIZE_OF_SHORT + SIZE_OF_SHORT + SIZE_OF_SHORT; // jump table
+
+ int compressedSize;
+
+ // first segment
+ compressedSize = compressSingleStream(outputBase, output, (int) (outputLimit - output), inputBase, input, segmentSize, table);
+ if (compressedSize == 0) {
+ return 0;
+ }
+ UNSAFE.putShort(outputBase, outputAddress, (short) compressedSize);
+ output += compressedSize;
+ input += segmentSize;
+
+ // second segment
+ compressedSize = compressSingleStream(outputBase, output, (int) (outputLimit - output), inputBase, input, segmentSize, table);
+ if (compressedSize == 0) {
+ return 0;
+ }
+ UNSAFE.putShort(outputBase, outputAddress + SIZE_OF_SHORT, (short) compressedSize);
+ output += compressedSize;
+ input += segmentSize;
+
+ // third segment
+ compressedSize = compressSingleStream(outputBase, output, (int) (outputLimit - output), inputBase, input, segmentSize, table);
+ if (compressedSize == 0) {
+ return 0;
+ }
+ UNSAFE.putShort(outputBase, outputAddress + SIZE_OF_SHORT + SIZE_OF_SHORT, (short) compressedSize);
+ output += compressedSize;
+ input += segmentSize;
+
+ // fourth segment
+ compressedSize = compressSingleStream(outputBase, output, (int) (outputLimit - output), inputBase, input, (int) (inputLimit - input), table);
+ if (compressedSize == 0) {
+ return 0;
+ }
+ output += compressedSize;
+
+ return (int) (output - outputAddress);
+ }
+
+ @SuppressWarnings("fallthrough")
+ public static int compressSingleStream(Object outputBase, long outputAddress, int outputSize, Object inputBase, long inputAddress, int inputSize, HuffmanCompressionTable table)
+ {
+ if (outputSize < SIZE_OF_LONG) {
+ return 0;
+ }
+
+ BitOutputStream bitstream = new BitOutputStream(outputBase, outputAddress, outputSize);
+ long input = inputAddress;
+
+ int n = inputSize & ~3; // join to mod 4
+
+ switch (inputSize & 3) {
+ case 3:
+ table.encodeSymbol(bitstream, UNSAFE.getByte(inputBase, input + n + 2) & 0xFF);
+ if (SIZE_OF_LONG * 8 < Huffman.MAX_TABLE_LOG * 4 + 7) {
+ bitstream.flush();
+ }
+ // fall-through
+ case 2:
+ table.encodeSymbol(bitstream, UNSAFE.getByte(inputBase, input + n + 1) & 0xFF);
+ if (SIZE_OF_LONG * 8 < Huffman.MAX_TABLE_LOG * 2 + 7) {
+ bitstream.flush();
+ }
+ // fall-through
+ case 1:
+ table.encodeSymbol(bitstream, UNSAFE.getByte(inputBase, input + n + 0) & 0xFF);
+ bitstream.flush();
+ // fall-through
+ case 0: /* fall-through */
+ default:
+ break;
+ }
+
+ for (; n > 0; n -= 4) { // note: n & 3 == 0 at this stage
+ table.encodeSymbol(bitstream, UNSAFE.getByte(inputBase, input + n - 1) & 0xFF);
+ if (SIZE_OF_LONG * 8 < Huffman.MAX_TABLE_LOG * 2 + 7) {
+ bitstream.flush();
+ }
+ table.encodeSymbol(bitstream, UNSAFE.getByte(inputBase, input + n - 2) & 0xFF);
+ if (SIZE_OF_LONG * 8 < Huffman.MAX_TABLE_LOG * 4 + 7) {
+ bitstream.flush();
+ }
+ table.encodeSymbol(bitstream, UNSAFE.getByte(inputBase, input + n - 3) & 0xFF);
+ if (SIZE_OF_LONG * 8 < Huffman.MAX_TABLE_LOG * 2 + 7) {
+ bitstream.flush();
+ }
+ table.encodeSymbol(bitstream, UNSAFE.getByte(inputBase, input + n - 4) & 0xFF);
+ bitstream.flush();
+ }
+
+ return bitstream.close();
+ }
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/HuffmanTableWriterWorkspace.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/HuffmanTableWriterWorkspace.java
new file mode 100644
index 00000000000..80f39506f07
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/HuffmanTableWriterWorkspace.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+import static ai.vespa.airlift.zstd.Huffman.MAX_FSE_TABLE_LOG;
+import static ai.vespa.airlift.zstd.Huffman.MAX_SYMBOL;
+import static ai.vespa.airlift.zstd.Huffman.MAX_TABLE_LOG;
+
+class HuffmanTableWriterWorkspace
+{
+ // for encoding weights
+ public final byte[] weights = new byte[MAX_SYMBOL]; // the weight for the last symbol is implicit
+
+ // for compressing weights
+ public final int[] counts = new int[MAX_TABLE_LOG + 1];
+ public final short[] normalizedCounts = new short[MAX_TABLE_LOG + 1];
+ public final FseCompressionTable fseTable = new FseCompressionTable(MAX_FSE_TABLE_LOG, MAX_TABLE_LOG);
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/NodeTable.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/NodeTable.java
new file mode 100644
index 00000000000..4466071025d
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/NodeTable.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+import java.util.Arrays;
+
+class NodeTable
+{
+ int[] count;
+ short[] parents;
+ int[] symbols;
+ byte[] numberOfBits;
+
+ public NodeTable(int size)
+ {
+ count = new int[size];
+ parents = new short[size];
+ symbols = new int[size];
+ numberOfBits = new byte[size];
+ }
+
+ public void reset()
+ {
+ Arrays.fill(count, 0);
+ Arrays.fill(parents, (short) 0);
+ Arrays.fill(symbols, 0);
+ Arrays.fill(numberOfBits, (byte) 0);
+ }
+
+ public void copyNode(int from, int to)
+ {
+ count[to] = count[from];
+ parents[to] = parents[from];
+ symbols[to] = symbols[from];
+ numberOfBits[to] = numberOfBits[from];
+ }
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/RepeatedOffsets.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/RepeatedOffsets.java
new file mode 100644
index 00000000000..9b6eab05611
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/RepeatedOffsets.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+class RepeatedOffsets
+{
+ private int offset0 = 1;
+ private int offset1 = 4;
+
+ private int tempOffset0;
+ private int tempOffset1;
+
+ public int getOffset0()
+ {
+ return offset0;
+ }
+
+ public int getOffset1()
+ {
+ return offset1;
+ }
+
+ public void saveOffset0(int offset)
+ {
+ tempOffset0 = offset;
+ }
+
+ public void saveOffset1(int offset)
+ {
+ tempOffset1 = offset;
+ }
+
+ public void commit()
+ {
+ offset0 = tempOffset0;
+ offset1 = tempOffset1;
+ }
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/SequenceEncoder.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/SequenceEncoder.java
new file mode 100644
index 00000000000..df80b08dd35
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/SequenceEncoder.java
@@ -0,0 +1,351 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+import static ai.vespa.airlift.zstd.Constants.DEFAULT_MAX_OFFSET_CODE_SYMBOL;
+import static ai.vespa.airlift.zstd.Constants.LITERALS_LENGTH_BITS;
+import static ai.vespa.airlift.zstd.Constants.LITERAL_LENGTH_TABLE_LOG;
+import static ai.vespa.airlift.zstd.Constants.LONG_NUMBER_OF_SEQUENCES;
+import static ai.vespa.airlift.zstd.Constants.MATCH_LENGTH_BITS;
+import static ai.vespa.airlift.zstd.Constants.MATCH_LENGTH_TABLE_LOG;
+import static ai.vespa.airlift.zstd.Constants.MAX_LITERALS_LENGTH_SYMBOL;
+import static ai.vespa.airlift.zstd.Constants.MAX_MATCH_LENGTH_SYMBOL;
+import static ai.vespa.airlift.zstd.Constants.MAX_OFFSET_CODE_SYMBOL;
+import static ai.vespa.airlift.zstd.Constants.OFFSET_TABLE_LOG;
+import static ai.vespa.airlift.zstd.Constants.SEQUENCE_ENCODING_BASIC;
+import static ai.vespa.airlift.zstd.Constants.SEQUENCE_ENCODING_COMPRESSED;
+import static ai.vespa.airlift.zstd.Constants.SEQUENCE_ENCODING_RLE;
+import static ai.vespa.airlift.zstd.Constants.SIZE_OF_SHORT;
+import static ai.vespa.airlift.zstd.FiniteStateEntropy.optimalTableLog;
+import static ai.vespa.airlift.zstd.UnsafeUtil.UNSAFE;
+import static ai.vespa.airlift.zstd.Util.checkArgument;
+
+class SequenceEncoder
+{
+ private static final int DEFAULT_LITERAL_LENGTH_NORMALIZED_COUNTS_LOG = 6;
+ private static final short[] DEFAULT_LITERAL_LENGTH_NORMALIZED_COUNTS = {4, 3, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 3, 2, 1, 1, 1, 1, 1,
+ -1, -1, -1, -1};
+
+ private static final int DEFAULT_MATCH_LENGTH_NORMALIZED_COUNTS_LOG = 6;
+ private static final short[] DEFAULT_MATCH_LENGTH_NORMALIZED_COUNTS = {1, 4, 3, 2, 2, 2, 2, 2,
+ 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, -1, -1,
+ -1, -1, -1, -1, -1};
+
+ private static final int DEFAULT_OFFSET_NORMALIZED_COUNTS_LOG = 5;
+ private static final short[] DEFAULT_OFFSET_NORMALIZED_COUNTS = {1, 1, 1, 1, 1, 1, 2, 2,
+ 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ -1, -1, -1, -1, -1};
+
+ private static final FseCompressionTable DEFAULT_LITERAL_LENGTHS_TABLE = FseCompressionTable.newInstance(DEFAULT_LITERAL_LENGTH_NORMALIZED_COUNTS, MAX_LITERALS_LENGTH_SYMBOL, DEFAULT_LITERAL_LENGTH_NORMALIZED_COUNTS_LOG);
+ private static final FseCompressionTable DEFAULT_MATCH_LENGTHS_TABLE = FseCompressionTable.newInstance(DEFAULT_MATCH_LENGTH_NORMALIZED_COUNTS, MAX_MATCH_LENGTH_SYMBOL, DEFAULT_LITERAL_LENGTH_NORMALIZED_COUNTS_LOG);
+ private static final FseCompressionTable DEFAULT_OFFSETS_TABLE = FseCompressionTable.newInstance(DEFAULT_OFFSET_NORMALIZED_COUNTS, DEFAULT_MAX_OFFSET_CODE_SYMBOL, DEFAULT_OFFSET_NORMALIZED_COUNTS_LOG);
+
+ private SequenceEncoder()
+ {
+ }
+
+ public static int compressSequences(Object outputBase, final long outputAddress, int outputSize, SequenceStore sequences, CompressionParameters.Strategy strategy, SequenceEncodingContext workspace)
+ {
+ long output = outputAddress;
+ long outputLimit = outputAddress + outputSize;
+
+ checkArgument(outputLimit - output > 3 /* max sequence count Size */ + 1 /* encoding type flags */, "Output buffer too small");
+
+ int sequenceCount = sequences.sequenceCount;
+ if (sequenceCount < 0x7F) {
+ UNSAFE.putByte(outputBase, output, (byte) sequenceCount);
+ output++;
+ }
+ else if (sequenceCount < LONG_NUMBER_OF_SEQUENCES) {
+ UNSAFE.putByte(outputBase, output, (byte) (sequenceCount >>> 8 | 0x80));
+ UNSAFE.putByte(outputBase, output + 1, (byte) sequenceCount);
+ output += SIZE_OF_SHORT;
+ }
+ else {
+ UNSAFE.putByte(outputBase, output, (byte) 0xFF);
+ output++;
+ UNSAFE.putShort(outputBase, output, (short) (sequenceCount - LONG_NUMBER_OF_SEQUENCES));
+ output += SIZE_OF_SHORT;
+ }
+
+ if (sequenceCount == 0) {
+ return (int) (output - outputAddress);
+ }
+
+ // flags for FSE encoding type
+ long headerAddress = output++;
+
+ int maxSymbol;
+ int largestCount;
+
+ // literal lengths
+ int[] counts = workspace.counts;
+ Histogram.count(sequences.literalLengthCodes, sequenceCount, workspace.counts);
+ maxSymbol = Histogram.findMaxSymbol(counts, MAX_LITERALS_LENGTH_SYMBOL);
+ largestCount = Histogram.findLargestCount(counts, maxSymbol);
+
+ int literalsLengthEncodingType = selectEncodingType(largestCount, sequenceCount, DEFAULT_LITERAL_LENGTH_NORMALIZED_COUNTS_LOG, true, strategy);
+
+ FseCompressionTable literalLengthTable;
+ switch (literalsLengthEncodingType) {
+ case SEQUENCE_ENCODING_RLE:
+ UNSAFE.putByte(outputBase, output, sequences.literalLengthCodes[0]);
+ output++;
+ workspace.literalLengthTable.initializeRleTable(maxSymbol);
+ literalLengthTable = workspace.literalLengthTable;
+ break;
+ case SEQUENCE_ENCODING_BASIC:
+ literalLengthTable = DEFAULT_LITERAL_LENGTHS_TABLE;
+ break;
+ case SEQUENCE_ENCODING_COMPRESSED:
+ output += buildCompressionTable(
+ workspace.literalLengthTable,
+ outputBase,
+ output,
+ outputLimit,
+ sequenceCount,
+ LITERAL_LENGTH_TABLE_LOG,
+ sequences.literalLengthCodes,
+ workspace.counts,
+ maxSymbol,
+ workspace.normalizedCounts);
+ literalLengthTable = workspace.literalLengthTable;
+ break;
+ default:
+ throw new UnsupportedOperationException("not yet implemented");
+ }
+
+ // offsets
+ Histogram.count(sequences.offsetCodes, sequenceCount, workspace.counts);
+ maxSymbol = Histogram.findMaxSymbol(counts, MAX_OFFSET_CODE_SYMBOL);
+ largestCount = Histogram.findLargestCount(counts, maxSymbol);
+
+ // We can only use the basic table if max <= DEFAULT_MAX_OFFSET_CODE_SYMBOL, otherwise the offsets are too large .
+ boolean defaultAllowed = maxSymbol < DEFAULT_MAX_OFFSET_CODE_SYMBOL;
+
+ int offsetEncodingType = selectEncodingType(largestCount, sequenceCount, DEFAULT_OFFSET_NORMALIZED_COUNTS_LOG, defaultAllowed, strategy);
+
+ FseCompressionTable offsetCodeTable;
+ switch (offsetEncodingType) {
+ case SEQUENCE_ENCODING_RLE:
+ UNSAFE.putByte(outputBase, output, sequences.offsetCodes[0]);
+ output++;
+ workspace.offsetCodeTable.initializeRleTable(maxSymbol);
+ offsetCodeTable = workspace.offsetCodeTable;
+ break;
+ case SEQUENCE_ENCODING_BASIC:
+ offsetCodeTable = DEFAULT_OFFSETS_TABLE;
+ break;
+ case SEQUENCE_ENCODING_COMPRESSED:
+ output += buildCompressionTable(
+ workspace.offsetCodeTable,
+ outputBase,
+ output,
+ output + outputSize,
+ sequenceCount,
+ OFFSET_TABLE_LOG,
+ sequences.offsetCodes,
+ workspace.counts,
+ maxSymbol,
+ workspace.normalizedCounts);
+ offsetCodeTable = workspace.offsetCodeTable;
+ break;
+ default:
+ throw new UnsupportedOperationException("not yet implemented");
+ }
+
+ // match lengths
+ Histogram.count(sequences.matchLengthCodes, sequenceCount, workspace.counts);
+ maxSymbol = Histogram.findMaxSymbol(counts, MAX_MATCH_LENGTH_SYMBOL);
+ largestCount = Histogram.findLargestCount(counts, maxSymbol);
+
+ int matchLengthEncodingType = selectEncodingType(largestCount, sequenceCount, DEFAULT_MATCH_LENGTH_NORMALIZED_COUNTS_LOG, true, strategy);
+
+ FseCompressionTable matchLengthTable;
+ switch (matchLengthEncodingType) {
+ case SEQUENCE_ENCODING_RLE:
+ UNSAFE.putByte(outputBase, output, sequences.matchLengthCodes[0]);
+ output++;
+ workspace.matchLengthTable.initializeRleTable(maxSymbol);
+ matchLengthTable = workspace.matchLengthTable;
+ break;
+ case SEQUENCE_ENCODING_BASIC:
+ matchLengthTable = DEFAULT_MATCH_LENGTHS_TABLE;
+ break;
+ case SEQUENCE_ENCODING_COMPRESSED:
+ output += buildCompressionTable(
+ workspace.matchLengthTable,
+ outputBase,
+ output,
+ outputLimit,
+ sequenceCount,
+ MATCH_LENGTH_TABLE_LOG,
+ sequences.matchLengthCodes,
+ workspace.counts,
+ maxSymbol,
+ workspace.normalizedCounts);
+ matchLengthTable = workspace.matchLengthTable;
+ break;
+ default:
+ throw new UnsupportedOperationException("not yet implemented");
+ }
+
+ // flags
+ UNSAFE.putByte(outputBase, headerAddress, (byte) ((literalsLengthEncodingType << 6) | (offsetEncodingType << 4) | (matchLengthEncodingType << 2)));
+
+ output += encodeSequences(outputBase, output, outputLimit, matchLengthTable, offsetCodeTable, literalLengthTable, sequences);
+
+ return (int) (output - outputAddress);
+ }
+
+ private static int buildCompressionTable(FseCompressionTable table, Object outputBase, long output, long outputLimit, int sequenceCount, int maxTableLog, byte[] codes, int[] counts, int maxSymbol, short[] normalizedCounts)
+ {
+ int tableLog = optimalTableLog(maxTableLog, sequenceCount, maxSymbol);
+
+ // this is a minor optimization. The last symbol is embedded in the initial FSE state, so it's not part of the bitstream. We can omit it from the
+ // statistics (but only if its count is > 1). This makes the statistics a tiny bit more accurate.
+ if (counts[codes[sequenceCount - 1]] > 1) {
+ counts[codes[sequenceCount - 1]]--;
+ sequenceCount--;
+ }
+
+ FiniteStateEntropy.normalizeCounts(normalizedCounts, tableLog, counts, sequenceCount, maxSymbol);
+ table.initialize(normalizedCounts, maxSymbol, tableLog);
+
+ return FiniteStateEntropy.writeNormalizedCounts(outputBase, output, (int) (outputLimit - output), normalizedCounts, maxSymbol, tableLog); // TODO: pass outputLimit directly
+ }
+
+ private static int encodeSequences(
+ Object outputBase,
+ long output,
+ long outputLimit,
+ FseCompressionTable matchLengthTable,
+ FseCompressionTable offsetsTable,
+ FseCompressionTable literalLengthTable,
+ SequenceStore sequences)
+ {
+ byte[] matchLengthCodes = sequences.matchLengthCodes;
+ byte[] offsetCodes = sequences.offsetCodes;
+ byte[] literalLengthCodes = sequences.literalLengthCodes;
+
+ BitOutputStream blockStream = new BitOutputStream(outputBase, output, (int) (outputLimit - output));
+
+ int sequenceCount = sequences.sequenceCount;
+
+ // first symbols
+ int matchLengthState = matchLengthTable.begin(matchLengthCodes[sequenceCount - 1]);
+ int offsetState = offsetsTable.begin(offsetCodes[sequenceCount - 1]);
+ int literalLengthState = literalLengthTable.begin(literalLengthCodes[sequenceCount - 1]);
+
+ blockStream.addBits(sequences.literalLengths[sequenceCount - 1], LITERALS_LENGTH_BITS[literalLengthCodes[sequenceCount - 1]]);
+ blockStream.addBits(sequences.matchLengths[sequenceCount - 1], MATCH_LENGTH_BITS[matchLengthCodes[sequenceCount - 1]]);
+ blockStream.addBits(sequences.offsets[sequenceCount - 1], offsetCodes[sequenceCount - 1]);
+ blockStream.flush();
+
+ if (sequenceCount >= 2) {
+ for (int n = sequenceCount - 2; n >= 0; n--) {
+ byte literalLengthCode = literalLengthCodes[n];
+ byte offsetCode = offsetCodes[n];
+ byte matchLengthCode = matchLengthCodes[n];
+
+ int literalLengthBits = LITERALS_LENGTH_BITS[literalLengthCode];
+ int offsetBits = offsetCode;
+ int matchLengthBits = MATCH_LENGTH_BITS[matchLengthCode];
+
+ // (7)
+ offsetState = offsetsTable.encode(blockStream, offsetState, offsetCode); // 15
+ matchLengthState = matchLengthTable.encode(blockStream, matchLengthState, matchLengthCode); // 24
+ literalLengthState = literalLengthTable.encode(blockStream, literalLengthState, literalLengthCode); // 33
+
+ if ((offsetBits + matchLengthBits + literalLengthBits >= 64 - 7 - (LITERAL_LENGTH_TABLE_LOG + MATCH_LENGTH_TABLE_LOG + OFFSET_TABLE_LOG))) {
+ blockStream.flush(); /* (7)*/
+ }
+
+ blockStream.addBits(sequences.literalLengths[n], literalLengthBits);
+ if (((literalLengthBits + matchLengthBits) > 24)) {
+ blockStream.flush();
+ }
+
+ blockStream.addBits(sequences.matchLengths[n], matchLengthBits);
+ if ((offsetBits + matchLengthBits + literalLengthBits > 56)) {
+ blockStream.flush();
+ }
+
+ blockStream.addBits(sequences.offsets[n], offsetBits); // 31
+ blockStream.flush(); // (7)
+ }
+ }
+
+ matchLengthTable.finish(blockStream, matchLengthState);
+ offsetsTable.finish(blockStream, offsetState);
+ literalLengthTable.finish(blockStream, literalLengthState);
+
+ int streamSize = blockStream.close();
+ checkArgument(streamSize > 0, "Output buffer too small");
+
+ return streamSize;
+ }
+
+ private static int selectEncodingType(
+ int largestCount,
+ int sequenceCount,
+ int defaultNormalizedCountsLog,
+ boolean isDefaultTableAllowed,
+ CompressionParameters.Strategy strategy)
+ {
+ if (largestCount == sequenceCount) { // => all entries are equal
+ if (isDefaultTableAllowed && sequenceCount <= 2) {
+ /* Prefer set_basic over set_rle when there are 2 or fewer symbols,
+ * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
+ * If basic encoding isn't possible, always choose RLE.
+ */
+ return SEQUENCE_ENCODING_BASIC;
+ }
+
+ return SEQUENCE_ENCODING_RLE;
+ }
+
+ if (strategy.ordinal() < CompressionParameters.Strategy.LAZY.ordinal()) { // TODO: more robust check. Maybe encapsulate in strategy objects
+ if (isDefaultTableAllowed) {
+ int factor = 10 - strategy.ordinal(); // TODO more robust. Move it to strategy
+ int baseLog = 3;
+ long minNumberOfSequences = ((1L << defaultNormalizedCountsLog) * factor) >> baseLog; /* 28-36 for offset, 56-72 for lengths */
+
+ if ((sequenceCount < minNumberOfSequences) || (largestCount < (sequenceCount >> (defaultNormalizedCountsLog - 1)))) {
+ /* The format allows default tables to be repeated, but it isn't useful.
+ * When using simple heuristics to select encoding type, we don't want
+ * to confuse these tables with dictionaries. When running more careful
+ * analysis, we don't need to waste time checking both repeating tables
+ * and default tables.
+ */
+ return SEQUENCE_ENCODING_BASIC;
+ }
+ }
+ }
+ else {
+ // TODO implement when other strategies are supported
+ throw new UnsupportedOperationException("not yet implemented");
+ }
+
+ return SEQUENCE_ENCODING_COMPRESSED;
+ }
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/SequenceEncodingContext.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/SequenceEncodingContext.java
new file mode 100644
index 00000000000..da5978336e8
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/SequenceEncodingContext.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+import static ai.vespa.airlift.zstd.Constants.MAX_LITERALS_LENGTH_SYMBOL;
+import static ai.vespa.airlift.zstd.Constants.MAX_MATCH_LENGTH_SYMBOL;
+import static ai.vespa.airlift.zstd.Constants.MAX_OFFSET_CODE_SYMBOL;
+
+class SequenceEncodingContext
+{
+ private static final int MAX_SEQUENCES = Math.max(MAX_LITERALS_LENGTH_SYMBOL, MAX_MATCH_LENGTH_SYMBOL);
+
+ public final FseCompressionTable literalLengthTable = new FseCompressionTable(Constants.LITERAL_LENGTH_TABLE_LOG, MAX_LITERALS_LENGTH_SYMBOL);
+ public final FseCompressionTable offsetCodeTable = new FseCompressionTable(Constants.OFFSET_TABLE_LOG, MAX_OFFSET_CODE_SYMBOL);
+ public final FseCompressionTable matchLengthTable = new FseCompressionTable(Constants.MATCH_LENGTH_TABLE_LOG, MAX_MATCH_LENGTH_SYMBOL);
+
+ public final int[] counts = new int[MAX_SEQUENCES + 1];
+ public final short[] normalizedCounts = new short[MAX_SEQUENCES + 1];
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/SequenceStore.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/SequenceStore.java
new file mode 100644
index 00000000000..f01d54f0527
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/SequenceStore.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+import static ai.vespa.airlift.zstd.Constants.SIZE_OF_LONG;
+import static ai.vespa.airlift.zstd.UnsafeUtil.UNSAFE;
+import static sun.misc.Unsafe.ARRAY_BYTE_BASE_OFFSET;
+
+class SequenceStore
+{
+ public final byte[] literalsBuffer;
+ public int literalsLength;
+
+ public final int[] offsets;
+ public final int[] literalLengths;
+ public final int[] matchLengths;
+ public int sequenceCount;
+
+ public final byte[] literalLengthCodes;
+ public final byte[] matchLengthCodes;
+ public final byte[] offsetCodes;
+
+ public LongField longLengthField;
+ public int longLengthPosition;
+
+ public enum LongField
+ {
+ LITERAL, MATCH
+ }
+
+ private static final byte[] LITERAL_LENGTH_CODE = {0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 16, 17, 17, 18, 18, 19, 19,
+ 20, 20, 20, 20, 21, 21, 21, 21,
+ 22, 22, 22, 22, 22, 22, 22, 22,
+ 23, 23, 23, 23, 23, 23, 23, 23,
+ 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24};
+
+ private static final byte[] MATCH_LENGTH_CODE = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
+ 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
+ 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
+ 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
+ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
+ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42};
+
+ public SequenceStore(int blockSize, int maxSequences)
+ {
+ offsets = new int[maxSequences];
+ literalLengths = new int[maxSequences];
+ matchLengths = new int[maxSequences];
+
+ literalLengthCodes = new byte[maxSequences];
+ matchLengthCodes = new byte[maxSequences];
+ offsetCodes = new byte[maxSequences];
+
+ literalsBuffer = new byte[blockSize];
+
+ reset();
+ }
+
+ public void appendLiterals(Object inputBase, long inputAddress, int inputSize)
+ {
+ UNSAFE.copyMemory(inputBase, inputAddress, literalsBuffer, ARRAY_BYTE_BASE_OFFSET + literalsLength, inputSize);
+ literalsLength += inputSize;
+ }
+
+ public void storeSequence(Object literalBase, long literalAddress, int literalLength, int offsetCode, int matchLengthBase)
+ {
+ long input = literalAddress;
+ long output = ARRAY_BYTE_BASE_OFFSET + literalsLength;
+ int copied = 0;
+ do {
+ UNSAFE.putLong(literalsBuffer, output, UNSAFE.getLong(literalBase, input));
+ input += SIZE_OF_LONG;
+ output += SIZE_OF_LONG;
+ copied += SIZE_OF_LONG;
+ }
+ while (copied < literalLength);
+
+ literalsLength += literalLength;
+
+ if (literalLength > 65535) {
+ longLengthField = LongField.LITERAL;
+ longLengthPosition = sequenceCount;
+ }
+ literalLengths[sequenceCount] = literalLength;
+
+ offsets[sequenceCount] = offsetCode + 1;
+
+ if (matchLengthBase > 65535) {
+ longLengthField = LongField.MATCH;
+ longLengthPosition = sequenceCount;
+ }
+
+ matchLengths[sequenceCount] = matchLengthBase;
+
+ sequenceCount++;
+ }
+
+ public void reset()
+ {
+ literalsLength = 0;
+ sequenceCount = 0;
+ longLengthField = null;
+ }
+
+ public void generateCodes()
+ {
+ for (int i = 0; i < sequenceCount; ++i) {
+ literalLengthCodes[i] = (byte) literalLengthToCode(literalLengths[i]);
+ offsetCodes[i] = (byte) Util.highestBit(offsets[i]);
+ matchLengthCodes[i] = (byte) matchLengthToCode(matchLengths[i]);
+ }
+
+ if (longLengthField == LongField.LITERAL) {
+ literalLengthCodes[longLengthPosition] = Constants.MAX_LITERALS_LENGTH_SYMBOL;
+ }
+ if (longLengthField == LongField.MATCH) {
+ matchLengthCodes[longLengthPosition] = Constants.MAX_MATCH_LENGTH_SYMBOL;
+ }
+ }
+
+ private static int literalLengthToCode(int literalLength)
+ {
+ if (literalLength >= 64) {
+ return Util.highestBit(literalLength) + 19;
+ }
+ else {
+ return LITERAL_LENGTH_CODE[literalLength];
+ }
+ }
+
+ /*
+ * matchLengthBase = matchLength - MINMATCH
+ * (that's how it's stored in SequenceStore)
+ */
+ private static int matchLengthToCode(int matchLengthBase)
+ {
+ if (matchLengthBase >= 128) {
+ return Util.highestBit(matchLengthBase) + 36;
+ }
+ else {
+ return MATCH_LENGTH_CODE[matchLengthBase];
+ }
+ }
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/UnsafeUtil.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/UnsafeUtil.java
new file mode 100644
index 00000000000..decde678321
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/UnsafeUtil.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+import ai.vespa.airlift.compress.IncompatibleJvmException;
+import sun.misc.Unsafe;
+
+import java.lang.reflect.Field;
+import java.nio.Buffer;
+import java.nio.ByteOrder;
+
+import static java.lang.String.format;
+
+final class UnsafeUtil
+{
+ public static final Unsafe UNSAFE;
+ private static final long ADDRESS_OFFSET;
+
+ private UnsafeUtil() {}
+
+ static {
+ ByteOrder order = ByteOrder.nativeOrder();
+ if (!order.equals(ByteOrder.LITTLE_ENDIAN)) {
+ throw new IncompatibleJvmException(format("Zstandard requires a little endian platform (found %s)", order));
+ }
+
+ try {
+ Field theUnsafe = Unsafe.class.getDeclaredField("theUnsafe");
+ theUnsafe.setAccessible(true);
+ UNSAFE = (Unsafe) theUnsafe.get(null);
+ }
+ catch (Exception e) {
+ throw new IncompatibleJvmException("Zstandard requires access to sun.misc.Unsafe");
+ }
+
+ try {
+ // fetch the address field for direct buffers
+ ADDRESS_OFFSET = UNSAFE.objectFieldOffset(Buffer.class.getDeclaredField("address"));
+ }
+ catch (NoSuchFieldException e) {
+ throw new IncompatibleJvmException("Zstandard requires access to java.nio.Buffer raw address field");
+ }
+ }
+
+ public static long getAddress(Buffer buffer)
+ {
+ if (!buffer.isDirect()) {
+ throw new IllegalArgumentException("buffer is not direct");
+ }
+
+ return UNSAFE.getLong(buffer, ADDRESS_OFFSET);
+ }
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/Util.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/Util.java
new file mode 100644
index 00000000000..d0e622f02c9
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/Util.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+import ai.vespa.airlift.compress.MalformedInputException;
+
+import static ai.vespa.airlift.zstd.Constants.SIZE_OF_SHORT;
+import static ai.vespa.airlift.zstd.UnsafeUtil.UNSAFE;
+
+final class Util
+{
+ private Util()
+ {
+ }
+
+ public static int highestBit(int value)
+ {
+ return 31 - Integer.numberOfLeadingZeros(value);
+ }
+
+ public static boolean isPowerOf2(int value)
+ {
+ return (value & (value - 1)) == 0;
+ }
+
+ public static int mask(int bits)
+ {
+ return (1 << bits) - 1;
+ }
+
+ public static void verify(boolean condition, long offset, String reason)
+ {
+ if (!condition) {
+ throw new MalformedInputException(offset, reason);
+ }
+ }
+
+ public static void checkArgument(boolean condition, String reason)
+ {
+ if (!condition) {
+ throw new IllegalArgumentException(reason);
+ }
+ }
+
+ public static void checkState(boolean condition, String reason)
+ {
+ if (!condition) {
+ throw new IllegalStateException(reason);
+ }
+ }
+
+ public static MalformedInputException fail(long offset, String reason)
+ {
+ throw new MalformedInputException(offset, reason);
+ }
+
+ public static int cycleLog(int hashLog, CompressionParameters.Strategy strategy)
+ {
+ int cycleLog = hashLog;
+ if (strategy == CompressionParameters.Strategy.BTLAZY2 || strategy == CompressionParameters.Strategy.BTOPT || strategy == CompressionParameters.Strategy.BTULTRA) {
+ cycleLog = hashLog - 1;
+ }
+ return cycleLog;
+ }
+
+ public static void put24BitLittleEndian(Object outputBase, long outputAddress, int value)
+ {
+ UNSAFE.putShort(outputBase, outputAddress, (short) value);
+ UNSAFE.putByte(outputBase, outputAddress + SIZE_OF_SHORT, (byte) (value >>> Short.SIZE));
+ }
+
+ // provides the minimum logSize to safely represent a distribution
+ public static int minTableLog(int inputSize, int maxSymbolValue)
+ {
+ if (inputSize <= 1) {
+ throw new IllegalArgumentException("Not supported. RLE should be used instead"); // TODO
+ }
+
+ int minBitsSrc = highestBit((inputSize - 1)) + 1;
+ int minBitsSymbols = highestBit(maxSymbolValue) + 2;
+ return Math.min(minBitsSrc, minBitsSymbols);
+ }
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/XxHash64.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/XxHash64.java
new file mode 100644
index 00000000000..df2c869d11b
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/XxHash64.java
@@ -0,0 +1,286 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+import static ai.vespa.airlift.zstd.Constants.SIZE_OF_LONG;
+import static ai.vespa.airlift.zstd.UnsafeUtil.UNSAFE;
+import static java.lang.Long.rotateLeft;
+import static java.lang.Math.min;
+import static java.lang.String.format;
+import static sun.misc.Unsafe.ARRAY_BYTE_BASE_OFFSET;
+
+// forked from https://github.com/airlift/slice
+final class XxHash64
+{
+ private static final long PRIME64_1 = 0x9E3779B185EBCA87L;
+ private static final long PRIME64_2 = 0xC2B2AE3D27D4EB4FL;
+ private static final long PRIME64_3 = 0x165667B19E3779F9L;
+ private static final long PRIME64_4 = 0x85EBCA77C2b2AE63L;
+ private static final long PRIME64_5 = 0x27D4EB2F165667C5L;
+
+ private static final long DEFAULT_SEED = 0;
+
+ private final long seed;
+
+ private static final long BUFFER_ADDRESS = ARRAY_BYTE_BASE_OFFSET;
+ private final byte[] buffer = new byte[32];
+ private int bufferSize;
+
+ private long bodyLength;
+
+ private long v1;
+ private long v2;
+ private long v3;
+ private long v4;
+
+ public static long hash(long seed, Object base, long address, int length)
+ {
+ XxHash64 hasher = new XxHash64(seed);
+ hasher.updateHash(base, address, length);
+ return hasher.hash();
+ }
+
+ public XxHash64()
+ {
+ this(DEFAULT_SEED);
+ }
+
+ public XxHash64(long seed)
+ {
+ this.seed = seed;
+ this.v1 = seed + PRIME64_1 + PRIME64_2;
+ this.v2 = seed + PRIME64_2;
+ this.v3 = seed;
+ this.v4 = seed - PRIME64_1;
+ }
+
+ public XxHash64 update(byte[] data)
+ {
+ return update(data, 0, data.length);
+ }
+
+ public XxHash64 update(byte[] data, int offset, int length)
+ {
+ checkPositionIndexes(offset, offset + length, data.length);
+ updateHash(data, ARRAY_BYTE_BASE_OFFSET + offset, length);
+ return this;
+ }
+
+ public long hash()
+ {
+ long hash;
+ if (bodyLength > 0) {
+ hash = computeBody();
+ }
+ else {
+ hash = seed + PRIME64_5;
+ }
+
+ hash += bodyLength + bufferSize;
+
+ return updateTail(hash, buffer, BUFFER_ADDRESS, 0, bufferSize);
+ }
+
+ private static String badPositionIndex(long index, long size, String desc)
+ {
+ if (index < 0) {
+ return format("%s (%s) must not be negative", desc, index);
+ }
+ else if (size < 0) {
+ throw new IllegalArgumentException("negative size: " + size);
+ }
+ else { // index > size
+ return format("%s (%s) must not be greater than size (%s)", desc, index, size);
+ }
+ }
+
+ private static String badPositionIndexes(int start, int end, int size)
+ {
+ if (start < 0 || start > size) {
+ return badPositionIndex(start, size, "start index");
+ }
+ if (end < 0 || end > size) {
+ return badPositionIndex(end, size, "end index");
+ }
+ // end < start
+ return format("end index (%s) must not be less than start index (%s)", end, start);
+ }
+
+ private static void checkPositionIndexes(int start, int end, int size)
+ {
+ // Carefully optimized for execution by hotspot
+ if (start < 0 || end < start || end > size) {
+ throw new IndexOutOfBoundsException(badPositionIndexes(start, end, size));
+ }
+ }
+
+ private long computeBody()
+ {
+ long hash = rotateLeft(v1, 1) + rotateLeft(v2, 7) + rotateLeft(v3, 12) + rotateLeft(v4, 18);
+
+ hash = update(hash, v1);
+ hash = update(hash, v2);
+ hash = update(hash, v3);
+ hash = update(hash, v4);
+
+ return hash;
+ }
+
+ private void updateHash(Object base, long address, int length)
+ {
+ if (bufferSize > 0) {
+ int available = min(32 - bufferSize, length);
+
+ UNSAFE.copyMemory(base, address, buffer, BUFFER_ADDRESS + bufferSize, available);
+
+ bufferSize += available;
+ address += available;
+ length -= available;
+
+ if (bufferSize == 32) {
+ updateBody(buffer, BUFFER_ADDRESS, bufferSize);
+ bufferSize = 0;
+ }
+ }
+
+ if (length >= 32) {
+ int index = updateBody(base, address, length);
+ address += index;
+ length -= index;
+ }
+
+ if (length > 0) {
+ UNSAFE.copyMemory(base, address, buffer, BUFFER_ADDRESS, length);
+ bufferSize = length;
+ }
+ }
+
+ private int updateBody(Object base, long address, int length)
+ {
+ int remaining = length;
+ while (remaining >= 32) {
+ v1 = mix(v1, UNSAFE.getLong(base, address));
+ v2 = mix(v2, UNSAFE.getLong(base, address + 8));
+ v3 = mix(v3, UNSAFE.getLong(base, address + 16));
+ v4 = mix(v4, UNSAFE.getLong(base, address + 24));
+
+ address += 32;
+ remaining -= 32;
+ }
+
+ int index = length - remaining;
+ bodyLength += index;
+ return index;
+ }
+
+ public static long hash(long value)
+ {
+ long hash = DEFAULT_SEED + PRIME64_5 + SIZE_OF_LONG;
+ hash = updateTail(hash, value);
+ hash = finalShuffle(hash);
+
+ return hash;
+ }
+
+ private static long updateTail(long hash, Object base, long address, int index, int length)
+ {
+ while (index <= length - 8) {
+ hash = updateTail(hash, UNSAFE.getLong(base, address + index));
+ index += 8;
+ }
+
+ if (index <= length - 4) {
+ hash = updateTail(hash, UNSAFE.getInt(base, address + index));
+ index += 4;
+ }
+
+ while (index < length) {
+ hash = updateTail(hash, UNSAFE.getByte(base, address + index));
+ index++;
+ }
+
+ hash = finalShuffle(hash);
+
+ return hash;
+ }
+
+ private static long updateBody(long seed, Object base, long address, int length)
+ {
+ long v1 = seed + PRIME64_1 + PRIME64_2;
+ long v2 = seed + PRIME64_2;
+ long v3 = seed;
+ long v4 = seed - PRIME64_1;
+
+ int remaining = length;
+ while (remaining >= 32) {
+ v1 = mix(v1, UNSAFE.getLong(base, address));
+ v2 = mix(v2, UNSAFE.getLong(base, address + 8));
+ v3 = mix(v3, UNSAFE.getLong(base, address + 16));
+ v4 = mix(v4, UNSAFE.getLong(base, address + 24));
+
+ address += 32;
+ remaining -= 32;
+ }
+
+ long hash = rotateLeft(v1, 1) + rotateLeft(v2, 7) + rotateLeft(v3, 12) + rotateLeft(v4, 18);
+
+ hash = update(hash, v1);
+ hash = update(hash, v2);
+ hash = update(hash, v3);
+ hash = update(hash, v4);
+
+ return hash;
+ }
+
+ private static long mix(long current, long value)
+ {
+ return rotateLeft(current + value * PRIME64_2, 31) * PRIME64_1;
+ }
+
+ private static long update(long hash, long value)
+ {
+ long temp = hash ^ mix(0, value);
+ return temp * PRIME64_1 + PRIME64_4;
+ }
+
+ private static long updateTail(long hash, long value)
+ {
+ long temp = hash ^ mix(0, value);
+ return rotateLeft(temp, 27) * PRIME64_1 + PRIME64_4;
+ }
+
+ private static long updateTail(long hash, int value)
+ {
+ long unsigned = value & 0xFFFF_FFFFL;
+ long temp = hash ^ (unsigned * PRIME64_1);
+ return rotateLeft(temp, 23) * PRIME64_2 + PRIME64_3;
+ }
+
+ private static long updateTail(long hash, byte value)
+ {
+ int unsigned = value & 0xFF;
+ long temp = hash ^ (unsigned * PRIME64_5);
+ return rotateLeft(temp, 11) * PRIME64_1;
+ }
+
+ private static long finalShuffle(long hash)
+ {
+ hash ^= hash >>> 33;
+ hash *= PRIME64_2;
+ hash ^= hash >>> 29;
+ hash *= PRIME64_3;
+ hash ^= hash >>> 32;
+ return hash;
+ }
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/ZstdBlockDecompressor.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/ZstdBlockDecompressor.java
new file mode 100644
index 00000000000..08adf88cfa6
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/ZstdBlockDecompressor.java
@@ -0,0 +1,810 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+import java.util.Arrays;
+
+import static ai.vespa.airlift.zstd.BitInputStream.peekBits;
+import static ai.vespa.airlift.zstd.Constants.COMPRESSED_BLOCK;
+import static ai.vespa.airlift.zstd.Constants.COMPRESSED_LITERALS_BLOCK;
+import static ai.vespa.airlift.zstd.Constants.DEFAULT_MAX_OFFSET_CODE_SYMBOL;
+import static ai.vespa.airlift.zstd.Constants.LITERALS_LENGTH_BITS;
+import static ai.vespa.airlift.zstd.Constants.LITERAL_LENGTH_TABLE_LOG;
+import static ai.vespa.airlift.zstd.Constants.LONG_NUMBER_OF_SEQUENCES;
+import static ai.vespa.airlift.zstd.Constants.MATCH_LENGTH_BITS;
+import static ai.vespa.airlift.zstd.Constants.MATCH_LENGTH_TABLE_LOG;
+import static ai.vespa.airlift.zstd.Constants.MAX_BLOCK_SIZE;
+import static ai.vespa.airlift.zstd.Constants.MAX_LITERALS_LENGTH_SYMBOL;
+import static ai.vespa.airlift.zstd.Constants.MAX_MATCH_LENGTH_SYMBOL;
+import static ai.vespa.airlift.zstd.Constants.MIN_BLOCK_SIZE;
+import static ai.vespa.airlift.zstd.Constants.MIN_SEQUENCES_SIZE;
+import static ai.vespa.airlift.zstd.Constants.OFFSET_TABLE_LOG;
+import static ai.vespa.airlift.zstd.Constants.RAW_BLOCK;
+import static ai.vespa.airlift.zstd.Constants.RAW_LITERALS_BLOCK;
+import static ai.vespa.airlift.zstd.Constants.RLE_BLOCK;
+import static ai.vespa.airlift.zstd.Constants.RLE_LITERALS_BLOCK;
+import static ai.vespa.airlift.zstd.Constants.SEQUENCE_ENCODING_BASIC;
+import static ai.vespa.airlift.zstd.Constants.SEQUENCE_ENCODING_COMPRESSED;
+import static ai.vespa.airlift.zstd.Constants.SEQUENCE_ENCODING_REPEAT;
+import static ai.vespa.airlift.zstd.Constants.SEQUENCE_ENCODING_RLE;
+import static ai.vespa.airlift.zstd.Constants.SIZE_OF_INT;
+import static ai.vespa.airlift.zstd.Constants.SIZE_OF_LONG;
+import static ai.vespa.airlift.zstd.Constants.SIZE_OF_SHORT;
+import static ai.vespa.airlift.zstd.Constants.TREELESS_LITERALS_BLOCK;
+import static ai.vespa.airlift.zstd.UnsafeUtil.UNSAFE;
+import static ai.vespa.airlift.zstd.Util.fail;
+import static ai.vespa.airlift.zstd.Util.mask;
+import static ai.vespa.airlift.zstd.Util.verify;
+import static sun.misc.Unsafe.ARRAY_BYTE_BASE_OFFSET;
+
+/**
+ * Handles decompression of all blocks in a single frame.
+ **/
+class ZstdBlockDecompressor
+{
+ private static final int[] DEC_32_TABLE = {4, 1, 2, 1, 4, 4, 4, 4};
+ private static final int[] DEC_64_TABLE = {0, 0, 0, -1, 0, 1, 2, 3};
+
+ private static final int MAX_WINDOW_SIZE = 1 << 23;
+
+ private static final int[] LITERALS_LENGTH_BASE = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
+ 0x2000, 0x4000, 0x8000, 0x10000};
+
+ private static final int[] MATCH_LENGTH_BASE = {
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 37, 39, 41, 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
+ 0x1003, 0x2003, 0x4003, 0x8003, 0x10003};
+
+ private static final int[] OFFSET_CODES_BASE = {
+ 0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D,
+ 0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD,
+ 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
+ 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD};
+
+ private static final FiniteStateEntropy.Table DEFAULT_LITERALS_LENGTH_TABLE = new FiniteStateEntropy.Table(
+ 6,
+ new int[] {
+ 0, 16, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 32, 0, 0, 32, 0, 32, 0, 32, 0, 0, 32, 0, 32, 0, 32, 0, 0, 16, 32, 0, 0, 48, 16, 32, 32, 32,
+ 32, 32, 32, 32, 32, 0, 32, 32, 32, 32, 32, 32, 0, 0, 0, 0},
+ new byte[] {
+ 0, 0, 1, 3, 4, 6, 7, 9, 10, 12, 14, 16, 18, 19, 21, 22, 24, 25, 26, 27, 29, 31, 0, 1, 2, 4, 5, 7, 8, 10, 11, 13, 16, 17, 19, 20, 22, 23, 25, 25, 26, 28, 30, 0,
+ 1, 2, 3, 5, 6, 8, 9, 11, 12, 15, 17, 18, 20, 21, 23, 24, 35, 34, 33, 32},
+ new byte[] {
+ 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 6, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6, 5, 5, 5, 5, 5, 5, 4, 4, 5, 6, 6, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
+ 6, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6});
+
+ private static final FiniteStateEntropy.Table DEFAULT_OFFSET_CODES_TABLE = new FiniteStateEntropy.Table(
+ 5,
+ new int[] {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 16, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0},
+ new byte[] {0, 6, 9, 15, 21, 3, 7, 12, 18, 23, 5, 8, 14, 20, 2, 7, 11, 17, 22, 4, 8, 13, 19, 1, 6, 10, 16, 28, 27, 26, 25, 24},
+ new byte[] {5, 4, 5, 5, 5, 5, 4, 5, 5, 5, 5, 4, 5, 5, 5, 4, 5, 5, 5, 5, 4, 5, 5, 5, 4, 5, 5, 5, 5, 5, 5, 5});
+
+ private static final FiniteStateEntropy.Table DEFAULT_MATCH_LENGTH_TABLE = new FiniteStateEntropy.Table(
+ 6,
+ new int[] {
+ 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 32, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 48, 16, 32, 32, 32, 32,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ new byte[] {
+ 0, 1, 2, 3, 5, 6, 8, 10, 13, 16, 19, 22, 25, 28, 31, 33, 35, 37, 39, 41, 43, 45, 1, 2, 3, 4, 6, 7, 9, 12, 15, 18, 21, 24, 27, 30, 32, 34, 36, 38, 40, 42, 44, 1,
+ 1, 2, 4, 5, 7, 8, 11, 14, 17, 20, 23, 26, 29, 52, 51, 50, 49, 48, 47, 46},
+ new byte[] {
+ 6, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6});
+
+ private final byte[] literals = new byte[MAX_BLOCK_SIZE + SIZE_OF_LONG]; // extra space to allow for long-at-a-time copy
+
+ // current buffer containing literals
+ private Object literalsBase;
+ private long literalsAddress;
+ private long literalsLimit;
+
+ private final int[] previousOffsets = new int[3];
+
+ private final FiniteStateEntropy.Table literalsLengthTable = new FiniteStateEntropy.Table(LITERAL_LENGTH_TABLE_LOG);
+ private final FiniteStateEntropy.Table offsetCodesTable = new FiniteStateEntropy.Table(OFFSET_TABLE_LOG);
+ private final FiniteStateEntropy.Table matchLengthTable = new FiniteStateEntropy.Table(MATCH_LENGTH_TABLE_LOG);
+
+ private FiniteStateEntropy.Table currentLiteralsLengthTable;
+ private FiniteStateEntropy.Table currentOffsetCodesTable;
+ private FiniteStateEntropy.Table currentMatchLengthTable;
+
+ private final Huffman huffman = new Huffman();
+ private final FseTableReader fse = new FseTableReader();
+
+ private final FrameHeader frameHeader;
+
+ public ZstdBlockDecompressor(FrameHeader frameHeader)
+ {
+ this.frameHeader = frameHeader;
+
+ previousOffsets[0] = 1;
+ previousOffsets[1] = 4;
+ previousOffsets[2] = 8;
+ }
+
+ int decompressBlock(
+ int blockType,
+ int blockSize,
+ final Object inputBase,
+ final long inputAddress,
+ final long inputLimit,
+ final Object outputBase,
+ final long outputAddress,
+ final long outputLimit)
+ {
+ int decodedSize;
+ switch (blockType) {
+ case RAW_BLOCK:
+ verify(inputAddress + blockSize <= inputLimit, inputAddress, "Not enough input bytes");
+ decodedSize = decodeRawBlock(inputBase, inputAddress, blockSize, outputBase, outputAddress, outputLimit);
+ break;
+ case RLE_BLOCK:
+ verify(inputAddress + 1 <= inputLimit, inputAddress, "Not enough input bytes");
+ decodedSize = decodeRleBlock(blockSize, inputBase, inputAddress, outputBase, outputAddress, outputLimit);
+ break;
+ case COMPRESSED_BLOCK:
+ verify(inputAddress + blockSize <= inputLimit, inputAddress, "Not enough input bytes");
+ decodedSize = decodeCompressedBlock(inputBase, inputAddress, blockSize, outputBase, outputAddress, outputLimit, frameHeader.windowSize, outputAddress);
+ break;
+ default:
+ throw fail(inputAddress, "Invalid block type");
+ }
+ return decodedSize;
+ }
+
+ static int decodeRawBlock(Object inputBase, long inputAddress, int blockSize, Object outputBase, long outputAddress, long outputLimit)
+ {
+ verify(outputAddress + blockSize <= outputLimit, inputAddress, "Output buffer too small");
+
+ UNSAFE.copyMemory(inputBase, inputAddress, outputBase, outputAddress, blockSize);
+ return blockSize;
+ }
+
+ static int decodeRleBlock(int size, Object inputBase, long inputAddress, Object outputBase, long outputAddress, long outputLimit)
+ {
+ verify(outputAddress + size <= outputLimit, inputAddress, "Output buffer too small");
+
+ long output = outputAddress;
+ long value = UNSAFE.getByte(inputBase, inputAddress) & 0xFFL;
+
+ int remaining = size;
+ if (remaining >= SIZE_OF_LONG) {
+ long packed = value
+ | (value << 8)
+ | (value << 16)
+ | (value << 24)
+ | (value << 32)
+ | (value << 40)
+ | (value << 48)
+ | (value << 56);
+
+ do {
+ UNSAFE.putLong(outputBase, output, packed);
+ output += SIZE_OF_LONG;
+ remaining -= SIZE_OF_LONG;
+ }
+ while (remaining >= SIZE_OF_LONG);
+ }
+
+ for (int i = 0; i < remaining; i++) {
+ UNSAFE.putByte(outputBase, output, (byte) value);
+ output++;
+ }
+
+ return size;
+ }
+
+ @SuppressWarnings("fallthrough")
+ int decodeCompressedBlock(Object inputBase, final long inputAddress, int blockSize, Object outputBase, long outputAddress, long outputLimit, int windowSize, long outputAbsoluteBaseAddress)
+ {
+ long inputLimit = inputAddress + blockSize;
+ long input = inputAddress;
+
+ verify(blockSize <= MAX_BLOCK_SIZE, input, "Expected match length table to be present");
+ verify(blockSize >= MIN_BLOCK_SIZE, input, "Compressed block size too small");
+
+ // decode literals
+ int literalsBlockType = UNSAFE.getByte(inputBase, input) & 0b11;
+
+ switch (literalsBlockType) {
+ case RAW_LITERALS_BLOCK: {
+ input += decodeRawLiterals(inputBase, input, inputLimit);
+ break;
+ }
+ case RLE_LITERALS_BLOCK: {
+ input += decodeRleLiterals(inputBase, input, blockSize);
+ break;
+ }
+ case TREELESS_LITERALS_BLOCK:
+ verify(huffman.isLoaded(), input, "Dictionary is corrupted");
+ case COMPRESSED_LITERALS_BLOCK: {
+ input += decodeCompressedLiterals(inputBase, input, blockSize, literalsBlockType);
+ break;
+ }
+ default:
+ throw fail(input, "Invalid literals block encoding type");
+ }
+
+ verify(windowSize <= MAX_WINDOW_SIZE, input, "Window size too large (not yet supported)");
+
+ return decompressSequences(
+ inputBase, input, inputAddress + blockSize,
+ outputBase, outputAddress, outputLimit,
+ literalsBase, literalsAddress, literalsLimit,
+ outputAbsoluteBaseAddress);
+ }
+
+ // Decodes the Sequences section of a compressed block (RFC 8878 §3.1.1.3.2) and
+ // executes every (literals-copy, match-copy) pair into the output buffer.
+ // Returns the number of bytes written starting at outputAddress.
+ // Hot path: bits are read manually via BitInputStream helpers, and copies use
+ // 8-byte "wild" over-copies wherever fastOutputLimit / fastMatchOutputLimit allow.
+ private int decompressSequences(
+ final Object inputBase, final long inputAddress, final long inputLimit,
+ final Object outputBase, final long outputAddress, final long outputLimit,
+ final Object literalsBase, final long literalsAddress, final long literalsLimit,
+ long outputAbsoluteBaseAddress)
+ {
+ // Positions below these limits may be written 8 bytes at a time without
+ // overrunning the output buffer (head copy + tail copy respectively).
+ final long fastOutputLimit = outputLimit - SIZE_OF_LONG;
+ final long fastMatchOutputLimit = fastOutputLimit - SIZE_OF_LONG;
+
+ long input = inputAddress;
+ long output = outputAddress;
+
+ long literalsInput = literalsAddress;
+
+ int size = (int) (inputLimit - inputAddress);
+ verify(size >= MIN_SEQUENCES_SIZE, input, "Not enough input bytes");
+
+ // decode header
+ // Number_of_Sequences is variable-length: 1 byte for 0..127,
+ // 2 bytes for 128..(LONG_NUMBER_OF_SEQUENCES-1), 3 bytes (0xFF + short) above that.
+ int sequenceCount = UNSAFE.getByte(inputBase, input++) & 0xFF;
+ if (sequenceCount != 0) {
+ if (sequenceCount == 255) {
+ verify(input + SIZE_OF_SHORT <= inputLimit, input, "Not enough input bytes");
+ sequenceCount = (UNSAFE.getShort(inputBase, input) & 0xFFFF) + LONG_NUMBER_OF_SEQUENCES;
+ input += SIZE_OF_SHORT;
+ }
+ else if (sequenceCount > 127) {
+ verify(input < inputLimit, input, "Not enough input bytes");
+ sequenceCount = ((sequenceCount - 128) << 8) + (UNSAFE.getByte(inputBase, input++) & 0xFF);
+ }
+
+ verify(input + SIZE_OF_INT <= inputLimit, input, "Not enough input bytes");
+
+ // Symbol_Compression_Modes byte: bits 6-7 = literals lengths mode,
+ // bits 4-5 = offsets mode, bits 2-3 = match lengths mode, bits 0-1 reserved.
+ byte type = UNSAFE.getByte(inputBase, input++);
+
+ int literalsLengthType = (type & 0xFF) >>> 6;
+ int offsetCodesType = (type >>> 4) & 0b11;
+ int matchLengthType = (type >>> 2) & 0b11;
+
+ // FSE table descriptions appear in this fixed order in the bitstream.
+ input = computeLiteralsTable(literalsLengthType, inputBase, input, inputLimit);
+ input = computeOffsetsTable(offsetCodesType, inputBase, input, inputLimit);
+ input = computeMatchLengthTable(matchLengthType, inputBase, input, inputLimit);
+
+ // decompress sequences
+ // The sequences bitstream is read backwards from the end of the block.
+ BitInputStream.Initializer initializer = new BitInputStream.Initializer(inputBase, input, inputLimit);
+ initializer.initialize();
+ int bitsConsumed = initializer.getBitsConsumed();
+ long bits = initializer.getBits();
+ long currentAddress = initializer.getCurrentAddress();
+
+ // cache fields in locals for the hot loop
+ FiniteStateEntropy.Table currentLiteralsLengthTable = this.currentLiteralsLengthTable;
+ FiniteStateEntropy.Table currentOffsetCodesTable = this.currentOffsetCodesTable;
+ FiniteStateEntropy.Table currentMatchLengthTable = this.currentMatchLengthTable;
+
+ // initial FSE states, read in LL, OF, ML order per the format
+ int literalsLengthState = (int) peekBits(bitsConsumed, bits, currentLiteralsLengthTable.log2Size);
+ bitsConsumed += currentLiteralsLengthTable.log2Size;
+
+ int offsetCodesState = (int) peekBits(bitsConsumed, bits, currentOffsetCodesTable.log2Size);
+ bitsConsumed += currentOffsetCodesTable.log2Size;
+
+ int matchLengthState = (int) peekBits(bitsConsumed, bits, currentMatchLengthTable.log2Size);
+ bitsConsumed += currentMatchLengthTable.log2Size;
+
+ // repeated-offsets history (Repeated_Offset_1..3)
+ int[] previousOffsets = this.previousOffsets;
+
+ byte[] literalsLengthNumbersOfBits = currentLiteralsLengthTable.numberOfBits;
+ int[] literalsLengthNewStates = currentLiteralsLengthTable.newState;
+ byte[] literalsLengthSymbols = currentLiteralsLengthTable.symbol;
+
+ byte[] matchLengthNumbersOfBits = currentMatchLengthTable.numberOfBits;
+ int[] matchLengthNewStates = currentMatchLengthTable.newState;
+ byte[] matchLengthSymbols = currentMatchLengthTable.symbol;
+
+ byte[] offsetCodesNumbersOfBits = currentOffsetCodesTable.numberOfBits;
+ int[] offsetCodesNewStates = currentOffsetCodesTable.newState;
+ byte[] offsetCodesSymbols = currentOffsetCodesTable.symbol;
+
+ while (sequenceCount > 0) {
+ sequenceCount--;
+
+ // refill the bit buffer; overflow means we ran off the front of the stream
+ BitInputStream.Loader loader = new BitInputStream.Loader(inputBase, input, currentAddress, bits, bitsConsumed);
+ loader.load();
+ bitsConsumed = loader.getBitsConsumed();
+ bits = loader.getBits();
+ currentAddress = loader.getCurrentAddress();
+ if (loader.isOverflow()) {
+ verify(sequenceCount == 0, input, "Not all sequences were consumed");
+ break;
+ }
+
+ // decode sequence
+ int literalsLengthCode = literalsLengthSymbols[literalsLengthState];
+ int matchLengthCode = matchLengthSymbols[matchLengthState];
+ int offsetCode = offsetCodesSymbols[offsetCodesState];
+
+ int literalsLengthBits = LITERALS_LENGTH_BITS[literalsLengthCode];
+ int matchLengthBits = MATCH_LENGTH_BITS[matchLengthCode];
+ int offsetBits = offsetCode;
+
+ // offsets are read first (they sit deepest in the bit buffer)
+ int offset = OFFSET_CODES_BASE[offsetCode];
+ if (offsetCode > 0) {
+ offset += peekBits(bitsConsumed, bits, offsetBits);
+ bitsConsumed += offsetBits;
+ }
+
+ if (offsetCode <= 1) {
+ // offset values 1..3 refer to the repeated-offsets history; when the
+ // literals length is 0 the indices shift by one (RFC 8878 §3.1.1.5)
+ if (literalsLengthCode == 0) {
+ offset++;
+ }
+
+ if (offset != 0) {
+ int temp;
+ if (offset == 3) {
+ temp = previousOffsets[0] - 1;
+ }
+ else {
+ temp = previousOffsets[offset];
+ }
+
+ if (temp == 0) {
+ temp = 1;
+ }
+
+ // move the chosen offset to the front of the history
+ if (offset != 1) {
+ previousOffsets[2] = previousOffsets[1];
+ }
+ previousOffsets[1] = previousOffsets[0];
+ previousOffsets[0] = temp;
+
+ offset = temp;
+ }
+ else {
+ // repeat Repeated_Offset_1 unchanged
+ offset = previousOffsets[0];
+ }
+ }
+ else {
+ // new explicit offset: push it onto the history
+ previousOffsets[2] = previousOffsets[1];
+ previousOffsets[1] = previousOffsets[0];
+ previousOffsets[0] = offset;
+ }
+
+ int matchLength = MATCH_LENGTH_BASE[matchLengthCode];
+ if (matchLengthCode > 31) {
+ matchLength += peekBits(bitsConsumed, bits, matchLengthBits);
+ bitsConsumed += matchLengthBits;
+ }
+
+ int literalsLength = LITERALS_LENGTH_BASE[literalsLengthCode];
+ if (literalsLengthCode > 15) {
+ literalsLength += peekBits(bitsConsumed, bits, literalsLengthBits);
+ bitsConsumed += literalsLengthBits;
+ }
+
+ // refill mid-sequence only when the worst case could exhaust the 64-bit buffer
+ int totalBits = literalsLengthBits + matchLengthBits + offsetBits;
+ if (totalBits > 64 - 7 - (LITERAL_LENGTH_TABLE_LOG + MATCH_LENGTH_TABLE_LOG + OFFSET_TABLE_LOG)) {
+ BitInputStream.Loader loader1 = new BitInputStream.Loader(inputBase, input, currentAddress, bits, bitsConsumed);
+ loader1.load();
+
+ bitsConsumed = loader1.getBitsConsumed();
+ bits = loader1.getBits();
+ currentAddress = loader1.getCurrentAddress();
+ }
+
+ // advance the three FSE states (LL, ML, OF update order per the format)
+ int numberOfBits;
+
+ numberOfBits = literalsLengthNumbersOfBits[literalsLengthState];
+ literalsLengthState = (int) (literalsLengthNewStates[literalsLengthState] + peekBits(bitsConsumed, bits, numberOfBits)); // <= 9 bits
+ bitsConsumed += numberOfBits;
+
+ numberOfBits = matchLengthNumbersOfBits[matchLengthState];
+ matchLengthState = (int) (matchLengthNewStates[matchLengthState] + peekBits(bitsConsumed, bits, numberOfBits)); // <= 9 bits
+ bitsConsumed += numberOfBits;
+
+ numberOfBits = offsetCodesNumbersOfBits[offsetCodesState];
+ offsetCodesState = (int) (offsetCodesNewStates[offsetCodesState] + peekBits(bitsConsumed, bits, numberOfBits)); // <= 8 bits
+ bitsConsumed += numberOfBits;
+
+ // execute the sequence: literals copy followed by match copy
+ final long literalOutputLimit = output + literalsLength;
+ final long matchOutputLimit = literalOutputLimit + matchLength;
+
+ verify(matchOutputLimit <= outputLimit, input, "Output buffer too small");
+ long literalEnd = literalsInput + literalsLength;
+ verify(literalEnd <= literalsLimit, input, "Input is corrupted");
+
+ // the match source must not reach before the start of the whole output
+ long matchAddress = literalOutputLimit - offset;
+ verify(matchAddress >= outputAbsoluteBaseAddress, input, "Input is corrupted");
+
+ if (literalOutputLimit > fastOutputLimit) {
+ // too close to the end of the buffer for wild copies: byte-at-a-time path
+ executeLastSequence(outputBase, output, literalOutputLimit, matchOutputLimit, fastOutputLimit, literalsInput, matchAddress);
+ }
+ else {
+ // copy literals. literalOutputLimit <= fastOutputLimit, so we can copy
+ // long at a time with over-copy
+ output = copyLiterals(outputBase, literalsBase, output, literalsInput, literalOutputLimit);
+ copyMatch(outputBase, fastOutputLimit, output, offset, matchOutputLimit, matchAddress, matchLength, fastMatchOutputLimit);
+ }
+ output = matchOutputLimit;
+ literalsInput = literalEnd;
+ }
+ }
+
+ // last literal segment
+ output = copyLastLiteral(outputBase, literalsBase, literalsLimit, output, literalsInput);
+
+ return (int) (output - outputAddress);
+ }
+
+ // Copies the literals remaining after the final sequence straight to the output
+ // and returns the advanced output position.
+ private long copyLastLiteral(Object outputBase, Object literalsBase, long literalsLimit, long output, long literalsInput)
+ {
+ long remaining = literalsLimit - literalsInput;
+ UNSAFE.copyMemory(literalsBase, literalsInput, outputBase, output, remaining);
+ return output + remaining;
+ }
+
+ // Copies a full match: an 8-byte head (which also spreads small offsets apart,
+ // see copyMatchHead) followed by the remaining bytes via copyMatchTail.
+ private void copyMatch(Object outputBase, long fastOutputLimit, long output, int offset, long matchOutputLimit, long matchAddress, int matchLength, long fastMatchOutputLimit)
+ {
+ matchAddress = copyMatchHead(outputBase, output, offset, matchAddress);
+ output += SIZE_OF_LONG;
+ matchLength -= SIZE_OF_LONG; // first 8 bytes copied above
+
+ copyMatchTail(outputBase, fastOutputLimit, output, matchOutputLimit, matchAddress, matchLength, fastMatchOutputLimit);
+ }
+
+ // Copies the remainder of a match after the 8-byte head. Uses an unconditional
+ // long-at-a-time wild copy when the whole match ends safely before
+ // fastMatchOutputLimit; otherwise wild-copies only up to fastOutputLimit and
+ // finishes byte-by-byte to avoid writing past the output buffer.
+ private void copyMatchTail(Object outputBase, long fastOutputLimit, long output, long matchOutputLimit, long matchAddress, int matchLength, long fastMatchOutputLimit)
+ {
+ // fastMatchOutputLimit is just fastOutputLimit - SIZE_OF_LONG. It needs to be passed in so that it can be computed once for the
+ // whole invocation to decompressSequences. Otherwise, we'd just compute it here.
+ // If matchOutputLimit is < fastMatchOutputLimit, we know that even after the head (8 bytes) has been copied, the output pointer
+ // will be within fastOutputLimit, so it's safe to copy blindly before checking the limit condition
+ if (matchOutputLimit < fastMatchOutputLimit) {
+ int copied = 0;
+ do {
+ UNSAFE.putLong(outputBase, output, UNSAFE.getLong(outputBase, matchAddress));
+ output += SIZE_OF_LONG;
+ matchAddress += SIZE_OF_LONG;
+ copied += SIZE_OF_LONG;
+ }
+ while (copied < matchLength);
+ }
+ else {
+ // wild copy only while we stay 8 bytes clear of the end of the buffer
+ while (output < fastOutputLimit) {
+ UNSAFE.putLong(outputBase, output, UNSAFE.getLong(outputBase, matchAddress));
+ matchAddress += SIZE_OF_LONG;
+ output += SIZE_OF_LONG;
+ }
+
+ // exact byte-at-a-time finish for the last stretch
+ while (output < matchOutputLimit) {
+ UNSAFE.putByte(outputBase, output++, UNSAFE.getByte(outputBase, matchAddress++));
+ }
+ }
+ }
+
+ // Copies the first 8 bytes of a match and returns the adjusted match source
+ // address. For offsets < 8 the source overlaps the destination, so the bytes
+ // are copied individually and the source is re-positioned via the
+ // DEC_32_TABLE/DEC_64_TABLE increments so that source and destination end up
+ // 8 bytes apart, enabling the long-at-a-time tail copy.
+ private long copyMatchHead(Object outputBase, long output, int offset, long matchAddress)
+ {
+ // copy match
+ if (offset < 8) {
+ // 8 bytes apart so that we can copy long-at-a-time below
+ int increment32 = DEC_32_TABLE[offset];
+ int decrement64 = DEC_64_TABLE[offset];
+
+ UNSAFE.putByte(outputBase, output, UNSAFE.getByte(outputBase, matchAddress));
+ UNSAFE.putByte(outputBase, output + 1, UNSAFE.getByte(outputBase, matchAddress + 1));
+ UNSAFE.putByte(outputBase, output + 2, UNSAFE.getByte(outputBase, matchAddress + 2));
+ UNSAFE.putByte(outputBase, output + 3, UNSAFE.getByte(outputBase, matchAddress + 3));
+ matchAddress += increment32;
+
+ UNSAFE.putInt(outputBase, output + 4, UNSAFE.getInt(outputBase, matchAddress));
+ matchAddress -= decrement64;
+ }
+ else {
+ // non-overlapping (within 8 bytes): a single long copy suffices
+ UNSAFE.putLong(outputBase, output, UNSAFE.getLong(outputBase, matchAddress));
+ matchAddress += SIZE_OF_LONG;
+ }
+ return matchAddress;
+ }
+
+ // Wild-copies literals 8 bytes at a time (always at least one long), then
+ // snaps the output pointer back to the exact literal end to undo any over-copy.
+ // Caller guarantees literalOutputLimit <= fastOutputLimit so the over-copy is safe.
+ private long copyLiterals(Object outputBase, Object literalsBase, long output, long literalsInput, long literalOutputLimit)
+ {
+ long source = literalsInput;
+ long destination = output;
+ do {
+ UNSAFE.putLong(outputBase, destination, UNSAFE.getLong(literalsBase, source));
+ source += SIZE_OF_LONG;
+ destination += SIZE_OF_LONG;
+ }
+ while (destination < literalOutputLimit);
+ return literalOutputLimit; // correction in case we over-copied
+ }
+
+ // Reads (or reuses) the FSE decoding table for match lengths according to the
+ // Symbol_Compression_Modes field, setting currentMatchLengthTable.
+ // Returns the input address immediately after the table description.
+ private long computeMatchLengthTable(int matchLengthType, Object inputBase, long input, long inputLimit)
+ {
+ switch (matchLengthType) {
+ case SEQUENCE_ENCODING_RLE:
+ // single-byte table: every sequence uses the same match length code
+ verify(input < inputLimit, input, "Not enough input bytes");
+
+ byte value = UNSAFE.getByte(inputBase, input++);
+ verify(value <= MAX_MATCH_LENGTH_SYMBOL, input, "Value exceeds expected maximum value");
+
+ FseTableReader.initializeRleTable(matchLengthTable, value);
+ currentMatchLengthTable = matchLengthTable;
+ break;
+ case SEQUENCE_ENCODING_BASIC:
+ // predefined distribution from the zstd format specification
+ currentMatchLengthTable = DEFAULT_MATCH_LENGTH_TABLE;
+ break;
+ case SEQUENCE_ENCODING_REPEAT:
+ // reuse the table left over from a previous block; it must exist
+ verify(currentMatchLengthTable != null, input, "Expected match length table to be present");
+ break;
+ case SEQUENCE_ENCODING_COMPRESSED:
+ // FSE-compressed table description embedded in the stream
+ input += fse.readFseTable(matchLengthTable, inputBase, input, inputLimit, MAX_MATCH_LENGTH_SYMBOL, MATCH_LENGTH_TABLE_LOG);
+ currentMatchLengthTable = matchLengthTable;
+ break;
+ default:
+ throw fail(input, "Invalid match length encoding type");
+ }
+ return input;
+ }
+
+ // Reads (or reuses) the FSE decoding table for offset codes according to the
+ // Symbol_Compression_Modes field, setting currentOffsetCodesTable.
+ // Returns the input address immediately after the table description.
+ // Fix: the SEQUENCE_ENCODING_REPEAT error message said "match length table"
+ // (copy-paste from computeMatchLengthTable); it now names the offset codes table.
+ private long computeOffsetsTable(int offsetCodesType, Object inputBase, long input, long inputLimit)
+ {
+ switch (offsetCodesType) {
+ case SEQUENCE_ENCODING_RLE:
+ // single-byte table: every sequence uses the same offset code
+ verify(input < inputLimit, input, "Not enough input bytes");
+
+ byte value = UNSAFE.getByte(inputBase, input++);
+ verify(value <= DEFAULT_MAX_OFFSET_CODE_SYMBOL, input, "Value exceeds expected maximum value");
+
+ FseTableReader.initializeRleTable(offsetCodesTable, value);
+ currentOffsetCodesTable = offsetCodesTable;
+ break;
+ case SEQUENCE_ENCODING_BASIC:
+ // predefined distribution from the zstd format specification
+ currentOffsetCodesTable = DEFAULT_OFFSET_CODES_TABLE;
+ break;
+ case SEQUENCE_ENCODING_REPEAT:
+ // reuse the table left over from a previous block; it must exist
+ verify(currentOffsetCodesTable != null, input, "Expected offset codes table to be present");
+ break;
+ case SEQUENCE_ENCODING_COMPRESSED:
+ // FSE-compressed table description embedded in the stream
+ input += fse.readFseTable(offsetCodesTable, inputBase, input, inputLimit, DEFAULT_MAX_OFFSET_CODE_SYMBOL, OFFSET_TABLE_LOG);
+ currentOffsetCodesTable = offsetCodesTable;
+ break;
+ default:
+ throw fail(input, "Invalid offset code encoding type");
+ }
+ return input;
+ }
+
+ // Reads (or reuses) the FSE decoding table for literals lengths according to
+ // the Symbol_Compression_Modes field, setting currentLiteralsLengthTable.
+ // Returns the input address immediately after the table description.
+ // Fix: the SEQUENCE_ENCODING_REPEAT error message said "match length table"
+ // (copy-paste from computeMatchLengthTable); it now names the literals length table.
+ private long computeLiteralsTable(int literalsLengthType, Object inputBase, long input, long inputLimit)
+ {
+ switch (literalsLengthType) {
+ case SEQUENCE_ENCODING_RLE:
+ // single-byte table: every sequence uses the same literals length code
+ verify(input < inputLimit, input, "Not enough input bytes");
+
+ byte value = UNSAFE.getByte(inputBase, input++);
+ verify(value <= MAX_LITERALS_LENGTH_SYMBOL, input, "Value exceeds expected maximum value");
+
+ FseTableReader.initializeRleTable(literalsLengthTable, value);
+ currentLiteralsLengthTable = literalsLengthTable;
+ break;
+ case SEQUENCE_ENCODING_BASIC:
+ // predefined distribution from the zstd format specification
+ currentLiteralsLengthTable = DEFAULT_LITERALS_LENGTH_TABLE;
+ break;
+ case SEQUENCE_ENCODING_REPEAT:
+ // reuse the table left over from a previous block; it must exist
+ verify(currentLiteralsLengthTable != null, input, "Expected literals length table to be present");
+ break;
+ case SEQUENCE_ENCODING_COMPRESSED:
+ // FSE-compressed table description embedded in the stream
+ input += fse.readFseTable(literalsLengthTable, inputBase, input, inputLimit, MAX_LITERALS_LENGTH_SYMBOL, LITERAL_LENGTH_TABLE_LOG);
+ currentLiteralsLengthTable = literalsLengthTable;
+ break;
+ default:
+ throw fail(input, "Invalid literals length encoding type");
+ }
+ return input;
+ }
+
+ // Executes a sequence whose output would land within 8 bytes of the end of the
+ // buffer: wild copy is allowed only below fastOutputLimit; everything after
+ // that is copied byte-by-byte to avoid overrunning the output.
+ // Note: reads literals from the literalsBase field rather than a parameter.
+ private void executeLastSequence(Object outputBase, long output, long literalOutputLimit, long matchOutputLimit, long fastOutputLimit, long literalInput, long matchAddress)
+ {
+ // copy literals
+ if (output < fastOutputLimit) {
+ // wild copy
+ do {
+ UNSAFE.putLong(outputBase, output, UNSAFE.getLong(literalsBase, literalInput));
+ output += SIZE_OF_LONG;
+ literalInput += SIZE_OF_LONG;
+ }
+ while (output < fastOutputLimit);
+
+ // rewind the source by however much the wild copy overshot the limit
+ literalInput -= output - fastOutputLimit;
+ output = fastOutputLimit;
+ }
+
+ // byte-at-a-time finish for the remaining literals
+ while (output < literalOutputLimit) {
+ UNSAFE.putByte(outputBase, output, UNSAFE.getByte(literalsBase, literalInput));
+ output++;
+ literalInput++;
+ }
+
+ // copy match
+ // byte-at-a-time also handles overlapping matches (offset < 8) correctly
+ while (output < matchOutputLimit) {
+ UNSAFE.putByte(outputBase, output, UNSAFE.getByte(outputBase, matchAddress));
+ output++;
+ matchAddress++;
+ }
+ }
+
+ // Decodes a Huffman-compressed literals section into the `literals` buffer and
+ // points literalsBase/Address/Limit at the decoded bytes.
+ // Returns the number of input bytes consumed (header + compressed payload).
+ @SuppressWarnings("fallthrough")
+ private int decodeCompressedLiterals(Object inputBase, final long inputAddress, int blockSize, int literalsBlockType)
+ {
+ long input = inputAddress;
+ verify(blockSize >= 5, input, "Not enough input bytes");
+
+ // compressed
+ // Size_Format (bits 2-3 of the first byte) selects the header layout:
+ // type 0/1 -> 3-byte header with 10-bit sizes, type 2 -> 4-byte header with
+ // 14-bit sizes, type 3 -> 5-byte header with 18-bit sizes. Type 0 additionally
+ // means the payload is a single Huffman stream (hence the deliberate fallthrough).
+ int compressedSize;
+ int uncompressedSize;
+ boolean singleStream = false;
+ int headerSize;
+ int type = (UNSAFE.getByte(inputBase, input) >> 2) & 0b11;
+ switch (type) {
+ case 0:
+ singleStream = true;
+ // fall through: sizes are laid out identically to type 1
+ case 1: {
+ int header = UNSAFE.getInt(inputBase, input);
+
+ headerSize = 3;
+ uncompressedSize = (header >>> 4) & mask(10);
+ compressedSize = (header >>> 14) & mask(10);
+ break;
+ }
+ case 2: {
+ int header = UNSAFE.getInt(inputBase, input);
+
+ headerSize = 4;
+ uncompressedSize = (header >>> 4) & mask(14);
+ compressedSize = (header >>> 18) & mask(14);
+ break;
+ }
+ case 3: {
+ // read 5 little-endian bytes
+ long header = UNSAFE.getByte(inputBase, input) & 0xFF |
+ (UNSAFE.getInt(inputBase, input + 1) & 0xFFFF_FFFFL) << 8;
+
+ headerSize = 5;
+ uncompressedSize = (int) ((header >>> 4) & mask(18));
+ compressedSize = (int) ((header >>> 22) & mask(18));
+ break;
+ }
+ default:
+ throw fail(input, "Invalid literals header size type");
+ }
+
+ verify(uncompressedSize <= MAX_BLOCK_SIZE, input, "Block exceeds maximum size");
+ verify(headerSize + compressedSize <= blockSize, input, "Input is corrupted");
+
+ input += headerSize;
+
+ long inputLimit = input + compressedSize;
+ // a treeless block reuses the Huffman table from a previous block;
+ // otherwise the table description precedes the compressed streams
+ if (literalsBlockType != TREELESS_LITERALS_BLOCK) {
+ input += huffman.readTable(inputBase, input, compressedSize);
+ }
+
+ literalsBase = literals;
+ literalsAddress = ARRAY_BYTE_BASE_OFFSET;
+ literalsLimit = ARRAY_BYTE_BASE_OFFSET + uncompressedSize;
+
+ if (singleStream) {
+ huffman.decodeSingleStream(inputBase, input, inputLimit, literals, literalsAddress, literalsLimit);
+ }
+ else {
+ huffman.decode4Streams(inputBase, input, inputLimit, literals, literalsAddress, literalsLimit);
+ }
+
+ return headerSize + compressedSize;
+ }
+
+ // Decodes an RLE literals section: a variable-size header carrying the output
+ // size followed by a single byte that is replicated into the `literals` buffer.
+ // Returns the number of input bytes consumed.
+ private int decodeRleLiterals(Object inputBase, final long inputAddress, int blockSize)
+ {
+ long input = inputAddress;
+ int outputSize;
+
+ // Size_Format (bits 2-3) selects a 1-, 2- or 3-byte size header
+ int type = (UNSAFE.getByte(inputBase, input) >> 2) & 0b11;
+ switch (type) {
+ case 0:
+ case 2:
+ // 1-byte header: 5-bit size
+ outputSize = (UNSAFE.getByte(inputBase, input) & 0xFF) >>> 3;
+ input++;
+ break;
+ case 1:
+ // 2-byte header: 12-bit size
+ outputSize = (UNSAFE.getShort(inputBase, input) & 0xFFFF) >>> 4;
+ input += 2;
+ break;
+ case 3:
+ // we need at least 4 bytes (3 for the header, 1 for the payload)
+ verify(blockSize >= SIZE_OF_INT, input, "Not enough input bytes");
+ // 3-byte header: 20-bit size
+ outputSize = (UNSAFE.getInt(inputBase, input) & 0xFF_FFFF) >>> 4;
+ input += 3;
+ break;
+ default:
+ throw fail(input, "Invalid RLE literals header encoding type");
+ }
+
+ verify(outputSize <= MAX_BLOCK_SIZE, input, "Output exceeds maximum block size");
+
+ byte value = UNSAFE.getByte(inputBase, input++);
+ // extra SIZE_OF_LONG bytes of fill pad the buffer for later 8-byte wild copies
+ Arrays.fill(literals, 0, outputSize + SIZE_OF_LONG, value);
+
+ literalsBase = literals;
+ literalsAddress = ARRAY_BYTE_BASE_OFFSET;
+ literalsLimit = ARRAY_BYTE_BASE_OFFSET + outputSize;
+
+ return (int) (input - inputAddress);
+ }
+
+ // Decodes a raw (stored) literals section. Where possible the literals pointer
+ // aliases the input buffer directly (zero copy); near the end of the input the
+ // bytes are copied into the padded `literals` buffer so that 8-byte wild copies
+ // during sequence execution stay in bounds.
+ // Returns the number of input bytes consumed (header + literals).
+ private int decodeRawLiterals(Object inputBase, final long inputAddress, long inputLimit)
+ {
+ long input = inputAddress;
+ // Size_Format (bits 2-3) selects a 1-, 2- or 3-byte size header
+ int type = (UNSAFE.getByte(inputBase, input) >> 2) & 0b11;
+
+ int literalSize;
+ switch (type) {
+ case 0:
+ case 2:
+ // 1-byte header: 5-bit size
+ literalSize = (UNSAFE.getByte(inputBase, input) & 0xFF) >>> 3;
+ input++;
+ break;
+ case 1:
+ // 2-byte header: 12-bit size
+ literalSize = (UNSAFE.getShort(inputBase, input) & 0xFFFF) >>> 4;
+ input += 2;
+ break;
+ case 3:
+ // read 3 little-endian bytes
+ int header = ((UNSAFE.getByte(inputBase, input) & 0xFF) |
+ ((UNSAFE.getShort(inputBase, input + 1) & 0xFFFF) << 8));
+
+ literalSize = header >>> 4;
+ input += 3;
+ break;
+ default:
+ throw fail(input, "Invalid raw literals header encoding type");
+ }
+
+ verify(input + literalSize <= inputLimit, input, "Not enough input bytes");
+
+ // Set literals pointer to [input, literalSize], but only if we can copy 8 bytes at a time during sequence decoding
+ // Otherwise, copy literals into buffer that's big enough to guarantee that
+ if (literalSize > (inputLimit - input) - SIZE_OF_LONG) {
+ literalsBase = literals;
+ literalsAddress = ARRAY_BYTE_BASE_OFFSET;
+ literalsLimit = ARRAY_BYTE_BASE_OFFSET + literalSize;
+
+ UNSAFE.copyMemory(inputBase, input, literals, literalsAddress, literalSize);
+ // zero the 8-byte padding so over-copies read defined data
+ Arrays.fill(literals, literalSize, literalSize + SIZE_OF_LONG, (byte) 0);
+ }
+ else {
+ // zero-copy: alias the input buffer directly
+ literalsBase = inputBase;
+ literalsAddress = input;
+ literalsLimit = literalsAddress + literalSize;
+ }
+ input += literalSize;
+
+ return (int) (input - inputAddress);
+ }
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/ZstdCompressor.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/ZstdCompressor.java
new file mode 100644
index 00000000000..1624067f769
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/ZstdCompressor.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+import ai.vespa.airlift.compress.Compressor;
+
+import java.nio.Buffer;
+import java.nio.ByteBuffer;
+
+import static ai.vespa.airlift.zstd.Constants.MAX_BLOCK_SIZE;
+import static ai.vespa.airlift.zstd.UnsafeUtil.getAddress;
+import static java.lang.String.format;
+import static java.util.Objects.requireNonNull;
+import static sun.misc.Unsafe.ARRAY_BYTE_BASE_OFFSET;
+
+public class ZstdCompressor
+ implements Compressor
+{
+ @Override
+ public int maxCompressedLength(int uncompressedSize)
+ {
+ int result = uncompressedSize + (uncompressedSize >>> 8);
+
+ if (uncompressedSize < MAX_BLOCK_SIZE) {
+ result += (MAX_BLOCK_SIZE - uncompressedSize) >>> 11;
+ }
+
+ return result;
+ }
+
+ @Override
+ public int compress(byte[] input, int inputOffset, int inputLength, byte[] output, int outputOffset, int maxOutputLength)
+ {
+ verifyRange(input, inputOffset, inputLength);
+ verifyRange(output, outputOffset, maxOutputLength);
+
+ long inputAddress = ARRAY_BYTE_BASE_OFFSET + inputOffset;
+ long outputAddress = ARRAY_BYTE_BASE_OFFSET + outputOffset;
+
+ return ZstdFrameCompressor.compress(input, inputAddress, inputAddress + inputLength, output, outputAddress, outputAddress + maxOutputLength, CompressionParameters.DEFAULT_COMPRESSION_LEVEL);
+ }
+
+ @Override
+ public void compress(ByteBuffer inputBuffer, ByteBuffer outputBuffer)
+ {
+ // Java 9+ added an overload of various methods in ByteBuffer. When compiling with Java 11+ and targeting Java 8 bytecode
+ // the resulting signatures are invalid for JDK 8, so accesses below result in NoSuchMethodError. Accessing the
+ // methods through the interface class works around the problem
+ // Sidenote: we can't target "javac --release 8" because Unsafe is not available in the signature data for that profile
+ Buffer input = inputBuffer;
+ Buffer output = outputBuffer;
+
+ Object inputBase;
+ long inputAddress;
+ long inputLimit;
+ if (input.isDirect()) {
+ inputBase = null;
+ long address = getAddress(input);
+ inputAddress = address + input.position();
+ inputLimit = address + input.limit();
+ }
+ else if (input.hasArray()) {
+ inputBase = input.array();
+ inputAddress = ARRAY_BYTE_BASE_OFFSET + input.arrayOffset() + input.position();
+ inputLimit = ARRAY_BYTE_BASE_OFFSET + input.arrayOffset() + input.limit();
+ }
+ else {
+ throw new IllegalArgumentException("Unsupported input ByteBuffer implementation " + input.getClass().getName());
+ }
+
+ Object outputBase;
+ long outputAddress;
+ long outputLimit;
+ if (output.isDirect()) {
+ outputBase = null;
+ long address = getAddress(output);
+ outputAddress = address + output.position();
+ outputLimit = address + output.limit();
+ }
+ else if (output.hasArray()) {
+ outputBase = output.array();
+ outputAddress = ARRAY_BYTE_BASE_OFFSET + output.arrayOffset() + output.position();
+ outputLimit = ARRAY_BYTE_BASE_OFFSET + output.arrayOffset() + output.limit();
+ }
+ else {
+ throw new IllegalArgumentException("Unsupported output ByteBuffer implementation " + output.getClass().getName());
+ }
+
+ // HACK: Assure JVM does not collect Slice wrappers while compressing, since the
+ // collection may trigger freeing of the underlying memory resulting in a segfault
+ // There is no other known way to signal to the JVM that an object should not be
+ // collected in a block, and technically, the JVM is allowed to eliminate these locks.
+ synchronized (input) {
+ synchronized (output) {
+ int written = ZstdFrameCompressor.compress(
+ inputBase,
+ inputAddress,
+ inputLimit,
+ outputBase,
+ outputAddress,
+ outputLimit,
+ CompressionParameters.DEFAULT_COMPRESSION_LEVEL);
+ output.position(output.position() + written);
+ }
+ }
+ }
+
+ private static void verifyRange(byte[] data, int offset, int length)
+ {
+ requireNonNull(data, "data is null");
+ if (offset < 0 || length < 0 || offset + length > data.length) {
+ throw new IllegalArgumentException(format("Invalid offset or length (%s, %s) in array of length %s", offset, length, data.length));
+ }
+ }
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/ZstdDecompressor.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/ZstdDecompressor.java
new file mode 100644
index 00000000000..a5c755e3685
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/ZstdDecompressor.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+import ai.vespa.airlift.compress.Decompressor;
+import ai.vespa.airlift.compress.MalformedInputException;
+
+import java.nio.Buffer;
+import java.nio.ByteBuffer;
+
+import static ai.vespa.airlift.zstd.UnsafeUtil.getAddress;
+import static java.lang.String.format;
+import static java.util.Objects.requireNonNull;
+import static sun.misc.Unsafe.ARRAY_BYTE_BASE_OFFSET;
+
+/**
+ * Zstandard decompressor front-end: validates ranges and translates byte[]
+ * and ByteBuffer inputs into (base, address) pairs for the Unsafe-based
+ * ZstdFrameDecompressor.
+ */
+public class ZstdDecompressor
+ implements Decompressor
+{
+ // per-instance frame decompressor (holds mutable decoding tables)
+ private final ZstdFrameDecompressor decompressor = new ZstdFrameDecompressor();
+
+ /**
+ * Decompresses input[inputOffset, inputOffset+inputLength) into
+ * output[outputOffset, outputOffset+maxOutputLength).
+ *
+ * @return the number of decompressed bytes written
+ * @throws MalformedInputException if the input is not a valid zstd frame
+ */
+ @Override
+ public int decompress(byte[] input, int inputOffset, int inputLength, byte[] output, int outputOffset, int maxOutputLength)
+ throws MalformedInputException
+ {
+ verifyRange(input, inputOffset, inputLength);
+ verifyRange(output, outputOffset, maxOutputLength);
+
+ // translate array offsets into Unsafe addresses relative to the array base
+ long inputAddress = ARRAY_BYTE_BASE_OFFSET + inputOffset;
+ long inputLimit = inputAddress + inputLength;
+ long outputAddress = ARRAY_BYTE_BASE_OFFSET + outputOffset;
+ long outputLimit = outputAddress + maxOutputLength;
+
+ return decompressor.decompress(input, inputAddress, inputLimit, output, outputAddress, outputLimit);
+ }
+
+ /**
+ * Decompresses the remaining bytes of {@code inputBuffer} into
+ * {@code outputBuffer}, advancing the output position by the bytes written.
+ * Supports direct and array-backed buffers only.
+ */
+ @Override
+ public void decompress(ByteBuffer inputBuffer, ByteBuffer outputBuffer)
+ throws MalformedInputException
+ {
+ // Java 9+ added an overload of various methods in ByteBuffer. When compiling with Java 11+ and targeting Java 8 bytecode
+ // the resulting signatures are invalid for JDK 8, so accesses below result in NoSuchMethodError. Accessing the
+ // methods through the interface class works around the problem
+ // Sidenote: we can't target "javac --release 8" because Unsafe is not available in the signature data for that profile
+ Buffer input = inputBuffer;
+ Buffer output = outputBuffer;
+
+ Object inputBase;
+ long inputAddress;
+ long inputLimit;
+ if (input.isDirect()) {
+ // direct buffer: base is null, addresses are absolute native addresses
+ inputBase = null;
+ long address = getAddress(input);
+ inputAddress = address + input.position();
+ inputLimit = address + input.limit();
+ }
+ else if (input.hasArray()) {
+ // heap buffer: base is the backing array, addresses are array-relative
+ inputBase = input.array();
+ inputAddress = ARRAY_BYTE_BASE_OFFSET + input.arrayOffset() + input.position();
+ inputLimit = ARRAY_BYTE_BASE_OFFSET + input.arrayOffset() + input.limit();
+ }
+ else {
+ throw new IllegalArgumentException("Unsupported input ByteBuffer implementation " + input.getClass().getName());
+ }
+
+ Object outputBase;
+ long outputAddress;
+ long outputLimit;
+ if (output.isDirect()) {
+ outputBase = null;
+ long address = getAddress(output);
+ outputAddress = address + output.position();
+ outputLimit = address + output.limit();
+ }
+ else if (output.hasArray()) {
+ outputBase = output.array();
+ outputAddress = ARRAY_BYTE_BASE_OFFSET + output.arrayOffset() + output.position();
+ outputLimit = ARRAY_BYTE_BASE_OFFSET + output.arrayOffset() + output.limit();
+ }
+ else {
+ throw new IllegalArgumentException("Unsupported output ByteBuffer implementation " + output.getClass().getName());
+ }
+
+ // HACK: Assure JVM does not collect Slice wrappers while decompressing, since the
+ // collection may trigger freeing of the underlying memory resulting in a segfault
+ // There is no other known way to signal to the JVM that an object should not be
+ // collected in a block, and technically, the JVM is allowed to eliminate these locks.
+ synchronized (input) {
+ synchronized (output) {
+ // NOTE(review): a fresh ZstdFrameDecompressor is used here instead of the
+ // `decompressor` field — presumably to keep this path independent of the
+ // instance's mutable state; confirm before consolidating.
+ int written = new ZstdFrameDecompressor().decompress(inputBase, inputAddress, inputLimit, outputBase, outputAddress, outputLimit);
+ output.position(output.position() + written);
+ }
+ }
+ }
+
+ /**
+ * Reads the decompressed size recorded in the frame header of
+ * input[offset, offset+length), without decompressing.
+ */
+ public static long getDecompressedSize(byte[] input, int offset, int length)
+ {
+ // long arithmetic avoids any int overflow when forming addresses
+ long baseAddress = ARRAY_BYTE_BASE_OFFSET + (long) offset;
+ return ZstdFrameDecompressor.getDecompressedSize(input, baseAddress, baseAddress + length);
+ }
+
+ /**
+ * Validates that [offset, offset+length) lies within {@code data}.
+ * The sum is computed in long arithmetic so that huge offset/length pairs
+ * cannot overflow int and slip past the bounds check.
+ */
+ private static void verifyRange(byte[] data, int offset, int length)
+ {
+ requireNonNull(data, "data is null");
+ if (offset < 0 || length < 0 || (long) offset + length > data.length) {
+ throw new IllegalArgumentException(format("Invalid offset or length (%s, %s) in array of length %s", offset, length, data.length));
+ }
+ }
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/ZstdFrameCompressor.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/ZstdFrameCompressor.java
new file mode 100644
index 00000000000..44209b1f9e2
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/ZstdFrameCompressor.java
@@ -0,0 +1,438 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+import static ai.vespa.airlift.zstd.Constants.COMPRESSED_BLOCK;
+import static ai.vespa.airlift.zstd.Constants.COMPRESSED_LITERALS_BLOCK;
+import static ai.vespa.airlift.zstd.Constants.MAGIC_NUMBER;
+import static ai.vespa.airlift.zstd.Constants.MAX_BLOCK_SIZE;
+import static ai.vespa.airlift.zstd.Constants.MIN_BLOCK_SIZE;
+import static ai.vespa.airlift.zstd.Constants.MIN_WINDOW_LOG;
+import static ai.vespa.airlift.zstd.Constants.RAW_BLOCK;
+import static ai.vespa.airlift.zstd.Constants.RAW_LITERALS_BLOCK;
+import static ai.vespa.airlift.zstd.Constants.RLE_LITERALS_BLOCK;
+import static ai.vespa.airlift.zstd.Constants.SIZE_OF_BLOCK_HEADER;
+import static ai.vespa.airlift.zstd.Constants.SIZE_OF_INT;
+import static ai.vespa.airlift.zstd.Constants.SIZE_OF_SHORT;
+import static ai.vespa.airlift.zstd.Constants.TREELESS_LITERALS_BLOCK;
+import static ai.vespa.airlift.zstd.Huffman.MAX_SYMBOL;
+import static ai.vespa.airlift.zstd.Huffman.MAX_SYMBOL_COUNT;
+import static ai.vespa.airlift.zstd.UnsafeUtil.UNSAFE;
+import static ai.vespa.airlift.zstd.Util.checkArgument;
+import static ai.vespa.airlift.zstd.Util.put24BitLittleEndian;
+import static sun.misc.Unsafe.ARRAY_BYTE_BASE_OFFSET;
+
+class ZstdFrameCompressor
+{
+ static final int MAX_FRAME_HEADER_SIZE = 14;
+
+ private static final int CHECKSUM_FLAG = 0b100;
+ private static final int SINGLE_SEGMENT_FLAG = 0b100000;
+
+ private static final int MINIMUM_LITERALS_SIZE = 63;
+
+ // the maximum table log allowed for literal encoding per RFC 8478, section 4.2.1
+ private static final int MAX_HUFFMAN_TABLE_LOG = 11;
+
+    // Prevent instantiation: this class only exposes static entry points.
+    private ZstdFrameCompressor()
+    {
+    }
+
+    // visible for testing
+    // Writes the 4-byte zstd magic number and returns the number of bytes written.
+    static int writeMagic(final Object outputBase, final long outputAddress, final long outputLimit)
+    {
+        checkArgument(outputLimit - outputAddress >= SIZE_OF_INT, "Output buffer too small");
+
+        UNSAFE.putInt(outputBase, outputAddress, MAGIC_NUMBER);
+        return SIZE_OF_INT;
+    }
+
+    // visible for testing
+    // Writes the zstd frame header (RFC 8478 section 3.1.1.1) and returns its size
+    // in bytes: a 1-byte descriptor, an optional window descriptor (omitted in
+    // single-segment mode), and a 0/1/2/4-byte frame content size field.
+    static int writeFrameHeader(final Object outputBase, final long outputAddress, final long outputLimit, int inputSize, int windowSize)
+    {
+        checkArgument(outputLimit - outputAddress >= MAX_FRAME_HEADER_SIZE, "Output buffer too small");
+
+        long output = outputAddress;
+
+        // 0 -> 1-byte, 1 -> 2-byte, 2 -> 4-byte content size field (8-byte form unused here)
+        int contentSizeDescriptor = (inputSize >= 256 ? 1 : 0) + (inputSize >= 65536 + 256 ? 1 : 0);
+        int frameHeaderDescriptor = (contentSizeDescriptor << 6) | CHECKSUM_FLAG; // dictionary ID missing
+
+        // When the whole input fits in the window, the window descriptor is dropped
+        // and the content size alone bounds back-reference distances.
+        boolean singleSegment = windowSize >= inputSize;
+        if (singleSegment) {
+            frameHeaderDescriptor |= SINGLE_SEGMENT_FLAG;
+        }
+
+        UNSAFE.putByte(outputBase, output, (byte) frameHeaderDescriptor);
+        output++;
+
+        if (!singleSegment) {
+            // Encode windowSize as 2^exponent plus mantissa eighths of that base.
+            int base = Integer.highestOneBit(windowSize);
+
+            int exponent = 32 - Integer.numberOfLeadingZeros(base) - 1;
+            if (exponent < MIN_WINDOW_LOG) {
+                throw new IllegalArgumentException("Minimum window size is " + (1 << MIN_WINDOW_LOG));
+            }
+
+            int remainder = windowSize - base;
+            if (remainder % (base / 8) != 0) {
+                throw new IllegalArgumentException("Window size of magnitude 2^" + exponent + " must be multiple of " + (base / 8));
+            }
+
+            // mantissa is guaranteed to be between 0-7
+            int mantissa = remainder / (base / 8);
+            int encoded = ((exponent - MIN_WINDOW_LOG) << 3) | mantissa;
+
+            UNSAFE.putByte(outputBase, output, (byte) encoded);
+            output++;
+        }
+
+        switch (contentSizeDescriptor) {
+            case 0:
+                // The 1-byte content size is only present in single-segment mode.
+                if (singleSegment) {
+                    UNSAFE.putByte(outputBase, output++, (byte) inputSize);
+                }
+                break;
+            case 1:
+                // The 2-byte form stores the size minus 256.
+                UNSAFE.putShort(outputBase, output, (short) (inputSize - 256));
+                output += SIZE_OF_SHORT;
+                break;
+            case 2:
+                UNSAFE.putInt(outputBase, output, inputSize);
+                output += SIZE_OF_INT;
+                break;
+            default:
+                throw new AssertionError();
+        }
+
+        return (int) (output - outputAddress);
+    }
+
+    // visible for testing
+    // Writes the frame checksum: the low 32 bits of XXH64 (seed 0) over the whole
+    // uncompressed input. Returns the number of bytes written.
+    static int writeChecksum(Object outputBase, long outputAddress, long outputLimit, Object inputBase, long inputAddress, long inputLimit)
+    {
+        checkArgument(outputLimit - outputAddress >= SIZE_OF_INT, "Output buffer too small");
+
+        int inputSize = (int) (inputLimit - inputAddress);
+
+        long hash = XxHash64.hash(0, inputBase, inputAddress, inputSize);
+
+        UNSAFE.putInt(outputBase, outputAddress, (int) hash);
+
+        return SIZE_OF_INT;
+    }
+
+    // Compresses the whole input range as one zstd frame (magic number, frame
+    // header, blocks, content checksum) and returns the total compressed size.
+    public static int compress(Object inputBase, long inputAddress, long inputLimit, Object outputBase, long outputAddress, long outputLimit, int compressionLevel)
+    {
+        int inputSize = (int) (inputLimit - inputAddress);
+
+        CompressionParameters parameters = CompressionParameters.compute(compressionLevel, inputSize);
+
+        long output = outputAddress;
+
+        output += writeMagic(outputBase, output, outputLimit);
+        output += writeFrameHeader(outputBase, output, outputLimit, inputSize, 1 << parameters.getWindowLog());
+        output += compressFrame(inputBase, inputAddress, inputLimit, outputBase, output, outputLimit, parameters);
+        // writeFrameHeader always sets CHECKSUM_FLAG, so a checksum is always appended.
+        output += writeChecksum(outputBase, output, outputLimit, inputBase, inputAddress, inputLimit);
+
+        return (int) (output - outputAddress);
+    }
+
+    // Splits the input into blocks of at most min(MAX_BLOCK_SIZE, windowSize) bytes
+    // and emits each one either compressed or raw (whichever is smaller), with a
+    // 3-byte block header. Returns the number of bytes written to the output.
+    private static int compressFrame(Object inputBase, long inputAddress, long inputLimit, Object outputBase, long outputAddress, long outputLimit, CompressionParameters parameters)
+    {
+        int windowSize = 1 << parameters.getWindowLog(); // TODO: store window size in parameters directly?
+        int blockSize = Math.min(MAX_BLOCK_SIZE, windowSize);
+
+        int outputSize = (int) (outputLimit - outputAddress);
+        int remaining = (int) (inputLimit - inputAddress);
+
+        long output = outputAddress;
+        long input = inputAddress;
+
+        CompressionContext context = new CompressionContext(parameters, inputAddress, remaining);
+
+        do {
+            // outputSize shrinks as blocks are written, so this is re-checked per block
+            checkArgument(outputSize >= SIZE_OF_BLOCK_HEADER + MIN_BLOCK_SIZE, "Output buffer too small");
+
+            int lastBlockFlag = blockSize >= remaining ? 1 : 0;
+            blockSize = Math.min(blockSize, remaining);
+
+            int compressedSize = 0;
+            if (remaining > 0) {
+                compressedSize = compressBlock(inputBase, input, blockSize, outputBase, output + SIZE_OF_BLOCK_HEADER, outputSize - SIZE_OF_BLOCK_HEADER, context, parameters);
+            }
+
+            if (compressedSize == 0) { // block is not compressible
+                checkArgument(blockSize + SIZE_OF_BLOCK_HEADER <= outputSize, "Output size too small");
+
+                // header layout: 1 bit last-block flag, 2 bits block type, 21 bits size
+                int blockHeader = lastBlockFlag | (RAW_BLOCK << 1) | (blockSize << 3);
+                put24BitLittleEndian(outputBase, output, blockHeader);
+                UNSAFE.copyMemory(inputBase, input, outputBase, output + SIZE_OF_BLOCK_HEADER, blockSize);
+                compressedSize = SIZE_OF_BLOCK_HEADER + blockSize;
+            }
+            else {
+                int blockHeader = lastBlockFlag | (COMPRESSED_BLOCK << 1) | (compressedSize << 3);
+                put24BitLittleEndian(outputBase, output, blockHeader);
+                compressedSize += SIZE_OF_BLOCK_HEADER;
+            }
+
+            input += blockSize;
+            remaining -= blockSize;
+            output += compressedSize;
+            outputSize -= compressedSize;
+        }
+        while (remaining > 0);
+
+        return (int) (output - outputAddress);
+    }
+
+    // Compresses one block: runs the strategy's match finder, then entropy-encodes
+    // literals and sequences. Returns the compressed size, or 0 when the block is
+    // not worth compressing (the caller then emits it raw).
+    private static int compressBlock(Object inputBase, long inputAddress, int inputSize, Object outputBase, long outputAddress, int outputSize, CompressionContext context, CompressionParameters parameters)
+    {
+        if (inputSize < MIN_BLOCK_SIZE + SIZE_OF_BLOCK_HEADER + 1) {
+            // don't even attempt compression below a certain input size
+            return 0;
+        }
+
+        context.blockCompressionState.enforceMaxDistance(inputAddress + inputSize, 1 << parameters.getWindowLog());
+        context.sequenceStore.reset();
+
+        // match finder returns how many trailing bytes were left unmatched
+        int lastLiteralsSize = parameters.getStrategy()
+                .getCompressor()
+                .compressBlock(inputBase, inputAddress, inputSize, context.sequenceStore, context.blockCompressionState, context.offsets, parameters);
+
+        long lastLiteralsAddress = inputAddress + inputSize - lastLiteralsSize;
+
+        // append [lastLiteralsAddress .. lastLiteralsSize] to sequenceStore literals buffer
+        context.sequenceStore.appendLiterals(inputBase, lastLiteralsAddress, lastLiteralsSize);
+
+        // convert length/offsets into codes
+        context.sequenceStore.generateCodes();
+
+        long outputLimit = outputAddress + outputSize;
+        long output = outputAddress;
+
+        int compressedLiteralsSize = encodeLiterals(
+                context.huffmanContext,
+                parameters,
+                outputBase,
+                output,
+                (int) (outputLimit - output),
+                context.sequenceStore.literalsBuffer,
+                context.sequenceStore.literalsLength);
+        output += compressedLiteralsSize;
+
+        int compressedSequencesSize = SequenceEncoder.compressSequences(outputBase, output, (int) (outputLimit - output), context.sequenceStore, parameters.getStrategy(), context.sequenceEncodingContext);
+
+        int compressedSize = compressedLiteralsSize + compressedSequencesSize;
+        if (compressedSize == 0) {
+            // not compressible
+            return compressedSize;
+        }
+
+        // Check compressibility
+        int maxCompressedSize = inputSize - calculateMinimumGain(inputSize, parameters.getStrategy());
+        if (compressedSize > maxCompressedSize) {
+            return 0; // not compressed
+        }
+
+        // confirm repeated offsets and entropy tables
+        context.commit();
+
+        return compressedSize;
+    }
+
+    // Encodes the literals section of a block (RFC 8478 section 3.1.1.3.1):
+    // chooses between raw, RLE, Huffman-compressed with a new table, or
+    // Huffman-compressed reusing the previous table, and writes the section
+    // header accordingly. Returns the total number of bytes written.
+    private static int encodeLiterals(
+            HuffmanCompressionContext context,
+            CompressionParameters parameters,
+            Object outputBase,
+            long outputAddress,
+            int outputSize,
+            byte[] literals,
+            int literalsSize)
+    {
+        // TODO: move this to Strategy
+        boolean bypassCompression = (parameters.getStrategy() == CompressionParameters.Strategy.FAST) && (parameters.getTargetLength() > 0);
+        if (bypassCompression || literalsSize <= MINIMUM_LITERALS_SIZE) {
+            return rawLiterals(outputBase, outputAddress, outputSize, literals, ARRAY_BYTE_BASE_OFFSET, literalsSize);
+        }
+
+        // header is 3, 4 or 5 bytes depending on how many bits the sizes need
+        int headerSize = 3 + (literalsSize >= 1024 ? 1 : 0) + (literalsSize >= 16384 ? 1 : 0);
+
+        checkArgument(headerSize + 1 <= outputSize, "Output buffer too small");
+
+        int[] counts = new int[MAX_SYMBOL_COUNT]; // TODO: preallocate
+        Histogram.count(literals, literalsSize, counts);
+        int maxSymbol = Histogram.findMaxSymbol(counts, MAX_SYMBOL);
+        int largestCount = Histogram.findLargestCount(counts, maxSymbol);
+
+        long literalsAddress = ARRAY_BYTE_BASE_OFFSET;
+        if (largestCount == literalsSize) {
+            // all bytes in input are equal
+            return rleLiterals(outputBase, outputAddress, outputSize, literals, ARRAY_BYTE_BASE_OFFSET, literalsSize);
+        }
+        else if (largestCount <= (literalsSize >>> 7) + 4) {
+            // heuristic: probably not compressible enough
+            return rawLiterals(outputBase, outputAddress, outputSize, literals, ARRAY_BYTE_BASE_OFFSET, literalsSize);
+        }
+
+        HuffmanCompressionTable previousTable = context.getPreviousTable();
+        HuffmanCompressionTable table;
+        int serializedTableSize;
+        boolean reuseTable;
+
+        boolean canReuse = previousTable.isValid(counts, maxSymbol);
+
+        // heuristic: use existing table for small inputs if valid
+        // TODO: move to Strategy
+        boolean preferReuse = parameters.getStrategy().ordinal() < CompressionParameters.Strategy.LAZY.ordinal() && literalsSize <= 1024;
+        if (preferReuse && canReuse) {
+            table = previousTable;
+            reuseTable = true;
+            serializedTableSize = 0;
+        }
+        else {
+            HuffmanCompressionTable newTable = context.borrowTemporaryTable();
+
+            newTable.initialize(
+                    counts,
+                    maxSymbol,
+                    HuffmanCompressionTable.optimalNumberOfBits(MAX_HUFFMAN_TABLE_LOG, literalsSize, maxSymbol),
+                    context.getCompressionTableWorkspace());
+
+            serializedTableSize = newTable.write(outputBase, outputAddress + headerSize, outputSize - headerSize, context.getTableWriterWorkspace());
+
+            // Check if using previous huffman table is beneficial
+            if (canReuse && previousTable.estimateCompressedSize(counts, maxSymbol) <= serializedTableSize + newTable.estimateCompressedSize(counts, maxSymbol)) {
+                table = previousTable;
+                reuseTable = true;
+                serializedTableSize = 0;
+                context.discardTemporaryTable();
+            }
+            else {
+                table = newTable;
+                reuseTable = false;
+            }
+        }
+
+        int compressedSize;
+        boolean singleStream = literalsSize < 256;
+        if (singleStream) {
+            compressedSize = HuffmanCompressor.compressSingleStream(outputBase, outputAddress + headerSize + serializedTableSize, outputSize - headerSize - serializedTableSize, literals, literalsAddress, literalsSize, table);
+        }
+        else {
+            compressedSize = HuffmanCompressor.compress4streams(outputBase, outputAddress + headerSize + serializedTableSize, outputSize - headerSize - serializedTableSize, literals, literalsAddress, literalsSize, table);
+        }
+
+        int totalSize = serializedTableSize + compressedSize;
+        int minimumGain = calculateMinimumGain(literalsSize, parameters.getStrategy());
+
+        if (compressedSize == 0 || totalSize >= literalsSize - minimumGain) {
+            // incompressible or no savings
+
+            // discard any temporary table we might have borrowed above
+            context.discardTemporaryTable();
+
+            return rawLiterals(outputBase, outputAddress, outputSize, literals, ARRAY_BYTE_BASE_OFFSET, literalsSize);
+        }
+
+        int encodingType = reuseTable ? TREELESS_LITERALS_BLOCK : COMPRESSED_LITERALS_BLOCK;
+
+        // Build header
+        // Field widths noted below: block type - size format - regenerated size - compressed size
+        switch (headerSize) {
+            case 3: { // 2 - 2 - 10 - 10
+                int header = encodingType | ((singleStream ? 0 : 1) << 2) | (literalsSize << 4) | (totalSize << 14);
+                put24BitLittleEndian(outputBase, outputAddress, header);
+                break;
+            }
+            case 4: { // 2 - 2 - 14 - 14
+                int header = encodingType | (2 << 2) | (literalsSize << 4) | (totalSize << 18);
+                UNSAFE.putInt(outputBase, outputAddress, header);
+                break;
+            }
+            case 5: { // 2 - 2 - 18 - 18
+                // 40-bit header: low 32 bits, then the top 8 bits of totalSize
+                int header = encodingType | (3 << 2) | (literalsSize << 4) | (totalSize << 22);
+                UNSAFE.putInt(outputBase, outputAddress, header);
+                UNSAFE.putByte(outputBase, outputAddress + SIZE_OF_INT, (byte) (totalSize >>> 10));
+                break;
+            }
+            default: // not possible : headerSize is {3,4,5}
+                throw new IllegalStateException();
+        }
+
+        return headerSize + totalSize;
+    }
+
+    // Encodes an RLE literals section: a 1-3 byte header followed by the single
+    // byte that is to be repeated inputSize times when decoding. Returns the
+    // number of bytes written.
+    private static int rleLiterals(Object outputBase, long outputAddress, int outputSize, Object inputBase, long inputAddress, int inputSize)
+    {
+        int headerSize = 1 + (inputSize > 31 ? 1 : 0) + (inputSize > 4095 ? 1 : 0);
+
+        // The outputSize parameter used to be ignored; verify there is room for the
+        // header plus the repeated byte before writing (matches rawLiterals).
+        checkArgument(headerSize + 1 <= outputSize, "Output buffer too small");
+
+        // Field widths noted below: block type - size format - regenerated size
+        switch (headerSize) {
+            case 1: // 2 - 1 - 5
+                UNSAFE.putByte(outputBase, outputAddress, (byte) (RLE_LITERALS_BLOCK | (inputSize << 3)));
+                break;
+            case 2: // 2 - 2 - 12
+                UNSAFE.putShort(outputBase, outputAddress, (short) (RLE_LITERALS_BLOCK | (1 << 2) | (inputSize << 4)));
+                break;
+            case 3: // 2 - 2 - 20
+                UNSAFE.putInt(outputBase, outputAddress, RLE_LITERALS_BLOCK | 3 << 2 | inputSize << 4);
+                break;
+            default: // impossible. headerSize is {1,2,3}
+                throw new IllegalStateException();
+        }
+
+        UNSAFE.putByte(outputBase, outputAddress + headerSize, UNSAFE.getByte(inputBase, inputAddress));
+
+        return headerSize + 1;
+    }
+
+    /**
+     * Minimum number of bytes the compressed form must save for it to be worth
+     * emitting; below this gain the raw form is preferred.
+     */
+    // TODO: move this to Strategy to avoid hardcoding a specific strategy here
+    private static int calculateMinimumGain(int inputSize, CompressionParameters.Strategy strategy)
+    {
+        int divisorLog;
+        if (strategy == CompressionParameters.Strategy.BTULTRA) {
+            divisorLog = 7;
+        }
+        else {
+            divisorLog = 6;
+        }
+        return (inputSize >>> divisorLog) + 2;
+    }
+
+    // Copies the literals verbatim, preceded by a 1-3 byte raw-literals section
+    // header. Returns the number of bytes written.
+    private static int rawLiterals(Object outputBase, long outputAddress, int outputSize, Object inputBase, long inputAddress, int inputSize)
+    {
+        int headerSize = 1;
+        if (inputSize >= 32) {
+            headerSize++;
+        }
+        if (inputSize >= 4096) {
+            headerSize++;
+        }
+
+        // Covers header and payload; the old second check ("inputSize + 1 <= outputSize",
+        // flagged with a TODO) was strictly weaker since headerSize >= 1, so it is gone.
+        checkArgument(inputSize + headerSize <= outputSize, "Output buffer too small");
+
+        // Field widths noted below: block type - size format - regenerated size
+        switch (headerSize) {
+            case 1: // 2 - 1 - 5
+                UNSAFE.putByte(outputBase, outputAddress, (byte) (RAW_LITERALS_BLOCK | (inputSize << 3)));
+                break;
+            case 2: // 2 - 2 - 12
+                UNSAFE.putShort(outputBase, outputAddress, (short) (RAW_LITERALS_BLOCK | (1 << 2) | (inputSize << 4)));
+                break;
+            case 3: // 2 - 2 - 20
+                put24BitLittleEndian(outputBase, outputAddress, RAW_LITERALS_BLOCK | (3 << 2) | (inputSize << 4));
+                break;
+            default:
+                throw new AssertionError();
+        }
+
+        UNSAFE.copyMemory(inputBase, inputAddress, outputBase, outputAddress + headerSize, inputSize);
+
+        return headerSize + inputSize;
+    }
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/ZstdFrameDecompressor.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/ZstdFrameDecompressor.java
new file mode 100644
index 00000000000..46b2ea2a894
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/ZstdFrameDecompressor.java
@@ -0,0 +1,212 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+import ai.vespa.airlift.compress.MalformedInputException;
+
+import static ai.vespa.airlift.zstd.Constants.COMPRESSED_BLOCK;
+import static ai.vespa.airlift.zstd.Constants.MAGIC_NUMBER;
+import static ai.vespa.airlift.zstd.Constants.MIN_WINDOW_LOG;
+import static ai.vespa.airlift.zstd.Constants.RAW_BLOCK;
+import static ai.vespa.airlift.zstd.Constants.RLE_BLOCK;
+import static ai.vespa.airlift.zstd.Constants.SIZE_OF_BLOCK_HEADER;
+import static ai.vespa.airlift.zstd.Constants.SIZE_OF_BYTE;
+import static ai.vespa.airlift.zstd.Constants.SIZE_OF_INT;
+import static ai.vespa.airlift.zstd.Constants.SIZE_OF_LONG;
+import static ai.vespa.airlift.zstd.Constants.SIZE_OF_SHORT;
+import static ai.vespa.airlift.zstd.UnsafeUtil.UNSAFE;
+import static ai.vespa.airlift.zstd.Util.fail;
+import static ai.vespa.airlift.zstd.Util.verify;
+
+class ZstdFrameDecompressor
+{
+ private static final int V07_MAGIC_NUMBER = 0xFD2FB527;
+
+    // Decompresses a sequence of zstd frames from the input range into the output
+    // range and returns the total number of bytes written. When a frame carries a
+    // checksum, it is validated against the low 32 bits of XXH64 (seed 0) over
+    // that frame's decompressed output.
+    public int decompress(
+            final Object inputBase,
+            final long inputAddress,
+            final long inputLimit,
+            final Object outputBase,
+            final long outputAddress,
+            final long outputLimit)
+    {
+        if (outputAddress == outputLimit) {
+            return 0;
+        }
+        long input = inputAddress;
+        long output = outputAddress;
+
+        while (input < inputLimit) {
+            long outputStart = output;
+            input += verifyMagic(inputBase, input, inputLimit);
+
+            FrameHeader frameHeader = readFrameHeader(inputBase, input, inputLimit);
+            input += frameHeader.headerSize;
+
+            ZstdBlockDecompressor blockDecompressor = new ZstdBlockDecompressor(frameHeader);
+            boolean lastBlock;
+            do {
+                verify(input + SIZE_OF_BLOCK_HEADER <= inputLimit, input, "Not enough input bytes");
+
+                // read block header
+                int header = UNSAFE.getInt(inputBase, input) & 0xFF_FFFF;
+                input += SIZE_OF_BLOCK_HEADER;
+
+                lastBlock = (header & 1) != 0;
+                int blockType = (header >>> 1) & 0b11;
+                int blockSize = (header >>> 3) & 0x1F_FFFF; // 21 bits
+
+                int decodedSize;
+                // Payload bounds are checked against the current position ("input"), not
+                // the frame start ("inputAddress") as before: the old checks were weaker
+                // and could let block decoding read past the input limit.
+                switch (blockType) {
+                    case RAW_BLOCK:
+                        verify(input + blockSize <= inputLimit, input, "Not enough input bytes");
+                        decodedSize = ZstdBlockDecompressor.decodeRawBlock(inputBase, input, blockSize, outputBase, output, outputLimit);
+                        input += blockSize;
+                        break;
+                    case RLE_BLOCK:
+                        // an RLE block consumes exactly one input byte
+                        verify(input + 1 <= inputLimit, input, "Not enough input bytes");
+                        decodedSize = ZstdBlockDecompressor.decodeRleBlock(blockSize, inputBase, input, outputBase, output, outputLimit);
+                        input += 1;
+                        break;
+                    case COMPRESSED_BLOCK:
+                        verify(input + blockSize <= inputLimit, input, "Not enough input bytes");
+                        decodedSize = blockDecompressor.decodeCompressedBlock(inputBase, input, blockSize, outputBase, output, outputLimit, frameHeader.windowSize, outputAddress);
+                        input += blockSize;
+                        break;
+                    default:
+                        throw fail(input, "Invalid block type");
+                }
+                output += decodedSize;
+            }
+            while (!lastBlock);
+
+            if (frameHeader.hasChecksum) {
+                int decodedFrameSize = (int) (output - outputStart);
+
+                long hash = XxHash64.hash(0, outputBase, outputStart, decodedFrameSize);
+
+                // make sure the 4 checksum bytes are actually present before reading them
+                verify(input + SIZE_OF_INT <= inputLimit, input, "Not enough input bytes");
+                int checksum = UNSAFE.getInt(inputBase, input);
+                if (checksum != (int) hash) {
+                    throw new MalformedInputException(input, String.format("Bad checksum. Expected: %s, actual: %s", Integer.toHexString(checksum), Integer.toHexString((int) hash)));
+                }
+
+                input += SIZE_OF_INT;
+            }
+        }
+
+        return (int) (output - outputAddress);
+    }
+
+    // Parses a zstd frame header (RFC 8478 section 3.1.1.1) starting right after
+    // the magic number. Window size is -1 in single-segment mode, content size is
+    // -1 when absent, and any dictionary ID is rejected as unsupported.
+    static FrameHeader readFrameHeader(final Object inputBase, final long inputAddress, final long inputLimit)
+    {
+        long input = inputAddress;
+        verify(input < inputLimit, input, "Not enough input bytes");
+
+        int frameHeaderDescriptor = UNSAFE.getByte(inputBase, input++) & 0xFF;
+        boolean singleSegment = (frameHeaderDescriptor & 0b100000) != 0;
+        int dictionaryDescriptor = frameHeaderDescriptor & 0b11;
+        int contentSizeDescriptor = frameHeaderDescriptor >>> 6;
+
+        // compute the full header size up front so the bounds check is done once
+        int headerSize = 1 +
+                (singleSegment ? 0 : 1) +
+                (dictionaryDescriptor == 0 ? 0 : (1 << (dictionaryDescriptor - 1))) +
+                (contentSizeDescriptor == 0 ? (singleSegment ? 1 : 0) : (1 << contentSizeDescriptor));
+
+        verify(headerSize <= inputLimit - inputAddress, input, "Not enough input bytes");
+
+        // decode window size
+        int windowSize = -1;
+        if (!singleSegment) {
+            int windowDescriptor = UNSAFE.getByte(inputBase, input++) & 0xFF;
+            int exponent = windowDescriptor >>> 3;
+            int mantissa = windowDescriptor & 0b111;
+
+            // window = 2^(MIN_WINDOW_LOG + exponent) plus mantissa eighths of that base
+            int base = 1 << (MIN_WINDOW_LOG + exponent);
+            windowSize = base + (base / 8) * mantissa;
+        }
+
+        // decode dictionary id
+        long dictionaryId = -1;
+        switch (dictionaryDescriptor) {
+            case 1:
+                dictionaryId = UNSAFE.getByte(inputBase, input) & 0xFF;
+                input += SIZE_OF_BYTE;
+                break;
+            case 2:
+                dictionaryId = UNSAFE.getShort(inputBase, input) & 0xFFFF;
+                input += SIZE_OF_SHORT;
+                break;
+            case 3:
+                dictionaryId = UNSAFE.getInt(inputBase, input) & 0xFFFF_FFFFL;
+                input += SIZE_OF_INT;
+                break;
+        }
+        verify(dictionaryId == -1, input, "Custom dictionaries not supported");
+
+        // decode content size
+        long contentSize = -1;
+        switch (contentSizeDescriptor) {
+            case 0:
+                // 1-byte content size exists only in single-segment mode
+                if (singleSegment) {
+                    contentSize = UNSAFE.getByte(inputBase, input) & 0xFF;
+                    input += SIZE_OF_BYTE;
+                }
+                break;
+            case 1:
+                // 2-byte form stores the size minus 256
+                contentSize = UNSAFE.getShort(inputBase, input) & 0xFFFF;
+                contentSize += 256;
+                input += SIZE_OF_SHORT;
+                break;
+            case 2:
+                contentSize = UNSAFE.getInt(inputBase, input) & 0xFFFF_FFFFL;
+                input += SIZE_OF_INT;
+                break;
+            case 3:
+                contentSize = UNSAFE.getLong(inputBase, input);
+                input += SIZE_OF_LONG;
+                break;
+        }
+
+        boolean hasChecksum = (frameHeaderDescriptor & 0b100) != 0;
+
+        return new FrameHeader(
+                input - inputAddress,
+                windowSize,
+                contentSize,
+                dictionaryId,
+                hasChecksum);
+    }
+
+    /**
+     * Returns the content size declared in the frame header at the given address,
+     * or -1 when the header does not carry one. Only the magic number and frame
+     * header are read; no data is decompressed.
+     */
+    public static long getDecompressedSize(final Object inputBase, final long inputAddress, final long inputLimit)
+    {
+        long headerAddress = inputAddress + verifyMagic(inputBase, inputAddress, inputLimit);
+        FrameHeader header = readFrameHeader(inputBase, headerAddress, inputLimit);
+        return header.contentSize;
+    }
+
+    // Validates the 4-byte zstd magic number at inputAddress and returns its size.
+    // The legacy v0.7 magic is recognized and rejected with a specific message.
+    static int verifyMagic(Object inputBase, long inputAddress, long inputLimit)
+    {
+        verify(inputLimit - inputAddress >= 4, inputAddress, "Not enough input bytes");
+
+        int magic = UNSAFE.getInt(inputBase, inputAddress);
+        if (magic != MAGIC_NUMBER) {
+            if (magic == V07_MAGIC_NUMBER) {
+                throw new MalformedInputException(inputAddress, "Data encoded in unsupported ZSTD v0.7 format");
+            }
+            throw new MalformedInputException(inputAddress, "Invalid magic prefix: " + Integer.toHexString(magic));
+        }
+
+        return SIZE_OF_INT;
+    }
+}
diff --git a/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/ZstdInputStream.java b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/ZstdInputStream.java
new file mode 100644
index 00000000000..ffee9286fdb
--- /dev/null
+++ b/airlift-zstd/src/main/java/ai/vespa/airlift/zstd/ZstdInputStream.java
@@ -0,0 +1,471 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ai.vespa.airlift.zstd;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import static ai.vespa.airlift.zstd.Constants.COMPRESSED_BLOCK;
+import static ai.vespa.airlift.zstd.Constants.MAGIC_NUMBER;
+import static ai.vespa.airlift.zstd.Constants.MAGIC_SKIPFRAME_MAX;
+import static ai.vespa.airlift.zstd.Constants.MAGIC_SKIPFRAME_MIN;
+import static ai.vespa.airlift.zstd.Constants.MAX_BLOCK_SIZE;
+import static ai.vespa.airlift.zstd.Constants.RAW_BLOCK;
+import static ai.vespa.airlift.zstd.Constants.RLE_BLOCK;
+import static ai.vespa.airlift.zstd.Constants.SIZE_OF_BLOCK_HEADER;
+import static ai.vespa.airlift.zstd.Constants.SIZE_OF_BYTE;
+import static ai.vespa.airlift.zstd.Constants.SIZE_OF_INT;
+import static ai.vespa.airlift.zstd.Util.fail;
+import static sun.misc.Unsafe.ARRAY_BYTE_BASE_OFFSET;
+
+/**
+ * Take a compressed InputStream and decompress it as needed
+ * @author arnej27959
+ */
+public class ZstdInputStream
+ extends InputStream
+{
+ private static final int DEFAULT_BUFFER_SIZE = 8 * 1024;
+ private static final int BUFFER_SIZE_MASK = ~(DEFAULT_BUFFER_SIZE - 1);
+ private static final int MAX_WINDOW_SIZE = 1 << 23;
+
+ private final InputStream inputStream;
+ private byte[] inputBuffer;
+ private int inputPosition;
+ private int inputEnd;
+ private byte[] outputBuffer;
+ private int outputPosition;
+ private int outputEnd;
+ private boolean isClosed;
+ private boolean seenEof;
+ private boolean lastBlock;
+ private boolean singleSegmentFlag;
+ private boolean contentChecksumFlag;
+ private long skipBytes;
+ private int windowSize;
+ private int blockMaximumSize = MAX_BLOCK_SIZE;
+ private int curBlockSize;
+ private int curBlockType = -1;
+ private FrameHeader curHeader;
+ private ZstdBlockDecompressor blockDecompressor;
+ private XxHash64 hasher;
+ private long evictedInput;
+
+    /**
+     * Creates a decompressing stream over {@code inp}.
+     *
+     * @param initialBufferSize initial size of both the input and output buffers;
+     *        they are enlarged on demand while decoding (see ensureInputSpace /
+     *        ensureOutputSpace callers below)
+     */
+    public ZstdInputStream(InputStream inp, int initialBufferSize)
+    {
+        this.inputStream = inp;
+        this.inputBuffer = new byte[initialBufferSize];
+        this.outputBuffer = new byte[initialBufferSize];
+    }
+
+    /** Creates a decompressing stream with the default 8 KiB buffer size. */
+    public ZstdInputStream(InputStream inp)
+    {
+        this(inp, DEFAULT_BUFFER_SIZE);
+    }
+
+    // Returns only the number of bytes already decompressed and buffered;
+    // it does not consult the underlying stream.
+    @Override
+    public int available()
+    {
+        return outputAvailable();
+    }
+
+ @Override
+ public int read() throws IOException
+ {
+ throwIfClosed();
+ if (ensureGotOutput()) {
+ int b = outputBuffer[outputPosition++];
+ return (b & 0xFF);
+ }
+ else {
+ return -1;
+ }
+ }
+
+    /** Reads up to {@code b.length} bytes; delegates to {@link #read(byte[], int, int)}. */
+    @Override
+    public int read(byte[] b) throws IOException
+    {
+        return read(b, 0, b.length);
+    }
+
+ @Override
+ public int read(byte[] b, int off, int len) throws IOException
+ {
+ throwIfClosed();
+ if (ensureGotOutput()) {
+ len = Math.min(outputAvailable(), len);
+ System.arraycopy(outputBuffer, outputPosition, b, off, len);
+ outputPosition += len;
+ return len;
+ }
+ else {
+ return -1;
+ }
+ }
+
+    @Override
+    public void close() throws IOException
+    {
+        // Closeable requires close() to be idempotent: closing an already-closed
+        // stream must have no effect. The previous throwIfClosed() call made a
+        // second close() throw instead.
+        if (isClosed) {
+            return;
+        }
+        isClosed = true;
+        // NOTE(review): the underlying stream is left open once EOF has been
+        // observed -- confirm this matches callers' expectations.
+        if (!seenEof) {
+            inputStream.close();
+        }
+    }
+
+    // Fails with the given reason (reported at the current input file position)
+    // when the condition does not hold.
+    private void check(boolean condition, String reason)
+    {
+        Util.verify(condition, curInputFilePosition(), reason);
+    }
+
+    // Drives decompression until at least one output byte is buffered or EOF is
+    // reached. Returns true when output is available; on a clean EOF it verifies
+    // that no partial frame or leftover input remains before returning false.
+    private boolean ensureGotOutput() throws IOException
+    {
+        while ((outputAvailable() == 0) && !seenEof) {
+            // each helper returns false to request a retry after more input is read
+            if (ensureGotFrameHeader() && ensureGotBlock()) {
+                decompressBlock();
+            }
+        }
+        if (outputAvailable() > 0) {
+            return true;
+        }
+        else {
+            check(seenEof, "unable to decode to EOF");
+            check(inputAvailable() == 0, "leftover input at end of file");
+            check(curHeader == null, "unfinished frame at end of file");
+            return false;
+        }
+    }
+
+    // Ensures at least 1 KiB of free input-buffer space, then reads whatever the
+    // underlying stream currently offers. Sets seenEof at end of stream.
+    private void readMoreInput() throws IOException
+    {
+        ensureInputSpace(1024);
+        int got = inputStream.read(inputBuffer, inputEnd, inputSpace());
+        if (got == -1) {
+            seenEof = true;
+        }
+        else {
+            inputEnd += got;
+        }
+    }
+
+    // Little-endian view over the currently unconsumed region of the input buffer.
+    private ByteBuffer inputBB()
+    {
+        ByteBuffer bb = ByteBuffer.wrap(inputBuffer, inputPosition, inputAvailable());
+        bb.order(ByteOrder.LITTLE_ENDIAN);
+        return bb;
+    }
+
+    // Ensures a parsed frame header is available in curHeader. Skippable frames
+    // are consumed entirely here. Returns false when more input must be read
+    // first (the caller retries from the top).
+    private boolean ensureGotFrameHeader() throws IOException
+    {
+        if (curHeader != null) {
+            return true;
+        }
+        // a skip frame is minimum 8 bytes
+        // a data frame is minimum 4 + 2 + 3 = 9 bytes, but we only
+        // need 5 bytes to know the size of the frame header
+        if (inputAvailable() < 8) {
+            readMoreInput();
+            // retry from start
+            return false;
+        }
+        ByteBuffer bb = inputBB();
+        int magic = bb.getInt();
+        // skippable frame header magic
+        if ((magic >= MAGIC_SKIPFRAME_MIN) && (magic <= MAGIC_SKIPFRAME_MAX)) {
+            inputPosition += SIZE_OF_INT; // for magic
+            skipBytes = (bb.getInt() & 0xffff_ffffL) + SIZE_OF_INT;
+            inputPosition += SIZE_OF_INT; // for skipsize
+            // consume the frame payload, reading more input as needed
+            while (skipBytes > 0) {
+                if (skipBytes <= inputAvailable()) {
+                    inputPosition += skipBytes;
+                    skipBytes = 0;
+                }
+                else {
+                    skipBytes -= inputAvailable();
+                    inputPosition = inputEnd;
+                    readMoreInput();
+                    if (seenEof) {
+                        throw fail(curInputFilePosition(), "unfinished skip frame at end of file");
+                    }
+                }
+            }
+            // entire frame skipped; retry from start
+            return false;
+        }
+        // zstd frame header magic
+        if (magic == MAGIC_NUMBER) {
+            int fhDesc = 0xFF & bb.get();
+            int frameContentSizeFlag = (fhDesc & 0b11000000) >> 6;
+            singleSegmentFlag = (fhDesc & 0b00100000) != 0;
+            contentChecksumFlag = (fhDesc & 0b00000100) != 0;
+            int dictionaryIdFlag = (fhDesc & 0b00000011);
+            // 4 byte magic + 1 byte fhDesc
+            int fhSize = SIZE_OF_INT + SIZE_OF_BYTE;
+            // add size of frameContentSize
+            if (frameContentSizeFlag == 0) {
+                // the 1-byte content size exists only in single-segment mode
+                fhSize += (singleSegmentFlag ? 1 : 0);
+            }
+            else {
+                fhSize += 1 << frameContentSizeFlag;
+            }
+            // add size of window descriptor
+            fhSize += (singleSegmentFlag ? 0 : 1);
+            // add size of dictionary id
+            fhSize += (1 << dictionaryIdFlag) >> 1;
+            if (fhSize > inputAvailable()) {
+                readMoreInput();
+                // retry from start
+                return false;
+            }
+            inputPosition += SIZE_OF_INT;
+            curHeader = readFrameHeader();
+            inputPosition += fhSize - SIZE_OF_INT;
+            startFrame();
+            return true;
+        }
+        else {
+            throw fail(curInputFilePosition(), "Invalid magic prefix: " + magic);
+        }
+    }
+
+    // Initializes per-frame state after a header has been parsed: resets the
+    // output buffer, derives the window and block size limits (rejecting frames
+    // larger than MAX_WINDOW_SIZE), and sets up the content checksum hasher.
+    private void startFrame()
+    {
+        blockDecompressor = new ZstdBlockDecompressor(curHeader);
+        check(outputPosition == outputEnd, "orphan output present");
+        outputPosition = 0;
+        outputEnd = 0;
+        if (singleSegmentFlag) {
+            // single-segment frames must fit entirely in the output buffer
+            if (curHeader.contentSize > MAX_WINDOW_SIZE) {
+                throw fail(curInputFilePosition(), "Single segment too large: " + curHeader.contentSize);
+            }
+            windowSize = (int) curHeader.contentSize;
+            blockMaximumSize = windowSize;
+            ensureOutputSpace(windowSize);
+        }
+        else {
+            if (curHeader.windowSize > MAX_WINDOW_SIZE) {
+                throw fail(curInputFilePosition(), "Window size too large: " + curHeader.windowSize);
+            }
+            windowSize = curHeader.windowSize;
+            blockMaximumSize = Math.min(windowSize, MAX_BLOCK_SIZE);
+            // room for one block plus a full window of history for back-references
+            ensureOutputSpace(blockMaximumSize + windowSize);
+        }
+        if (contentChecksumFlag) {
+            hasher = new XxHash64();
+        }
+    }
+
+    // Ensures a complete block (header plus payload, and the trailing checksum
+    // bytes when the frame has one) is buffered. Returns false when more input
+    // must be read first (the caller retries from the top).
+    private boolean ensureGotBlock() throws IOException
+    {
+        check(curHeader != null, "no current frame");
+        if (curBlockType == -1) {
+            // must have a block now
+            if (inputAvailable() < SIZE_OF_BLOCK_HEADER) {
+                readMoreInput();
+                // retry from start
+                return false;
+            }
+            // 3-byte little-endian block header: 1 bit last, 2 bits type, 21 bits size
+            int blkHeader = nextByte() | nextByte() << 8 | nextByte() << 16;
+            lastBlock = (blkHeader & 0b001) != 0;
+            curBlockType = (blkHeader & 0b110) >> 1;
+            curBlockSize = blkHeader >> 3;
+            ensureInputSpace(curBlockSize + SIZE_OF_INT);
+        }
+        if (inputAvailable() < curBlockSize + (contentChecksumFlag ? SIZE_OF_INT : 0)) {
+            readMoreInput();
+            // retry from start
+            return false;
+        }
+        return true;
+    }
+
+ int nextByte()
+ {
+ int r = 0xFF & inputBuffer[inputPosition];
+ inputPosition++;
+ return r;
+ }
+
+ long inputAddress()
+ {
+ return ARRAY_BYTE_BASE_OFFSET + inputPosition;
+ }
+
+ long inputLimit()
+ {
+ return ARRAY_BYTE_BASE_OFFSET + inputEnd;
+ }
+
+ long outputAddress()
+ {
+ return ARRAY_BYTE_BASE_OFFSET + outputEnd;
+ }
+
+ long outputLimit()
+ {
+ return ARRAY_BYTE_BASE_OFFSET + outputBuffer.length;
+ }
+
+ int decodeRaw()
+ {
+ check(inputAddress() + curBlockSize <= inputLimit(), "Not enough input bytes");
+ check(outputAddress() + curBlockSize <= outputLimit(), "Not enough output space");
+ return ZstdBlockDecompressor.decodeRawBlock(inputBuffer, inputAddress(), curBlockSize, outputBuffer, outputAddress(), outputLimit());
+ }
+
+ int decodeRle()
+ {
+ check(inputAddress() + 1 <= inputLimit(), "Not enough input bytes");
+ check(outputAddress() + curBlockSize <= outputLimit(), "Not enough output space");
+ return ZstdBlockDecompressor.decodeRleBlock(curBlockSize, inputBuffer, inputAddress(), outputBuffer, outputAddress(), outputLimit());
+ }
+
+ int decodeCompressed()
+ {
+ check(inputAddress() + curBlockSize <= inputLimit(), "Not enough input bytes");
+ check(outputAddress() + blockMaximumSize <= outputLimit(), "Not enough output space");
+ return blockDecompressor.decodeCompressedBlock(
+ inputBuffer, inputAddress(),
+ curBlockSize,
+ outputBuffer, outputAddress(), outputLimit(),
+ windowSize, ARRAY_BYTE_BASE_OFFSET);
+ }
+
+ private void decompressBlock()
+ {
+ check(outputPosition == outputEnd, "orphan output present");
+ switch (curBlockType) {
+ case RAW_BLOCK:
+ ensureOutputSpace(curBlockSize);
+ outputEnd += decodeRaw();
+ inputPosition += curBlockSize;
+ break;
+ case RLE_BLOCK:
+ ensureOutputSpace(curBlockSize);
+ outputEnd += decodeRle();
+ inputPosition += 1;
+ break;
+ case COMPRESSED_BLOCK:
+ check(curBlockSize < blockMaximumSize, "compressed block must be smaller than Block_Maximum_Size");
+ ensureOutputSpace(blockMaximumSize);
+ outputEnd += decodeCompressed();
+ inputPosition += curBlockSize;
+ break;
+ default:
+ throw fail(curInputFilePosition(), "Invalid block type " + curBlockType);
+ }
+ if (contentChecksumFlag) {
+ hasher.update(outputBuffer, outputPosition, outputAvailable());
+ }
+ curBlockType = -1;
+ if (lastBlock) {
+ curHeader = null;
+ blockDecompressor = null;
+ if (contentChecksumFlag) {
+ check(inputAvailable() >= SIZE_OF_INT, "missing checksum data");
+ long hash = hasher.hash();
+ int checksum = inputBB().getInt();
+ if (checksum != (int) hash) {
+ throw fail(curInputFilePosition(), String.format("Bad checksum. Expected: %s, actual: %s", Integer.toHexString(checksum), Integer.toHexString((int) hash)));
+ }
+ inputPosition += SIZE_OF_INT;
+ hasher = null;
+ }
+ }
+ }
+
+ private int inputAvailable()
+ {
+ return inputEnd - inputPosition;
+ }
+
+ private int inputSpace()
+ {
+ return inputBuffer.length - inputEnd;
+ }
+
+ private long curInputFilePosition()
+ {
+ return evictedInput + inputPosition;
+ }
+
+ private void ensureInputSpace(int size)
+ {
+ if (inputSpace() < size) {
+ if (size < inputPosition) {
+ System.arraycopy(inputBuffer, inputPosition, inputBuffer, 0, inputAvailable());
+ }
+ else {
+ int newSize = (inputBuffer.length + size + DEFAULT_BUFFER_SIZE) & BUFFER_SIZE_MASK;
+ byte[] newBuf = new byte[newSize];
+ System.arraycopy(inputBuffer, inputPosition, newBuf, 0, inputAvailable());
+ inputBuffer = newBuf;
+ }
+ evictedInput += inputPosition;
+ inputEnd = inputAvailable();
+ inputPosition = 0;
+ }
+ }
+
+ private int outputAvailable()
+ {
+ return outputEnd - outputPosition;
+ }
+
+ private int outputSpace()
+ {
+ return outputBuffer.length - outputEnd;
+ }
+
+ private void ensureOutputSpace(int size)
+ {
+ if (outputSpace() < size) {
+ check(outputAvailable() == 0, "logic error");
+ byte[] newBuf;
+ if (windowSize * 4 + size < outputPosition) {
+ // plenty space in old buffer
+ newBuf = outputBuffer;
+ }
+ else {
+ int newSize = (outputBuffer.length
+ + windowSize * 4
+ + size
+ + DEFAULT_BUFFER_SIZE) & BUFFER_SIZE_MASK;
+ newBuf = new byte[newSize];
+ }
+ // keep up to one window of old data
+ int sizeToKeep = Math.min(outputPosition, windowSize);
+ System.arraycopy(outputBuffer, outputPosition - sizeToKeep, newBuf, 0, sizeToKeep);
+ outputBuffer = newBuf;
+ outputEnd = sizeToKeep;
+ outputPosition = sizeToKeep;
+ }
+ }
+
+ private void throwIfClosed() throws IOException
+ {
+ if (isClosed) {
+ throw new IOException("Input stream is already closed");
+ }
+ }
+
+ private FrameHeader readFrameHeader()
+ {
+ long base = ARRAY_BYTE_BASE_OFFSET + inputPosition;
+ long limit = ARRAY_BYTE_BASE_OFFSET + inputEnd;
+ return ZstdFrameDecompressor.readFrameHeader(inputBuffer, base, limit);
+ }
+}
diff --git a/application/pom.xml b/application/pom.xml
index 8f9dc7999a0..d1aea1d6a78 100644
--- a/application/pom.xml
+++ b/application/pom.xml
@@ -53,7 +53,6 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>vespajlib</artifactId>
<version>${project.version}</version>
- <scope>compile</scope>
</dependency>
<!-- All dependencies that should be visible in test classpath, but not compile classpath,
@@ -64,11 +63,6 @@
<artifactId>icu4j</artifactId>
</dependency>
<dependency>
- <groupId>io.airlift</groupId>
- <artifactId>aircompressor</artifactId>
- <scope>compile</scope>
- </dependency>
- <dependency>
<groupId>org.antlr</groupId>
<artifactId>antlr-runtime</artifactId>
</dependency>
diff --git a/client/go/cmd/document.go b/client/go/cmd/document.go
index b2f71121d1a..9fe1492fed9 100644
--- a/client/go/cmd/document.go
+++ b/client/go/cmd/document.go
@@ -200,7 +200,7 @@ func printResult(cli *CLI, result util.OperationResult, payloadOnlyOnSuccess boo
if !result.Success {
fmt.Fprintln(out, color.RedString("Error:"), result.Message)
- } else if !(payloadOnlyOnSuccess && result.Payload != "") {
+ } else if !payloadOnlyOnSuccess || result.Payload == "" {
fmt.Fprintln(out, color.GreenString("Success:"), result.Message)
}
diff --git a/client/go/jvm/env.go b/client/go/jvm/env.go
index a23606e6d0d..dbc957e3ccb 100644
--- a/client/go/jvm/env.go
+++ b/client/go/jvm/env.go
@@ -15,10 +15,13 @@ import (
func (opts *Options) exportEnvSettings(ps *prog.Spec) {
c := opts.container
vespaHome := defaults.VespaHome()
- vlt := fmt.Sprintf("file:%s/logs/vespa/vespa.log", vespaHome)
+ lvd := fmt.Sprintf("%s/logs/vespa", vespaHome)
+ vlt := fmt.Sprintf("file:%s/vespa.log", lvd)
lcd := fmt.Sprintf("%s/var/db/vespa/logcontrol", vespaHome)
lcf := fmt.Sprintf("%s/%s.logcontrol", lcd, c.ServiceName())
dlp := fmt.Sprintf("%s/lib64", vespaHome)
+ opts.fixSpec.FixDir(lvd)
+ opts.fixSpec.FixDir(lcd)
ps.Setenv(envvars.VESPA_LOG_TARGET, vlt)
ps.Setenv(envvars.VESPA_LOG_CONTROL_DIR, lcd)
ps.Setenv(envvars.VESPA_LOG_CONTROL_FILE, lcf)
diff --git a/client/go/vespa/document.go b/client/go/vespa/document.go
index e08208eba6d..e8620c59033 100644
--- a/client/go/vespa/document.go
+++ b/client/go/vespa/document.go
@@ -7,6 +7,7 @@ package vespa
import (
"bytes"
"encoding/json"
+ "fmt"
"io"
"net/http"
"net/url"
@@ -70,7 +71,9 @@ func sendOperation(documentId string, jsonFile string, service *Service, operati
}
var doc map[string]interface{}
- json.Unmarshal(documentData, &doc)
+ if err := json.Unmarshal(documentData, &doc); err != nil {
+ return util.Failure(fmt.Sprintf("Document is not valid JSON: %s", err))
+ }
operationInFile := operationIn(doc)
if operation == anyOperation { // Operation is decided by file content
diff --git a/client/js/app/yarn.lock b/client/js/app/yarn.lock
index febfd8e32bd..c25724fdbc4 100644
--- a/client/js/app/yarn.lock
+++ b/client/js/app/yarn.lock
@@ -3512,9 +3512,9 @@ json-stable-stringify-without-jsonify@^1.0.1:
integrity sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==
json5@^1.0.1:
- version "1.0.1"
- resolved "https://registry.yarnpkg.com/json5/-/json5-1.0.1.tgz#779fb0018604fa854eacbf6252180d83543e3dbe"
- integrity sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==
+ version "1.0.2"
+ resolved "https://registry.yarnpkg.com/json5/-/json5-1.0.2.tgz#63d98d60f21b313b77c4d6da18bfa69d80e1d593"
+ integrity sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==
dependencies:
minimist "^1.2.0"
@@ -3692,9 +3692,9 @@ minimatch@^3.0.4, minimatch@^3.1.1, minimatch@^3.1.2:
brace-expansion "^1.1.7"
minimist@^1.1.1, minimist@^1.2.0, minimist@^1.2.6:
- version "1.2.6"
- resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.6.tgz#8637a5b759ea0d6e98702cfb3a9283323c93af44"
- integrity sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==
+ version "1.2.7"
+ resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.7.tgz#daa1c4d91f507390437c6a8bc01078e7000c4d18"
+ integrity sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g==
mixin-deep@^1.2.0:
version "1.3.2"
diff --git a/cloud-tenant-base-dependencies-enforcer/pom.xml b/cloud-tenant-base-dependencies-enforcer/pom.xml
index 5fa5f9ff9eb..e6445943107 100644
--- a/cloud-tenant-base-dependencies-enforcer/pom.xml
+++ b/cloud-tenant-base-dependencies-enforcer/pom.xml
@@ -24,9 +24,10 @@
<bouncycastle.version>1.72</bouncycastle.version>
<commons-codec.version>1.15</commons-codec.version>
<felix.version>7.0.1</felix.version>
- <httpclient5.version>5.1.3</httpclient5.version>
- <httpclient.version>4.5.13</httpclient.version>
- <httpcore.version>4.4.13</httpcore.version>
+ <httpclient5.version>5.1.4</httpclient5.version>
+ <httpcore5.version>5.1.5</httpcore5.version>
+ <httpclient.version>4.5.14</httpclient.version>
+ <httpcore.version>4.4.16</httpcore.version>
<junit5.version>5.8.1</junit5.version> <!-- TODO: in parent this is named 'junit.version' -->
<onnxruntime.version>1.12.1</onnxruntime.version>
<!-- END parent/pom.xml -->
@@ -147,6 +148,7 @@
<include>com.yahoo.vespa:vespalog:*:provided</include>
<!-- Vespa test dependencies -->
+ <include>com.yahoo.vespa:airlift-zstd:*:test</include>
<include>com.yahoo.vespa:application:*:test</include>
<include>com.yahoo.vespa:cloud-tenant-cd:*:test</include>
<include>com.yahoo.vespa:config-application-package:*:test</include>
@@ -178,7 +180,6 @@
<include>com.microsoft.onnxruntime:onnxruntime:jar:${onnxruntime.version}:test</include>
<include>com.thaiopensource:jing:20091111:test</include>
<include>commons-codec:commons-codec:${commons-codec.version}:test</include>
- <include>io.airlift:aircompressor:0.21:test</include>
<include>io.airlift:airline:0.9:test</include>
<include>io.prometheus:simpleclient:0.6.0:test</include>
<include>io.prometheus:simpleclient_common:0.6.0:test</include>
@@ -192,8 +193,8 @@
<include>org.apache.felix:org.apache.felix.framework:${felix.version}:test</include>
<include>org.apache.felix:org.apache.felix.log:1.0.1:test</include>
<include>org.apache.httpcomponents.client5:httpclient5:${httpclient5.version}:test</include>
- <include>org.apache.httpcomponents.core5:httpcore5:${httpclient5.version}:test</include>
- <include>org.apache.httpcomponents.core5:httpcore5-h2:${httpclient5.version}:test</include>
+ <include>org.apache.httpcomponents.core5:httpcore5:${httpcore5.version}:test</include>
+ <include>org.apache.httpcomponents.core5:httpcore5-h2:${httpcore5.version}:test</include>
<include>org.apache.httpcomponents:httpclient:${httpclient.version}:test</include>
<include>org.apache.httpcomponents:httpcore:${httpcore.version}:test</include>
<include>org.apache.httpcomponents:httpmime:${httpclient.version}:test</include>
diff --git a/clustercontroller-apps/src/main/java/com/yahoo/vespa/clustercontroller/apps/clustercontroller/ClusterController.java b/clustercontroller-apps/src/main/java/com/yahoo/vespa/clustercontroller/apps/clustercontroller/ClusterController.java
index 4395240acd3..1ac870c45de 100644
--- a/clustercontroller-apps/src/main/java/com/yahoo/vespa/clustercontroller/apps/clustercontroller/ClusterController.java
+++ b/clustercontroller-apps/src/main/java/com/yahoo/vespa/clustercontroller/apps/clustercontroller/ClusterController.java
@@ -53,10 +53,9 @@ public class ClusterController extends AbstractComponent
synchronized (controllers) {
FleetController controller = controllers.get(options.clusterName());
if (controller == null) {
- StatusHandler.ContainerStatusPageServer statusPageServer = new StatusHandler.ContainerStatusPageServer();
- controller = FleetController.create(options, statusPageServer, metricWrapper);
+ controller = FleetController.create(options, metricWrapper);
controllers.put(options.clusterName(), controller);
- status.put(options.clusterName(), statusPageServer);
+ status.put(options.clusterName(), controller.statusPageServer());
} else {
controller.updateOptions(options);
}
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java
index 3a9ebde9da1..2952d3bf9ee 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java
@@ -21,9 +21,9 @@ import com.yahoo.vespa.clustercontroller.core.status.ClusterStateRequestHandler;
import com.yahoo.vespa.clustercontroller.core.status.LegacyIndexPageRequestHandler;
import com.yahoo.vespa.clustercontroller.core.status.LegacyNodePageRequestHandler;
import com.yahoo.vespa.clustercontroller.core.status.NodeHealthRequestHandler;
+import com.yahoo.vespa.clustercontroller.core.status.StatusHandler;
import com.yahoo.vespa.clustercontroller.core.status.statuspage.StatusPageResponse;
import com.yahoo.vespa.clustercontroller.core.status.statuspage.StatusPageServer;
-import com.yahoo.vespa.clustercontroller.core.status.statuspage.StatusPageServerInterface;
import com.yahoo.vespa.clustercontroller.utils.util.MetricReporter;
import java.io.FileNotFoundException;
import java.time.Duration;
@@ -35,7 +35,6 @@ import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
-import java.util.Objects;
import java.util.Queue;
import java.util.Set;
import java.util.TimeZone;
@@ -61,7 +60,7 @@ public class FleetController implements NodeListener, SlobrokListener, SystemSta
private final StateChangeHandler stateChangeHandler;
private final SystemStateBroadcaster systemStateBroadcaster;
private final StateVersionTracker stateVersionTracker;
- private final StatusPageServerInterface statusPageServer;
+ private final StatusHandler.ContainerStatusPageServer statusPageServer;
private final RpcServer rpcServer;
private final DatabaseHandler database;
private final MasterElectionHandler masterElectionHandler;
@@ -106,7 +105,6 @@ public class FleetController implements NodeListener, SlobrokListener, SystemSta
ContentCluster cluster,
NodeStateGatherer nodeStateGatherer,
Communicator communicator,
- StatusPageServerInterface statusPage,
RpcServer server,
NodeLookup nodeLookup,
DatabaseHandler database,
@@ -130,7 +128,7 @@ public class FleetController implements NodeListener, SlobrokListener, SystemSta
this.systemStateBroadcaster = systemStateBroadcaster;
this.stateVersionTracker = new StateVersionTracker(options.minMergeCompletionRatio());
this.metricUpdater = metricUpdater;
- this.statusPageServer = Objects.requireNonNull(statusPage, "statusPage cannot be null");
+ this.statusPageServer = new StatusHandler.ContainerStatusPageServer();
this.rpcServer = server;
this.masterElectionHandler = masterElectionHandler;
this.statusRequestRouter.addHandler(
@@ -150,9 +148,7 @@ public class FleetController implements NodeListener, SlobrokListener, SystemSta
propagateOptions();
}
- public static FleetController create(FleetControllerOptions options,
- StatusPageServerInterface statusPageServer,
- MetricReporter metricReporter) throws Exception {
+ public static FleetController create(FleetControllerOptions options, MetricReporter metricReporter) throws Exception {
var context = new FleetControllerContextImpl(options);
var timer = new RealTimer();
var metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex(), options.clusterName());
@@ -173,7 +169,7 @@ public class FleetController implements NodeListener, SlobrokListener, SystemSta
var stateBroadcaster = new SystemStateBroadcaster(context, timer, timer);
var masterElectionHandler = new MasterElectionHandler(context, options.fleetControllerIndex(), options.fleetControllerCount(), timer, timer);
var controller = new FleetController(context, timer, log, cluster, stateGatherer, communicator,
- statusPageServer, null, lookUp, database, stateGenerator,
+ null, lookUp, database, stateGenerator,
stateBroadcaster, masterElectionHandler, metricUpdater, options);
controller.start();
return controller;
@@ -1215,4 +1211,6 @@ public class FleetController implements NodeListener, SlobrokListener, SystemSta
return eventLog;
}
+ public StatusHandler.ContainerStatusPageServer statusPageServer() { return statusPageServer; }
+
}
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeLookup.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeLookup.java
index 882ae8894fa..b0e7cafd396 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeLookup.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeLookup.java
@@ -12,11 +12,4 @@ public interface NodeLookup {
boolean updateCluster(ContentCluster cluster, SlobrokListener listener);
- /**
- * Returns whether the lookup instance has been able to bootstrap itself with information about nodes.
- *
- * Calling updateCluster() _before_ isReady has returned true may not provide any useful data.
- */
- boolean isReady();
-
}
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RpcServer.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RpcServer.java
index dcef432aec0..5e740c5f03c 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RpcServer.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RpcServer.java
@@ -4,6 +4,7 @@ package com.yahoo.vespa.clustercontroller.core.rpc;
import com.yahoo.jrt.Acceptor;
import com.yahoo.jrt.ErrorCode;
import com.yahoo.jrt.Int32Value;
+import com.yahoo.jrt.ListenFailedException;
import com.yahoo.jrt.Method;
import com.yahoo.jrt.Request;
import com.yahoo.jrt.Spec;
@@ -23,7 +24,6 @@ import com.yahoo.vdslib.state.State;
import com.yahoo.vespa.clustercontroller.core.ContentCluster;
import com.yahoo.vespa.clustercontroller.core.MasterElectionHandler;
import com.yahoo.vespa.clustercontroller.core.NodeInfo;
-import com.yahoo.vespa.clustercontroller.core.Timer;
import com.yahoo.vespa.clustercontroller.core.listeners.NodeListener;
import java.io.PrintWriter;
import java.io.StringWriter;
@@ -39,7 +39,6 @@ public class RpcServer {
private static final Logger log = Logger.getLogger(RpcServer.class.getName());
- private final Timer timer;
private final Object monitor;
private final String clusterName;
private final int fleetControllerIndex;
@@ -51,11 +50,8 @@ public class RpcServer {
private final List<Request> rpcRequests = new LinkedList<>();
private MasterElectionHandler masterHandler;
private final BackOffPolicy slobrokBackOffPolicy;
- private long lastConnectErrorTime = 0;
- private String lastConnectError = "";
- public RpcServer(Timer timer, Object monitor, String clusterName, int fleetControllerIndex, BackOffPolicy bop) {
- this.timer = timer;
+ public RpcServer(Object monitor, String clusterName, int fleetControllerIndex, BackOffPolicy bop) {
this.monitor = monitor;
this.clusterName = clusterName;
this.fleetControllerIndex = fleetControllerIndex;
@@ -99,13 +95,8 @@ public class RpcServer {
log.log(Level.FINE, () -> "Fleetcontroller " + fleetControllerIndex + ": RPC server attempting to bind to port " + port);
try {
acceptor = supervisor.listen(new Spec(port));
- } catch (Exception e) {
- long time = timer.getCurrentTimeInMillis();
- if (!e.getMessage().equals(lastConnectError) || time - lastConnectErrorTime > 60 * 1000) {
- lastConnectError = e.getMessage();
- lastConnectErrorTime = time;
- log.log(Level.WARNING, "Failed to bind or initialize RPC server socket: " + e.getMessage());
- }
+ } catch (ListenFailedException e) {
+ throw new RuntimeException(e);
}
log.log(Level.FINE, () -> "Fleetcontroller " + fleetControllerIndex + ": RPC server listening to port " + acceptor.port());
SlobrokList slist = new SlobrokList();
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/SlobrokClient.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/SlobrokClient.java
index d87e0e20908..559690e99e2 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/SlobrokClient.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/SlobrokClient.java
@@ -80,11 +80,6 @@ public class SlobrokClient implements NodeLookup {
public Mirror getMirror() { return mirror; }
@Override
- public boolean isReady() {
- return mirror != null && mirror.ready();
- }
-
- @Override
public boolean updateCluster(ContentCluster cluster, SlobrokListener listener) {
if (mirror == null) return false;
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/StatusHandler.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/StatusHandler.java
index 79e6a91f561..302832e4542 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/StatusHandler.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/StatusHandler.java
@@ -3,11 +3,9 @@ package com.yahoo.vespa.clustercontroller.core.status;
import com.yahoo.vespa.clustercontroller.core.status.statuspage.StatusPageResponse;
import com.yahoo.vespa.clustercontroller.core.status.statuspage.StatusPageServer;
-import com.yahoo.vespa.clustercontroller.core.status.statuspage.StatusPageServerInterface;
import com.yahoo.vespa.clustercontroller.utils.communication.http.HttpRequest;
import com.yahoo.vespa.clustercontroller.utils.communication.http.HttpRequestHandler;
import com.yahoo.vespa.clustercontroller.utils.communication.http.HttpResult;
-
import java.io.IOException;
import java.io.StringWriter;
import java.nio.charset.StandardCharsets;
@@ -27,7 +25,7 @@ public class StatusHandler implements HttpRequestHandler {
}
- public static class ContainerStatusPageServer implements StatusPageServerInterface {
+ public static class ContainerStatusPageServer {
StatusPageServer.HttpRequest request;
StatusPageResponse response;
@@ -36,13 +34,9 @@ public class StatusHandler implements HttpRequestHandler {
// Lock safety with fleetcontroller. Wait until completion
private final Object answerMonitor = new Object();
- @Override
public int getPort() { return 0; }
- @Override
public void shutdown() throws InterruptedException, IOException {}
- @Override
public void setPort(int port) {}
- @Override
public StatusPageServer.HttpRequest getCurrentHttpRequest() {
synchronized (answerMonitor) {
StatusPageServer.HttpRequest r = request;
@@ -50,7 +44,6 @@ public class StatusHandler implements HttpRequestHandler {
return r;
}
}
- @Override
public void answerCurrentStatusRequest(StatusPageResponse r) {
synchronized (answerMonitor) {
response = r;
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/statuspage/StatusPageServerInterface.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/statuspage/StatusPageServerInterface.java
deleted file mode 100644
index a06d069c59d..00000000000
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/statuspage/StatusPageServerInterface.java
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.clustercontroller.core.status.statuspage;
-
-public interface StatusPageServerInterface {
-
- int getPort();
- void shutdown() throws InterruptedException, java.io.IOException;
- void setPort(int port) throws java.io.IOException, InterruptedException;
- StatusPageServer.HttpRequest getCurrentHttpRequest();
- void answerCurrentStatusRequest(StatusPageResponse r);
-
-}
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFeedBlockTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFeedBlockTest.java
index 0485bd80aa0..413c8e7414c 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFeedBlockTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFeedBlockTest.java
@@ -7,7 +7,6 @@ import com.yahoo.vdslib.state.NodeType;
import com.yahoo.vdslib.state.State;
import com.yahoo.vespa.clustercontroller.core.database.DatabaseHandler;
import com.yahoo.vespa.clustercontroller.core.database.ZooKeeperDatabaseFactory;
-import com.yahoo.vespa.clustercontroller.core.status.StatusHandler;
import com.yahoo.vespa.clustercontroller.utils.util.NoMetricReporter;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
@@ -51,8 +50,7 @@ public class ClusterFeedBlockTest extends FleetControllerTest {
var stateGenerator = new StateChangeHandler(context, timer, eventLog);
var stateBroadcaster = new SystemStateBroadcaster(context, timer, timer);
var masterElectionHandler = new MasterElectionHandler(context, options.fleetControllerIndex(), options.fleetControllerCount(), timer, timer);
- var status = new StatusHandler.ContainerStatusPageServer();
- ctrl = new FleetController(context, timer, eventLog, cluster, stateGatherer, communicator, status, null, communicator, database,
+ ctrl = new FleetController(context, timer, eventLog, cluster, stateGatherer, communicator, null, communicator, database,
stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options);
ctrl.tick();
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DatabaseTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DatabaseTest.java
index 666b4f63801..a66294851b8 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DatabaseTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DatabaseTest.java
@@ -1,12 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.clustercontroller.core;
-import com.yahoo.jrt.ErrorCode;
-import com.yahoo.jrt.Request;
-import com.yahoo.jrt.Spec;
-import com.yahoo.jrt.StringValue;
import com.yahoo.jrt.Supervisor;
-import com.yahoo.jrt.Target;
import com.yahoo.jrt.Transport;
import com.yahoo.vdslib.state.Node;
import com.yahoo.vdslib.state.NodeState;
@@ -21,7 +16,6 @@ import java.util.TreeMap;
import java.util.logging.Logger;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertTrue;
@ExtendWith(CleanupZookeeperLogsOnSuccess.class)
public class DatabaseTest extends FleetControllerTest {
@@ -156,18 +150,8 @@ public class DatabaseTest extends FleetControllerTest {
}
}
- // Note: different semantics than FleetControllerTest.setWantedState
private void setWantedState(Node n, NodeState ns, Map<Node, NodeState> wantedStates) {
- int rpcPort = fleetController().getRpcPort();
- Target connection = supervisor.connect(new Spec("localhost", rpcPort));
- assertTrue(connection.isValid());
-
- Request req = new Request("setNodeState");
- req.parameters().add(new StringValue("storage/cluster.mycluster/" + n.getType().toString() + "/" + n.getIndex()));
- req.parameters().add(new StringValue(ns.serialize(true)));
- connection.invokeSync(req, timeout());
- assertEquals(ErrorCode.NONE, req.errorCode(), req.toString());
- assertTrue(req.checkReturnTypes("s"), req.toString());
+ setWantedState(ns, ns.getDescription(), "storage/cluster.mycluster/" + n.getType().toString() + "/" + n.getIndex(), supervisor);
wantedStates.put(n, ns);
}
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DummyCommunicator.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DummyCommunicator.java
index 3127201a342..b82d4a135ae 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DummyCommunicator.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DummyCommunicator.java
@@ -154,8 +154,4 @@ public class DummyCommunicator implements Communicator, NodeLookup {
return false;
}
- @Override
- public boolean isReady() {
- return true;
- }
}
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DummyVdsNode.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DummyVdsNode.java
index 36fe5321788..f2494ea0301 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DummyVdsNode.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DummyVdsNode.java
@@ -31,7 +31,6 @@ import java.util.Optional;
import java.util.TreeMap;
import java.util.logging.Level;
import java.util.logging.Logger;
-import java.util.stream.Collectors;
/**
*
@@ -63,11 +62,11 @@ public class DummyVdsNode {
static class Req {
Request request;
- long timeout;
+ long timeToReply;
- Req(Request r, long timeout) {
+ Req(Request r, long timeToReply) {
request = r;
- this.timeout = timeout;
+ this.timeToReply = timeToReply;
}
}
static class BackOff implements BackOffPolicy {
@@ -76,6 +75,8 @@ public class DummyVdsNode {
public boolean shouldWarn(double v) { return false; }
public boolean shouldInform(double v) { return false; }
}
+
+ /** List of requests that should be replied to after a specified time */
private final List<Req> waitingRequests = new LinkedList<>();
/**
@@ -94,7 +95,7 @@ public class DummyVdsNode {
long currentTime = timer.getCurrentTimeInMillis();
for (Iterator<Req> it = waitingRequests.iterator(); it.hasNext(); ) {
Req r = it.next();
- if (r.timeout <= currentTime) {
+ if (currentTime >= r.timeToReply) {
log.log(Level.FINE, () -> "Dummy node " + DummyVdsNode.this + ": Responding to node state request at time " + currentTime);
r.request.returnValues().add(new StringValue(nodeState.serialize()));
if (r.request.methodName().equals("getnodestate3")) {
@@ -294,10 +295,11 @@ public class DummyVdsNode {
m.returnDesc(0, "returnCode", "Returncode of request. Should be 0 = OK");
supervisor.addMethod(m);
- m = new Method("getnodestate3", "sii", "ss", this::rpc_getNodeState2);
+ m = new Method("getnodestate3", "sii", "ss", this::rpc_getNodeState3);
m.methodDesc("Get nodeState of a node, answer when state changes from given state.");
m.paramDesc(0, "nodeStateIn", "The node state of the given node");
m.paramDesc(1, "timeout", "Time timeout in milliseconds set by the state requester.");
+ m.paramDesc(2, "index", "Node index.");
m.returnDesc(0, "nodeStateOut", "The node state of the given node");
m.returnDesc(1, "hostinfo", "Information on the host node is running on");
supervisor.addMethod(m);
@@ -339,31 +341,27 @@ public class DummyVdsNode {
return false;
}
- private void rpc_getNodeState2(Request req) {
+ private void rpc_getNodeState3(Request req) {
log.log(Level.FINE, () -> "Dummy node " + this + ": Got " + req.methodName() + " request");
try{
String oldState = req.parameters().get(0).asString();
int timeout = req.parameters().get(1).asInt32();
- int index = -1;
- if (req.parameters().size() > 2) {
- index = req.parameters().get(2).asInt32();
- }
+ int index = req.parameters().get(2).asInt32();
synchronized(timer) {
boolean sentReply = sendGetNodeStateReply(index);
NodeState givenState = (oldState.equals("unknown") ? null : NodeState.deserialize(type, oldState));
if (givenState != null && (givenState.equals(nodeState) || sentReply)) {
- log.log(Level.FINE, () -> "Dummy node " + this + ": Has same state as reported " + givenState + ". Queing request. Timeout is " + timeout + " ms. "
- + "Will be answered at time " + (timer.getCurrentTimeInMillis() + timeout * 800L / 1000));
+ long timeToReply = timer.getCurrentTimeInMillis() + timeout * 800L / 1000;
+ log.log(Level.FINE, () -> "Dummy node " + this + " has same state as reported (" + givenState + "). Queuing request. Timeout is " + timeout + " ms. "
+ + "Will be answered at time " + timeToReply);
req.detach();
- waitingRequests.add(new Req(req, timer.getCurrentTimeInMillis() + timeout * 800L / 1000));
- log.log(Level.FINE, () -> "Dummy node " + this + " has now " + waitingRequests.size() + " entries and is " + (waitingRequests.isEmpty() ? "empty" : "not empty"));
+ waitingRequests.add(new Req(req, timeToReply));
+ log.log(Level.FINE, () -> "Dummy node " + this + " has " + waitingRequests.size() + " requests waiting to be answered");
timer.notifyAll();
} else {
log.log(Level.FINE, () -> "Dummy node " + this + ": Request had " + (givenState == null ? "no state" : "different state(" + givenState +")") + ". Answering with " + nodeState);
req.returnValues().add(new StringValue(nodeState.serialize()));
- if (req.methodName().equals("getnodestate3")) {
- req.returnValues().add(new StringValue(hostInfo));
- }
+ req.returnValues().add(new StringValue(hostInfo));
++immediateStateReplies;
}
}
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerTest.java
index e8b56cbc142..aa49074ed4a 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerTest.java
@@ -19,7 +19,6 @@ import com.yahoo.vespa.clustercontroller.core.database.ZooKeeperDatabaseFactory;
import com.yahoo.vespa.clustercontroller.core.rpc.RPCCommunicator;
import com.yahoo.vespa.clustercontroller.core.rpc.RpcServer;
import com.yahoo.vespa.clustercontroller.core.rpc.SlobrokClient;
-import com.yahoo.vespa.clustercontroller.core.status.StatusHandler;
import com.yahoo.vespa.clustercontroller.core.testutils.WaitCondition;
import com.yahoo.vespa.clustercontroller.core.testutils.WaitTask;
import com.yahoo.vespa.clustercontroller.core.testutils.Waiter;
@@ -126,7 +125,7 @@ public abstract class FleetControllerTest implements Waiter {
options.nodeStateRequestTimeoutLatestPercentage(),
options.nodeStateRequestRoundTripTimeMaxSeconds());
var lookUp = new SlobrokClient(context, timer, new String[0]);
- var rpcServer = new RpcServer(timer, timer, options.clusterName(), options.fleetControllerIndex(), options.slobrokBackOffPolicy());
+ var rpcServer = new RpcServer(timer, options.clusterName(), options.fleetControllerIndex(), options.slobrokBackOffPolicy());
var database = new DatabaseHandler(context, new ZooKeeperDatabaseFactory(context), timer, options.zooKeeperServerAddress(), timer);
// Setting this <1000 ms causes ECONNREFUSED on socket trying to connect to ZK server, in ZooKeeper,
@@ -138,8 +137,7 @@ public abstract class FleetControllerTest implements Waiter {
var stateBroadcaster = new SystemStateBroadcaster(context, timer, timer);
var masterElectionHandler = new MasterElectionHandler(context, options.fleetControllerIndex(), options.fleetControllerCount(), timer, timer);
- var status = new StatusHandler.ContainerStatusPageServer();
- var controller = new FleetController(context, timer, log, cluster, stateGatherer, communicator, status, rpcServer, lookUp,
+ var controller = new FleetController(context, timer, log, cluster, stateGatherer, communicator, rpcServer, lookUp,
database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options);
controller.start();
return controller;
@@ -298,12 +296,16 @@ public abstract class FleetControllerTest implements Waiter {
}
void setWantedState(DummyVdsNode node, State state, String reason, Supervisor supervisor) {
- NodeState ns = new NodeState(node.getType(), state);
- if (reason != null) ns.setDescription(reason);
+ setWantedState(new NodeState(node.getType(), state), reason, node.getSlobrokName(), supervisor);
+ }
+
+ void setWantedState(NodeState nodeState, String reason, String slobrokName, Supervisor supervisor) {
+ if (reason != null) nodeState.setDescription(reason);
Target connection = supervisor.connect(new Spec("localhost", fleetController().getRpcPort()));
+
Request req = new Request("setNodeState");
- req.parameters().add(new StringValue(node.getSlobrokName()));
- req.parameters().add(new StringValue(ns.serialize()));
+ req.parameters().add(new StringValue(slobrokName));
+ req.parameters().add(new StringValue(nodeState.serialize()));
connection.invokeSync(req, timeout());
if (req.isError()) {
fail("Failed to invoke setNodeState(): " + req.errorCode() + ": " + req.errorMessage());
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/RpcServerTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/RpcServerTest.java
index d120dc06c9f..e0f6546e410 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/RpcServerTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/RpcServerTest.java
@@ -64,7 +64,7 @@ public class RpcServerTest extends FleetControllerTest {
startingTest("RpcServerTest::testRebinding");
Slobrok slobrok = new Slobrok();
String[] slobrokConnectionSpecs = getSlobrokConnectionSpecs(slobrok);
- RpcServer server = new RpcServer(timer, new Object(), "mycluster", 0, new BackOff());
+ RpcServer server = new RpcServer(new Object(), "mycluster", 0, new BackOff());
server.setSlobrokConnectionSpecs(slobrokConnectionSpecs, 0);
int portUsed = server.getPort();
server.setSlobrokConnectionSpecs(slobrokConnectionSpecs, portUsed);
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeTest.java
index 5868ad723a4..7d2cc9b8df2 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeTest.java
@@ -9,7 +9,6 @@ import com.yahoo.vdslib.state.NodeType;
import com.yahoo.vdslib.state.State;
import com.yahoo.vespa.clustercontroller.core.database.DatabaseHandler;
import com.yahoo.vespa.clustercontroller.core.database.ZooKeeperDatabaseFactory;
-import com.yahoo.vespa.clustercontroller.core.status.StatusHandler;
import com.yahoo.vespa.clustercontroller.core.testutils.StateWaiter;
import com.yahoo.vespa.clustercontroller.utils.util.NoMetricReporter;
import org.junit.jupiter.api.Test;
@@ -52,8 +51,7 @@ public class StateChangeTest extends FleetControllerTest {
var stateGenerator = new StateChangeHandler(context, timer, eventLog);
var stateBroadcaster = new SystemStateBroadcaster(context, timer, timer);
var masterElectionHandler = new MasterElectionHandler(context, options.fleetControllerIndex(), options.fleetControllerCount(), timer, timer);
- var status = new StatusHandler.ContainerStatusPageServer();
- ctrl = new FleetController(context, timer, eventLog, cluster, stateGatherer, communicator, status, null, communicator, database,
+ ctrl = new FleetController(context, timer, eventLog, cluster, stateGatherer, communicator, null, communicator, database,
stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options);
ctrl.tick();
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
index 78f49f1e045..9a7ffc36f2d 100644
--- a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
@@ -111,7 +111,7 @@ public interface ModelContext {
@ModelFeatureFlag(owners = {"vekterli"}) default boolean useTwoPhaseDocumentGc() { return false; }
@ModelFeatureFlag(owners = {"tokle"}) default boolean useRestrictedDataPlaneBindings() { return false; }
@ModelFeatureFlag(owners = {"arnej","baldersheim"}) default boolean useOldJdiscContainerStartup() { return true; }
- @ModelFeatureFlag(owners = {"tokle, bjorncs"}) default boolean enableDataPlaneFilter() { return false; }
+ @ModelFeatureFlag(owners = {"tokle, bjorncs"}, removeAfter = "8.108") default boolean enableDataPlaneFilter() { return true; }
//Below are all flags that must be kept until 7 is out of the door
@ModelFeatureFlag(owners = {"vekterli"}, removeAfter="7.last") default boolean useThreePhaseUpdates() { return true; }
diff --git a/config-model-fat/pom.xml b/config-model-fat/pom.xml
index 2adcc450669..fa1a7558745 100644
--- a/config-model-fat/pom.xml
+++ b/config-model-fat/pom.xml
@@ -140,6 +140,7 @@
<i>com.yahoo.vespa:fat-model-dependencies:*:*</i>
<!-- Vespa artifacts embedded -->
+ <i>com.yahoo.vespa:airlift-zstd:*:*</i>
<i>com.yahoo.vespa:annotations:*:*</i>
<i>com.yahoo.vespa:component:*:*</i>
<i>com.yahoo.vespa:config:*:*</i>
@@ -204,7 +205,6 @@
<i>com.sun.xml.bind:jaxb-core:*:*</i>
<i>com.sun.xml.bind:jaxb-impl:*:*</i>
<i>com.thaiopensource:jing:*:*</i>
- <i>io.airlift:aircompressor:*:*</i>
<i>io.prometheus:simpleclient:*:*</i>
<i>io.prometheus:simpleclient_common:*:*</i>
<i>javax.inject:javax.inject:*:*</i>
diff --git a/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java b/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
index 328f1b19f10..6b8428a07ac 100644
--- a/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
+++ b/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
@@ -82,7 +82,6 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
private Architecture adminClusterNodeResourcesArchitecture = Architecture.getDefault();
private boolean useRestrictedDataPlaneBindings = false;
private Optional<CloudAccount> cloudAccount = Optional.empty();
- private boolean enableDataPlaneFilter = false;
@Override public ModelContext.FeatureFlags featureFlags() { return this; }
@Override public boolean multitenant() { return multitenant; }
@@ -138,7 +137,6 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
@Override public boolean useTwoPhaseDocumentGc() { return useTwoPhaseDocumentGc; }
@Override public boolean useRestrictedDataPlaneBindings() { return useRestrictedDataPlaneBindings; }
@Override public Optional<CloudAccount> cloudAccount() { return cloudAccount; }
- @Override public boolean enableDataPlaneFilter() { return enableDataPlaneFilter; }
public TestProperties sharedStringRepoNoReclaim(boolean sharedStringRepoNoReclaim) {
this.sharedStringRepoNoReclaim = sharedStringRepoNoReclaim;
@@ -368,11 +366,6 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
return this;
}
- public TestProperties setEnableDataPlaneFilter(boolean enableDataPlaneFilter) {
- this.enableDataPlaneFilter = enableDataPlaneFilter;
- return this;
- }
-
public static class Spec implements ConfigServerSpec {
private final String hostName;
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
index 3d2c3faa985..348c006e8a6 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
@@ -56,7 +56,6 @@ public class VespaMetricSet {
metrics.add(new Metric("slobrok.heartbeats.failed.count"));
metrics.add(new Metric("logd.processed.lines.count"));
metrics.add(new Metric("worker.connections.max"));
- metrics.add(new Metric("routing.config.is-stale"));
metrics.add(new Metric("endpoint.certificate.expiry.seconds"));
// Java (JRT) TLS metrics
@@ -327,6 +326,12 @@ public class VespaMetricSet {
metrics.add(new Metric("query_latency.count"));
metrics.add(new Metric("query_latency.95percentile"));
metrics.add(new Metric("query_latency.99percentile"));
+ metrics.add(new Metric("query_timeout.min"));
+ metrics.add(new Metric("query_timeout.max"));
+ metrics.add(new Metric("query_timeout.sum"));
+ metrics.add(new Metric("query_timeout.count"));
+ metrics.add(new Metric("query_timeout.95percentile"));
+ metrics.add(new Metric("query_timeout.99percentile"));
metrics.add(new Metric("failed_queries.rate"));
metrics.add(new Metric("degraded_queries.rate"));
metrics.add(new Metric("hits_per_query.max"));
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/CloudDataPlaneFilterValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/CloudDataPlaneFilterValidator.java
index 60705ad9b51..83f8ea7b510 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/CloudDataPlaneFilterValidator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/CloudDataPlaneFilterValidator.java
@@ -25,7 +25,6 @@ public class CloudDataPlaneFilterValidator extends Validator {
public void validate(VespaModel model, DeployState deployState) {
if (!deployState.isHosted()) return;
if (!deployState.zone().system().isPublic()) return;
- if (!deployState.featureFlags().enableDataPlaneFilter()) return;
validateUniqueCertificates(deployState);
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/component/AccessLogComponent.java b/config-model/src/main/java/com/yahoo/vespa/model/container/component/AccessLogComponent.java
index 79c108cd867..ffdb717afa8 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/component/AccessLogComponent.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/component/AccessLogComponent.java
@@ -47,7 +47,7 @@ public final class AccessLogComponent extends SimpleComponent implements AccessL
clusterName.isEmpty() ? capitalize(logType.name()) :
capitalize(logType.name()) + "." + clusterName.get(),
-1,
- ((cluster instanceof ApplicationContainerCluster) ? 4*1024*1024 : null));
+ ((cluster instanceof ApplicationContainerCluster) ? 4 << 20 : null));
}
private static String capitalize(String name) {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
index 2c12ddb34a3..f48f91e8cd0 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
@@ -98,7 +98,6 @@ import com.yahoo.vespa.model.container.xml.document.DocumentFactoryBuilder;
import com.yahoo.vespa.model.content.StorageGroup;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
-
import java.io.IOException;
import java.io.Reader;
import java.net.URI;
@@ -114,7 +113,6 @@ import java.util.Map;
import java.util.Optional;
import java.util.OptionalInt;
import java.util.Set;
-import java.util.UUID;
import java.util.function.Consumer;
import java.util.logging.Level;
import java.util.regex.Pattern;
@@ -192,7 +190,7 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
private ApplicationContainerCluster createContainerCluster(Element spec, ConfigModelContext modelContext) {
return new VespaDomBuilder.DomConfigProducerBuilder<ApplicationContainerCluster>() {
@Override
- protected ApplicationContainerCluster doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) {
+ protected ApplicationContainerCluster doBuild(DeployState deployState, AbstractConfigProducer<?> ancestor, Element producerSpec) {
return new ApplicationContainerCluster(ancestor, modelContext.getProducerId(),
modelContext.getProducerId(), deployState);
}
@@ -455,13 +453,13 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
addHostedImplicitHttpIfNotPresent(deployState, cluster);
addHostedImplicitAccessControlIfNotPresent(deployState, cluster);
addDefaultConnectorHostedFilterBinding(cluster);
- addAdditionalHostedConnector(deployState, cluster, context);
+ addAdditionalHostedConnector(deployState, cluster);
addCloudDataPlaneFilter(deployState, cluster);
}
}
private static void addCloudDataPlaneFilter(DeployState deployState, ApplicationContainerCluster cluster) {
- if (!deployState.isHosted() || !deployState.zone().system().isPublic() || !deployState.featureFlags().enableDataPlaneFilter()) return;
+ if (!deployState.isHosted() || !deployState.zone().system().isPublic()) return;
// Setup secure filter chain
var secureChain = new Chain<Filter>(FilterChains.emptyChainSpec(ComponentId.fromString("cloud-data-plane-secure")));
@@ -555,7 +553,7 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
.ifPresent(accessControl -> accessControl.configureDefaultHostedConnector(cluster.getHttp())); ;
}
- private void addAdditionalHostedConnector(DeployState deployState, ApplicationContainerCluster cluster, ConfigModelContext context) {
+ private void addAdditionalHostedConnector(DeployState deployState, ApplicationContainerCluster cluster) {
JettyHttpServer server = cluster.getHttp().getHttpServer().get();
String serverName = server.getComponentId().getName();
@@ -798,10 +796,22 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
if (cluster.getJvmGCOptions().isEmpty()) {
String jvmGCOptions = extractAttribute(nodesElement, VespaDomBuilder.JVM_GC_OPTIONS);
+
+ if (jvmGCOptions != null && !jvmGCOptions.isEmpty()) {
+ DeployLogger logger = context.getDeployState().getDeployLogger();
+ logger.logApplicationPackage(WARNING, "'jvm-gc-options' is deprecated and will be removed in Vespa 9." +
+ " Please merge into 'gc-options' in 'jvm' element." +
+ " See https://docs.vespa.ai/en/reference/services-container.html#jvm");
+ }
+
cluster.setJvmGCOptions(buildJvmGCOptions(context, jvmGCOptions));
}
- applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
+ if (applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME)))
+ context.getDeployState().getDeployLogger()
+ .logApplicationPackage(WARNING, "'allocated-memory' is deprecated and will be removed in Vespa 9." +
+ " Please merge into 'allocated-memory' in 'jvm' element." +
+ " See https://docs.vespa.ai/en/reference/services-container.html#jvm");
}
private void extractJvmTag(List<ApplicationContainer> nodes, ApplicationContainerCluster cluster,
@@ -877,8 +887,8 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
return createNodesFromNodeList(context.getDeployState(), cluster, nodesElement);
}
- private static void applyMemoryPercentage(ApplicationContainerCluster cluster, String memoryPercentage) {
- if (memoryPercentage == null || memoryPercentage.isEmpty()) return;
+ private static boolean applyMemoryPercentage(ApplicationContainerCluster cluster, String memoryPercentage) {
+ if (memoryPercentage == null || memoryPercentage.isEmpty()) return false;
memoryPercentage = memoryPercentage.trim();
if ( ! memoryPercentage.endsWith("%"))
@@ -893,6 +903,7 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster +
" must be an integer percentage ending by the '%' sign");
}
+ return true;
}
/** Allocate a container cluster without a nodes tag */
@@ -1118,7 +1129,8 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
private static void validateAndAddConfiguredComponents(DeployState deployState,
ContainerCluster<? extends Container> cluster,
- Element spec, String componentName,
+ Element spec,
+ String componentName,
Consumer<Element> elementValidator) {
for (Element node : XML.getChildren(spec, componentName)) {
elementValidator.accept(node); // throws exception here if something is wrong
@@ -1224,7 +1236,7 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) {
jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS);
if (! jvmOptions.isEmpty())
- logger.logApplicationPackage(WARNING, "'jvm-options' is deprecated and will be removed in Vespa 8." +
+ logger.logApplicationPackage(WARNING, "'jvm-options' is deprecated and will be removed in Vespa 9." +
" Please merge 'jvm-options' into 'options' or 'gc-options' in 'jvm' element." +
" See https://docs.vespa.ai/en/reference/services-container.html#jvm");
}
diff --git a/config-model/src/main/resources/schema/common.rnc b/config-model/src/main/resources/schema/common.rnc
index fcd9a68ca89..538a8f069f5 100644
--- a/config-model/src/main/resources/schema/common.rnc
+++ b/config-model/src/main/resources/schema/common.rnc
@@ -1,8 +1,8 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
service.attlist &= attribute hostalias { xsd:NCName }
service.attlist &= attribute baseport { xsd:unsignedShort }?
-service.attlist &= attribute jvm-options { text }?
-service.attlist &= attribute jvm-gc-options { text }?
+service.attlist &= attribute jvm-options { text }? # Remove in Vespa 9
+service.attlist &= attribute jvm-gc-options { text }? # Remove in Vespa 9
# preload is for internal use only
service.attlist &= attribute preload { text }?
diff --git a/config-model/src/main/resources/schema/container-include.rnc b/config-model/src/main/resources/schema/container-include.rnc
index 8f6a8a3bada..b0cd9baab32 100644
--- a/config-model/src/main/resources/schema/container-include.rnc
+++ b/config-model/src/main/resources/schema/container-include.rnc
@@ -2,7 +2,6 @@
include "containercluster.rnc"
include "common.rnc"
include "container.rnc"
-include "docproc.rnc"
include "searchchains.rnc"
start = SearchInContainer | DocprocInContainer | ProcessingInContainer | Components
diff --git a/config-model/src/main/resources/schema/containercluster.rnc b/config-model/src/main/resources/schema/containercluster.rnc
index 938932c3df6..933ec528c42 100644
--- a/config-model/src/main/resources/schema/containercluster.rnc
+++ b/config-model/src/main/resources/schema/containercluster.rnc
@@ -187,7 +187,50 @@ DocprocInContainer = element document-processing {
ChainInDocprocInContainerCluster = element chain {
DocprocChainV3Contents
}
+SchemaMapping = element map {
+ element field {
+ attribute doctype { text }?,
+ attribute in-document { text },
+ attribute in-processor { text }
+ }+
+}
+
+# TODO Here we need a thorough cleaning
+DocprocClusterAttributes = attribute compressdocuments { xsd:boolean }? &
+ attribute numnodesperclient { xsd:positiveInteger }? & # TODO: Remove in Vespa 9
+ attribute preferlocalnode { xsd:boolean }? & # TODO: Remove in Vespa 9
+ attribute maxmessagesinqueue { xsd:nonNegativeInteger }? &
+ attribute maxqueuebytesize { xsd:string { minLength = "1" } }? & # TODO: Remove in Vespa 9
+ attribute maxqueuewait { xsd:positiveInteger }? &
+ attribute maxconcurrentfactor { xsd:double { minExclusive = "0.0" maxExclusive = "1.0" } }? &
+ attribute documentexpansionfactor { xsd:double { minExclusive = "0.0" } }? &
+ attribute containercorememory { xsd:nonNegativeInteger }?
+
+
+DocprocChainV3Contents = attribute name { xsd:NCName }? &
+ ComponentId &
+ SchemaMapping? &
+ DocprocChainInheritance &
+ attribute documentprocessors { text }? &
+ DocumentProcessorV3* &
+ Phase* &
+ GenericConfig*
+
+
+DocprocChainInheritance =
+ attribute inherits { text }? &
+ attribute excludes { text }? &
+ element inherits {
+ element docprocchain { ComponentSpec }* &
+ element exclude { ComponentSpec }*
+ }?
+DocumentProcessorV3 =
+ element documentprocessor {
+ BundleSpec &
+ SchemaMapping? &
+ GenericSearcherOrDocumentProcessor
+ }
# PROCESSING:
@@ -231,10 +274,10 @@ HttpClientApi = element http-client-api {
# NODES:
NodesOfContainerCluster = element nodes {
- attribute jvm-options { text }? &
- attribute jvm-gc-options { text }? &
+ attribute jvm-options { text }? & # Remove in Vespa 9
+ attribute jvm-gc-options { text }? & # Remove in Vespa 9
attribute preload { text }? &
- attribute allocated-memory { text }? &
+ attribute allocated-memory { text }? & # Remove in Vespa 9
attribute cpu-socket-affinity { xsd:boolean }? &
element jvm {
attribute options { text }? &
diff --git a/config-model/src/main/resources/schema/docproc-standalone.rnc b/config-model/src/main/resources/schema/docproc-standalone.rnc
deleted file mode 100644
index caba5327e25..00000000000
--- a/config-model/src/main/resources/schema/docproc-standalone.rnc
+++ /dev/null
@@ -1,6 +0,0 @@
-# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-include "common.rnc"
-include "container.rnc"
-include "searchchains.rnc"
-include "docproc.rnc"
-start = DocprocChainsV3
diff --git a/config-model/src/main/resources/schema/docproc.rnc b/config-model/src/main/resources/schema/docproc.rnc
deleted file mode 100644
index 11f8e14fb2d..00000000000
--- a/config-model/src/main/resources/schema/docproc.rnc
+++ /dev/null
@@ -1,98 +0,0 @@
-# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-DocProc = element docproc {
- DocProcV3
-}
-
-SchemaMapping = element map {
- element field {
- attribute doctype { text }?,
- attribute in-document { text },
- attribute in-processor { text }
- }+
-}
-
-#Version 3 config:
-
-DocProcV3 = attribute version { "3.0" },
- (ClusterV3* &
- OuterDocprocChainsV3? &
- GenericConfig*
-)
-
-# TODO Here we need a thorough cleaning
-DocprocClusterAttributes = attribute compressdocuments { xsd:boolean }? &
- attribute numnodesperclient { xsd:positiveInteger }? & # TODO: Remove in Vespa 9
- attribute preferlocalnode { xsd:boolean }? & # TODO: Remove in Vespa 9
- attribute maxmessagesinqueue { xsd:nonNegativeInteger }? &
- attribute maxqueuebytesize { xsd:string { minLength = "1" } }? & # TODO: Remove in Vespa 9
- attribute maxqueuewait { xsd:positiveInteger }? &
- attribute maxconcurrentfactor { xsd:double { minExclusive = "0.0" maxExclusive = "1.0" } }? &
- attribute documentexpansionfactor { xsd:double { minExclusive = "0.0" } }? &
- attribute containercorememory { xsd:nonNegativeInteger }?
-
-# TODO Here we need a thorough cleaning
-ClusterV3 = element cluster {
- attribute name { xsd:NCName } &
- DocprocClusterAttributes? &
-
- element nodes {
- Resources? &
- attribute jvmargs { text }? &
- attribute preload { text }? &
- element node {
- GenericConfig* &
- service.attlist &
- attribute maxmessagesinqueue { xsd:nonNegativeInteger }? &
- attribute maxqueuebytesize { xsd:string { minLength = "1" } }? &
- attribute maxqueuewait { xsd:positiveInteger }?
- }+
- } &
- GenericConfig* &
- SchemaMapping? &
- Component* &
- Handler* &
- DocprocChainsV3?
-}
-
-DocprocChainsV3 =
- element docprocchains {
- DocumentProcessorV3* &
- DocprocChainV3* &
- GenericConfig*
- }
-
-OuterDocprocChainsV3 =
- element docprocchains {
- DocumentProcessorV3* &
- DocprocChainV3*
- }
-
-DocprocChainV3 =
- element docprocchain {
- DocprocChainV3Contents
- }
-
-DocprocChainV3Contents = attribute name { xsd:NCName }? &
- ComponentId &
- SchemaMapping? &
- DocprocChainInheritance &
- attribute documentprocessors { text }? &
- DocumentProcessorV3* &
- Phase* &
- GenericConfig*
-
-
-DocprocChainInheritance =
- attribute inherits { text }? &
- attribute excludes { text }? &
- element inherits {
- element docprocchain { ComponentSpec }* &
- element exclude { ComponentSpec }*
- }?
-
-DocumentProcessorV3 =
- element documentprocessor {
- BundleSpec &
- SchemaMapping? &
- GenericSearcherOrDocumentProcessor
- }
diff --git a/config-model/src/main/resources/schema/services.rnc b/config-model/src/main/resources/schema/services.rnc
index d3d642b8826..aed627203a0 100644
--- a/config-model/src/main/resources/schema/services.rnc
+++ b/config-model/src/main/resources/schema/services.rnc
@@ -2,7 +2,6 @@
include "common.rnc"
include "admin.rnc"
include "content.rnc"
-include "docproc.rnc"
include "routing.rnc"
include "containercluster.rnc"
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/CloudDataPlaneFilterValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/CloudDataPlaneFilterValidatorTest.java
index ed39260bdd2..515dd7cd75a 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/CloudDataPlaneFilterValidatorTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/CloudDataPlaneFilterValidatorTest.java
@@ -147,8 +147,7 @@ public class CloudDataPlaneFilterValidatorTest {
.properties(
new TestProperties()
.setEndpointCertificateSecrets(Optional.of(new EndpointCertificateSecrets("CERT", "KEY")))
- .setHostedVespa(true)
- .setEnableDataPlaneFilter(true))
+ .setHostedVespa(true))
.zone(new Zone(SystemName.PublicCd, Environment.dev, RegionName.defaultName()))
.build();
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/CloudDataPlaneFilterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/CloudDataPlaneFilterTest.java
index 2490e3df72f..94d92b355f9 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/CloudDataPlaneFilterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/CloudDataPlaneFilterTest.java
@@ -30,7 +30,6 @@ import javax.security.auth.x500.X500Principal;
import java.io.File;
import java.io.IOException;
import java.math.BigInteger;
-import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.KeyPair;
@@ -45,7 +44,6 @@ import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertIterableEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
public class CloudDataPlaneFilterTest extends ContainerModelBuilderTestBase {
@@ -78,7 +76,7 @@ public class CloudDataPlaneFilterTest extends ContainerModelBuilderTestBase {
.formatted(applicationFolder.toPath().relativize(certFile).toString()));
X509Certificate certificate = createCertificate(certFile);
- buildModel(true, clusterElem);
+ buildModel(clusterElem);
CloudDataPlaneFilterConfig config = root.getConfig(CloudDataPlaneFilterConfig.class, cloudDataPlaneFilterConfigId);
assertFalse(config.legacyMode());
@@ -106,7 +104,7 @@ public class CloudDataPlaneFilterTest extends ContainerModelBuilderTestBase {
Element clusterElem = DomBuilderTest.parse("<container version='1.0' />");
X509Certificate certificate = createCertificate(certFile);
- buildModel(true, clusterElem);
+ buildModel(clusterElem);
CloudDataPlaneFilterConfig config = root.getConfig(CloudDataPlaneFilterConfig.class, cloudDataPlaneFilterConfigId);
assertTrue(config.legacyMode());
@@ -120,34 +118,6 @@ public class CloudDataPlaneFilterTest extends ContainerModelBuilderTestBase {
}
@Test
- public void it_generates_correct_config_when_filter_not_enabled () throws IOException {
- Path certFile = securityFolder.resolve("clients.pem");
- Element clusterElem = DomBuilderTest.parse(
- """
- <container version='1.0'>
- <clients>
- <client id="foo" permissions="read,write">
- <certificate file="%s"/>
- </client>
- </clients>
- </container>
- """
- .formatted(applicationFolder.toPath().relativize(certFile).toString()));
- X509Certificate certificate = createCertificate(certFile);
-
- buildModel(false, clusterElem);
-
- // Data plane filter config is not configured
- assertFalse(root.getConfigIds().contains("container/component/com.yahoo.jdisc.http.filter.security.cloud.CloudDataPlaneFilter"));
-
- // Connector config configures ca certs from security/clients.pem
- ConnectorConfig connectorConfig = connectorConfig();
- var caCerts = X509CertificateUtils.certificateListFromPem(connectorConfig.ssl().caCertificate());
- assertEquals(1, caCerts.size());
- assertEquals(List.of(certificate), caCerts);
- }
-
- @Test
public void it_rejects_files_without_certificates() throws IOException {
Path certFile = securityFolder.resolve("foo.pem");
Element clusterElem = DomBuilderTest.parse(
@@ -163,7 +133,7 @@ public class CloudDataPlaneFilterTest extends ContainerModelBuilderTestBase {
.formatted(applicationFolder.toPath().relativize(certFile).toString()));
Files.writeString(certFile, "effectively empty");
- IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> buildModel(true, clusterElem));
+ IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> buildModel(clusterElem));
assertEquals("File security/foo.pem does not contain any certificates.", exception.getMessage());
}
@@ -179,7 +149,7 @@ public class CloudDataPlaneFilterTest extends ContainerModelBuilderTestBase {
</clients>
</container>
""");
- IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> buildModel(true, clusterElem));
+ IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> buildModel(clusterElem));
assertEquals("Invalid client id '_foo', id cannot start with '_'", exception.getMessage());
}
@@ -209,7 +179,7 @@ public class CloudDataPlaneFilterTest extends ContainerModelBuilderTestBase {
return certificate;
}
- public List<ContainerModel> buildModel(boolean enableFilter, Element... clusterElem) {
+ public List<ContainerModel> buildModel(Element... clusterElem) {
var applicationPackage = new MockApplicationPackage.Builder()
.withRoot(applicationFolder)
.build();
@@ -219,8 +189,7 @@ public class CloudDataPlaneFilterTest extends ContainerModelBuilderTestBase {
.properties(
new TestProperties()
.setEndpointCertificateSecrets(Optional.of(new EndpointCertificateSecrets("CERT", "KEY")))
- .setHostedVespa(true)
- .setEnableDataPlaneFilter(enableFilter))
+ .setHostedVespa(true))
.zone(new Zone(SystemName.PublicCd, Environment.dev, RegionName.defaultName()))
.build();
return createModel(root, state, null, clusterElem);
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/JvmOptionsTest.java b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/JvmOptionsTest.java
index 45546e57808..3435e7faa5e 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/JvmOptionsTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/JvmOptionsTest.java
@@ -21,7 +21,9 @@ import java.util.Collections;
import java.util.List;
import java.util.logging.Level;
-import static org.junit.jupiter.api.Assertions.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
/**
* @author baldersheim
@@ -137,10 +139,30 @@ public class JvmOptionsTest extends ContainerModelBuilderTestBase {
verifyLoggingOfJvmGcOptions(true, "-XX:MaxTenuringThreshold=15"); // No + or - after colon
}
+ @Test
+ void requireThatDeprecatedJvmOptionsAreLogged() throws IOException, SAXException {
+ String optionName = "jvm-options";
+ verifyLoggingOfLegacyJvmOptions(true, optionName, "-XX:+ParallelGCThreads=8", optionName);
+ verifyLoggingOfLegacyJvmOptions(false, optionName, "-XX:+ParallelGCThreads=8", optionName);
+ }
+
+ @Test
+ void requireThatDeprecatedJvmOptionsAreLogged_2() throws IOException, SAXException {
+ String optionName = "allocated-memory";
+ verifyLoggingOfLegacyJvmOptions(true, optionName, "50%", optionName);
+ verifyLoggingOfLegacyJvmOptions(false, optionName, "50%", optionName);
+ }
+
+ @Test
+ void requireThatDeprecatedJvmGcOptionsAreLogged() throws IOException, SAXException {
+ String optionName = "jvm-gc-options";
+ verifyLoggingOfLegacyJvmOptions(true, optionName, "-XX:+ParallelGCThreads=8", optionName);
+ verifyLoggingOfLegacyJvmOptions(false, optionName, "-XX:+ParallelGCThreads=8", optionName);
+ }
+
private void verifyThatInvalidJvmGcOptionsFailDeployment(String options, String expected) throws IOException, SAXException {
try {
- buildModelWithJvmOptions(new TestProperties().setHostedVespa(true),
- new TestLogger(), "gc-options", options);
+ buildModelWithJvmOptions(new TestProperties().setHostedVespa(true), "gc-options", options);
fail();
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().startsWith(expected));
@@ -158,18 +180,22 @@ public class JvmOptionsTest extends ContainerModelBuilderTestBase {
}
private void verifyLoggingOfJvmGcOptions(boolean isHosted, String override, String... invalidOptions) throws IOException, SAXException {
- verifyLoggingOfJvmOptions(isHosted, "gc-options", override, invalidOptions);
+ verifyLogMessage(isHosted, "gc-options", override, invalidOptions);
}
- private void verifyLoggingOfJvmOptions(boolean isHosted, String optionName, String override, String... invalidOptions) throws IOException, SAXException {
- TestLogger logger = new TestLogger();
- buildModelWithJvmOptions(isHosted, logger, optionName, override);
+ private void verifyLogMessage(boolean isHosted, String optionName, String override, String... invalidOptions) throws IOException, SAXException {
+ var logger = buildModelWithJvmOptions(isHosted, optionName, override);
+ var message = verifyLogMessage(logger, invalidOptions);
+ if (message != null)
+ assertTrue(message.contains("Invalid or misplaced JVM"), message);
+ }
+ private String verifyLogMessage(TestLogger logger, String... invalidOptions) {
List<String> strings = Arrays.asList(invalidOptions.clone());
// Verify that nothing is logged if there are no invalid options
if (strings.isEmpty()) {
assertEquals(0, logger.msgs.size(), logger.msgs.size() > 0 ? logger.msgs.get(0).getSecond() : "");
- return;
+ return null;
}
assertTrue(logger.msgs.size() > 0, "Expected 1 or more log messages for invalid JM options, got none");
@@ -177,17 +203,15 @@ public class JvmOptionsTest extends ContainerModelBuilderTestBase {
assertEquals(Level.WARNING, firstOption.getFirst());
Collections.sort(strings);
- assertEquals("Invalid or misplaced JVM" + (optionName.equals("gc-options") ? " GC" : "") +
- " options in services.xml: " + String.join(",", strings) + "." +
- " See https://docs.vespa.ai/en/reference/services-container.html#jvm"
- , firstOption.getSecond());
+ return firstOption.getSecond();
}
- private void buildModelWithJvmOptions(boolean isHosted, TestLogger logger, String optionName, String override) throws IOException, SAXException {
- buildModelWithJvmOptions(new TestProperties().setHostedVespa(isHosted), logger, optionName, override);
+ private TestLogger buildModelWithJvmOptions(boolean isHosted, String optionName, String override) throws IOException, SAXException {
+ return buildModelWithJvmOptions(new TestProperties().setHostedVespa(isHosted), optionName, override);
}
- private void buildModelWithJvmOptions(TestProperties properties, TestLogger logger, String optionName, String override) throws IOException, SAXException {
+ private TestLogger buildModelWithJvmOptions(TestProperties properties, String optionName, String override) throws IOException, SAXException {
+ TestLogger logger = new TestLogger();
String servicesXml =
"<container version='1.0'>" +
" <nodes>" +
@@ -195,6 +219,32 @@ public class JvmOptionsTest extends ContainerModelBuilderTestBase {
" <node hostalias='mockhost'/>" +
" </nodes>" +
"</container>";
+ buildModel(properties, logger, servicesXml);
+ return logger;
+ }
+
+ private void verifyLoggingOfLegacyJvmOptions(boolean isHosted, String optionName, String override, String... invalidOptions) throws IOException, SAXException {
+ var logger = buildModelWithLegacyJvmOptions(isHosted, optionName, override);
+
+ var message = verifyLogMessage(logger, invalidOptions);
+ if (message != null)
+ assertTrue(message.contains("'" + optionName + "' is deprecated and will be removed"), message);
+ }
+
+ private TestLogger buildModelWithLegacyJvmOptions(boolean isHosted, String optionName, String override) throws IOException, SAXException {
+ TestProperties properties = new TestProperties().setHostedVespa(isHosted);
+ TestLogger logger = new TestLogger();
+ String servicesXml =
+ "<container version='1.0'>" +
+ " <nodes " + optionName + "='" + override + "'>" +
+ " <node hostalias='mockhost'/>" +
+ " </nodes>" +
+ "</container>";
+ buildModel(properties, logger, servicesXml);
+ return logger;
+ }
+
+ private void buildModel(TestProperties properties, TestLogger logger, String servicesXml) throws IOException, SAXException {
ApplicationPackage app = new MockApplicationPackage.Builder().withServices(servicesXml).build();
new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
.applicationPackage(app)
@@ -206,18 +256,17 @@ public class JvmOptionsTest extends ContainerModelBuilderTestBase {
@Test
void requireThatValidJvmOptionsAreNotLogged() throws IOException, SAXException {
// Valid options, should not log anything
- verifyLoggingOfJvmOptions(true, "options", "-Xms2G");
- verifyLoggingOfJvmOptions(true, "options", "-Xlog:gc");
- verifyLoggingOfJvmOptions(true, "options", "-Djava.library.path=/opt/vespa/lib64:/home/y/lib64");
- verifyLoggingOfJvmOptions(true, "options", "-XX:-OmitStackTraceInFastThrow");
- verifyLoggingOfJvmOptions(false, "options", "-Xms2G");
+ verifyLogMessage(true, "options", "-Xms2G");
+ verifyLogMessage(true, "options", "-Xlog:gc");
+ verifyLogMessage(true, "options", "-Djava.library.path=/opt/vespa/lib64:/home/y/lib64");
+ verifyLogMessage(true, "options", "-XX:-OmitStackTraceInFastThrow");
+ verifyLogMessage(false, "options", "-Xms2G");
}
@Test
void requireThatInvalidJvmOptionsFailDeployment() throws IOException, SAXException {
try {
buildModelWithJvmOptions(new TestProperties().setHostedVespa(true),
- new TestLogger(),
"options",
"-Xms2G foo bar");
fail();
diff --git a/config-model/src/test/schema-test-files/services.xml b/config-model/src/test/schema-test-files/services.xml
index 7976b1f5524..8806a4e082a 100644
--- a/config-model/src/test/schema-test-files/services.xml
+++ b/config-model/src/test/schema-test-files/services.xml
@@ -219,6 +219,24 @@
<certificate file="security/file2.pem" />
</client>
</clients>
+
+ <document-processing>
+ <chain id="common">
+ <documentprocessor id="CommonDocproc" class="com.yahoo.vespatest.ExtraHitDocumentProcessor">
+ <config name="com.yahoo.vespatest.extra-hit">
+ <exampleString>A docproc for all clusters</exampleString>
+ </config>
+ </documentprocessor>
+ </chain>
+ <chain id="cluster1">
+ <documentprocessor id="Cluster1Docproc" class="com.yahoo.vespatest.ExtraHitDocumentProcessor">
+ <config name="com.yahoo.vespatest.extra-hit">
+ <exampleString>Docproc only for cluster1</exampleString>
+ </config>
+ </documentprocessor>
+ </chain>
+ </document-processing>
+
</container>
<container id='qrsCluster_2' version='1.0'>
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java b/configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java
index ccc9f615c93..b811fe21310 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java
@@ -229,12 +229,15 @@ public class ConfigServerBootstrap extends AbstractComponent implements Runnable
List<ApplicationId> failedDeployments = checkDeployments(deployments);
executor.shutdown();
- executor.awaitTermination(365, TimeUnit.DAYS); // Timeout should never happen
+ if (! executor.awaitTermination(5, TimeUnit.HOURS)) {
+ log.log(Level.SEVERE, () -> "Unable to shutdown " + executor + ", waited 5 hours. Exiting");
+ System.exit(1);
+ }
return failedDeployments;
}
- private enum DeploymentStatus { inProgress, done, failed}
+ private enum DeploymentStatus { inProgress, done, failed }
private List<ApplicationId> checkDeployments(Map<ApplicationId, Future<?>> deployments) {
int applicationCount = deployments.size();
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
index dcd59f6ceff..531bddec4cb 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
@@ -204,7 +204,6 @@ public class ModelContextImpl implements ModelContext {
private final boolean useRestrictedDataPlaneBindings;
private final int heapPercentage;
private final boolean useOldJdiscContainerStartup;
- private final boolean enableDataPlaneFilter;
public FeatureFlags(FlagSource source, ApplicationId appId, Version version) {
this.defaultTermwiseLimit = flagValue(source, appId, version, Flags.DEFAULT_TERM_WISE_LIMIT);
@@ -251,7 +250,6 @@ public class ModelContextImpl implements ModelContext {
this.useRestrictedDataPlaneBindings = flagValue(source, appId, version, Flags.RESTRICT_DATA_PLANE_BINDINGS);
this.heapPercentage = flagValue(source, appId, version, PermanentFlags.HEAP_SIZE_PERCENTAGE);
this.useOldJdiscContainerStartup = flagValue(source, appId, version, Flags.USE_OLD_JDISC_CONTAINER_STARTUP);
- this.enableDataPlaneFilter = flagValue(source, appId, version, Flags.ENABLE_DATAPLANE_FILTER);
}
@Override public boolean useOldJdiscContainerStartup() { return useOldJdiscContainerStartup; }
@@ -306,7 +304,6 @@ public class ModelContextImpl implements ModelContext {
}
@Override public boolean useTwoPhaseDocumentGc() { return useTwoPhaseDocumentGc; }
@Override public boolean useRestrictedDataPlaneBindings() { return useRestrictedDataPlaneBindings; }
- @Override public boolean enableDataPlaneFilter() { return enableDataPlaneFilter; }
private static <V> V flagValue(FlagSource source, ApplicationId appId, Version vespaVersion, UnboundFlag<? extends V, ?, ?> flag) {
return flag.bindTo(source)
diff --git a/container-apache-http-client-bundle/pom.xml b/container-apache-http-client-bundle/pom.xml
index d2f70b91bb3..fa29043bf5f 100644
--- a/container-apache-http-client-bundle/pom.xml
+++ b/container-apache-http-client-bundle/pom.xml
@@ -43,17 +43,19 @@
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
- <scope>compile</scope>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpmime</artifactId>
- <scope>compile</scope>
+ </dependency>
+ <dependency>
+ <!-- Workaround for maven issue preventing correct version resolving for transitive dependencies in some cases -->
+ <groupId>commons-codec</groupId>
+ <artifactId>commons-codec</artifactId>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents.client5</groupId>
<artifactId>httpclient5</artifactId>
- <scope>compile</scope>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
diff --git a/container-dev/pom.xml b/container-dev/pom.xml
index e088c40e20b..f62cc69a428 100644
--- a/container-dev/pom.xml
+++ b/container-dev/pom.xml
@@ -163,10 +163,6 @@
<artifactId>jna</artifactId>
</exclusion>
<exclusion>
- <groupId>io.airlift</groupId>
- <artifactId>aircompressor</artifactId>
- </exclusion>
- <exclusion>
<groupId>net.openhft</groupId>
<artifactId>zero-allocation-hashing</artifactId>
</exclusion>
diff --git a/container-messagebus/src/main/java/com/yahoo/container/jdisc/messagebus/SessionCache.java b/container-messagebus/src/main/java/com/yahoo/container/jdisc/messagebus/SessionCache.java
index baff0437b5d..16dedd0765d 100644
--- a/container-messagebus/src/main/java/com/yahoo/container/jdisc/messagebus/SessionCache.java
+++ b/container-messagebus/src/main/java/com/yahoo/container/jdisc/messagebus/SessionCache.java
@@ -5,7 +5,6 @@ import com.yahoo.component.annotation.Inject;
import com.yahoo.component.AbstractComponent;
import com.yahoo.container.jdisc.ContainerMbusConfig;
import com.yahoo.document.DocumentTypeManager;
-import com.yahoo.document.DocumentUtil;
import com.yahoo.documentapi.messagebus.protocol.DocumentProtocol;
import com.yahoo.documentapi.messagebus.protocol.DocumentProtocolPoliciesConfig;
import com.yahoo.jdisc.ReferencedResource;
@@ -109,33 +108,13 @@ public final class SessionCache extends AbstractComponent {
Protocol protocol) {
MessageBusParams mbusParams = new MessageBusParams().addProtocol(protocol);
- int maxPendingSize = DocumentUtil
- .calculateMaxPendingSize(mbusConfig.maxConcurrentFactor(), mbusConfig.documentExpansionFactor(),
- mbusConfig.containerCoreMemory());
- logSystemInfo(mbusConfig, maxPendingSize);
-
mbusParams.setMaxPendingCount(mbusConfig.maxpendingcount());
- mbusParams.setMaxPendingSize(maxPendingSize);
MessageBus bus = new MessageBus(net, mbusParams);
new ConfigAgent(messagebusConfig, bus); // Configure the wrapped MessageBus with a routing table.
return new SharedMessageBus(bus);
}
- private static void logSystemInfo(ContainerMbusConfig containerMbusConfig, long maxPendingSize) {
- log.log(Level.FINE,
- "Running with maximum heap size of " + (Runtime.getRuntime().maxMemory() / 1024L / 1024L) + " MB");
- log.log(Level.CONFIG,
- "Amount of memory reserved for container core: " + containerMbusConfig.containerCoreMemory() + " MB.");
- log.log(Level.CONFIG,
- "Running with document expansion factor " + containerMbusConfig.documentExpansionFactor() + "");
-
- String msgLimit =
- (containerMbusConfig.maxpendingcount() == 0) ? "unlimited" : "" + containerMbusConfig.maxpendingcount();
- log.log(Level.CONFIG, ("Starting message bus with max " + msgLimit + " pending messages and max " +
- (((double) (maxPendingSize / 1024L)) / 1024.0d) + " pending megabytes."));
- }
-
ReferencedResource<SharedIntermediateSession> retainIntermediate(final IntermediateSessionParams p) {
return intermediatesCreator.retain(intermediateLock, intermediates, p);
}
diff --git a/container-search/src/main/java/com/yahoo/prelude/statistics/StatisticsSearcher.java b/container-search/src/main/java/com/yahoo/prelude/statistics/StatisticsSearcher.java
index 09f15b4c5c2..28b6d64a94e 100644
--- a/container-search/src/main/java/com/yahoo/prelude/statistics/StatisticsSearcher.java
+++ b/container-search/src/main/java/com/yahoo/prelude/statistics/StatisticsSearcher.java
@@ -63,6 +63,7 @@ public class StatisticsSearcher extends Searcher {
private static final String FAILED_QUERIES_METRIC = "failed_queries";
private static final String MEAN_QUERY_LATENCY_METRIC = "mean_query_latency";
private static final String QUERY_LATENCY_METRIC = "query_latency";
+ private static final String QUERY_TIMEOUT_METRIC = "query_timeout";
private static final String QUERY_HIT_OFFSET_METRIC = "query_hit_offset";
private static final String QUERIES_METRIC = "queries";
private static final String PEAK_QPS_METRIC = "peak_qps";
@@ -125,6 +126,7 @@ public class StatisticsSearcher extends Searcher {
this.peakQpsReporter = new PeakQpsReporter();
this.metric = metric;
+ metricReceiver.declareGauge(QUERY_TIMEOUT_METRIC, Optional.empty(), new MetricSettings.Builder().histogram(true).build());
metricReceiver.declareGauge(QUERY_LATENCY_METRIC, Optional.empty(), new MetricSettings.Builder().histogram(true).build());
metricReceiver.declareGauge(HITS_PER_QUERY_METRIC, Optional.empty(), new MetricSettings.Builder().histogram(true).build());
metricReceiver.declareGauge(TOTALHITS_PER_QUERY_METRIC, Optional.empty(), new MetricSettings.Builder().histogram(true).build());
@@ -223,6 +225,7 @@ public class StatisticsSearcher extends Searcher {
logQuery(query);
long start_ns = getStartNanoTime(query);
qps(metricContext);
+ metric.set(QUERY_TIMEOUT_METRIC, query.getTimeout(), metricContext);
Result result;
//handle exceptions thrown below in searchers
try {
diff --git a/container-test/pom.xml b/container-test/pom.xml
index 829372ea8fb..d2806bb0330 100644
--- a/container-test/pom.xml
+++ b/container-test/pom.xml
@@ -47,6 +47,11 @@
for user projects must be added in compile scope here. These dependencies are explicitly excluded
(or set to non-compile scope) in the container and/or container-dev modules. -->
<dependency>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>airlift-zstd</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
<groupId>com.ibm.icu</groupId>
<artifactId>icu4j</artifactId>
</dependency>
@@ -98,11 +103,6 @@
<groupId>xerces</groupId>
<artifactId>xercesImpl</artifactId>
</dependency>
- <dependency>
- <groupId>io.airlift</groupId>
- <artifactId>aircompressor</artifactId>
- <scope>compile</scope>
- </dependency>
<dependency> <!-- TODO: Remove on Vespa 9 -->
<!-- not used by Vespa, but was historically on test classpath -->
<groupId>org.json</groupId>
diff --git a/container/pom.xml b/container/pom.xml
index 10322758d1a..5a48394accd 100644
--- a/container/pom.xml
+++ b/container/pom.xml
@@ -30,6 +30,10 @@
<groupId>org.ow2.asm</groupId>
<artifactId>asm</artifactId>
</exclusion>
+ <exclusion>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>airlift-zstd</artifactId>
+ </exclusion>
</exclusions>
</dependency>
<dependency>
diff --git a/controller-api/pom.xml b/controller-api/pom.xml
index c7ac5d3518a..73e4522c521 100644
--- a/controller-api/pom.xml
+++ b/controller-api/pom.xml
@@ -81,6 +81,28 @@
<artifactId>annotations</artifactId>
<version>9.0.4</version>
</dependency>
+ <dependency>
+ <artifactId>aws-java-sdk-core</artifactId>
+ <groupId>com.amazonaws</groupId>
+ <exclusions>
+ <exclusion>
+ <groupId>org.apache.httpcomponents</groupId>
+ <artifactId>httpclient</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.fasterxml.jackson.dataformat</groupId>
+ <artifactId>*</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.fasterxml.jackson.core</groupId>
+ <artifactId>*</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>commons-logging</groupId>
+ <artifactId>*</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
<!-- test -->
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/BillingController.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/BillingController.java
index 40bc9b27449..8b0f58c79d2 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/BillingController.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/BillingController.java
@@ -62,6 +62,7 @@ public interface BillingController {
* @return The ID of the new bill.
*/
Bill.Id createBillForPeriod(TenantName tenant, ZonedDateTime startTime, ZonedDateTime endTime, String agent);
+ Bill.Id createBillForPeriod(TenantName tenant, LocalDate startDate, LocalDate endDate, String agent);
/**
* Create an unpersisted bill of unbilled use for the given tenant from the end of last bill until the given date.
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/MockBillingController.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/MockBillingController.java
index c9f9b7f50e4..6ea4c7442d8 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/MockBillingController.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/MockBillingController.java
@@ -77,6 +77,11 @@ public class MockBillingController implements BillingController {
}
@Override
+ public Bill.Id createBillForPeriod(TenantName tenant, LocalDate startDate, LocalDate endDate, String agent) {
+ return createBillForPeriod(tenant, startDate.atStartOfDay(ZoneOffset.UTC), endDate.plusDays(1).atStartOfDay(ZoneOffset.UTC), agent);
+ }
+
+ @Override
public Bill createUncommittedBill(TenantName tenant, LocalDate until) {
return uncommittedBills.getOrDefault(tenant, emptyBill());
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Cluster.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Cluster.java
index cab32c7b5ce..b7a71cc7b91 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Cluster.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Cluster.java
@@ -20,30 +20,20 @@ public class Cluster {
private final ClusterResources min;
private final ClusterResources max;
private final ClusterResources current;
- private final Optional<ClusterResources> target;
- private final Optional<ClusterResources> suggested;
- private final Utilization utilization;
+ private final Autoscaling target;
+ private final Autoscaling suggested;
private final List<ScalingEvent> scalingEvents;
- private final String autoscalingStatusCode;
- private final String autoscalingStatus;
private final Duration scalingDuration;
- private final double maxQueryGrowthRate;
- private final double currentQueryFractionOfMax;
public Cluster(ClusterSpec.Id id,
ClusterSpec.Type type,
ClusterResources min,
ClusterResources max,
ClusterResources current,
- Optional<ClusterResources> target,
- Optional<ClusterResources> suggested,
- Utilization utilization,
+ Autoscaling target,
+ Autoscaling suggested,
List<ScalingEvent> scalingEvents,
- String autoscalingStatusCode,
- String autoscalingStatus,
- Duration scalingDuration,
- double maxQueryGrowthRate,
- double currentQueryFractionOfMax) {
+ Duration scalingDuration) {
this.id = id;
this.type = type;
this.min = min;
@@ -51,69 +41,33 @@ public class Cluster {
this.current = current;
this.target = target;
this.suggested = suggested;
- this.utilization = utilization;
this.scalingEvents = scalingEvents;
- this.autoscalingStatusCode = autoscalingStatusCode;
- this.autoscalingStatus = autoscalingStatus;
this.scalingDuration = scalingDuration;
- this.maxQueryGrowthRate = maxQueryGrowthRate;
- this.currentQueryFractionOfMax = currentQueryFractionOfMax;
}
public ClusterSpec.Id id() { return id; }
+
public ClusterSpec.Type type() { return type; }
+
public ClusterResources min() { return min; }
+
public ClusterResources max() { return max; }
+
public ClusterResources current() { return current; }
- public Optional<ClusterResources> target() { return target; }
- public Optional<ClusterResources> suggested() { return suggested; }
- public Utilization utilization() { return utilization; }
+
+ public Autoscaling target() { return target; }
+
+ public Autoscaling suggested() { return suggested; }
+
public List<ScalingEvent> scalingEvents() { return scalingEvents; }
- public String autoscalingStatusCode() { return autoscalingStatusCode; }
- public String autoscalingStatus() { return autoscalingStatus; }
+
public Duration scalingDuration() { return scalingDuration; }
- public double maxQueryGrowthRate() { return maxQueryGrowthRate; }
- public double currentQueryFractionOfMax() { return currentQueryFractionOfMax; }
@Override
public String toString() {
return "cluster '" + id + "'";
}
- public static class Utilization {
-
- private final double idealCpu, peakCpu;
- private final double idealMemory, peakMemory;
- private final double idealDisk, peakDisk;
-
- public Utilization(double idealCpu, double peakCpu,
- double idealMemory, double peakMemory,
- double idealDisk, double peakDisk) {
- this.idealCpu = idealCpu;
- this.peakCpu = peakCpu;
-
- this.idealMemory = idealMemory;
- this.peakMemory = peakMemory;
-
- this.idealDisk = idealDisk;
- this.peakDisk = peakDisk;
- }
-
- public double idealCpu() { return idealCpu; }
- public double peakCpu() { return peakCpu; }
-
- public double idealMemory() { return idealMemory; }
- public double peakMemory() { return peakMemory; }
-
- public double idealDisk() { return idealDisk; }
- public double peakDisk() { return peakDisk; }
-
- public static Utilization empty() { return new Utilization(0, 0,
- 0, 0,
- 0, 0); }
-
- }
-
public static class ScalingEvent {
private final ClusterResources from, to;
@@ -127,10 +81,13 @@ public class Cluster {
this.completion = completion;
}
- public ClusterResources from() { return from; }
- public ClusterResources to() { return to; }
- public Instant at() { return at; }
- public Optional<Instant> completion() { return completion; }
+ public ClusterResources from() {return from;}
+
+ public ClusterResources to() {return to;}
+
+ public Instant at() {return at;}
+
+ public Optional<Instant> completion() {return completion;}
@Override
public boolean equals(Object o) {
@@ -148,12 +105,74 @@ public class Cluster {
@Override
public String toString() {
return "ScalingEvent{" +
- "from=" + from +
- ", to=" + to +
- ", at=" + at +
- ", completion=" + completion +
- '}';
+ "from=" + from +
+ ", to=" + to +
+ ", at=" + at +
+ ", completion=" + completion +
+ '}';
}
}
+ public static class Autoscaling {
+
+ private final String status;
+ private final String description;
+ private final Optional<ClusterResources> resources;
+ private final Instant at;
+ private final Load peak;
+ private final Load ideal;
+
+ public Autoscaling(String status, String description, Optional<ClusterResources> resources, Instant at,
+ Load peak, Load ideal) {
+ this.status = status;
+ this.description = description;
+ this.resources = resources;
+ this.at = at;
+ this.peak = peak;
+ this.ideal = ideal;
+ }
+
+ public String status() {return status;}
+ public String description() {return description;}
+ public Optional<ClusterResources> resources() {
+ return resources;
+ }
+ public Instant at() {return at;}
+ public Load peak() {return peak;}
+ public Load ideal() {return ideal;}
+
+ @Override
+ public boolean equals(Object o) {
+ if (!(o instanceof Autoscaling other)) return false;
+ if (!this.status.equals(other.status)) return false;
+ if (!this.description.equals(other.description)) return false;
+ if (!this.resources.equals(other.resources)) return false;
+ if (!this.at.equals(other.at)) return false;
+ if (!this.peak.equals(other.peak)) return false;
+ if (!this.ideal.equals(other.ideal)) return false;
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(status, description, at, peak, ideal);
+ }
+
+ @Override
+ public String toString() {
+ return (resources.isPresent() ? "Autoscaling to " + resources : "Don't autoscale") +
+ (description.isEmpty() ? "" : ": " + description);
+ }
+
+ public static Autoscaling empty() {
+ return new Autoscaling("unavailable",
+ "",
+ Optional.empty(),
+ Instant.EPOCH,
+ Load.zero(),
+ Load.zero());
+ }
+
+ }
+
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Load.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Load.java
index 548fac7d11b..f954d0c8392 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Load.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Load.java
@@ -1,6 +1,8 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.api.integration.configserver;
+import java.util.Objects;
+
/**
* @author bratseth
*/
@@ -20,10 +22,20 @@ public class Load {
public double memory() { return memory; }
public double disk() { return disk; }
+ @Override
public String toString() {
return "load: cpu " + cpu + ", memory " + memory + ", disk " + disk;
}
+ @Override
+ public int hashCode() { return Objects.hash(cpu, memory, disk); }
+
+ @Override
+ public boolean equals(Object o) {
+ if ( ! (o instanceof Load other)) return false;
+ return cpu == other.cpu && memory == other.memory && disk == other.disk;
+ }
+
public static Load zero() { return new Load(0, 0, 0); }
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/AutoscalingData.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/AutoscalingData.java
new file mode 100644
index 00000000000..3541799b0d0
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/AutoscalingData.java
@@ -0,0 +1,44 @@
+package com.yahoo.vespa.hosted.controller.api.integration.noderepository;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.yahoo.vespa.hosted.controller.api.integration.configserver.Cluster;
+import com.yahoo.vespa.hosted.controller.api.integration.configserver.Load;
+
+import java.time.Duration;
+import java.time.Instant;
+import java.util.Optional;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class AutoscalingData {
+
+ @JsonProperty("status")
+ public String status;
+
+ @JsonProperty("description")
+ public String description;
+
+ @JsonProperty("resources")
+ public ClusterResourcesData resources;
+
+ @JsonProperty("at")
+ public Long at;
+
+ @JsonProperty("peak")
+ public LoadData peak;
+
+ @JsonProperty("ideal")
+ public LoadData ideal;
+
+ public Cluster.Autoscaling toAutoscaling() {
+ return new Cluster.Autoscaling(status == null ? "" : status,
+ description == null ? "" : description,
+ resources == null ? Optional.empty() : Optional.ofNullable(resources.toClusterResources()),
+ at == null ? Instant.EPOCH : Instant.ofEpochMilli(at),
+ peak == null ? Load.zero() : peak.toLoad(),
+ ideal == null ? Load.zero() : ideal.toLoad());
+ }
+
+}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/ClusterData.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/ClusterData.java
index 9c2104232f1..539f0545c88 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/ClusterData.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/ClusterData.java
@@ -21,30 +21,27 @@ public class ClusterData {
@JsonProperty("type")
public String type;
+
@JsonProperty("min")
public ClusterResourcesData min;
+
@JsonProperty("max")
public ClusterResourcesData max;
+
@JsonProperty("current")
public ClusterResourcesData current;
+
@JsonProperty("suggested")
- public ClusterResourcesData suggested;
+ public AutoscalingData suggested;
+
@JsonProperty("target")
- public ClusterResourcesData target;
- @JsonProperty("utilization")
- public ClusterUtilizationData utilization;
+ public AutoscalingData target;
+
@JsonProperty("scalingEvents")
public List<ScalingEventData> scalingEvents;
- @JsonProperty("autoscalingStatusCode")
- public String autoscalingStatusCode;
- @JsonProperty("autoscalingStatus")
- public String autoscalingStatus;
+
@JsonProperty("scalingDuration")
public Long scalingDuration;
- @JsonProperty("maxQueryGrowthRate")
- public Double maxQueryGrowthRate;
- @JsonProperty("currentQueryFractionOfMax")
- public Double currentQueryFractionOfMax;
public Cluster toCluster(String id) {
return new Cluster(ClusterSpec.Id.from(id),
@@ -52,16 +49,11 @@ public class ClusterData {
min.toClusterResources(),
max.toClusterResources(),
current.toClusterResources(),
- target == null ? Optional.empty() : Optional.of(target.toClusterResources()),
- suggested == null ? Optional.empty() : Optional.of(suggested.toClusterResources()),
- utilization == null ? Cluster.Utilization.empty() : utilization.toClusterUtilization(),
+ target == null ? Cluster.Autoscaling.empty() : target.toAutoscaling(),
+ suggested == null ? Cluster.Autoscaling.empty() : suggested.toAutoscaling(),
scalingEvents == null ? List.of()
: scalingEvents.stream().map(data -> data.toScalingEvent()).toList(),
- autoscalingStatusCode,
- autoscalingStatus,
- scalingDuration == null ? Duration.ofMillis(0) : Duration.ofMillis(scalingDuration),
- maxQueryGrowthRate == null ? -1 : maxQueryGrowthRate,
- currentQueryFractionOfMax == null ? -1 : currentQueryFractionOfMax);
+ scalingDuration == null ? Duration.ofMillis(0) : Duration.ofMillis(scalingDuration));
}
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/ClusterResourcesData.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/ClusterResourcesData.java
index 801ee4ee853..2a9ab1e3a55 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/ClusterResourcesData.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/ClusterResourcesData.java
@@ -23,6 +23,7 @@ public class ClusterResourcesData {
public NodeResources resources;
public ClusterResources toClusterResources() {
+ if (resources == null) return null; // TODO: Compatibility, remove after January 2023
return new ClusterResources(nodes, groups, resources.toNodeResources());
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/ClusterUtilizationData.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/ClusterUtilizationData.java
deleted file mode 100644
index b4fee25d1ad..00000000000
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/ClusterUtilizationData.java
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.api.integration.noderepository;
-
-import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
-import com.fasterxml.jackson.annotation.JsonInclude;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.yahoo.vespa.hosted.controller.api.integration.configserver.Cluster;
-
-/**
- * Utilization ratios
- *
- * @author bratseth
- */
-@JsonIgnoreProperties(ignoreUnknown = true)
-@JsonInclude(JsonInclude.Include.NON_NULL)
-public class ClusterUtilizationData {
-
- @JsonProperty("idealCpu")
- public Double idealCpu;
- @JsonProperty("peakCpu")
- public Double peakCpu;
-
- @JsonProperty("idealMemory")
- public Double idealMemory;
- @JsonProperty("peakMemory")
- public Double peakMemory;
-
- @JsonProperty("idealDisk")
- public Double idealDisk;
- @JsonProperty("peakDisk")
- public Double peakDisk;
-
- public Cluster.Utilization toClusterUtilization() {
- return new Cluster.Utilization(idealCpu, peakCpu, idealMemory, peakMemory, idealDisk, peakDisk);
- }
-
-}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/tenant/ArchiveAccess.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/tenant/ArchiveAccess.java
index fba361f9223..eb4bd2e2289 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/tenant/ArchiveAccess.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/tenant/ArchiveAccess.java
@@ -1,7 +1,9 @@
package com.yahoo.vespa.hosted.controller.tenant;
+import com.amazonaws.arn.Arn;
import com.yahoo.text.Text;
+import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
@@ -10,7 +12,6 @@ import java.util.stream.Collectors;
public class ArchiveAccess {
- private static final Pattern VALID_AWS_ARCHIVE_ACCESS_ROLE_PATTERN = Pattern.compile("arn:aws:iam::\\d{12}:.+");
private static final Pattern VALID_GCP_ARCHIVE_ACCESS_MEMBER_PATTERN = Pattern.compile("(?<prefix>[a-zA-Z]+):.+");
private static final Set<String> gcpMemberPrefixes = Set.of("user", "serviceAccount", "group", "domain");
@@ -56,14 +57,25 @@ public class ArchiveAccess {
return new ArchiveAccess(awsRole(), Optional.empty());
}
- private void validateAWSIAMRole(String role) {
- if (!VALID_AWS_ARCHIVE_ACCESS_ROLE_PATTERN.matcher(role).matches()) {
- throw new IllegalArgumentException(Text.format("Invalid archive access role '%s': Must match expected pattern: '%s'",
- awsRole.get(), VALID_AWS_ARCHIVE_ACCESS_ROLE_PATTERN.pattern()));
- }
+ private static final Pattern ACCOUNT_ID_PATTERN = Pattern.compile("\\d{12}");
+ private static void validateAWSIAMRole(String role) {
if (role.length() > 100) {
throw new IllegalArgumentException("Invalid archive access role too long, must be 100 or less characters");
}
+ try {
+ var arn = Arn.fromString(role);
+ if (!arn.getPartition().equals("aws")) throw new IllegalArgumentException("Partition must be 'aws'");
+ if (!arn.getService().equals("iam")) throw new IllegalArgumentException("Service must be 'iam'");
+ var resourceType = arn.getResource().getResourceType();
+ if (resourceType == null) throw new IllegalArgumentException("Missing resource type - must be 'role' or 'user'");
+ if (!List.of("user", "role").contains(resourceType))
+ throw new IllegalArgumentException("Invalid resource type - must be either a 'role' or 'user'");
+ var accountId = arn.getAccountId();
+ if (!ACCOUNT_ID_PATTERN.matcher(accountId).matches())
+ throw new IllegalArgumentException("Account id must be a 12-digit number");
+ } catch (IllegalArgumentException e) {
+ throw new IllegalArgumentException(Text.format("Invalid archive access IAM role '%s': %s", role, e.getMessage()));
+ }
}
private void validateGCPMember(String member) {
diff --git a/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/tenant/ArchiveAccessTest.java b/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/tenant/ArchiveAccessTest.java
new file mode 100644
index 00000000000..87e02793361
--- /dev/null
+++ b/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/tenant/ArchiveAccessTest.java
@@ -0,0 +1,45 @@
+package com.yahoo.vespa.hosted.controller.tenant;// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+import com.yahoo.text.Text;
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+/**
+ * @author bjorncs
+ */
+class ArchiveAccessTest {
+
+ @Test
+ void validatesUserProvidedIamRole() {
+ assertValidIamRole("arn:aws:iam::012345678912:user/foo");
+ assertValidIamRole("arn:aws:iam::012345678912:role/foo");
+
+ assertInvalidIamRole("arn:aws:iam::012345678912:foo/foo", "Invalid resource type - must be either a 'role' or 'user'");
+ assertInvalidIamRole("arn:aws:iam::012345678912:foo", "Missing resource type - must be 'role' or 'user'");
+ assertInvalidIamRole("arn:aws:iam::012345678912:role", "Missing resource type - must be 'role' or 'user'");
+ assertInvalidIamRole("arn:aws:iam::012345678912:", "Malformed ARN - no resource specified");
+ assertInvalidIamRole("arn:aws:iam::01234567891:user/foo", "Account id must be a 12-digit number");
+ assertInvalidIamRole("arn:gcp:iam::012345678912:user/foo", "Partition must be 'aws'");
+ assertInvalidIamRole("uri:aws:iam::012345678912:user/foo", "Malformed ARN - doesn't start with 'arn:'");
+ assertInvalidIamRole("arn:aws:s3:::mybucket", "Service must be 'iam'");
+ assertInvalidIamRole("", "Malformed ARN - doesn't start with 'arn:'");
+ assertInvalidIamRole("foo", "Malformed ARN - doesn't start with 'arn:'");
+ }
+
+ private static void assertValidIamRole(String role) { assertDoesNotThrow(() -> archiveAccess(role)); }
+
+ private static void assertInvalidIamRole(String role, String expectedMessage) {
+ var t = assertThrows(IllegalArgumentException.class, () -> archiveAccess(role));
+ var expectedPrefix = Text.format("Invalid archive access IAM role '%s': ", role);
+ System.out.println(t.getMessage());
+ assertTrue(t.getMessage().startsWith(expectedPrefix), role);
+ assertEquals(expectedMessage, t.getMessage().substring(expectedPrefix.length()));
+ }
+
+ private static ArchiveAccess archiveAccess(String iamRole) { return new ArchiveAccess().withAWSRole(iamRole); }
+
+} \ No newline at end of file
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java
index ef4ee16217e..ad2274c4e30 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java
@@ -33,14 +33,15 @@ import com.yahoo.vespa.hosted.controller.versions.VespaVersion.Confidence;
import java.time.Duration;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
+import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
+import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
-import java.util.Map.Entry;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
@@ -67,7 +68,6 @@ import static java.util.stream.Collectors.mapping;
import static java.util.stream.Collectors.toList;
import static java.util.stream.Collectors.toMap;
import static java.util.stream.Collectors.toSet;
-import static java.util.stream.Collectors.toUnmodifiableList;
/**
* Status of the deployment jobs of an {@link Application}.
@@ -444,7 +444,7 @@ public class DeploymentStatus {
* which does not downgrade any deployments in the instance,
* which is not already rolling out to the instance, and
* which causes at least one job to run if deployed to the instance.
- * For the "exclusive" revision upgrade policy it is the oldest such revision; otherwise, it is the latest.
+ * For the "next" revision target policy it is the oldest such revision; otherwise, it is the latest.
*/
public Change outstandingChange(InstanceName instance) {
StepStatus status = instanceSteps().get(instance);
@@ -503,25 +503,26 @@ public class DeploymentStatus {
.filter(run -> run.versions().equals(versions))
.findFirst())
.map(Run::start);
- Optional<Instant> systemTestedAt = testedAt(job.application(), systemTest(job.type()), versions);
- Optional<Instant> stagingTestedAt = testedAt(job.application(), stagingTest(job.type()), versions);
+ Optional<Instant> systemTestedAt = testedAt(job, systemTest(job.type()), versions);
+ Optional<Instant> stagingTestedAt = testedAt(job, stagingTest(job.type()), versions);
if (systemTestedAt.isEmpty() || stagingTestedAt.isEmpty()) return triggeredAt;
Optional<Instant> testedAt = systemTestedAt.get().isAfter(stagingTestedAt.get()) ? systemTestedAt : stagingTestedAt;
return triggeredAt.isPresent() && triggeredAt.get().isBefore(testedAt.get()) ? triggeredAt : testedAt;
}
- /** Earliest instant when versions were tested for the given instance */
- private Optional<Instant> testedAt(ApplicationId instance, JobType type, Versions versions) {
- return declaredTest(instance, type).map(__ -> allJobs.instance(instance.instance()))
- .orElse(allJobs)
- .type(type).asList().stream()
- .flatMap(status -> RunList.from(status)
- .on(versions)
- .matching(run -> run.id().type().zone().equals(type.zone()))
- .matching(Run::hasSucceeded)
- .asList().stream()
- .map(Run::start))
- .min(naturalOrder());
+ /** Earliest instant when versions were tested for the given instance. */
+ private Optional<Instant> testedAt(JobId job, JobType type, Versions versions) {
+ return prerequisiteTests(job, type).stream()
+ .map(test -> allJobs.get(test).stream()
+ .flatMap(status -> RunList.from(status)
+ .on(versions)
+ .matching(run -> run.id().type().zone().equals(type.zone()))
+ .matching(Run::hasSucceeded)
+ .asList().stream()
+ .map(run -> run.end().get()))
+ .min(naturalOrder()))
+ .reduce((o, n) -> o.isEmpty() || n.isEmpty() ? Optional.empty() : o.get().isBefore(n.get()) ? n : o)
+ .orElse(Optional.empty());
}
private Map<JobId, List<Job>> productionJobs(InstanceName instance, Change change, boolean assumeUpgradesSucceed) {
@@ -667,11 +668,10 @@ public class DeploymentStatus {
/** The test jobs that need to run prior to the given production deployment jobs. */
public Map<JobId, List<Job>> testJobs(Map<JobId, List<Job>> jobs) {
Map<JobId, List<Job>> testJobs = new LinkedHashMap<>();
- // First, look for a declared test in the instance of each production job.
jobs.forEach((job, versionsList) -> {
- for (JobType testType : List.of(systemTest(job.type()), stagingTest(job.type()))) {
- if (job.type().isProduction() && job.type().isDeployment()) {
- declaredTest(job.application(), testType).ifPresent(testJob -> {
+ if (job.type().isProduction() && job.type().isDeployment()) {
+ for (JobType testType : List.of(systemTest(job.type()), stagingTest(job.type()))) {
+ prerequisiteTests(job, testType).forEach(testJob -> {
for (Job productionJob : versionsList)
if (allJobs.successOn(testType, productionJob.versions())
.instance(testJob.application().instance())
@@ -685,26 +685,6 @@ public class DeploymentStatus {
}
}
});
- // If no declared test in the right instance was triggered, pick one from a different instance.
- jobs.forEach((job, versionsList) -> {
- for (JobType testType : List.of(systemTest(job.type()), stagingTest(job.type()))) {
- for (Job productionJob : versionsList)
- if ( job.type().isProduction() && job.type().isDeployment()
- && allJobs.successOn(testType, productionJob.versions()).asList().isEmpty()
- && testJobs.keySet().stream()
- .noneMatch(test -> test.type().equals(testType) && test.type().zone().equals(testType.zone())
- && testJobs.get(test).stream().anyMatch(testJob -> test.type().isSystemTest() ? testJob.versions().targetsMatch(productionJob.versions())
- : testJob.versions().equals(productionJob.versions())))) {
- JobId testJob = firstDeclaredOrElseImplicitTest(testType);
- testJobs.merge(testJob,
- List.of(new Job(testJob.type(),
- productionJob.versions(),
- jobSteps.get(testJob).readyAt(productionJob.change),
- productionJob.change)),
- DeploymentStatus::union);
- }
- }
- });
return Collections.unmodifiableMap(testJobs);
}
@@ -749,6 +729,27 @@ public class DeploymentStatus {
return first;
}
+ /**
+ * Returns set of declared tests directly reachable from the given production job, or the first declared (or implicit) test.
+ * A test in instance {@code I} is directly reachable from a job in instance {@code K} if a chain of instances {@code I, J, ..., K}
+ * exists, such that only {@code I} has a declared test of the particular type.
+ * These are the declared tests that should be OK before we proceed with the corresponding production deployment.
+ * If no such tests exist, the first declared test, or a test in the first declared instance, is used instead.
+ */
+ private List<JobId> prerequisiteTests(JobId prodJob, JobType testType) {
+ List<JobId> tests = new ArrayList<>();
+ Deque<InstanceName> instances = new ArrayDeque<>();
+ instances.add(prodJob.application().instance());
+ while ( ! instances.isEmpty()) {
+ InstanceName instance = instances.poll();
+ Optional<JobId> test = declaredTest(application().id().instance(instance), testType);
+ if (test.isPresent()) tests.add(test.get());
+ else instances.addAll(instanceSteps().get(instance).dependencies().stream().map(StepStatus::instance).toList());
+ }
+ if (tests.isEmpty()) tests.add(firstDeclaredOrElseImplicitTest(testType));
+ return tests;
+ }
+
/** Adds the primitive steps contained in the given step, which depend on the given previous primitives, to the dependency graph. */
private List<StepStatus> fillStep(Map<JobId, StepStatus> dependencies, List<StepStatus> allSteps, DeploymentSpec.Step step,
List<StepStatus> previous, InstanceName instance, Function<JobId, JobStatus> jobs,
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java
index 169cde8437a..e905b60687e 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java
@@ -452,12 +452,11 @@ public class DeploymentTrigger {
Predicate<RevisionId> revisionFilter = spec.revisionTarget() == DeploymentSpec.RevisionTarget.next
? failing -> status.application().require(instance).change().revision().get().compareTo(failing) == 0
: failing -> revision.compareTo(failing) > 0;
- switch (spec.revisionChange()) {
- case whenClear: return ! isChangingRevision;
- case whenFailing: return ! isChangingRevision || status.hasFailures(revisionFilter);
- case always: return true;
- default: throw new IllegalStateException("Unknown revision upgrade policy");
- }
+ return switch (spec.revisionChange()) {
+ case whenClear -> ! isChangingRevision;
+ case whenFailing -> ! isChangingRevision || status.hasFailures(revisionFilter);
+ case always -> true;
+ };
}
private Instance withRemainingChange(Instance instance, Change change, DeploymentStatus status, boolean allowOutdatedPlatform) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/Run.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/Run.java
index 7ffaaabb1a7..71ff28c47e6 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/Run.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/Run.java
@@ -14,6 +14,7 @@ import java.util.stream.Collectors;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.aborted;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.noTests;
+import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.reset;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.running;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.success;
import static com.yahoo.vespa.hosted.controller.deployment.Step.Status.succeeded;
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/Notifier.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/Notifier.java
index aeae65ad9d2..1c76f58a6b2 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/Notifier.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/Notifier.java
@@ -34,16 +34,6 @@ import static com.yahoo.yolean.Exceptions.uncheck;
* @author enygaard
*/
public class Notifier {
- private static final String header = """
- <div style="background: #00598c; height: 55px; width: 100%">
- <img
- src="https://vespa.ai/assets/vespa-logo.png"
- style="width: auto; height: 34px; margin: 10px"
- />
- </div>
- <br>
- """;
-
private final CuratorDb curatorDb;
private final Mailer mailer;
private final FlagSource flagSource;
@@ -72,13 +62,11 @@ public class Notifier {
}
var tenant = curatorDb.readTenant(source.tenant());
tenant.stream().forEach(t -> {
- if (t instanceof CloudTenant) {
- var ct = (CloudTenant) t;
+ if (t instanceof CloudTenant ct) {
ct.info().contacts().all().stream()
.filter(c -> c.audiences().contains(TenantContacts.Audience.NOTIFICATIONS))
.collect(Collectors.groupingBy(TenantContacts.Contact::type, Collectors.toList()))
- .entrySet()
- .forEach(e -> notifications.forEach(n -> dispatch(n, e.getKey(), e.getValue())));
+ .forEach((type, contacts) -> notifications.forEach(n -> dispatch(n, type, contacts)));
}
});
}
@@ -95,22 +83,16 @@ public class Notifier {
private boolean skipSource(NotificationSource source) {
// Do not dispatch notification for dev and perf environments
- if (source.zoneId()
+ return source.zoneId()
.map(z -> z.environment())
.map(e -> e == Environment.dev || e == Environment.perf)
- .orElse(false)) {
- return true;
- }
- return false;
+ .orElse(false);
}
private void dispatch(Notification notification, TenantContacts.Type type, Collection<? extends TenantContacts.Contact> contacts) {
switch (type) {
- case EMAIL:
- dispatch(notification, contacts.stream().map(c -> (TenantContacts.EmailContact) c).toList());
- break;
- default:
- throw new IllegalArgumentException("Unknown TenantContacts type " + type.name());
+ case EMAIL -> dispatch(notification, contacts.stream().map(c -> (TenantContacts.EmailContact) c).toList());
+ default -> throw new IllegalArgumentException("Unknown TenantContacts type " + type.name());
}
}
@@ -136,6 +118,7 @@ public class Notifier {
.replace("[[NOTIFICATION_HEADER]]", content.messagePrefix())
.replace("[[NOTIFICATION_ITEMS]]", notification.messages().stream()
.map(Notifier::linkify)
+ .map(Notifier::capitalise)
.map(m -> "<p>" + m + "</p>")
.collect(Collectors.joining()))
.replace("[[LINK_TO_NOTIFICATION]]", notificationLink(notification.source()))
@@ -190,4 +173,8 @@ public class Notifier {
}
return uri.toString();
}
+
+ private static String capitalise(String m) {
+ return m.substring(0, 1).toUpperCase() + m.substring(1);
+ }
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
index 2fffdc25875..c8597cff405 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
@@ -64,6 +64,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.configserver.Cluster;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServerException;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.DeploymentResult;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.DeploymentResult.LogEntry;
+import com.yahoo.vespa.hosted.controller.api.integration.configserver.Load;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.LoadBalancer;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.NodeFilter;
@@ -74,7 +75,6 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RevisionId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.SourceRevision;
-import com.yahoo.vespa.hosted.controller.api.integration.dns.VpcEndpointService.VpcEndpoint;
import com.yahoo.vespa.hosted.controller.api.integration.noderepository.RestartFilter;
import com.yahoo.vespa.hosted.controller.api.integration.secrets.TenantSecretStore;
import com.yahoo.vespa.hosted.controller.api.role.Role;
@@ -1348,17 +1348,13 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
toSlime(cluster.min(), clusterObject.setObject("min"));
toSlime(cluster.max(), clusterObject.setObject("max"));
toSlime(cluster.current(), clusterObject.setObject("current"));
- if (cluster.target().isPresent()
- && ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers()))
- toSlime(cluster.target().get(), clusterObject.setObject("target"));
- cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
- utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
+ toSlime(cluster.target(), cluster, clusterObject.setObject("target"));
+ toSlime(cluster.suggested(), cluster, clusterObject.setObject("suggested"));
+ legacyUtilizationToSlime(cluster.target().peak(), cluster.target().ideal(), clusterObject.setObject("utilization")); // TODO: Remove after January 2023
scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
- clusterObject.setString("autoscalingStatusCode", cluster.autoscalingStatusCode());
- clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
+ clusterObject.setString("autoscalingStatusCode", cluster.target().status()); // TODO: Remove after January 2023
+ clusterObject.setString("autoscalingStatus", cluster.target().description()); // TODO: Remove after January 2023
clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
- clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate());
- clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax());
}
return new SlimeJsonResponse(slime);
}
@@ -2704,15 +2700,35 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
object.setDouble("cost", cost);
}
- private void utilizationToSlime(Cluster.Utilization utilization, Cursor utilizationObject) {
- utilizationObject.setDouble("idealCpu", utilization.idealCpu());
- utilizationObject.setDouble("peakCpu", utilization.peakCpu());
+ private void toSlime(Cluster.Autoscaling autoscaling, Cluster cluster, Cursor autoscalingObject) {
+ // TODO: Remove after January 2023
+ if (autoscaling.resources().isPresent()
+ && ! autoscaling.resources().get().justNumbers().equals(cluster.current().justNumbers()))
+ toSlime(autoscaling.resources().get(), autoscalingObject);
- utilizationObject.setDouble("idealMemory", utilization.idealMemory());
- utilizationObject.setDouble("peakMemory", utilization.peakMemory());
+ autoscalingObject.setString("status", autoscaling.status());
+ autoscalingObject.setString("description", autoscaling.description());
+ autoscaling.resources().ifPresent(resources -> toSlime(resources, autoscalingObject.setObject("resources")));
+ autoscalingObject.setLong("at", autoscaling.at().toEpochMilli());
+ toSlime(autoscaling.peak(), autoscalingObject.setObject("peak"));
+ toSlime(autoscaling.ideal(), autoscalingObject.setObject("ideal"));
+ }
+
+ private void toSlime(Load load, Cursor loadObject) {
+ loadObject.setDouble("cpu", load.cpu());
+ loadObject.setDouble("memory", load.memory());
+ loadObject.setDouble("disk", load.disk());
+ }
+
+ private void legacyUtilizationToSlime(Load peak, Load ideal, Cursor utilizationObject) {
+ utilizationObject.setDouble("idealCpu", ideal.cpu());
+ utilizationObject.setDouble("peakCpu", peak.cpu());
+
+ utilizationObject.setDouble("idealMemory", ideal.memory());
+ utilizationObject.setDouble("peakMemory", peak.memory());
- utilizationObject.setDouble("idealDisk", utilization.idealDisk());
- utilizationObject.setDouble("peakDisk", utilization.peakDisk());
+ utilizationObject.setDouble("idealDisk", ideal.disk());
+ utilizationObject.setDouble("peakDisk", peak.disk());
}
private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandler.java
index d1962b16120..307880682d9 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandler.java
@@ -246,10 +246,8 @@ public class BillingApiHandler extends ThreadedHttpRequestHandler {
LocalDate startDate = LocalDate.parse(getInspectorFieldOrThrow(inspector, "startTime"));
LocalDate endDate = LocalDate.parse(getInspectorFieldOrThrow(inspector, "endTime"));
- ZonedDateTime startTime = startDate.atStartOfDay(ZoneId.of("UTC"));
- ZonedDateTime endTime = endDate.plusDays(1).atStartOfDay(ZoneId.of("UTC"));
- var billId = billingController.createBillForPeriod(tenantName, startTime, endTime, userId);
+ var billId = billingController.createBillForPeriod(tenantName, startDate, endDate, userId);
Slime slime = new Slime();
Cursor root = slime.setObject();
@@ -476,7 +474,7 @@ public class BillingApiHandler extends ThreadedHttpRequestHandler {
private LocalDate untilParameter(String until) {
if (until == null || until.isEmpty() || until.isBlank())
- return LocalDate.now().plusDays(1);
+ return LocalDate.now();
return LocalDate.parse(until);
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandlerV2.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandlerV2.java
index 8722e588fa7..0ddaa409ef8 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandlerV2.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandlerV2.java
@@ -345,12 +345,7 @@ public class BillingApiHandlerV2 extends RestApiRequestHandler<BillingApiHandler
private LocalDate untilParameter(RestApi.RequestContext ctx) {
return ctx.queryParameters().getString("until")
.map(LocalDate::parse)
- .map(date -> date.plusDays(1))
- .orElseGet(this::tomorrow);
- }
-
- private LocalDate tomorrow() {
- return LocalDate.now(clock).plusDays(1);
+ .orElseGet(() -> LocalDate.now(clock));
}
private static String getInspectorFieldOrThrow(Inspector inspector, String field) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/filter/AthenzRoleFilter.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/filter/AthenzRoleFilter.java
index a93741fd8fb..e23f0205e5a 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/filter/AthenzRoleFilter.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/filter/AthenzRoleFilter.java
@@ -79,8 +79,7 @@ public class AthenzRoleFilter extends JsonSecurityRequestFilterBase {
@Override
protected Optional<ErrorResponse> filter(DiscFilterRequest request) {
try {
- Principal principal = request.getUserPrincipal();
- if (principal instanceof AthenzPrincipal) {
+ if (request.getUserPrincipal() instanceof AthenzPrincipal principal) {
Optional<DecodedJWT> oktaAt = Optional.ofNullable((String) request.getAttribute("okta.access-token")).map(JWT::decode);
Optional<X509Certificate> cert = request.getClientCertificateChain().stream().findFirst();
Instant issuedAt = cert.map(X509Certificate::getNotBefore)
@@ -89,9 +88,8 @@ public class AthenzRoleFilter extends JsonSecurityRequestFilterBase {
Instant expireAt = cert.map(X509Certificate::getNotAfter)
.or(() -> oktaAt.map(Payload::getExpiresAt))
.map(Date::toInstant).orElse(Instant.MAX);
- request.setAttribute(SecurityContext.ATTRIBUTE_NAME, new SecurityContext(principal,
- roles((AthenzPrincipal) principal, request.getUri()),
- issuedAt, expireAt));
+ request.setAttribute(SecurityContext.ATTRIBUTE_NAME,
+ new SecurityContext(principal, roles(principal, request.getUri()), issuedAt, expireAt));
}
}
catch (Exception e) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/routing/RoutingApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/routing/RoutingApiHandler.java
index 4184601179f..232f25f5674 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/routing/RoutingApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/routing/RoutingApiHandler.java
@@ -252,7 +252,7 @@ public class RoutingApiHandler extends AuditLoggingRequestHandler {
for (var application : applications) {
var instances = instanceId == null
? application.instances().values()
- : List.of(application.instances().get(instanceId.instance()));
+ : List.of(application.require(instanceId.instance()));
EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application);
for (var instance : instances) {
var zones = zoneId == null
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/response/application.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/response/application.json
index e4d0de9eb9f..37da498b6ec 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/response/application.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/response/application.json
@@ -56,9 +56,7 @@
"architecture": "x86_64"
}
},
- "scalingDuration": 400000,
- "maxQueryGrowthRate": 0.7,
- "currentQueryFractionOfMax": 0.3
+ "scalingDuration": 400000
},
"logserver": {
"type": "admin",
@@ -102,21 +100,34 @@
}
},
"suggested": {
- "nodes": 2,
- "groups": 1,
- "resources": {
- "vcpu": 2.0,
- "memoryGb": 4.0,
- "diskGb": 50.0,
- "bandwidthGbps": 0.3,
- "diskSpeed": "fast",
- "storageType": "local",
- "architecture": "x86_64"
+ "status" : "unavailable",
+ "description" : "",
+ "resources" : {
+ "nodes": 2,
+ "groups": 1,
+ "resources": {
+ "vcpu": 2.0,
+ "memoryGb": 4.0,
+ "diskGb": 50.0,
+ "bandwidthGbps": 0.3,
+ "diskSpeed": "fast",
+ "storageType": "local",
+ "architecture": "x86_64"
+ },
+ "at" : 123,
+ "peak" : {
+ "cpu" : 0.1,
+ "memory" : 0.2,
+ "disk" : 0.3
+ },
+ "ideal" : {
+ "cpu" : 0.4,
+ "memory" : 0.5,
+ "disk" : 0.6
+ }
}
},
- "scalingDuration": 90000,
- "maxQueryGrowthRate": 0.7,
- "currentQueryFractionOfMax": 0.3
+ "scalingDuration": 90000
},
"music": {
"type": "content",
@@ -172,9 +183,7 @@
"architecture": "x86_64"
}
},
- "scalingDuration": 1000000,
- "maxQueryGrowthRate": 0.7,
- "currentQueryFractionOfMax": 0.3
+ "scalingDuration": 1000000
}
}
} \ No newline at end of file
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java
index b38bdbb1eaf..f7e4feb4da1 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java
@@ -453,7 +453,9 @@ public class DeploymentContext {
/** Pulls the ready job trigger, and then runs the whole of job for the given instance, successfully. */
private DeploymentContext runJob(JobType type, ApplicationId instance) {
triggerJobs();
- var job = currentRun(new JobId(instance, type)).id().job();
+ Run run = currentRun(new JobId(instance, type));
+ assertEquals(type.zone(), run.id().type().zone());
+ JobId job = run.id().job();
doDeploy(job);
if (job.type().isDeployment()) {
doUpgrade(job);
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
index 9fdecfa625e..d5f636b5294 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
@@ -45,6 +45,7 @@ import java.util.Set;
import java.util.stream.Collectors;
import static ai.vespa.validation.Validation.require;
+import static com.yahoo.config.provision.Environment.prod;
import static com.yahoo.config.provision.SystemName.cd;
import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.applicationPackage;
import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.productionApNortheast1;
@@ -2458,7 +2459,7 @@ public class DeploymentTriggerTest {
ZoneApiMock.Builder builder = ZoneApiMock.newBuilder().withCloud("centauri").withSystem(tester.controller().system());
ZoneApi testAlphaCentauri = builder.with(ZoneId.from(Environment.test, alphaCentauri)).build();
ZoneApi stagingAlphaCentauri = builder.with(ZoneId.from(Environment.staging, alphaCentauri)).build();
- ZoneApi prodAlphaCentauri = builder.with(ZoneId.from(Environment.prod, alphaCentauri)).build();
+ ZoneApi prodAlphaCentauri = builder.with(ZoneId.from(prod, alphaCentauri)).build();
tester.controllerTester().zoneRegistry().addZones(testAlphaCentauri, stagingAlphaCentauri, prodAlphaCentauri);
tester.controllerTester().setRoutingMethod(tester.controllerTester().zoneRegistry().zones().all().ids(), RoutingMethod.sharedLayer4);
@@ -2474,6 +2475,8 @@ public class DeploymentTriggerTest {
JobId stagingTestJob = new JobId(tests.instanceId(), stagingTest);
JobId mainJob = new JobId(main.instanceId(), productionUsEast3);
JobId centauriJob = new JobId(main.instanceId(), JobType.deploymentTo(prodAlphaCentauri.getId()));
+ JobType centuariTest = JobType.systemTest(tester.controllerTester().zoneRegistry(), CloudName.from("centauri"));
+ JobType centuariStaging = JobType.stagingTest(tester.controllerTester().zoneRegistry(), CloudName.from("centauri"));
assertEquals(Set.of(systemTestJob, stagingTestJob, mainJob, centauriJob), tests.deploymentStatus().jobsToRun().keySet());
tests.runJob(systemTest).runJob(stagingTest).triggerJobs();
@@ -2482,13 +2485,13 @@ public class DeploymentTriggerTest {
tests.triggerJobs();
assertEquals(3, tester.jobs().active().size());
- tests.runJob(systemTest);
+ tests.runJob(centuariTest);
tester.outstandingChangeDeployer().run();
assertEquals(2, tester.jobs().active().size());
main.assertRunning(productionUsEast3);
- tests.runJob(stagingTest);
+ tests.runJob(centuariStaging);
main.runJob(productionUsEast3).runJob(centauriJob.type());
assertEquals(Change.empty(), tests.instance().change());
@@ -2512,16 +2515,16 @@ public class DeploymentTriggerTest {
Version version3 = new Version("6.4");
tester.controllerTester().upgradeSystem(version3);
tests.runJob(systemTest) // Success in default cloud.
- .failDeployment(systemTest); // Failure in centauri cloud.
+ .failDeployment(centuariTest); // Failure in centauri cloud.
tester.upgrader().run();
assertEquals(Change.of(version3), tests.instance().change());
assertEquals(Change.empty(), main.instance().change());
assertEquals(Set.of(systemTestJob), tests.deploymentStatus().jobsToRun().keySet());
- tests.runJob(systemTest).runJob(systemTest);
+ tests.runJob(systemTest).runJob(centuariTest);
tester.upgrader().run();
- tests.runJob(stagingTest).runJob(stagingTest);
+ tests.runJob(stagingTest).runJob(centuariStaging);
assertEquals(Change.empty(), tests.instance().change());
assertEquals(Change.of(version3), main.instance().change());
@@ -2579,7 +2582,7 @@ public class DeploymentTriggerTest {
assertEquals(Change.empty(), main.instance().change());
assertEquals(Set.of(systemTestJob, stagingTestJob), tests.deploymentStatus().jobsToRun().keySet());
- tests.runJob(systemTest);
+ tests.runJob(centuariTest);
tester.outstandingChangeDeployer().run();
tester.outstandingChangeDeployer().run();
@@ -2587,7 +2590,7 @@ public class DeploymentTriggerTest {
assertEquals(Change.of(revision3.get()), main.instance().change());
assertEquals(Set.of(stagingTestJob, mainJob, centauriJob), tests.deploymentStatus().jobsToRun().keySet());
- tests.runJob(stagingTest);
+ tests.runJob(centuariStaging);
assertEquals(Change.empty(), tests.instance().change());
assertEquals(Change.of(revision3.get()), main.instance().change());
@@ -2663,20 +2666,28 @@ public class DeploymentTriggerTest {
ZoneApiMock.Builder builder = ZoneApiMock.newBuilder().withCloud("centauri").withSystem(tester.controller().system());
ZoneApi testAlphaCentauri = builder.with(ZoneId.from(Environment.test, alphaCentauri)).build();
ZoneApi stagingAlphaCentauri = builder.with(ZoneId.from(Environment.staging, alphaCentauri)).build();
- ZoneApi prodAlphaCentauri = builder.with(ZoneId.from(Environment.prod, alphaCentauri)).build();
+ ZoneApi prodAlphaCentauri = builder.with(ZoneId.from(prod, alphaCentauri)).build();
tester.controllerTester().zoneRegistry().addZones(testAlphaCentauri, stagingAlphaCentauri, prodAlphaCentauri);
tester.controllerTester().setRoutingMethod(tester.controllerTester().zoneRegistry().zones().all().ids(), RoutingMethod.sharedLayer4);
tester.configServer().bootstrap(tester.controllerTester().zoneRegistry().zones().all().ids(), SystemApplication.notController());
ApplicationPackage appPackage = ApplicationPackageBuilder.fromDeploymentXml(spec);
- DeploymentContext app = tester.newDeploymentContext("tenant", "application", "alpha").submit(appPackage).deploy();
- app.submit(appPackage);
- Map<JobId, List<DeploymentStatus.Job>> jobs = app.deploymentStatus().jobsToRun();
+ DeploymentContext alpha = tester.newDeploymentContext("tenant", "application", "alpha").submit(appPackage).deploy();
+ DeploymentContext beta = tester.newDeploymentContext("tenant", "application", "beta");
+ DeploymentContext gamma = tester.newDeploymentContext("tenant", "application", "gamma");
+ DeploymentContext nu = tester.newDeploymentContext("tenant", "application", "nu");
+ DeploymentContext omega = tester.newDeploymentContext("tenant", "application", "omega");
+ DeploymentContext separate = tester.newDeploymentContext("tenant", "application", "separate");
+ DeploymentContext independent = tester.newDeploymentContext("tenant", "application", "independent");
+ DeploymentContext dependent = tester.newDeploymentContext("tenant", "application", "dependent");
+ alpha.submit(appPackage);
+ Map<JobId, List<DeploymentStatus.Job>> jobs = alpha.deploymentStatus().jobsToRun();
JobType centauriTest = JobType.systemTest(tester.controller().zoneRegistry(), CloudName.from("centauri"));
JobType centauriStaging = JobType.stagingTest(tester.controller().zoneRegistry(), CloudName.from("centauri"));
- assertQueued("separate", jobs, centauriTest);
+ JobType centauriProd = JobType.deploymentTo(ZoneId.from(prod, alphaCentauri));
+ assertQueued("separate", jobs, systemTest, centauriTest);
assertQueued("separate", jobs, stagingTest, centauriStaging);
assertQueued("independent", jobs, systemTest, centauriTest);
assertQueued("alpha", jobs, systemTest);
@@ -2684,8 +2695,51 @@ public class DeploymentTriggerTest {
assertQueued("gamma", jobs, centauriTest);
// Once alpha runs its default system test, it also runs the centauri system test, as omega depends on it.
- app.runJob(systemTest);
- assertQueued("alpha", app.deploymentStatus().jobsToRun(), centauriTest);
+ alpha.runJob(systemTest);
+ assertQueued("alpha", alpha.deploymentStatus().jobsToRun(), centauriTest);
+
+ // Run tests, and see production jobs are triggered as they are verified.
+ for (DeploymentContext app : List.of(alpha, beta, gamma, nu, omega, separate, independent, dependent))
+ tester.deploymentTrigger().forceChange(app.instanceId(), Change.of(alpha.lastSubmission().get()));
+
+ // Missing separate staging test.
+ alpha.triggerJobs().assertNotRunning(productionUsEast3);
+
+ beta.runJob(centauriTest);
+ // Missing separate centauri staging.
+ beta.triggerJobs().assertNotRunning(centauriProd);
+
+ gamma.runJob(centauriTest);
+
+ // Missing alpha centauri test, and nu centauri staging.
+ omega.triggerJobs().assertNotRunning(centauriProd);
+ alpha.runJob(centauriTest);
+ omega.triggerJobs().assertNotRunning(centauriProd);
+ nu.runJob(centauriStaging);
+ omega.triggerJobs().assertRunning(centauriProd);
+
+ separate.triggerJobs().assertNotRunning(centauriProd);
+
+ separate.runJob(centauriStaging);
+ separate.triggerJobs().assertNotRunning(centauriProd);
+ beta.triggerJobs().assertRunning(centauriProd);
+
+ separate.runJob(centauriTest);
+ separate.triggerJobs().assertRunning(centauriProd);
+
+ dependent.triggerJobs().assertNotRunning(productionUsEast3);
+
+ separate.runJob(systemTest).runJob(stagingTest).triggerJobs();
+ dependent.triggerJobs().assertRunning(productionUsEast3);
+ alpha.triggerJobs().assertRunning(productionUsEast3);
+
+ separate.runJob(centauriProd);
+ alpha.runJob(productionUsEast3);
+ beta.runJob(centauriProd);
+ omega.runJob(centauriProd);
+ dependent.runJob(productionUsEast3);
+ independent.runJob(centauriTest).runJob(systemTest);
+ assertEquals(Map.of(), alpha.deploymentStatus().jobsToRun());
}
private static void assertQueued(String instance, Map<JobId, List<DeploymentStatus.Job>> jobs, JobType... expected) {
@@ -2788,7 +2842,7 @@ public class DeploymentTriggerTest {
}
@Test
- void test() {
+ void testOrderOfTests() {
String deploymentXml = """
<deployment version="1.0">
<test/>
@@ -2802,9 +2856,6 @@ public class DeploymentTriggerTest {
</prod>
</deployment>""";
- // TODO jonmv: recreate problem where revision starts, then upgrade, while prod is blocked,
- // then both are tested as separate upgrades, but prod-test has them reversed.
-
Version version1 = new Version("7.1");
tester.controllerTester().upgradeSystem(version1);
ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml);
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
index f576c90e195..448bb9ac15f 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
@@ -30,6 +30,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.configserver.Cluster;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServer;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.ContainerEndpoint;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.DeploymentResult;
+import com.yahoo.vespa.hosted.controller.api.integration.configserver.Load;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.LoadBalancer;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.LoadBalancer.PrivateServiceInfo;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node;
@@ -43,7 +44,6 @@ import com.yahoo.vespa.hosted.controller.api.integration.noderepository.RestartF
import com.yahoo.vespa.hosted.controller.api.integration.secrets.TenantSecretStore;
import com.yahoo.vespa.hosted.controller.application.SystemApplication;
import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
-import wiremock.org.checkerframework.checker.units.qual.A;
import java.io.ByteArrayInputStream;
import java.io.IOException;
@@ -116,20 +116,18 @@ public class ConfigServerMock extends AbstractComponent implements ConfigServer
new ClusterResources(2, 1, new NodeResources(1, 4, 20, 1, slow, remote)),
new ClusterResources(2, 1, new NodeResources(4, 16, 90, 1, slow, remote)),
current,
- Optional.of(new ClusterResources(2, 1, new NodeResources(3, 8, 50, 1, slow, remote))),
- Optional.empty(),
- new Cluster.Utilization(0.2, 0.35,
- 0.5, 0.65,
- 0.8, 1.0),
+ new Cluster.Autoscaling("ideal",
+ "Cluster is ideally scaled",
+ Optional.of(new ClusterResources(2, 1, new NodeResources(3, 8, 50, 1, slow, remote))),
+ Instant.ofEpochMilli(123),
+ new Load(0.35, 0.65, 1.0),
+ new Load(0.2, 0.5, 0.8)),
+ Cluster.Autoscaling.empty(),
List.of(new Cluster.ScalingEvent(new ClusterResources(0, 0, NodeResources.unspecified()),
current,
Instant.ofEpochMilli(1234),
Optional.of(Instant.ofEpochMilli(2234)))),
- "ideal",
- "Cluster is ideally scaled",
- Duration.ofMinutes(6),
- 0.7,
- 0.3);
+ Duration.ofMinutes(6));
nodeRepository.putApplication(zone,
new com.yahoo.vespa.hosted.controller.api.integration.configserver.Application(application,
List.of(cluster)));
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-clusters.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-clusters.json
index 6527237840e..c013ccb00fe 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-clusters.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-clusters.json
@@ -52,7 +52,48 @@
"diskSpeed": "slow",
"storageType": "remote"
},
- "cost": 0.29
+ "cost": 0.29,
+ "status": "ideal",
+ "description": "Cluster is ideally scaled",
+ "resources": {
+ "nodes": 2,
+ "groups": 1,
+ "nodeResources": {
+ "vcpu": 3.0,
+ "memoryGb": 8.0,
+ "diskGb": 50.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "slow",
+ "storageType": "remote"
+ },
+ "cost": 0.29
+ },
+ "at" : 123,
+ "peak": {
+ "cpu": 0.35,
+ "memory": 0.65,
+ "disk": 1.0
+ },
+ "ideal": {
+ "cpu": 0.2,
+ "memory": 0.5,
+ "disk": 0.8
+ }
+ },
+ "suggested": {
+ "status": "unavailable",
+ "description": "",
+ "at": 0,
+ "peak": {
+ "cpu": 0.0,
+ "memory": 0.0,
+ "disk": 0.0
+ },
+ "ideal": {
+ "cpu": 0.0,
+ "memory": 0.0,
+ "disk": 0.0
+ }
},
"utilization": {
"idealCpu": 0.2,
@@ -96,9 +137,7 @@
],
"autoscalingStatusCode": "ideal",
"autoscalingStatus": "Cluster is ideally scaled",
- "scalingDuration": 360000,
- "maxQueryGrowthRate": 0.7,
- "currentQueryFractionOfMax": 0.3
+ "scalingDuration": 360000
}
}
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandlerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandlerTest.java
index 73d4daf92da..79007a4439a 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandlerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandlerTest.java
@@ -114,8 +114,8 @@ public class BillingApiHandlerTest extends ControllerContainerCloudTest {
bills = billingController.getBillsForTenant(tenant);
assertEquals(1, bills.size());
Bill bill = bills.get(0);
- assertEquals("2020-04-20T00:00Z[UTC]", bill.getStartTime().toString());
- assertEquals("2020-05-21T00:00Z[UTC]", bill.getEndTime().toString());
+ assertEquals("2020-04-20T00:00Z", bill.getStartTime().toString());
+ assertEquals("2020-05-21T00:00Z", bill.getEndTime().toString());
assertEquals("2020-04-20", bill.getStartDate().toString());
assertEquals("2020-05-20", bill.getEndDate().toString());
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/routing/RoutingApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/routing/RoutingApiTest.java
index cb402d700e2..b90c886f10d 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/routing/RoutingApiTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/routing/RoutingApiTest.java
@@ -215,6 +215,12 @@ public class RoutingApiTest extends ControllerContainerTest {
"", Request.Method.GET),
new File("rotation/deployment-status-initial.json"));
+ // GET initial deployment status: unknown instance
+ tester.assertResponse(operatorRequest("http://localhost:8080/routing/v1/status/tenant/tenant/application/application/instance/foo/environment/prod/region/us-west-1",
+ "", Request.Method.GET),
+ "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Unknown instance 'foo' in 'tenant.application'\"}",
+ 400);
+
// POST sets deployment out
tester.assertResponse(operatorRequest("http://localhost:8080/routing/v1/inactive/tenant/tenant/application/application/instance/default/environment/prod/region/us-west-1",
"", Request.Method.POST),
diff --git a/controller-server/src/test/resources/mail/notification.txt b/controller-server/src/test/resources/mail/notification.txt
index 946d9c5fd68..35db37fbc12 100644
--- a/controller-server/src/test/resources/mail/notification.txt
+++ b/controller-server/src/test/resources/mail/notification.txt
@@ -445,7 +445,7 @@
<p>
There are problems with tests for default.default:
</p>
- <p>test package has production tests, but no production tests are declared in deployment.xml</p><p>see <a href="https://docs.vespa.ai/en/testing.html">https://docs.vespa.ai/en/testing.html</a> for details on how to write system tests for Vespa</p>
+ <p>Test package has production tests, but no production tests are declared in deployment.xml</p><p>See <a href="https://docs.vespa.ai/en/testing.html">https://docs.vespa.ai/en/testing.html</a> for details on how to write system tests for Vespa</p>
</div>
</td>
</tr>
diff --git a/dist/vespa.spec b/dist/vespa.spec
index 3aada93c69b..2f45cf0ffa1 100644
--- a/dist/vespa.spec
+++ b/dist/vespa.spec
@@ -767,6 +767,7 @@ fi
%dir %{_prefix}
%dir %{_prefix}/lib
%dir %{_prefix}/lib/jars
+%{_prefix}/lib/jars/airlift-zstd.jar
%{_prefix}/lib/jars/application-model-jar-with-dependencies.jar
%{_prefix}/lib/jars/bc*-jdk18on-*.jar
%{_prefix}/lib/jars/config-bundle-jar-with-dependencies.jar
diff --git a/document/src/main/java/com/yahoo/document/DocumentUtil.java b/document/src/main/java/com/yahoo/document/DocumentUtil.java
index f1a75f78b09..458c2bdbf6f 100644
--- a/document/src/main/java/com/yahoo/document/DocumentUtil.java
+++ b/document/src/main/java/com/yahoo/document/DocumentUtil.java
@@ -6,6 +6,7 @@ package com.yahoo.document;
* @author Einar M Rosenvinge
* @since 5.1.9
*/
+@Deprecated(forRemoval = true)
public class DocumentUtil {
/**
* A convenience method that can be used to calculate a max pending queue size given
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
index d8756a22506..124238f6e96 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
@@ -226,7 +226,7 @@ public class Flags {
public static final UnboundBooleanFlag ENABLED_HORIZON_DASHBOARD = defineFeatureFlag(
"enabled-horizon-dashboard", false,
- List.of("olaa"), "2021-09-13", "2023-01-01",
+ List.of("olaa"), "2021-09-13", "2023-06-01",
"Enable Horizon dashboard",
"Takes effect immediately",
TENANT_ID, CONSOLE_USER_EMAIL
@@ -311,7 +311,7 @@ public class Flags {
public static final UnboundBooleanFlag SEPARATE_METRIC_CHECK_CONFIG = defineFeatureFlag(
"separate-metric-check-config", false,
- List.of("olaa"), "2022-07-04", "2023-01-01",
+ List.of("olaa"), "2022-07-04", "2023-06-01",
"Determines whether one metrics config check should be written per Vespa node",
"Takes effect on next tick",
HOSTNAME);
@@ -326,7 +326,7 @@ public class Flags {
public static final UnboundBooleanFlag CLEANUP_TENANT_ROLES = defineFeatureFlag(
"cleanup-tenant-roles", false,
- List.of("olaa"), "2022-08-10", "2023-01-01",
+ List.of("olaa"), "2022-08-10", "2023-02-01",
"Determines whether old tenant roles should be deleted",
"Takes effect next maintenance run"
);
@@ -345,21 +345,9 @@ public class Flags {
"Takes effect at redeployment",
APPLICATION_ID);
- public static final UnboundStringFlag CSRF_MODE = defineStringFlag(
- "csrf-mode", "disabled",
- List.of("bjorncs", "tokle"), "2022-09-22", "2023-06-01",
- "Set mode for CSRF filter ('disabled', 'log_only', 'enabled')",
- "Takes effect on controller restart/redeployment");
-
- public static final UnboundListFlag<String> CSRF_USERS = defineListFlag(
- "csrf-users", List.of(), String.class,
- List.of("bjorncs", "tokle"), "2022-09-22", "2023-06-01",
- "List of users to enable CSRF filter for. Use empty list for everyone.",
- "Takes effect on controller restart/redeployment");
-
public static final UnboundBooleanFlag ENABLE_OTELCOL = defineFeatureFlag(
"enable-otel-collector", false,
- List.of("olaa"), "2022-09-23", "2023-01-01",
+ List.of("olaa"), "2022-09-23", "2023-06-01",
"Whether an OpenTelemetry collector should be enabled",
"Takes effect at next tick",
APPLICATION_ID);
@@ -385,15 +373,9 @@ public class Flags {
"Takes effect on host admin restart",
HOSTNAME);
- public static final UnboundStringFlag AUTH0_SESSION_LOGOUT = defineStringFlag(
- "auth0-session-logout", "disabled",
- List.of("bjorncs", "tokle"), "2022-10-17", "2023-06-01",
- "Set mode for Auth0 session logout ('disabled', 'log_only', 'enabled')",
- "Takes effect on controller restart/redeployment");
-
public static final UnboundBooleanFlag ENABLED_MAIL_VERIFICATION = defineFeatureFlag(
"enabled-mail-verification", false,
- List.of("olaa"), "2022-10-28", "2023-01-01",
+ List.of("olaa"), "2022-10-28", "2023-02-01",
"Enable mail verification",
"Takes effect immediately");
@@ -418,13 +400,6 @@ public class Flags {
"Takes effect on redeployment",
APPLICATION_ID);
- public static final UnboundBooleanFlag USE_LOCKS_IN_FILEDISTRIBUTION = defineFeatureFlag(
- "use-locks-in-filedistribution", true,
- List.of("hmusum"), "2022-11-16", "2023-01-31",
- "If true, use locks when writing and deleting file references.",
- "Takes effect immediately",
- ZONE_ID, APPLICATION_ID);
-
/** WARNING: public for testing: All flags should be defined in {@link Flags}. */
public static UnboundBooleanFlag defineFeatureFlag(String flagId, boolean defaultValue, List<String> owners,
String createdAt, String expiresAt, String description,
diff --git a/fnet/src/tests/frt/parallel_rpc/parallel_rpc_test.cpp b/fnet/src/tests/frt/parallel_rpc/parallel_rpc_test.cpp
index c6008820bbc..624f5a73ae6 100644
--- a/fnet/src/tests/frt/parallel_rpc/parallel_rpc_test.cpp
+++ b/fnet/src/tests/frt/parallel_rpc/parallel_rpc_test.cpp
@@ -137,7 +137,6 @@ void perform_test(size_t thread_id, Client &client, Result &result, bool vital =
}
CryptoEngine::SP null_crypto = std::make_shared<NullCryptoEngine>();
-CryptoEngine::SP xor_crypto = std::make_shared<XorCryptoEngine>();
CryptoEngine::SP tls_crypto = std::make_shared<vespalib::TlsCryptoEngine>(vespalib::test::make_tls_options_for_testing());
namespace {
uint32_t getNumThreads() {
@@ -148,9 +147,6 @@ namespace {
TEST_MT_FFF("parallel rpc with 1/1 transport threads and num_cores user threads (no encryption)",
getNumThreads(), Server(null_crypto, 1), Client(null_crypto, 1, f1), Result(num_threads)) { perform_test(thread_id, f2, f3); }
-TEST_MT_FFF("parallel rpc with 1/1 transport threads and num_cores user threads (xor encryption)",
- getNumThreads(), Server(xor_crypto, 1), Client(xor_crypto, 1, f1), Result(num_threads)) { perform_test(thread_id, f2, f3); }
-
TEST_MT_FFF("parallel rpc with 1/1 transport threads and num_cores user threads (tls encryption)",
getNumThreads(), Server(tls_crypto, 1), Client(tls_crypto, 1, f1), Result(num_threads)) { perform_test(thread_id, f2, f3); }
@@ -160,9 +156,6 @@ TEST_MT_FFF("parallel rpc with 1/1 transport threads and num_cores user threads
TEST_MT_FFF("parallel rpc with 8/8 transport threads and num_cores user threads (no encryption)",
getNumThreads(), Server(null_crypto, 8), Client(null_crypto, 8, f1), Result(num_threads)) { perform_test(thread_id, f2, f3, true); }
-TEST_MT_FFF("parallel rpc with 8/8 transport threads and num_cores user threads (xor encryption)",
- getNumThreads(), Server(xor_crypto, 8), Client(xor_crypto, 8, f1), Result(num_threads)) { perform_test(thread_id, f2, f3); }
-
TEST_MT_FFF("parallel rpc with 8/8 transport threads and num_cores user threads (tls encryption)",
getNumThreads(), Server(tls_crypto, 8), Client(tls_crypto, 8, f1), Result(num_threads)) { perform_test(thread_id, f2, f3, true); }
diff --git a/fnet/src/tests/frt/parallel_rpc/tls_rpc_bench.cpp b/fnet/src/tests/frt/parallel_rpc/tls_rpc_bench.cpp
index 417dba8d803..d15fca93c0b 100644
--- a/fnet/src/tests/frt/parallel_rpc/tls_rpc_bench.cpp
+++ b/fnet/src/tests/frt/parallel_rpc/tls_rpc_bench.cpp
@@ -17,7 +17,6 @@ using namespace vespalib;
using vespalib::test::TimeTracer;
CryptoEngine::SP null_crypto = std::make_shared<NullCryptoEngine>();
-CryptoEngine::SP xor_crypto = std::make_shared<XorCryptoEngine>();
CryptoEngine::SP tls_crypto = std::make_shared<vespalib::TlsCryptoEngine>(vespalib::test::make_tls_options_for_testing());
TT_Tag req_tag("request");
@@ -143,12 +142,6 @@ TEST_F("^^^-- rpc with null encryption", Fixture(null_crypto)) {
benchmark_rpc(f1, true);
}
-TEST_F("^^^-- rpc with xor encryption", Fixture(xor_crypto)) {
- fprintf(stderr, "vvv-- rpc with xor encryption\n");
- benchmark_rpc(f1, false);
- benchmark_rpc(f1, true);
-}
-
TEST_F("^^^-- rpc with tls encryption", Fixture(tls_crypto)) {
fprintf(stderr, "vvv-- rpc with tls encryption\n");
benchmark_rpc(f1, false);
diff --git a/fnet/src/tests/frt/rpc/CMakeLists.txt b/fnet/src/tests/frt/rpc/CMakeLists.txt
index 35150cad7b6..baf70e3494d 100644
--- a/fnet/src/tests/frt/rpc/CMakeLists.txt
+++ b/fnet/src/tests/frt/rpc/CMakeLists.txt
@@ -6,7 +6,6 @@ vespa_add_executable(fnet_invoke_test_app TEST
fnet
)
vespa_add_test(NAME fnet_invoke_test_app COMMAND fnet_invoke_test_app)
-vespa_add_test(NAME fnet_invoke_test_app_xor COMMAND fnet_invoke_test_app ENVIRONMENT "CRYPTOENGINE=xor")
vespa_add_test(NAME fnet_invoke_test_app_tls COMMAND fnet_invoke_test_app ENVIRONMENT "CRYPTOENGINE=tls")
vespa_add_test(NAME fnet_invoke_test_app_tls_maybe_yes COMMAND fnet_invoke_test_app ENVIRONMENT "CRYPTOENGINE=tls_maybe_yes")
vespa_add_test(NAME fnet_invoke_test_app_tls_maybe_no COMMAND fnet_invoke_test_app ENVIRONMENT "CRYPTOENGINE=tls_maybe_no")
diff --git a/fnet/src/tests/frt/rpc/my_crypto_engine.hpp b/fnet/src/tests/frt/rpc/my_crypto_engine.hpp
index 219b4dafd05..8ffe204fa28 100644
--- a/fnet/src/tests/frt/rpc/my_crypto_engine.hpp
+++ b/fnet/src/tests/frt/rpc/my_crypto_engine.hpp
@@ -12,10 +12,7 @@ vespalib::CryptoEngine::SP my_crypto_engine() {
return std::make_shared<vespalib::NullCryptoEngine>();
}
std::string engine(env_str);
- if (engine == "xor") {
- fprintf(stderr, "crypto engine: xor\n");
- return std::make_shared<vespalib::XorCryptoEngine>();
- } else if (engine == "tls") {
+ if (engine == "tls") {
fprintf(stderr, "crypto engine: tls\n");
return std::make_shared<vespalib::TlsCryptoEngine>(
vespalib::test::make_telemetry_only_capability_tls_options_for_testing());
diff --git a/jrt/src/com/yahoo/jrt/XorCryptoEngine.java b/jrt/src/com/yahoo/jrt/XorCryptoEngine.java
deleted file mode 100644
index b3356dcedf4..00000000000
--- a/jrt/src/com/yahoo/jrt/XorCryptoEngine.java
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.jrt;
-
-
-import java.nio.channels.SocketChannel;
-
-
-/**
- * Very simple crypto engine that requires connection handshaking and
- * data transformation. Used to test encryption integration separate
- * from TLS.
- **/
-public class XorCryptoEngine implements CryptoEngine {
- @Override public CryptoSocket createClientCryptoSocket(SocketChannel channel, Spec spec) {
- return new XorCryptoSocket(channel, false);
- }
- @Override public CryptoSocket createServerCryptoSocket(SocketChannel channel) {
- return new XorCryptoSocket(channel, true);
- }
-}
diff --git a/jrt/src/com/yahoo/jrt/XorCryptoSocket.java b/jrt/src/com/yahoo/jrt/XorCryptoSocket.java
deleted file mode 100644
index c017f42650a..00000000000
--- a/jrt/src/com/yahoo/jrt/XorCryptoSocket.java
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.jrt;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.channels.SocketChannel;
-import java.security.SecureRandom;
-import java.util.ArrayDeque;
-import java.util.Queue;
-
-/**
- * A very simple CryptoSocket that performs connection handshaking and
- * data transformation. Used to test encryption integration separate
- * from TLS.
- *
- * @author havardpe
- */
-public class XorCryptoSocket implements CryptoSocket {
-
- private static final int CHUNK_SIZE = 4096;
- enum OP { READ_KEY, WRITE_KEY }
-
- private Queue<OP> opList = new ArrayDeque<>();
- private byte myKey = genKey();
- private byte peerKey;
- private Buffer input = new Buffer(CHUNK_SIZE);
- private Buffer output = new Buffer(CHUNK_SIZE);
- private SocketChannel channel;
-
- private static byte genKey() {
- return (byte) new SecureRandom().nextInt(256);
- }
-
- private HandshakeResult readKey() throws IOException {
- int res = channel.read(input.getWritable(1));
- if (res > 0) {
- peerKey = input.getReadable().get();
- return HandshakeResult.DONE;
- } else if (res == 0) {
- return HandshakeResult.NEED_READ;
- } else {
- throw new IOException("EOF during handshake");
- }
- }
- private HandshakeResult writeKey() throws IOException {
- if (output.bytes() == 0) {
- output.getWritable(1).put(myKey);
- }
- if (channel.write(output.getReadable()) == 0) {
- return HandshakeResult.NEED_WRITE;
- }
- return HandshakeResult.DONE;
- }
- private HandshakeResult perform(OP op) throws IOException {
- switch (op) {
- case READ_KEY: return readKey();
- case WRITE_KEY: return writeKey();
- }
- throw new IOException("invalid handshake operation");
- }
-
- public XorCryptoSocket(SocketChannel channel, boolean isServer) {
- this.channel = channel;
- if (isServer) {
- opList.add(OP.READ_KEY);
- opList.add(OP.WRITE_KEY);
- } else {
- opList.add(OP.WRITE_KEY);
- opList.add(OP.READ_KEY);
- }
- }
- @Override public SocketChannel channel() { return channel; }
- @Override public HandshakeResult handshake() throws IOException {
- while (!opList.isEmpty()) {
- HandshakeResult partialResult = perform(opList.element());
- if (partialResult != HandshakeResult.DONE) {
- return partialResult;
- }
- opList.remove();
- }
- return HandshakeResult.DONE;
- }
- @Override public void doHandshakeWork() {}
- @Override public int getMinimumReadBufferSize() { return 1; }
- @Override public int read(ByteBuffer dst) throws IOException {
- if (input.bytes() == 0) {
- if (channel.read(input.getWritable(CHUNK_SIZE)) == -1) {
- return -1; // EOF
- }
- }
- return drain(dst);
- }
- @Override public int drain(ByteBuffer dst) throws IOException {
- int cnt = 0;
- ByteBuffer src = input.getReadable();
- while (src.hasRemaining() && dst.hasRemaining()) {
- dst.put((byte)(src.get() ^ myKey));
- cnt++;
- }
- return cnt;
- }
- @Override public int write(ByteBuffer src) throws IOException {
- int cnt = 0;
- if (flush() == FlushResult.DONE) {
- ByteBuffer dst = output.getWritable(CHUNK_SIZE);
- while (src.hasRemaining() && dst.hasRemaining()) {
- dst.put((byte)(src.get() ^ peerKey));
- cnt++;
- }
- }
- return cnt;
- }
- @Override public FlushResult flush() throws IOException {
- ByteBuffer src = output.getReadable();
- channel.write(src);
- if (src.hasRemaining()) {
- return FlushResult.NEED_WRITE;
- } else {
- return FlushResult.DONE;
- }
- }
- @Override public void dropEmptyBuffers() {
- input.shrink(0);
- output.shrink(0);
- }
-}
diff --git a/jrt/tests/com/yahoo/jrt/EchoTest.java b/jrt/tests/com/yahoo/jrt/EchoTest.java
index 11742fa42e2..47169210f00 100644
--- a/jrt/tests/com/yahoo/jrt/EchoTest.java
+++ b/jrt/tests/com/yahoo/jrt/EchoTest.java
@@ -54,10 +54,6 @@ public class EchoTest {
},
null},
{
- new XorCryptoEngine(),
- null,
- null},
- {
new TlsCryptoEngine(createTestTlsContext()),
(MetricsAssertions) metrics -> {
assertEquals(1, metrics.serverTlsConnectionsEstablished());
diff --git a/jrt/tests/com/yahoo/jrt/LatencyTest.java b/jrt/tests/com/yahoo/jrt/LatencyTest.java
index 945833e51a8..f36dc0c5ba9 100644
--- a/jrt/tests/com/yahoo/jrt/LatencyTest.java
+++ b/jrt/tests/com/yahoo/jrt/LatencyTest.java
@@ -176,14 +176,6 @@ public class LatencyTest {
}
@org.junit.Test
- public void testXorCryptoLatency() throws Throwable {
- try (Network network = new Network(new XorCryptoEngine(), 1)) {
- new Client(false, network, 1).measureLatency("[xor crypto, no reconnect] ");
- new Client(true, network, 1).measureLatency("[xor crypto, reconnect] ");
- }
- }
-
- @org.junit.Test
public void testTlsCryptoLatency() throws Throwable {
try (Network network = new Network(new TlsCryptoEngine(createTestTlsContext()), 1)) {
new Client(false, network, 1).measureLatency("[tls crypto, no reconnect] ");
diff --git a/messagebus/src/main/java/com/yahoo/messagebus/MessageBus.java b/messagebus/src/main/java/com/yahoo/messagebus/MessageBus.java
index 0c3f3168568..4b911d7c38e 100644
--- a/messagebus/src/main/java/com/yahoo/messagebus/MessageBus.java
+++ b/messagebus/src/main/java/com/yahoo/messagebus/MessageBus.java
@@ -72,7 +72,6 @@ public class MessageBus implements ConfigHandler, NetworkOwner, MessageHandler,
private final Messenger msn;
private final Resender resender;
private int maxPendingCount;
- private int maxPendingSize;
private int pendingCount = 0;
private int pendingSize = 0;
private final Thread careTaker = new Thread(this::sendBlockedMessages);
@@ -142,7 +141,6 @@ public class MessageBus implements ConfigHandler, NetworkOwner, MessageHandler,
public MessageBus(NetworkMultiplexer net, MessageBusParams params) {
// Add all known protocols to the repository.
maxPendingCount = params.getMaxPendingCount();
- maxPendingSize = params.getMaxPendingSize();
for (int i = 0, len = params.getNumProtocols(); i < len; ++i) {
protocolRepository.putProtocol(params.getProtocol(i));
}
@@ -375,7 +373,7 @@ public class MessageBus implements ConfigHandler, NetworkOwner, MessageHandler,
}
private boolean doAccounting() {
- return (maxPendingCount > 0 || maxPendingSize > 0);
+ return (maxPendingCount > 0);
}
/**
* <p>This method handles choking input data so that message bus does not
@@ -392,8 +390,7 @@ public class MessageBus implements ConfigHandler, NetworkOwner, MessageHandler,
if (doAccounting()) {
synchronized (this) {
- busy = ((maxPendingCount > 0 && pendingCount >= maxPendingCount) ||
- (maxPendingSize > 0 && pendingSize >= maxPendingSize));
+ busy = (maxPendingCount > 0 && pendingCount >= maxPendingCount);
if (!busy) {
pendingCount++;
pendingSize += size;
@@ -487,7 +484,7 @@ public class MessageBus implements ConfigHandler, NetworkOwner, MessageHandler,
*
* @return The resender.
*/
- @Deprecated // Remove on 9
+ @Deprecated (forRemoval = true)// Remove on 9
public Resender getResender() {
return resender;
}
@@ -520,7 +517,7 @@ public class MessageBus implements ConfigHandler, NetworkOwner, MessageHandler,
*
* @param maxCount The max count.
*/
- @Deprecated // Remove on 9
+ @Deprecated(forRemoval = true) // Remove on 9
public void setMaxPendingCount(int maxCount) {
maxPendingCount = maxCount;
}
@@ -529,7 +526,7 @@ public class MessageBus implements ConfigHandler, NetworkOwner, MessageHandler,
* Gets maximum number of messages that can be received without being
* replied to yet.
*/
- @Deprecated // Remove on 9
+ @Deprecated (forRemoval = true)// Remove on 9
public int getMaxPendingCount() {
return maxPendingCount;
}
@@ -540,18 +537,18 @@ public class MessageBus implements ConfigHandler, NetworkOwner, MessageHandler,
*
* @param maxSize The max size.
*/
- @Deprecated // Remove on 9
+ @Deprecated (forRemoval = true)// Remove on 9
public void setMaxPendingSize(int maxSize) {
- maxPendingSize = maxSize;
+
}
/**
* Gets maximum combined size of messages that can be received without
* being replied to yet.
*/
- @Deprecated // Remove on 9
+ @Deprecated (forRemoval = true)// Remove on 9
public int getMaxPendingSize() {
- return maxPendingSize;
+ return Integer.MAX_VALUE;
}
/**
diff --git a/messagebus/src/main/java/com/yahoo/messagebus/MessageBusParams.java b/messagebus/src/main/java/com/yahoo/messagebus/MessageBusParams.java
index 1b18178d638..198c562e26a 100755
--- a/messagebus/src/main/java/com/yahoo/messagebus/MessageBusParams.java
+++ b/messagebus/src/main/java/com/yahoo/messagebus/MessageBusParams.java
@@ -18,7 +18,6 @@ public class MessageBusParams {
private final List<Protocol> protocols = new ArrayList<>();
private RetryPolicy retryPolicy;
private int maxPendingCount;
- private int maxPendingSize;
private MessagebusConfig config;
/**
@@ -27,7 +26,6 @@ public class MessageBusParams {
public MessageBusParams() {
retryPolicy = new RetryTransientErrorsPolicy();
maxPendingCount = 1024;
- maxPendingSize = 128 * 1024 * 1024;
config = null;
}
@@ -40,7 +38,6 @@ public class MessageBusParams {
protocols.addAll(params.protocols);
retryPolicy = params.retryPolicy;
maxPendingCount = params.maxPendingCount;
- maxPendingSize = params.maxPendingSize;
config = params.config;
}
@@ -132,8 +129,9 @@ public class MessageBusParams {
*
* @return The size limit.
*/
+ @Deprecated(forRemoval = true)
public int getMaxPendingSize() {
- return maxPendingSize;
+ return Integer.MAX_VALUE;
}
/**
@@ -142,8 +140,8 @@ public class MessageBusParams {
* @param maxSize The size limit to set.
* @return This, to allow chaining.
*/
+ @Deprecated(forRemoval = true)
public MessageBusParams setMaxPendingSize(int maxSize) {
- this.maxPendingSize = maxSize;
return this;
}
diff --git a/messagebus/src/test/java/com/yahoo/messagebus/ChokeTestCase.java b/messagebus/src/test/java/com/yahoo/messagebus/ChokeTestCase.java
index a8a1fc33284..55b76f7c41e 100755
--- a/messagebus/src/test/java/com/yahoo/messagebus/ChokeTestCase.java
+++ b/messagebus/src/test/java/com/yahoo/messagebus/ChokeTestCase.java
@@ -51,7 +51,7 @@ public class ChokeTestCase {
}
@Test
- @SuppressWarnings("deprecation")
+ @SuppressWarnings({"deprecation", "removal"})
void testMaxCount() {
int max = 10;
dstServer.mb.setMaxPendingCount(max);
@@ -99,56 +99,6 @@ public class ChokeTestCase {
assertEquals(0, dstServer.mb.getPendingCount());
}
- @Test
- @SuppressWarnings("deprecation")
- void testMaxSize() {
- int size = createMessage("msg").getApproxSize();
- int max = size * 10;
- dstServer.mb.setMaxPendingSize(max);
- List<Message> lst = new ArrayList<>();
- for (int i = 0; i < max * 2; i += size) {
- if (i < max) {
- assertEquals(i, dstServer.mb.getPendingSize());
- } else {
- assertEquals(max, dstServer.mb.getPendingSize());
- }
- assertTrue(srcSession.send(createMessage("msg"), Route.parse("dst/session")).isAccepted());
- if (i < max) {
- Message msg = ((Receptor) dstSession.getMessageHandler()).getMessage(60);
- assertNotNull(msg);
- lst.add(msg);
- } else {
- Reply reply = ((Receptor) srcSession.getReplyHandler()).getReply(60);
- assertNotNull(reply);
- assertEquals(1, reply.getNumErrors());
- assertEquals(ErrorCode.SESSION_BUSY, reply.getError(0).getCode());
- }
- }
- for (int i = 0; i < 5; ++i) {
- Message msg = lst.remove(0);
- dstSession.acknowledge(msg);
-
- Reply reply = ((Receptor) srcSession.getReplyHandler()).getReply(60);
- assertNotNull(reply);
- assertFalse(reply.hasErrors());
- assertNotNull(msg = reply.getMessage());
- assertTrue(srcSession.send(msg, Route.parse("dst/session")).isAccepted());
-
- assertNotNull(msg = ((Receptor) dstSession.getMessageHandler()).getMessage(60));
- lst.add(msg);
- }
- while (!lst.isEmpty()) {
- assertEquals(size * lst.size(), dstServer.mb.getPendingSize());
- Message msg = lst.remove(0);
- dstSession.acknowledge(msg);
-
- Reply reply = ((Receptor) srcSession.getReplyHandler()).getReply(60);
- assertNotNull(reply);
- assertFalse(reply.hasErrors());
- }
- assertEquals(0, dstServer.mb.getPendingSize());
- }
-
private static Message createMessage(String msg) {
Message ret = new SimpleMessage(msg);
ret.getTrace().setLevel(9);
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/orchestrator/OrchestratorException.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/orchestrator/OrchestratorException.java
index 917b65b606c..472b8f39c05 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/orchestrator/OrchestratorException.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/orchestrator/OrchestratorException.java
@@ -7,7 +7,7 @@ import com.yahoo.vespa.hosted.node.admin.nodeadmin.ConvergenceException;
public class OrchestratorException extends ConvergenceException {
/** Creates a transient convergence exception. */
public OrchestratorException(String message) {
- this(message, true);
+ this(message, false);
}
protected OrchestratorException(String message, boolean isError) {
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/sync/SyncFileInfo.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/sync/SyncFileInfo.java
index d85ded15ca5..3a12191a0de 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/sync/SyncFileInfo.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/sync/SyncFileInfo.java
@@ -82,12 +82,14 @@ public class SyncFileInfo {
compression = filename.endsWith(".zst") ? Compression.NONE : Compression.ZSTD;
if (rotatedOnly && compression != Compression.NONE)
dir = null;
+ else if (filename.contains(".metrics-proxy.")) // See AccessLogComponent.java for filename.
+ dir = null;
else if (filename.startsWith("JsonAccessLog.") || filename.startsWith("access"))
dir = "logs/access/";
else if (filename.startsWith("ConnectionLog."))
dir = "logs/connection/";
else
- return Optional.empty();
+ dir = null;
}
if (dir == null) return Optional.empty();
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/sync/SyncFileInfoTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/sync/SyncFileInfoTest.java
index 8be1ef23bb2..3c91a9f32d1 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/sync/SyncFileInfoTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/sync/SyncFileInfoTest.java
@@ -29,8 +29,11 @@ public class SyncFileInfoTest {
private static final Path accessLogPath2 = fileSystem.getPath("/opt/vespa/logs/access/access.log.20210212.zst");
private static final Path accessLogPath3 = fileSystem.getPath("/opt/vespa/logs/access/access-json.log.20210213.zst");
private static final Path accessLogPath4 = fileSystem.getPath("/opt/vespa/logs/access/JsonAccessLog.20210214.zst");
+ private static final Path accessLogPath5 = fileSystem.getPath("/opt/vespa/logs/access/JsonAccessLog.container.20210214.zst");
+ private static final Path accessLogPath6 = fileSystem.getPath("/opt/vespa/logs/access/JsonAccessLog.metrics-proxy.20210214.zst");
private static final Path connectionLogPath1 = fileSystem.getPath("/opt/vespa/logs/access/ConnectionLog.20210210");
private static final Path connectionLogPath2 = fileSystem.getPath("/opt/vespa/logs/access/ConnectionLog.20210212.zst");
+ private static final Path connectionLogPath3 = fileSystem.getPath("/opt/vespa/logs/access/ConnectionLog.metrics-proxy.20210210");
private static final Path vespaLogPath1 = fileSystem.getPath("/opt/vespa/logs/vespa.log");
private static final Path vespaLogPath2 = fileSystem.getPath("/opt/vespa/logs/vespa.log-2021-02-12");
private static final Path zkLogPath0 = fileSystem.getPath("/opt/vespa/logs/zookeeper.configserver.0.log");
@@ -49,6 +52,12 @@ public class SyncFileInfoTest {
assertForLogFile(accessLogPath4, "s3://vespa-data-bucket/vespa/music/main/h432a/logs/access/JsonAccessLog.20210214.zst", NONE, true);
assertForLogFile(accessLogPath4, "s3://vespa-data-bucket/vespa/music/main/h432a/logs/access/JsonAccessLog.20210214.zst", NONE, false);
+
+ assertForLogFile(accessLogPath5, "s3://vespa-data-bucket/vespa/music/main/h432a/logs/access/JsonAccessLog.container.20210214.zst", NONE, true);
+ assertForLogFile(accessLogPath5, "s3://vespa-data-bucket/vespa/music/main/h432a/logs/access/JsonAccessLog.container.20210214.zst", NONE, false);
+
+ assertEquals(Optional.empty(), SyncFileInfo.forLogFile(nodeArchiveUri, accessLogPath6, true, ApplicationId.defaultId()));
+ assertEquals(Optional.empty(), SyncFileInfo.forLogFile(nodeArchiveUri, accessLogPath6, false, ApplicationId.defaultId()));
}
@Test
@@ -58,6 +67,9 @@ public class SyncFileInfoTest {
assertForLogFile(connectionLogPath2, "s3://vespa-data-bucket/vespa/music/main/h432a/logs/connection/ConnectionLog.20210212.zst", NONE, true);
assertForLogFile(connectionLogPath2, "s3://vespa-data-bucket/vespa/music/main/h432a/logs/connection/ConnectionLog.20210212.zst", NONE, false);
+
+ assertEquals(Optional.empty(), SyncFileInfo.forLogFile(nodeArchiveUri, connectionLogPath3, true, ApplicationId.defaultId()));
+ assertEquals(Optional.empty(), SyncFileInfo.forLogFile(nodeArchiveUri, connectionLogPath3, false, ApplicationId.defaultId()));
}
@Test
@@ -89,4 +101,5 @@ public class SyncFileInfoTest {
assertEquals(compression, sfi.map(SyncFileInfo::uploadCompression).orElse(null));
assertEquals(minDurationBetweenSync, sfi.flatMap(SyncFileInfo::minDurationBetweenSync).orElse(null));
}
+
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
index c490c50c940..ae1ea6c4d71 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
@@ -24,7 +24,7 @@ import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.NodeAcl;
import com.yahoo.vespa.hosted.provision.node.Nodes;
import com.yahoo.vespa.hosted.provision.os.OsVersions;
-import com.yahoo.vespa.hosted.provision.persistence.CuratorDatabaseClient;
+import com.yahoo.vespa.hosted.provision.persistence.CuratorDb;
import com.yahoo.vespa.hosted.provision.persistence.DnsNameResolver;
import com.yahoo.vespa.hosted.provision.persistence.JobControlFlags;
import com.yahoo.vespa.hosted.provision.persistence.NameResolver;
@@ -46,7 +46,7 @@ import java.util.Optional;
*/
public class NodeRepository extends AbstractComponent {
- private final CuratorDatabaseClient db;
+ private final CuratorDb db;
private final Clock clock;
private final Zone zone;
private final Nodes nodes;
@@ -122,7 +122,7 @@ public class NodeRepository extends AbstractComponent {
zone.cloud().dynamicProvisioning(), provisionServiceProvider.getHostProvisioner().map(__ -> "present").orElse("empty")));
this.flagSource = flagSource;
- this.db = new CuratorDatabaseClient(flavors, curator, clock, useCuratorClientCache, nodeCacheSize);
+ this.db = new CuratorDb(flavors, curator, clock, useCuratorClientCache, nodeCacheSize);
this.zone = zone;
this.clock = clock;
this.applications = new Applications(db);
@@ -145,7 +145,7 @@ public class NodeRepository extends AbstractComponent {
}
/** Returns the curator database client used by this */
- public CuratorDatabaseClient database() { return db; }
+ public CuratorDb database() { return db; }
/** Returns the nodes of the node repo. */
public Nodes nodes() { return nodes; }
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Applications.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Applications.java
index 2cf6d290dc8..9cfed5d046c 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Applications.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Applications.java
@@ -6,7 +6,7 @@ import com.yahoo.config.provision.ApplicationLockException;
import com.yahoo.config.provision.ApplicationTransaction;
import com.yahoo.transaction.Mutex;
import com.yahoo.transaction.NestedTransaction;
-import com.yahoo.vespa.hosted.provision.persistence.CuratorDatabaseClient;
+import com.yahoo.vespa.hosted.provision.persistence.CuratorDb;
import java.time.Duration;
import java.util.List;
@@ -21,9 +21,9 @@ import java.util.Optional;
*/
public class Applications {
- private final CuratorDatabaseClient db;
+ private final CuratorDb db;
- public Applications(CuratorDatabaseClient db) {
+ public Applications(CuratorDb db) {
this.db = db;
// read and write all to make sure they are stored in the latest version of the serialized format
for (ApplicationId id : ids()) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/AutoscalingStatus.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/AutoscalingStatus.java
deleted file mode 100644
index ea15b6a42cb..00000000000
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/AutoscalingStatus.java
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.provision.applications;
-
-import java.util.Objects;
-
-/**
- * The current autoscaling status of a cluster.
- * A value object.
- *
- * @author bratseth
- */
-public class AutoscalingStatus {
-
- public enum Status {
-
- /** No status is available: Aautoscaling is disabled, or a brand new application. */
- unavailable,
-
- /** Autoscaling is not taking any action at the moment due to recent changes or a lack of data */
- waiting,
-
- /** The cluster is ideally scaled to the current load */
- ideal,
-
- /** The cluster should be rescaled further, but no better configuration is allowed by the current limits */
- insufficient,
-
- /** Rescaling of this cluster has been scheduled */
- rescaling
-
- };
-
- private final Status status;
- private final String description;
-
- public AutoscalingStatus(Status status, String description) {
- this.status = status;
- this.description = description;
- }
-
- public Status status() { return status; }
- public String description() { return description; }
-
- public static AutoscalingStatus empty() { return new AutoscalingStatus(Status.unavailable, ""); }
-
- @Override
- public boolean equals(Object o) {
- if (o == this) return true;
- if ( ! ( o instanceof AutoscalingStatus other)) return false;
-
- if ( other.status != this.status ) return false;
- if ( ! other.description.equals(this.description) ) return false;
- return true;
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(status, description);
- }
-
- @Override
- public String toString() {
- return "autoscaling status: " + status +
- ( description.isEmpty() ? "" : " (" + description + ")");
- }
-
-}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java
index eb317c62776..0a731f66418 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java
@@ -7,6 +7,7 @@ import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.vespa.hosted.provision.autoscale.Autoscaler;
import com.yahoo.vespa.hosted.provision.autoscale.Autoscaling;
+import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
@@ -33,7 +34,6 @@ public class Cluster {
/** The maxScalingEvents last scaling events of this, sorted by increasing time (newest last) */
private final List<ScalingEvent> scalingEvents;
- private final AutoscalingStatus autoscalingStatus;
public Cluster(ClusterSpec.Id id,
boolean exclusive,
@@ -42,8 +42,7 @@ public class Cluster {
boolean required,
Autoscaling suggested,
Autoscaling target,
- List<ScalingEvent> scalingEvents,
- AutoscalingStatus autoscalingStatus) {
+ List<ScalingEvent> scalingEvents) {
this.id = Objects.requireNonNull(id);
this.exclusive = exclusive;
this.min = Objects.requireNonNull(minResources);
@@ -56,7 +55,6 @@ public class Cluster {
else
this.target = target;
this.scalingEvents = List.copyOf(scalingEvents);
- this.autoscalingStatus = autoscalingStatus;
}
public ClusterSpec.Id id() { return id; }
@@ -105,21 +103,18 @@ public class Cluster {
return Optional.of(scalingEvents.get(scalingEvents.size() - 1));
}
- /** The latest autoscaling status of this cluster, or unknown (never null) if none */
- public AutoscalingStatus autoscalingStatus() { return autoscalingStatus; }
-
public Cluster withConfiguration(boolean exclusive, Capacity capacity) {
return new Cluster(id, exclusive,
capacity.minResources(), capacity.maxResources(), capacity.isRequired(),
- suggested, target, scalingEvents, autoscalingStatus);
+ suggested, target, scalingEvents);
}
public Cluster withSuggested(Autoscaling suggested) {
- return new Cluster(id, exclusive, min, max, required, suggested, target, scalingEvents, autoscalingStatus);
+ return new Cluster(id, exclusive, min, max, required, suggested, target, scalingEvents);
}
public Cluster withTarget(Autoscaling target) {
- return new Cluster(id, exclusive, min, max, required, suggested, target, scalingEvents, autoscalingStatus);
+ return new Cluster(id, exclusive, min, max, required, suggested, target, scalingEvents);
}
/** Add or update (based on "at" time) a scaling event */
@@ -133,12 +128,7 @@ public class Cluster {
scalingEvents.add(scalingEvent);
prune(scalingEvents);
- return new Cluster(id, exclusive, min, max, required, suggested, target, scalingEvents, autoscalingStatus);
- }
-
- public Cluster with(AutoscalingStatus autoscalingStatus) {
- if (autoscalingStatus.equals(this.autoscalingStatus)) return this;
- return new Cluster(id, exclusive, min, max, required, suggested, target, scalingEvents, autoscalingStatus);
+ return new Cluster(id, exclusive, min, max, required, suggested, target, scalingEvents);
}
@Override
@@ -169,7 +159,45 @@ public class Cluster {
public static Cluster create(ClusterSpec.Id id, boolean exclusive, Capacity requested) {
return new Cluster(id, exclusive, requested.minResources(), requested.maxResources(), requested.isRequired(),
- Autoscaling.empty(), Autoscaling.empty(), List.of(), AutoscalingStatus.empty());
+ Autoscaling.empty(), Autoscaling.empty(), List.of());
+ }
+
+ /** The predicted time it will take to rescale this cluster. */
+ public Duration scalingDuration(ClusterSpec clusterSpec) {
+ int completedEventCount = 0;
+ Duration totalDuration = Duration.ZERO;
+ for (ScalingEvent event : scalingEvents()) {
+ if (event.duration().isEmpty()) continue;
+ completedEventCount++;
+ // Assume we have missed timely recording completion if it is longer than 4 days
+ totalDuration = totalDuration.plus(maximum(Duration.ofDays(4), event.duration().get()));
+ }
+ if (completedEventCount == 0) { // Use defaults
+ if (clusterSpec.isStateful()) return Duration.ofHours(12);
+ return Duration.ofMinutes(10);
+ }
+ else {
+ Duration predictedDuration = totalDuration.dividedBy(completedEventCount);
+
+ if ( clusterSpec.isStateful() ) // TODO: Remove when we have reliable completion for content clusters
+ predictedDuration = minimum(Duration.ofHours(12), predictedDuration);
+
+ predictedDuration = minimum(Duration.ofMinutes(5), predictedDuration);
+
+ return predictedDuration;
+ }
+ }
+
+ private static Duration minimum(Duration smallestAllowed, Duration duration) {
+ if (duration.minus(smallestAllowed).isNegative())
+ return smallestAllowed;
+ return duration;
+ }
+
+ private static Duration maximum(Duration largestAllowed, Duration duration) {
+ if ( ! duration.minus(largestAllowed).isNegative())
+ return largestAllowed;
+ return duration;
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
index c816abc060c..eacafb444b5 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
@@ -6,12 +6,11 @@ import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Application;
-import com.yahoo.vespa.hosted.provision.applications.AutoscalingStatus;
-import com.yahoo.vespa.hosted.provision.applications.AutoscalingStatus.Status;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
+import com.yahoo.vespa.hosted.provision.autoscale.Autoscaling.Status;
import java.time.Duration;
-import java.util.Objects;
+import java.time.Instant;
import java.util.Optional;
/**
@@ -41,7 +40,7 @@ public class Autoscaler {
* @param clusterNodes the list of all the active nodes in a cluster
* @return scaling advice for this cluster
*/
- public Advice suggest(Application application, Cluster cluster, NodeList clusterNodes) {
+ public Autoscaling suggest(Application application, Cluster cluster, NodeList clusterNodes) {
return autoscale(application, cluster, clusterNodes, Limits.empty());
}
@@ -51,13 +50,11 @@ public class Autoscaler {
* @param clusterNodes the list of all the active nodes in a cluster
* @return scaling advice for this cluster
*/
- public Advice autoscale(Application application, Cluster cluster, NodeList clusterNodes) {
- if (cluster.minResources().equals(cluster.maxResources()))
- return Advice.none(Status.unavailable, "Autoscaling is not enabled");
+ public Autoscaling autoscale(Application application, Cluster cluster, NodeList clusterNodes) {
return autoscale(application, cluster, clusterNodes, Limits.of(cluster));
}
- private Advice autoscale(Application application, Cluster cluster, NodeList clusterNodes, Limits limits) {
+ private Autoscaling autoscale(Application application, Cluster cluster, NodeList clusterNodes, Limits limits) {
ClusterModel clusterModel = new ClusterModel(nodeRepository.zone(),
application,
clusterNodes.clusterSpec(),
@@ -65,24 +62,28 @@ public class Autoscaler {
clusterNodes,
nodeRepository.metricsDb(),
nodeRepository.clock());
+ if (clusterModel.isEmpty()) return Autoscaling.empty();
+
+ if (! limits.isEmpty() && cluster.minResources().equals(cluster.maxResources()))
+ return Autoscaling.dontScale(Autoscaling.Status.unavailable, "Autoscaling is not enabled", clusterModel);
if ( ! clusterIsStable(clusterNodes, nodeRepository))
- return Advice.none(Status.waiting, "Cluster change in progress");
+ return Autoscaling.dontScale(Status.waiting, "Cluster change in progress", clusterModel);
var currentAllocation = new AllocatableClusterResources(clusterNodes, nodeRepository);
Optional<AllocatableClusterResources> bestAllocation =
allocationOptimizer.findBestAllocation(clusterModel.loadAdjustment(), currentAllocation, clusterModel, limits);
if (bestAllocation.isEmpty())
- return Advice.dontScale(Status.insufficient, "No allocations are possible within configured limits");
+ return Autoscaling.dontScale(Status.insufficient, "No allocations are possible within configured limits", clusterModel);
if (! worthRescaling(currentAllocation.realResources(), bestAllocation.get().realResources())) {
if (bestAllocation.get().fulfilment() < 1)
- return Advice.dontScale(Status.insufficient, "Configured limits prevents better scaling of this cluster");
+ return Autoscaling.dontScale(Status.insufficient, "Configured limits prevents better scaling of this cluster", clusterModel);
else
- return Advice.dontScale(Status.ideal, "Cluster is ideally scaled");
+ return Autoscaling.dontScale(Status.ideal, "Cluster is ideally scaled", clusterModel);
}
- return Advice.scaleTo(bestAllocation.get().advertisedResources());
+ return Autoscaling.scaleTo(bestAllocation.get().advertisedResources(), clusterModel);
}
public static boolean clusterIsStable(NodeList clusterNodes, NodeRepository nodeRepository) {
@@ -122,53 +123,4 @@ public class Autoscaler {
return Duration.ofHours(48);
}
- public static class Advice {
-
- private final boolean present;
- private final Optional<ClusterResources> target;
- private final AutoscalingStatus reason;
-
- private Advice(Optional<ClusterResources> target, boolean present, AutoscalingStatus reason) {
- this.target = target;
- this.present = present;
- this.reason = Objects.requireNonNull(reason);
- }
-
- /**
- * Returns the autoscaling target that should be set by this advice.
- * This is empty if the advice is to keep the current allocation.
- */
- public Optional<ClusterResources> target() { return target; }
-
- /** True if this does not provide any advice */
- public boolean isEmpty() { return ! present; }
-
- /** True if this provides advice (which may be to keep the current allocation) */
- public boolean isPresent() { return present; }
-
- /** The reason for this advice */
- public AutoscalingStatus reason() { return reason; }
-
- private static Advice none(Status status, String description) {
- return new Advice(Optional.empty(), false, new AutoscalingStatus(status, description));
- }
-
- private static Advice dontScale(Status status, String description) {
- return new Advice(Optional.empty(), true, new AutoscalingStatus(status, description));
- }
-
- private static Advice scaleTo(ClusterResources target) {
- return new Advice(Optional.of(target), true,
- new AutoscalingStatus(AutoscalingStatus.Status.rescaling,
- "Rescaling initiated due to load changes"));
- }
-
- @Override
- public String toString() {
- return "autoscaling advice: " +
- (present ? (target.isPresent() ? "Scale to " + target.get() : "Don't scale") : "None");
- }
-
- }
-
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaling.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaling.java
index 9da17a39242..ebab2efbaa6 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaling.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaling.java
@@ -5,53 +5,134 @@ import com.yahoo.config.provision.ClusterResources;
import java.time.Instant;
import java.util.Objects;
import java.util.Optional;
-import java.util.function.Consumer;
/**
- * An autoscaling result.
+ * An autoscaling conclusion and the context that led to it.
*
* @author bratseth
*/
public class Autoscaling {
+ private final Status status;
+ private final String description;
private final Optional<ClusterResources> resources;
private final Instant at;
+ private final Load peak;
+ private final Load ideal;
- public Autoscaling(ClusterResources resources, Instant at) {
- this(Optional.of(resources), at);
- }
-
- public Autoscaling(Optional<ClusterResources> resources, Instant at) {
+ public Autoscaling(Status status, String description, Optional<ClusterResources> resources, Instant at,
+ Load peak, Load ideal) {
+ this.status = status;
+ this.description = description;
this.resources = resources;
this.at = at;
+ this.peak = peak;
+ this.ideal = ideal;
}
- /** Returns the resource target of this, or empty if non target. */
+ public Status status() { return status; }
+
+ public String description() { return description; }
+
+ /** Returns the resource target of this, or empty if none (meaning keep the current allocation). */
public Optional<ClusterResources> resources() {
return resources;
}
- /** Returns the time this target was decided. */
+ /** Returns the time this was decided. */
public Instant at() { return at; }
+ /** Returns the peak load seen in the period considered in this. */
+ public Load peak() { return peak; }
+
+ /** Returns the ideal load the cluster in question should have. */
+ public Load ideal() { return ideal; }
+
+ public Autoscaling with(Status status, String description) {
+ return new Autoscaling(status, description, resources, at, peak, ideal);
+ }
+
+ /** Converts this autoscaling into an ideal one at the completion of it. */
+ public Autoscaling asIdeal(Instant at) {
+ return new Autoscaling(Status.ideal,
+ "Cluster is ideally scaled within configured limits",
+ Optional.empty(),
+ at,
+ peak,
+ ideal);
+ }
+
+ public boolean isEmpty() { return this.equals(empty()); }
+
@Override
public boolean equals(Object o) {
if ( ! (o instanceof Autoscaling other)) return false;
- if ( ! this.at.equals(other.at)) return false;
+ if ( ! this.status.equals(other.status)) return false;
+ if ( ! this.description.equals(other.description)) return false;
if ( ! this.resources.equals(other.resources)) return false;
+ if ( ! this.at.equals(other.at)) return false;
+ if ( ! this.peak.equals(other.peak)) return false;
+ if ( ! this.ideal.equals(other.ideal)) return false;
return true;
}
@Override
public int hashCode() {
- return Objects.hash(resources, at);
+ return Objects.hash(status, description, at, peak, ideal);
}
@Override
public String toString() {
- return "autoscaling to " + resources + ", made at " + at;
+ return (resources.isPresent() ? "Autoscaling to " + resources : "Don't autoscale") +
+ (description.isEmpty() ? "" : ": " + description);
}
- public static Autoscaling empty() { return new Autoscaling(Optional.empty(), Instant.EPOCH); }
+ public static Autoscaling empty() {
+ return new Autoscaling(Status.unavailable,
+ "",
+ Optional.empty(),
+ Instant.EPOCH,
+ Load.zero(),
+ Load.zero());
+ }
+
+ /** Creates an autoscaling conclusion which does not change the current allocation for a specified reason. */
+ public static Autoscaling dontScale(Status status, String description, ClusterModel clusterModel) {
+ return new Autoscaling(status,
+ description,
+ Optional.empty(),
+ clusterModel.at(),
+ clusterModel.peakLoad(),
+ clusterModel.idealLoad());
+ }
+
+ /** Creates an autoscaling conclusion to scale. */
+ public static Autoscaling scaleTo(ClusterResources target, ClusterModel clusterModel) {
+ return new Autoscaling(Status.rescaling,
+ "Rescaling initiated due to load changes",
+ Optional.of(target),
+ clusterModel.at(),
+ clusterModel.peakLoad(),
+ clusterModel.idealLoad());
+ }
+
+ public enum Status {
+
+    /** No status is available: Autoscaling is disabled, or a brand new application. */
+ unavailable,
+
+ /** Autoscaling is not taking any action at the moment due to recent changes or a lack of data */
+ waiting,
+
+ /** The cluster is ideally scaled to the current load */
+ ideal,
+
+ /** The cluster should be rescaled further, but no better configuration is allowed by the current limits */
+ insufficient,
+
+ /** Rescaling of this cluster has been scheduled */
+ rescaling
+
+ };
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
index 1928a784763..388c77e4e5d 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
@@ -53,6 +53,7 @@ public class ClusterModel {
private final Duration scalingDuration;
private final ClusterTimeseries clusterTimeseries;
private final ClusterNodesTimeseries nodeTimeseries;
+ private final Instant at;
// Lazily initialized members
private Double queryFractionOfMax = null;
@@ -72,9 +73,10 @@ public class ClusterModel {
this.cluster = cluster;
this.nodes = clusterNodes;
this.clock = clock;
- this.scalingDuration = computeScalingDuration(cluster, clusterSpec);
+ this.scalingDuration = cluster.scalingDuration(clusterSpec);
this.clusterTimeseries = metricsDb.getClusterTimeseries(application.id(), cluster.id());
this.nodeTimeseries = new ClusterNodesTimeseries(scalingDuration(), cluster, nodes, metricsDb);
+ this.at = clock.instant();
}
ClusterModel(Zone zone,
@@ -95,12 +97,17 @@ public class ClusterModel {
this.scalingDuration = scalingDuration;
this.clusterTimeseries = clusterTimeseries;
this.nodeTimeseries = nodeTimeseries;
+ this.at = clock.instant();
}
public Application application() { return application; }
public ClusterSpec clusterSpec() { return clusterSpec; }
public Cluster cluster() { return cluster; }
+ public boolean isEmpty() {
+ return nodeTimeseries().isEmpty();
+ }
+
/** Returns the relative load adjustment that should be made to this cluster given available measurements. */
public Load loadAdjustment() {
if (nodeTimeseries().isEmpty()) return Load.one();
@@ -151,12 +158,6 @@ public class ClusterModel {
return averageQueryRate = clusterTimeseries().queryRate(scalingDuration(), clock);
}
- /** Returns the average of the last load measurement from each node. */
- public Load currentLoad() { return nodeTimeseries().currentLoad(); }
-
- /** Returns the average of all load measurements from all nodes*/
- public Load averageLoad() { return nodeTimeseries().averageLoad(); }
-
/** Returns the average of the peak load measurement in each dimension, from each node. */
public Load peakLoad() { return nodeTimeseries().peakLoad(); }
@@ -239,6 +240,9 @@ public class ClusterModel {
(1 - queryCpuFraction) * idealWriteCpuLoad;
}
+ /** Returns the instant this model was created. */
+ public Instant at() { return at;}
+
/** Returns the headroom for growth during organic traffic growth as a multiple of current resources. */
private double growthRateHeadroom() {
if ( ! zone.environment().isProduction()) return 1;
@@ -288,43 +292,6 @@ public class ClusterModel {
return queryRateFraction * relativeQueryCost / (queryRateFraction * relativeQueryCost + writeFraction);
}
- private static Duration computeScalingDuration(Cluster cluster, ClusterSpec clusterSpec) {
- int completedEventCount = 0;
- Duration totalDuration = Duration.ZERO;
- for (ScalingEvent event : cluster.scalingEvents()) {
- if (event.duration().isEmpty()) continue;
- completedEventCount++;
- // Assume we have missed timely recording completion if it is longer than 4 days
- totalDuration = totalDuration.plus(maximum(Duration.ofDays(4), event.duration().get()));
- }
- if (completedEventCount == 0) { // Use defaults
- if (clusterSpec.isStateful()) return Duration.ofHours(12);
- return Duration.ofMinutes(10);
- }
- else {
- Duration predictedDuration = totalDuration.dividedBy(completedEventCount);
-
- if ( clusterSpec.isStateful() ) // TODO: Remove when we have reliable completion for content clusters
- predictedDuration = minimum(Duration.ofHours(12), predictedDuration);
-
- predictedDuration = minimum(Duration.ofMinutes(5), predictedDuration);
-
- return predictedDuration;
- }
- }
-
- private static Duration minimum(Duration smallestAllowed, Duration duration) {
- if (duration.minus(smallestAllowed).isNegative())
- return smallestAllowed;
- return duration;
- }
-
- private static Duration maximum(Duration largestAllowed, Duration duration) {
- if ( ! duration.minus(largestAllowed).isNegative())
- return largestAllowed;
- return duration;
- }
-
private double idealMemoryLoad() {
if (clusterSpec.type().isContainer()) return idealContainerMemoryLoad;
if (clusterSpec.type() == ClusterSpec.Type.admin) return idealContainerMemoryLoad; // Not autoscaled, but ideal shown in console
@@ -339,7 +306,7 @@ public class ClusterModel {
/**
* Create a cluster model if possible and logs a warning and returns empty otherwise.
- * This is useful in cases where it's possible to continue without the cluser model,
+ * This is useful in cases where it's possible to continue without the cluster model,
* as QuestDb is known to temporarily fail during reading of data.
*/
public static Optional<ClusterModel> create(Zone zone,
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancers.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancers.java
index 7cbb8ef2764..1d218e6c973 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancers.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancers.java
@@ -2,7 +2,7 @@
package com.yahoo.vespa.hosted.provision.lb;
import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.vespa.hosted.provision.persistence.CuratorDatabaseClient;
+import com.yahoo.vespa.hosted.provision.persistence.CuratorDb;
import java.util.function.Predicate;
@@ -14,9 +14,9 @@ import java.util.function.Predicate;
*/
public class LoadBalancers {
- private final CuratorDatabaseClient db;
+ private final CuratorDb db;
- public LoadBalancers(CuratorDatabaseClient db) {
+ public LoadBalancers(CuratorDb db) {
this.db = db;
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java
index c3838dcee75..4aa54b7f6fa 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java
@@ -70,9 +70,9 @@ public class AutoscalingMaintainer extends NodeRepositoryMaintainer {
if (cluster.isEmpty()) return;
Cluster updatedCluster = updateCompletion(cluster.get(), clusterNodes);
- var advice = autoscaler.autoscale(application.get(), updatedCluster, clusterNodes);
+ var autoscaling = autoscaler.autoscale(application.get(), updatedCluster, clusterNodes);
- if ( ! anyChanges(advice, cluster.get(), updatedCluster, clusterNodes)) return;
+ if ( ! anyChanges(autoscaling, cluster.get(), updatedCluster, clusterNodes)) return;
try (var lock = nodeRepository().applications().lock(applicationId)) {
application = nodeRepository().applications().get(applicationId);
@@ -82,30 +82,30 @@ public class AutoscalingMaintainer extends NodeRepositoryMaintainer {
clusterNodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId).cluster(clusterId);
// 1. Update cluster info
- updatedCluster = updateCompletion(cluster.get(), clusterNodes)
- .with(advice.reason())
- .withTarget(new Autoscaling(advice.target(), nodeRepository().clock().instant()));
+ updatedCluster = updateCompletion(cluster.get(), clusterNodes);
+ if ( ! autoscaling.isEmpty()) // Ignore empties we'll get from servers recently started
+ updatedCluster = updatedCluster.withTarget(autoscaling);
applications().put(application.get().with(updatedCluster), lock);
var current = new AllocatableClusterResources(clusterNodes, nodeRepository()).advertisedResources();
- if (advice.isPresent() && advice.target().isPresent() && !current.equals(advice.target().get())) {
+ if (autoscaling.resources().isPresent() && !current.equals(autoscaling.resources().get())) {
// 2. Also autoscale
try (MaintenanceDeployment deployment = new MaintenanceDeployment(applicationId, deployer, metric, nodeRepository())) {
if (deployment.isValid()) {
deployment.activate();
- logAutoscaling(current, advice.target().get(), applicationId, clusterNodes);
+ logAutoscaling(current, autoscaling.resources().get(), applicationId, clusterNodes);
}
}
}
}
}
- private boolean anyChanges(Autoscaler.Advice advice, Cluster cluster, Cluster updatedCluster, NodeList clusterNodes) {
- if (advice.isPresent() && !cluster.target().resources().equals(advice.target())) return true;
+ private boolean anyChanges(Autoscaling autoscaling, Cluster cluster, Cluster updatedCluster, NodeList clusterNodes) {
if (updatedCluster != cluster) return true;
- if ( ! advice.reason().equals(cluster.autoscalingStatus())) return true;
- if (advice.target().isPresent() &&
- !advice.target().get().equals(new AllocatableClusterResources(clusterNodes, nodeRepository()).advertisedResources())) return true;
+ if ( ! cluster.target().resources().equals(autoscaling.resources())) return true;
+ if ( ! cluster.target().status().equals(autoscaling.status())) return true;
+ if (autoscaling.resources().isPresent() &&
+ !autoscaling.resources().get().equals(new AllocatableClusterResources(clusterNodes, nodeRepository()).advertisedResources())) return true;
return false;
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InfrastructureVersions.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InfrastructureVersions.java
index a1f36a4f1a5..19d6c79d680 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InfrastructureVersions.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InfrastructureVersions.java
@@ -5,7 +5,7 @@ import com.yahoo.component.Version;
import com.yahoo.component.Vtag;
import com.yahoo.config.provision.NodeType;
import com.yahoo.vespa.curator.Lock;
-import com.yahoo.vespa.hosted.provision.persistence.CuratorDatabaseClient;
+import com.yahoo.vespa.hosted.provision.persistence.CuratorDb;
import java.util.Collections;
import java.util.Map;
@@ -23,14 +23,14 @@ public class InfrastructureVersions {
private static final Logger logger = Logger.getLogger(InfrastructureVersions.class.getName());
- private final CuratorDatabaseClient db;
+ private final CuratorDb db;
private final Version defaultVersion;
- public InfrastructureVersions(CuratorDatabaseClient db) {
+ public InfrastructureVersions(CuratorDb db) {
this(db, Vtag.currentVersion);
}
- InfrastructureVersions(CuratorDatabaseClient db, Version defaultVersion) {
+ InfrastructureVersions(CuratorDb db, Version defaultVersion) {
this.db = db;
this.defaultVersion = defaultVersion;
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java
index 0264d0df837..44c92434a0f 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java
@@ -11,7 +11,7 @@ import com.yahoo.vespa.hosted.provision.lb.LoadBalancer.State;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancerId;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancerService;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancerSpec;
-import com.yahoo.vespa.hosted.provision.persistence.CuratorDatabaseClient;
+import com.yahoo.vespa.hosted.provision.persistence.CuratorDb;
import java.time.Duration;
import java.time.Instant;
@@ -46,7 +46,7 @@ public class LoadBalancerExpirer extends NodeRepositoryMaintainer {
private static final Duration inactiveExpiry = Duration.ofHours(1);
private final LoadBalancerService service;
- private final CuratorDatabaseClient db;
+ private final CuratorDb db;
public LoadBalancerExpirer(NodeRepository nodeRepository, Duration interval, LoadBalancerService service, Metric metric) {
super(nodeRepository, interval, metric);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainer.java
index 536a1917eb1..af368934188 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainer.java
@@ -14,7 +14,6 @@ import com.yahoo.vespa.hosted.provision.applications.Applications;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.autoscale.Autoscaler;
import com.yahoo.vespa.hosted.provision.autoscale.Autoscaling;
-import com.yahoo.vespa.hosted.provision.autoscale.MetricsDb;
import java.time.Duration;
import java.util.Map;
@@ -63,12 +62,12 @@ public class ScalingSuggestionsMaintainer extends NodeRepositoryMaintainer {
Optional<Cluster> cluster = application.cluster(clusterId);
if (cluster.isEmpty()) return true;
var suggestion = autoscaler.suggest(application, cluster.get(), clusterNodes);
- if (suggestion.isEmpty()) return true;
+ if (suggestion.status() == Autoscaling.Status.waiting) return true;
+ if ( ! shouldUpdateSuggestion(cluster.get().suggested(), suggestion)) return true;
+
// Wait only a short time for the lock to avoid interfering with change deployments
try (Mutex lock = nodeRepository().applications().lock(applicationId, Duration.ofSeconds(1))) {
- // empty suggested resources == keep the current allocation, so we record that
- var suggestedResources = suggestion.target().orElse(clusterNodes.not().retired().toResources());
- applications().get(applicationId).ifPresent(a -> updateSuggestion(suggestedResources, clusterId, a, lock));
+ applications().get(applicationId).ifPresent(a -> updateSuggestion(suggestion, clusterId, a, lock));
return true;
}
catch (ApplicationLockException e) {
@@ -76,18 +75,19 @@ public class ScalingSuggestionsMaintainer extends NodeRepositoryMaintainer {
}
}
- private void updateSuggestion(ClusterResources suggestion,
+ private boolean shouldUpdateSuggestion(Autoscaling currentSuggestion, Autoscaling newSuggestion) {
+ return currentSuggestion.resources().isEmpty()
+ || currentSuggestion.at().isBefore(nodeRepository().clock().instant().minus(Duration.ofDays(7)))
+ || (newSuggestion.resources().isPresent() && isHigher(newSuggestion.resources().get(), currentSuggestion.resources().get()));
+ }
+
+ private void updateSuggestion(Autoscaling autoscaling,
ClusterSpec.Id clusterId,
Application application,
Mutex lock) {
Optional<Cluster> cluster = application.cluster(clusterId);
if (cluster.isEmpty()) return;
- var at = nodeRepository().clock().instant();
- var currentSuggestion = cluster.get().suggested();
- if (currentSuggestion.resources().isEmpty()
- || currentSuggestion.at().isBefore(at.minus(Duration.ofDays(7)))
- || isHigher(suggestion, currentSuggestion.resources().get()))
- applications().put(application.with(cluster.get().withSuggested(new Autoscaling(suggestion, at))), lock);
+ applications().put(application.with(cluster.get().withSuggested(autoscaling)), lock);
}
private boolean isHigher(ClusterResources r1, ClusterResources r2) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
index e5c85647cdd..807710eac72 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
@@ -20,7 +20,7 @@ import com.yahoo.vespa.hosted.provision.NodeMutex;
import com.yahoo.vespa.hosted.provision.applications.Applications;
import com.yahoo.vespa.hosted.provision.maintenance.NodeFailer;
import com.yahoo.vespa.hosted.provision.node.filter.NodeFilter;
-import com.yahoo.vespa.hosted.provision.persistence.CuratorDatabaseClient;
+import com.yahoo.vespa.hosted.provision.persistence.CuratorDb;
import com.yahoo.vespa.orchestrator.HostNameNotFoundException;
import com.yahoo.vespa.orchestrator.Orchestrator;
@@ -58,13 +58,13 @@ public class Nodes {
private static final Logger log = Logger.getLogger(Nodes.class.getName());
- private final CuratorDatabaseClient db;
+ private final CuratorDb db;
private final Zone zone;
private final Clock clock;
private final Orchestrator orchestrator;
private final Applications applications;
- public Nodes(CuratorDatabaseClient db, Zone zone, Clock clock, Orchestrator orchestrator, Applications applications) {
+ public Nodes(CuratorDb db, Zone zone, Clock clock, Orchestrator orchestrator, Applications applications) {
this.zone = zone;
this.clock = clock;
this.db = db;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsVersions.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsVersions.java
index 2939b940486..75ab91c4efc 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsVersions.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsVersions.java
@@ -9,7 +9,7 @@ import com.yahoo.vespa.curator.Lock;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Status;
-import com.yahoo.vespa.hosted.provision.persistence.CuratorDatabaseClient;
+import com.yahoo.vespa.hosted.provision.persistence.CuratorDb;
import java.util.List;
import java.util.Objects;
@@ -33,7 +33,7 @@ public class OsVersions {
private static final Logger log = Logger.getLogger(OsVersions.class.getName());
private final NodeRepository nodeRepository;
- private final CuratorDatabaseClient db;
+ private final CuratorDb db;
private final Cloud cloud;
public OsVersions(NodeRepository nodeRepository) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializer.java
index f57c811b6d5..2f5b057e927 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializer.java
@@ -10,11 +10,11 @@ import com.yahoo.slime.ObjectTraverser;
import com.yahoo.slime.Slime;
import com.yahoo.slime.SlimeUtils;
import com.yahoo.vespa.hosted.provision.applications.Application;
-import com.yahoo.vespa.hosted.provision.applications.AutoscalingStatus;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.applications.ScalingEvent;
import com.yahoo.vespa.hosted.provision.applications.Status;
import com.yahoo.vespa.hosted.provision.autoscale.Autoscaling;
+import com.yahoo.vespa.hosted.provision.autoscale.Load;
import java.io.IOException;
import java.io.UncheckedIOException;
@@ -56,9 +56,13 @@ public class ApplicationSerializer {
private static final String groupsKey = "groups";
private static final String nodeResourcesKey = "resources";
private static final String scalingEventsKey = "scalingEvents";
- private static final String autoscalingStatusKey = "autoscalingStatus";
private static final String autoscalingStatusObjectKey = "autoscalingStatusObject";
private static final String descriptionKey = "description";
+ private static final String peakKey = "peak";
+ private static final String idealKey = "ideal";
+ private static final String cpuKey = "cpu";
+ private static final String memoryKey = "memory";
+ private static final String diskKey = "disk";
private static final String fromKey = "from";
private static final String toKey = "to";
private static final String generationKey = "generation";
@@ -122,7 +126,6 @@ public class ApplicationSerializer {
toSlime(cluster.suggested(), clusterObject.setObject(suggestedKey));
toSlime(cluster.target(), clusterObject.setObject(targetKey));
scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray(scalingEventsKey));
- toSlime(cluster.autoscalingStatus(), clusterObject.setObject(autoscalingStatusObjectKey));
}
private static Cluster clusterFromSlime(String id, Inspector clusterObject) {
@@ -131,15 +134,18 @@ public class ApplicationSerializer {
clusterResourcesFromSlime(clusterObject.field(minResourcesKey)),
clusterResourcesFromSlime(clusterObject.field(maxResourcesKey)),
clusterObject.field(requiredKey).asBool(),
- autoscalingFromSlime(clusterObject.field(suggestedKey)),
- autoscalingFromSlime(clusterObject.field(targetKey)),
- scalingEventsFromSlime(clusterObject.field(scalingEventsKey)),
- autoscalingStatusFromSlime(clusterObject.field(autoscalingStatusObjectKey), clusterObject));
+ autoscalingFromSlime(clusterObject.field(suggestedKey), clusterObject.field("nonExisting")),
+ autoscalingFromSlime(clusterObject.field(targetKey), clusterObject.field(autoscalingStatusObjectKey)),
+ scalingEventsFromSlime(clusterObject.field(scalingEventsKey)));
}
private static void toSlime(Autoscaling autoscaling, Cursor autoscalingObject) {
+ autoscalingObject.setString(statusKey, toAutoscalingStatusCode(autoscaling.status()));
+ autoscalingObject.setString(descriptionKey, autoscaling.description());
autoscaling.resources().ifPresent(resources -> toSlime(resources, autoscalingObject.setObject(resourcesKey)));
autoscalingObject.setLong(atKey, autoscaling.at().toEpochMilli());
+ toSlime(autoscaling.peak(), autoscalingObject.setObject(peakKey));
+ toSlime(autoscaling.ideal(), autoscalingObject.setObject(idealKey));
}
private static void toSlime(ClusterResources resources, Cursor clusterResourcesObject) {
@@ -159,15 +165,46 @@ public class ApplicationSerializer {
NodeResourcesSerializer.resourcesFromSlime(clusterResourcesObject.field(nodeResourcesKey)));
}
- private static Autoscaling autoscalingFromSlime(Inspector autoscalingObject) {
+ private static void toSlime(Load load, Cursor loadObject) {
+ loadObject.setDouble(cpuKey, load.cpu());
+ loadObject.setDouble(memoryKey, load.memory());
+ loadObject.setDouble(diskKey, load.disk());
+ }
+
+ private static Load loadFromSlime(Inspector loadObject) {
+ return new Load(loadObject.field(cpuKey).asDouble(),
+ loadObject.field(memoryKey).asDouble(),
+ loadObject.field(diskKey).asDouble());
+ }
+
+ private static Autoscaling autoscalingFromSlime(Inspector autoscalingObject,
+ Inspector legacyAutoscalingStatusObject) {
if ( ! autoscalingObject.valid()) return Autoscaling.empty();
- if ( ! autoscalingObject.field(atKey).valid()) { // TODO: Remove clause after January 2023
- return new Autoscaling(optionalClusterResourcesFromSlime(autoscalingObject), Instant.EPOCH);
+ if ( ! autoscalingObject.field(atKey).valid()) { // TODO: Remove after January 2023
+ return new Autoscaling(fromAutoscalingStatusCode(legacyAutoscalingStatusObject.field(statusKey).asString()),
+ legacyAutoscalingStatusObject.field(descriptionKey).asString(),
+ optionalClusterResourcesFromSlime(autoscalingObject),
+ Instant.EPOCH,
+ Load.zero(),
+ Load.zero());
+ }
+
+ if (legacyAutoscalingStatusObject.valid()) { // TODO: Remove after January 2023
+ return new Autoscaling(fromAutoscalingStatusCode(legacyAutoscalingStatusObject.field(statusKey).asString()),
+ legacyAutoscalingStatusObject.field(descriptionKey).asString(),
+ optionalClusterResourcesFromSlime(autoscalingObject.field(resourcesKey)),
+ Instant.ofEpochMilli(autoscalingObject.field(atKey).asLong()),
+ loadFromSlime(autoscalingObject.field(peakKey)),
+ loadFromSlime(autoscalingObject.field(idealKey)));
}
- return new Autoscaling(optionalClusterResourcesFromSlime(autoscalingObject.field(resourcesKey)),
- Instant.ofEpochMilli(autoscalingObject.field(atKey).asLong()));
+ return new Autoscaling(fromAutoscalingStatusCode(autoscalingObject.field(statusKey).asString()),
+ autoscalingObject.field(descriptionKey).asString(),
+ optionalClusterResourcesFromSlime(autoscalingObject.field(resourcesKey)),
+ Instant.ofEpochMilli(autoscalingObject.field(atKey).asLong()),
+ loadFromSlime(autoscalingObject.field(peakKey)),
+ loadFromSlime(autoscalingObject.field(idealKey)));
}
private static void scalingEventsToSlime(List<ScalingEvent> scalingEvents, Cursor eventArray) {
@@ -194,36 +231,26 @@ public class ApplicationSerializer {
optionalInstant(inspector.field(completionKey)));
}
- private static void toSlime(AutoscalingStatus status, Cursor object) {
- object.setString(statusKey, toAutoscalingStatusCode(status.status()));
- object.setString(descriptionKey, status.description());
+ private static String toAutoscalingStatusCode(Autoscaling.Status status) {
+ return switch (status) {
+ case unavailable -> "unavailable";
+ case waiting -> "waiting";
+ case ideal -> "ideal";
+ case insufficient -> "insufficient";
+ case rescaling -> "rescaling";
+ };
}
- private static AutoscalingStatus autoscalingStatusFromSlime(Inspector object, Inspector parent) {
- return new AutoscalingStatus(fromAutoscalingStatusCode(object.field(statusKey).asString()),
- object.field(descriptionKey).asString());
- }
-
- private static String toAutoscalingStatusCode(AutoscalingStatus.Status status) {
- switch (status) {
- case unavailable : return "unavailable";
- case waiting : return "waiting";
- case ideal : return "ideal";
- case insufficient : return "insufficient";
- case rescaling : return "rescaling";
- default : throw new IllegalArgumentException("Unknown autoscaling status " + status);
- }
- }
-
- private static AutoscalingStatus.Status fromAutoscalingStatusCode(String code) {
- switch (code) {
- case "unavailable" : return AutoscalingStatus.Status.unavailable;
- case "waiting" : return AutoscalingStatus.Status.waiting;
- case "ideal" : return AutoscalingStatus.Status.ideal;
- case "insufficient" : return AutoscalingStatus.Status.insufficient;
- case "rescaling" : return AutoscalingStatus.Status.rescaling;
- default : throw new IllegalArgumentException("Unknown autoscaling status '" + code + "'");
- }
+ private static Autoscaling.Status fromAutoscalingStatusCode(String code) {
+ return switch (code) {
+ case "" -> Autoscaling.Status.unavailable;
+ case "unavailable" -> Autoscaling.Status.unavailable;
+ case "waiting" -> Autoscaling.Status.waiting;
+ case "ideal" -> Autoscaling.Status.ideal;
+ case "insufficient" -> Autoscaling.Status.insufficient;
+ case "rescaling" -> Autoscaling.Status.rescaling;
+ default -> throw new IllegalArgumentException("Unknown autoscaling status '" + code + "'");
+ };
}
private static Optional<Instant> optionalInstant(Inspector inspector) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabase.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CachingCurator.java
index aa935eaf272..eb73fd51f4a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabase.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CachingCurator.java
@@ -18,18 +18,20 @@ import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;
-import java.util.stream.Collectors;
/**
- * This encapsulated the curator database of the node repo.
+ * A caching wrapper for {@link Curator}.
+ *
* It serves reads from an in-memory cache of the content which is invalidated when changed on another node
* using a global, shared counter. The counter is updated on all write operations, ensured by wrapping write
- * operations in a try block, with the counter increment in a finally block. Locks must be used to ensure consistency.
+ * operations in a try block, with the counter increment in a finally block.
+ *
+ * Locks must be used to ensure consistency.
*
* @author bratseth
* @author jonmv
*/
-public class CuratorDatabase {
+public class CachingCurator {
private final Curator curator;
@@ -40,7 +42,7 @@ public class CuratorDatabase {
private final AtomicReference<Cache> cache = new AtomicReference<>();
/** Whether we should return data from the cache or always read from ZooKeeper */
- private final boolean useCache;
+ private final boolean enabled;
private final Object cacheCreationLock = new Object();
@@ -50,8 +52,8 @@ public class CuratorDatabase {
* @param curator the curator instance
* @param root the file system root of the db
*/
- public CuratorDatabase(Curator curator, Path root, boolean useCache) {
- this.useCache = useCache;
+ public CachingCurator(Curator curator, Path root, boolean enabled) {
+ this.enabled = enabled;
this.curator = curator;
changeGenerationCounter = new CuratorCounter(curator, root.append("changeCounter"));
cache.set(newCache(changeGenerationCounter.get()));
@@ -60,10 +62,10 @@ public class CuratorDatabase {
/** Returns all hosts configured to be part of this ZooKeeper cluster */
public List<HostName> cluster() {
return Arrays.stream(curator.zooKeeperEnsembleConnectionSpec().split(","))
- .filter(hostAndPort -> !hostAndPort.isEmpty())
- .map(hostAndPort -> hostAndPort.split(":")[0])
- .map(HostName::of)
- .toList();
+ .filter(hostAndPort -> !hostAndPort.isEmpty())
+ .map(hostAndPort -> hostAndPort.split(":")[0])
+ .map(HostName::of)
+ .toList();
}
/** Create a reentrant lock */
@@ -123,7 +125,7 @@ public class CuratorDatabase {
/** Caches must only be instantiated using this method */
private Cache newCache(long generation) {
- return useCache ? new Cache(generation, curator) : new NoCache(generation, curator);
+ return enabled ? new Cache(generation, curator) : new NoCache(generation, curator);
}
/**
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java
index 37583f00547..c52d98fadc9 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java
@@ -47,21 +47,23 @@ import java.util.stream.IntStream;
import static com.yahoo.stream.CustomCollectors.toLinkedMap;
import static java.util.stream.Collectors.collectingAndThen;
-/**
- * Client which reads and writes nodes to a curator database.
- * Nodes are stored in files named <code>/provision/v1/[nodestate]/[hostname]</code>.
+/**
+ * A persistent database for the contents of the node repository, backed by {@link CachingCurator}.
+ *
+ * Nodes are stored in files named /provision/v1/[nodestate]/[hostname].
*
* The responsibility of this class is to turn operations on the level of node states, applications and nodes
* into operations on the level of file paths and bytes.
*
* @author bratseth
*/
-public class CuratorDatabaseClient {
+public class CuratorDb {
- private static final Logger log = Logger.getLogger(CuratorDatabaseClient.class.getName());
+ private static final Logger log = Logger.getLogger(CuratorDb.class.getName());
private static final Path root = Path.fromString("/provision/v1");
private static final Path lockPath = root.append("locks");
+ private static final Path nodesPath = root.append("nodes");
private static final Path loadBalancersPath = root.append("loadBalancers");
private static final Path applicationsPath = root.append("applications");
private static final Path inactiveJobsPath = root.append("inactiveJobs");
@@ -73,13 +75,13 @@ public class CuratorDatabaseClient {
private static final Duration defaultLockTimeout = Duration.ofMinutes(1);
private final NodeSerializer nodeSerializer;
- private final CuratorDatabase db;
+ private final CachingCurator db;
private final Clock clock;
private final CuratorCounter provisionIndexCounter;
- public CuratorDatabaseClient(NodeFlavors flavors, Curator curator, Clock clock, boolean useCache, long nodeCacheSize) {
+ public CuratorDb(NodeFlavors flavors, Curator curator, Clock clock, boolean useCache, long nodeCacheSize) {
this.nodeSerializer = new NodeSerializer(flavors, nodeCacheSize);
- this.db = new CuratorDatabase(curator, root, useCache);
+ this.db = new CachingCurator(curator, root, useCache);
this.clock = clock;
this.provisionIndexCounter = new CuratorCounter(curator, root.append("provisionIndexCounter"));
initZK();
@@ -91,6 +93,8 @@ public class CuratorDatabaseClient {
private void initZK() {
db.create(root);
+ db.create(nodesPath);
+ // TODO(mpolden): Remove state paths after migration to nodesPath
for (Node.State state : Node.State.values())
db.create(toPath(state));
db.create(applicationsPath);
@@ -225,14 +229,21 @@ public class CuratorDatabaseClient {
private void writeNode(Node.State toState, CuratorTransaction curatorTransaction, Node node, Node newNode) {
byte[] nodeData = nodeSerializer.toJson(newNode);
- String currentNodePath = toPath(node).getAbsolute();
- String newNodePath = toPath(toState, newNode.hostname()).getAbsolute();
- if (newNodePath.equals(currentNodePath)) {
- curatorTransaction.add(CuratorOperations.setData(currentNodePath, nodeData));
- } else {
- curatorTransaction.add(CuratorOperations.delete(currentNodePath))
- .add(CuratorOperations.create(newNodePath, nodeData));
+ { // TODO(mpolden): Remove this after migration to nodesPath
+ String currentNodePath = toPath(node).getAbsolute();
+ String newNodePath = toPath(toState, newNode.hostname()).getAbsolute();
+ if (newNodePath.equals(currentNodePath)) {
+ curatorTransaction.add(CuratorOperations.setData(currentNodePath, nodeData));
+ } else {
+ curatorTransaction.add(CuratorOperations.delete(currentNodePath))
+ .add(CuratorOperations.create(newNodePath, nodeData));
+ }
+ }
+ Path nodePath = nodePath(newNode);
+ if (db.exists(nodePath)) {
+ curatorTransaction.add(CuratorOperations.delete(nodePath.getAbsolute()));
}
+ curatorTransaction.add(CuratorOperations.create(nodePath.getAbsolute(), nodeData));
}
private Status newNodeStatus(Node node, Node.State toState) {
@@ -251,7 +262,7 @@ public class CuratorDatabaseClient {
List<Node> nodes = new ArrayList<>();
if (states.length == 0)
states = Node.State.values();
- CuratorDatabase.Session session = db.getSession();
+ CachingCurator.Session session = db.getSession();
for (Node.State state : states) {
for (String hostname : session.getChildren(toPath(state))) {
Optional<Node> node = readNode(session, hostname, state);
@@ -265,7 +276,7 @@ public class CuratorDatabaseClient {
* Returns a particular node, or empty if this node is not in any of the given states.
* If no states are given this returns the node if it is present in any state.
*/
- public Optional<Node> readNode(CuratorDatabase.Session session, String hostname, Node.State ... states) {
+ public Optional<Node> readNode(CachingCurator.Session session, String hostname, Node.State ... states) {
if (states.length == 0)
states = Node.State.values();
for (Node.State state : states) {
@@ -294,11 +305,15 @@ public class CuratorDatabaseClient {
return root.append(toDir(nodeState)).append(nodeName);
}
+ private Path nodePath(Node node) {
+ return nodesPath.append(node.hostname());
+ }
+
/** Creates and returns the path to the lock for this application */
private Path lockPath(ApplicationId application) {
- Path lockPath = CuratorDatabaseClient.lockPath.append(application.tenant().value())
- .append(application.application().value())
- .append(application.instance().value());
+ Path lockPath = CuratorDb.lockPath.append(application.tenant().value())
+ .append(application.application().value())
+ .append(application.instance().value());
db.create(lockPath);
return lockPath;
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/JobControlFlags.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/JobControlFlags.java
index 13cc1ce5027..0632fc0818b 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/JobControlFlags.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/JobControlFlags.java
@@ -16,10 +16,10 @@ import java.util.Set;
*/
public class JobControlFlags implements JobControlState {
- private final CuratorDatabaseClient curator;
+ private final CuratorDb curator;
private final ListFlag<String> inactiveJobsFlag;
- public JobControlFlags(CuratorDatabaseClient curator, FlagSource flagSource) {
+ public JobControlFlags(CuratorDb curator, FlagSource flagSource) {
this.curator = curator;
this.inactiveJobsFlag = PermanentFlags.INACTIVE_MAINTENANCE_JOBS.bindTo(flagSource);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java
index 94707f7f429..f448266b94b 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java
@@ -48,7 +48,6 @@ import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ExecutionException;
-import java.util.stream.Collectors;
/**
* Serializes a node to/from JSON.
@@ -69,6 +68,7 @@ public class NodeSerializer {
private final NodeFlavors flavors;
// Node fields
+ private static final String stateKey = "state";
private static final String hostnameKey = "hostname";
private static final String ipAddressesKey = "ipAddresses";
private static final String ipAddressPoolKey = "additionalIpAddresses";
@@ -166,6 +166,7 @@ public class NodeSerializer {
private void toSlime(Node node, Cursor object) {
object.setString(hostnameKey, node.hostname());
+ object.setString(stateKey, toString(node.state()));
toSlime(node.ipConfig().primary(), object.setArray(ipAddressesKey));
toSlime(node.ipConfig().pool().ipSet(), object.setArray(ipAddressPoolKey));
toSlime(node.ipConfig().pool().getAddressList(), object);
@@ -539,4 +540,35 @@ public class NodeSerializer {
};
}
+ static Node.State nodeStateFromString(String state) {
+ return switch (state) {
+ case "active" -> Node.State.active;
+ case "dirty" -> Node.State.dirty;
+ case "failed" -> Node.State.failed;
+ case "inactive" -> Node.State.inactive;
+ case "parked" -> Node.State.parked;
+ case "provisioned" -> Node.State.provisioned;
+ case "ready" -> Node.State.ready;
+ case "reserved" -> Node.State.reserved;
+ case "deprovisioned" -> Node.State.deprovisioned;
+ case "breakfixed" -> Node.State.breakfixed;
+ default -> throw new IllegalArgumentException("Unknown node state '" + state + "'");
+ };
+ }
+
+ static String toString(Node.State state) {
+ return switch (state) {
+ case active -> "active";
+ case dirty -> "dirty";
+ case failed -> "failed";
+ case inactive -> "inactive";
+ case parked -> "parked";
+ case provisioned -> "provisioned";
+ case ready -> "ready";
+ case reserved -> "reserved";
+ case deprovisioned -> "deprovisioned";
+ case breakfixed -> "breakfixed";
+ };
+ }
+
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
index a85377229b4..caf936e8aeb 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
@@ -13,8 +13,8 @@ import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeMutex;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Application;
-import com.yahoo.vespa.hosted.provision.applications.AutoscalingStatus;
import com.yahoo.vespa.hosted.provision.applications.ScalingEvent;
+import com.yahoo.vespa.hosted.provision.autoscale.Autoscaling;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.Allocation;
@@ -120,8 +120,7 @@ class Activator {
}
if (cluster.target().resources().isPresent()
&& cluster.target().resources().get().justNumbers().equals(currentResources.justNumbers())) {
- cluster = cluster.with(new AutoscalingStatus(AutoscalingStatus.Status.ideal,
- "Cluster is ideally scaled within configured limits"));
+ cluster = cluster.withTarget(cluster.target().asIdeal(nodeRepository.clock().instant()));
}
if (cluster != modified.cluster(clusterEntry.getKey()).get())
modified = modified.with(cluster);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/ArchiveUris.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/ArchiveUris.java
index e057fabc4fc..7855ee7c42c 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/ArchiveUris.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/ArchiveUris.java
@@ -6,7 +6,7 @@ import com.yahoo.lang.CachedSupplier;
import com.yahoo.vespa.curator.Lock;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.node.Allocation;
-import com.yahoo.vespa.hosted.provision.persistence.CuratorDatabaseClient;
+import com.yahoo.vespa.hosted.provision.persistence.CuratorDb;
import java.time.Duration;
import java.util.Map;
import java.util.Optional;
@@ -27,10 +27,10 @@ public class ArchiveUris {
private static final Pattern validUriPattern = Pattern.compile("[a-z0-9]+://(?:(?:[a-z0-9]+(?:[-_][a-z0-9.]+)*)+/)+");
private static final Duration cacheTtl = Duration.ofMinutes(1);
- private final CuratorDatabaseClient db;
+ private final CuratorDb db;
private final CachedSupplier<Map<TenantName, String>> archiveUris;
- public ArchiveUris(CuratorDatabaseClient db) {
+ public ArchiveUris(CuratorDb db) {
this.db = db;
this.archiveUris = new CachedSupplier<>(db::readArchiveUris, cacheTtl);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/FirmwareChecks.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/FirmwareChecks.java
index 475d03e92e4..ff375f2abe1 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/FirmwareChecks.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/FirmwareChecks.java
@@ -2,7 +2,7 @@
package com.yahoo.vespa.hosted.provision.provisioning;
import com.yahoo.lang.CachedSupplier;
-import com.yahoo.vespa.hosted.provision.persistence.CuratorDatabaseClient;
+import com.yahoo.vespa.hosted.provision.persistence.CuratorDb;
import java.time.Clock;
import java.time.Duration;
@@ -24,11 +24,11 @@ public class FirmwareChecks {
private static final Duration cacheExpiry = Duration.ofMinutes(1);
- private final CuratorDatabaseClient database;
+ private final CuratorDb database;
private final Clock clock;
private final CachedSupplier<Optional<Instant>> checkAfter;
- public FirmwareChecks(CuratorDatabaseClient database, Clock clock) {
+ public FirmwareChecks(CuratorDb database, Clock clock) {
this.database = database;
this.clock = clock;
this.checkAfter = new CachedSupplier<>(database::readFirmwareCheck, cacheExpiry);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
index 8794d0b373e..3e8124d5309 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
@@ -6,7 +6,6 @@ import com.yahoo.config.provision.ApplicationName;
import com.yahoo.config.provision.ApplicationTransaction;
import com.yahoo.config.provision.CloudAccount;
import com.yahoo.config.provision.ClusterSpec;
-import com.yahoo.config.provision.ClusterSpec.Type;
import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.LoadBalancerSettings;
import com.yahoo.config.provision.NodeType;
@@ -26,7 +25,7 @@ import com.yahoo.vespa.hosted.provision.lb.LoadBalancerService;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancerSpec;
import com.yahoo.vespa.hosted.provision.lb.Real;
import com.yahoo.vespa.hosted.provision.node.IP;
-import com.yahoo.vespa.hosted.provision.persistence.CuratorDatabaseClient;
+import com.yahoo.vespa.hosted.provision.persistence.CuratorDb;
import java.time.Instant;
import java.util.ArrayList;
@@ -34,7 +33,6 @@ import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
-import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.function.Function;
@@ -42,7 +40,6 @@ import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
-import static java.util.Objects.requireNonNullElse;
import static java.util.stream.Collectors.groupingBy;
import static java.util.stream.Collectors.reducing;
@@ -62,7 +59,7 @@ public class LoadBalancerProvisioner {
private static final Logger log = Logger.getLogger(LoadBalancerProvisioner.class.getName());
private final NodeRepository nodeRepository;
- private final CuratorDatabaseClient db;
+ private final CuratorDb db;
private final LoadBalancerService service;
private final BooleanFlag deactivateRouting;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java
index 9479696143b..cb927c72eb5 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java
@@ -9,14 +9,12 @@ import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Application;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.applications.ScalingEvent;
-import com.yahoo.vespa.hosted.provision.autoscale.ClusterModel;
+import com.yahoo.vespa.hosted.provision.autoscale.Autoscaling;
import com.yahoo.vespa.hosted.provision.autoscale.Limits;
import com.yahoo.vespa.hosted.provision.autoscale.Load;
-import com.yahoo.vespa.hosted.provision.autoscale.MetricsDb;
import java.net.URI;
import java.util.List;
-import java.util.Optional;
/**
* Serializes application information for nodes/v2/application responses
@@ -27,49 +25,38 @@ public class ApplicationSerializer {
public static Slime toSlime(Application application,
NodeList applicationNodes,
- MetricsDb metricsDb,
NodeRepository nodeRepository,
URI applicationUri) {
Slime slime = new Slime();
- toSlime(application, applicationNodes, metricsDb, nodeRepository, slime.setObject(), applicationUri);
+ toSlime(application, applicationNodes, nodeRepository, slime.setObject(), applicationUri);
return slime;
}
private static void toSlime(Application application,
NodeList applicationNodes,
- MetricsDb metricsDb,
NodeRepository nodeRepository,
Cursor object,
URI applicationUri) {
object.setString("url", applicationUri.toString());
object.setString("id", application.id().toFullString());
- clustersToSlime(application, applicationNodes, metricsDb, nodeRepository, object.setObject("clusters"));
+ clustersToSlime(application, applicationNodes, nodeRepository, object.setObject("clusters"));
}
private static void clustersToSlime(Application application,
NodeList applicationNodes,
- MetricsDb metricsDb,
NodeRepository nodeRepository,
Cursor clustersObject) {
- application.clusters().values().forEach(cluster -> toSlime(application, cluster, applicationNodes, metricsDb, nodeRepository, clustersObject));
+ application.clusters().values().forEach(cluster -> toSlime(application, cluster, applicationNodes, nodeRepository, clustersObject));
}
private static void toSlime(Application application,
Cluster cluster,
NodeList applicationNodes,
- MetricsDb metricsDb,
NodeRepository nodeRepository,
Cursor clustersObject) {
NodeList nodes = applicationNodes.not().retired().cluster(cluster.id());
if (nodes.isEmpty()) return;
ClusterResources currentResources = nodes.toResources();
- Optional<ClusterModel> clusterModel = ClusterModel.create(nodeRepository.zone(),
- application,
- nodes.clusterSpec(),
- cluster,
- nodes,
- metricsDb,
- nodeRepository.clock());
Cursor clusterObject = clustersObject.setObject(cluster.id().value());
clusterObject.setString("type", nodes.clusterSpec().type().name());
Limits limits = Limits.of(cluster).fullySpecified(nodes.clusterSpec(), nodeRepository, application.id());
@@ -77,15 +64,19 @@ public class ApplicationSerializer {
toSlime(limits.max(), clusterObject.setObject("max"));
toSlime(currentResources, clusterObject.setObject("current"));
if (cluster.shouldSuggestResources(currentResources))
- cluster.suggested().resources().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
- cluster.target().resources().ifPresent(target -> toSlime(target, clusterObject.setObject("target")));
- clusterModel.ifPresent(model -> clusterUtilizationToSlime(model, clusterObject.setObject("utilization")));
+ toSlime(cluster.suggested(), clusterObject.setObject("suggested"));
+ toSlime(cluster.target(), clusterObject.setObject("target"));
scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
- clusterObject.setString("autoscalingStatusCode", cluster.autoscalingStatus().status().name());
- clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus().description());
- clusterModel.ifPresent(model -> clusterObject.setLong("scalingDuration", model.scalingDuration().toMillis()));
- clusterModel.ifPresent(model -> clusterObject.setDouble("maxQueryGrowthRate", model.maxQueryGrowthRate()));
- clusterModel.ifPresent(model -> clusterObject.setDouble("currentQueryFractionOfMax", model.queryFractionOfMax()));
+ clusterObject.setLong("scalingDuration", cluster.scalingDuration(nodes.clusterSpec()).toMillis());
+ }
+
+ private static void toSlime(Autoscaling autoscaling, Cursor autoscalingObject) {
+ autoscalingObject.setString("status", autoscaling.status().name());
+ autoscalingObject.setString("description", autoscaling.description());
+ autoscaling.resources().ifPresent(resources -> toSlime(resources, autoscalingObject.setObject("resources")));
+ autoscalingObject.setLong("at", autoscaling.at().toEpochMilli());
+ toSlime(autoscaling.peak(), autoscalingObject.setObject("peak"));
+ toSlime(autoscaling.ideal(), autoscalingObject.setObject("ideal"));
}
private static void toSlime(ClusterResources resources, Cursor clusterResourcesObject) {
@@ -94,18 +85,10 @@ public class ApplicationSerializer {
NodeResourcesSerializer.toSlime(resources.nodeResources(), clusterResourcesObject.setObject("resources"));
}
- private static void clusterUtilizationToSlime(ClusterModel clusterModel, Cursor utilizationObject) {
- Load idealLoad = clusterModel.idealLoad();
- Load peakLoad = clusterModel.peakLoad();
-
- utilizationObject.setDouble("idealCpu", idealLoad.cpu());
- utilizationObject.setDouble("peakCpu", peakLoad.cpu());
-
- utilizationObject.setDouble("idealMemory", idealLoad.memory());
- utilizationObject.setDouble("peakMemory", peakLoad.memory());
-
- utilizationObject.setDouble("idealDisk", idealLoad.disk());
- utilizationObject.setDouble("peakDisk", peakLoad.disk());
+ private static void toSlime(Load load, Cursor utilizationObject) {
+ utilizationObject.setDouble("cpu", load.cpu());
+ utilizationObject.setDouble("memory", load.memory());
+ utilizationObject.setDouble("disk", load.disk());
}
private static void scalingEventsToSlime(List<ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java
index 83372f25d6e..fce5ccbdddc 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java
@@ -33,7 +33,6 @@ import com.yahoo.vespa.hosted.provision.NodeMutex;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Application;
import com.yahoo.vespa.hosted.provision.autoscale.Load;
-import com.yahoo.vespa.hosted.provision.autoscale.MetricsDb;
import com.yahoo.vespa.hosted.provision.node.Address;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.IP;
@@ -75,16 +74,14 @@ public class NodesV2ApiHandler extends ThreadedHttpRequestHandler {
private final Orchestrator orchestrator;
private final NodeRepository nodeRepository;
- private final MetricsDb metricsDb;
private final NodeFlavors nodeFlavors;
@Inject
public NodesV2ApiHandler(ThreadedHttpRequestHandler.Context parentCtx, Orchestrator orchestrator,
- NodeRepository nodeRepository, MetricsDb metricsDb, NodeFlavors flavors) {
+ NodeRepository nodeRepository, NodeFlavors flavors) {
super(parentCtx);
this.orchestrator = orchestrator;
this.nodeRepository = nodeRepository;
- this.metricsDb = metricsDb;
this.nodeFlavors = flavors;
}
@@ -454,7 +451,6 @@ public class NodesV2ApiHandler extends ThreadedHttpRequestHandler {
return ErrorResponse.notFoundError("No application '" + id + "'");
Slime slime = ApplicationSerializer.toSlime(application.get(),
nodeRepository.nodes().list(Node.State.active).owner(id),
- metricsDb,
nodeRepository,
withPath("/nodes/v2/applications/" + id, uri));
return new SlimeJsonResponse(slime);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java
index 5bd53a2f8af..91c8f803429 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java
@@ -30,6 +30,7 @@ import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Application;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.autoscale.Autoscaling;
+import com.yahoo.vespa.hosted.provision.autoscale.Load;
import com.yahoo.vespa.hosted.provision.autoscale.MemoryMetricsDb;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.IP;
@@ -196,11 +197,20 @@ public class MockNodeRepository extends NodeRepository {
null), app1Id, provisioner);
Application app1 = applications().get(app1Id).get();
Cluster cluster1 = app1.cluster(cluster1Id.id()).get();
- cluster1 = cluster1.withSuggested(new Autoscaling(new ClusterResources(6, 2,
- new NodeResources(3, 20, 100, 1)),
- clock().instant()));
- cluster1 = cluster1.withTarget(new Autoscaling(new ClusterResources(4, 1,
- new NodeResources(3, 16, 100, 1)), clock().instant()));
+ cluster1 = cluster1.withSuggested(new Autoscaling(Autoscaling.Status.unavailable,
+ "",
+ Optional.of(new ClusterResources(6, 2,
+ new NodeResources(3, 20, 100, 1))),
+ clock().instant(),
+ Load.zero(),
+ Load.zero()));
+ cluster1 = cluster1.withTarget(new Autoscaling(Autoscaling.Status.unavailable,
+ "",
+ Optional.of(new ClusterResources(4, 1,
+ new NodeResources(3, 16, 100, 1))),
+ clock().instant(),
+ Load.zero(),
+ Load.zero()));
try (Mutex lock = applications().lock(app1Id)) {
applications().put(app1.with(cluster1), lock);
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingIntegrationTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingIntegrationTest.java
index c65ebae9b3b..158c5116e19 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingIntegrationTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingIntegrationTest.java
@@ -44,7 +44,6 @@ public class AutoscalingIntegrationTest {
}
var scaledResources = autoscaler.suggest(fixture.application(), fixture.cluster(), fixture.nodes());
- assertTrue(scaledResources.isPresent());
}
private static class MockHttpClient implements MetricsV2MetricsFetcher.AsyncHttpClient {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
index abe25b05955..4ce07d53ea9 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
@@ -36,15 +36,15 @@ public class AutoscalingTest {
fixture.autoscale());
fixture.deploy(Capacity.from(scaledResources));
- assertTrue("Cluster in flux -> No further change", fixture.autoscale().isEmpty());
+ assertEquals("Cluster in flux -> No further change", Autoscaling.Status.waiting, fixture.autoscale().status());
fixture.deactivateRetired(Capacity.from(scaledResources));
fixture.loader().applyCpuLoad(0.19f, 10);
- assertEquals("Load change is small -> No change", Optional.empty(), fixture.autoscale().target());
+ assertEquals("Load change is small -> No change", Optional.empty(), fixture.autoscale().resources());
fixture.loader().applyCpuLoad(0.1f, 10);
- assertEquals("Too little time passed for downscaling -> No change", Optional.empty(), fixture.autoscale().target());
+ assertEquals("Too little time passed for downscaling -> No change", Optional.empty(), fixture.autoscale().resources());
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(0.1f, 10);
@@ -57,13 +57,13 @@ public class AutoscalingTest {
@Test
public void test_no_autoscaling_with_no_measurements() {
var fixture = AutoscalingTester.fixture().awsProdSetup(true).build();
- assertTrue(fixture.autoscale().target().isEmpty());
+ assertTrue(fixture.autoscale().resources().isEmpty());
}
@Test
public void test_no_autoscaling_with_no_measurements_exclusive() {
var fixture = AutoscalingTester.fixture().awsProdSetup(false).build();
- assertTrue(fixture.autoscale().target().isEmpty());
+ assertTrue(fixture.autoscale().resources().isEmpty());
}
/** Using too many resources for a short period is proof we should scale up regardless of the time that takes. */
@@ -277,7 +277,7 @@ public class AutoscalingTest {
fixture.deactivateRetired(capacity);
fixture.tester().clock().advance(Duration.ofDays(1));
fixture.loader().applyCpuLoad(0.8, 120);
- assertEquals(DiskSpeed.any, fixture.autoscale(capacity).target().get().nodeResources().diskSpeed());
+ assertEquals(DiskSpeed.any, fixture.autoscale(capacity).resources().get().nodeResources().diskSpeed());
}
@Test
@@ -357,10 +357,9 @@ public class AutoscalingTest {
ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
var fixture = AutoscalingTester.fixture().awsProdSetup(true).capacity(Capacity.from(min, min)).build();
- // deploy
fixture.tester().clock().advance(Duration.ofDays(1));
fixture.loader().applyCpuLoad(0.25, 120);
- assertTrue(fixture.autoscale().isEmpty());
+ assertEquals(Autoscaling.Status.unavailable, fixture.autoscale().status());
}
@Test
@@ -379,12 +378,12 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.01, 0.01, 0.01), 120);
- Autoscaler.Advice suggestion = fixture.suggest();
+ Autoscaling suggestion = fixture.suggest();
fixture.tester().assertResources("Choosing the remote disk flavor as it has less disk",
2, 1, 3.0, 100.0, 10.0,
suggestion);
assertEquals("Choosing the remote disk flavor as it has less disk",
- StorageType.remote, suggestion.target().get().nodeResources().storageType());
+ StorageType.remote, suggestion.resources().get().nodeResources().storageType());
}
@Test
@@ -415,7 +414,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.9, 0.6, 0.7), 1, false, true, 120);
assertTrue("Not scaling up since nodes were measured while cluster was out of service",
- fixture.autoscale().target().isEmpty());
+ fixture.autoscale().resources().isEmpty());
}
@Test
@@ -424,7 +423,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.9, 0.6, 0.7), 1, true, false, 120);
assertTrue("Not scaling up since nodes were measured while cluster was unstable",
- fixture.autoscale().target().isEmpty());
+ fixture.autoscale().resources().isEmpty());
}
@Test
@@ -521,7 +520,7 @@ public class AutoscalingTest {
public void scaling_down_only_after_delay() {
var fixture = AutoscalingTester.fixture().awsProdSetup(true).build();
fixture.loader().applyCpuLoad(0.02, 120);
- assertTrue("Too soon after initial deployment", fixture.autoscale().target().isEmpty());
+ assertTrue("Too soon after initial deployment", fixture.autoscale().resources().isEmpty());
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(0.02, 120);
fixture.tester().assertResources("Scaling down since enough time has passed",
@@ -670,7 +669,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(1.0, 1.0, 1.0), 200);
assertTrue("Not attempting to scale up because policies dictate we'll only get one node",
- fixture.autoscale().target().isEmpty());
+ fixture.autoscale().resources().isEmpty());
}
/** Same setup as test_autoscaling_in_dev(), just with required = true */
@@ -731,12 +730,14 @@ public class AutoscalingTest {
fixture.currentResources().advertisedResources());
fixture.tester().deploy(fixture.applicationId(), clusterSpec(false), fixture.capacity());
+ fixture.loader().applyLoad(new Load(0.1, 0.1, 0.1), 100);
fixture.tester().assertResources("With non-exclusive nodes, a better solution is " +
"50% more nodes with half the cpu",
- 3, 1, 1, 4, 145.6,
+ 3, 1, 1, 4, 100.0,
fixture.autoscale());
fixture.tester().deploy(fixture.applicationId(), clusterSpec(true), fixture.capacity());
+ fixture.loader().applyLoad(new Load(0.1, 0.1, 0.1), 100);
fixture.tester().assertResources("Reverts to the initial resources",
2, 1, 2, 4, 100,
fixture.currentResources().advertisedResources());
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java
index fd98af21134..4e6b8dec9ef 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java
@@ -134,8 +134,7 @@ class AutoscalingTester {
cluster.required(),
cluster.suggested(),
cluster.target(),
- List.of(), // Remove scaling events
- cluster.autoscalingStatus());
+ List.of()); // Remove scaling events
cluster = cluster.with(ScalingEvent.create(cluster.minResources(), cluster.minResources(),
0,
clock().instant().minus(Duration.ofDays(1).minus(duration))).withCompletion(clock().instant().minus(Duration.ofDays(1))));
@@ -143,7 +142,7 @@ class AutoscalingTester {
nodeRepository().applications().put(application, nodeRepository().applications().lock(applicationId));
}
- public Autoscaler.Advice autoscale(ApplicationId applicationId, ClusterSpec cluster, Capacity capacity) {
+ public Autoscaling autoscale(ApplicationId applicationId, ClusterSpec cluster, Capacity capacity) {
capacity = capacityPolicies.applyOn(capacity, applicationId, capacityPolicies.decideExclusivity(capacity, cluster).isExclusive());
Application application = nodeRepository().applications().get(applicationId).orElse(Application.empty(applicationId))
.withCluster(cluster.id(), false, capacity);
@@ -154,7 +153,7 @@ class AutoscalingTester {
nodeRepository().nodes().list(Node.State.active).owner(applicationId));
}
- public Autoscaler.Advice suggest(ApplicationId applicationId, ClusterSpec.Id clusterId,
+ public Autoscaling suggest(ApplicationId applicationId, ClusterSpec.Id clusterId,
ClusterResources min, ClusterResources max) {
Application application = nodeRepository().applications().get(applicationId).orElse(Application.empty(applicationId))
.withCluster(clusterId, false, Capacity.from(min, max));
@@ -177,10 +176,10 @@ class AutoscalingTester {
public ClusterResources assertResources(String message,
int nodeCount, int groupCount,
double approxCpu, double approxMemory, double approxDisk,
- Autoscaler.Advice advice) {
- assertTrue("Resources are present: " + message + " (" + advice + ": " + advice.reason() + ")",
- advice.target().isPresent());
- var resources = advice.target().get();
+ Autoscaling autoscaling) {
+ assertTrue("Resources are present: " + message + " (" + autoscaling + ": " + autoscaling.status() + ")",
+ autoscaling.resources().isPresent());
+ var resources = autoscaling.resources().get();
assertResources(message, nodeCount, groupCount, approxCpu, approxMemory, approxDisk, resources);
return resources;
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java
index 311428de8ff..a43746db6d9 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java
@@ -95,17 +95,17 @@ public class Fixture {
public Loader loader() { return loader; }
/** Autoscale within the deployed capacity of this. */
- public Autoscaler.Advice autoscale() {
+ public Autoscaling autoscale() {
return autoscale(capacity);
}
/** Autoscale within the given capacity. */
- public Autoscaler.Advice autoscale(Capacity capacity) {
+ public Autoscaling autoscale(Capacity capacity) {
return tester().autoscale(applicationId, clusterSpec, capacity);
}
/** Compute an autoscaling suggestion for this. */
- public Autoscaler.Advice suggest() {
+ public Autoscaling suggest() {
return tester().suggest(applicationId, clusterSpec.id(), capacity.minResources(), capacity.maxResources());
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java
index 214d842e4bb..3084ce9215a 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java
@@ -16,6 +16,7 @@ import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.applications.ScalingEvent;
import com.yahoo.vespa.hosted.provision.autoscale.ClusterModel;
+import com.yahoo.vespa.hosted.provision.autoscale.Load;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.History;
import com.yahoo.vespa.hosted.provision.testutils.MockDeployer;
@@ -28,6 +29,7 @@ import java.util.Optional;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
/**
@@ -53,7 +55,6 @@ public class AutoscalingMaintainerTest {
new MockDeployer.ApplicationContext(app1, cluster1, Capacity.from(new ClusterResources(2, 1, lowResources))),
new MockDeployer.ApplicationContext(app2, cluster2, Capacity.from(new ClusterResources(2, 1, highResources))));
-
tester.maintainer().maintain(); // noop
assertTrue(tester.deployer().lastDeployTime(app1).isEmpty());
assertTrue(tester.deployer().lastDeployTime(app2).isEmpty());
@@ -218,7 +219,7 @@ public class AutoscalingMaintainerTest {
tester.deploy(app1, cluster1, capacity);
tester.addMeasurements(1.0f, 0.3f, 0.3f, 0, 4, app1, cluster1.id());
tester.maintainer().maintain();
- assertEquals("Scale up: " + tester.cluster(app1, cluster1).autoscalingStatus(),
+ assertEquals("Scale up: " + tester.cluster(app1, cluster1).target().status(),
1,
tester.cluster(app1, cluster1).lastScalingEvent().get().generation());
@@ -299,6 +300,33 @@ public class AutoscalingMaintainerTest {
.size());
}
+ @Test
+ public void empty_autoscaling_is_ignored() {
+ ApplicationId app1 = AutoscalingMaintainerTester.makeApplicationId("app1");
+ ClusterSpec cluster1 = AutoscalingMaintainerTester.containerClusterSpec();
+ NodeResources resources = new NodeResources(4, 4, 10, 1);
+ ClusterResources min = new ClusterResources(2, 1, resources);
+ ClusterResources max = new ClusterResources(20, 1, resources);
+ var capacity = Capacity.from(min, max);
+ var tester = new AutoscalingMaintainerTester(new MockDeployer.ApplicationContext(app1, cluster1, capacity));
+
+ // Add a scaling event
+ tester.deploy(app1, cluster1, capacity);
+ tester.addMeasurements(1.0f, 0.3f, 0.3f, 0, 4, app1, cluster1.id());
+ tester.maintainer().maintain();
+ assertEquals("Scale up: " + tester.cluster(app1, cluster1).target().status(),
+ 1,
+ tester.cluster(app1, cluster1).lastScalingEvent().get().generation());
+ Load peak = tester.cluster(app1, cluster1).target().peak();
+ assertNotEquals(Load.zero(), peak);
+
+ // Old measurements go out of scope and no new ones are made
+ tester.clock().advance(Duration.ofDays(1));
+ tester.maintainer().maintain();
+ Load newPeak = tester.cluster(app1, cluster1).target().peak();
+ assertEquals("Old measurements are retained", peak, newPeak);
+ }
+
private void autoscale(boolean down, Duration completionTime, Duration expectedWindow,
ManualClock clock, ApplicationId application, ClusterSpec cluster,
AutoscalingMaintainerTester tester) {
@@ -322,7 +350,7 @@ public class AutoscalingMaintainerTest {
tester.addMeasurements(load, load, load, generation, 200, application, cluster.id());
tester.maintainer().maintain();
assertEquals("We passed window duration so a new autoscaling is started: " +
- tester.cluster(application, cluster).autoscalingStatus(),
+ tester.cluster(application, cluster).target().status(),
generation + 1,
tester.cluster(application, cluster).lastScalingEvent().get().generation());
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializerTest.java
index e6460b4a610..a05cc388bea 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializerTest.java
@@ -2,16 +2,15 @@
package com.yahoo.vespa.hosted.provision.persistence;
import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.vespa.hosted.provision.applications.Application;
-import com.yahoo.vespa.hosted.provision.applications.AutoscalingStatus;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.applications.ScalingEvent;
import com.yahoo.vespa.hosted.provision.applications.Status;
import com.yahoo.vespa.hosted.provision.autoscale.Autoscaling;
+import com.yahoo.vespa.hosted.provision.autoscale.Load;
import org.junit.Test;
import java.time.Instant;
@@ -38,26 +37,33 @@ public class ApplicationSerializerTest {
true,
Autoscaling.empty(),
Autoscaling.empty(),
- List.of(),
- AutoscalingStatus.empty()));
+ List.of()));
var minResources = new NodeResources(1, 2, 3, 4);
clusters.add(new Cluster(ClusterSpec.Id.from("c2"),
true,
new ClusterResources( 8, 4, minResources),
new ClusterResources(14, 7, new NodeResources(3, 6, 21, 24)),
false,
- new Autoscaling(new ClusterResources(20, 10,
- new NodeResources(0.5, 4, 14, 16)),
- Instant.ofEpochMilli(1234L)),
- new Autoscaling(new ClusterResources(10, 5,
- new NodeResources(2, 4, 14, 16)),
- Instant.ofEpochMilli(5678L)),
+ new Autoscaling(Autoscaling.Status.unavailable,
+ "",
+ Optional.of(new ClusterResources(20, 10,
+ new NodeResources(0.5, 4, 14, 16))),
+ Instant.ofEpochMilli(1234L),
+ new Load(0.1, 0.2, 0.3),
+ new Load(0.4, 0.5, 0.6)),
+ new Autoscaling(Autoscaling.Status.insufficient,
+ "Autoscaling status",
+ Optional.of(new ClusterResources(10, 5,
+ new NodeResources(2, 4, 14, 16))),
+ Instant.ofEpochMilli(5678L),
+ Load.zero(),
+ Load.one()),
List.of(new ScalingEvent(new ClusterResources(10, 5, minResources),
new ClusterResources(12, 6, minResources),
7L,
Instant.ofEpochMilli(12345L),
- Optional.of(Instant.ofEpochMilli(67890L)))),
- new AutoscalingStatus(AutoscalingStatus.Status.insufficient, "Autoscaling status")));
+ Optional.of(Instant.ofEpochMilli(67890L))))
+ ));
Application original = new Application(ApplicationId.from("myTenant", "myApplication", "myInstance"),
Status.initial().withCurrentReadShare(0.3).withMaxReadShare(0.5),
clusters);
@@ -82,7 +88,6 @@ public class ApplicationSerializerTest {
assertEquals(originalCluster.suggested(), serializedCluster.suggested());
assertEquals(originalCluster.target(), serializedCluster.target());
assertEquals(originalCluster.scalingEvents(), serializedCluster.scalingEvents());
- assertEquals(originalCluster.autoscalingStatus(), serializedCluster.autoscalingStatus());
}
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CachingCuratorTest.java
index 40fb741fde8..01d1ab5c8d0 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CachingCuratorTest.java
@@ -26,12 +26,12 @@ import static org.junit.Assert.fail;
*
* @author bratseth
*/
-public class CuratorDatabaseTest {
+public class CachingCuratorTest {
@Test
public void testTransactionsIncreaseCounter() throws Exception {
MockCurator curator = new MockCurator();
- CuratorDatabase database = new CuratorDatabase(curator, Path.fromString("/"), true);
+ CachingCurator database = new CachingCurator(curator, Path.fromString("/"), true);
assertEquals(0L, (long)curator.counter("/changeCounter").get().get().postValue());
@@ -56,7 +56,7 @@ public class CuratorDatabaseTest {
@Test
public void testCacheInvalidation() throws Exception {
MockCurator curator = new MockCurator();
- CuratorDatabase database = new CuratorDatabase(curator, Path.fromString("/"), true);
+ CachingCurator database = new CachingCurator(curator, Path.fromString("/"), true);
assertEquals(0L, (long)curator.counter("/changeCounter").get().get().postValue());
commitCreate("/1", database);
@@ -74,7 +74,7 @@ public class CuratorDatabaseTest {
@Test
public void testTransactionsWithDeactivatedCache() throws Exception {
MockCurator curator = new MockCurator();
- CuratorDatabase database = new CuratorDatabase(curator, Path.fromString("/"), false);
+ CachingCurator database = new CachingCurator(curator, Path.fromString("/"), false);
assertEquals(0L, (long)curator.counter("/changeCounter").get().get().postValue());
@@ -93,7 +93,7 @@ public class CuratorDatabaseTest {
@Test
public void testThatCounterIncreasesExactlyOnCommitFailure() throws Exception {
MockCurator curator = new MockCurator();
- CuratorDatabase database = new CuratorDatabase(curator, Path.fromString("/"), true);
+ CachingCurator database = new CachingCurator(curator, Path.fromString("/"), true);
assertEquals(0L, (long)curator.counter("/changeCounter").get().get().postValue());
@@ -116,14 +116,14 @@ public class CuratorDatabaseTest {
assertEquals(3L, (long)curator.counter("/changeCounter").get().get().postValue());
}
- private void commitCreate(String path, CuratorDatabase database) {
+ private void commitCreate(String path, CachingCurator database) {
NestedTransaction t = new NestedTransaction();
CuratorTransaction c = database.newCuratorTransactionIn(t);
c.add(CuratorOperations.create(path));
t.commit();
}
- private void commitReadingWrite(String path, byte[] data, CuratorDatabase database) {
+ private void commitReadingWrite(String path, byte[] data, CachingCurator database) {
NestedTransaction transaction = new NestedTransaction();
byte[] oldData = database.getData(Path.fromString(path)).get();
CuratorTransaction curatorTransaction = database.newCuratorTransactionIn(transaction);
@@ -134,7 +134,7 @@ public class CuratorDatabaseTest {
}
/** Commit an operation which fails during commit. */
- private void commitFailing(CuratorDatabase database) {
+ private void commitFailing(CachingCurator database) {
NestedTransaction t = new NestedTransaction();
CuratorTransaction c = database.newCuratorTransactionIn(t);
c.add(new DummyOperation(() -> { throw new RuntimeException(); }));
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClientTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDbTest.java
index f11cfa9a15d..42a0dd982ad 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClientTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDbTest.java
@@ -20,10 +20,10 @@ import static org.junit.Assert.assertEquals;
/**
* @author mpolden
*/
-public class CuratorDatabaseClientTest {
+public class CuratorDbTest {
private final Curator curator = new MockCurator();
- private final CuratorDatabaseClient zkClient = new CuratorDatabaseClient(
+ private final CuratorDb zkClient = new CuratorDb(
FlavorConfigBuilder.createDummies("default"), curator, Clock.systemUTC(), true, 1000);
@Test
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application1.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application1.json
index 02aef6fa9c4..ead4e5fd06a 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application1.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application1.json
@@ -44,39 +44,61 @@
}
},
"suggested" : {
- "nodes" : 6,
- "groups" : 2,
+ "status" : "unavailable",
+ "description" : "",
"resources" : {
- "vcpu" : 3.0,
- "memoryGb" : 20.0,
- "diskGb" : 100.0,
- "bandwidthGbps" : 1.0,
- "diskSpeed" : "fast",
- "storageType" : "any",
- "architecture":"x86_64"
+ "nodes": 6,
+ "groups": 2,
+ "resources": {
+ "vcpu": 3.0,
+ "memoryGb": 20.0,
+ "diskGb": 100.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "any",
+ "architecture": "x86_64"
+ }
+ },
+ "at" : 123,
+ "peak" : {
+ "cpu" : 0.0,
+ "memory" : 0.0,
+ "disk" : 0.0
+ },
+ "ideal" : {
+ "cpu" : 0.0,
+ "memory" : 0.0,
+ "disk" : 0.0
}
},
"target" : {
- "nodes" : 4,
- "groups" : 1,
+ "status" : "unavailable",
+ "description" : "",
"resources" : {
- "vcpu" : 3.0,
- "memoryGb" : 16.0,
- "diskGb" : 100.0,
- "bandwidthGbps" : 1.0,
- "diskSpeed" : "fast",
- "storageType" : "any",
- "architecture":"x86_64"
+ "nodes" : 4,
+ "groups" : 1,
+ "resources" : {
+ "vcpu": 3.0,
+ "memoryGb": 16.0,
+ "diskGb": 100.0,
+ "bandwidthGbps": 1.0,
+ "diskSpeed": "fast",
+ "storageType": "any",
+ "architecture": "x86_64"
+ }
+ },
+ "at" : 123,
+ "peak" : {
+ "cpu" : 0.0,
+ "memory" : 0.0,
+ "disk" : 0.0
+ },
+ "ideal" : {
+ "cpu" : 0.0,
+ "memory" : 0.0,
+ "disk" : 0.0
}
},
- "utilization" : {
- "idealCpu": 0.40750000000000003,
- "peakCpu": 0.0,
- "idealMemory": 0.8,
- "peakMemory": 0.0,
- "idealDisk": 0.95,
- "peakDisk": 0.0
- },
"scalingEvents" : [
{
"from": {
@@ -108,11 +130,7 @@
"at" : 123
}
],
- "autoscalingStatusCode": "unavailable",
- "autoscalingStatus": "",
- "scalingDuration": 600000,
- "maxQueryGrowthRate": 0.1,
- "currentQueryFractionOfMax": 0.5
+ "scalingDuration": 600000
}
}
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application2.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application2.json
index 341fa5f6031..f60fdf3e602 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application2.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application2.json
@@ -43,13 +43,20 @@
"architecture":"x86_64"
}
},
- "utilization" : {
- "idealCpu": 0.42670157068062825,
- "peakCpu": 0.0,
- "idealMemory": 0.325,
- "peakMemory": 0.0,
- "idealDisk": 0.3,
- "peakDisk": 0.0
+ "target" : {
+ "status" : "unavailable",
+ "description" : "",
+ "at" : 0,
+ "peak" : {
+ "cpu" : 0.0,
+ "memory" : 0.0,
+ "disk" : 0.0
+ },
+ "ideal" : {
+ "cpu" : 0.0,
+ "memory" : 0.0,
+ "disk" : 0.0
+ }
},
"scalingEvents" : [
{
@@ -82,11 +89,7 @@
"at" : 123
}
],
- "autoscalingStatusCode": "unavailable",
- "autoscalingStatus" : "",
- "scalingDuration": 43200000,
- "maxQueryGrowthRate": 0.1,
- "currentQueryFractionOfMax": 0.5
+ "scalingDuration": 43200000
}
}
}
diff --git a/parent/pom.xml b/parent/pom.xml
index 2f61600424d..d1fc3e4f76a 100644
--- a/parent/pom.xml
+++ b/parent/pom.xml
@@ -653,11 +653,6 @@
</dependency>
<dependency>
<groupId>io.airlift</groupId>
- <artifactId>aircompressor</artifactId>
- <version>${aircompressor.version}</version>
- </dependency>
- <dependency>
- <groupId>io.airlift</groupId>
<artifactId>airline</artifactId>
<version>${airline.version}</version>
</dependency>
@@ -775,7 +770,7 @@
<dependency>
<groupId>org.apache.httpcomponents.core5</groupId>
<artifactId>httpcore5</artifactId>
- <version>${apache.httpclient5.version}</version>
+ <version>${apache.httpcore5.version}</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
@@ -1092,13 +1087,13 @@
</dependencyManagement>
<properties>
- <aircompressor.version>0.21</aircompressor.version>
<airline.version>0.9</airline.version>
<antlr.version>3.5.2</antlr.version>
<antlr4.version>4.9.3</antlr4.version>
- <apache.httpclient.version>4.5.13</apache.httpclient.version>
- <apache.httpcore.version>4.4.13</apache.httpcore.version>
- <apache.httpclient5.version>5.1.3</apache.httpclient5.version> <!-- WARNING: sync cloud-tenant-base-dependencies-enforcer/pom.xml -->
+ <apache.httpclient.version>4.5.14</apache.httpclient.version>
+ <apache.httpcore.version>4.4.16</apache.httpcore.version>
+ <apache.httpclient5.version>5.1.4</apache.httpclient5.version> <!-- WARNING: sync cloud-tenant-base-dependencies-enforcer/pom.xml -->
+ <apache.httpcore5.version>5.1.5</apache.httpcore5.version> <!-- WARNING: sync cloud-tenant-base-dependencies-enforcer/pom.xml -->
<asm.version>9.3</asm.version>
<!-- Athenz dependencies. Make sure these dependencies match those in Vespa's internal repositories -->
@@ -1147,7 +1142,7 @@
<maven-source-plugin.version>3.2.1</maven-source-plugin.version>
<maven-wagon.version>2.10</maven-wagon.version>
<mockito.version>4.0.0</mockito.version>
- <netty.version>4.1.85.Final</netty.version>
+ <netty.version>4.1.86.Final</netty.version>
<netty-tcnative.version>2.0.54.Final</netty-tcnative.version>
<onnxruntime.version>1.12.1</onnxruntime.version> <!-- WARNING: sync cloud-tenant-base-dependencies-enforcer/pom.xml -->
<org.json.version>20220320</org.json.version>
diff --git a/pom.xml b/pom.xml
index 1f3f3968c42..56dc6ad81ca 100644
--- a/pom.xml
+++ b/pom.xml
@@ -24,6 +24,7 @@
</developers>
<modules>
+ <module>airlift-zstd</module>
<module>application</module>
<module>application-model</module>
<module>athenz-identity-provider-service</module>
diff --git a/screwdriver/release-container-image.sh b/screwdriver/release-container-image.sh
index 0b61a6be281..6a9db770c82 100755
--- a/screwdriver/release-container-image.sh
+++ b/screwdriver/release-container-image.sh
@@ -73,6 +73,6 @@ if grep $VESPA_VERSION <<< "$IMAGE_TAGS" &> /dev/null; then
else
docker login --username aressem --password "$GHCR_DEPLOY_KEY" ghcr.io
docker buildx build --progress plain --push --platform linux/amd64,linux/arm64 --build-arg VESPA_VERSION=$VESPA_VERSION \
- --tag ghcr.io/vespa-engine/vespa:$VESPA_VERSION --tag docker.io/vespaengine/$IMAGE_NAME:$VESPA_MAJOR \
+ --tag ghcr.io/vespa-engine/vespa:$VESPA_VERSION --tag ghcr.io/vespa-engine/vespa:$VESPA_MAJOR \
--tag ghcr.io/vespa-engine/vespa:latest .
fi
diff --git a/searchcore/src/vespa/searchcore/proton/common/scheduled_forward_executor.cpp b/searchcore/src/vespa/searchcore/proton/common/scheduled_forward_executor.cpp
index b38de7a2456..f19ff36dbfb 100644
--- a/searchcore/src/vespa/searchcore/proton/common/scheduled_forward_executor.cpp
+++ b/searchcore/src/vespa/searchcore/proton/common/scheduled_forward_executor.cpp
@@ -3,8 +3,8 @@
#include "scheduled_forward_executor.h"
#include <vespa/vespalib/util/lambdatask.h>
#include <vespa/vespalib/stllike/hash_map.hpp>
-#include <atomic>
#include <thread>
+#include <condition_variable>
#include <cassert>
using vespalib::makeLambdaTask;
@@ -14,44 +14,56 @@ namespace proton {
class ScheduledForwardExecutor::State {
public:
State() :
+ _mutex(),
+ _cond(),
_handle(),
_start_success(0),
_start_failed(0),
_running(false)
{}
~State() {
+ std::lock_guard guard(_mutex);
assert( !_handle );
- assert(!isRunning());
+ assert( ! _running);
}
/// Returns false if it was already running
bool start() {
- bool already_running = _running.exchange(true);
+ std::lock_guard guard(_mutex);
+ bool already_running = _running;
+ _running = true;
if (already_running) {
_start_failed++;
} else {
_start_success++;
}
+ _cond.notify_all();
return ! already_running;
}
void complete() {
- bool was_running = _running.exchange(false);
+ std::lock_guard guard(_mutex);
+ bool was_running = _running;
+ _running = false;
assert(was_running);
+ _cond.notify_all();
}
void setHandle(Handle handle) {
+ std::lock_guard guard(_mutex);
_handle = std::move(handle);
}
void cancel() {
+ std::unique_lock guard(_mutex);
_handle.reset();
- while(isRunning()) {
- std::this_thread::sleep_for(1ms);
+ while(_running) {
+ _cond.wait(guard);
}
}
private:
- bool isRunning() const { return _running.load(std::memory_order_relaxed); }
- Handle _handle;
- std::atomic<uint64_t> _start_success;
- std::atomic<uint64_t> _start_failed;
- std::atomic<bool> _running;
+ std::mutex _mutex;
+ std::condition_variable _cond;
+ Handle _handle;
+ uint64_t _start_success;
+ uint64_t _start_failed;
+ bool _running;
};
class ScheduledForwardExecutor::Registration : public vespalib::IDestructorCallback {
diff --git a/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_sampler.cpp b/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_sampler.cpp
index 9e09e968b49..1f3bb524f74 100644
--- a/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_sampler.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_sampler.cpp
@@ -41,8 +41,8 @@ DiskMemUsageSampler::setConfig(const Config &config, IScheduledExecutor & execut
if (_periodicHandle && (_sampleInterval == config.sampleInterval) && !wasChanged) {
return;
}
- _sampleInterval = config.sampleInterval;
_periodicHandle.reset();
+ _sampleInterval = config.sampleInterval;
sampleAndReportUsage();
vespalib::duration maxInterval = std::min(vespalib::duration(1s), _sampleInterval);
_periodicHandle = executor.scheduleAtFixedRate(makeLambdaTask([this]() {
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp b/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp
index f32b03beb78..3b3c043d578 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp
@@ -175,7 +175,6 @@ getStoreConfig(const ProtonConfig::Summary::Cache & cache, const HwInfo & hwInfo
? (hwInfo.memory().sizeBytes()*std::min(INT64_C(50), -cache.maxbytes))/100l
: cache.maxbytes;
return DocumentStore::Config(deriveCompression(cache.compression), maxBytes, cache.initialentries)
- .allowVisitCaching(cache.allowvisitcaching)
.updateStrategy(derive(cache.updateStrategy));
}
diff --git a/searchlib/src/tests/docstore/document_store/document_store_test.cpp b/searchlib/src/tests/docstore/document_store/document_store_test.cpp
index 5050592dc87..9fef6f57e85 100644
--- a/searchlib/src/tests/docstore/document_store/document_store_test.cpp
+++ b/searchlib/src/tests/docstore/document_store/document_store_test.cpp
@@ -91,7 +91,7 @@ vespalib::stringref S1("this is a string long enough to be compressed and is jus
"Adding some repeatble sequences like aaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbb to ensure compression"
"xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz");
-Value createValue(vespalib::stringref s, const CompressionConfig & cfg) {
+Value createValue(vespalib::stringref s, CompressionConfig cfg) {
Value v(7);
vespalib::DataBuffer input;
input.writeBytes(s.data(), s.size());
diff --git a/searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp b/searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp
index ad0bf60fbe3..414e06bd464 100644
--- a/searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp
+++ b/searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp
@@ -528,8 +528,7 @@ VisitCacheStore::VerifyVisitor::~VerifyVisitor() {
VisitCacheStore::VisitCacheStore(UpdateStrategy strategy) :
_myDir("visitcache"),
_repo(makeDocTypeRepoConfig()),
- _config(DocumentStore::Config(CompressionConfig::LZ4, 1000000, 0)
- .allowVisitCaching(true).updateStrategy(strategy),
+ _config(DocumentStore::Config(CompressionConfig::LZ4, 1000000, 0).updateStrategy(strategy),
LogDataStore::Config().setMaxFileSize(50000).setMaxBucketSpread(3.0)
.setFileConfig(WriteableFileChunk::Config(CompressionConfig(), 16_Ki))),
_fileHeaderContext(),
diff --git a/searchlib/src/vespa/searchlib/docstore/chunk.cpp b/searchlib/src/vespa/searchlib/docstore/chunk.cpp
index 2c0e47301d9..e83b190a7c0 100644
--- a/searchlib/src/vespa/searchlib/docstore/chunk.cpp
+++ b/searchlib/src/vespa/searchlib/docstore/chunk.cpp
@@ -58,12 +58,12 @@ Chunk::hasRoom(size_t len) const
}
size_t
-Chunk::getMaxPackSize(const CompressionConfig & compression) const {
+Chunk::getMaxPackSize(CompressionConfig compression) const {
return _format->getMaxPackSize(compression);
}
void
-Chunk::pack(uint64_t lastSerial, vespalib::DataBuffer & compressed, const CompressionConfig & compression)
+Chunk::pack(uint64_t lastSerial, vespalib::DataBuffer & compressed, CompressionConfig compression)
{
_lastSerial = lastSerial;
std::lock_guard guard(_lock);
diff --git a/searchlib/src/vespa/searchlib/docstore/chunk.h b/searchlib/src/vespa/searchlib/docstore/chunk.h
index 158d754eb9a..93fc98311cd 100644
--- a/searchlib/src/vespa/searchlib/docstore/chunk.h
+++ b/searchlib/src/vespa/searchlib/docstore/chunk.h
@@ -97,8 +97,8 @@ public:
size_t size() const;
const LidList & getLids() const { return _lids; }
LidList getUniqueLids() const;
- size_t getMaxPackSize(const CompressionConfig & compression) const;
- void pack(uint64_t lastSerial, vespalib::DataBuffer & buffer, const CompressionConfig & compression);
+ size_t getMaxPackSize(CompressionConfig compression) const;
+ void pack(uint64_t lastSerial, vespalib::DataBuffer & buffer, CompressionConfig compression);
uint64_t getLastSerial() const { return _lastSerial; }
uint32_t getId() const { return _id; }
bool validSerial() const { return getLastSerial() != static_cast<uint64_t>(-1l); }
diff --git a/searchlib/src/vespa/searchlib/docstore/chunkformat.cpp b/searchlib/src/vespa/searchlib/docstore/chunkformat.cpp
index 79a6742f9da..5b352b91c38 100644
--- a/searchlib/src/vespa/searchlib/docstore/chunkformat.cpp
+++ b/searchlib/src/vespa/searchlib/docstore/chunkformat.cpp
@@ -19,7 +19,7 @@ ChunkException::ChunkException(const vespalib::string & msg, vespalib::stringref
}
void
-ChunkFormat::pack(uint64_t lastSerial, vespalib::DataBuffer & compressed, const CompressionConfig & compression)
+ChunkFormat::pack(uint64_t lastSerial, vespalib::DataBuffer & compressed, CompressionConfig compression)
{
vespalib::nbostream & os = _dataBuf;
os << lastSerial;
@@ -46,7 +46,7 @@ ChunkFormat::pack(uint64_t lastSerial, vespalib::DataBuffer & compressed, const
}
size_t
-ChunkFormat::getMaxPackSize(const CompressionConfig & compression) const
+ChunkFormat::getMaxPackSize(CompressionConfig compression) const
{
const size_t OVERHEAD(0);
const size_t MINSIZE(1 + 1 + 4 + 4 + (includeSerializedSize() ? 4 : 0)); // version + type + real length + crc + lastserial
diff --git a/searchlib/src/vespa/searchlib/docstore/chunkformat.h b/searchlib/src/vespa/searchlib/docstore/chunkformat.h
index 6fa13428b21..343f1dbb9c7 100644
--- a/searchlib/src/vespa/searchlib/docstore/chunkformat.h
+++ b/searchlib/src/vespa/searchlib/docstore/chunkformat.h
@@ -31,7 +31,7 @@ public:
* @param compressed The buffer where the serialized data shall be placed.
* @param compression What kind of compression shall be employed.
*/
- void pack(uint64_t lastSerial, vespalib::DataBuffer & compressed, const CompressionConfig & compression);
+ void pack(uint64_t lastSerial, vespalib::DataBuffer & compressed, CompressionConfig compression);
/**
* Will deserialize and create a representation of the uncompressed data.
* param buffer Pointer to the serialized data
@@ -45,7 +45,7 @@ public:
* @param compression Compression config to be used.
* @return maximum number of bytes a packet can take in serialized form.
*/
- size_t getMaxPackSize(const CompressionConfig & compression) const;
+ size_t getMaxPackSize(CompressionConfig compression) const;
protected:
/**
* Constructor used when deserializing
diff --git a/searchlib/src/vespa/searchlib/docstore/compacter.cpp b/searchlib/src/vespa/searchlib/docstore/compacter.cpp
index 33ed84ff4d0..04ff150c741 100644
--- a/searchlib/src/vespa/searchlib/docstore/compacter.cpp
+++ b/searchlib/src/vespa/searchlib/docstore/compacter.cpp
@@ -23,7 +23,7 @@ Compacter::write(LockGuard guard, uint32_t chunkId, uint32_t lid, const void *bu
_ds.write(std::move(guard), fileId, lid, buffer, sz);
}
-BucketCompacter::BucketCompacter(size_t maxSignificantBucketBits, const CompressionConfig & compression, LogDataStore & ds,
+BucketCompacter::BucketCompacter(size_t maxSignificantBucketBits, CompressionConfig compression, LogDataStore & ds,
Executor & executor, const IBucketizer & bucketizer, FileId source, FileId destination) :
_unSignificantBucketBits((maxSignificantBucketBits > 8) ? (maxSignificantBucketBits - 8) : 0),
_sourceFileId(source),
diff --git a/searchlib/src/vespa/searchlib/docstore/compacter.h b/searchlib/src/vespa/searchlib/docstore/compacter.h
index 9c5775c0c4a..0d7633b1699 100644
--- a/searchlib/src/vespa/searchlib/docstore/compacter.h
+++ b/searchlib/src/vespa/searchlib/docstore/compacter.h
@@ -35,7 +35,7 @@ class BucketCompacter : public IWriteData, public StoreByBucket::IWrite
using Executor = vespalib::Executor;
public:
using FileId = FileChunk::FileId;
- BucketCompacter(size_t maxSignificantBucketBits, const CompressionConfig & compression, LogDataStore & ds,
+ BucketCompacter(size_t maxSignificantBucketBits, CompressionConfig compression, LogDataStore & ds,
Executor & executor, const IBucketizer & bucketizer, FileId source, FileId destination);
void write(LockGuard guard, uint32_t chunkId, uint32_t lid, const void *buffer, size_t sz) override ;
void write(BucketId bucketId, uint32_t chunkId, uint32_t lid, const void *buffer, size_t sz) override;
diff --git a/searchlib/src/vespa/searchlib/docstore/documentstore.cpp b/searchlib/src/vespa/searchlib/docstore/documentstore.cpp
index 0853672e949..1e04b7c61db 100644
--- a/searchlib/src/vespa/searchlib/docstore/documentstore.cpp
+++ b/searchlib/src/vespa/searchlib/docstore/documentstore.cpp
@@ -51,7 +51,7 @@ namespace docstore {
class BackingStore {
public:
- BackingStore(IDataStore &store, const CompressionConfig &compression) :
+ BackingStore(IDataStore &store, CompressionConfig compression) :
_backingStore(store),
_compression(compression)
{ }
@@ -60,11 +60,11 @@ public:
void visit(const IDocumentStore::LidVector &lids, const DocumentTypeRepo &repo, IDocumentVisitor &visitor) const;
void write(DocumentIdT, const Value &);
void erase(DocumentIdT) {}
- const CompressionConfig &getCompression() const { return _compression; }
- void reconfigure(const CompressionConfig &compression);
+ CompressionConfig getCompression() const { return _compression.load(std::memory_order_relaxed); }
+ void reconfigure(CompressionConfig compression);
private:
IDataStore &_backingStore;
- CompressionConfig _compression;
+ std::atomic<CompressionConfig> _compression;
};
void
@@ -80,7 +80,7 @@ BackingStore::read(DocumentIdT key, Value &value) const {
vespalib::DataBuffer buf(4_Ki);
ssize_t len = _backingStore.read(key, buf);
if (len > 0) {
- value.set(std::move(buf), len, _compression);
+ value.set(std::move(buf), len, getCompression());
found = true;
}
return found;
@@ -95,8 +95,8 @@ BackingStore::write(DocumentIdT lid, const Value & value)
}
void
-BackingStore::reconfigure(const CompressionConfig &compression) {
- _compression = compression;
+BackingStore::reconfigure(CompressionConfig compression) {
+ _compression.store(compression, std::memory_order_relaxed);
}
using CacheParams = vespalib::CacheParam<
@@ -117,7 +117,6 @@ using docstore::Value;
bool
DocumentStore::Config::operator == (const Config &rhs) const {
return (_maxCacheBytes == rhs._maxCacheBytes) &&
- (_allowVisitCaching == rhs._allowVisitCaching) &&
(_initialCacheEntries == rhs._initialCacheEntries) &&
(_updateStrategy == rhs._updateStrategy) &&
(_compression == rhs._compression);
@@ -126,11 +125,11 @@ DocumentStore::Config::operator == (const Config &rhs) const {
DocumentStore::DocumentStore(const Config & config, IDataStore & store)
: IDocumentStore(),
- _config(config),
_backingStore(store),
_store(std::make_unique<docstore::BackingStore>(_backingStore, config.getCompression())),
_cache(std::make_unique<docstore::Cache>(*_store, config.getMaxCacheBytes())),
_visitCache(std::make_unique<docstore::VisitCache>(store, config.getMaxCacheBytes(), config.getCompression())),
+ _updateStrategy(config.updateStrategy()),
_uncached_lookups(0)
{
_cache->reserveElements(config.getInitialCacheEntries());
@@ -142,9 +141,8 @@ void
DocumentStore::reconfigure(const Config & config) {
_cache->setCapacityBytes(config.getMaxCacheBytes());
_store->reconfigure(config.getCompression());
- _visitCache->reconfigure(_config.getMaxCacheBytes(), config.getCompression());
-
- _config = config;
+ _visitCache->reconfigure(config.getMaxCacheBytes(), config.getCompression());
+ _updateStrategy.store(config.updateStrategy(), std::memory_order_relaxed);
}
bool
@@ -152,10 +150,14 @@ DocumentStore::useCache() const {
return (_cache->capacityBytes() != 0) && (_cache->capacity() != 0);
}
+DocumentStore::Config::UpdateStrategy DocumentStore::updateStrategy() const {
+ return _updateStrategy.load(std::memory_order_relaxed);
+}
+
void
DocumentStore::visit(const LidVector & lids, const DocumentTypeRepo &repo, IDocumentVisitor & visitor) const
{
- if (useCache() && _config.allowVisitCaching() && visitor.allowVisitCaching()) {
+ if (useCache() && visitor.allowVisitCaching()) {
docstore::BlobSet blobSet = _visitCache->read(lids).getBlobSet();
DocumentVisitorAdapter adapter(repo, visitor);
for (DocumentIdT lid : lids) {
@@ -204,7 +206,7 @@ DocumentStore::write(uint64_t syncToken, DocumentIdT lid, const document::Docume
void
DocumentStore::write(uint64_t syncToken, DocumentIdT lid, const vespalib::nbostream & stream) {
if (useCache()) {
- switch (_config.updateStrategy()) {
+ switch (updateStrategy()) {
case Config::UpdateStrategy::INVALIDATE:
_backingStore.write(syncToken, lid, stream.peek(), stream.size());
_cache->invalidate(lid);
@@ -286,14 +288,14 @@ class DocumentStore::WrapVisitor : public IDataStoreVisitor
{
Visitor &_visitor;
const DocumentTypeRepo &_repo;
- const CompressionConfig &_compression;
+ const CompressionConfig _compression;
IDocumentStore &_ds;
uint64_t _syncToken;
public:
void visit(uint32_t lid, const void *buffer, size_t sz) override;
- WrapVisitor(Visitor &visitor, const DocumentTypeRepo &repo, const CompressionConfig &compresion,
+    WrapVisitor(Visitor &visitor, const DocumentTypeRepo &repo, CompressionConfig compression,
IDocumentStore &ds, uint64_t syncToken);
void rewrite(uint32_t lid, const document::Document &doc);
@@ -381,7 +383,7 @@ DocumentStore::WrapVisitor<Visitor>::visit(uint32_t lid, const void *buffer, siz
template <class Visitor>
DocumentStore::WrapVisitor<Visitor>::
-WrapVisitor(Visitor &visitor, const DocumentTypeRepo &repo, const CompressionConfig &compression,
+WrapVisitor(Visitor &visitor, const DocumentTypeRepo &repo, CompressionConfig compression,
IDocumentStore &ds, uint64_t syncToken)
: _visitor(visitor),
_repo(repo),
diff --git a/searchlib/src/vespa/searchlib/docstore/documentstore.h b/searchlib/src/vespa/searchlib/docstore/documentstore.h
index 6270108efb8..024b26b79c6 100644
--- a/searchlib/src/vespa/searchlib/docstore/documentstore.h
+++ b/searchlib/src/vespa/searchlib/docstore/documentstore.h
@@ -29,30 +29,25 @@ public:
_compression(CompressionConfig::LZ4, 9, 70),
_maxCacheBytes(1000000000),
_initialCacheEntries(0),
- _updateStrategy(INVALIDATE),
- _allowVisitCaching(false)
+ _updateStrategy(INVALIDATE)
{ }
- Config(const CompressionConfig & compression, size_t maxCacheBytes, size_t initialCacheEntries) :
+ Config(CompressionConfig compression, size_t maxCacheBytes, size_t initialCacheEntries) :
_compression((maxCacheBytes != 0) ? compression : CompressionConfig::NONE),
_maxCacheBytes(maxCacheBytes),
_initialCacheEntries(initialCacheEntries),
- _updateStrategy(INVALIDATE),
- _allowVisitCaching(false)
+ _updateStrategy(INVALIDATE)
{ }
- const CompressionConfig & getCompression() const { return _compression; }
+ CompressionConfig getCompression() const { return _compression; }
size_t getMaxCacheBytes() const { return _maxCacheBytes; }
size_t getInitialCacheEntries() const { return _initialCacheEntries; }
- bool allowVisitCaching() const { return _allowVisitCaching; }
- Config & allowVisitCaching(bool allow) { _allowVisitCaching = allow; return *this; }
Config & updateStrategy(UpdateStrategy strategy) { _updateStrategy = strategy; return *this; }
UpdateStrategy updateStrategy() const { return _updateStrategy; }
bool operator == (const Config &) const;
private:
CompressionConfig _compression;
- size_t _maxCacheBytes;
- size_t _initialCacheEntries;
- UpdateStrategy _updateStrategy;
- bool _allowVisitCaching;
+ size_t _maxCacheBytes;
+ size_t _initialCacheEntries;
+ UpdateStrategy _updateStrategy;
};
/**
@@ -105,14 +100,15 @@ public:
private:
bool useCache() const;
+ Config::UpdateStrategy updateStrategy() const;
template <class> class WrapVisitor;
class WrapVisitorProgress;
- Config _config;
IDataStore & _backingStore;
std::unique_ptr<docstore::BackingStore> _store;
std::unique_ptr<docstore::Cache> _cache;
std::unique_ptr<docstore::VisitCache> _visitCache;
+ std::atomic<Config::UpdateStrategy> _updateStrategy;
mutable std::atomic<uint64_t> _uncached_lookups;
};
diff --git a/searchlib/src/vespa/searchlib/docstore/logdatastore.h b/searchlib/src/vespa/searchlib/docstore/logdatastore.h
index 9366eb413d9..3289ceb19ba 100644
--- a/searchlib/src/vespa/searchlib/docstore/logdatastore.h
+++ b/searchlib/src/vespa/searchlib/docstore/logdatastore.h
@@ -58,7 +58,7 @@ public:
uint32_t getMaxNumLids() const { return _maxNumLids; }
bool crcOnReadDisabled() const { return _skipCrcOnRead; }
- const CompressionConfig & compactCompression() const { return _compactCompression; }
+ CompressionConfig compactCompression() const { return _compactCompression; }
const WriteableFileChunk::Config & getFileConfig() const { return _fileConfig; }
Config & disableCrcOnRead(bool v) { _skipCrcOnRead = v; return *this;}
diff --git a/searchlib/src/vespa/searchlib/docstore/storebybucket.cpp b/searchlib/src/vespa/searchlib/docstore/storebybucket.cpp
index 651ff111f4e..6d3c39a51dc 100644
--- a/searchlib/src/vespa/searchlib/docstore/storebybucket.cpp
+++ b/searchlib/src/vespa/searchlib/docstore/storebybucket.cpp
@@ -13,7 +13,7 @@ using document::BucketId;
using vespalib::CpuUsage;
using vespalib::makeLambdaTask;
-StoreByBucket::StoreByBucket(MemoryDataStore & backingMemory, Executor & executor, const CompressionConfig & compression) noexcept
+StoreByBucket::StoreByBucket(MemoryDataStore & backingMemory, Executor & executor, CompressionConfig compression) noexcept
: _chunkSerial(0),
_current(),
_where(),
diff --git a/searchlib/src/vespa/searchlib/docstore/storebybucket.h b/searchlib/src/vespa/searchlib/docstore/storebybucket.h
index 1b5f9dc1204..dfe6199aa2e 100644
--- a/searchlib/src/vespa/searchlib/docstore/storebybucket.h
+++ b/searchlib/src/vespa/searchlib/docstore/storebybucket.h
@@ -24,7 +24,7 @@ class StoreByBucket
using ConstBufferRef = vespalib::ConstBufferRef;
using CompressionConfig = vespalib::compression::CompressionConfig;
public:
- StoreByBucket(MemoryDataStore & backingMemory, Executor & executor, const CompressionConfig & compression) noexcept;
+ StoreByBucket(MemoryDataStore & backingMemory, Executor & executor, CompressionConfig compression) noexcept;
//TODO Putting the below move constructor into cpp file fails for some unknown reason. Needs to be resolved.
StoreByBucket(StoreByBucket &&) noexcept = default;
StoreByBucket(const StoreByBucket &) = delete;
diff --git a/searchlib/src/vespa/searchlib/docstore/value.cpp b/searchlib/src/vespa/searchlib/docstore/value.cpp
index bd4d75d944a..8ac43f7a2de 100644
--- a/searchlib/src/vespa/searchlib/docstore/value.cpp
+++ b/searchlib/src/vespa/searchlib/docstore/value.cpp
@@ -55,7 +55,7 @@ compact(size_t sz, vespalib::alloc::Alloc buf) {
}
void
-Value::set(vespalib::DataBuffer &&buf, ssize_t len, const CompressionConfig &compression) {
+Value::set(vespalib::DataBuffer &&buf, ssize_t len, CompressionConfig compression) {
assert(len < std::numeric_limits<uint32_t>::max());
//Underlying buffer must be identical to allow swap.
vespalib::DataBuffer compressed(buf.getData(), 0u);
diff --git a/searchlib/src/vespa/searchlib/docstore/value.h b/searchlib/src/vespa/searchlib/docstore/value.h
index e8f98f0a54e..9e98d4d4122 100644
--- a/searchlib/src/vespa/searchlib/docstore/value.h
+++ b/searchlib/src/vespa/searchlib/docstore/value.h
@@ -35,7 +35,7 @@ public:
* Compress buffer into temporary buffer and copy temporary buffer to
* value along with compression config.
*/
- void set(vespalib::DataBuffer &&buf, ssize_t len, const CompressionConfig &compression);
+ void set(vespalib::DataBuffer &&buf, ssize_t len, CompressionConfig compression);
// Keep buffer uncompressed
void set(vespalib::DataBuffer &&buf, ssize_t len);
diff --git a/searchlib/src/vespa/searchlib/docstore/visitcache.cpp b/searchlib/src/vespa/searchlib/docstore/visitcache.cpp
index fd6146dae47..60c08c281df 100644
--- a/searchlib/src/vespa/searchlib/docstore/visitcache.cpp
+++ b/searchlib/src/vespa/searchlib/docstore/visitcache.cpp
@@ -84,7 +84,7 @@ CompressedBlobSet::CompressedBlobSet() :
CompressedBlobSet::~CompressedBlobSet() = default;
-CompressedBlobSet::CompressedBlobSet(const CompressionConfig &compression, const BlobSet & uncompressed) :
+CompressedBlobSet::CompressedBlobSet(CompressionConfig compression, const BlobSet & uncompressed) :
_compression(compression.type),
_positions(uncompressed.getPositions()),
_buffer()
@@ -144,24 +144,24 @@ bool
VisitCache::BackingStore::read(const KeySet &key, CompressedBlobSet &blobs) const {
VisitCollector collector;
_backingStore.read(key.getKeys(), collector);
- blobs = CompressedBlobSet(_compression, collector.getBlobSet());
+ blobs = CompressedBlobSet(_compression.load(std::memory_order_relaxed), collector.getBlobSet());
return ! blobs.empty();
}
void
-VisitCache::BackingStore::reconfigure(const CompressionConfig &compression) {
- _compression = compression;
+VisitCache::BackingStore::reconfigure(CompressionConfig compression) {
+ _compression.store(compression, std::memory_order_relaxed);
}
-VisitCache::VisitCache(IDataStore &store, size_t cacheSize, const CompressionConfig &compression) :
+VisitCache::VisitCache(IDataStore &store, size_t cacheSize, CompressionConfig compression) :
_store(store, compression),
_cache(std::make_unique<Cache>(_store, cacheSize))
{
}
void
-VisitCache::reconfigure(size_t cacheSize, const CompressionConfig &compression) {
+VisitCache::reconfigure(size_t cacheSize, CompressionConfig compression) {
_store.reconfigure(compression);
_cache->setCapacityBytes(cacheSize);
}
diff --git a/searchlib/src/vespa/searchlib/docstore/visitcache.h b/searchlib/src/vespa/searchlib/docstore/visitcache.h
index 558f01f5e80..9a07c0ceab2 100644
--- a/searchlib/src/vespa/searchlib/docstore/visitcache.h
+++ b/searchlib/src/vespa/searchlib/docstore/visitcache.h
@@ -56,7 +56,6 @@ public:
BlobSet &operator = (BlobSet &&) = default;
~BlobSet();
void append(uint32_t lid, vespalib::ConstBufferRef blob);
- void remove(uint32_t lid);
const Positions & getPositions() const { return _positions; }
vespalib::ConstBufferRef get(uint32_t lid) const;
vespalib::ConstBufferRef getBuffer() const { return vespalib::ConstBufferRef(_buffer.data(), _buffer.size()); }
@@ -75,7 +74,7 @@ class CompressedBlobSet {
public:
using CompressionConfig = vespalib::compression::CompressionConfig;
CompressedBlobSet();
- CompressedBlobSet(const CompressionConfig &compression, const BlobSet & uncompressed);
+ CompressedBlobSet(CompressionConfig compression, const BlobSet & uncompressed);
CompressedBlobSet(CompressedBlobSet && rhs) = default;
CompressedBlobSet & operator=(CompressedBlobSet && rhs) = default;
CompressedBlobSet(const CompressedBlobSet & rhs) = default;
@@ -98,14 +97,14 @@ private:
class VisitCache {
public:
using CompressionConfig = vespalib::compression::CompressionConfig;
- VisitCache(IDataStore &store, size_t cacheSize, const CompressionConfig &compression);
+ VisitCache(IDataStore &store, size_t cacheSize, CompressionConfig compression);
CompressedBlobSet read(const IDocumentStore::LidVector & keys) const;
void remove(uint32_t key);
void invalidate(uint32_t key) { remove(key); }
vespalib::CacheStats getCacheStats() const;
- void reconfigure(size_t cacheSize, const CompressionConfig &compression);
+ void reconfigure(size_t cacheSize, CompressionConfig compression);
private:
/**
* This implments the interface the cache uses when it has a cache miss.
@@ -115,18 +114,18 @@ private:
*/
class BackingStore {
public:
- BackingStore(IDataStore &store, const CompressionConfig &compression) :
+ BackingStore(IDataStore &store, CompressionConfig compression) :
_backingStore(store),
_compression(compression)
{ }
bool read(const KeySet &key, CompressedBlobSet &blobs) const;
void write(const KeySet &, const CompressedBlobSet &) { }
void erase(const KeySet &) { }
- void reconfigure(const CompressionConfig &compression);
+ void reconfigure(CompressionConfig compression);
private:
IDataStore &_backingStore;
- CompressionConfig _compression;
+ std::atomic<CompressionConfig> _compression;
};
using CacheParams = vespalib::CacheParam<
diff --git a/searchlib/src/vespa/searchlib/docstore/writeablefilechunk.h b/searchlib/src/vespa/searchlib/docstore/writeablefilechunk.h
index 4a34b9c9e05..37bb925f11e 100644
--- a/searchlib/src/vespa/searchlib/docstore/writeablefilechunk.h
+++ b/searchlib/src/vespa/searchlib/docstore/writeablefilechunk.h
@@ -26,12 +26,12 @@ public:
using CompressionConfig = vespalib::compression::CompressionConfig;
Config() : Config({CompressionConfig::LZ4, 9, 60}, 0x10000) { }
- Config(const CompressionConfig &compression, size_t maxChunkBytes)
+ Config(CompressionConfig compression, size_t maxChunkBytes)
: _compression(compression),
_maxChunkBytes(maxChunkBytes)
{ }
- const CompressionConfig & getCompression() const { return _compression; }
+ CompressionConfig getCompression() const { return _compression; }
size_t getMaxChunkBytes() const { return _maxChunkBytes; }
bool operator == (const Config & rhs) const {
return (_compression == rhs._compression) && (_maxChunkBytes == rhs._maxChunkBytes);
diff --git a/standalone-container/src/main/sh/standalone-container.sh b/standalone-container/src/main/sh/standalone-container.sh
index 56ceab18dac..c81302c9a2a 100755
--- a/standalone-container/src/main/sh/standalone-container.sh
+++ b/standalone-container/src/main/sh/standalone-container.sh
@@ -124,6 +124,7 @@ StartCommand() {
fi
local pidfile="${VESPA_HOME}/var/run/$service.pid"
+ FixDataDirectory "$(dirname "$pidfile")"
if [ "$force" = false ] && test -r "$pidfile"; then
echo "$service is already running as PID $(< "$pidfile") according to $pidfile"
return
@@ -152,7 +153,6 @@ StartCommand() {
# may be invoked w/o path. In any case, checkjava verifies bare 'java'.
checkjava
- FixDataDirectory "$(dirname "$pidfile")"
local vespa_log="${VESPA_HOME}/logs/vespa/vespa.log"
export VESPA_LOG_TARGET="file:$vespa_log"
diff --git a/vespa-dependencies-enforcer/allowed-maven-dependencies.txt b/vespa-dependencies-enforcer/allowed-maven-dependencies.txt
index 030c51b93dc..abf1029d51c 100644
--- a/vespa-dependencies-enforcer/allowed-maven-dependencies.txt
+++ b/vespa-dependencies-enforcer/allowed-maven-dependencies.txt
@@ -52,23 +52,22 @@ commons-codec:commons-codec:1.15
commons-fileupload:commons-fileupload:1.4
commons-io:commons-io:2.11.0
commons-logging:commons-logging:1.2
-io.airlift:aircompressor:0.21
io.airlift:airline:0.9
io.dropwizard.metrics:metrics-core:3.2.5
io.jsonwebtoken:jjwt-api:0.11.2
io.jsonwebtoken:jjwt-impl:0.11.2
io.jsonwebtoken:jjwt-jackson:0.11.2
-io.netty:netty-buffer:4.1.85.Final
-io.netty:netty-codec:4.1.85.Final
-io.netty:netty-common:4.1.85.Final
-io.netty:netty-handler:4.1.85.Final
-io.netty:netty-resolver:4.1.85.Final
+io.netty:netty-buffer:4.1.86.Final
+io.netty:netty-codec:4.1.86.Final
+io.netty:netty-common:4.1.86.Final
+io.netty:netty-handler:4.1.86.Final
+io.netty:netty-resolver:4.1.86.Final
io.netty:netty-tcnative:2.0.54.Final
io.netty:netty-tcnative-classes:2.0.54.Final
-io.netty:netty-transport:4.1.85.Final
-io.netty:netty-transport-classes-epoll:4.1.85.Final
-io.netty:netty-transport-native-epoll:4.1.85.Final
-io.netty:netty-transport-native-unix-common:4.1.85.Final
+io.netty:netty-transport:4.1.86.Final
+io.netty:netty-transport-classes-epoll:4.1.86.Final
+io.netty:netty-transport-native-epoll:4.1.86.Final
+io.netty:netty-transport-native-unix-common:4.1.86.Final
io.prometheus:simpleclient:0.6.0
io.prometheus:simpleclient_common:0.6.0
javax.annotation:javax.annotation-api:1.2
@@ -93,12 +92,12 @@ org.apache.curator:curator-framework:5.3.0
org.apache.curator:curator-recipes:5.3.0
org.apache.felix:org.apache.felix.framework:7.0.1
org.apache.felix:org.apache.felix.log:1.0.1
-org.apache.httpcomponents:httpclient:4.5.13
-org.apache.httpcomponents:httpcore:4.4.13
-org.apache.httpcomponents:httpmime:4.5.13
-org.apache.httpcomponents.client5:httpclient5:5.1.3
-org.apache.httpcomponents.core5:httpcore5:5.1.3
-org.apache.httpcomponents.core5:httpcore5-h2:5.1.3
+org.apache.httpcomponents:httpclient:4.5.14
+org.apache.httpcomponents:httpcore:4.4.16
+org.apache.httpcomponents:httpmime:4.5.14
+org.apache.httpcomponents.client5:httpclient5:5.1.4
+org.apache.httpcomponents.core5:httpcore5:5.1.5
+org.apache.httpcomponents.core5:httpcore5-h2:5.1.5
org.apache.maven:maven-archiver:3.6.0
org.apache.maven:maven-artifact:3.8.6
org.apache.maven:maven-artifact-manager:2.2.1
diff --git a/vespa-feed-client-api/abi-spec.json b/vespa-feed-client-api/abi-spec.json
index 137c7f32bfe..64b049dc75d 100644
--- a/vespa-feed-client-api/abi-spec.json
+++ b/vespa-feed-client-api/abi-spec.json
@@ -112,6 +112,23 @@
],
"fields" : [ ]
},
+ "ai.vespa.feed.client.FeedClientBuilder$Compression" : {
+ "superClass" : "java.lang.Enum",
+ "interfaces" : [ ],
+ "attributes" : [
+ "public",
+ "final",
+ "enum"
+ ],
+ "methods" : [
+ "public static ai.vespa.feed.client.FeedClientBuilder$Compression[] values()",
+ "public static ai.vespa.feed.client.FeedClientBuilder$Compression valueOf(java.lang.String)"
+ ],
+ "fields" : [
+ "public static final enum ai.vespa.feed.client.FeedClientBuilder$Compression none",
+ "public static final enum ai.vespa.feed.client.FeedClientBuilder$Compression gzip"
+ ]
+ },
"ai.vespa.feed.client.FeedClientBuilder" : {
"superClass" : "java.lang.Object",
"interfaces" : [ ],
@@ -142,6 +159,7 @@
"public abstract ai.vespa.feed.client.FeedClientBuilder setCaCertificates(java.util.Collection)",
"public abstract ai.vespa.feed.client.FeedClientBuilder setEndpointUris(java.util.List)",
"public abstract ai.vespa.feed.client.FeedClientBuilder setProxy(java.net.URI)",
+ "public abstract ai.vespa.feed.client.FeedClientBuilder setCompression(ai.vespa.feed.client.FeedClientBuilder$Compression)",
"public abstract ai.vespa.feed.client.FeedClient build()"
],
"fields" : [
diff --git a/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java
index e8cb5344aff..d48c3c31348 100644
--- a/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java
+++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java
@@ -123,6 +123,11 @@ public interface FeedClientBuilder {
/** Specify HTTP(S) proxy for all endpoints */
FeedClientBuilder setProxy(URI uri);
+    /** What compression to use for request bodies; default {@code none}. */
+ FeedClientBuilder setCompression(Compression compression);
+
+ enum Compression { none, gzip }
+
/** Constructs instance of {@link FeedClient} from builder configuration */
FeedClient build();
diff --git a/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/impl/CliArguments.java b/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/impl/CliArguments.java
index fd36749b109..42f9713c54e 100644
--- a/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/impl/CliArguments.java
+++ b/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/impl/CliArguments.java
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.feed.client.impl;
+import ai.vespa.feed.client.FeedClientBuilder.Compression;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.DefaultParser;
@@ -24,6 +25,8 @@ import java.util.Optional;
import java.util.OptionalDouble;
import java.util.OptionalInt;
+import static ai.vespa.feed.client.FeedClientBuilder.Compression.none;
+
/**
* Parses command line arguments
*
@@ -58,6 +61,7 @@ class CliArguments {
private static final String STDIN_OPTION = "stdin";
private static final String DOOM_OPTION = "max-failure-seconds";
private static final String PROXY_OPTION = "proxy";
+ private static final String COMPRESSION = "compression";
private final CommandLine arguments;
@@ -181,6 +185,15 @@ class CliArguments {
boolean speedTest() { return has(SPEED_TEST_OPTION); }
+ Compression compression() throws CliArgumentsException {
+ try {
+ return stringValue(COMPRESSION).map(Compression::valueOf).orElse(none);
+ }
+ catch (IllegalArgumentException e) {
+ throw new CliArgumentsException("Invalid " + COMPRESSION + " argument: " + e.getMessage(), e);
+ }
+ }
+
OptionalInt testPayloadSize() throws CliArgumentsException { return intValue(TEST_PAYLOAD_SIZE_OPTION); }
Optional<URI> proxy() throws CliArgumentsException {
@@ -354,6 +367,12 @@ class CliArguments {
.desc("URI to proxy endpoint")
.hasArg()
.type(URL.class)
+ .build())
+ .addOption(Option.builder()
+ .longOpt(COMPRESSION)
+ .desc("Compression mode for feed requests: 'none' (default), 'gzip'")
+ .hasArg()
+ .type(Compression.class)
.build());
}
diff --git a/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/impl/CliClient.java b/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/impl/CliClient.java
index 43a16c2abf0..39462d8ba68 100644
--- a/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/impl/CliClient.java
+++ b/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/impl/CliClient.java
@@ -1,7 +1,6 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.feed.client.impl;
-import ai.vespa.feed.client.DocumentId;
import ai.vespa.feed.client.FeedClient;
import ai.vespa.feed.client.FeedClientBuilder;
import ai.vespa.feed.client.FeedException;
@@ -10,7 +9,6 @@ import ai.vespa.feed.client.JsonFeeder.ResultCallback;
import ai.vespa.feed.client.OperationStats;
import ai.vespa.feed.client.Result;
import ai.vespa.feed.client.ResultException;
-import ai.vespa.feed.client.impl.CliArguments.CliArgumentsException;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
@@ -29,14 +27,12 @@ import java.time.Instant;
import java.util.Enumeration;
import java.util.Map;
import java.util.Random;
-import java.util.UUID;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BooleanSupplier;
-import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static java.util.stream.Collectors.joining;
@@ -163,6 +159,7 @@ public class CliClient {
cliArgs.headers().forEach(builder::addRequestHeader);
builder.setDryrun(cliArgs.dryrunEnabled());
builder.setSpeedTest(cliArgs.speedTest());
+ builder.setCompression(cliArgs.compression());
cliArgs.doomSeconds().ifPresent(doom -> builder.setCircuitBreaker(new GracePeriodCircuitBreaker(Duration.ofSeconds(10),
Duration.ofSeconds(doom))));
cliArgs.proxy().ifPresent(builder::setProxy);
diff --git a/vespa-feed-client-cli/src/test/java/ai/vespa/feed/client/impl/CliArgumentsTest.java b/vespa-feed-client-cli/src/test/java/ai/vespa/feed/client/impl/CliArgumentsTest.java
index 073ea4a58db..21e279b0584 100644
--- a/vespa-feed-client-cli/src/test/java/ai/vespa/feed/client/impl/CliArgumentsTest.java
+++ b/vespa-feed-client-cli/src/test/java/ai/vespa/feed/client/impl/CliArgumentsTest.java
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.feed.client.impl;
+import ai.vespa.feed.client.FeedClientBuilder.Compression;
import ai.vespa.feed.client.impl.CliArguments.CliArgumentsException;
import org.junit.jupiter.api.Test;
@@ -24,12 +25,27 @@ class CliArgumentsTest {
@Test
void parses_parameters_correctly() throws CliArguments.CliArgumentsException {
CliArguments args = CliArguments.fromRawArgs(new String[]{
- "--endpoint=https://vespa.ai:4443/", "--file=feed.json", "--connections=10",
- "--max-streams-per-connection=128", "--certificate=cert.pem", "--private-key=key.pem",
- "--ca-certificates=ca-certs.pem", "--disable-ssl-hostname-verification",
- "--header=\"My-Header: my-value\"", "--header", "Another-Header: another-value", "--benchmark",
- "--route=myroute", "--timeout=0.125", "--trace=9", "--verbose", "--silent",
- "--show-errors", "--show-all", "--max-failure-seconds=30", "--proxy", "https://myproxy:1234"});
+ "--endpoint", "https://vespa.ai:4443/",
+ "--file", "feed.json",
+ "--connections", "10",
+ "--max-streams-per-connection", "128",
+ "--certificate", "cert.pem",
+ "--private-key", "key.pem",
+ "--ca-certificates", "ca-certs.pem",
+ "--disable-ssl-hostname-verification",
+ "--header", "\"My-Header: my-value\"",
+ "--header", "Another-Header: another-value",
+ "--benchmark",
+ "--route", "myroute",
+ "--timeout", "0.125",
+ "--trace", "9",
+ "--verbose",
+ "--silent",
+ "--compression", "gzip",
+ "--show-errors",
+ "--show-all",
+ "--max-failure-seconds", "30",
+ "--proxy", "https://myproxy:1234"});
assertEquals(URI.create("https://vespa.ai:4443/"), args.endpoint());
assertEquals(Paths.get("feed.json"), args.inputFile().get());
assertEquals(10, args.connections().getAsInt());
@@ -52,14 +68,15 @@ class CliArgumentsTest {
assertTrue(args.showErrors());
assertTrue(args.showSuccesses());
assertFalse(args.showProgress());
+ assertEquals(Compression.gzip, args.compression());
assertEquals(URI.create("https://myproxy:1234"), args.proxy().orElse(null));
}
@Test
void fails_on_missing_parameters() {
- CliArguments.CliArgumentsException exception = assertThrows(
+ CliArguments.CliArgumentsException exception = assertThrows(
CliArguments.CliArgumentsException.class,
- () -> CliArguments.fromRawArgs(new String[] {"--file=/path/to/file", "--stdin"}));
+ () -> CliArguments.fromRawArgs(new String[] {"--file", "/path/to/file", "--stdin"}));
assertEquals("Endpoint must be specified", exception.getMessage());
}
@@ -67,20 +84,20 @@ class CliArgumentsTest {
void fails_on_conflicting_parameters() throws CliArgumentsException {
assertEquals("Exactly one of 'file' and 'stdin' must be specified",
assertThrows(CliArgumentsException.class,
- () -> CliArguments.fromRawArgs(new String[] {"--endpoint=https://endpoint", "--file=/path/to/file", "--stdin"}))
+ () -> CliArguments.fromRawArgs(new String[] {"--endpoint", "https://endpoint", "--file", "/path/to/file", "--stdin"}))
.getMessage());
assertEquals("Exactly one of 'file' and 'stdin' must be specified",
assertThrows(CliArgumentsException.class,
- () -> CliArguments.fromRawArgs(new String[] {"--endpoint=https://endpoint"}))
+ () -> CliArguments.fromRawArgs(new String[] {"--endpoint", "https://endpoint"}))
.getMessage());
assertEquals("At most one of 'file', 'stdin' and 'test-payload-size' may be specified",
assertThrows(CliArgumentsException.class,
- () -> CliArguments.fromRawArgs(new String[] {"--endpoint=https://endpoint", "--speed-test", "--test-payload-size=123", "--file=file"}))
+ () -> CliArguments.fromRawArgs(new String[] {"--endpoint", "https://endpoint", "--speed-test", "--test-payload-size", "123", "--file", "file"}))
.getMessage());
- CliArguments.fromRawArgs(new String[] {"--endpoint=foo", "--speed-test"});
+ CliArguments.fromRawArgs(new String[] {"--endpoint", "foo", "--speed-test"});
}
@Test
diff --git a/vespa-feed-client-cli/src/test/resources/help.txt b/vespa-feed-client-cli/src/test/resources/help.txt
index e41a78bc932..f33dde82f7b 100644
--- a/vespa-feed-client-cli/src/test/resources/help.txt
+++ b/vespa-feed-client-cli/src/test/resources/help.txt
@@ -6,6 +6,9 @@ Vespa feed client
certificates encoded as PEM
--certificate <arg> Path to PEM encoded X.509
certificate file
+ --compression <arg> Compression mode for feed
+ requests: 'none' (default),
+ 'gzip'
--connections <arg> Number of concurrent HTTP/2
connections
--disable-ssl-hostname-verification Disable SSL hostname
diff --git a/vespa-feed-client/pom.xml b/vespa-feed-client/pom.xml
index 1cc2f2adee1..01b9b00b8a0 100644
--- a/vespa-feed-client/pom.xml
+++ b/vespa-feed-client/pom.xml
@@ -46,6 +46,11 @@
<artifactId>junit-jupiter</artifactId>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>com.github.tomakehurst</groupId>
+ <artifactId>wiremock-jre8-standalone</artifactId>
+ <scope>test</scope>
+ </dependency>
</dependencies>
<build>
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/ApacheCluster.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/ApacheCluster.java
index 3ffbaf136f2..1dda8912046 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/ApacheCluster.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/ApacheCluster.java
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.feed.client.impl;
+import ai.vespa.feed.client.FeedClientBuilder.Compression;
import ai.vespa.feed.client.HttpResponse;
import org.apache.hc.client5.http.async.methods.SimpleHttpRequest;
import org.apache.hc.client5.http.async.methods.SimpleHttpResponse;
@@ -10,6 +11,8 @@ import org.apache.hc.client5.http.impl.async.HttpAsyncClients;
import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder;
import org.apache.hc.core5.concurrent.FutureCallback;
import org.apache.hc.core5.http.ContentType;
+import org.apache.hc.core5.http.Header;
+import org.apache.hc.core5.http.HttpHeaders;
import org.apache.hc.core5.http.HttpHost;
import org.apache.hc.core5.http.message.BasicHeader;
import org.apache.hc.core5.http2.config.H2Config;
@@ -18,6 +21,7 @@ import org.apache.hc.core5.reactor.IOReactorConfig;
import org.apache.hc.core5.util.Timeout;
import javax.net.ssl.SSLContext;
+import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
@@ -29,8 +33,8 @@ import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.zip.GZIPOutputStream;
-import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hc.core5.http.ssl.TlsCiphers.excludeH2Blacklisted;
import static org.apache.hc.core5.http.ssl.TlsCiphers.excludeWeak;
@@ -40,9 +44,11 @@ import static org.apache.hc.core5.http.ssl.TlsCiphers.excludeWeak;
class ApacheCluster implements Cluster {
private final List<Endpoint> endpoints = new ArrayList<>();
- private final List<BasicHeader> defaultHeaders = Arrays.asList(new BasicHeader("User-Agent", String.format("vespa-feed-client/%s", Vespa.VERSION)),
+ private final List<BasicHeader> defaultHeaders = Arrays.asList(new BasicHeader(HttpHeaders.USER_AGENT, String.format("vespa-feed-client/%s", Vespa.VERSION)),
new BasicHeader("Vespa-Client-Version", Vespa.VERSION));
+ private final Header gzipEncodingHeader = new BasicHeader(HttpHeaders.CONTENT_ENCODING, "gzip");
private final RequestConfig requestConfig;
+ private final boolean gzip;
private int someNumber = 0;
private final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(t -> new Thread(t, "request-timeout-thread"));
@@ -52,6 +58,7 @@ class ApacheCluster implements Cluster {
for (URI endpoint : builder.endpoints)
endpoints.add(new Endpoint(createHttpClient(builder), endpoint));
this.requestConfig = createRequestConfig(builder);
+ this.gzip = builder.compression == Compression.gzip;
}
@Override
@@ -77,8 +84,14 @@ class ApacheCluster implements Cluster {
request.setConfig(requestConfig);
defaultHeaders.forEach(request::setHeader);
wrapped.headers().forEach((name, value) -> request.setHeader(name, value.get()));
- if (wrapped.body() != null)
- request.setBody(wrapped.body(), ContentType.APPLICATION_JSON);
+ if (wrapped.body() != null) {
+ byte[] body = wrapped.body();
+ if (gzip) {
+ request.setHeader(gzipEncodingHeader);
+ body = gzipped(body);
+ }
+ request.setBody(body, ContentType.APPLICATION_JSON);
+ }
Future<?> future = endpoint.client.execute(request,
new FutureCallback<SimpleHttpResponse>() {
@@ -96,6 +109,14 @@ class ApacheCluster implements Cluster {
vessel.whenComplete((__, ___) -> endpoint.inflight.decrementAndGet());
}
+ private byte[] gzipped(byte[] content) throws IOException{
+ ByteArrayOutputStream buffer = new ByteArrayOutputStream(1 << 10);
+ try (GZIPOutputStream zip = new GZIPOutputStream(buffer)) {
+ zip.write(content);
+ }
+ return buffer.toByteArray();
+ }
+
@Override
public void close() {
Throwable thrown = null;
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/FeedClientBuilderImpl.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/FeedClientBuilderImpl.java
index 1960991792f..6886dc3d2b9 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/FeedClientBuilderImpl.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/FeedClientBuilderImpl.java
@@ -21,6 +21,7 @@ import java.util.List;
import java.util.Map;
import java.util.function.Supplier;
+import static ai.vespa.feed.client.FeedClientBuilder.Compression.none;
import static java.util.Objects.requireNonNull;
/**
@@ -50,6 +51,7 @@ public class FeedClientBuilderImpl implements FeedClientBuilder {
boolean benchmark = true;
boolean dryrun = false;
boolean speedTest = false;
+ Compression compression = none;
URI proxy;
@@ -200,6 +202,12 @@ public class FeedClientBuilderImpl implements FeedClientBuilder {
return this;
}
+ @Override
+ public FeedClientBuilderImpl setCompression(Compression compression) {
+ this.compression = compression;
+ return this;
+ }
+
/** Constructs instance of {@link ai.vespa.feed.client.FeedClient} from builder configuration */
@Override
public FeedClient build() {
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/ApacheClusterTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/ApacheClusterTest.java
new file mode 100644
index 00000000000..33c043ea271
--- /dev/null
+++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/ApacheClusterTest.java
@@ -0,0 +1,74 @@
+package ai.vespa.feed.client.impl;
+
+import ai.vespa.feed.client.FeedClientBuilder.Compression;
+import ai.vespa.feed.client.HttpResponse;
+import com.github.tomakehurst.wiremock.matching.RequestPatternBuilder;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.RegisterExtension;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.URI;
+import java.time.Duration;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.zip.GZIPOutputStream;
+
+import static com.github.tomakehurst.wiremock.client.WireMock.any;
+import static com.github.tomakehurst.wiremock.client.WireMock.anyRequestedFor;
+import static com.github.tomakehurst.wiremock.client.WireMock.anyUrl;
+import static com.github.tomakehurst.wiremock.client.WireMock.equalTo;
+import static com.github.tomakehurst.wiremock.client.WireMock.okJson;
+import static com.github.tomakehurst.wiremock.client.WireMock.postRequestedFor;
+import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo;
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+class ApacheClusterTest {
+
+ @RegisterExtension
+ final WireMockExtension server = new WireMockExtension();
+
+ @Test
+ void testClient() throws IOException, ExecutionException, InterruptedException, TimeoutException {
+ for (Compression compression : Compression.values()) {
+ try (ApacheCluster cluster = new ApacheCluster(new FeedClientBuilderImpl(List.of(URI.create("http://localhost:" + server.port())))
+ .setCompression(compression))) {
+ server.stubFor(any(anyUrl()))
+ .setResponse(okJson("{}").build());
+
+ CompletableFuture<HttpResponse> vessel = new CompletableFuture<>();
+ cluster.dispatch(new HttpRequest("POST",
+ "/path",
+ Map.of("name1", () -> "value1",
+ "name2", () -> "value2"),
+ "content".getBytes(UTF_8),
+ Duration.ofSeconds(1)),
+ vessel);
+ HttpResponse response = vessel.get(5, TimeUnit.SECONDS);
+ assertEquals("{}", new String(response.body(), UTF_8));
+ assertEquals(200, response.code());
+
+ ByteArrayOutputStream buffer = new ByteArrayOutputStream();
+ try (OutputStream zip = new GZIPOutputStream(buffer)) { zip.write("content".getBytes(UTF_8)); }
+ server.verify(1, anyRequestedFor(anyUrl()));
+ RequestPatternBuilder expected = postRequestedFor(urlEqualTo("/path")).withHeader("name1", equalTo("value1"))
+ .withHeader("name2", equalTo("value2"))
+ .withHeader("Content-Type", equalTo("application/json; charset=UTF-8"))
+ .withRequestBody(equalTo("content"));
+ expected = switch (compression) {
+ case none -> expected.withoutHeader("Content-Encoding");
+ case gzip -> expected.withHeader("Content-Encoding", equalTo("gzip"));
+ };
+ server.verify(1, expected);
+ server.resetRequests();
+ }
+ }
+ }
+
+}
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/WireMockExtension.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/WireMockExtension.java
new file mode 100644
index 00000000000..ef61213889b
--- /dev/null
+++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/WireMockExtension.java
@@ -0,0 +1,42 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client.impl;
+
+import com.github.tomakehurst.wiremock.WireMockServer;
+import com.github.tomakehurst.wiremock.core.Options;
+import com.github.tomakehurst.wiremock.core.WireMockConfiguration;
+import org.junit.jupiter.api.extension.AfterEachCallback;
+import org.junit.jupiter.api.extension.BeforeEachCallback;
+import org.junit.jupiter.api.extension.ExtensionContext;
+
+/**
+ * Allows wiremock to be used as a JUnit 5 extension, like
+ * <pre>
+ *
+ * &#64;RegisterExtension
+ * WireMockExtension mockServer1 = new WireMockExtension();
+ * </pre>
+ */
+public class WireMockExtension extends WireMockServer implements BeforeEachCallback, AfterEachCallback {
+
+ public WireMockExtension() {
+ this(WireMockConfiguration.options()
+ .dynamicPort()
+ .dynamicHttpsPort());
+ }
+
+ public WireMockExtension(Options options) {
+ super(options);
+ }
+
+ @Override
+ public void beforeEach(ExtensionContext extensionContext) {
+ start();
+ }
+
+ @Override
+ public void afterEach(ExtensionContext extensionContext) {
+ stop();
+ resetAll();
+ }
+
+}
diff --git a/vespaclient-java/src/main/java/com/yahoo/dummyreceiver/DummyReceiver.java b/vespaclient-java/src/main/java/com/yahoo/dummyreceiver/DummyReceiver.java
index c2985996bd0..51668a2d7f7 100755
--- a/vespaclient-java/src/main/java/com/yahoo/dummyreceiver/DummyReceiver.java
+++ b/vespaclient-java/src/main/java/com/yahoo/dummyreceiver/DummyReceiver.java
@@ -71,7 +71,6 @@ public class DummyReceiver implements MessageHandler {
params.setRPCNetworkParams(new RPCNetworkParams().setIdentity(new Identity(name)));
params.setDocumentManagerConfigId("client");
params.getMessageBusParams().setMaxPendingCount(0);
- params.getMessageBusParams().setMaxPendingSize(0);
da = new MessageBusDocumentAccess(params);
queue = new LinkedBlockingDeque<>();
session = da.getMessageBus().createDestinationSession("default", true, this);
diff --git a/vespaclient-java/src/main/java/com/yahoo/vespa/security/tool/crypto/CipherUtils.java b/vespaclient-java/src/main/java/com/yahoo/vespa/security/tool/crypto/CipherUtils.java
index 5cb40aa8f3b..5834a166fb6 100644
--- a/vespaclient-java/src/main/java/com/yahoo/vespa/security/tool/crypto/CipherUtils.java
+++ b/vespaclient-java/src/main/java/com/yahoo/vespa/security/tool/crypto/CipherUtils.java
@@ -1,6 +1,8 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.security.tool.crypto;
+import ai.vespa.airlift.zstd.ZstdInputStream;
+import com.yahoo.compress.ZstdOutputStream;
import com.yahoo.security.AeadCipher;
import java.io.IOException;
@@ -29,4 +31,26 @@ public class CipherUtils {
}
}
+ private static OutputStream maybeWrapCompress(OutputStream out, boolean compressZstd) throws IOException {
+ return compressZstd ? new ZstdOutputStream(out) : out;
+ }
+
+ public static void streamEncrypt(InputStream input, OutputStream output, AeadCipher cipher, boolean compressZstd) throws IOException {
+ try (var out = maybeWrapCompress(cipher.wrapOutputStream(output), compressZstd)) {
+ input.transferTo(out);
+ out.flush();
+ }
+ }
+
+ private static InputStream maybeWrapDecompress(InputStream in, boolean decompressZstd) throws IOException {
+ return decompressZstd ? new ZstdInputStream(in) : in;
+ }
+
+ public static void streamDecrypt(InputStream input, OutputStream output, AeadCipher cipher, boolean decompressZstd) throws IOException {
+ try (var in = maybeWrapDecompress(cipher.wrapInputStream(input), decompressZstd)) {
+ in.transferTo(output);
+ output.flush();
+ }
+ }
+
}
diff --git a/vespaclient-java/src/main/java/com/yahoo/vespa/security/tool/crypto/DecryptTool.java b/vespaclient-java/src/main/java/com/yahoo/vespa/security/tool/crypto/DecryptTool.java
index ea79fe12c3d..ce3f5a89cd5 100644
--- a/vespaclient-java/src/main/java/com/yahoo/vespa/security/tool/crypto/DecryptTool.java
+++ b/vespaclient-java/src/main/java/com/yahoo/vespa/security/tool/crypto/DecryptTool.java
@@ -29,6 +29,7 @@ public class DecryptTool implements Tool {
static final String OUTPUT_FILE_OPTION = "output-file";
static final String EXPECTED_KEY_ID_OPTION = "expected-key-id";
+ static final String ZSTD_DECOMPRESS_OPTION = "zstd-decompress";
static final String TOKEN_OPTION = "token";
private static final List<Option> OPTIONS = List.of(
@@ -65,6 +66,12 @@ public class DecryptTool implements Tool {
.required(false)
.desc("Expected key ID in token. If this is not provided, the key ID is not verified.")
.build(),
+ Option.builder("z")
+ .longOpt(ZSTD_DECOMPRESS_OPTION)
+ .hasArg(false)
+ .required(false)
+ .desc("Decrypted data will be transparently Zstd-decompressed before being output.")
+ .build(),
Option.builder("t")
.longOpt(TOKEN_OPTION)
.hasArg(true)
@@ -107,10 +114,11 @@ public class DecryptTool implements Tool {
!CliUtils.useStdIo(inputArg) && !CliUtils.useStdIo(outputArg));
var secretShared = SharedKeyGenerator.fromSealedKey(sealedSharedKey, privateKey);
var cipher = SharedKeyGenerator.makeAesGcmDecryptionCipher(secretShared);
+ boolean unZstd = arguments.hasOption(ZSTD_DECOMPRESS_OPTION);
try (var inStream = CliUtils.inputStreamFromFileOrStream(inputArg, invocation.stdIn());
var outStream = CliUtils.outputStreamToFileOrStream(outputArg, invocation.stdOut())) {
- CipherUtils.streamEncipher(inStream, outStream, cipher);
+ CipherUtils.streamDecrypt(inStream, outStream, cipher, unZstd);
}
} catch (IOException e) {
throw new RuntimeException(e);
diff --git a/vespaclient-java/src/main/java/com/yahoo/vespa/security/tool/crypto/EncryptTool.java b/vespaclient-java/src/main/java/com/yahoo/vespa/security/tool/crypto/EncryptTool.java
index 962b42f4c22..81a3eecce6b 100644
--- a/vespaclient-java/src/main/java/com/yahoo/vespa/security/tool/crypto/EncryptTool.java
+++ b/vespaclient-java/src/main/java/com/yahoo/vespa/security/tool/crypto/EncryptTool.java
@@ -28,6 +28,7 @@ public class EncryptTool implements Tool {
static final String OUTPUT_FILE_OPTION = "output-file";
static final String KEY_ID_OPTION = "key-id";
static final String RECIPIENT_PUBLIC_KEY_OPTION = "recipient-public-key";
+ static final String ZSTD_COMPRESS_OPTION = "zstd-compress";
private static final List<Option> OPTIONS = List.of(
Option.builder("o")
@@ -47,6 +48,12 @@ public class EncryptTool implements Tool {
.hasArg(true)
.required(false)
.desc("ID of recipient key")
+ .build(),
+ Option.builder("z")
+ .longOpt(ZSTD_COMPRESS_OPTION)
+ .hasArg(false)
+ .required(false)
+ .desc("Input data will be transparently Zstd-compressed before being encrypted.")
.build());
@Override
@@ -78,13 +85,14 @@ public class EncryptTool implements Tool {
var outputPath = Paths.get(CliUtils.optionOrThrow(arguments, OUTPUT_FILE_OPTION));
var recipientPubKey = KeyUtils.fromBase58EncodedX25519PublicKey(CliUtils.optionOrThrow(arguments, RECIPIENT_PUBLIC_KEY_OPTION).strip());
- var keyId = KeyId.ofString(CliUtils.optionOrThrow(arguments, KEY_ID_OPTION));
- var shared = SharedKeyGenerator.generateForReceiverPublicKey(recipientPubKey, keyId);
- var cipher = SharedKeyGenerator.makeAesGcmEncryptionCipher(shared);
+ var keyId = KeyId.ofString(CliUtils.optionOrThrow(arguments, KEY_ID_OPTION));
+ var shared = SharedKeyGenerator.generateForReceiverPublicKey(recipientPubKey, keyId);
+ var cipher = SharedKeyGenerator.makeAesGcmEncryptionCipher(shared);
+ boolean zstd = arguments.hasOption(ZSTD_COMPRESS_OPTION);
try (var inStream = CliUtils.inputStreamFromFileOrStream(inputArg, invocation.stdIn());
var outStream = Files.newOutputStream(outputPath)) {
- CipherUtils.streamEncipher(inStream, outStream, cipher);
+ CipherUtils.streamEncrypt(inStream, outStream, cipher, zstd);
}
invocation.stdOut().println(shared.sealedSharedKey().toTokenString());
diff --git a/vespaclient-java/src/test/java/com/yahoo/vespa/security/tool/CryptoToolsTest.java b/vespaclient-java/src/test/java/com/yahoo/vespa/security/tool/CryptoToolsTest.java
index d7b8f1f09ae..f55278342e1 100644
--- a/vespaclient-java/src/test/java/com/yahoo/vespa/security/tool/CryptoToolsTest.java
+++ b/vespaclient-java/src/test/java/com/yahoo/vespa/security/tool/CryptoToolsTest.java
@@ -537,6 +537,50 @@ public class CryptoToolsTest {
assertEquals("", procOut.stdOut());
}
+ @Test
+ void can_transparently_compress_and_decompress_plaintext() throws IOException {
+ String substring = "here is some stuff that can be compressed!";
+ String compressibleSecret = substring.repeat(100);
+
+ var secretFile = pathInTemp("secret.txt");
+ Files.writeString(secretFile, compressibleSecret);
+
+ var privKeyFile = pathInTemp("my-priv.txt");
+ writePrivateKeyFile(privKeyFile, TEST_PRIV_KEY);
+
+ var encryptedPath = pathInTemp("encrypted.bin");
+ var procOut = runMain(List.of(
+ "encrypt",
+ absPathOf(secretFile),
+ "--output-file", absPathOf(encryptedPath),
+ "--recipient-public-key", TEST_PUB_KEY,
+ "--key-id", "1234",
+ "--zstd-compress"));
+ assertEquals(0, procOut.exitCode());
+ assertEquals("", procOut.stdErr());
+
+ var token = procOut.stdOut();
+ assertFalse(token.isBlank());
+
+ assertTrue(Files.exists(encryptedPath));
+ assertTrue(Files.size(encryptedPath) < compressibleSecret.length());
+
+ var decryptedPath = pathInTemp("decrypted.txt");
+ procOut = runMain(List.of(
+ "decrypt",
+ absPathOf(encryptedPath),
+ "--output-file", absPathOf(decryptedPath),
+ "--private-key-file", absPathOf(privKeyFile),
+ "--token", token,
+ "--zstd-decompress"
+ ));
+ assertEquals(0, procOut.exitCode());
+ assertEquals("", procOut.stdOut());
+ assertEquals("", procOut.stdErr());
+
+ assertEquals(compressibleSecret, Files.readString(decryptedPath));
+ }
+
private ProcessOutput runMain(List<String> args) {
return runMain(args, EMPTY_BYTES);
}
diff --git a/vespaclient-java/src/test/resources/expected-decrypt-help-output.txt b/vespaclient-java/src/test/resources/expected-decrypt-help-output.txt
index f00db2bb6b9..ab47d11c602 100644
--- a/vespaclient-java/src/test/resources/expected-decrypt-help-output.txt
+++ b/vespaclient-java/src/test/resources/expected-decrypt-help-output.txt
@@ -21,5 +21,7 @@ the quotes).
plaintext to STDOUT instead of a file.
-t,--token <arg> Token generated when the input file was
encrypted
+ -z,--zstd-decompress Decrypted data will be transparently
+ Zstd-decompressed before being output.
Note: this is a BETA tool version; its interface may be changed at any
time
diff --git a/vespaclient-java/src/test/resources/expected-encrypt-help-output.txt b/vespaclient-java/src/test/resources/expected-encrypt-help-output.txt
index 46185b29986..55cdfc73e9a 100644
--- a/vespaclient-java/src/test/resources/expected-encrypt-help-output.txt
+++ b/vespaclient-java/src/test/resources/expected-encrypt-help-output.txt
@@ -12,5 +12,7 @@ the quotes).
already exists)
-r,--recipient-public-key <arg> Recipient X25519 public key in Base58
encoded format
+ -z,--zstd-compress Input data will be transparently
+ Zstd-compressed before being encrypted.
Note: this is a BETA tool version; its interface may be changed at any
time
diff --git a/vespaclient/CMakeLists.txt b/vespaclient/CMakeLists.txt
index 9593304cccd..912b35fa763 100644
--- a/vespaclient/CMakeLists.txt
+++ b/vespaclient/CMakeLists.txt
@@ -14,6 +14,5 @@ vespa_define_module(
src/vespa/vespaclient/clusterlist
APPS
- src/vespa/vespaclient/vdsstates
src/vespa/vespaclient/vesparoute
)
diff --git a/vespaclient/src/vespa/vespaclient/vdsstates/.gitignore b/vespaclient/src/vespa/vespaclient/vdsstates/.gitignore
deleted file mode 100644
index 30187c17166..00000000000
--- a/vespaclient/src/vespa/vespaclient/vdsstates/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-.*.swp
-.depend
-Makefile
-vdsgetnodestate
-vdsgetnodestate-bin
diff --git a/vespaclient/src/vespa/vespaclient/vdsstates/CMakeLists.txt b/vespaclient/src/vespa/vespaclient/vdsstates/CMakeLists.txt
deleted file mode 100644
index 01367a788ae..00000000000
--- a/vespaclient/src/vespa/vespaclient/vdsstates/CMakeLists.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-vespa_add_executable(vespaclient_vdsgetnodestate_app
- SOURCES
- statesapp.cpp
- OUTPUT_NAME vdsgetnodestate-bin
- DEPENDS
- vespaclient_clusterlist
-)
diff --git a/vespaclient/src/vespa/vespaclient/vdsstates/statesapp.cpp b/vespaclient/src/vespa/vespaclient/vdsstates/statesapp.cpp
deleted file mode 100644
index 017abfdf28c..00000000000
--- a/vespaclient/src/vespa/vespaclient/vdsstates/statesapp.cpp
+++ /dev/null
@@ -1,463 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/defaults.h>
-#include <vespa/slobrok/sbmirror.h>
-#include <vespa/fnet/frt/supervisor.h>
-#include <vespa/fnet/frt/target.h>
-#include <vespa/fnet/frt/rpcrequest.h>
-#include <vespa/vdslib/distribution/distribution.h>
-#include <vespa/vdslib/state/clusterstate.h>
-#include <vespa/vespalib/util/programoptions.h>
-#include <vespa/vespaclient/clusterlist/clusterlist.h>
-#include <vespa/vespalib/util/time.h>
-#include <vespa/vespalib/text/lowercase.h>
-#include <vespa/config-stor-distribution.h>
-#include <vespa/config/helper/configgetter.hpp>
-#include <vespa/vespalib/util/signalhandler.h>
-#include <sstream>
-#include <iostream>
-#include <thread>
-#include <cstdlib>
-#include <sys/time.h>
-
-#include <vespa/log/log.h>
-LOG_SETUP("vdsstatetool");
-
-namespace storage {
-
-enum Mode { SETNODESTATE, GETNODESTATE, GETCLUSTERSTATE };
-
-namespace {
- Mode getMode(std::string calledAs) {
- std::string::size_type pos = calledAs.rfind('/');
- if (pos != std::string::npos) {
- calledAs = calledAs.substr(pos + 1);
- }
- if (calledAs == "vdssetnodestate-bin") return SETNODESTATE;
- if (calledAs == "vdsgetclusterstate-bin") return GETCLUSTERSTATE;
- if (calledAs == "vdsgetsystemstate-bin") return GETCLUSTERSTATE;
- if (calledAs == "vdsgetnodestate-bin") return GETNODESTATE;
- std::cerr << "Tool called through unknown name '" << calledAs << "'. Assuming you want to "
- << "get node state.\n";
- return GETNODESTATE;
- }
-
- uint64_t getTimeInMillis() {
- struct timeval t;
- gettimeofday(&t, 0);
- return (t.tv_sec * uint64_t(1000)) + (t.tv_usec / uint64_t(1000));
- }
-
- struct Sorter {
- bool operator()(const std::pair<std::string, std::string>& first,
- const std::pair<std::string, std::string>& second)
- { return (first.first < second.first); }
- };
-
- const lib::State* getState(const std::string& s) {
- vespalib::string lower = vespalib::LowerCase::convert(s);
- if (lower == "up") return &lib::State::UP;
- if (lower == "down") return &lib::State::DOWN;
- if (lower == "retired") return &lib::State::RETIRED;
- if (lower == "maintenance") return &lib::State::MAINTENANCE;
- return 0;
- }
-
- template<typename T>
- struct ConfigReader : public T::Subscriber
- {
- T config;
-
- ConfigReader(const std::string& configId) {
- T::subscribe(configId, *this);
- }
- void configure(const T& c) { config = c; }
- };
-}
-
-struct Options : public vespalib::ProgramOptions {
- Mode _mode;
- bool _showSyntax;
- std::string _clusterName;
- vespaclient::ClusterList::Cluster _cluster;
- uint32_t _nodeIndex;
- std::string _slobrokConfigId;
- std::string _slobrokConnectionSpec;
- std::string _nodeType;
- bool _nonfriendlyOutput;
- std::string _state;
- std::string _message;
- std::string _doc;
- uint32_t _slobrokTimeout;
-
- Options(Mode mode);
- ~Options();
-
- bool validate() {
- if (_nodeType != ""
- && _nodeType != "storage" && _nodeType != "distributor")
- {
- std::cerr << "Illegal nodetype '" << _nodeType << "'.\n";
- return false;
- }
- if (_mode == SETNODESTATE) {
- const lib::State* state = getState(_state);
- if (state == 0) {
- std::cerr << "Illegal state '" << _state << "'.\n";
- return false;
- }
- if (*state == lib::State::RETIRED ||
- *state == lib::State::MAINTENANCE)
- {
- if (_nodeType != "storage") {
- std::cerr << "Given state is only valid for storage nodes. "
- << "Thus you need to specify only to\n"
- << "set state of storage nodes.\n";
- return false;
- }
- }
- if (*state != lib::State::UP && *state != lib::State::RETIRED
- && _message == "")
- {
- std::cerr << "You should always have a reason for setting the "
- "node in a non-available state.\n";
- return false;
- }
- }
-
- vespaclient::ClusterList clusterList;
- try {
- _cluster = clusterList.verifyContentCluster(_clusterName);
- _clusterName = _cluster.getName();
- } catch (const vespaclient::ClusterList::ClusterNotFoundException& e) {
- std::cerr << e.getMessage() << "\n";
- std::_Exit(1);
- }
- return true;
- }
-};
-
-Options::Options(Mode mode)
- : _mode(mode), _cluster("", ""), _nodeIndex(0xffffffff), _nonfriendlyOutput(false), _slobrokTimeout(0)
-{
- _doc = "https://yahoo.github.io/vespa/";
- if (_mode == SETNODESTATE) {
- setSyntaxMessage(
- "Set the wanted node state of a storage node. This will "
- "override the state the node is in in the cluster state, if "
- "the current state is \"better\" than the wanted state. "
- "For instance, a node that is currently in initializing state "
- "can be forced into down state, while a node that is currently"
- " down can not be forced into retired state, but can be forced"
- " into maintenance state.\n\n"
- "For more info on states refer to\n" + _doc
- );
- } else if (_mode == GETCLUSTERSTATE) {
- setSyntaxMessage(
- "Get the cluster state of a given cluster.\n\n"
- "For more info on states refer to\n" + _doc
- );
- } else {
- setSyntaxMessage(
- "Retrieve the state of a one or more storage services from the "
- "fleet controller. Will list the state of the locally running "
- "services, possibly restricted to less by options.\n\n"
- "The result will show the slobrok address of the service, and "
- "three states. The first state will show how the state of that "
- "given service looks in the current cluster state. This state "
- "is the state the fleetcontroller is reporting to all nodes "
- "in the cluster this service is in. The second state is the "
- "reported state, which is the state the given node is reporting"
- " to be in itself. The third state is the wanted state, which "
- "is the state we want the node to be in. In most cases this "
- "should be the up state, but in some cases the fleet controller"
- " or an administrator may have set the wanted state otherwise, "
- "in order to get problem nodes out of the cluster.\n\n"
- "For more info on states refer to\n" + _doc
- );
- }
- addOption("h help", _showSyntax, false,
- "Show this help page.");
-
- addOption("c cluster", _clusterName, std::string("storage"),
- "Which cluster to connect to. By default it will attempt to connect to cluster named 'storage'.");
- if (_mode != GETCLUSTERSTATE) {
- addOption("t type", _nodeType, std::string(""),
- "Node type to query. This can either be 'storage' or "
- "'distributor'. If not specified, the operation will "
- "affect both types.");
- addOption("i index", _nodeIndex, uint32_t(0xffffffff),
- "The node index of the distributor or storage node to "
- "contact. If not specified, all indexes running locally "
- "on this node will be queried");
- }
- if (_mode != SETNODESTATE) {
- addOption("r raw", _nonfriendlyOutput, false,
- "Show the serialized state formats directly instead of "
- "reformatting them to look more user friendly.");
- }
- if (_mode == SETNODESTATE) {
- addArgument("Wanted state", _state,
- "Wanted state to set node in. "
- "This must be one of up, down or maintenance. Or if "
- "it's not a distributor it can also be retired.");
- addArgument("Reason", _message, std::string(""),
- "Give a reason for why you're altering the wanted "
- "state, which will show up in various admin tools. "
- "(Use double quotes to give a reason with whitespace "
- "in it)");
- }
- addOptionHeader("Advanced options. Not needed for most usecases");
- addOption("l slobrokconfig", _slobrokConfigId,
- std::string("client"),
- "Config id of slobrok. Will use the default config id of client if not specified.");
- addOption("p slobrokspec", _slobrokConnectionSpec, std::string(""),
- "Slobrok connection spec. By setting this, this application "
- "will not need config at all, but will use the given "
- "connection spec to talk with slobrok.");
- addOption("s slobroktimeout", _slobrokTimeout, uint32_t(5 * 60),
- "Seconds to wait for slobrok client to connect to a slobrok server before failing.");
-}
-Options::~Options() {}
-
-
-struct StateApp {
- Options _options;
-
- StateApp(std::string calledAs) : _options(getMode(calledAs)) {}
-
- int main(int argc, char **argv) {
- _options.setCommandLineArguments(argc, argv);
- try{
- _options.parse();
- } catch (vespalib::InvalidCommandLineArgumentsException& e) {
- if (!_options._showSyntax) {
- std::cerr << e.getMessage() << "\n";
- _options.writeSyntaxPage(std::cerr, false);
- std::cerr << "\n";
- return 1;
- }
- }
- if (_options._showSyntax) {
- _options.writeSyntaxPage(std::cerr, false);
- std::cerr << "\n";
- return 0;
- }
- if (!_options.validate()) {
- _options.writeSyntaxPage(std::cerr, false);
- return 1;
- }
- return run();
- }
-
- int run() {
- fnet::frt::StandaloneFRT supervisor;
-
- std::unique_ptr<slobrok::api::MirrorAPI> slobrok;
- if (_options._slobrokConnectionSpec == "") {
- config::ConfigUri config(_options._slobrokConfigId);
- slobrok = std::make_unique<slobrok::api::MirrorAPI>(supervisor.supervisor(), slobrok::ConfiguratorFactory(config));
- } else {
- std::vector<std::string> specList;
- specList.push_back(_options._slobrokConnectionSpec);
- slobrok = std::make_unique<slobrok::api::MirrorAPI>(supervisor.supervisor(), slobrok::ConfiguratorFactory(specList));
- }
- LOG(debug, "Waiting for slobrok data to be available.");
- uint64_t startTime = getTimeInMillis();
- uint64_t warnTime = 5 * 1000;
- uint64_t timeout = _options._slobrokTimeout * 1000;
- while (true) {
- uint64_t currentTime = getTimeInMillis();
- if (currentTime >= startTime + timeout) break;
- if (slobrok->ready()) break;
- if (currentTime >= startTime + warnTime) {
- if (warnTime > 5000) {
- std::cerr << "Still waiting for slobrok to respond. Have "
- << "gotten no response in "
- << ((currentTime - startTime) / 1000)
- << " seconds.\n";
- } else {
- std::cerr << "Waiting for slobrok server to respond. Have "
- << "gotten no response in "
- << ((currentTime - startTime) / 1000) << "\n"
- << "seconds. Likely cause being one or more "
- << "slobrok server nodes being down.\n(Thus not "
- << "replying that socket is closed)\n";
- }
- warnTime *= 4;
- }
- std::this_thread::sleep_for(10ms);
- }
- if (!slobrok->ready()) {
- std::cerr << "Slobrok not ready.\n";
- return 1;
- }
-
- config::ConfigUri uri(_options._cluster.getConfigId());
- lib::Distribution distribution(*config::ConfigGetter<vespa::config::content::StorDistributionConfig>::getConfig(uri.getConfigId(), uri.getContext()));
-
- LOG(debug, "Got slobrok data");
- std::string mask = "storage/cluster." + _options._cluster.getName() + "/fleetcontroller/*";
- slobrok::api::MirrorAPI::SpecList specs = slobrok->lookup(mask);
- if (specs.size() == 0) {
- std::cerr << "No fleet controller could be found for '"
- << mask << ".\n";
- return 1;
- }
- std::sort(specs.begin(), specs.end(), Sorter());
- LOG(debug, "Found fleet controller %s - %s\n",
- specs.front().first.c_str(), specs.front().second.c_str());
- FRT_Target *target = supervisor.supervisor().GetTarget(specs.front().second.c_str());
- if (!_options._nonfriendlyOutput && _options._mode == GETNODESTATE)
- {
- std::cerr <<
-"Shows the various states of one or more nodes in a Vespa Storage cluster.\n"
-"There exist three different type of node states. They are:\n"
-"\n"
-" Reported state - The state reported to the fleet controller by the node.\n"
-" Wanted state - The state administrators want the node to be in.\n"
-" Current state - The state of a given node in the current cluster state.\n"
-" This is the state all the other nodes know about. This\n"
-" state is a product of the other two states and fleet\n"
-" controller logic to keep the cluster stable.\n"
-"\n"
-"For more information about states of Vespa storage nodes, refer to\n"
- << _options._doc << "\n\n";
- }
- bool failed = false;
- for (int i=0; i<2; ++i) {
- std::string nodeType(_options._nodeType);
- if ((_options._nodeType != "" || _options._mode == GETCLUSTERSTATE)
- && i > 0)
- {
- break;
- }
- if (_options._nodeType == "") {
- nodeType = (i == 0 ? "storage" : "distributor");
- }
- std::vector<uint32_t> indexes;
- if (_options._nodeIndex != 0xffffffff
- || _options._mode == GETCLUSTERSTATE)
- {
- indexes.push_back(_options._nodeIndex);
- } else {
- std::string hostname(vespa::Defaults::vespaHostname());
- FRT_RPCRequest* req = supervisor.supervisor().AllocRPCRequest();
- req->SetMethodName("getNodeList");
- target->InvokeSync(req, 10.0);
- std::string prefix = _options._cluster.getConfigId() + "/" + nodeType + "/";
- failed = (req->GetErrorCode() != FRTE_NO_ERROR);
- if (failed) {
- std::cerr << "Failed RPC call against "
- << specs.front().second << ".\nError "
- << req->GetErrorCode() << " : "
- << req->GetErrorMessage() << "\n";
- break;
- }
- uint32_t arraySize(
- req->GetReturn()->GetValue(0)._string_array._len);
- for (uint32_t j=0; j<arraySize; ++j) {
- std::string slobrokAddress(req->GetReturn()->GetValue(0)
- ._string_array._pt[j]._str);
- std::string rpcAddress(req->GetReturn()->GetValue(1)
- ._string_array._pt[j]._str);
- std::string::size_type pos = slobrokAddress.find(prefix);
- std::string::size_type match = rpcAddress.find(hostname);
- //std::cerr << "1. '" << slobrokAddress << "'.\n";
- //std::cerr << "2. '" << rpcAddress << "'.\n";
- if (pos != std::string::npos && match != std::string::npos)
- {
- uint32_t index = atoi(slobrokAddress.substr(
- pos + prefix.size()).c_str());
- indexes.push_back(index);
- }
- }
- }
- if (indexes.size() == 0) {
- std::cerr << "Could not find any storage or distributor "
- << "services on this node.\n"
- << "Specify node index with --index parameter.\n";
- failed = true;
- break;
- }
- for (uint32_t j=0; j<indexes.size(); ++j) {
- FRT_RPCRequest* req = supervisor.supervisor().AllocRPCRequest();
- if (_options._mode == GETNODESTATE) {
- req->SetMethodName("getNodeState");
- req->GetParams()->AddString(nodeType.c_str());
- req->GetParams()->AddInt32(indexes[j]);
- } else if (_options._mode == SETNODESTATE) {
- req->SetMethodName("setNodeState");
- std::ostringstream address;
- address << _options._cluster.getConfigId() << "/"
- << nodeType << "/" << indexes[j];
- lib::NodeState ns(lib::NodeType::get(nodeType),
- *getState(_options._state));
- ns.setDescription(_options._message);
- req->GetParams()->AddString(address.str().c_str());
- req->GetParams()->AddString(ns.toString(false).c_str());
- } else {
- req->SetMethodName("getSystemState");
- }
- target->InvokeSync(req, 10.0);
- failed = (req->GetErrorCode() != FRTE_NO_ERROR);
- if (failed) {
- std::cerr << "Failed RPC call against "
- << specs.front().second
- << ".\nError " << req->GetErrorCode() << " : "
- << req->GetErrorMessage() << "\n";
- break;
- } else {
- bool friendly = !_options._nonfriendlyOutput;
- if (_options._mode == GETNODESTATE) {
- lib::NodeState current(
- req->GetReturn()->GetValue(0)._string._str);
- lib::NodeState reported(
- req->GetReturn()->GetValue(1)._string._str);
- lib::NodeState wanted(
- req->GetReturn()->GetValue(2)._string._str);
- std::cout << "Node state of "
- << _options._cluster.getConfigId() << "/" << nodeType
- << "/" << indexes[j];
- std::cout << "\nCurrent state: ";
- current.print(std::cout, friendly, " ");
- std::cout << "\nReported state ";
- reported.print(std::cout, friendly, " ");
- std::cout << "\nWanted state: ";
- wanted.print(std::cout, friendly, " ");
- std::cout << "\n\n";
- } else if (_options._mode == SETNODESTATE) {
- std::string result(
- req->GetReturn()->GetValue(0)._string._str);
- if (result != "") {
- std::cout << result << "\n";
- }
- } else {
- std::string rawstate(
- req->GetReturn()->GetValue(1)._string._str);
- lib::ClusterState state(rawstate);
- if (friendly) {
- state.printStateGroupwise(std::cout, distribution,
- true, "");
- } else {
- std::cout << rawstate << "\n";
- }
- std::cout << "\n";
- }
- }
- req->SubRef();
- }
- }
- target->SubRef();
- return (failed ? 1 : 0);
- }
-};
-
-} // storage
-
-int main(int argc, char **argv) {
- vespalib::SignalHandler::PIPE.ignore();
- assert(argc > 0);
- storage::StateApp client(argv[0]);
- return client.main(argc, argv);
-}
diff --git a/vespajlib/pom.xml b/vespajlib/pom.xml
index d903fb5ec0d..ff244ad07f8 100644
--- a/vespajlib/pom.xml
+++ b/vespajlib/pom.xml
@@ -32,8 +32,9 @@
<artifactId>jna</artifactId>
</dependency>
<dependency>
- <groupId>io.airlift</groupId>
- <artifactId>aircompressor</artifactId>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>airlift-zstd</artifactId>
+ <version>${project.version}</version>
</dependency>
<dependency>
<groupId>net.openhft</groupId>
diff --git a/vespajlib/src/main/java/com/yahoo/compress/ZstdCompressor.java b/vespajlib/src/main/java/com/yahoo/compress/ZstdCompressor.java
index 29a58dbde47..4900351bdd7 100644
--- a/vespajlib/src/main/java/com/yahoo/compress/ZstdCompressor.java
+++ b/vespajlib/src/main/java/com/yahoo/compress/ZstdCompressor.java
@@ -11,8 +11,8 @@ import java.util.Arrays;
*/
public class ZstdCompressor {
- private static final io.airlift.compress.zstd.ZstdCompressor compressor = new io.airlift.compress.zstd.ZstdCompressor();
- private static final io.airlift.compress.zstd.ZstdDecompressor decompressor = new io.airlift.compress.zstd.ZstdDecompressor();
+ private static final ai.vespa.airlift.zstd.ZstdCompressor compressor = new ai.vespa.airlift.zstd.ZstdCompressor();
+ private static final ai.vespa.airlift.zstd.ZstdDecompressor decompressor = new ai.vespa.airlift.zstd.ZstdDecompressor();
public byte[] compress(byte[] input, int inputOffset, int inputLength) {
int maxCompressedLength = getMaxCompressedLength(inputLength);
@@ -46,7 +46,7 @@ public class ZstdCompressor {
}
public static int getDecompressedLength(byte[] input, int inputOffset, int inputLength) {
- return (int) io.airlift.compress.zstd.ZstdDecompressor.getDecompressedSize(input, inputOffset, inputLength);
+ return (int) ai.vespa.airlift.zstd.ZstdDecompressor.getDecompressedSize(input, inputOffset, inputLength);
}
}
diff --git a/vespalib/src/tests/compression/compression_test.cpp b/vespalib/src/tests/compression/compression_test.cpp
index eb1fce0a683..27326243b60 100644
--- a/vespalib/src/tests/compression/compression_test.cpp
+++ b/vespalib/src/tests/compression/compression_test.cpp
@@ -67,6 +67,11 @@ TEST("requiret that zstd compression/decompression works") {
EXPECT_EQUAL(_G_compressableText, vespalib::string(decompress.data(), decompress.size()));
}
+TEST("require that CompressionConfig is Atomic") {
+ EXPECT_EQUAL(8u, sizeof(CompressionConfig));
+ EXPECT_TRUE(std::atomic<CompressionConfig>::is_always_lock_free);
+}
+
TEST_MAIN() {
TEST_RUN_ALL();
}
diff --git a/vespalib/src/tests/net/crypto_socket/crypto_socket_test.cpp b/vespalib/src/tests/net/crypto_socket/crypto_socket_test.cpp
index 37b773426a1..08445ab74c2 100644
--- a/vespalib/src/tests/net/crypto_socket/crypto_socket_test.cpp
+++ b/vespalib/src/tests/net/crypto_socket/crypto_socket_test.cpp
@@ -224,12 +224,6 @@ TEST_MT_FFF("require that encrypted async socket io works with NullCryptoEngine"
TEST_DO(verify_crypto_socket(f1, f2, (thread_id == 0)));
}
-TEST_MT_FFF("require that encrypted async socket io works with XorCryptoEngine",
- 2, SocketPair(), XorCryptoEngine(), TimeBomb(60))
-{
- TEST_DO(verify_crypto_socket(f1, f2, (thread_id == 0)));
-}
-
TEST_MT_FFF("require that encrypted async socket io works with TlsCryptoEngine",
2, SocketPair(), TlsCryptoEngine(make_tls_options_for_testing()), TimeBomb(60))
{
diff --git a/vespalib/src/tests/net/sync_crypto_socket/sync_crypto_socket_test.cpp b/vespalib/src/tests/net/sync_crypto_socket/sync_crypto_socket_test.cpp
index d689ef2b348..37f61437ae6 100644
--- a/vespalib/src/tests/net/sync_crypto_socket/sync_crypto_socket_test.cpp
+++ b/vespalib/src/tests/net/sync_crypto_socket/sync_crypto_socket_test.cpp
@@ -115,12 +115,6 @@ TEST_MT_FFF("require that encrypted sync socket io works with NullCryptoEngine",
TEST_DO(verify_crypto_socket(f1, f2, (thread_id == 0)));
}
-TEST_MT_FFF("require that encrypted sync socket io works with XorCryptoEngine",
- 2, SocketPair(), XorCryptoEngine(), TimeBomb(60))
-{
- TEST_DO(verify_crypto_socket(f1, f2, (thread_id == 0)));
-}
-
TEST_MT_FFF("require that encrypted sync socket io works with TlsCryptoEngine",
2, SocketPair(), TlsCryptoEngine(make_tls_options_for_testing()), TimeBomb(60))
{
diff --git a/vespalib/src/tests/portal/portal_test.cpp b/vespalib/src/tests/portal/portal_test.cpp
index fb9b58fc248..52c6d802354 100644
--- a/vespalib/src/tests/portal/portal_test.cpp
+++ b/vespalib/src/tests/portal/portal_test.cpp
@@ -76,12 +76,10 @@ struct Encryption {
Encryption::~Encryption() = default;
auto null_crypto() { return std::make_shared<NullCryptoEngine>(); }
-auto xor_crypto() { return std::make_shared<XorCryptoEngine>(); }
auto tls_crypto() { return std::make_shared<TlsCryptoEngine>(make_tls_options_for_testing()); }
auto maybe_tls_crypto(bool client_tls) { return std::make_shared<MaybeTlsCryptoEngine>(tls_crypto(), client_tls); }
std::vector<Encryption> crypto_list = {{"no encryption", null_crypto()},
- {"ad-hoc xor", xor_crypto()},
{"always TLS", tls_crypto()},
{"maybe TLS; yes", maybe_tls_crypto(true)},
{"maybe TLS; no", maybe_tls_crypto(false)}};
diff --git a/vespalib/src/vespa/vespalib/coro/async_io.cpp b/vespalib/src/vespa/vespalib/coro/async_io.cpp
index a38092a79fd..8d628fd7887 100644
--- a/vespalib/src/vespa/vespalib/coro/async_io.cpp
+++ b/vespalib/src/vespa/vespalib/coro/async_io.cpp
@@ -38,12 +38,12 @@ struct SelectorThread : AsyncIo {
_epoll_read(false), _epoll_write(false),
_reader(nullptr), _writer(nullptr) {}
};
+ struct RunGuard;
using ThreadId = std::atomic<std::thread::id>;
-
+
std::map<int,FdContext> _state;
std::set<int> _check;
Selector<FdContext> _selector;
- bool _shutdown;
std::thread _thread;
ThreadId _thread_id;
bool _check_queue;
@@ -55,7 +55,6 @@ struct SelectorThread : AsyncIo {
: _state(),
_check(),
_selector(),
- _shutdown(false),
_thread(),
_thread_id(std::thread::id()),
_check_queue(false),
@@ -70,7 +69,13 @@ struct SelectorThread : AsyncIo {
void init_shutdown() override;
void fini_shutdown() override;
~SelectorThread();
- bool is_my_thread() const {
+ bool running() const noexcept {
+ return (_thread_id.load(std::memory_order_relaxed) != std::thread::id());
+ }
+ bool stopped() const noexcept {
+ return (_thread_id.load(std::memory_order_relaxed) == std::thread::id());
+ }
+ bool in_thread() const noexcept {
return (std::this_thread::get_id() == _thread_id.load(std::memory_order_relaxed));
}
auto protect() { return std::lock_guard(_lock); }
@@ -80,12 +85,12 @@ struct SelectorThread : AsyncIo {
awaiter(SelectorThread &self_in, bool ready_in) noexcept
: awaiter_base(self_in), ready(ready_in) {}
bool await_ready() const noexcept { return ready; }
- bool await_resume() const noexcept { return self.is_my_thread(); }
+ bool await_resume() const noexcept { return self.in_thread(); }
bool await_suspend(Handle handle) __attribute__((noinline)) {
bool need_wakeup = false;
{
auto guard = self.protect();
- if (self._shutdown) {
+ if (self.stopped()) {
return false;
}
need_wakeup = self._queue.empty();
@@ -99,14 +104,14 @@ struct SelectorThread : AsyncIo {
};
return awaiter(*this, ready);
}
- auto enter_thread() { return queue_self_unless(is_my_thread()); }
+ auto enter_thread() { return queue_self_unless(in_thread()); }
auto readable(int fd) {
struct awaiter : awaiter_base {
int fd;
awaiter(SelectorThread &self_in, int fd_in) noexcept
: awaiter_base(self_in), fd(fd_in) {}
- bool await_ready() const noexcept { return (fd < 0) || self._shutdown; }
- void await_resume() const noexcept {}
+ bool await_ready() const noexcept { return (fd < 0) || self.stopped(); }
+ bool await_resume() const noexcept { return self.running(); }
void await_suspend(Handle handle) __attribute__((noinline)) {
auto [pos, ignore] = self._state.try_emplace(fd, fd);
FdContext &state = pos->second;
@@ -115,8 +120,7 @@ struct SelectorThread : AsyncIo {
self._check.insert(state._fd);
}
};
- fprintf(stderr, "await readable(%d)\n", fd);
- REQUIRE(is_my_thread());
+ REQUIRE(in_thread());
return awaiter(*this, fd);
}
auto writable(int fd) {
@@ -124,8 +128,8 @@ struct SelectorThread : AsyncIo {
int fd;
awaiter(SelectorThread &self_in, int fd_in) noexcept
: awaiter_base(self_in), fd(fd_in) {}
- bool await_ready() const noexcept { return (fd < 0) || self._shutdown; }
- void await_resume() const noexcept {}
+ bool await_ready() const noexcept { return (fd < 0) || self.stopped(); }
+ bool await_resume() const noexcept { return self.running(); }
void await_suspend(Handle handle) __attribute__((noinline)) {
auto [pos, ignore] = self._state.try_emplace(fd, fd);
FdContext &state = pos->second;
@@ -134,8 +138,7 @@ struct SelectorThread : AsyncIo {
self._check.insert(state._fd);
}
};
- fprintf(stderr, "await writable(%d)\n", fd);
- REQUIRE(is_my_thread());
+ REQUIRE(in_thread());
return awaiter(*this, fd);
}
void update_epoll_state() {
@@ -150,20 +153,15 @@ struct SelectorThread : AsyncIo {
bool read_changed = ctx._epoll_read != bool(ctx._reader);
bool write_changed = ctx._epoll_write != bool(ctx._writer);
if (read_changed || write_changed) {
- fprintf(stderr, "epoll update %d %s %s\n", ctx._fd,
- ctx._reader ? "read" : "-", ctx._writer ? "write" : "-");
_selector.update(ctx._fd, ctx, bool(ctx._reader), bool(ctx._writer));
}
} else {
- fprintf(stderr, "epoll add %d %s %s\n", ctx._fd,
- ctx._reader ? "read" : "-", ctx._writer ? "write" : "-");
_selector.add(ctx._fd, ctx, bool(ctx._reader), bool(ctx._writer));
}
ctx._epoll_read = bool(ctx._reader);
ctx._epoll_write = bool(ctx._writer);
} else {
if (was_added) {
- fprintf(stderr, "epoll remove %d\n", ctx._fd);
_selector.remove(ctx._fd);
}
_state.erase(pos);
@@ -171,28 +169,6 @@ struct SelectorThread : AsyncIo {
}
_check.clear();
}
- void cancel_epoll_state() {
- REQUIRE(_shutdown);
- _check.clear();
- for (auto &entry: _state) {
- FdContext &ctx = entry.second;
- const bool was_added = (ctx._epoll_read || ctx._epoll_write);
- if (was_added) {
- fprintf(stderr, "epoll remove %d (shutdown)\n", ctx._fd);
- _selector.remove(ctx._fd);
- }
- if (ctx._reader) {
- auto reader = std::exchange(ctx._reader, nullptr);
- reader.resume();
- }
- if (ctx._writer) {
- auto writer = std::exchange(ctx._writer, nullptr);
- writer.resume();
- }
- }
- _state.clear();
- REQUIRE(_check.empty());
- }
void handle_wakeup() { _check_queue = true; }
void handle_queue() {
if (!_check_queue) {
@@ -203,26 +179,18 @@ struct SelectorThread : AsyncIo {
auto guard = protect();
std::swap(_todo, _queue);
}
- fprintf(stderr, "todo list: %zu items\n", _todo.size());
for (auto &&handle: _todo) {
handle.resume();
}
_todo.clear();
}
- void force_handle_queue() {
- REQUIRE(_shutdown);
- _check_queue = true;
- handle_queue();
- }
void handle_event(FdContext &ctx, bool read, bool write) {
_check.insert(ctx._fd);
if (read && ctx._reader) {
- fprintf(stderr, "resume readable(%d)\n", ctx._fd);
auto reader = std::exchange(ctx._reader, nullptr);
reader.resume();
}
if (write && ctx._writer) {
- fprintf(stderr, "resume writable(%d)\n", ctx._fd);
auto writer = std::exchange(ctx._writer, nullptr);
writer.resume();
}
@@ -231,35 +199,32 @@ struct SelectorThread : AsyncIo {
return "selector-thread";
}
Lazy<SocketHandle> accept(ServerSocket &server_socket) override {
- fprintf(stderr, "async accept(%d)\n", server_socket.get_fd());
- bool in_my_thread = co_await enter_thread();
- if (in_my_thread) {
- co_await readable(server_socket.get_fd());
- if (!_shutdown) {
+ bool in_thread = co_await enter_thread();
+ if (in_thread) {
+ bool can_read = co_await readable(server_socket.get_fd());
+ if (can_read) {
co_return server_socket.accept();
}
}
co_return SocketHandle(-ECANCELED);
}
Lazy<SocketHandle> connect(const SocketAddress &addr) override {
- fprintf(stderr, "async connect(%s)\n", addr.spec().c_str());
- bool in_my_thread = co_await enter_thread();
- if (in_my_thread) {
+ bool in_thread = co_await enter_thread();
+ if (in_thread) {
auto tweak = [](SocketHandle &handle){ return handle.set_blocking(false); };
auto socket = addr.connect(tweak);
- co_await writable(socket.get());
- if (!_shutdown) {
+ bool can_write = co_await writable(socket.get());
+ if (can_write) {
co_return std::move(socket);
}
}
co_return SocketHandle(-ECANCELED);
}
Lazy<ssize_t> read(SocketHandle &socket, char *buf, size_t len) override {
- fprintf(stderr, "async read(%d)\n", socket.get());
- bool in_my_thread = co_await enter_thread();
- if (in_my_thread) {
- co_await readable(socket.get());
- if (!_shutdown) {
+ bool in_thread = co_await enter_thread();
+ if (in_thread) {
+ bool can_read = co_await readable(socket.get());
+ if (can_read) {
ssize_t res = socket.read(buf, len);
co_return (res < 0) ? -errno : res;
}
@@ -267,11 +232,10 @@ struct SelectorThread : AsyncIo {
co_return -ECANCELED;
}
Lazy<ssize_t> write(SocketHandle &socket, const char *buf, size_t len) override {
- fprintf(stderr, "async write(%d)\n", socket.get());
- bool in_my_thread = co_await enter_thread();
- if (in_my_thread) {
- co_await writable(socket.get());
- if (!_shutdown) {
+ bool in_thread = co_await enter_thread();
+ if (in_thread) {
+ bool can_write = co_await writable(socket.get());
+ if (can_write) {
ssize_t res = socket.write(buf, len);
co_return (res < 0) ? -errno : res;
}
@@ -282,11 +246,10 @@ struct SelectorThread : AsyncIo {
co_return co_await queue_self_unless(false);
}
Detached async_shutdown() {
- bool in_my_thread = co_await enter_thread();
- REQUIRE(in_my_thread && "unable to initialize shutdown of internal thread");
+ bool in_thread = co_await enter_thread();
+ REQUIRE(in_thread && "unable to initialize shutdown of internal thread");
{
auto guard = protect();
- _shutdown = true;
_thread_id = std::thread::id();
}
}
@@ -295,42 +258,63 @@ struct SelectorThread : AsyncIo {
void
SelectorThread::start()
{
- fprintf(stderr, "start\n");
_thread = std::thread(&SelectorThread::main, this);
_thread_id.wait(std::thread::id());
}
+struct SelectorThread::RunGuard {
+ SelectorThread &self;
+ RunGuard(SelectorThread &self_in) noexcept : self(self_in) {
+ self._thread_id = std::this_thread::get_id();
+ self._thread_id.notify_all();
+ }
+ ~RunGuard() {
+ REQUIRE(self.stopped());
+ self._check.clear();
+ for (auto &entry: self._state) {
+ FdContext &ctx = entry.second;
+ const bool was_added = (ctx._epoll_read || ctx._epoll_write);
+ if (was_added) {
+ self._selector.remove(ctx._fd);
+ }
+ if (ctx._reader) {
+ auto reader = std::exchange(ctx._reader, nullptr);
+ reader.resume();
+ }
+ if (ctx._writer) {
+ auto writer = std::exchange(ctx._writer, nullptr);
+ writer.resume();
+ }
+ }
+ self._state.clear();
+ REQUIRE(self._check.empty());
+ self._check_queue = true;
+ self.handle_queue();
+ }
+};
+
void
SelectorThread::main()
{
- _thread_id = std::this_thread::get_id();
- _thread_id.notify_all();
- while (!_shutdown) {
+ RunGuard guard(*this);
+ while (running()) {
update_epoll_state();
- fprintf(stderr, "--> epoll wait\n");
_selector.poll(1000);
- fprintf(stderr, "<-- epoll wait: got %zu events\n", _selector.num_events());
_selector.dispatch(*this);
handle_queue();
}
- fprintf(stderr, "event loop cleanup\n");
- cancel_epoll_state();
- force_handle_queue();
}
void
SelectorThread::init_shutdown()
{
- fprintf(stderr, "init_shutdown\n");
async_shutdown();
}
void
SelectorThread::fini_shutdown()
{
- fprintf(stderr, "--> fini_shutdown\n");
_thread.join();
- fprintf(stderr, "<-- fini_shutdown\n");
}
SelectorThread::~SelectorThread()
diff --git a/vespalib/src/vespa/vespalib/geo/zcurve.cpp b/vespalib/src/vespa/vespalib/geo/zcurve.cpp
index 3bd68857154..c207f966704 100644
--- a/vespalib/src/vespa/vespalib/geo/zcurve.cpp
+++ b/vespalib/src/vespa/vespalib/geo/zcurve.cpp
@@ -3,6 +3,8 @@
#include <vespa/vespalib/geo/zcurve.h>
#include <vespa/vespalib/util/priority_queue.h>
#include <vespa/vespalib/util/fiddle.h>
+#include <algorithm>
+#include <limits>
namespace vespalib::geo {
@@ -30,7 +32,7 @@ public:
int64_t total_estimate() const { return _total_estimate; }
void put(Area area) {
- _total_estimate += area.estimate();
+ _total_estimate += std::min(area.estimate(), std::numeric_limits<int64_t>::max() - _total_estimate);
_queue.push(std::move(area));
}
@@ -130,8 +132,15 @@ ZCurve::RangeVector
ZCurve::find_ranges(int min_x, int min_y,
int max_x, int max_y)
{
- int64_t total_size = ((static_cast<int64_t>(max_x) - min_x + 1) * (static_cast<int64_t>(max_y) - min_y + 1));
- int64_t estimate_target = (total_size * 4);
+ uint64_t x_size = (static_cast<int64_t>(max_x) - min_x + 1);
+ uint64_t y_size = (static_cast<int64_t>(max_y) - min_y + 1);
+ uint64_t total_size = (x_size > std::numeric_limits<uint32_t>::max() &&
+ y_size > std::numeric_limits<uint32_t>::max()) ?
+ std::numeric_limits<uint64_t>::max() :
+ (x_size * y_size);
+ int64_t estimate_target = (total_size > std::numeric_limits<int64_t>::max() / 4) ?
+ std::numeric_limits<int64_t>::max() :
+ (total_size * 4);
ZAreaSplitter splitter(min_x, min_y, max_x, max_y);
while (splitter.total_estimate() > estimate_target && splitter.num_ranges() < 42) {
splitter.split_worst();
diff --git a/vespalib/src/vespa/vespalib/net/crypto_engine.cpp b/vespalib/src/vespa/vespalib/net/crypto_engine.cpp
index d2b02e7cc7c..f826e74e450 100644
--- a/vespalib/src/vespa/vespalib/net/crypto_engine.cpp
+++ b/vespalib/src/vespa/vespalib/net/crypto_engine.cpp
@@ -1,7 +1,6 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "crypto_engine.h"
-#include <vespa/vespalib/data/smart_buffer.h>
#include <vespa/vespalib/crypto/crypto_exception.h>
#include <vespa/vespalib/net/tls/authorization_mode.h>
#include <vespa/vespalib/net/tls/auto_reloading_tls_crypto_engine.h>
@@ -12,11 +11,6 @@
#include <vespa/vespalib/net/tls/transport_security_options_reading.h>
#include <vespa/vespalib/stllike/string.h>
#include <vespa/vespalib/util/size_literals.h>
-#include <vector>
-#include <chrono>
-#include <thread>
-#include <xxhash.h>
-#include <cassert>
#include <vespa/log/log.h>
LOG_SETUP(".vespalib.net.crypto_engine");
@@ -25,19 +19,6 @@ namespace vespalib {
namespace {
-struct HashState {
- using clock = std::chrono::high_resolution_clock;
- const void *self;
- clock::time_point now;
- HashState() : self(this), now(clock::now()) {}
-};
-
-char gen_key() {
- HashState hash_state;
- std::this_thread::sleep_for(std::chrono::microseconds(42));
- return XXH64(&hash_state, sizeof(hash_state), 0);
-}
-
class NullCryptoSocket : public CryptoSocket
{
private:
@@ -56,143 +37,6 @@ public:
void drop_empty_buffers() override {}
};
-class XorCryptoSocket : public CryptoSocket
-{
-private:
- static constexpr size_t CHUNK_SIZE = 16_Ki;
- enum class OP { READ_KEY, WRITE_KEY };
- std::vector<OP> _op_stack;
- char _my_key;
- char _peer_key;
- SmartBuffer _input;
- SmartBuffer _output;
- SocketHandle _socket;
-
- bool is_blocked(ssize_t res, int error) const {
- return ((res < 0) && ((error == EWOULDBLOCK) || (error == EAGAIN)));
- }
-
- HandshakeResult try_read_key() {
- ssize_t res = _socket.read(&_peer_key, 1);
- if (is_blocked(res, errno)) {
- return HandshakeResult::NEED_READ;
- }
- return (res == 1)
- ? HandshakeResult::DONE
- : HandshakeResult::FAIL;
- }
-
- HandshakeResult try_write_key() {
- ssize_t res = _socket.write(&_my_key, 1);
- if (is_blocked(res, errno)) {
- return HandshakeResult::NEED_WRITE;
- }
- return (res == 1)
- ? HandshakeResult::DONE
- : HandshakeResult::FAIL;
- }
-
- HandshakeResult perform_hs_op(OP op) {
- if (op == OP::READ_KEY) {
- return try_read_key();
- } else {
- assert(op == OP::WRITE_KEY);
- return try_write_key();
- }
- }
-
-public:
- XorCryptoSocket(SocketHandle socket, bool is_server)
- : _op_stack(is_server
- ? std::vector<OP>({OP::WRITE_KEY, OP::READ_KEY})
- : std::vector<OP>({OP::READ_KEY, OP::WRITE_KEY})),
- _my_key(gen_key()),
- _peer_key(0),
- _input(CHUNK_SIZE * 2),
- _output(CHUNK_SIZE * 2),
- _socket(std::move(socket)) {}
- int get_fd() const override { return _socket.get(); }
- HandshakeResult handshake() override {
- while (!_op_stack.empty()) {
- HandshakeResult partial_result = perform_hs_op(_op_stack.back());
- if (partial_result != HandshakeResult::DONE) {
- return partial_result;
- }
- _op_stack.pop_back();
- }
- return HandshakeResult::DONE;
- }
- void do_handshake_work() override {}
- size_t min_read_buffer_size() const override { return 1; }
- ssize_t read(char *buf, size_t len) override {
- if (_input.obtain().size == 0) {
- auto dst = _input.reserve(CHUNK_SIZE);
- ssize_t res = _socket.read(dst.data, dst.size);
- if (res > 0) {
- _input.commit(res);
- } else {
- return res; // eof/error
- }
- }
- return drain(buf, len);
- }
- ssize_t drain(char *buf, size_t len) override {
- auto src = _input.obtain();
- size_t frame = std::min(len, src.size);
- for (size_t i = 0; i < frame; ++i) {
- buf[i] = (src.data[i] ^ _my_key);
- }
- _input.evict(frame);
- return frame;
- }
- ssize_t write(const char *buf, size_t len) override {
- if (_output.obtain().size >= CHUNK_SIZE) {
- if (flush() < 0) {
- return -1;
- }
- if (_output.obtain().size > 0) {
- errno = EWOULDBLOCK;
- return -1;
- }
- }
- size_t frame = std::min(len, CHUNK_SIZE);
- auto dst = _output.reserve(frame);
- for (size_t i = 0; i < frame; ++i) {
- dst.data[i] = (buf[i] ^ _peer_key);
- }
- _output.commit(frame);
- return frame;
- }
- ssize_t flush() override {
- auto pending = _output.obtain();
- if (pending.size > 0) {
- ssize_t res = _socket.write(pending.data, pending.size);
- if (res > 0) {
- _output.evict(res);
- return 1; // progress
- } else {
- assert(res < 0);
- return -1; // error
- }
- }
- return 0; // done
- }
- ssize_t half_close() override {
- auto flush_res = flush();
- while (flush_res > 0) {
- flush_res = flush();
- }
- if (flush_res < 0) {
- return flush_res;
- }
- return _socket.half_close();
- }
- void drop_empty_buffers() override {
- _input.drop_if_empty();
- _output.drop_if_empty();
- }
-};
-
using net::tls::AuthorizationMode;
AuthorizationMode authorization_mode_from_env() {
@@ -269,16 +113,4 @@ NullCryptoEngine::create_server_crypto_socket(SocketHandle socket)
return std::make_unique<NullCryptoSocket>(std::move(socket));
}
-CryptoSocket::UP
-XorCryptoEngine::create_client_crypto_socket(SocketHandle socket, const SocketSpec &)
-{
- return std::make_unique<XorCryptoSocket>(std::move(socket), false);
-}
-
-CryptoSocket::UP
-XorCryptoEngine::create_server_crypto_socket(SocketHandle socket)
-{
- return std::make_unique<XorCryptoSocket>(std::move(socket), true);
-}
-
} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/net/crypto_engine.h b/vespalib/src/vespa/vespalib/net/crypto_engine.h
index d6de53bd3e0..7f4b5287415 100644
--- a/vespalib/src/vespa/vespalib/net/crypto_engine.h
+++ b/vespalib/src/vespa/vespalib/net/crypto_engine.h
@@ -37,16 +37,4 @@ struct NullCryptoEngine : public CryptoEngine {
CryptoSocket::UP create_server_crypto_socket(SocketHandle socket) override;
};
-/**
- * Very simple crypto engine that requires connection handshaking and
- * data transformation. Used to test encryption integration separate
- * from TLS.
- **/
-struct XorCryptoEngine : public CryptoEngine {
- bool use_tls_when_client() const override { return false; }
- bool always_use_tls_when_server() const override { return false; }
- CryptoSocket::UP create_client_crypto_socket(SocketHandle socket, const SocketSpec &spec) override;
- CryptoSocket::UP create_server_crypto_socket(SocketHandle socket) override;
-};
-
} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/util/compressionconfig.h b/vespalib/src/vespa/vespalib/util/compressionconfig.h
index c868d2e611d..2a09fa7e086 100644
--- a/vespalib/src/vespa/vespalib/util/compressionconfig.h
+++ b/vespalib/src/vespa/vespalib/util/compressionconfig.h
@@ -9,7 +9,7 @@
namespace vespalib::compression {
struct CompressionConfig {
- enum Type {
+ enum Type : uint8_t {
NONE = 0,
NONE_MULTI = 1,
HISTORIC_2 = 2,
@@ -21,15 +21,15 @@ struct CompressionConfig {
};
CompressionConfig() noexcept
- : type(NONE), compressionLevel(0), threshold(90), minSize(0) {}
+ : CompressionConfig(NONE, 0, 90) {}
CompressionConfig(Type t) noexcept
- : type(t), compressionLevel(9), threshold(90), minSize(0) {}
+ : CompressionConfig(t, 9, 90) {}
CompressionConfig(Type t, uint8_t level, uint8_t minRes) noexcept
- : type(t), compressionLevel(level), threshold(minRes), minSize(0) {}
+ : CompressionConfig(t, level, minRes, 0) {}
CompressionConfig(Type t, uint8_t lvl, uint8_t minRes, size_t minSz) noexcept
- : type(t), compressionLevel(lvl), threshold(minRes), minSize(minSz) {}
+ : minSize(minSz), type(t), compressionLevel(lvl), threshold(minRes) {}
bool operator==(const CompressionConfig& o) const {
return (type == o.type
@@ -66,30 +66,12 @@ struct CompressionConfig {
}
bool useCompression() const { return isCompressed(type); }
- Type type;
- uint8_t compressionLevel;
- uint8_t threshold;
- size_t minSize;
+ uint32_t minSize;
+ Type type;
+ uint8_t compressionLevel;
+ uint8_t threshold;
};
-class CompressionInfo
-{
-public:
- CompressionInfo(size_t uncompressedSize, size_t compressedSize)
- : _uncompressedSize(uncompressedSize), _compressedSize(compressedSize) { }
- size_t getUncompressedSize() const { return _uncompressedSize; }
- size_t getCompressedSize() const { return _compressedSize; }
- double getCompressionRatio() const { return _uncompressedSize/_compressedSize; }
-private:
- size_t _uncompressedSize;
- size_t _compressedSize;
-};
-
-inline CompressionInfo operator + (const CompressionInfo & a, const CompressionInfo & b)
-{
- return CompressionInfo(a.getUncompressedSize() + b.getUncompressedSize(), a.getCompressedSize() + b.getCompressedSize());
-}
-
}
diff --git a/vespalib/src/vespa/vespalib/util/compressor.cpp b/vespalib/src/vespa/vespalib/util/compressor.cpp
index c477b021956..4d708473423 100644
--- a/vespalib/src/vespa/vespalib/util/compressor.cpp
+++ b/vespalib/src/vespa/vespalib/util/compressor.cpp
@@ -15,7 +15,7 @@ namespace vespalib::compression {
namespace {
template <typename F>
-void with_compressor(const CompressionConfig::Type &type, F &&f) {
+void with_compressor(CompressionConfig::Type type, F &&f) {
switch (type) {
case CompressionConfig::LZ4:
{
@@ -40,7 +40,7 @@ void with_compressor(const CompressionConfig::Type &type, F &&f) {
//-----------------------------------------------------------------------------
CompressionConfig::Type
-compress(ICompressor & compressor, const CompressionConfig & compression, const ConstBufferRef & org, DataBuffer & dest)
+compress(ICompressor & compressor, CompressionConfig compression, const ConstBufferRef & org, DataBuffer & dest)
{
CompressionConfig::Type type(CompressionConfig::NONE);
dest.ensureFree(compressor.adjustProcessLen(0, org.size()));
@@ -55,7 +55,7 @@ compress(ICompressor & compressor, const CompressionConfig & compression, const
}
CompressionConfig::Type
-docompress(const CompressionConfig & compression, const ConstBufferRef & org, DataBuffer & dest)
+docompress(CompressionConfig compression, const ConstBufferRef & org, DataBuffer & dest)
{
switch (compression.type) {
case CompressionConfig::LZ4:
@@ -80,8 +80,9 @@ CompressionConfig::Type
compress(CompressionConfig::Type compression, const ConstBufferRef & org, DataBuffer & dest, bool allowSwap) {
return compress(CompressionConfig(compression), org, dest, allowSwap);
}
+
CompressionConfig::Type
-compress(const CompressionConfig & compression, const ConstBufferRef & org, DataBuffer & dest, bool allowSwap)
+compress(CompressionConfig compression, const ConstBufferRef & org, DataBuffer & dest, bool allowSwap)
{
CompressionConfig::Type type(CompressionConfig::NONE);
if (org.size() >= compression.minSize) {
@@ -124,7 +125,7 @@ decompress(ICompressor & decompressor, size_t uncompressedLen, const ConstBuffer
}
void
-decompress(const CompressionConfig::Type & type, size_t uncompressedLen, const ConstBufferRef & org, DataBuffer & dest, bool allowSwap)
+decompress(CompressionConfig::Type type, size_t uncompressedLen, const ConstBufferRef & org, DataBuffer & dest, bool allowSwap)
{
switch (type) {
case CompressionConfig::LZ4:
@@ -169,7 +170,7 @@ size_t computeMaxCompressedsize(CompressionConfig::Type type, size_t payloadSize
//-----------------------------------------------------------------------------
-Compress::Compress(const CompressionConfig &config,
+Compress::Compress(CompressionConfig config,
const char *uncompressed_data, size_t uncompressed_size)
: _space(),
_type(CompressionConfig::NONE),
@@ -194,7 +195,7 @@ Compress::Compress(const CompressionConfig &config,
}
}
-Decompress::Decompress(const CompressionConfig::Type &type, size_t uncompressed_size,
+Decompress::Decompress(CompressionConfig::Type type, size_t uncompressed_size,
const char *compressed_data, size_t compressed_size)
: _space(),
_data(compressed_data),
diff --git a/vespalib/src/vespa/vespalib/util/compressor.h b/vespalib/src/vespa/vespalib/util/compressor.h
index fd29c028af2..2a02d684719 100644
--- a/vespalib/src/vespa/vespalib/util/compressor.h
+++ b/vespalib/src/vespa/vespalib/util/compressor.h
@@ -13,7 +13,7 @@ class ICompressor
{
public:
virtual ~ICompressor() { }
- virtual bool process(const CompressionConfig& config, const void * input, size_t inputLen, void * output, size_t & outputLen) = 0;
+ virtual bool process(CompressionConfig config, const void * input, size_t inputLen, void * output, size_t & outputLen) = 0;
virtual bool unprocess(const void * input, size_t inputLen, void * output, size_t & outputLen) = 0;
virtual size_t adjustProcessLen(uint16_t options, size_t len) const = 0;
};
@@ -28,7 +28,7 @@ public:
* @param allowSwap will tell it the data must be appended or if it can be swapped in if it is uncompressable or config is NONE.
*/
CompressionConfig::Type compress(CompressionConfig::Type compression, const ConstBufferRef & org, DataBuffer & dest, bool allowSwap);
-CompressionConfig::Type compress(const CompressionConfig & compression, const vespalib::ConstBufferRef & org, vespalib::DataBuffer & dest, bool allowSwap);
+CompressionConfig::Type compress(CompressionConfig compression, const vespalib::ConstBufferRef & org, vespalib::DataBuffer & dest, bool allowSwap);
/**
* Will try to decompress a buffer according to the config.
@@ -41,7 +41,7 @@ CompressionConfig::Type compress(const CompressionConfig & compression, const ve
* Then it will be swapped in.
* @param allowSwap will tell it the data must be appended or if it can be swapped in if compression type is NONE.
*/
-void decompress(const CompressionConfig::Type & compression, size_t uncompressedLen, const vespalib::ConstBufferRef & org, vespalib::DataBuffer & dest, bool allowSwap);
+void decompress(CompressionConfig::Type compression, size_t uncompressedLen, const vespalib::ConstBufferRef & org, vespalib::DataBuffer & dest, bool allowSwap);
size_t computeMaxCompressedsize(CompressionConfig::Type type, size_t uncompressedSize);
@@ -53,14 +53,13 @@ size_t computeMaxCompressedsize(CompressionConfig::Type type, size_t uncompresse
**/
class Compress {
private:
- alloc::Alloc _space;
+ alloc::Alloc _space;
CompressionConfig::Type _type;
- const char *_data;
- size_t _size;
+ const char *_data;
+ size_t _size;
public:
- Compress(const CompressionConfig &config,
- const char *uncompressed_data, size_t uncompressed_size);
- const CompressionConfig::Type &type() const { return _type; }
+ Compress(CompressionConfig config, const char *uncompressed_data, size_t uncompressed_size);
+ CompressionConfig::Type type() const { return _type; }
const char *data() const { return _data; }
size_t size() const { return _size; }
};
@@ -72,10 +71,10 @@ public:
class Decompress {
private:
alloc::Alloc _space;
- const char *_data;
- size_t _size;
+ const char *_data;
+ size_t _size;
public:
- Decompress(const CompressionConfig::Type &type, size_t uncompressed_size,
+ Decompress(CompressionConfig::Type type, size_t uncompressed_size,
const char *compressed_data, size_t compressed_size);
const char *data() const { return _data; }
size_t size() const { return _size; }
diff --git a/vespalib/src/vespa/vespalib/util/lz4compressor.cpp b/vespalib/src/vespa/vespalib/util/lz4compressor.cpp
index d33b7339c4b..4176dd5d2c6 100644
--- a/vespalib/src/vespa/vespalib/util/lz4compressor.cpp
+++ b/vespalib/src/vespa/vespalib/util/lz4compressor.cpp
@@ -13,7 +13,7 @@ namespace vespalib::compression {
size_t LZ4Compressor::adjustProcessLen(uint16_t, size_t len) const { return LZ4_compressBound(len); }
bool
-LZ4Compressor::process(const CompressionConfig& config, const void * inputV, size_t inputLen, void * outputV, size_t & outputLenV)
+LZ4Compressor::process(CompressionConfig config, const void * inputV, size_t inputLen, void * outputV, size_t & outputLenV)
{
const char * input(static_cast<const char *>(inputV));
char * output(static_cast<char *>(outputV));
diff --git a/vespalib/src/vespa/vespalib/util/lz4compressor.h b/vespalib/src/vespa/vespalib/util/lz4compressor.h
index e723fce80cf..947888e625f 100644
--- a/vespalib/src/vespa/vespalib/util/lz4compressor.h
+++ b/vespalib/src/vespa/vespalib/util/lz4compressor.h
@@ -8,7 +8,7 @@ namespace vespalib::compression {
class LZ4Compressor : public ICompressor
{
public:
- bool process(const CompressionConfig& config, const void * input, size_t inputLen, void * output, size_t & outputLen) override;
+ bool process(CompressionConfig config, const void * input, size_t inputLen, void * output, size_t & outputLen) override;
bool unprocess(const void * input, size_t inputLen, void * output, size_t & outputLen) override;
size_t adjustProcessLen(uint16_t options, size_t len) const override;
};
diff --git a/vespalib/src/vespa/vespalib/util/mmap_file_allocator.cpp b/vespalib/src/vespa/vespalib/util/mmap_file_allocator.cpp
index 49560c1b3b0..8c89f6745e4 100644
--- a/vespalib/src/vespa/vespalib/util/mmap_file_allocator.cpp
+++ b/vespalib/src/vespa/vespalib/util/mmap_file_allocator.cpp
@@ -62,10 +62,13 @@ MmapFileAllocator::alloc(size_t sz) const
assert(ins_res.second);
int retval = madvise(buf, sz, MADV_RANDOM);
assert(retval == 0);
+#ifdef __linux__
+ retval = madvise(buf, sz, MADV_DONTDUMP);
+ assert(retval == 0);
+#endif
return PtrAndSize(buf, sz);
}
-
void
MmapFileAllocator::free(PtrAndSize alloc) const
{
diff --git a/vespalib/src/vespa/vespalib/util/zstdcompressor.cpp b/vespalib/src/vespa/vespalib/util/zstdcompressor.cpp
index 1b0562985d8..51adc68c848 100644
--- a/vespalib/src/vespa/vespalib/util/zstdcompressor.cpp
+++ b/vespalib/src/vespa/vespalib/util/zstdcompressor.cpp
@@ -36,7 +36,7 @@ thread_local std::unique_ptr<DecompressContext> _tlDecompressState;
size_t ZStdCompressor::adjustProcessLen(uint16_t, size_t len) const { return ZSTD_compressBound(len); }
bool
-ZStdCompressor::process(const CompressionConfig& config, const void * inputV, size_t inputLen, void * outputV, size_t & outputLenV)
+ZStdCompressor::process(CompressionConfig config, const void * inputV, size_t inputLen, void * outputV, size_t & outputLenV)
{
size_t maxOutputLen = ZSTD_compressBound(inputLen);
if ( ! _tlCompressState) {
diff --git a/vespalib/src/vespa/vespalib/util/zstdcompressor.h b/vespalib/src/vespa/vespalib/util/zstdcompressor.h
index 1a141ab2b0f..483f2521475 100644
--- a/vespalib/src/vespa/vespalib/util/zstdcompressor.h
+++ b/vespalib/src/vespa/vespalib/util/zstdcompressor.h
@@ -8,7 +8,7 @@ namespace vespalib::compression {
class ZStdCompressor : public ICompressor
{
public:
- bool process(const CompressionConfig& config, const void * input, size_t inputLen, void * output, size_t & outputLen) override;
+ bool process(CompressionConfig config, const void * input, size_t inputLen, void * output, size_t & outputLen) override;
bool unprocess(const void * input, size_t inputLen, void * output, size_t & outputLen) override;
size_t adjustProcessLen(uint16_t options, size_t len) const override;
};
diff --git a/zkfacade/src/main/java/com/yahoo/vespa/curator/SingletonManager.java b/zkfacade/src/main/java/com/yahoo/vespa/curator/SingletonManager.java
index 8eda57b0476..0b6d1611563 100644
--- a/zkfacade/src/main/java/com/yahoo/vespa/curator/SingletonManager.java
+++ b/zkfacade/src/main/java/com/yahoo/vespa/curator/SingletonManager.java
@@ -336,14 +336,16 @@ class SingletonManager {
shouldBeActive = false;
}
}
- if (active && ! shouldBeActive) {
+ if ( ! shouldBeActive) {
logger.log(FINE, () -> "Doom value is " + doom);
- try {
- if ( ! singletons.isEmpty()) metrics.deactivation(singletons.peek()::deactivate);
- active = false;
- }
- catch (RuntimeException e) {
- logger.log(WARNING, "Failed to deactivate " + singletons.peek(), e);
+ if (active) {
+ try {
+ if ( ! singletons.isEmpty()) metrics.deactivation(singletons.peek()::deactivate);
+ active = false;
+ }
+ catch (RuntimeException e) {
+ logger.log(WARNING, "Failed to deactivate " + singletons.peek(), e);
+ }
}
unlock();
}