author    Tor Brede Vekterli <vekterli@vespa.ai>  2023-11-13 13:58:41 +0000
committer Tor Brede Vekterli <vekterli@vespa.ai>  2023-11-13 14:13:38 +0000
commit    777f614ba7aa54a33de96e277f7029ee391ba276 (patch)
tree      bece79399d35e6e01342a109af82cc12f45c4715 /storage/src/tests/storageserver
parent    26285c69cd996e05040b2d147e48d36b9a0ad648 (diff)
Also memory-limit throttle enqueued merges
This plugs the hole where merges could enter the active window even if doing so would exceed the total memory limit, since dequeueing is a separate code path from the one where a merge is initially evaluated for inclusion in the active window.

There is a theoretical head-of-line blocking/queue starvation issue if the merge at the front of the queue has an unrealistically large footprint and the memory limit is unrealistically low. In practice this is not expected to be a problem, and it should never cause merging to stop entirely (at least one merge is always guaranteed to be able to execute). As such, no heuristics are added to deal with this for now.
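A minimal sketch of the dequeue-time check described above, assuming hypothetical names (`may_dequeue_merge` and its parameters are not the actual Vespa API); the real MergeThrottler logic differs in detail:

#include <cstdint>

// Decide whether the merge at the head of the queue may enter the active
// window. Sketch only: all names here are assumptions for illustration.
bool may_dequeue_merge(uint32_t active_merges,
                       uint64_t active_memory_usage,
                       uint64_t merge_memory_usage,
                       uint64_t max_memory_usage) {
    if (active_merges == 0) {
        // At least one merge is always allowed to execute regardless of its
        // size, so throttling can never bring merging to a complete stop.
        return true;
    }
    // Otherwise the merge only leaves the queue if its estimated footprint
    // fits within the configured memory limit.
    return (active_memory_usage + merge_memory_usage) <= max_memory_usage;
}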
Diffstat (limited to 'storage/src/tests/storageserver')
-rw-r--r--  storage/src/tests/storageserver/mergethrottlertest.cpp  33
1 file changed, 33 insertions, 0 deletions
diff --git a/storage/src/tests/storageserver/mergethrottlertest.cpp b/storage/src/tests/storageserver/mergethrottlertest.cpp
index 6f80ffe0727..a480ba2740f 100644
--- a/storage/src/tests/storageserver/mergethrottlertest.cpp
+++ b/storage/src/tests/storageserver/mergethrottlertest.cpp
@@ -1603,6 +1603,39 @@ TEST_F(MergeThrottlerTest, queued_merges_are_not_counted_towards_memory_usage) {
EXPECT_EQ(throttler(0).getMetrics().estimated_merge_memory_usage.getLast(), 0_Mi);
}
+TEST_F(MergeThrottlerTest, enqueued_merge_not_started_if_insufficient_memory_available) {
+ // See `queued_merges_are_not_counted_towards_memory_usage` test for magic number rationale
+ const auto max_pending = throttler_max_merges_pending(0);
+ ASSERT_LT(max_pending, 1000);
+ ASSERT_GT(max_pending, 1);
+ throttler(0).set_max_merge_memory_usage_bytes_locking(10_Mi);
+
+ // Fill up entire active window and enqueue a single merge
+ fill_throttler_queue_with_n_commands(0, 0);
+ _topLinks[0]->sendDown(MergeBuilder(document::BucketId(16, 1000)).nodes(0, 1, 2).unordered(true).memory_usage(11_Mi).create());
+ waitUntilMergeQueueIs(throttler(0), 1, _messageWaitTime); // Should end up in queue
+
+ // Drain all active merges. As long as we have other active merges, the enqueued merge should not
+ // be allowed through since it's too large. Eventually it will hit the "at least one merge must
+ be allowed at any time regardless of size" exception and be dequeued.
+ for (uint32_t i = 0; i < max_pending; ++i) {
+ auto fwd_cmd = _topLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET);
+ auto fwd_reply = dynamic_cast<api::MergeBucketCommand&>(*fwd_cmd).makeReply();
+
+ ASSERT_NO_FATAL_FAILURE(send_and_expect_reply(
+ std::shared_ptr<api::StorageReply>(std::move(fwd_reply)),
+ MessageType::MERGEBUCKET_REPLY, ReturnCode::OK)); // Unwind reply for completed merge
+
+ if (i < max_pending - 1) {
+ // Merge should still be in the queue, as it requires 11 MiB, and we only have 10 MiB.
+ // It will eventually be executed when the window is empty (see below).
+ waitUntilMergeQueueIs(throttler(0), 1, _messageWaitTime);
+ }
+ }
+ // We've freed up the entire send window, so the over-sized merge can finally squeeze through.
+ waitUntilMergeQueueIs(throttler(0), 0, _messageWaitTime);
+}
+
namespace {
vespalib::HwInfo make_mem_info(uint64_t mem_size) {