about summary refs log tree commit diff stats
path: root/storage
diff options
context:
space:
mode:
author	Tor Brede Vekterli <vekterli@oath.com>	2018-04-27 12:33:12 +0000
committer	Tor Brede Vekterli <vekterli@oath.com>	2018-04-27 12:43:26 +0000
commit	943cf25b0fd2bd76b3ac4a1236655446a2c9f4cd (patch)
tree	62f7f36f7fd7a3e85bc38e07f70f94d14fceb009 /storage
parent	d83814043998817d404b922e3050ce2006e8ec19 (diff)
Ensure commands are processed before shutting down persistence threads
Prevents race condition where test cleanup closes persistence threads while there are still enqueued operations. Normally the content layer shall ensure that all enqueued operations are aborted before reaching this step, but this does not take place in the simplified testing setup.
Diffstat (limited to 'storage')
-rw-r--r--	storage/src/tests/persistence/filestorage/deletebuckettest.cpp	50
-rw-r--r--	storage/src/tests/persistence/filestorage/sanitycheckeddeletetest.cpp	2
2 files changed, 28 insertions, 24 deletions
diff --git a/storage/src/tests/persistence/filestorage/deletebuckettest.cpp b/storage/src/tests/persistence/filestorage/deletebuckettest.cpp
index 3e0f36a6cdd..ec3e02e85b8 100644
--- a/storage/src/tests/persistence/filestorage/deletebuckettest.cpp
+++ b/storage/src/tests/persistence/filestorage/deletebuckettest.cpp
@@ -34,32 +34,36 @@ DeleteBucketTest::testDeleteAbortsOperationsForBucket()
createBucket(bucket);
LOG(info, "TEST STAGE: taking resume guard");
- ResumeGuard rg(c.manager->getFileStorHandler().pause());
- // First put may or may not be queued, since pausing might race with
- // an existing getNextMessage iteration (ugh...).
- c.sendPut(bucket, DocumentIndex(0), PutTimestamp(1000));
- // Put will be queued since thread now must know it's paused.
- c.sendPut(bucket, DocumentIndex(1), PutTimestamp(1000));
+ {
+ ResumeGuard rg(c.manager->getFileStorHandler().pause());
+ // First put may or may not be queued, since pausing might race with
+ // an existing getNextMessage iteration (ugh...).
+ c.sendPut(bucket, DocumentIndex(0), PutTimestamp(1000));
+ // Put will be queued since thread now must know it's paused.
+ c.sendPut(bucket, DocumentIndex(1), PutTimestamp(1000));
- auto deleteMsg = std::make_shared<api::DeleteBucketCommand>(makeDocumentBucket(bucket));
- c.top.sendDown(deleteMsg);
- // We should now have two put replies. The first one will either be OK
- // or BUCKET_DELETED depending on whether it raced. The second (which is
- // the one we care about since it's deterministic) must be BUCKET_DELETED.
- // Problem is, their returned ordering is not deterministic so we're left
- // with having to check that _at least_ 1 reply had BUCKET_DELETED. Joy!
- c.top.waitForMessages(2, 60*2);
- std::vector<api::StorageMessage::SP> msgs(c.top.getRepliesOnce());
- CPPUNIT_ASSERT_EQUAL(size_t(2), msgs.size());
- int numDeleted = 0;
- for (uint32_t i = 0; i < 2; ++i) {
- api::StorageReply& reply(dynamic_cast<api::StorageReply&>(*msgs[i]));
- if (reply.getResult().getResult() == api::ReturnCode::BUCKET_DELETED) {
- ++numDeleted;
+ auto deleteMsg = std::make_shared<api::DeleteBucketCommand>(makeDocumentBucket(bucket));
+ c.top.sendDown(deleteMsg);
+ // We should now have two put replies. The first one will either be OK
+ // or BUCKET_DELETED depending on whether it raced. The second (which is
+ // the one we care about since it's deterministic) must be BUCKET_DELETED.
+ // Problem is, their returned ordering is not deterministic so we're left
+ // with having to check that _at least_ 1 reply had BUCKET_DELETED. Joy!
+ c.top.waitForMessages(2, 60 * 2);
+ std::vector <api::StorageMessage::SP> msgs(c.top.getRepliesOnce());
+ CPPUNIT_ASSERT_EQUAL(size_t(2), msgs.size());
+ int numDeleted = 0;
+ for (uint32_t i = 0; i < 2; ++i) {
+ api::StorageReply& reply(dynamic_cast<api::StorageReply&>(*msgs[i]));
+ if (reply.getResult().getResult() == api::ReturnCode::BUCKET_DELETED) {
+ ++numDeleted;
+ }
}
+ CPPUNIT_ASSERT(numDeleted >= 1);
+ LOG(info, "TEST STAGE: done, releasing resume guard");
}
- CPPUNIT_ASSERT(numDeleted >= 1);
- LOG(info, "TEST STAGE: done, releasing resume guard");
+ // Ensure we don't shut down persistence threads before DeleteBucket op has completed
+ c.top.waitForMessages(1, 60*2);
}
} // namespace storage
diff --git a/storage/src/tests/persistence/filestorage/sanitycheckeddeletetest.cpp b/storage/src/tests/persistence/filestorage/sanitycheckeddeletetest.cpp
index b906c9f67d6..961d2628052 100644
--- a/storage/src/tests/persistence/filestorage/sanitycheckeddeletetest.cpp
+++ b/storage/src/tests/persistence/filestorage/sanitycheckeddeletetest.cpp
@@ -65,7 +65,7 @@ void SanityCheckedDeleteTest::delete_bucket_fails_when_provider_out_of_sync() {
// Send a put to another bucket to serialize the operation (guaranteed
// since we only have 1 thread and the delete always has max priority).
c.sendPut(syncBucket, DocumentIndex(0), PutTimestamp(1001));
- c.top.waitForMessages(1, MSG_WAIT_TIME);
+ c.top.waitForMessages(2, MSG_WAIT_TIME);
// Should still be able to get identical bucket info for bucket.
spi::BucketInfoResult infoResult(
_node->getPersistenceProvider().getBucketInfo(spiBucket));