author    Tor Egge <Tor.Egge@oath.com>    2017-10-13 15:06:14 +0000
committer Tor Egge <Tor.Egge@oath.com>    2017-10-13 15:06:14 +0000
commit    a179357a7f8ebc6d052c913d5ff6303ce3bc88fc (patch)
tree      9c986ba314b428b6cb5ccf2d4013ac3f69c2f6ef /storage
parent    e3f14147291b22bf13ec7fa2cafda748a80dd950 (diff)
Reduce number of calls to document::BucketSpace::placeHolder() by using
bucket space information from storage commands.
Diffstat (limited to 'storage')
-rw-r--r--  storage/src/vespa/storage/persistence/diskmoveoperationhandler.cpp  10
-rw-r--r--  storage/src/vespa/storage/persistence/mergehandler.cpp              20
-rw-r--r--  storage/src/vespa/storage/persistence/persistencethread.cpp         44
-rw-r--r--  storage/src/vespa/storage/persistence/processallhandler.cpp          7
4 files changed, 36 insertions, 45 deletions
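
The pattern repeated in each hunk below is the same: instead of rebuilding a document::Bucket from the command's bucket id plus document::BucketSpace::placeHolder(), the handler now takes the full bucket (bucket space included) straight from the command. A minimal sketch of that before/after, using hypothetical stand-in types rather than the real storage API:

// A minimal sketch with stand-in types; the real classes live in the
// document and persistence SPI libraries and are richer than this.
#include <cstdint>

namespace sketch {

struct BucketSpace { uint64_t id; };
struct BucketId    { uint64_t raw; };

// document::Bucket pairs a bucket id with the bucket space it belongs to.
struct Bucket {
    BucketSpace space;
    BucketId    id;
};

// Hypothetical stand-in for a storage command such as api::MergeBucketCommand.
struct StorageCommand {
    Bucket bucket;
    BucketId getBucketId() const { return bucket.id; } // id only
    Bucket   getBucket()   const { return bucket; }    // id plus bucket space
};

// Stand-in for document::BucketSpace::placeHolder().
BucketSpace placeHolder() { return BucketSpace{1}; }

// Before: every handler re-synthesised the bucket space via the placeholder.
Bucket makeBucketOld(const StorageCommand& cmd) {
    return Bucket{placeHolder(), cmd.getBucketId()};
}

// After: the bucket space already travels with the command, so the
// placeholder call disappears.
Bucket makeBucketNew(const StorageCommand& cmd) {
    return cmd.getBucket();
}

} // namespace sketch

For derived buckets such as split and join targets, the hunks reuse the source bucket's space (e.g. spiBucket.getBucketSpace()) instead of the placeholder.
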
diff --git a/storage/src/vespa/storage/persistence/diskmoveoperationhandler.cpp b/storage/src/vespa/storage/persistence/diskmoveoperationhandler.cpp
index 86b04066a5a..93c005728f4 100644
--- a/storage/src/vespa/storage/persistence/diskmoveoperationhandler.cpp
+++ b/storage/src/vespa/storage/persistence/diskmoveoperationhandler.cpp
@@ -22,7 +22,7 @@ DiskMoveOperationHandler::handleBucketDiskMove(BucketDiskMoveCommand& cmd,
_env._metrics.movedBuckets,
_env._component.getClock()));
- document::BucketId bucket(cmd.getBucketId());
+ document::Bucket bucket(cmd.getBucket());
uint32_t targetDisk(cmd.getDstDisk());
uint32_t deviceIndex(_env._partition);
@@ -45,8 +45,8 @@ DiskMoveOperationHandler::handleBucketDiskMove(BucketDiskMoveCommand& cmd,
bucket.toString().c_str(),
deviceIndex, targetDisk);
- spi::Bucket from(document::Bucket(document::BucketSpace::placeHolder(), bucket), spi::PartitionId(deviceIndex));
- spi::Bucket to(document::Bucket(document::BucketSpace::placeHolder(), bucket), spi::PartitionId(targetDisk));
+ spi::Bucket from(bucket, spi::PartitionId(deviceIndex));
+ spi::Bucket to(bucket, spi::PartitionId(targetDisk));
spi::Result result(
_provider.move(from, spi::PartitionId(targetDisk), context));
@@ -66,13 +66,13 @@ DiskMoveOperationHandler::handleBucketDiskMove(BucketDiskMoveCommand& cmd,
// is executed. moving queue should move delete command to correct disk
StorBucketDatabase::WrappedEntry entry(
_env.getBucketDatabase().get(
- bucket, "FileStorThread::onBucketDiskMove",
+ bucket.getBucketId(), "FileStorThread::onBucketDiskMove",
StorBucketDatabase::LOCK_IF_NONEXISTING_AND_NOT_CREATING));
// Move queued operations in bucket to new thread. Hold bucket lock
// while doing it, so filestor manager can't put in other operations
// first, such that operations change order.
- _env._fileStorHandler.remapQueueAfterDiskMove(bucket, deviceIndex, targetDisk);
+ _env._fileStorHandler.remapQueueAfterDiskMove(bucket.getBucketId(), deviceIndex, targetDisk);
if (entry.exist()) {
entry->setBucketInfo(bInfo);
diff --git a/storage/src/vespa/storage/persistence/mergehandler.cpp b/storage/src/vespa/storage/persistence/mergehandler.cpp
index 36e417b65e4..88bca44558c 100644
--- a/storage/src/vespa/storage/persistence/mergehandler.cpp
+++ b/storage/src/vespa/storage/persistence/mergehandler.cpp
@@ -955,8 +955,8 @@ MergeHandler::handleMergeBucket(api::MergeBucketCommand& cmd,
_env._metrics.mergeBuckets,
_env._component.getClock()));
- const document::BucketId& id(cmd.getBucketId());
- spi::Bucket bucket(document::Bucket(document::BucketSpace::placeHolder(), id), spi::PartitionId(_env._partition));
+ spi::Bucket bucket(cmd.getBucket(), spi::PartitionId(_env._partition));
+ const document::BucketId id(bucket.getBucketId());
LOG(debug, "MergeBucket(%s) with max timestamp %" PRIu64 ".",
bucket.toString().c_str(), cmd.getMaxTimestamp());
@@ -1181,8 +1181,8 @@ MergeHandler::handleGetBucketDiff(api::GetBucketDiffCommand& cmd,
MessageTracker::UP tracker(new MessageTracker(
_env._metrics.getBucketDiff,
_env._component.getClock()));
- const document::BucketId& id(cmd.getBucketId());
- spi::Bucket bucket(document::Bucket(document::BucketSpace::placeHolder(), id), spi::PartitionId(_env._partition));
+ spi::Bucket bucket(cmd.getBucket(), spi::PartitionId(_env._partition));
+ const document::BucketId id(bucket.getBucketId());
LOG(debug, "GetBucketDiff(%s)", bucket.toString().c_str());
checkResult(_spi.createBucket(bucket, context), bucket, "create bucket");
@@ -1304,8 +1304,8 @@ MergeHandler::handleGetBucketDiffReply(api::GetBucketDiffReply& reply,
MessageSender& sender)
{
++_env._metrics.getBucketDiffReply;
- document::BucketId id(reply.getBucketId());
- spi::Bucket bucket(document::Bucket(document::BucketSpace::placeHolder(), id), spi::PartitionId(_env._partition));
+ spi::Bucket bucket(reply.getBucket(), spi::PartitionId(_env._partition));
+ document::BucketId id(bucket.getBucketId());
LOG(debug, "GetBucketDiffReply(%s)", bucket.toString().c_str());
if (!_env._fileStorHandler.isMerging(id)) {
@@ -1388,8 +1388,8 @@ MergeHandler::handleApplyBucketDiff(api::ApplyBucketDiffCommand& cmd,
_env._metrics.applyBucketDiff,
_env._component.getClock()));
- const document::BucketId& id(cmd.getBucketId());
- spi::Bucket bucket(document::Bucket(document::BucketSpace::placeHolder(), id), spi::PartitionId(_env._partition));
+ spi::Bucket bucket(cmd.getBucket(), spi::PartitionId(_env._partition));
+ const document::BucketId id(bucket.getBucketId());
LOG(debug, "%s", cmd.toString().c_str());
if (_env._fileStorHandler.isMerging(id)) {
@@ -1484,8 +1484,8 @@ MergeHandler::handleApplyBucketDiffReply(api::ApplyBucketDiffReply& reply,
MessageSender& sender)
{
++_env._metrics.applyBucketDiffReply;
- document::BucketId id(reply.getBucketId());
- spi::Bucket bucket(document::Bucket(document::BucketSpace::placeHolder(), id), spi::PartitionId(_env._partition));
+ spi::Bucket bucket(reply.getBucket(), spi::PartitionId(_env._partition));
+ document::BucketId id(bucket.getBucketId());
std::vector<api::ApplyBucketDiffCommand::Entry>& diff(reply.getDiff());
LOG(debug, "%s", reply.toString().c_str());
diff --git a/storage/src/vespa/storage/persistence/persistencethread.cpp b/storage/src/vespa/storage/persistence/persistencethread.cpp
index 643eb208d52..7c2641643f3 100644
--- a/storage/src/vespa/storage/persistence/persistencethread.cpp
+++ b/storage/src/vespa/storage/persistence/persistencethread.cpp
@@ -257,8 +257,7 @@ PersistenceThread::handleMultiOperation(api::MultiOperationCommand& cmd)
MessageTracker::UP tracker(new MessageTracker(
_env._metrics.multiOp[cmd.getLoadType()],
_env._component.getClock()));
- spi::Bucket b = spi::Bucket(document::Bucket(document::BucketSpace::placeHolder(), cmd.getBucketId()),
- spi::PartitionId(_env._partition));
+ spi::Bucket b = spi::Bucket(cmd.getBucket(), spi::PartitionId(_env._partition));
long puts = 0;
long removes = 0;
long updates = 0;
@@ -314,8 +313,7 @@ PersistenceThread::handleRevert(api::RevertCommand& cmd)
MessageTracker::UP tracker(new MessageTracker(
_env._metrics.revert[cmd.getLoadType()],
_env._component.getClock()));
- spi::Bucket b = spi::Bucket(document::Bucket(document::BucketSpace::placeHolder(), cmd.getBucketId()),
- spi::PartitionId(_env._partition));
+ spi::Bucket b = spi::Bucket(cmd.getBucket(), spi::PartitionId(_env._partition));
const std::vector<api::Timestamp> tokens = cmd.getRevertTokens();
for (uint32_t i = 0; i < tokens.size(); ++i) {
spi::Result result = _spi.removeEntry(b,
@@ -337,7 +335,7 @@ PersistenceThread::handleCreateBucket(api::CreateBucketCommand& cmd)
cmd.getBucketId().toString().c_str());
DUMP_LOGGED_BUCKET_OPERATIONS(cmd.getBucketId());
}
- spi::Bucket spiBucket(document::Bucket(document::BucketSpace::placeHolder(), cmd.getBucketId()), spi::PartitionId(_env._partition));
+ spi::Bucket spiBucket(cmd.getBucket(), spi::PartitionId(_env._partition));
_spi.createBucket(spiBucket, _context);
if (cmd.getActive()) {
_spi.setActiveState(spiBucket, spi::BucketInfo::ACTIVE);
@@ -406,7 +404,7 @@ PersistenceThread::handleDeleteBucket(api::DeleteBucketCommand& cmd)
api::ReturnCode(api::ReturnCode::ABORTED,
"Bucket was deleted during the merge"));
}
- spi::Bucket bucket(document::Bucket(document::BucketSpace::placeHolder(), cmd.getBucketId()), spi::PartitionId(_env._partition));
+ spi::Bucket bucket(cmd.getBucket(), spi::PartitionId(_env._partition));
if (!checkProviderBucketInfoMatches(bucket, cmd.getBucketInfo())) {
return tracker;
}
@@ -497,7 +495,7 @@ PersistenceThread::handleCreateIterator(CreateIteratorCommand& cmd)
// _context is reset per command, so it's safe to modify it like this.
_context.setReadConsistency(cmd.getReadConsistency());
spi::CreateIteratorResult result(_spi.createIterator(
- spi::Bucket(document::Bucket(document::BucketSpace::placeHolder(), cmd.getBucketId()), spi::PartitionId(_env._partition)),
+ spi::Bucket(cmd.getBucket(), spi::PartitionId(_env._partition)),
*fieldSet,
cmd.getSelection(),
cmd.getIncludedVersions(),
@@ -533,7 +531,7 @@ PersistenceThread::handleSplitBucket(api::SplitBucketCommand& cmd)
return tracker;
}
- spi::Bucket spiBucket(document::Bucket(document::BucketSpace::placeHolder(), cmd.getBucketId()), spi::PartitionId(_env._partition));
+ spi::Bucket spiBucket(cmd.getBucket(), spi::PartitionId(_env._partition));
SplitBitDetector::Result targetInfo;
if (_env._config.enableMultibitSplitOptimalization) {
targetInfo = SplitBitDetector::detectSplit(
@@ -581,8 +579,8 @@ PersistenceThread::handleSplitBucket(api::SplitBucketCommand& cmd)
#endif
spi::Result result = _spi.split(
spiBucket,
- spi::Bucket(document::Bucket(document::BucketSpace::placeHolder(), target1), spi::PartitionId(lock1.disk)),
- spi::Bucket(document::Bucket(document::BucketSpace::placeHolder(), target2), spi::PartitionId(lock2.disk)), _context);
+ spi::Bucket(document::Bucket(spiBucket.getBucketSpace(), target1), spi::PartitionId(lock1.disk)),
+ spi::Bucket(document::Bucket(spiBucket.getBucketSpace(), target2), spi::PartitionId(lock2.disk)), _context);
if (result.hasError()) {
tracker->fail(_env.convertErrorCode(result),
result.getErrorMessage());
@@ -646,7 +644,7 @@ PersistenceThread::handleSplitBucket(api::SplitBucketCommand& cmd)
// to an empty target bucket, since the provider will have
// implicitly erased it by this point.
spi::Bucket createTarget(
- spi::Bucket(document::Bucket(document::BucketSpace::placeHolder(), targets[i].second.bid),
+ spi::Bucket(document::Bucket(spiBucket.getBucketSpace(), targets[i].second.bid),
spi::PartitionId(targets[i].second.diskIndex)));
LOG(debug,
"Split target %s was empty, but re-creating it since "
@@ -755,18 +753,16 @@ PersistenceThread::handleJoinBuckets(api::JoinBucketsCommand& cmd)
}
}
#endif
+ document::Bucket destBucket = cmd.getBucket();
spi::Result result =
- _spi.join(spi::Bucket(document::Bucket(document::BucketSpace::placeHolder(), firstBucket), spi::PartitionId(lock1.disk)),
- spi::Bucket(document::Bucket(document::BucketSpace::placeHolder(), secondBucket), spi::PartitionId(lock2.disk)),
- spi::Bucket(document::Bucket(document::BucketSpace::placeHolder(), cmd.getBucketId()),
- spi::PartitionId(_env._partition)),
+ _spi.join(spi::Bucket(document::Bucket(destBucket.getBucketSpace(), firstBucket), spi::PartitionId(lock1.disk)),
+ spi::Bucket(document::Bucket(destBucket.getBucketSpace(), secondBucket), spi::PartitionId(lock2.disk)),
+ spi::Bucket(destBucket, spi::PartitionId(_env._partition)),
_context);
if (!checkForError(result, *tracker)) {
return tracker;
}
- result = _spi.flush(spi::Bucket(document::Bucket(document::BucketSpace::placeHolder(), cmd.getBucketId()),
- spi::PartitionId(_env._partition)),
- _context);
+ result = _spi.flush(spi::Bucket(destBucket, spi::PartitionId(_env._partition)), _context);
if (!checkForError(result, *tracker)) {
return tracker;
}
@@ -813,7 +809,7 @@ PersistenceThread::handleSetBucketState(api::SetBucketStateCommand& cmd)
NotificationGuard notifyGuard(*_bucketOwnershipNotifier);
LOG(debug, "handleSetBucketState(): %s", cmd.toString().c_str());
- spi::Bucket bucket(document::Bucket(document::BucketSpace::placeHolder(), cmd.getBucketId()), spi::PartitionId(_env._partition));
+ spi::Bucket bucket(cmd.getBucket(), spi::PartitionId(_env._partition));
bool shouldBeActive(cmd.getState() == api::SetBucketStateCommand::ACTIVE);
spi::BucketInfo::ActiveState newState(
shouldBeActive
@@ -860,13 +856,11 @@ PersistenceThread::handleInternalBucketJoin(InternalBucketJoinCommand& cmd)
entry->disk = _env._partition;
entry.write();
}
+ document::Bucket destBucket = cmd.getBucket();
spi::Result result =
- _spi.join(spi::Bucket(document::Bucket(document::BucketSpace::placeHolder(), cmd.getBucketId()),
- spi::PartitionId(cmd.getDiskOfInstanceToJoin())),
- spi::Bucket(document::Bucket(document::BucketSpace::placeHolder(), cmd.getBucketId()),
- spi::PartitionId(cmd.getDiskOfInstanceToJoin())),
- spi::Bucket(document::Bucket(document::BucketSpace::placeHolder(), cmd.getBucketId()),
- spi::PartitionId(cmd.getDiskOfInstanceToKeep())),
+ _spi.join(spi::Bucket(destBucket, spi::PartitionId(cmd.getDiskOfInstanceToJoin())),
+ spi::Bucket(destBucket, spi::PartitionId(cmd.getDiskOfInstanceToJoin())),
+ spi::Bucket(destBucket, spi::PartitionId(cmd.getDiskOfInstanceToKeep())),
_context);
if (checkForError(result, *tracker)) {
tracker->setReply(
diff --git a/storage/src/vespa/storage/persistence/processallhandler.cpp b/storage/src/vespa/storage/persistence/processallhandler.cpp
index 67b150ec70a..51ef67dc7ac 100644
--- a/storage/src/vespa/storage/persistence/processallhandler.cpp
+++ b/storage/src/vespa/storage/persistence/processallhandler.cpp
@@ -87,9 +87,7 @@ ProcessAllHandler::handleRemoveLocation(api::RemoveLocationCommand& cmd,
cmd.getBucketId().toString().c_str(),
cmd.getDocumentSelection().c_str());
- spi::Bucket bucket(document::Bucket(document::BucketSpace::placeHolder(),
- cmd.getBucketId()),
- spi::PartitionId(_env._partition));
+ spi::Bucket bucket(cmd.getBucket(), spi::PartitionId(_env._partition));
UnrevertableRemoveEntryProcessor processor(_spi, bucket, context);
BucketProcessor::iterateAll(_spi,
bucket,
@@ -118,8 +116,7 @@ ProcessAllHandler::handleStatBucket(api::StatBucketCommand& cmd,
ost << "Persistence bucket " << cmd.getBucketId()
<< ", partition " << _env._partition << "\n";
- spi::Bucket bucket(document::Bucket(document::BucketSpace::placeHolder(), cmd.getBucketId()),
- spi::PartitionId(_env._partition));
+ spi::Bucket bucket(cmd.getBucket(), spi::PartitionId(_env._partition));
StatEntryProcessor processor(ost);
BucketProcessor::iterateAll(_spi,
bucket,