author     Tor Egge <Tor.Egge@oath.com>    2017-10-25 18:48:28 +0000
committer  Tor Egge <Tor.Egge@oath.com>    2017-10-26 08:55:20 +0000
commit     235b2756cc0af34dc968ff4cb4b5fc4455240547 (patch)
tree       23b80b9518282b981bd5a6100d5eabeb2fcc10be /storage
parent     106567609a5b0efc3285187582acf45a0899b5ef (diff)
Make document::BucketSpace a mandatory argument to
PersistenceUtil::getBucketDatabase().
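
The practical effect is that PersistenceUtil::getBucketDatabase() no longer resolves
implicitly against document::BucketSpace::placeHolder(); every caller now passes the
bucket space of the bucket it is operating on. A minimal sketch of the call-site
change follows (the _env member and the local bucket variable are illustrative
context drawn from the hunks below, not one specific call site):

    // Before this commit: the placeholder bucket space was used implicitly.
    StorBucketDatabase &db = _env.getBucketDatabase();

    // After this commit: the caller supplies the bucket space explicitly,
    // typically taken from the document::Bucket being processed.
    StorBucketDatabase &db = _env.getBucketDatabase(bucket.getBucketSpace());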
Diffstat (limited to 'storage')
-rw-r--r--  storage/src/tests/persistence/mergehandlertest.cpp                   2
-rw-r--r--  storage/src/tests/persistence/persistencetestutils.cpp              10
-rw-r--r--  storage/src/tests/persistence/persistencetestutils.h                 2
-rw-r--r--  storage/src/vespa/storage/persistence/diskmoveoperationhandler.cpp   2
-rw-r--r--  storage/src/vespa/storage/persistence/mergehandler.cpp               2
-rw-r--r--  storage/src/vespa/storage/persistence/persistencethread.cpp         26
-rw-r--r--  storage/src/vespa/storage/persistence/persistenceutil.cpp            4
-rw-r--r--  storage/src/vespa/storage/persistence/persistenceutil.h              4
8 files changed, 26 insertions(+), 26 deletions(-)
diff --git a/storage/src/tests/persistence/mergehandlertest.cpp b/storage/src/tests/persistence/mergehandlertest.cpp
index 47ed60b62ad..5f1f1ebe362 100644
--- a/storage/src/tests/persistence/mergehandlertest.cpp
+++ b/storage/src/tests/persistence/mergehandlertest.cpp
@@ -241,7 +241,7 @@ MergeHandlerTest::setUp() {
LOG(info, "Creating %s in bucket database", _bucket.toString().c_str());
bucketdb::StorageBucketInfo bucketDBEntry;
bucketDBEntry.disk = 0;
- getEnv().getBucketDatabase().insert(_bucket.getBucketId(), bucketDBEntry, "mergetestsetup");
+ getEnv().getBucketDatabase(_bucket.getBucketSpace()).insert(_bucket.getBucketId(), bucketDBEntry, "mergetestsetup");
LOG(info, "Creating bucket to merge");
createTestBucket(_bucket);
diff --git a/storage/src/tests/persistence/persistencetestutils.cpp b/storage/src/tests/persistence/persistencetestutils.cpp
index fb6bc298b08..fcfbcd0a78b 100644
--- a/storage/src/tests/persistence/persistencetestutils.cpp
+++ b/storage/src/tests/persistence/persistencetestutils.cpp
@@ -271,17 +271,17 @@ PersistenceTestUtils::createHeaderUpdate(
}
uint16_t
-PersistenceTestUtils::getDiskFromBucketDatabaseIfUnset(const document::BucketId& bucket,
+PersistenceTestUtils::getDiskFromBucketDatabaseIfUnset(const document::Bucket& bucket,
uint16_t disk)
{
if (disk == 0xffff) {
StorBucketDatabase::WrappedEntry entry(
- getEnv().getBucketDatabase().get(bucket, "createTestBucket"));
+ getEnv().getBucketDatabase(bucket.getBucketSpace()).get(bucket.getBucketId(), "createTestBucket"));
if (entry.exist()) {
return entry->disk;
} else {
std::ostringstream error;
- error << bucket << " not in db and disk unset";
+ error << bucket.toString() << " not in db and disk unset";
throw vespalib::IllegalStateException(error.str(), VESPA_STRLOC);
}
}
@@ -297,7 +297,7 @@ PersistenceTestUtils::doPut(const document::Document::SP& doc,
document::BucketId bucket(
_env->_component.getBucketIdFactory().getBucketId(doc->getId()));
bucket.setUsedBits(usedBits);
- disk = getDiskFromBucketDatabaseIfUnset(bucket, disk);
+ disk = getDiskFromBucketDatabaseIfUnset(makeDocumentBucket(bucket), disk);
doPut(doc, bucket, time, disk);
}
@@ -335,7 +335,7 @@ PersistenceTestUtils::doRemove(const document::DocumentId& id, spi::Timestamp ti
document::BucketId bucket(
_env->_component.getBucketIdFactory().getBucketId(id));
bucket.setUsedBits(usedBits);
- disk = getDiskFromBucketDatabaseIfUnset(bucket, disk);
+ disk = getDiskFromBucketDatabaseIfUnset(makeDocumentBucket(bucket), disk);
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
if (unrevertableRemove) {
diff --git a/storage/src/tests/persistence/persistencetestutils.h b/storage/src/tests/persistence/persistencetestutils.h
index 573ff28a80c..ee87925a0bc 100644
--- a/storage/src/tests/persistence/persistencetestutils.h
+++ b/storage/src/tests/persistence/persistencetestutils.h
@@ -154,7 +154,7 @@ public:
const document::DocumentId& id,
const document::FieldValue& updateValue);
- uint16_t getDiskFromBucketDatabaseIfUnset(const document::BucketId&,
+ uint16_t getDiskFromBucketDatabaseIfUnset(const document::Bucket &,
uint16_t disk = 0xffff);
/**
diff --git a/storage/src/vespa/storage/persistence/diskmoveoperationhandler.cpp b/storage/src/vespa/storage/persistence/diskmoveoperationhandler.cpp
index 4f4d47cacac..e906cfc624f 100644
--- a/storage/src/vespa/storage/persistence/diskmoveoperationhandler.cpp
+++ b/storage/src/vespa/storage/persistence/diskmoveoperationhandler.cpp
@@ -65,7 +65,7 @@ DiskMoveOperationHandler::handleBucketDiskMove(BucketDiskMoveCommand& cmd,
// delete bucket command. If so, it'll be deleted when delete bucket
// is executed. moving queue should move delete command to correct disk
StorBucketDatabase::WrappedEntry entry(
- _env.getBucketDatabase().get(
+ _env.getBucketDatabase(bucket.getBucketSpace()).get(
bucket.getBucketId(), "FileStorThread::onBucketDiskMove",
StorBucketDatabase::LOCK_IF_NONEXISTING_AND_NOT_CREATING));
diff --git a/storage/src/vespa/storage/persistence/mergehandler.cpp b/storage/src/vespa/storage/persistence/mergehandler.cpp
index 92de9720bfb..e17be7479f0 100644
--- a/storage/src/vespa/storage/persistence/mergehandler.cpp
+++ b/storage/src/vespa/storage/persistence/mergehandler.cpp
@@ -220,7 +220,7 @@ MergeHandler::buildBucketInfoList(
// fixed, but by making merge fix it, distributors will stop and spin
// on merge, never getting their problems fixed.
{
- StorBucketDatabase& db(_env.getBucketDatabase());
+ StorBucketDatabase& db(_env.getBucketDatabase(bucket.getBucketSpace()));
StorBucketDatabase::WrappedEntry entry(
db.get(bucket.getBucketId(), "MergeHandler::buildBucketInfoList"));
if (entry.exist()) {
diff --git a/storage/src/vespa/storage/persistence/persistencethread.cpp b/storage/src/vespa/storage/persistence/persistencethread.cpp
index 1988321a7b1..c42959b1a87 100644
--- a/storage/src/vespa/storage/persistence/persistencethread.cpp
+++ b/storage/src/vespa/storage/persistence/persistencethread.cpp
@@ -395,7 +395,7 @@ PersistenceThread::handleDeleteBucket(api::DeleteBucketCommand& cmd)
return tracker;
}
_spi.deleteBucket(bucket, _context);
- StorBucketDatabase& db(_env.getBucketDatabase());
+ StorBucketDatabase& db(_env.getBucketDatabase(cmd.getBucket().getBucketSpace()));
{
StorBucketDatabase::WrappedEntry entry(db.get(
cmd.getBucketId(), "FileStorThread::onDeleteBucket"));
@@ -574,7 +574,7 @@ PersistenceThread::handleSplitBucket(api::SplitBucketCommand& cmd)
}
// After split we need to take all bucket db locks to update them.
// Ensure to take them in rising order.
- StorBucketDatabase::WrappedEntry sourceEntry(_env.getBucketDatabase().get(
+ StorBucketDatabase::WrappedEntry sourceEntry(_env.getBucketDatabase(spiBucket.getBucket().getBucketSpace()).get(
cmd.getBucketId(), "PersistenceThread::handleSplitBucket-source"));
api::SplitBucketReply* splitReply(new api::SplitBucketReply(cmd));
tracker->setReply(api::StorageReply::SP(splitReply));
@@ -587,7 +587,7 @@ PersistenceThread::handleSplitBucket(api::SplitBucketCommand& cmd)
uint16_t disk(i == 0 ? lock1.disk : lock2.disk);
assert(target.getBucketId().getRawId() != 0);
targets.push_back(TargetInfo(
- _env.getBucketDatabase().get(
+ _env.getBucketDatabase(target.getBucketSpace()).get(
target.getBucketId(), "PersistenceThread::handleSplitBucket - Target",
StorBucketDatabase::CREATE_IF_NONEXISTING),
FileStorHandler::RemapInfo(target, disk)));
@@ -698,6 +698,7 @@ PersistenceThread::handleJoinBuckets(api::JoinBucketsCommand& cmd)
if (!validateJoinCommand(cmd, *tracker)) {
return tracker;
}
+ document::Bucket destBucket = cmd.getBucket();
// To avoid a potential deadlock all operations locking multiple
// buckets must lock their buckets in the same order (sort order of
// bucket id, lowest countbits, lowest location first).
@@ -706,8 +707,8 @@ PersistenceThread::handleJoinBuckets(api::JoinBucketsCommand& cmd)
{
// Create empty bucket for target.
StorBucketDatabase::WrappedEntry entry =
- _env.getBucketDatabase().get(
- cmd.getBucketId(),
+ _env.getBucketDatabase(destBucket.getBucketSpace()).get(
+ destBucket.getBucketId(),
"join",
StorBucketDatabase::CREATE_IF_NONEXISTING);
@@ -715,7 +716,6 @@ PersistenceThread::handleJoinBuckets(api::JoinBucketsCommand& cmd)
entry.write();
}
- document::Bucket destBucket = cmd.getBucket();
document::Bucket firstBucket(destBucket.getBucketSpace(), cmd.getSourceBuckets()[0]);
document::Bucket secondBucket(destBucket.getBucketSpace(), cmd.getSourceBuckets()[1]);
@@ -763,7 +763,7 @@ PersistenceThread::handleJoinBuckets(api::JoinBucketsCommand& cmd)
target);
// Remove source from bucket db.
StorBucketDatabase::WrappedEntry entry(
- _env.getBucketDatabase().get(
+ _env.getBucketDatabase(srcBucket.getBucketSpace()).get(
srcBucket.getBucketId(), "join-remove-source"));
if (entry.exist()) {
lastModified = std::max(lastModified,
@@ -773,8 +773,8 @@ PersistenceThread::handleJoinBuckets(api::JoinBucketsCommand& cmd)
}
{
StorBucketDatabase::WrappedEntry entry =
- _env.getBucketDatabase().get(
- cmd.getBucketId(),
+ _env.getBucketDatabase(destBucket.getBucketSpace()).get(
+ destBucket.getBucketId(),
"join",
StorBucketDatabase::CREATE_IF_NONEXISTING);
if (entry->info.getLastModified() == 0) {
@@ -804,7 +804,7 @@ PersistenceThread::handleSetBucketState(api::SetBucketStateCommand& cmd)
spi::Result result(_spi.setActiveState(bucket, newState));
if (checkForError(result, *tracker)) {
- StorBucketDatabase::WrappedEntry entry(_env.getBucketDatabase().get(
+ StorBucketDatabase::WrappedEntry entry(_env.getBucketDatabase(bucket.getBucket().getBucketSpace()).get(
cmd.getBucketId(), "handleSetBucketState"));
if (entry.exist()) {
entry->info.setActive(newState == spi::BucketInfo::ACTIVE);
@@ -831,18 +831,18 @@ PersistenceThread::handleInternalBucketJoin(InternalBucketJoinCommand& cmd)
MessageTracker::UP tracker(new MessageTracker(
_env._metrics.internalJoin,
_env._component.getClock()));
+ document::Bucket destBucket = cmd.getBucket();
{
// Create empty bucket for target.
StorBucketDatabase::WrappedEntry entry =
- _env.getBucketDatabase().get(
- cmd.getBucketId(),
+ _env.getBucketDatabase(destBucket.getBucketSpace()).get(
+ destBucket.getBucketId(),
"join",
StorBucketDatabase::CREATE_IF_NONEXISTING);
entry->disk = _env._partition;
entry.write();
}
- document::Bucket destBucket = cmd.getBucket();
spi::Result result =
_spi.join(spi::Bucket(destBucket, spi::PartitionId(cmd.getDiskOfInstanceToJoin())),
spi::Bucket(destBucket, spi::PartitionId(cmd.getDiskOfInstanceToJoin())),
diff --git a/storage/src/vespa/storage/persistence/persistenceutil.cpp b/storage/src/vespa/storage/persistence/persistenceutil.cpp
index d388f52f6f2..8e96c05a66a 100644
--- a/storage/src/vespa/storage/persistence/persistenceutil.cpp
+++ b/storage/src/vespa/storage/persistence/persistenceutil.cpp
@@ -92,7 +92,7 @@ PersistenceUtil::updateBucketDatabase(const document::Bucket &bucket,
const api::BucketInfo& i)
{
// Update bucket database
- StorBucketDatabase::WrappedEntry entry(getBucketDatabase().get(
+ StorBucketDatabase::WrappedEntry entry(getBucketDatabase(bucket.getBucketSpace()).get(
bucket.getBucketId(),
"env::updatebucketdb"));
if (entry.exist()) {
@@ -134,7 +134,7 @@ PersistenceUtil::lockAndGetDisk(const document::Bucket &bucket,
std::shared_ptr<FileStorHandler::BucketLockInterface> lock(
_fileStorHandler.lock(bucket, result.disk));
- StorBucketDatabase::WrappedEntry entry(getBucketDatabase().get(
+ StorBucketDatabase::WrappedEntry entry(getBucketDatabase(bucket.getBucketSpace()).get(
bucket.getBucketId(), "join-lockAndGetDisk-1", flags));
if (entry.exist() && entry->disk != result.disk) {
result.disk = entry->disk;
diff --git a/storage/src/vespa/storage/persistence/persistenceutil.h b/storage/src/vespa/storage/persistence/persistenceutil.h
index 94feb98bfec..5126931bab6 100644
--- a/storage/src/vespa/storage/persistence/persistenceutil.h
+++ b/storage/src/vespa/storage/persistence/persistenceutil.h
@@ -85,8 +85,8 @@ struct PersistenceUtil {
~PersistenceUtil();
- StorBucketDatabase& getBucketDatabase()
- { return _component.getBucketDatabase(document::BucketSpace::placeHolder()); }
+ StorBucketDatabase& getBucketDatabase(document::BucketSpace bucketSpace)
+ { return _component.getBucketDatabase(bucketSpace); }
void updateBucketDatabase(const document::Bucket &bucket,
const api::BucketInfo& info);