author     Tor Egge <Tor.Egge@broadpark.no>    2020-10-14 11:45:31 +0200
committer  Tor Egge <Tor.Egge@broadpark.no>    2020-10-14 15:51:34 +0200
commit     d8d068e38bf581875ab78defae4ea15fa5502e83 (patch)
tree       81d13f191ab5763d196e1c27c3b1a82b55289f74 /storage
parent     1ea40c61012e36f4cecf93194d3e15ef26a4b642 (diff)
Remove partitions from SPI.
Diffstat (limited to 'storage')
-rw-r--r--  storage/src/tests/bucketdb/bucketmanagertest.cpp | 8
-rw-r--r--  storage/src/tests/bucketdb/initializertest.cpp | 34
-rw-r--r--  storage/src/tests/common/metricstest.cpp | 12
-rw-r--r--  storage/src/tests/common/teststorageapp.cpp | 39
-rw-r--r--  storage/src/tests/common/teststorageapp.h | 3
-rw-r--r--  storage/src/tests/persistence/common/filestortestfixture.cpp | 4
-rw-r--r--  storage/src/tests/persistence/common/persistenceproviderwrapper.cpp | 13
-rw-r--r--  storage/src/tests/persistence/common/persistenceproviderwrapper.h | 3
-rw-r--r--  storage/src/tests/persistence/filestorage/filestormanagertest.cpp | 83
-rw-r--r--  storage/src/tests/persistence/filestorage/mergeblockingtest.cpp | 3
-rw-r--r--  storage/src/tests/persistence/filestorage/operationabortingtest.cpp | 3
-rw-r--r--  storage/src/tests/persistence/persistencequeuetest.cpp | 6
-rw-r--r--  storage/src/tests/persistence/persistencetestutils.cpp | 30
-rw-r--r--  storage/src/tests/persistence/splitbitdetectortest.cpp | 6
-rw-r--r--  storage/src/tests/storageserver/statereportertest.cpp | 4
-rw-r--r--  storage/src/tests/visiting/visitormanagertest.cpp | 2
-rw-r--r--  storage/src/vespa/storage/bucketdb/storagebucketdbinitializer.cpp | 20
-rw-r--r--  storage/src/vespa/storage/bucketdb/storagebucketdbinitializer.h | 5
-rw-r--r--  storage/src/vespa/storage/persistence/filestorage/filestorhandler.cpp | 8
-rw-r--r--  storage/src/vespa/storage/persistence/filestorage/filestorhandler.h | 7
-rw-r--r--  storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.cpp | 1
-rw-r--r--  storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.h | 2
-rw-r--r--  storage/src/vespa/storage/persistence/filestorage/filestormanager.cpp | 101
-rw-r--r--  storage/src/vespa/storage/persistence/filestorage/filestormanager.h | 3
-rw-r--r--  storage/src/vespa/storage/persistence/mergehandler.cpp | 15
-rw-r--r--  storage/src/vespa/storage/persistence/persistencethread.cpp | 51
-rw-r--r--  storage/src/vespa/storage/persistence/persistenceutil.cpp | 4
-rw-r--r--  storage/src/vespa/storage/persistence/processallhandler.cpp | 6
-rw-r--r--  storage/src/vespa/storage/persistence/provider_error_wrapper.cpp | 16
-rw-r--r--  storage/src/vespa/storage/persistence/provider_error_wrapper.h | 4
-rw-r--r--  storage/src/vespa/storage/storageserver/servicelayernode.cpp | 40
-rw-r--r--  storage/src/vespa/storage/storageserver/servicelayernode.h | 1
32 files changed, 201 insertions, 336 deletions
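
Before the per-file hunks, a minimal sketch of what the test-side setup looks like after this change, assembled from the hunks below; `node`, `config` and `bucketId` stand in for the various test fixtures' members:

    // DummyPersistence no longer takes a partition/disk count, and the provider
    // is now initialized explicitly (test code that previously called
    // getPartitionStates() calls initialize() instead).
    auto provider = std::make_unique<spi::dummy::DummyPersistence>(node.getTypeRepo());
    provider->initialize();
    node.setPersistenceProvider(std::move(provider));

    // FileStorManager is constructed without the spi::PartitionStateList argument.
    auto manager = std::make_unique<FileStorManager>(config.getConfigId(),
                                                     node.getPersistenceProvider(),
                                                     node.getComponentRegister());

    // SPI buckets are made without a spi::PartitionId.
    spi::Bucket bucket = makeSpiBucket(bucketId);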
diff --git a/storage/src/tests/bucketdb/bucketmanagertest.cpp b/storage/src/tests/bucketdb/bucketmanagertest.cpp
index 9668eec404d..51b477a2980 100644
--- a/storage/src/tests/bucketdb/bucketmanagertest.cpp
+++ b/storage/src/tests/bucketdb/bucketmanagertest.cpp
@@ -151,7 +151,7 @@ void BucketManagerTest::setupTestEnvironment(bool fakePersistenceLayer,
"config-doctypes", FileSpec("../config-doctypes.cfg")));
_top = std::make_unique<DummyStorageLink>();
_node = std::make_unique<TestServiceLayerApp>(
- DiskCount(2), NodeIndex(0), config.getConfigId());
+ DiskCount(1), NodeIndex(0), config.getConfigId());
_node->setTypeRepo(repo);
_node->setupDummyPersistence();
// Set up the 3 links
@@ -164,7 +164,7 @@ void BucketManagerTest::setupTestEnvironment(bool fakePersistenceLayer,
_top->push_back(std::move(bottom));
} else {
auto bottom = std::make_unique<FileStorManager>(
- config.getConfigId(), _node->getPartitions(),
+ config.getConfigId(),
_node->getPersistenceProvider(), _node->getComponentRegister());
_filestorManager = bottom.get();
_top->push_back(std::move(bottom));
@@ -191,7 +191,7 @@ void BucketManagerTest::addBucketsToDB(uint32_t count)
info.size = randomizer.nextUint32();
info.count = randomizer.nextUint32(1, 0xFFFF);
- info.partition = _node->getPartition(id);
+ info.partition = 0u;
_bucketInfo[id] = info;
}
@@ -454,7 +454,7 @@ TEST_F(BucketManagerTest, metrics_generation) {
_top->doneInit();
trigger_metric_manager_update();
- ASSERT_EQ(2u, bucket_manager_metrics().disks.size());
+ ASSERT_EQ(1u, bucket_manager_metrics().disks.size());
const DataStoredMetrics& m(*bucket_manager_metrics().disks[0]);
EXPECT_EQ(3, m.buckets.getLast());
EXPECT_EQ(300, m.docs.getLast());
diff --git a/storage/src/tests/bucketdb/initializertest.cpp b/storage/src/tests/bucketdb/initializertest.cpp
index 7d3d2c185da..c11e067731e 100644
--- a/storage/src/tests/bucketdb/initializertest.cpp
+++ b/storage/src/tests/bucketdb/initializertest.cpp
@@ -39,7 +39,6 @@ struct InitializerTest : public Test {
Redundancy redundancy;
uint32_t docsPerDisk;
DiskCount diskCount;
- std::set<uint32_t> disksDown;
bool bucketWrongDisk;
bool bucketMultipleDisks;
bool failingListRequest;
@@ -53,7 +52,7 @@ struct InitializerTest : public Test {
nodeCount(10),
redundancy(2),
docsPerDisk(10),
- diskCount(5),
+ diskCount(1),
bucketWrongDisk(false),
bucketMultipleDisks(false),
failingListRequest(false),
@@ -95,18 +94,6 @@ TEST_F(InitializerTest, init_with_multiple_disks) {
do_test_initialization(params);
}
-TEST_F(InitializerTest, init_with_bad_non_last_disk) {
- InitParams params;
- params.disksDown.insert(1);
- do_test_initialization(params);
-}
-
-TEST_F(InitializerTest, init_with_bad_last_disk) {
- InitParams params;
- params.disksDown.insert(params.diskCount - 1);
- do_test_initialization(params);
-}
-
TEST_F(InitializerTest, init_with_bucket_on_wrong_disk) {
InitParams params;
params.bucketWrongDisk = true;
@@ -169,9 +156,7 @@ buildBucketInfo(const document::TestDocMan& docMan,
{
std::map<PartitionId, DiskData> result;
for (uint32_t i=0; i<params.diskCount; ++i) {
- if (params.disksDown.find(i) == params.disksDown.end()) {
- result[i];
- }
+ result[i];
}
lib::Distribution distribution(
lib::Distribution::getDefaultDistributionConfig(
@@ -197,15 +182,9 @@ buildBucketInfo(const document::TestDocMan& docMan,
uint32_t partition(distribution.getIdealDisk(
nodeState, params.nodeIndex, bid,
lib::Distribution::IDEAL_DISK_EVEN_IF_DOWN));
- if (params.disksDown.find(partition) != params.disksDown.end()) {
- continue;
- }
if (useWrongDisk) {
int correctPart = partition;
partition = (partition + 1) % params.diskCount;;
- while (params.disksDown.find(partition) != params.disksDown.end()) {
- partition = (partition + 1) % params.diskCount;;
- }
LOG(debug, "Putting bucket %s on wrong disk %u instead of %u",
bid.toString().c_str(), partition, correctPart);
}
@@ -432,10 +411,7 @@ InitializerTest::do_test_initialization(InitParams& params)
{
std::map<PartitionId, DiskData> data(buildBucketInfo(_docMan, params));
- spi::PartitionStateList partitions(params.diskCount);
- for (const auto& p : params.disksDown) {
- partitions[p] = spi::PartitionState(spi::PartitionState::DOWN, "Set down in test");
- }
+ assert(params.diskCount == 1u);
TestServiceLayerApp node(params.diskCount, params.nodeIndex,
params.getConfig().getConfigId());
DummyStorageLink top;
@@ -443,7 +419,6 @@ InitializerTest::do_test_initialization(InitParams& params)
FakePersistenceLayer* bottom;
top.push_back(StorageLink::UP(initializer = new StorageBucketDBInitializer(
params.getConfig().getConfigId(),
- partitions,
node.getDoneInitializeHandler(),
node.getComponentRegister())));
top.push_back(StorageLink::UP(bottom = new FakePersistenceLayer(
@@ -579,7 +554,7 @@ TEST_F(InitializerTest, buckets_initialized_by_load) {
std::map<PartitionId, DiskData> data(buildBucketInfo(_docMan, params));
- spi::PartitionStateList partitions(params.diskCount);
+ assert(params.diskCount == 1u);
TestServiceLayerApp node(params.diskCount, params.nodeIndex,
params.getConfig().getConfigId());
DummyStorageLink top;
@@ -587,7 +562,6 @@ TEST_F(InitializerTest, buckets_initialized_by_load) {
FakePersistenceLayer* bottom;
top.push_back(StorageLink::UP(initializer = new StorageBucketDBInitializer(
params.getConfig().getConfigId(),
- partitions,
node.getDoneInitializeHandler(),
node.getComponentRegister())));
top.push_back(StorageLink::UP(bottom = new FakePersistenceLayer(
diff --git a/storage/src/tests/common/metricstest.cpp b/storage/src/tests/common/metricstest.cpp
index 2c7dda823da..59d8fa4b2d3 100644
--- a/storage/src/tests/common/metricstest.cpp
+++ b/storage/src/tests/common/metricstest.cpp
@@ -72,7 +72,7 @@ void MetricsTest::SetUp() {
assert(system(("rm -rf " + getRootFolder(*_config)).c_str()) == 0);
try {
_node = std::make_unique<TestServiceLayerApp>(
- DiskCount(4), NodeIndex(0), _config->getConfigId());
+ DiskCount(1), NodeIndex(0), _config->getConfigId());
_node->setupDummyPersistence();
_clock = &_node->getClock();
_clock->setAbsoluteTimeInSeconds(1000000);
@@ -93,7 +93,7 @@ void MetricsTest::SetUp() {
*_metricManager,
"status");
- uint16_t diskCount = _node->getPartitions().size();
+ uint16_t diskCount = 1u;
documentapi::LoadTypeSet::SP loadTypes(_node->getLoadTypes());
_filestorMetrics = std::make_shared<FileStorMetrics>(_node->getLoadTypes()->getMetricLoadTypes());
@@ -213,10 +213,10 @@ TEST_F(MetricsTest, filestor_metrics) {
bool retVal = _metricsConsumer->reportStatus(ost, path);
ASSERT_TRUE(retVal) << "_metricsConsumer->reportStatus failed";
std::string s = ost.str();
- EXPECT_THAT(s, HasSubstr("vds.filestor.alldisks.allthreads.get.sum.count count=240"));
- EXPECT_THAT(s, HasSubstr("vds.filestor.alldisks.allthreads.put.sum.count count=200"));
- EXPECT_THAT(s, HasSubstr("vds.filestor.alldisks.allthreads.remove.sum.count count=120"));
- EXPECT_THAT(s, HasSubstr("vds.filestor.alldisks.allthreads.remove.sum.not_found count=20"));
+ EXPECT_THAT(s, HasSubstr("vds.filestor.alldisks.allthreads.get.sum.count count=60"));
+ EXPECT_THAT(s, HasSubstr("vds.filestor.alldisks.allthreads.put.sum.count count=50"));
+ EXPECT_THAT(s, HasSubstr("vds.filestor.alldisks.allthreads.remove.sum.count count=30"));
+ EXPECT_THAT(s, HasSubstr("vds.filestor.alldisks.allthreads.remove.sum.not_found count=5"));
}
#define ASSERT_METRIC(interval, metric, count) \
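
The updated expectations above are consistent with the fixture shrinking from DiskCount(4) to DiskCount(1): the old all-disk sums divided by four give exactly the new values (240/4 = 60, 200/4 = 50, 120/4 = 30, 20/4 = 5).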
diff --git a/storage/src/tests/common/teststorageapp.cpp b/storage/src/tests/common/teststorageapp.cpp
index f2989f88765..992d6a54d91 100644
--- a/storage/src/tests/common/teststorageapp.cpp
+++ b/storage/src/tests/common/teststorageapp.cpp
@@ -139,8 +139,7 @@ TestServiceLayerApp::TestServiceLayerApp(vespalib::stringref configId)
: TestStorageApp(std::make_unique<ServiceLayerComponentRegisterImpl>(true), // TODO remove B-tree flag once default
lib::NodeType::STORAGE, getIndexFromConfig(configId), configId),
_compReg(dynamic_cast<ServiceLayerComponentRegisterImpl&>(TestStorageApp::getComponentRegister())),
- _persistenceProvider(),
- _partitions(1)
+ _persistenceProvider()
{
_compReg.setDiskCount(1);
lib::NodeState ns(*_nodeStateUpdater.getReportedNodeState());
@@ -153,16 +152,13 @@ TestServiceLayerApp::TestServiceLayerApp(DiskCount dc, NodeIndex index,
: TestStorageApp(std::make_unique<ServiceLayerComponentRegisterImpl>(true), // TODO remove B-tree flag once default
lib::NodeType::STORAGE, index, configId),
_compReg(dynamic_cast<ServiceLayerComponentRegisterImpl&>(TestStorageApp::getComponentRegister())),
- _persistenceProvider(),
- _partitions(dc)
+ _persistenceProvider()
{
- _compReg.setDiskCount(dc);
+ assert(dc == 1u);
+ _compReg.setDiskCount(1);
lib::NodeState ns(*_nodeStateUpdater.getReportedNodeState());
- ns.setDiskCount(dc);
+ ns.setDiskCount(1);
_nodeStateUpdater.setReportedNodeState(ns);
- // Tests should know how many disks they want to use. If testing auto
- // detection, you should not need this utility.
- assert(dc > 0);
}
TestServiceLayerApp::~TestServiceLayerApp() = default;
@@ -170,15 +166,15 @@ TestServiceLayerApp::~TestServiceLayerApp() = default;
void
TestServiceLayerApp::setupDummyPersistence()
{
- auto provider = std::make_unique<spi::dummy::DummyPersistence>(getTypeRepo(), _compReg.getDiskCount());
+ assert(_compReg.getDiskCount() == 1u);
+ auto provider = std::make_unique<spi::dummy::DummyPersistence>(getTypeRepo());
+ provider->initialize();
setPersistenceProvider(std::move(provider));
}
void
TestServiceLayerApp::setPersistenceProvider(PersistenceProviderUP provider)
{
- _partitions = provider->getPartitionStates().getList();
- assert(spi::PartitionId(_compReg.getDiskCount()) == _partitions.size());
_persistenceProvider = std::move(provider);
}
@@ -191,25 +187,6 @@ TestServiceLayerApp::getPersistenceProvider()
return *_persistenceProvider;
}
-spi::PartitionStateList&
-TestServiceLayerApp::getPartitions()
-{
- if (_persistenceProvider.get() == 0) {
- throw vespalib::IllegalStateException("Partition list requested but not initialized.", VESPA_STRLOC);
- }
- return _partitions;
-}
-
-uint16_t
-TestServiceLayerApp::getPartition(const document::BucketId& bucket)
-{
- lib::NodeState state(lib::NodeType::STORAGE, lib::State::UP);
- state.setDiskCount(_compReg.getDiskCount());
- return getDistribution()->getIdealDisk(
- state, _compReg.getIndex(), bucket.stripUnused(),
- lib::Distribution::IDEAL_DISK_EVEN_IF_DOWN);
-}
-
namespace {
template<typename T>
const T getConfig(vespalib::stringref configId) {
diff --git a/storage/src/tests/common/teststorageapp.h b/storage/src/tests/common/teststorageapp.h
index 03936d37788..f7edf5e0678 100644
--- a/storage/src/tests/common/teststorageapp.h
+++ b/storage/src/tests/common/teststorageapp.h
@@ -110,7 +110,6 @@ class TestServiceLayerApp : public TestStorageApp
using PersistenceProviderUP = std::unique_ptr<spi::PersistenceProvider>;
ServiceLayerComponentRegisterImpl& _compReg;
PersistenceProviderUP _persistenceProvider;
- spi::PartitionStateList _partitions;
public:
TestServiceLayerApp(vespalib::stringref configId = "");
@@ -124,8 +123,6 @@ public:
ServiceLayerComponentRegisterImpl& getComponentRegister() { return _compReg; }
spi::PersistenceProvider& getPersistenceProvider();
- spi::PartitionStateList& getPartitions();
- uint16_t getPartition(const document::BucketId&);
StorBucketDatabase& getStorageBucketDatabase() override {
return _compReg.getBucketSpaceRepo().get(document::FixedBucketSpaces::default_space()).bucketDatabase();
diff --git a/storage/src/tests/persistence/common/filestortestfixture.cpp b/storage/src/tests/persistence/common/filestortestfixture.cpp
index 352f1326463..1282bcf85c3 100644
--- a/storage/src/tests/persistence/common/filestortestfixture.cpp
+++ b/storage/src/tests/persistence/common/filestortestfixture.cpp
@@ -39,7 +39,8 @@ FileStorTestFixture::SetUp()
{
setupPersistenceThreads(1);
_node->setPersistenceProvider(
- std::make_unique<spi::dummy::DummyPersistence>(_node->getTypeRepo(), 1));
+ std::make_unique<spi::dummy::DummyPersistence>(_node->getTypeRepo()));
+ _node->getPersistenceProvider().initialize();
}
void
@@ -77,7 +78,6 @@ FileStorTestFixture::TestFileStorComponents::TestFileStorComponents(
const StorageLinkInjector& injector)
: _fixture(fixture),
manager(new FileStorManager(fixture._config->getConfigId(),
- fixture._node->getPartitions(),
fixture._node->getPersistenceProvider(),
fixture._node->getComponentRegister()))
{
diff --git a/storage/src/tests/persistence/common/persistenceproviderwrapper.cpp b/storage/src/tests/persistence/common/persistenceproviderwrapper.cpp
index 67a1c41a9ef..02c92fc1650 100644
--- a/storage/src/tests/persistence/common/persistenceproviderwrapper.cpp
+++ b/storage/src/tests/persistence/common/persistenceproviderwrapper.cpp
@@ -62,19 +62,12 @@ PersistenceProviderWrapper::toString() const
return ss.str();
}
-spi::PartitionStateListResult
-PersistenceProviderWrapper::getPartitionStates() const
-{
- LOG_SPI("getPartitionStates()");
- return _spi.getPartitionStates();
-}
-
spi::BucketIdListResult
-PersistenceProviderWrapper::listBuckets(BucketSpace bucketSpace, spi::PartitionId partitionId) const
+PersistenceProviderWrapper::listBuckets(BucketSpace bucketSpace) const
{
- LOG_SPI("listBuckets(" << bucketSpace.getId() << ", " << uint16_t(partitionId) << ")");
+ LOG_SPI("listBuckets(" << bucketSpace.getId() << ")");
CHECK_ERROR(spi::BucketIdListResult, FAIL_LIST_BUCKETS);
- return _spi.listBuckets(bucketSpace, partitionId);
+ return _spi.listBuckets(bucketSpace);
}
spi::Result
diff --git a/storage/src/tests/persistence/common/persistenceproviderwrapper.h b/storage/src/tests/persistence/common/persistenceproviderwrapper.h
index 75712750d68..4061343c8da 100644
--- a/storage/src/tests/persistence/common/persistenceproviderwrapper.h
+++ b/storage/src/tests/persistence/common/persistenceproviderwrapper.h
@@ -91,8 +91,7 @@ public:
}
spi::Result createBucket(const spi::Bucket&, spi::Context&) override;
- spi::PartitionStateListResult getPartitionStates() const override;
- spi::BucketIdListResult listBuckets(BucketSpace bucketSpace, spi::PartitionId) const override;
+ spi::BucketIdListResult listBuckets(BucketSpace bucketSpace) const override;
spi::BucketInfoResult getBucketInfo(const spi::Bucket&) const override;
spi::Result put(const spi::Bucket&, spi::Timestamp, spi::DocumentSP, spi::Context&) override;
spi::RemoveResult remove(const spi::Bucket&, spi::Timestamp, const spi::DocumentId&, spi::Context&) override;
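
For code calling into the SPI, the visible change in this wrapper is the bucket-listing entry point; a minimal usage sketch, assuming a `provider` implementing spi::PersistenceProvider and a `bucketSpace` value:

    // getPartitionStates() is gone from the interface, and bucket listing now
    // takes only the bucket space (previously listBuckets(bucketSpace, spi::PartitionId(...))).
    spi::BucketIdListResult buckets = provider.listBuckets(bucketSpace);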
diff --git a/storage/src/tests/persistence/filestorage/filestormanagertest.cpp b/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
index ab24d590d1a..b7165312785 100644
--- a/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
+++ b/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
@@ -77,7 +77,8 @@ struct FileStorManagerTest : Test{
void createBucket(document::BucketId bid, uint16_t disk)
{
spi::Context context(defaultLoadType, spi::Priority(0), spi::Trace::TraceLevel(0));
- _node->getPersistenceProvider().createBucket(makeSpiBucket(bid, spi::PartitionId(disk)), context);
+ assert(disk == 0u);
+ _node->getPersistenceProvider().createBucket(makeSpiBucket(bid), context);
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(bid, "foo", StorBucketDatabase::CREATE_IF_NONEXISTING));
@@ -213,7 +214,6 @@ struct TestFileStorComponents {
explicit TestFileStorComponents(FileStorManagerTest& test)
: manager(new FileStorManager(test.config->getConfigId(),
- test._node->getPartitions(),
test._node->getPersistenceProvider(),
test._node->getComponentRegister()))
{
@@ -241,7 +241,7 @@ TEST_F(FileStorManagerTest, header_only_put) {
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager =
- new FileStorManager(config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
+ new FileStorManager(config->getConfigId(), _node->getPersistenceProvider(), _node->getComponentRegister())));
top.open();
api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
// Creating a document to test with
@@ -308,7 +308,7 @@ TEST_F(FileStorManagerTest, put) {
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager =
- new FileStorManager(config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
+ new FileStorManager(config->getConfigId(), _node->getPersistenceProvider(), _node->getComponentRegister())));
top.open();
api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
// Creating a document to test with
@@ -339,7 +339,7 @@ TEST_F(FileStorManagerTest, state_change) {
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager =
- new FileStorManager(config->getConfigId(), _node->getPartitions(),
+ new FileStorManager(config->getConfigId(),
_node->getPersistenceProvider(),
_node->getComponentRegister())));
top.open();
@@ -356,7 +356,7 @@ TEST_F(FileStorManagerTest, flush) {
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager = new FileStorManager(
- config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
+ config->getConfigId(), _node->getPersistenceProvider(), _node->getComponentRegister())));
top.open();
api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
// Creating a document to test with
@@ -395,9 +395,9 @@ TEST_F(FileStorManagerTest, handler_priority) {
documentapi::LoadTypeSet loadTypes("raw:");
FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
- metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1, 1);
+ metrics.initDiskMetrics(1u, loadTypes.getMetricLoadTypes(), 1, 1);
- FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(), _node->getComponentRegister());
+ FileStorHandler filestorHandler(messageSender, metrics, _node->getComponentRegister());
filestorHandler.setGetNextMessageTimeout(50ms);
uint32_t stripeId = filestorHandler.getNextStripeId(0);
ASSERT_EQ(0u, stripeId);
@@ -503,9 +503,9 @@ TEST_F(FileStorManagerTest, handler_paused_multi_thread) {
documentapi::LoadTypeSet loadTypes("raw:");
FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
- metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1, 1);
+ metrics.initDiskMetrics(1u, loadTypes.getMetricLoadTypes(), 1, 1);
- FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(), _node->getComponentRegister());
+ FileStorHandler filestorHandler(messageSender, metrics, _node->getComponentRegister());
filestorHandler.setGetNextMessageTimeout(50ms);
std::string content("Here is some content which is in all documents");
@@ -549,9 +549,9 @@ TEST_F(FileStorManagerTest, handler_pause) {
documentapi::LoadTypeSet loadTypes("raw:");
FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
- metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1, 1);
+ metrics.initDiskMetrics(1u, loadTypes.getMetricLoadTypes(), 1, 1);
- FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(), _node->getComponentRegister());
+ FileStorHandler filestorHandler(messageSender, metrics, _node->getComponentRegister());
filestorHandler.setGetNextMessageTimeout(50ms);
uint32_t stripeId = filestorHandler.getNextStripeId(0);
@@ -595,9 +595,9 @@ TEST_F(FileStorManagerTest, remap_split) {
documentapi::LoadTypeSet loadTypes("raw:");
FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
- metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1, 1);
+ metrics.initDiskMetrics(1u, loadTypes.getMetricLoadTypes(), 1, 1);
- FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(), _node->getComponentRegister());
+ FileStorHandler filestorHandler(messageSender, metrics, _node->getComponentRegister());
filestorHandler.setGetNextMessageTimeout(50ms);
std::string content("Here is some content which is in all documents");
@@ -653,9 +653,9 @@ TEST_F(FileStorManagerTest, handler_timeout) {
documentapi::LoadTypeSet loadTypes("raw:");
FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
- metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(),1, 1);
+ metrics.initDiskMetrics(1u, loadTypes.getMetricLoadTypes(),1, 1);
- FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(), _node->getComponentRegister());
+ FileStorHandler filestorHandler(messageSender, metrics, _node->getComponentRegister());
filestorHandler.setGetNextMessageTimeout(50ms);
uint32_t stripeId = filestorHandler.getNextStripeId(0);
@@ -713,9 +713,9 @@ TEST_F(FileStorManagerTest, priority) {
documentapi::LoadTypeSet loadTypes("raw:");
FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
- metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(),1, 2);
+ metrics.initDiskMetrics(1u, loadTypes.getMetricLoadTypes(),1, 2);
- FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(), _node->getComponentRegister());
+ FileStorHandler filestorHandler(messageSender, metrics, _node->getComponentRegister());
std::unique_ptr<DiskThread> thread(createThread(
*config, *_node, _node->getPersistenceProvider(),
filestorHandler, *metrics.disks[0]->threads[0], 0));
@@ -794,8 +794,8 @@ TEST_F(FileStorManagerTest, split1) {
ForwardingMessageSender messageSender(*dummyManager);
documentapi::LoadTypeSet loadTypes("raw:");
FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
- metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1, 1);
- FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(), _node->getComponentRegister());
+ metrics.initDiskMetrics(1u, loadTypes.getMetricLoadTypes(), 1, 1);
+ FileStorHandler filestorHandler(messageSender, metrics, _node->getComponentRegister());
std::unique_ptr<DiskThread> thread(createThread(
*config, *_node, _node->getPersistenceProvider(),
filestorHandler, *metrics.disks[0]->threads[0], 0));
@@ -934,8 +934,8 @@ TEST_F(FileStorManagerTest, split_single_group) {
ForwardingMessageSender messageSender(*dummyManager);
documentapi::LoadTypeSet loadTypes("raw:");
FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
- metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(),1, 1);
- FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(), _node->getComponentRegister());
+ metrics.initDiskMetrics(1u, loadTypes.getMetricLoadTypes(),1, 1);
+ FileStorHandler filestorHandler(messageSender, metrics, _node->getComponentRegister());
spi::Context context(defaultLoadType, spi::Priority(0), spi::Trace::TraceLevel(0));
for (uint32_t j=0; j<1; ++j) {
// Test this twice, once where all the data ends up in file with
@@ -1051,8 +1051,8 @@ TEST_F(FileStorManagerTest, split_empty_target_with_remapped_ops) {
ForwardingMessageSender messageSender(*dummyManager);
documentapi::LoadTypeSet loadTypes("raw:");
FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
- metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1, 1);
- FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(), _node->getComponentRegister());
+ metrics.initDiskMetrics(1u, loadTypes.getMetricLoadTypes(), 1, 1);
+ FileStorHandler filestorHandler(messageSender, metrics, _node->getComponentRegister());
std::unique_ptr<DiskThread> thread(createThread(
*config, *_node, _node->getPersistenceProvider(),
filestorHandler, *metrics.disks[0]->threads[0], 0));
@@ -1116,8 +1116,8 @@ TEST_F(FileStorManagerTest, notify_on_split_source_ownership_changed) {
ForwardingMessageSender messageSender(*dummyManager);
documentapi::LoadTypeSet loadTypes("raw:");
FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
- metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1, 1);
- FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(), _node->getComponentRegister());
+ metrics.initDiskMetrics(1u, loadTypes.getMetricLoadTypes(), 1, 1);
+ FileStorHandler filestorHandler(messageSender, metrics, _node->getComponentRegister());
std::unique_ptr<DiskThread> thread(createThread(
*config, *_node, _node->getPersistenceProvider(),
filestorHandler, *metrics.disks[0]->threads[0], 0));
@@ -1157,8 +1157,8 @@ TEST_F(FileStorManagerTest, join) {
documentapi::LoadTypeSet loadTypes("raw:");
FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
- metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1, 1);
- FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(), _node->getComponentRegister());
+ metrics.initDiskMetrics(1u, loadTypes.getMetricLoadTypes(), 1, 1);
+ FileStorHandler filestorHandler(messageSender, metrics, _node->getComponentRegister());
std::unique_ptr<DiskThread> thread(createThread(
*config, *_node, _node->getPersistenceProvider(),
filestorHandler, *metrics.disks[0]->threads[0], 0));
@@ -1274,7 +1274,7 @@ TEST_F(FileStorManagerTest, visiting) {
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager = new FileStorManager(
- smallConfig->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
+ smallConfig->getConfigId(), _node->getPersistenceProvider(), _node->getComponentRegister())));
top.open();
// Adding documents to two buckets which we are going to visit
// We want one bucket in one slotfile, and one bucket with a file split
@@ -1392,7 +1392,7 @@ TEST_F(FileStorManagerTest, remove_location) {
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager =
- new FileStorManager(config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
+ new FileStorManager(config->getConfigId(), _node->getPersistenceProvider(), _node->getComponentRegister())));
top.open();
api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
document::BucketId bid(8, 0);
@@ -1435,7 +1435,7 @@ TEST_F(FileStorManagerTest, delete_bucket) {
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager = new FileStorManager(
- config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
+ config->getConfigId(), _node->getPersistenceProvider(), _node->getComponentRegister())));
top.open();
api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 2);
// Creating a document to test with
@@ -1481,7 +1481,7 @@ TEST_F(FileStorManagerTest, delete_bucket_rejects_outdated_bucket_info) {
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager = new FileStorManager(
- config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
+ config->getConfigId(), _node->getPersistenceProvider(), _node->getComponentRegister())));
top.open();
api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 2);
// Creating a document to test with
@@ -1533,7 +1533,7 @@ TEST_F(FileStorManagerTest, delete_bucket_with_invalid_bucket_info){
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager = new FileStorManager(
- config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
+ config->getConfigId(), _node->getPersistenceProvider(), _node->getComponentRegister())));
top.open();
api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 2);
// Creating a document to test with
@@ -1576,7 +1576,7 @@ TEST_F(FileStorManagerTest, no_timestamps) {
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager =
- new FileStorManager(config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
+ new FileStorManager(config->getConfigId(), _node->getPersistenceProvider(), _node->getComponentRegister())));
top.open();
api::StorageMessageAddress address(
"storage", lib::NodeType::STORAGE, 3);
@@ -1620,7 +1620,7 @@ TEST_F(FileStorManagerTest, equal_timestamps) {
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager =
- new FileStorManager(config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
+ new FileStorManager(config->getConfigId(), _node->getPersistenceProvider(), _node->getComponentRegister())));
top.open();
api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
// Creating a document to test with
@@ -1681,7 +1681,7 @@ TEST_F(FileStorManagerTest, get_iter) {
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager =
- new FileStorManager(config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
+ new FileStorManager(config->getConfigId(), _node->getPersistenceProvider(), _node->getComponentRegister())));
top.open();
api::StorageMessageAddress address(
"storage", lib::NodeType::STORAGE, 3);
@@ -1757,7 +1757,6 @@ TEST_F(FileStorManagerTest, set_bucket_active_state) {
DummyStorageLink top;
FileStorManager* manager(
new FileStorManager(config->getConfigId(),
- _node->getPartitions(),
_node->getPersistenceProvider(),
_node->getComponentRegister()));
top.push_back(unique_ptr<StorageLink>(manager));
@@ -1770,7 +1769,7 @@ TEST_F(FileStorManagerTest, set_bucket_active_state) {
const uint16_t disk = 0;
createBucket(bid, disk);
auto& provider = dynamic_cast<spi::dummy::DummyPersistence&>(_node->getPersistenceProvider());
- EXPECT_FALSE(provider.isActive(makeSpiBucket(bid, spi::PartitionId(disk))));
+ EXPECT_FALSE(provider.isActive(makeSpiBucket(bid)));
{
auto cmd = std::make_shared<api::SetBucketStateCommand>(
@@ -1785,7 +1784,7 @@ TEST_F(FileStorManagerTest, set_bucket_active_state) {
EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
}
- EXPECT_TRUE(provider.isActive(makeSpiBucket(bid, spi::PartitionId(disk))));
+ EXPECT_TRUE(provider.isActive(makeSpiBucket(bid)));
{
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(
@@ -1823,7 +1822,7 @@ TEST_F(FileStorManagerTest, set_bucket_active_state) {
EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
}
- EXPECT_FALSE(provider.isActive(makeSpiBucket(bid, spi::PartitionId(disk))));
+ EXPECT_FALSE(provider.isActive(makeSpiBucket(bid)));
{
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(
@@ -1836,7 +1835,6 @@ TEST_F(FileStorManagerTest, notify_owner_distributor_on_outdated_set_bucket_stat
DummyStorageLink top;
FileStorManager* manager(
new FileStorManager(config->getConfigId(),
- _node->getPartitions(),
_node->getPersistenceProvider(),
_node->getComponentRegister()));
top.push_back(unique_ptr<StorageLink>(manager));
@@ -1879,7 +1877,6 @@ TEST_F(FileStorManagerTest, GetBucketDiff_implicitly_creates_bucket) {
DummyStorageLink top;
FileStorManager* manager(
new FileStorManager(config->getConfigId(),
- _node->getPartitions(),
_node->getPersistenceProvider(),
_node->getComponentRegister()));
top.push_back(unique_ptr<StorageLink>(manager));
@@ -1911,7 +1908,6 @@ TEST_F(FileStorManagerTest, merge_bucket_implicitly_creates_bucket) {
DummyStorageLink top;
FileStorManager* manager(
new FileStorManager(config->getConfigId(),
- _node->getPartitions(),
_node->getPersistenceProvider(),
_node->getComponentRegister()));
top.push_back(unique_ptr<StorageLink>(manager));
@@ -1942,7 +1938,6 @@ TEST_F(FileStorManagerTest, newly_created_bucket_is_ready) {
DummyStorageLink top;
FileStorManager* manager(
new FileStorManager(config->getConfigId(),
- _node->getPartitions(),
_node->getPersistenceProvider(),
_node->getComponentRegister()));
top.push_back(unique_ptr<StorageLink>(manager));
diff --git a/storage/src/tests/persistence/filestorage/mergeblockingtest.cpp b/storage/src/tests/persistence/filestorage/mergeblockingtest.cpp
index c73ae7e506c..58f6587e087 100644
--- a/storage/src/tests/persistence/filestorage/mergeblockingtest.cpp
+++ b/storage/src/tests/persistence/filestorage/mergeblockingtest.cpp
@@ -15,7 +15,8 @@ namespace storage {
struct MergeBlockingTest : public FileStorTestFixture {
void setupDisks() {
FileStorTestFixture::setupPersistenceThreads(1);
- _node->setPersistenceProvider(std::make_unique<spi::dummy::DummyPersistence>(_node->getTypeRepo(), 1));
+ _node->setPersistenceProvider(std::make_unique<spi::dummy::DummyPersistence>(_node->getTypeRepo()));
+ _node->getPersistenceProvider().initialize();
}
void SetUp() override;
diff --git a/storage/src/tests/persistence/filestorage/operationabortingtest.cpp b/storage/src/tests/persistence/filestorage/operationabortingtest.cpp
index 7810a595012..eb7f2f883ab 100644
--- a/storage/src/tests/persistence/filestorage/operationabortingtest.cpp
+++ b/storage/src/tests/persistence/filestorage/operationabortingtest.cpp
@@ -84,7 +84,8 @@ struct OperationAbortingTest : FileStorTestFixture {
void setupProviderAndBarriers(uint32_t queueBarrierThreads) {
FileStorTestFixture::setupPersistenceThreads(1);
- _dummyProvider = std::make_unique<spi::dummy::DummyPersistence>(_node->getTypeRepo(), 1);
+ _dummyProvider = std::make_unique<spi::dummy::DummyPersistence>(_node->getTypeRepo());
+ _dummyProvider->initialize();
_queueBarrier = std::make_unique<vespalib::Barrier>(queueBarrierThreads);
_completionBarrier = std::make_unique<vespalib::Barrier>(2);
auto blockingProvider = std::make_unique<BlockingMockProvider>(*_dummyProvider, *_queueBarrier, *_completionBarrier);
diff --git a/storage/src/tests/persistence/persistencequeuetest.cpp b/storage/src/tests/persistence/persistencequeuetest.cpp
index 73a34a3f1cf..8edb03b67fa 100644
--- a/storage/src/tests/persistence/persistencequeuetest.cpp
+++ b/storage/src/tests/persistence/persistencequeuetest.cpp
@@ -49,9 +49,9 @@ PersistenceQueueTest::Fixture::Fixture(FileStorTestFixture& parent_)
top.push_back(std::move(dummyManager));
top.open();
- metrics.initDiskMetrics(parent._node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1, 1);
+ metrics.initDiskMetrics(1u, loadTypes.getMetricLoadTypes(), 1, 1);
- filestorHandler = std::make_unique<FileStorHandler>(messageSender, metrics, parent._node->getPartitions(),
+ filestorHandler = std::make_unique<FileStorHandler>(messageSender, metrics,
parent._node->getComponentRegister());
// getNextMessage will time out if no unlocked buckets are present. Choose a timeout
// that is large enough to fail tests with high probability if this is not the case,
@@ -65,7 +65,7 @@ PersistenceQueueTest::Fixture::~Fixture() = default;
void PersistenceQueueTest::SetUp() {
setupPersistenceThreads(1);
- _node->setPersistenceProvider(std::make_unique<spi::dummy::DummyPersistence>(_node->getTypeRepo(), 1));
+ _node->setPersistenceProvider(std::make_unique<spi::dummy::DummyPersistence>(_node->getTypeRepo()));
}
std::shared_ptr<api::StorageMessage> PersistenceQueueTest::createPut(uint64_t bucket, uint64_t docIdx) {
diff --git a/storage/src/tests/persistence/persistencetestutils.cpp b/storage/src/tests/persistence/persistencetestutils.cpp
index f50fbb0c8e8..11a876ad0e5 100644
--- a/storage/src/tests/persistence/persistencetestutils.cpp
+++ b/storage/src/tests/persistence/persistencetestutils.cpp
@@ -57,7 +57,6 @@ PersistenceTestEnvironment::PersistenceTestEnvironment(DiskCount numDisks, const
_node.setupDummyPersistence();
_metrics.initDiskMetrics(numDisks, _node.getLoadTypes()->getMetricLoadTypes(), 1, 1);
_handler = std::make_unique<FileStorHandler>(_messageKeeper, _metrics,
- _node.getPersistenceProvider().getPartitionStates().getList(),
_node.getComponentRegister());
for (uint32_t i = 0; i < numDisks; i++) {
_diskEnvs.push_back(
@@ -78,7 +77,8 @@ PersistenceTestUtils::~PersistenceTestUtils() = default;
std::string
PersistenceTestUtils::dumpBucket(const document::BucketId& bid, uint16_t disk) {
- return dynamic_cast<spi::dummy::DummyPersistence&>(_env->_node.getPersistenceProvider()).dumpBucket(makeSpiBucket(bid, spi::PartitionId(disk)));
+ assert(disk == 0u);
+ return dynamic_cast<spi::dummy::DummyPersistence&>(_env->_node.getPersistenceProvider()).dumpBucket(makeSpiBucket(bid));
}
void
@@ -166,7 +166,8 @@ PersistenceTestUtils::doPutOnDisk(
{
document::Document::SP doc(createRandomDocumentAtLocation(
location, timestamp, minSize, maxSize));
- spi::Bucket b(makeSpiBucket(document::BucketId(16, location), spi::PartitionId(disk)));
+ assert(disk == 0u);
+ spi::Bucket b(makeSpiBucket(document::BucketId(16, location)));
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
@@ -187,14 +188,15 @@ PersistenceTestUtils::doRemoveOnDisk(
{
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
+ assert(disk == 0u);
if (persistRemove) {
spi::RemoveResult result = getPersistenceProvider().removeIfFound(
- makeSpiBucket(bucketId, spi::PartitionId(disk)),
+ makeSpiBucket(bucketId),
timestamp, docId, context);
return result.wasFound();
}
spi::RemoveResult result = getPersistenceProvider().remove(
- makeSpiBucket(bucketId, spi::PartitionId(disk)),
+ makeSpiBucket(bucketId),
timestamp, docId, context);
return result.wasFound();
@@ -207,10 +209,11 @@ PersistenceTestUtils::doUnrevertableRemoveOnDisk(
const document::DocumentId& docId,
spi::Timestamp timestamp)
{
+ assert(disk == 0u);
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
spi::RemoveResult result = getPersistenceProvider().remove(
- makeSpiBucket(bucketId, spi::PartitionId(disk)),
+ makeSpiBucket(bucketId),
timestamp, docId, context);
return result.wasFound();
}
@@ -224,8 +227,8 @@ PersistenceTestUtils::doGetOnDisk(
auto fieldSet = std::make_unique<document::AllFields>();
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
- return getPersistenceProvider().get(makeSpiBucket(
- bucketId, spi::PartitionId(disk)), *fieldSet, docId, context);
+ assert(disk == 0u);
+ return getPersistenceProvider().get(makeSpiBucket(bucketId), *fieldSet, docId, context);
}
document::DocumentUpdate::SP
@@ -289,7 +292,8 @@ PersistenceTestUtils::doPut(const document::Document::SP& doc,
spi::Timestamp time,
uint16_t disk)
{
- spi::Bucket b(makeSpiBucket(bid, spi::PartitionId(disk)));
+ assert(disk == 0u);
+ spi::Bucket b(makeSpiBucket(bid));
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
getPersistenceProvider().createBucket(b, context);
@@ -304,8 +308,9 @@ PersistenceTestUtils::doUpdate(document::BucketId bid,
{
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
+ assert(disk == 0u);
return getPersistenceProvider().update(
- makeSpiBucket(bid, spi::PartitionId(disk)), time, update, context);
+ makeSpiBucket(bid), time, update, context);
}
void
@@ -319,12 +324,13 @@ PersistenceTestUtils::doRemove(const document::DocumentId& id, spi::Timestamp ti
disk = getDiskFromBucketDatabaseIfUnset(makeDocumentBucket(bucket), disk);
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
+ assert(disk == 0u);
if (unrevertableRemove) {
getPersistenceProvider().remove(
- makeSpiBucket(bucket, spi::PartitionId(disk)), time, id, context);
+ makeSpiBucket(bucket), time, id, context);
} else {
spi::RemoveResult result = getPersistenceProvider().removeIfFound(
- makeSpiBucket(bucket, spi::PartitionId(disk)), time, id, context);
+ makeSpiBucket(bucket), time, id, context);
if (!result.wasFound()) {
throw vespalib::IllegalStateException(
"Attempted to remove non-existing doc " + id.toString(),
diff --git a/storage/src/tests/persistence/splitbitdetectortest.cpp b/storage/src/tests/persistence/splitbitdetectortest.cpp
index a2d17117886..69f8268ff2c 100644
--- a/storage/src/tests/persistence/splitbitdetectortest.cpp
+++ b/storage/src/tests/persistence/splitbitdetectortest.cpp
@@ -28,12 +28,12 @@ struct SplitBitDetectorTest : Test {
SplitBitDetectorTest()
: testDocMan(),
- provider(testDocMan.getTypeRepoSP(), 1),
+ provider(testDocMan.getTypeRepoSP()),
bucket(makeSpiBucket(document::BucketId(1, 1))),
context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0))
{
- provider.getPartitionStates();
+ provider.initialize();
provider.createBucket(bucket, context);
}
};
@@ -109,7 +109,7 @@ TEST_F(SplitBitDetectorTest, max_bits_one_below_max) {
SplitBitDetector::Result result(
SplitBitDetector::detectSplit(provider, my_bucket, 15, context));
EXPECT_EQ("SplitTargets(error: No use in trying to split "
- "Bucket(0x3c00000000000001, partition 0) when max split"
+ "Bucket(0x3c00000000000001) when max split"
" bit is set to 15.)",
result.toString());
diff --git a/storage/src/tests/storageserver/statereportertest.cpp b/storage/src/tests/storageserver/statereportertest.cpp
index a7d18b21516..f0907bffba1 100644
--- a/storage/src/tests/storageserver/statereportertest.cpp
+++ b/storage/src/tests/storageserver/statereportertest.cpp
@@ -70,7 +70,7 @@ void StateReporterTest::SetUp() {
_config = std::make_unique<vdstestlib::DirConfig>(getStandardConfig(true, "statereportertest"));
assert(system(("rm -rf " + getRootFolder(*_config)).c_str()) == 0);
- _node = std::make_unique<TestServiceLayerApp>(DiskCount(4), NodeIndex(0), _config->getConfigId());
+ _node = std::make_unique<TestServiceLayerApp>(DiskCount(1), NodeIndex(0), _config->getConfigId());
_node->setupDummyPersistence();
_clock = &_node->getClock();
_clock->setAbsoluteTimeInSeconds(1000000);
@@ -89,7 +89,7 @@ void StateReporterTest::SetUp() {
_generationFetcher,
"status");
- uint16_t diskCount = _node->getPartitions().size();
+ uint16_t diskCount = 1u;
documentapi::LoadTypeSet::SP loadTypes(_node->getLoadTypes());
_filestorMetrics = std::make_shared<FileStorMetrics>(_node->getLoadTypes()->getMetricLoadTypes());
diff --git a/storage/src/tests/visiting/visitormanagertest.cpp b/storage/src/tests/visiting/visitormanagertest.cpp
index 08d7d97b839..c6ce935b611 100644
--- a/storage/src/tests/visiting/visitormanagertest.cpp
+++ b/storage/src/tests/visiting/visitormanagertest.cpp
@@ -96,7 +96,7 @@ VisitorManagerTest::initializeTest()
config.getConfigId(), _node->getComponentRegister(),
*_messageSessionFactory)));
_top->push_back(std::unique_ptr<StorageLink>(new FileStorManager(
- config.getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
+ config.getConfigId(), _node->getPersistenceProvider(), _node->getComponentRegister())));
_manager->setTimeBetweenTicks(10);
_top->open();
diff --git a/storage/src/vespa/storage/bucketdb/storagebucketdbinitializer.cpp b/storage/src/vespa/storage/bucketdb/storagebucketdbinitializer.cpp
index 2c1a149ab4b..0878ffb3d99 100644
--- a/storage/src/vespa/storage/bucketdb/storagebucketdbinitializer.cpp
+++ b/storage/src/vespa/storage/bucketdb/storagebucketdbinitializer.cpp
@@ -63,13 +63,11 @@ StorageBucketDBInitializer::Config::Config(const config::ConfigUri & configUri)
StorageBucketDBInitializer::System::~System() = default;
StorageBucketDBInitializer::System::System(
- const spi::PartitionStateList& partitions,
DoneInitializeHandler& doneInitializeHandler,
ServiceLayerComponentRegister& compReg,
const Config&)
: _doneInitializeHandler(doneInitializeHandler),
_component(compReg, "storagebucketdbinitializer"),
- _partitions(partitions),
_bucketSpaceRepo(_component.getBucketSpaceRepo()),
_nodeIndex(_component.getIndex()),
_nodeState()
@@ -78,12 +76,7 @@ StorageBucketDBInitializer::System::System(
// so it could work with disk capacities. Object is used to check for
// correct disk further down (in the case of internal join, deciding which
// should have it). Not that bad if wrong disk is picked though.
- _nodeState.setDiskCount(_partitions.size());
- for (uint32_t i=0; i<_partitions.size(); ++i) {
- if (!_partitions[i].isUp()) {
- _nodeState.setDiskState(i, lib::State::DOWN);
- }
- }
+ _nodeState.setDiskCount(1);
}
StorBucketDatabase &
@@ -131,20 +124,18 @@ StorageBucketDBInitializer::GlobalState::~GlobalState() { }
StorageBucketDBInitializer::StorageBucketDBInitializer(
const config::ConfigUri & configUri,
- const spi::PartitionStateList& partitions,
DoneInitializeHandler& doneInitializeHandler,
ServiceLayerComponentRegister& compReg)
: StorageLink("StorageBucketDBInitializer"),
framework::HtmlStatusReporter("dbinit", "Bucket database initializer"),
_config(configUri),
- _system(partitions, doneInitializeHandler, compReg, _config),
+ _system(doneInitializeHandler, compReg, _config),
_metrics(_system._component),
_state(),
- _readState(_system._partitions.size())
+ _readState(1u)
{
// Initialize read state for disks being available
- for (uint32_t i=0; i<_system._partitions.size(); ++i) {
- if (!_system._partitions[i].isUp()) continue;
+ for (uint32_t i=0; i< _readState.size(); ++i) {
_readState[i] = std::make_unique<BucketSpaceReadState>();
for (const auto &elem : _system._bucketSpaceRepo) {
_readState[i]->emplace(elem.first, std::make_unique<BucketReadState>());
@@ -167,8 +158,7 @@ void
StorageBucketDBInitializer::onOpen()
{
// Trigger bucket database initialization
- for (uint32_t i=0; i<_system._partitions.size(); ++i) {
- if (!_system._partitions[i].isUp()) continue;
+ for (uint32_t i=0; i< _readState.size(); ++i) {
assert(_readState[i]);
const BucketSpaceReadState &spaceState = *_readState[i];
for (const auto &stateElem : spaceState) {
diff --git a/storage/src/vespa/storage/bucketdb/storagebucketdbinitializer.h b/storage/src/vespa/storage/bucketdb/storagebucketdbinitializer.h
index 102e27e44d0..00d39965151 100644
--- a/storage/src/vespa/storage/bucketdb/storagebucketdbinitializer.h
+++ b/storage/src/vespa/storage/bucketdb/storagebucketdbinitializer.h
@@ -78,14 +78,12 @@ class StorageBucketDBInitializer : public StorageLink,
struct System {
DoneInitializeHandler& _doneInitializeHandler;
ServiceLayerComponent _component;
- const spi::PartitionStateList& _partitions;
const ContentBucketSpaceRepo& _bucketSpaceRepo;
uint32_t _nodeIndex;
lib::NodeState _nodeState; // Disk info for ideal state calculations
framework::Thread::UP _thread;
- System(const spi::PartitionStateList&,
- DoneInitializeHandler& doneInitializeHandler,
+ System(DoneInitializeHandler& doneInitializeHandler,
ServiceLayerComponentRegister&,
const Config&);
~System();
@@ -145,7 +143,6 @@ private:
public:
StorageBucketDBInitializer(const config::ConfigUri&,
- const spi::PartitionStateList&,
DoneInitializeHandler&,
ServiceLayerComponentRegister&);
~StorageBucketDBInitializer();
diff --git a/storage/src/vespa/storage/persistence/filestorage/filestorhandler.cpp b/storage/src/vespa/storage/persistence/filestorage/filestorhandler.cpp
index bfd20388c3c..9d9c7e10111 100644
--- a/storage/src/vespa/storage/persistence/filestorage/filestorhandler.cpp
+++ b/storage/src/vespa/storage/persistence/filestorage/filestorhandler.cpp
@@ -5,15 +5,15 @@
namespace storage {
FileStorHandler::FileStorHandler(MessageSender& sender, FileStorMetrics& metrics,
- const spi::PartitionStateList& partitions, ServiceLayerComponentRegister& compReg)
- : _impl(std::make_unique<FileStorHandlerImpl>(1, 1, sender, metrics, partitions, compReg))
+ ServiceLayerComponentRegister& compReg)
+ : _impl(std::make_unique<FileStorHandlerImpl>(1, 1, sender, metrics, compReg))
{
}
FileStorHandler::FileStorHandler(uint32_t numThreads, uint32_t numStripes, MessageSender& sender, FileStorMetrics& metrics,
- const spi::PartitionStateList& partitions, ServiceLayerComponentRegister& compReg)
- : _impl(std::make_unique<FileStorHandlerImpl>(numThreads, numStripes, sender, metrics, partitions, compReg))
+ ServiceLayerComponentRegister& compReg)
+ : _impl(std::make_unique<FileStorHandlerImpl>(numThreads, numStripes, sender, metrics, compReg))
{
}
diff --git a/storage/src/vespa/storage/persistence/filestorage/filestorhandler.h b/storage/src/vespa/storage/persistence/filestorage/filestorhandler.h
index bda38b9379d..ccce5b7326a 100644
--- a/storage/src/vespa/storage/persistence/filestorage/filestorhandler.h
+++ b/storage/src/vespa/storage/persistence/filestorage/filestorhandler.h
@@ -25,9 +25,6 @@ namespace api {
class StorageCommand;
class StorageReply;
}
-namespace spi {
- class PartitionStateList;
-}
namespace framework {
class HttpUrlPath;
}
@@ -71,9 +68,9 @@ public:
};
FileStorHandler(uint32_t numThreads, uint32_t numStripes, MessageSender&, FileStorMetrics&,
- const spi::PartitionStateList&, ServiceLayerComponentRegister&);
+ ServiceLayerComponentRegister&);
FileStorHandler(MessageSender&, FileStorMetrics&,
- const spi::PartitionStateList&, ServiceLayerComponentRegister&);
+ ServiceLayerComponentRegister&);
~FileStorHandler();
// Commands used by file stor manager
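
Constructing the handler now mirrors what the updated tests above do; a short sketch, where `messageSender`, `loadTypes`, and `componentRegister` stand in for the test fixtures' members:

    // Both FileStorHandler constructors lose the spi::PartitionStateList parameter.
    FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
    metrics.initDiskMetrics(1u, loadTypes.getMetricLoadTypes(), 1, 1); // single disk
    FileStorHandler filestorHandler(messageSender, metrics, componentRegister);
    filestorHandler.setGetNextMessageTimeout(50ms);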
diff --git a/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.cpp b/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.cpp
index 05e74a98f0f..518523be7a2 100644
--- a/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.cpp
+++ b/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.cpp
@@ -39,7 +39,6 @@ uint32_t per_stripe_merge_limit(uint32_t num_threads, uint32_t num_stripes) noex
FileStorHandlerImpl::FileStorHandlerImpl(uint32_t numThreads, uint32_t numStripes, MessageSender& sender,
FileStorMetrics& metrics,
- [[maybe_unused]] const spi::PartitionStateList& partitions,
ServiceLayerComponentRegister& compReg)
: _component(compReg, "filestorhandlerimpl"),
_diskInfo(),
diff --git a/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.h b/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.h
index 1804a47f033..90b0e559899 100644
--- a/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.h
+++ b/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.h
@@ -235,7 +235,7 @@ public:
};
FileStorHandlerImpl(uint32_t numThreads, uint32_t numStripes, MessageSender&, FileStorMetrics&,
- const spi::PartitionStateList&, ServiceLayerComponentRegister&);
+ ServiceLayerComponentRegister&);
~FileStorHandlerImpl();
void setGetNextMessageTimeout(vespalib::duration timeout) { _getNextMessageTimeout = timeout; }
diff --git a/storage/src/vespa/storage/persistence/filestorage/filestormanager.cpp b/storage/src/vespa/storage/persistence/filestorage/filestormanager.cpp
index d3c4c3ef85e..c4369a94161 100644
--- a/storage/src/vespa/storage/persistence/filestorage/filestormanager.cpp
+++ b/storage/src/vespa/storage/persistence/filestorage/filestormanager.cpp
@@ -26,13 +26,12 @@ using document::BucketSpace;
namespace storage {
FileStorManager::
-FileStorManager(const config::ConfigUri & configUri, const spi::PartitionStateList& partitions,
+FileStorManager(const config::ConfigUri & configUri,
spi::PersistenceProvider& provider, ServiceLayerComponentRegister& compReg)
: StorageLinkQueued("File store manager", compReg),
framework::HtmlStatusReporter("filestorman", "File store manager"),
_compReg(compReg),
_component(compReg, "filestormanager"),
- _partitions(partitions),
_providerCore(provider),
_providerErrorWrapper(_providerCore),
_provider(&_providerErrorWrapper),
@@ -128,20 +127,16 @@ FileStorManager::configure(std::unique_ptr<vespa::config::content::StorFilestorC
size_t numStripes = std::max(size_t(1u), numThreads / 2);
_metrics->initDiskMetrics(_disks.size(), _component.getLoadTypes()->getMetricLoadTypes(), numStripes, numThreads);
- _filestorHandler = std::make_unique<FileStorHandler>(numThreads, numStripes, *this, *_metrics, _partitions, _compReg);
+ _filestorHandler = std::make_unique<FileStorHandler>(numThreads, numStripes, *this, *_metrics, _compReg);
uint32_t numResponseThreads = computeNumResponseThreads(_config->numResponseThreads);
if (numResponseThreads > 0) {
_sequencedExecutor = vespalib::SequencedTaskExecutor::create(numResponseThreads, 10000, selectSequencer(_config->responseSequencerType));
}
for (uint32_t i=0; i<_component.getDiskCount(); ++i) {
- if (_partitions[i].isUp()) {
- LOG(spam, "Setting up disk %u", i);
- for (uint32_t j = 0; j < numThreads; j++) {
- _disks[i].push_back(std::make_shared<PersistenceThread>(_sequencedExecutor.get(), _compReg, _configUri, *_provider,
+ LOG(spam, "Setting up disk %u", i);
+ for (uint32_t j = 0; j < numThreads; j++) {
+ _disks[i].push_back(std::make_shared<PersistenceThread>(_sequencedExecutor.get(), _compReg, _configUri, *_provider,
*_filestorHandler, *_metrics->disks[i]->threads[j], i));
- }
- } else {
- _filestorHandler->disable(i);
}
}
}
@@ -383,24 +378,17 @@ FileStorManager::onCreateBucket(
code = api::ReturnCode(api::ReturnCode::EXISTS, "Bucket already exist");
} else {
entry->disk = _component.getIdealPartition(cmd->getBucket());
- if (_partitions[entry->disk].isUp()) {
- // Newly created buckets are ready but not active, unless
- // explicitly marked as such by the distributor.
- entry->setBucketInfo(api::BucketInfo(
+ // Newly created buckets are ready but not active, unless
+ // explicitly marked as such by the distributor.
+ entry->setBucketInfo(api::BucketInfo(
0, 0, 0, 0, 0, true, cmd->getActive()));
- cmd->setPriority(0);
- handlePersistenceMessage(cmd, entry->disk);
- entry.write();
- LOG(debug, "Created bucket %s on disk %d (node index is %d)",
- cmd->getBucketId().toString().c_str(),
- entry->disk, _component.getIndex());
- return true;
- } else {
- entry.remove();
- code = api::ReturnCode(api::ReturnCode::IO_FAILURE,
- vespalib::make_string("Trying to create bucket %s on disabled disk %d",
- cmd->getBucketId().toString().c_str(), entry->disk));
- }
+ cmd->setPriority(0);
+ handlePersistenceMessage(cmd, entry->disk);
+ entry.write();
+ LOG(debug, "Created bucket %s on disk %d (node index is %d)",
+ cmd->getBucketId().toString().c_str(),
+ entry->disk, _component.getIndex());
+ return true;
}
}
std::shared_ptr<api::CreateBucketReply> reply((api::CreateBucketReply*)cmd->makeReply().release());
@@ -499,27 +487,13 @@ FileStorManager::onMergeBucket(const shared_ptr<api::MergeBucketCommand>& cmd)
if (!entry.preExisted()) {
entry->disk = _component.getIdealPartition(cmd->getBucket());
- if (_partitions[entry->disk].isUp()) {
- entry->info = api::BucketInfo(0, 0, 0, 0, 0, true, false);
- LOG(debug, "Created bucket %s on disk %d (node index is %d) due to merge being received.",
- cmd->getBucketId().toString().c_str(), entry->disk, _component.getIndex());
- // Call before writing bucket entry as we need to have bucket
- // lock while calling
- handlePersistenceMessage(cmd, entry->disk);
- entry.write();
- } else {
- entry.remove();
- api::ReturnCode code(api::ReturnCode::IO_FAILURE,
- vespalib::make_string(
- "Trying to perform merge %s whose bucket belongs on target disk %d, which is down. Cluster state version of command is %d, our system state version is %d",
- cmd->toString().c_str(), entry->disk, cmd->getClusterStateVersion(),
- _component.getStateUpdater().getClusterStateBundle()->getVersion()));
- LOGBT(debug, cmd->getBucketId().toString(), "%s", code.getMessage().c_str());
- auto reply = std::make_shared<api::MergeBucketReply>(*cmd);
- reply->setResult(code);
- sendUp(reply);
- return true;
- }
+ entry->info = api::BucketInfo(0, 0, 0, 0, 0, true, false);
+ LOG(debug, "Created bucket %s on disk %d (node index is %d) due to merge being received.",
+ cmd->getBucketId().toString().c_str(), entry->disk, _component.getIndex());
+ // Call before writing bucket entry as we need to have bucket
+ // lock while calling
+ handlePersistenceMessage(cmd, entry->disk);
+ entry.write();
} else {
handlePersistenceMessage(cmd, entry->disk);
}
@@ -536,28 +510,15 @@ FileStorManager::onGetBucketDiff(
}
if (!entry.preExisted()) {
entry->disk = _component.getIdealPartition(cmd->getBucket());
- if (_partitions[entry->disk].isUp()) {
- LOG(debug, "Created bucket %s on disk %d (node index is %d) due to get bucket diff being received.",
- cmd->getBucketId().toString().c_str(), entry->disk, _component.getIndex());
- entry->info.setTotalDocumentSize(0);
- entry->info.setUsedFileSize(0);
- entry->info.setReady(true);
- // Call before writing bucket entry as we need to have bucket
- // lock while calling
- handlePersistenceMessage(cmd, entry->disk);
- entry.write();
- } else {
- entry.remove();
- api::ReturnCode code(api::ReturnCode::IO_FAILURE,
- vespalib::make_string(
- "Trying to merge non-existing bucket %s, which can't be created because target disk %d is down",
- cmd->getBucketId().toString().c_str(), entry->disk));
- LOGBT(warning, cmd->getBucketId().toString(), "%s", code.getMessage().c_str());
- auto reply = std::make_shared<api::GetBucketDiffReply>(*cmd);
- reply->setResult(code);
- sendUp(reply);
- return true;
- }
+ LOG(debug, "Created bucket %s on disk %d (node index is %d) due to get bucket diff being received.",
+ cmd->getBucketId().toString().c_str(), entry->disk, _component.getIndex());
+ entry->info.setTotalDocumentSize(0);
+ entry->info.setUsedFileSize(0);
+ entry->info.setReady(true);
+ // Call before writing bucket entry as we need to have bucket
+ // lock while calling
+ handlePersistenceMessage(cmd, entry->disk);
+ entry.write();
} else {
handlePersistenceMessage(cmd, entry->disk);
}
diff --git a/storage/src/vespa/storage/persistence/filestorage/filestormanager.h b/storage/src/vespa/storage/persistence/filestorage/filestormanager.h
index deef54840b3..6efd30419b8 100644
--- a/storage/src/vespa/storage/persistence/filestorage/filestormanager.h
+++ b/storage/src/vespa/storage/persistence/filestorage/filestormanager.h
@@ -49,7 +49,6 @@ class FileStorManager : public StorageLinkQueued,
{
ServiceLayerComponentRegister & _compReg;
ServiceLayerComponent _component;
- const spi::PartitionStateList & _partitions;
spi::PersistenceProvider & _providerCore;
ProviderErrorWrapper _providerErrorWrapper;
spi::PersistenceProvider * _provider;
@@ -73,7 +72,7 @@ class FileStorManager : public StorageLinkQueued,
friend struct FileStorManagerTest;
public:
- FileStorManager(const config::ConfigUri &, const spi::PartitionStateList&,
+ FileStorManager(const config::ConfigUri &,
spi::PersistenceProvider&, ServiceLayerComponentRegister&);
FileStorManager(const FileStorManager &) = delete;
FileStorManager& operator=(const FileStorManager &) = delete;
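
Header-wise the change is one fewer reference to bind: the manager no longer stores a PartitionStateList. A toy stand-alone sketch of that kind of constructor slimming; Manager and Provider are hypothetical placeholders, not the real classes.

    #include <string>

    // Simplified placeholder for spi::PersistenceProvider.
    struct Provider {};

    // After the change the manager binds only the provider; the
    // partition-state reference member is gone entirely.
    class Manager {
    public:
        Manager(const std::string& config_uri, Provider& provider)
            : _config_uri(config_uri),
              _provider(&provider) {}

    private:
        std::string _config_uri;
        Provider*   _provider;   // routed through an error wrapper in the real code
    };

    int main() {
        Provider provider;
        Manager manager("dir:config", provider);
        (void)manager;
        return 0;
    }
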
diff --git a/storage/src/vespa/storage/persistence/mergehandler.cpp b/storage/src/vespa/storage/persistence/mergehandler.cpp
index 4d06c28078d..c990cd71f22 100644
--- a/storage/src/vespa/storage/persistence/mergehandler.cpp
+++ b/storage/src/vespa/storage/persistence/mergehandler.cpp
@@ -848,7 +848,8 @@ MergeHandler::handleMergeBucket(api::MergeBucketCommand& cmd, MessageTracker::UP
{
tracker->setMetric(_env._metrics.mergeBuckets);
- spi::Bucket bucket(cmd.getBucket(), spi::PartitionId(_env._partition));
+ assert(_env._partition == 0u);
+ spi::Bucket bucket(cmd.getBucket());
LOG(debug, "MergeBucket(%s) with max timestamp %" PRIu64 ".",
bucket.toString().c_str(), cmd.getMaxTimestamp());
@@ -1058,7 +1059,8 @@ MessageTracker::UP
MergeHandler::handleGetBucketDiff(api::GetBucketDiffCommand& cmd, MessageTracker::UP tracker)
{
tracker->setMetric(_env._metrics.getBucketDiff);
- spi::Bucket bucket(cmd.getBucket(), spi::PartitionId(_env._partition));
+ assert(_env._partition == 0u);
+ spi::Bucket bucket(cmd.getBucket());
LOG(debug, "GetBucketDiff(%s)", bucket.toString().c_str());
checkResult(_spi.createBucket(bucket, tracker->context()), bucket, "create bucket");
@@ -1169,7 +1171,8 @@ void
MergeHandler::handleGetBucketDiffReply(api::GetBucketDiffReply& reply, MessageSender& sender)
{
_env._metrics.getBucketDiffReply.inc();
- spi::Bucket bucket(reply.getBucket(), spi::PartitionId(_env._partition));
+ assert(_env._partition == 0u);
+ spi::Bucket bucket(reply.getBucket());
LOG(debug, "GetBucketDiffReply(%s)", bucket.toString().c_str());
if (!_env._fileStorHandler.isMerging(bucket.getBucket())) {
@@ -1243,7 +1246,8 @@ MergeHandler::handleApplyBucketDiff(api::ApplyBucketDiffCommand& cmd, MessageTra
{
tracker->setMetric(_env._metrics.applyBucketDiff);
- spi::Bucket bucket(cmd.getBucket(), spi::PartitionId(_env._partition));
+ assert(_env._partition == 0u);
+ spi::Bucket bucket(cmd.getBucket());
LOG(debug, "%s", cmd.toString().c_str());
if (_env._fileStorHandler.isMerging(bucket.getBucket())) {
@@ -1330,7 +1334,8 @@ void
MergeHandler::handleApplyBucketDiffReply(api::ApplyBucketDiffReply& reply,MessageSender& sender)
{
_env._metrics.applyBucketDiffReply.inc();
- spi::Bucket bucket(reply.getBucket(), spi::PartitionId(_env._partition));
+ assert(_env._partition == 0u);
+ spi::Bucket bucket(reply.getBucket());
std::vector<api::ApplyBucketDiffCommand::Entry>& diff(reply.getDiff());
LOG(debug, "%s", reply.toString().c_str());
diff --git a/storage/src/vespa/storage/persistence/persistencethread.cpp b/storage/src/vespa/storage/persistence/persistencethread.cpp
index 1536984a6bc..da0f06bf662 100644
--- a/storage/src/vespa/storage/persistence/persistencethread.cpp
+++ b/storage/src/vespa/storage/persistence/persistencethread.cpp
@@ -137,7 +137,8 @@ PersistenceThread::getBucket(const DocumentId& id, const document::Bucket &bucke
+ "bucket " + bucket.getBucketId().toString() + ".", VESPA_STRLOC);
}
- return spi::Bucket(bucket, spi::PartitionId(_env._partition));
+ assert(_env._partition == 0u);
+ return spi::Bucket(bucket);
}
bool
@@ -328,7 +329,8 @@ MessageTracker::UP
PersistenceThread::handleRevert(api::RevertCommand& cmd, MessageTracker::UP tracker)
{
tracker->setMetric(_env._metrics.revert[cmd.getLoadType()]);
- spi::Bucket b = spi::Bucket(cmd.getBucket(), spi::PartitionId(_env._partition));
+ assert(_env._partition == 0u);
+ spi::Bucket b = spi::Bucket(cmd.getBucket());
const std::vector<api::Timestamp> & tokens = cmd.getRevertTokens();
for (const api::Timestamp & token : tokens) {
spi::Result result = _spi.removeEntry(b, spi::Timestamp(token), tracker->context());
@@ -345,7 +347,8 @@ PersistenceThread::handleCreateBucket(api::CreateBucketCommand& cmd, MessageTrac
LOG(warning, "Bucket %s was merging at create time. Unexpected.", cmd.getBucketId().toString().c_str());
DUMP_LOGGED_BUCKET_OPERATIONS(cmd.getBucketId());
}
- spi::Bucket spiBucket(cmd.getBucket(), spi::PartitionId(_env._partition));
+ assert(_env._partition == 0u);
+ spi::Bucket spiBucket(cmd.getBucket());
_spi.createBucket(spiBucket, tracker->context());
if (cmd.getActive()) {
_spi.setActiveState(spiBucket, spi::BucketInfo::ACTIVE);
@@ -404,7 +407,8 @@ PersistenceThread::handleDeleteBucket(api::DeleteBucketCommand& cmd, MessageTrac
_env._fileStorHandler.clearMergeStatus(cmd.getBucket(),
api::ReturnCode(api::ReturnCode::ABORTED, "Bucket was deleted during the merge"));
}
- spi::Bucket bucket(cmd.getBucket(), spi::PartitionId(_env._partition));
+ assert(_env._partition == 0u);
+ spi::Bucket bucket(cmd.getBucket());
if (!checkProviderBucketInfoMatches(bucket, cmd.getBucketInfo())) {
return tracker;
}
@@ -455,7 +459,8 @@ PersistenceThread::handleReadBucketList(ReadBucketList& cmd, MessageTracker::UP
{
tracker->setMetric(_env._metrics.readBucketList);
- spi::BucketIdListResult result(_spi.listBuckets(cmd.getBucketSpace(), cmd.getPartition()));
+ assert(cmd.getPartition() == 0u);
+ spi::BucketIdListResult result(_spi.listBuckets(cmd.getBucketSpace()));
if (tracker->checkForError(result)) {
auto reply = std::make_shared<ReadBucketListReply>(cmd);
result.getList().swap(reply->getBuckets());
@@ -481,8 +486,9 @@ PersistenceThread::handleCreateIterator(CreateIteratorCommand& cmd, MessageTrack
if ( ! fieldSet) { return tracker; }
tracker->context().setReadConsistency(cmd.getReadConsistency());
+ assert(_env._partition == 0u);
spi::CreateIteratorResult result(_spi.createIterator(
- spi::Bucket(cmd.getBucket(), spi::PartitionId(_env._partition)),
+ spi::Bucket(cmd.getBucket()),
std::move(fieldSet), cmd.getSelection(), cmd.getIncludedVersions(), tracker->context()));
if (tracker->checkForError(result)) {
tracker->setReply(std::make_shared<CreateIteratorReply>(cmd, spi::IteratorId(result.getIteratorId())));
@@ -508,7 +514,8 @@ PersistenceThread::handleSplitBucket(api::SplitBucketCommand& cmd, MessageTracke
return tracker;
}
- spi::Bucket spiBucket(cmd.getBucket(), spi::PartitionId(_env._partition));
+ assert(_env._partition == 0u);
+ spi::Bucket spiBucket(cmd.getBucket());
SplitBitDetector::Result targetInfo;
if (_env._config.enableMultibitSplitOptimalization) {
targetInfo = SplitBitDetector::detectSplit(_spi, spiBucket, cmd.getMaxSplitBits(),
@@ -549,8 +556,10 @@ PersistenceThread::handleSplitBucket(api::SplitBucketCommand& cmd, MessageTracke
}
}
#endif
- spi::Result result = _spi.split(spiBucket, spi::Bucket(target1, spi::PartitionId(lock1.disk)),
- spi::Bucket(target2, spi::PartitionId(lock2.disk)), tracker->context());
+ assert(lock1.disk == 0u);
+ assert(lock2.disk == 0u);
+ spi::Result result = _spi.split(spiBucket, spi::Bucket(target1),
+ spi::Bucket(target2), tracker->context());
if (result.hasError()) {
tracker->fail(PersistenceUtil::convertErrorCode(result), result.getErrorMessage());
return tracker;
@@ -604,8 +613,8 @@ PersistenceThread::handleSplitBucket(api::SplitBucketCommand& cmd, MessageTracke
// Must make sure target bucket exists when we have pending ops
// to an empty target bucket, since the provider will have
// implicitly erased it by this point.
- spi::Bucket createTarget(spi::Bucket(target.second.bucket,
- spi::PartitionId(target.second.diskIndex)));
+ assert(target.second.diskIndex == 0u);
+ spi::Bucket createTarget(spi::Bucket(target.second.bucket));
LOG(debug, "Split target %s was empty, but re-creating it since there are remapped operations queued to it",
createTarget.toString().c_str());
_spi.createBucket(createTarget, tracker->context());
@@ -696,10 +705,13 @@ PersistenceThread::handleJoinBuckets(api::JoinBucketsCommand& cmd, MessageTracke
}
}
#endif
+ assert(lock1.disk == 0u);
+ assert(lock2.disk == 0u);
+ assert(_env._partition == 0u);
spi::Result result =
- _spi.join(spi::Bucket(firstBucket, spi::PartitionId(lock1.disk)),
- spi::Bucket(secondBucket, spi::PartitionId(lock2.disk)),
- spi::Bucket(destBucket, spi::PartitionId(_env._partition)),
+ _spi.join(spi::Bucket(firstBucket),
+ spi::Bucket(secondBucket),
+ spi::Bucket(destBucket),
tracker->context());
if (!tracker->checkForError(result)) {
return tracker;
@@ -737,7 +749,8 @@ PersistenceThread::handleSetBucketState(api::SetBucketStateCommand& cmd, Message
NotificationGuard notifyGuard(*_bucketOwnershipNotifier);
LOG(debug, "handleSetBucketState(): %s", cmd.toString().c_str());
- spi::Bucket bucket(cmd.getBucket(), spi::PartitionId(_env._partition));
+ assert(_env._partition == 0u);
+ spi::Bucket bucket(cmd.getBucket());
bool shouldBeActive(cmd.getState() == api::SetBucketStateCommand::ACTIVE);
spi::BucketInfo::ActiveState newState(shouldBeActive ? spi::BucketInfo::ACTIVE : spi::BucketInfo::NOT_ACTIVE);
@@ -775,10 +788,12 @@ PersistenceThread::handleInternalBucketJoin(InternalBucketJoinCommand& cmd, Mess
entry->disk = _env._partition;
entry.write();
}
+ assert(cmd.getDiskOfInstanceToJoin() == 0u);
+ assert(cmd.getDiskOfInstanceToKeep() == 0u);
spi::Result result =
- _spi.join(spi::Bucket(destBucket, spi::PartitionId(cmd.getDiskOfInstanceToJoin())),
- spi::Bucket(destBucket, spi::PartitionId(cmd.getDiskOfInstanceToJoin())),
- spi::Bucket(destBucket, spi::PartitionId(cmd.getDiskOfInstanceToKeep())),
+ _spi.join(spi::Bucket(destBucket),
+ spi::Bucket(destBucket),
+ spi::Bucket(destBucket),
tracker->context());
if (tracker->checkForError(result)) {
tracker->setReply(std::make_shared<InternalBucketJoinReply>(cmd, _env.getBucketInfo(cmd.getBucket())));
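
Split and join above follow the same shape: assert that every bucket lock refers to partition 0, then hand plain single-argument buckets to the provider. A stand-alone sketch of the join call with Bucket, Result and join() as simplified placeholders for the spi types.

    #include <cassert>
    #include <cstdint>

    // Simplified placeholders for spi::Bucket and spi::Result.
    struct Bucket { uint64_t id; };
    struct Result { bool error = false; };

    // Placeholder for the provider's join(): source1 + source2 -> target.
    Result join(const Bucket&, const Bucket&, const Bucket&) { return {}; }

    Result join_buckets(Bucket first, Bucket second, Bucket dest,
                        uint16_t lock1_disk, uint16_t lock2_disk) {
        // The bucket locks used to carry the disk each bucket lived on;
        // with a single partition both must be 0.
        assert(lock1_disk == 0u);
        assert(lock2_disk == 0u);
        return join(first, second, dest);
    }

    int main() {
        Result r = join_buckets(Bucket{1}, Bucket{2}, Bucket{3}, 0u, 0u);
        return r.error ? 1 : 0;
    }
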
diff --git a/storage/src/vespa/storage/persistence/persistenceutil.cpp b/storage/src/vespa/storage/persistence/persistenceutil.cpp
index 775e05488a9..0d9a4a06cee 100644
--- a/storage/src/vespa/storage/persistence/persistenceutil.cpp
+++ b/storage/src/vespa/storage/persistence/persistenceutil.cpp
@@ -260,8 +260,8 @@ PersistenceUtil::getBucketInfo(const document::Bucket &bucket, int disk) const
disk = _partition;
}
- spi::BucketInfoResult response =
- _spi.getBucketInfo(spi::Bucket(bucket, spi::PartitionId(disk)));
+ assert(disk == 0u);
+ spi::BucketInfoResult response = _spi.getBucketInfo(spi::Bucket(bucket));
return convertBucketInfo(response.getBucketInfo());
}
diff --git a/storage/src/vespa/storage/persistence/processallhandler.cpp b/storage/src/vespa/storage/persistence/processallhandler.cpp
index 4829bdf4581..cbc5c0fc6dd 100644
--- a/storage/src/vespa/storage/persistence/processallhandler.cpp
+++ b/storage/src/vespa/storage/persistence/processallhandler.cpp
@@ -88,7 +88,8 @@ ProcessAllHandler::handleRemoveLocation(api::RemoveLocationCommand& cmd, Message
cmd.getBucketId().toString().c_str(),
cmd.getDocumentSelection().c_str());
- spi::Bucket bucket(cmd.getBucket(), spi::PartitionId(_env._partition));
+ assert(_env._partition == 0u);
+ spi::Bucket bucket(cmd.getBucket());
UnrevertableRemoveEntryProcessor processor(_spi, bucket, tracker->context());
BucketProcessor::iterateAll(_spi, bucket, cmd.getDocumentSelection(),
processor, spi::NEWEST_DOCUMENT_ONLY,tracker->context());
@@ -107,7 +108,8 @@ ProcessAllHandler::handleStatBucket(api::StatBucketCommand& cmd, MessageTracker:
ost << "Persistence bucket " << cmd.getBucketId()
<< ", partition " << _env._partition << "\n";
- spi::Bucket bucket(cmd.getBucket(), spi::PartitionId(_env._partition));
+ assert(_env._partition == 0u);
+ spi::Bucket bucket(cmd.getBucket());
StatEntryProcessor processor(ost);
BucketProcessor::iterateAll(_spi, bucket, cmd.getDocumentSelection(),
processor, spi::ALL_VERSIONS,tracker->context());
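
handleRemoveLocation and handleStatBucket both funnel through the same iterate-everything helper, now over a partition-free bucket. A rough stand-alone sketch of that shape; Bucket, Entry and the processor interface are simplified placeholders, not the real BucketProcessor API.

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Simplified placeholders.
    struct Bucket { uint64_t id; };
    struct Entry  { uint64_t timestamp; };

    struct EntryProcessor {
        virtual ~EntryProcessor() = default;
        virtual void process(const Entry& entry) = 0;
    };

    // Stand-in for the iterate-all helper: feed every entry of the bucket
    // to the processor.
    void iterate_all(const Bucket& bucket, const std::vector<Entry>& entries,
                     EntryProcessor& processor) {
        std::cout << "iterating bucket " << bucket.id << '\n';
        for (const Entry& e : entries) {
            processor.process(e);
        }
    }

    struct StatProcessor : EntryProcessor {
        unsigned count = 0;
        void process(const Entry&) override { ++count; }
    };

    int main() {
        Bucket bucket{0x42};                  // no PartitionId argument any more
        std::vector<Entry> entries{{1}, {2}, {3}};
        StatProcessor stats;
        iterate_all(bucket, entries, stats);
        std::cout << stats.count << " entries\n";
        return 0;
    }
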
diff --git a/storage/src/vespa/storage/persistence/provider_error_wrapper.cpp b/storage/src/vespa/storage/persistence/provider_error_wrapper.cpp
index 0884d807eda..26cfe845eef 100644
--- a/storage/src/vespa/storage/persistence/provider_error_wrapper.cpp
+++ b/storage/src/vespa/storage/persistence/provider_error_wrapper.cpp
@@ -47,16 +47,10 @@ ProviderErrorWrapper::initialize()
return checkResult(_impl.initialize());
}
-spi::PartitionStateListResult
-ProviderErrorWrapper::getPartitionStates() const
-{
- return checkResult(_impl.getPartitionStates());
-}
-
spi::BucketIdListResult
-ProviderErrorWrapper::listBuckets(BucketSpace bucketSpace, spi::PartitionId partitionId) const
+ProviderErrorWrapper::listBuckets(BucketSpace bucketSpace) const
{
- return checkResult(_impl.listBuckets(bucketSpace, partitionId));
+ return checkResult(_impl.listBuckets(bucketSpace));
}
spi::Result
@@ -162,12 +156,6 @@ ProviderErrorWrapper::join(const spi::Bucket& source1, const spi::Bucket& source
}
spi::Result
-ProviderErrorWrapper::move(const spi::Bucket& source, spi::PartitionId target, spi::Context& context)
-{
- return checkResult(_impl.move(source, target, context));
-}
-
-spi::Result
ProviderErrorWrapper::removeEntry(const spi::Bucket& bucket, spi::Timestamp ts, spi::Context& context)
{
return checkResult(_impl.removeEntry(bucket, ts, context));
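
For context on what remains of the wrapper: each provider method is forwarded to the wrapped implementation and the result is inspected on the way back, while getPartitionStates() and move() simply disappear. A stand-alone sketch of that decorator shape; Result, Provider and DummyProvider are simplified placeholders rather than the real spi types.

    #include <iostream>
    #include <string>

    // Simplified placeholders for spi::Result and spi::PersistenceProvider.
    struct Result {
        bool error = false;
        std::string message;
    };

    struct Provider {
        virtual ~Provider() = default;
        virtual Result listBuckets(const std::string& bucketSpace) const = 0;
    };

    // Decorator shape: forward every call to the wrapped implementation
    // and check the result before returning it.
    class ErrorWrapper : public Provider {
    public:
        explicit ErrorWrapper(Provider& impl) : _impl(impl) {}

        Result listBuckets(const std::string& bucketSpace) const override {
            // Note the new single-argument signature: no partition id.
            return checkResult(_impl.listBuckets(bucketSpace));
        }

    private:
        Result checkResult(Result result) const {
            if (result.error) {
                std::cerr << "provider error: " << result.message << '\n';
            }
            return result;
        }

        Provider& _impl;
    };

    struct DummyProvider : Provider {
        Result listBuckets(const std::string&) const override { return {}; }
    };

    int main() {
        DummyProvider impl;
        ErrorWrapper wrapper(impl);
        Result r = wrapper.listBuckets("default");
        return r.error ? 1 : 0;
    }
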
diff --git a/storage/src/vespa/storage/persistence/provider_error_wrapper.h b/storage/src/vespa/storage/persistence/provider_error_wrapper.h
index 54abf0e96fb..d4108e6322d 100644
--- a/storage/src/vespa/storage/persistence/provider_error_wrapper.h
+++ b/storage/src/vespa/storage/persistence/provider_error_wrapper.h
@@ -42,8 +42,7 @@ public:
}
spi::Result initialize() override;
- spi::PartitionStateListResult getPartitionStates() const override;
- spi::BucketIdListResult listBuckets(BucketSpace bucketSpace, spi::PartitionId) const override;
+ spi::BucketIdListResult listBuckets(BucketSpace bucketSpace) const override;
spi::Result setClusterState(BucketSpace bucketSpace, const spi::ClusterState&) override;
spi::Result setActiveState(const spi::Bucket& bucket, spi::BucketInfo::ActiveState newState) override;
spi::BucketInfoResult getBucketInfo(const spi::Bucket&) const override;
@@ -62,7 +61,6 @@ public:
spi::BucketIdListResult getModifiedBuckets(BucketSpace bucketSpace) const override;
spi::Result split(const spi::Bucket& source, const spi::Bucket& target1, const spi::Bucket& target2, spi::Context&) override;
spi::Result join(const spi::Bucket& source1, const spi::Bucket& source2, const spi::Bucket& target, spi::Context&) override;
- spi::Result move(const spi::Bucket& source, spi::PartitionId target, spi::Context&) override;
spi::Result removeEntry(const spi::Bucket&, spi::Timestamp, spi::Context&) override;
spi::PersistenceProvider& getProviderImplementation() {
diff --git a/storage/src/vespa/storage/storageserver/servicelayernode.cpp b/storage/src/vespa/storage/storageserver/servicelayernode.cpp
index 1edfcde393b..1916ae46510 100644
--- a/storage/src/vespa/storage/storageserver/servicelayernode.cpp
+++ b/storage/src/vespa/storage/storageserver/servicelayernode.cpp
@@ -34,7 +34,6 @@ ServiceLayerNode::ServiceLayerNode(const config::ConfigUri & configUri, ServiceL
: StorageNode(configUri, context, generationFetcher, std::make_unique<HostInfo>()),
_context(context),
_persistenceProvider(persistenceProvider),
- _partitions(0),
_externalVisitors(externalVisitors),
_fileStorManager(nullptr),
_init_has_been_called(false),
@@ -52,17 +51,6 @@ void ServiceLayerNode::init()
throw spi::HandledException("Failed provider init: " + initResult.toString(), VESPA_STRLOC);
}
- spi::PartitionStateListResult result(_persistenceProvider.getPartitionStates());
- if (result.hasError()) {
- LOG(error, "Failed to get partition list from persistence provider: %s", result.toString().c_str());
- throw spi::HandledException("Failed to get partition list: " + result.toString(), VESPA_STRLOC);
- }
- _partitions = result.getList();
- if (_partitions.size() == 0) {
- LOG(error, "No partitions in persistence provider. See documentation "
- "for your persistence provider as to how to set up partitions in it.");
- throw spi::HandledException("No partitions in provider", VESPA_STRLOC);
- }
try{
initialize();
} catch (spi::HandledException& e) {
@@ -94,15 +82,14 @@ ServiceLayerNode::subscribeToConfigs()
std::lock_guard configLockGuard(_configLock);
// Verify and set disk count
if (_serverConfig->diskCount != 0
- && _serverConfig->diskCount != _partitions.size())
+ && _serverConfig->diskCount != 1u)
{
std::ostringstream ost;
ost << "Storage is configured to have " << _serverConfig->diskCount
- << " disks but persistence provider states it has "
- << _partitions.size() << " disks.";
+ << " disks but persistence provider states it has 1 disk.";
throw vespalib::IllegalStateException(ost.str(), VESPA_STRLOC);
}
- _context.getComponentRegister().setDiskCount(_partitions.size());
+ _context.getComponentRegister().setDiskCount(1u);
}
void
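
The disk-count sanity check now reduces to "either unspecified (0) or exactly 1". A tiny stand-alone sketch of the same validation, with the exception type swapped for std::runtime_error as an assumption.

    #include <sstream>
    #include <stdexcept>

    // Configured disk count 0 means "unspecified"; anything else must now
    // match the single disk the provider is assumed to have.
    void verify_disk_count(unsigned configured_disk_count) {
        if (configured_disk_count != 0 && configured_disk_count != 1) {
            std::ostringstream ost;
            ost << "Storage is configured to have " << configured_disk_count
                << " disks but persistence provider states it has 1 disk.";
            throw std::runtime_error(ost.str());
        }
    }

    int main() {
        verify_disk_count(0);   // accepted: unspecified
        verify_disk_count(1);   // accepted: matches the single partition
        try {
            verify_disk_count(4);
        } catch (const std::exception&) {
            // mismatch is rejected with the message built above
        }
        return 0;
    }
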
@@ -120,23 +107,8 @@ ServiceLayerNode::initializeNodeSpecific()
// node state.
NodeStateUpdater::Lock::SP lock(_component->getStateUpdater().grabStateChangeLock());
lib::NodeState ns(*_component->getStateUpdater().getReportedNodeState());
- ns.setDiskCount(_partitions.size());
-
- uint32_t usablePartitions = 0;
- for (uint32_t i = 0; i < _partitions.size(); ++i) {
- if (_partitions[i].getState() == spi::PartitionState::UP) {
- ++usablePartitions;
- } else {
- lib::DiskState diskState(lib::State::DOWN, _partitions[i].getReason());
- ns.setDiskState(i, diskState);
- }
- }
+ ns.setDiskCount(1u);
- if (usablePartitions == 0) {
- _noUsablePartitionMode = true;
- ns.setState(lib::State::DOWN);
- ns.setDescription("All partitions are down");
- }
ns.setCapacity(_serverConfig->nodeCapacity);
ns.setReliability(_serverConfig->nodeReliability);
for (uint16_t i=0; i<_serverConfig->diskCapacity.size(); ++i) {
@@ -244,12 +216,12 @@ ServiceLayerNode::createChain(IStorageChainBuilder &builder)
builder.add(std::move(merge_throttler_up));
builder.add(std::make_unique<ChangedBucketOwnershipHandler>(_configUri, compReg));
builder.add(std::make_unique<StorageBucketDBInitializer>(
- _configUri, _partitions, getDoneInitializeHandler(), compReg));
+ _configUri, getDoneInitializeHandler(), compReg));
builder.add(std::make_unique<BucketManager>(_configUri, _context.getComponentRegister()));
builder.add(std::make_unique<VisitorManager>(_configUri, _context.getComponentRegister(), static_cast<VisitorMessageSessionFactory &>(*this), _externalVisitors));
builder.add(std::make_unique<ModifiedBucketChecker>(
_context.getComponentRegister(), _persistenceProvider, _configUri));
- auto filstor_manager = std::make_unique<FileStorManager>(_configUri, _partitions, _persistenceProvider, _context.getComponentRegister());
+ auto filstor_manager = std::make_unique<FileStorManager>(_configUri, _persistenceProvider, _context.getComponentRegister());
_fileStorManager = filstor_manager.get();
builder.add(std::move(filstor_manager));
builder.add(releaseStateManager());
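
On the caller side the only visible change is that StorageBucketDBInitializer and FileStorManager are built without the partition list. A rough stand-alone sketch of the builder pattern used above; Link, FileStorManagerStub and ChainBuilder are hypothetical placeholders for the storage chain types.

    #include <memory>
    #include <vector>

    // Simplified placeholders for StorageLink and IStorageChainBuilder.
    struct Link {
        virtual ~Link() = default;
    };

    struct FileStorManagerStub : Link {
        // The real constructor now takes (configUri, provider, componentRegister);
        // the PartitionStateList argument is gone.
    };

    struct ChainBuilder {
        std::vector<std::unique_ptr<Link>> links;
        void add(std::unique_ptr<Link> link) { links.push_back(std::move(link)); }
    };

    int main() {
        ChainBuilder builder;
        auto filestor_manager = std::make_unique<FileStorManagerStub>();
        Link* raw = filestor_manager.get();   // kept as a non-owning observer, as createChain() does
        builder.add(std::move(filestor_manager));
        (void)raw;
        return 0;
    }
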
diff --git a/storage/src/vespa/storage/storageserver/servicelayernode.h b/storage/src/vespa/storage/storageserver/servicelayernode.h
index ad570202f5b..03603394e3a 100644
--- a/storage/src/vespa/storage/storageserver/servicelayernode.h
+++ b/storage/src/vespa/storage/storageserver/servicelayernode.h
@@ -28,7 +28,6 @@ class ServiceLayerNode
{
ServiceLayerNodeContext& _context;
spi::PersistenceProvider& _persistenceProvider;
- spi::PartitionStateList _partitions;
VisitorFactory::Map _externalVisitors;
MinimumUsedBitsTracker _minUsedBitsTracker;