author    Henning Baldersheim <balder@oath.com>  2018-03-16 17:26:38 +0100
committer Henning Baldersheim <balder@oath.com>  2018-03-19 13:01:16 +0100
commit    99c0278cfa2118fb26a4e13b8ede982cb6abf1fe (patch)
tree      b59e9977e37833da59d937b0244b3df0bf373406
parent    2a788ec1b3900d7ba853b3f5f89e49df7e016c7e (diff)
Clean out priority blocking stuff in PersistenceThread
Conflicts:
    storage/src/tests/persistence/filestorage/filestormanagertest.cpp

Resolve merge conflict.
-rw-r--r--  configdefinitions/src/vespa/stor-filestor.def                                 394
-rw-r--r--  storage/src/tests/common/testhelper.cpp                                        20
-rw-r--r--  storage/src/tests/persistence/filestorage/filestormanagertest.cpp             339
-rw-r--r--  storage/src/tests/persistence/persistencequeuetest.cpp                          4
-rw-r--r--  storage/src/tests/persistence/persistencetestutils.cpp                         12
-rw-r--r--  storage/src/vespa/storage/persistence/filestorage/filestorhandler.cpp          13
-rw-r--r--  storage/src/vespa/storage/persistence/filestorage/filestorhandler.h            23
-rw-r--r--  storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.cpp     124
-rw-r--r--  storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.h        19
-rw-r--r--  storage/src/vespa/storage/persistence/filestorage/filestormanager.cpp          13
-rw-r--r--  storage/src/vespa/storage/persistence/filestorage/pausehandler.h               34
-rw-r--r--  storage/src/vespa/storage/persistence/persistencethread.cpp                    11
-rw-r--r--  storage/src/vespa/storage/persistence/persistenceutil.cpp                      10
-rw-r--r--  storage/src/vespa/storage/persistence/persistenceutil.h                         4
-rw-r--r--  storage/src/vespa/storage/storageserver/storagenode.h                          10
-rw-r--r--  storageserver/src/tests/storageservertest.cpp                                   3
-rw-r--r--  storageserver/src/tests/testhelper.cpp                                         23
17 files changed, 99 insertions, 957 deletions
diff --git a/configdefinitions/src/vespa/stor-filestor.def b/configdefinitions/src/vespa/stor-filestor.def
index 80994bfb477..d20462c99ad 100644
--- a/configdefinitions/src/vespa/stor-filestor.def
+++ b/configdefinitions/src/vespa/stor-filestor.def
@@ -1,79 +1,8 @@
# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
namespace=vespa.config.content
-## Use the new storage core
-use_new_core bool default=false restart
-
-## FILE LAYOUT PARAMETERS
-
-## Number of directories per level to spread files across
-## DEPRECATED: see stor-memfilepersistence config instead
-dir_spread int default=256 restart
-
-## Number of directory levels
-## DEPRECATED: see stor-memfilepersistence config instead
-dir_levels int default=1 restart
-
-## FILE SIZE PARAMETERS
-
-## Minimum number of meta data entries in one slotfile. When creating new
-## files or resizing files, enforce it to contain at least this many meta
-## entries. Set to 512 by default, using 20544 bytes total for metadata in
-## new files.
-## DEPRECATED: see stor-memfilepersistence config instead
-minimum_file_meta_slots int default=512
-
-## Maximum number of entries in one slotfile. File must be split before
-## accepting more data
-##
-## Default ensure meta data is less than 512 kB.
-## DEPRECATED: see stor-memfilepersistence config instead
-maximum_file_meta_slots int default=13106
-
-## Minimum size of header block. At least this amount of header space will
-## be available in new or resized files. For 512 documents, this will be
-## 200 bytes per document.
-## DEPRECATED: see stor-memfilepersistence config instead
-minimum_file_header_block_size int default=102848
-
-## Maximum header block size (in bytes). Since the whole meta data list and
-## header block needs to be read into memory for some operations, a limit
-## can be set for the header block, to avoid consuming too much memory.
-##
-## Default is set high, as we dont configure it automatically right now, so we
-## would rather people configure it down than up.
-## DEPRECATED: see stor-memfilepersistence config instead
-maximum_file_header_block_size int default=33554432
-
-## Minimum size of a single slotfile. When creating or resizing files, they
-## will never become smaller than this. Default of 1 MB, will be 1807 byte
-## per doc if we use all 512 meta data entries set as default min meta
-## entries.
-## DEPRECATED: see stor-memfilepersistence config instead
-minimum_file_size int default=1048576
-
-## Maximum size of a single slotfile. File must be split before accepting
-## more data. Will return file full errors.
-##
-## Default is set high, as we dont configure it automatically right now, so we
-## would rather people configure it down than up.
-## DEPRECATED: see stor-memfilepersistence config instead
-maximum_file_size int default=268431360
-
-## When creating new files, always create files as a multiplum of this size.
-## Should be set to the block size on the disk, or possibly a multiplum of
-## that, though we know no advantage of setting it to more than the block
-## size. If you want more free space in files, you should rather adjust that
-## with growfactor or min file sizes.
-## DEPRECATED: see stor-memfilepersistence config instead
-file_block_size int default=4096
-
## DETECT FAILURE PARAMETERS
-## If persistence operations take more than this amount of milliseconds, it
-## will be logged as a warning.
-warn_on_slow_operations int default=5000
-
## After seeing given number of errors on a disk, storage will disable the
## disk and restart. If set to 0, storage will never disable disks. Note
## that if you get disk errors, which arent automatically fixed, this will
@@ -92,60 +21,6 @@ fail_disk_after_error_count int default=1 restart
## during normal load.
disk_operation_timeout int default=0 restart
-## Timestamps provided for entries written are normally provided externally.
-## To detect clock skew between nodes, storage will warn if timestamps look
-## out of sync. Future time clearly indicates clock skew and thus the limit
-## here is low. Time in the past might just indicate that the operation has
-## been queued for a while, so the default value here is not very strict.
-## Time is given in seconds. Note that there are valid usecases for
-## inserting data with old timestamps, for instance when synchronizing two
-## copies of the same data. Warnings should not be printed in these cases.
-time_future_limit int default=5
-time_past_limit int default=3600
-
-## Enabling debug verifications will make storage do extra verifications
-## to find errors as soon as possible. These extra verifications will use up
-## a lot of resources though, and should not be needed in normal operations.
-## They are mostly to be used during test phases or when debugging problems.
-## The value itself is a bit mask, where you can enable different kinds of
-## verifications by setting given bits.
-debug_verifications int default=0 restart
-
-## CONSISTENCY PARAMETERS
-
-## If true, fsync after all disk operations, to ensure no dirty OS file
-## cache afterwards. This is only useful if using cached IO, which is not
-## recommended to start with.
-## DEPRECATED: see stor-memfilepersistence config instead
-fsync_after_each_operation bool default=false restart
-
-## Time period to keep all updates (given in seconds). One can revert any
-## operation done within this time.
-## DEPRECATED: see stor-memfilepersistence config instead
-revert_time_period int default=300
-
-## If a remove is older than the reverttimeperiod, the document it is
-## removing may be removed from the file. There are a few advantages to
-## keeping removes or a bit longer though. If you use this copy to
-## synchronize another copy of data, having the remove entry makes it easy
-## to detect that you should delete this entry from the other data copy.
-## This is useful for internal synchronization of files within VDS if you
-## use multiple copies, or for partial recovery or BCP situations. To
-## guarantuee consistency in such situations, a data destination that have
-## been unavailable for longer than this amount of time, should have its
-## data cleared before being set into the system again. This value is given
-## in seconds, with the default being one week
-## DEPRECATED: see stor-memfilepersistence config instead
-keep_remove_time_period int default=604800
-
-## Maximum number of entries to keep for a single document. Heavy resending
-## within the revert time period may add loads of entries to a file, ruining
-## performance. This setting sets a maximum number of entries for each
-## document. This will make entries potentially live shorter than the revert
-## time period to avoid a resending worst case. A value of 0 means infinite.
-## DEPRECATED: see stor-memfilepersistence config instead
-maximum_versions_of_single_document_stored int default=0
-
## PERFORMANCE PARAMETERS
## Number of threads to use for each mountpoint. VDS needs memory per thread
@@ -156,11 +31,10 @@ maximum_versions_of_single_document_stored int default=0
## See benchmarks for performance/memory tradeoff.
threads[].lowestpri int default=255 restart
-## Pause operations (and block new ones from starting) with priority
-## lower than this value when executing operations with higher pri than
-## min_priority_to_be_blocking
-max_priority_to_block int default=255 restart
-min_priority_to_be_blocking int default=0 restart
+## When merging, if we find more than this number of documents that exist on all
+## of the same copies, send a separate apply bucket diff with these entries
+## to an optimized merge chain that guarantuees minimum data transfer.
+common_merge_chain_optimalization_minimum_size int default=64 restart
## Chunksize to use while merging buckets between nodes.
##
@@ -169,173 +43,6 @@ min_priority_to_be_blocking int default=0 restart
## while still reading 4k blocks from disk.
bucket_merge_chunk_size int default=4190208 restart
-## When reading a slotfile, one does not know the size of the meta data
-## list, so one have to read a static amount of data, and possibly read more
-## if one didnt read enough. This value needs to be at least 64 byte to read
-## the initial header stating the true size of the meta data list.
-## DEPRECATED: see stor-memfilepersistence config instead
-initial_index_read int default=61440
-
-## Similar to index read, when reading the document identifiers, one does
-## not know the length of the name prior to reading. Thus, if you read less
-## than the size, you will have to do an extra read to get the rest. If you
-## use very long document identifiers you should increase this value to be
-## larger than most of your identifiers.
-## restart flag was added automatically and needs to be verified.
-## DEPRECATED: see stor-memfilepersistence config instead
-initial_name_read int default=512 restart
-
-## When we need to read (or write) multiple entries in a file where we can
-## either read a big enough section to cover all of them. But at some
-## distance between the areas we need, it becomes beneficial to do multiple
-## reads rather than to read over them. This setting set how many sequential
-## bytes we dont need that we allow to be read/written in order to join two
-## logical IO operations together in the application. Setting this low might
-## be ok if system calls are cheap and we manage to queue up next IO
-## operation in time such that the disk dont need to spin an extra round.
-## Setting it high will make the disk more likely to process them together,
-## but the time used to read/write the gap might have been used to do
-## something useful instead.
-## DEPRECATED: see stor-memfilepersistence config instead
-maximum_gap_to_read_through int default=65536
-
-## Currently not in here as we dont have append functionality yet. Might
-## improve performance in some cases.
-## max_file_appends int default=0
-
-## When writing with direct IO, we need to align blocks to 512b, and to
-## avoid reading we write garbage after each doc to fill 512b block. When
-## resizing or splitting the file we can realign the files such that we
-## remove the gaps of existing data, as we will rewrite everything anyhow.
-## If using very small documents this might improve your disk space
-## utilization. For larger documents it doesnt really reduce much, so might
-## be useful to turn it off to save CPU.
-## DEPRECATED: see stor-memfilepersistence config instead
-remove_file_gaps_on_rewrite bool default=true restart
-
-## The order we try to enforce in file body blocks. Visitors that only need
-## to visit some of the data in a bucket will be able to read less if what
-## it needs to read is located next to each other on disk. However, as we
-## dont enforce order on write operations, this demands that resize/split
-## operations do the resorting, which, if we cant do it all in memory is
-## very expensive. ANY will not do any reordering. Timestamp will enforce
-## timestamp order, which is fairly close to order that will normally be
-## written anyway, so it should be cheap to reorder even if we cant do it
-## all in memory. This might be useful if you often visit subsets based on
-## time. RGID uses reverse GID, which stores data from one location
-## together. This is useful if you want to visit data from one user from
-## buckets that have many users often. This is a much more expensive sort
-## though, and should only be done if we have enough memory.
-## DEPRECATED: see stor-memfilepersistence config instead
-body_block_order enum { ANY, TIMESTAMP, RGID } default=ANY restart
-
-## If set to true, we will refuse to do reordering of memory unless we have
-## enough memory to do it all in memory. See body_block_order comments.
-## DEPRECATED: see stor-memfilepersistence config instead
-only_reorder_body_in_memory bool default=true restart
-
-## Whether or not we should verify checksums of all read data during regular
-## operations like put, get & remove. Note that some operations, like merge
-## and bucket integrity verify operations will still check checksums even if
-## this is set false.
-## DEPRECATED: see stor-memfilepersistence config instead
-verify_checksum_on_regular_load bool default=true restart
-
-## For streaming search, visiting is very performance critical. Thus you can
-## specifically disable checksum verification for visiting.
-## DEPRECATED: see stor-memfilepersistence config instead
-verify_checksum_on_visit bool default=true restart
-
-## Maximum size of index buffer that will be allowed to stay in memory and
-## not being reduced back to this size after we no longer need it.
-## DEPRECATED: see stor-memfilepersistence config instead
-maximum_sustainable_index_buffer_size int default=1044480 restart
-
-## Maximum size of input buffer that will be allowed to stay in memory and
-## not being reduced back to this size after we no longer need it.
-## DEPRECATED: see stor-memfilepersistence config instead
-maximum_sustainable_input_buffer_size int default=1044480 restart
-
-## Maximum size of output buffer that will be allowed to stay in memory and
-## not being reduced back to this size after we no longer need it.
-## DEPRECATED: see stor-memfilepersistence config instead
-maximum_sustainable_output_buffer_size int default=1044480 restart
-
-## Whether to downsize index buffer immediately after usage if its above the
-## maximum size. If not, it will not be resized down until someone requests
-## to use it that needs less data in the buffer. Index buffer is used all
-## the time so there should be little reason for immediately downsizing it.
-## DEPRECATED: see stor-memfilepersistence config instead
-downsize_index_buffer_immediately_after_use bool default=false restart
-
-## Whether to downsize input buffer immediately after usage if its above the
-## maximum size. If not, it will not be resized down until someone requests
-## to use it that needs less data in the buffer. Input buffer is not used
-## that often, so downsizing it immediately might save some memory.
-## DEPRECATED: see stor-memfilepersistence config instead
-downsize_input_buffer_immediately_after_use bool default=true restart
-
-## Whether to downsize output buffer immediately after usage if its above
-## the maximum size. If not, it will not be resized down until someone
-## requests to use it that needs less data in the buffer. Input buffer is
-## not used that often, so downsizing it immediately might save some memory.
-## DEPRECATED: see stor-memfilepersistence config instead
-downsize_output_buffer_immediately_after_use bool default=true restart
-
-## Minimum size of buffer used to write a continuous file. If maximum amount
-## of memory is not available. At least this amount will be allocated.
-## DEPRECATED: see stor-memfilepersistence config instead
-minimum_continuous_file_write_buffer_size int default=1044480 restart
-
-## Maximum size of buffer used to write a continuous file. If set above max
-## file size we will always write new files in one go, which probably makes
-## for the least chance of getting fragmentation, but will also consume the
-## most memory. Default of writing a MB at a time, should make for little
-## performance loss because of disk seek time, and hopefully get little
-## fragmentation while keeping memory usage down.
-## DEPRECATED: see stor-memfilepersistence config instead
-maximum_continuous_file_write_buffer_size int default=1044480 restart
-
-## Minimum amount of memory allocated to read source data during join. This
-## amount of memory will be forced allocated.
-## DEPRECATED: see stor-memfilepersistence config instead
-minimum_join_source_body_read_buffer_size int default=1044480 restart
-
-## This sets the maximum size of the buffer used in join to read source
-## data. Join uses the least IO if this buffer is as big as the body block
-## of the source file. Due to the memory manager, each join might get that
-## much memory though.
-## DEPRECATED: see stor-memfilepersistence config instead
-maximum_join_source_body_read_buffer_size int default=16773120 restart
-
-## Minimum amount of memory allocated to read source data during export. This
-## amount of memory will be forced allocated.
-## DEPRECATED: see stor-memfilepersistence config instead
-minimum_export_source_body_read_buffer_size int default=1044480 restart
-
-## This sets the maximum size of the buffer used in export to read source
-## data. Export uses the least IO if this buffer is as big as the body block
-## of the source file. In addition, reordering of body block might not be
-## feasibly unless the buffer is big enough to include the whole body block.
-## DEPRECATED: see stor-memfilepersistence config instead
-maximum_export_source_body_read_buffer_size int default=33550336 restart
-
-## Minimum size of buffer used to read data during defragmentation.
-## DEPRECATED: see stor-memfilepersistence config instead
-minimum_defrag_source_body_read_buffer_size int default=1044480 restart
-
-## This sets the maximum size of the buffer used in defragmentation to read
-## source data. Defragmentation uses the least IO if this buffer is as big
-## as the body block of the source file, but this might consume some memory.
-## Defragmentation is not enabled by default.
-## DEPRECATED: see stor-memfilepersistence config instead
-maximum_defrag_source_body_read_buffer_size int default=1044480 restart
-
-## When merging, if we find more than this number of documents that exist on all
-## of the same copies, send a separate apply bucket diff with these entries
-## to an optimized merge chain that guarantuees minimum data transfer.
-common_merge_chain_optimalization_minimum_size int default=64 restart
-
## When merging, it is possible to send more metadata than needed in order to
## let local nodes in merge decide which entries fits best to add this time
## based on disk location. Toggle this option on to use it. Note that memory
@@ -344,100 +51,7 @@ common_merge_chain_optimalization_minimum_size int default=64 restart
## fill all.
enable_merge_local_node_choose_docs_optimalization bool default=true restart
-## If set, when we need to cache the entire header, and we already have cached
-## all the metadata, read the metadata to find max header position, and only
-## read the part containing information.
-## DEPRECATED: see stor-memfilepersistence config instead
-read_only_used_header_part_when_possible bool default=true restart
-
-## Enable the slotfile read cache. The cache holds recent metadata and header
-## blocks read from disks, and even if small, is very useful for localized
-## access.
-## DEPRECATED: see stor-memfilepersistence config instead
-enable_slotfile_cache bool default=true restart
-
-## Let the slotfile cache the whole header on a header only operation not
-## needing the entire header, if this amount of header only accesses needing
-## part of the header has already happened.
-##
-## Set very high to begin with to never reduce performance. If you have heavy
-## header only access to some files, you may get better performance by tuning
-## this value.
-## DEPRECATED: see stor-memfilepersistence config instead
-slotfile_precache_header_after_access_count int default=512 restart
-
## Whether or not to enable the multibit split optimalization. This is useful
## if splitting is expensive, but listing document identifiers is fairly cheap.
## This is true for memfile persistence layer, but not for vespa search.
enable_multibit_split_optimalization bool default=true restart
-
-## STORAGE SPACE vs IO/CPU PERFORMANCE OPTIONS
-
-## If true, use direct IO, bypassing OS caches for disk access. This is very
-## useful as VDS does a random distribution dividing load, so it is unlikely
-## that the OS cache will ever hit, and thus it is a huge performance drain.
-## DEPRECATED: see stor-memfilepersistence config instead
-use_direct_io bool default=true restart
-
-## All IO operations will be aligned to this amount of bytes if direct IO is
-## enabled.
-## DEPRECATED: see stor-memfilepersistence config instead
-block_alignment_size int default=512 restart
-
-## When a disk is fuller than this factor, we will not allow more data to be
-## written to the system, unless this data is written in order to reduce
-## storage consumption, such as resizing files to become smaller or add
-## meta entries to write remove entries into. This value is set high as
-## default as we expect a lot of users to have formatted their disks to
-## already reserve 8% of the data to root user which is often default. We
-## suggest using 0% reserved for root, and rather set this parameter lower
-## to reserve space. That way, VDS have more space available in worst case
-## in order to resize files to become smaller.
-## DEPRECATED: see stor-memfilepersistence config instead
-disk_full_factor double default=0.98 restart
-
-## The grow factor sets how much free space to add to a file when we resize
-## it, whether we are making it smaller or larger. If the space we need
-## after the operation triggering the resize is X, then the file will be
-## resized to be of size X * growfactor. In a growing system with no
-## deletes, a growfactor of 2 will make the files have 25% free space on
-## average. Reducing it to 1.5 will reduce the average free space to 16.7%.
-## DEPRECATED: see stor-memfilepersistence config instead
-grow_factor double default=2.0
-
-## If files fall below this fill rate, resize file to become smaller.
-## Note that this parameter is tightly coupled with the growfactor. For
-## instance, with a growfactor of 2.0, a file will contain 50 % free space
-## after a resize. If the min fill rate then is 0.50, that means that if a
-## single doc is deleted from this file, we need to resize it to
-## become smaller straight away.
-## DEPRECATED: see stor-memfilepersistence config instead
-min_fill_rate double default=0.1
-
-## Minimum part of defragmented space one need to reclaim to allow
-## defragmentation of file. This value is given as a ratio of reclaimed
-## space compared to the total size of the data block.
-## Example: A body block of 100 MB, has 15 MB free space, with largest
-## continuos free space of 5 MB. Gain of defragmentation will then be 0.1.
-## DEPRECATED: see stor-memfilepersistence config instead
-defrag_minimum_gain double default=1.0 restart
-
-## When creating/resizing slotfiles, one uses average document sizes to
-## decide how much free space to add to metadata, header and body portions.
-## This option can be used to allocate extra free space to meta data in
-## order to reduce the chance of the file needing resize due to lack of free
-## meta data entries.
-## DEPRECATED: see stor-memfilepersistence config instead
-overrepresent_meta_data_factor double default=1.2
-
-## When creating/resizing slotfiles, one uses average document sizes to
-## decide how much free space to add to metadata, header and body portions.
-## This option can be used to allocate extra free space to header data in
-## order to reduce the chance of the file needing resize due to lack of
-## header block space.
-## DEPRECATED: see stor-memfilepersistence config instead
-overrepresent_header_block_factor double default=1.1
-
-## Load types to use cache for. If empty, cache for all.
-## DEPRECATED: see stor-memfilepersistence config instead
-load_types_to_cache[] string restart
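
Taken together, the hunks above remove every slotfile-era knob already marked DEPRECATED (their successors live in stor-memfilepersistence config) along with the priority-blocking pair max_priority_to_block / min_priority_to_be_blocking. A condensed view of what remains of the file, assembled only from the context and '+' lines shown above (descriptive comments elided; lines outside the hunks' context windows are not included):

    namespace=vespa.config.content

    ## DETECT FAILURE PARAMETERS
    fail_disk_after_error_count int default=1 restart
    disk_operation_timeout int default=0 restart

    ## PERFORMANCE PARAMETERS
    threads[].lowestpri int default=255 restart
    common_merge_chain_optimalization_minimum_size int default=64 restart
    bucket_merge_chunk_size int default=4190208 restart
    enable_merge_local_node_choose_docs_optimalization bool default=true restart
    enable_multibit_split_optimalization bool default=true restart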
diff --git a/storage/src/tests/common/testhelper.cpp b/storage/src/tests/common/testhelper.cpp
index 296a9a3cc0f..8cfa403539a 100644
--- a/storage/src/tests/common/testhelper.cpp
+++ b/storage/src/tests/common/testhelper.cpp
@@ -9,23 +9,6 @@ LOG_SETUP(".testhelper");
namespace storage {
-namespace {
- bool useNewStorageCore() {
- if ( // Unit test directory
- vespalib::fileExists("use_new_storage_core") ||
- // src/cpp directory
- vespalib::fileExists("../use_new_storage_core") ||
- // Top build directory where storage-HEAD remains
- vespalib::fileExists("../../../../use_new_storage_core"))
- {
- std::cerr << "Using new storage core for unit tests\n";
- return true;
- }
- return false;
- }
- bool newStorageCore(useNewStorageCore());
-}
-
void addStorageDistributionConfig(vdstestlib::DirConfig& dc)
{
vdstestlib::DirConfig::Config* config;
@@ -128,14 +111,11 @@ vdstestlib::DirConfig getStandardConfig(bool storagenode, const std::string & ro
config->set("threads[0].lowestpri 255");
config->set("dir_spread", "4");
config->set("dir_levels", "0");
- config->set("use_new_core", newStorageCore ? "true" : "false");
config->set("maximum_versions_of_single_document_stored", "0");
//config->set("enable_slotfile_cache", "false");
// Unit tests typically use fake low time values, so don't complain
// about them or compact/delete them by default. Override in tests testing that
// behavior
- config->set("time_future_limit", "5");
- config->set("time_past_limit", "2000000000");
config->set("keep_remove_time_period", "2000000000");
config->set("revert_time_period", "2000000000");
// Don't want test to call exit()
diff --git a/storage/src/tests/persistence/filestorage/filestormanagertest.cpp b/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
index 318881364d7..26bfeb41b42 100644
--- a/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
+++ b/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
@@ -7,17 +7,10 @@
#include <vespa/document/test/make_document_bucket.h>
#include <vespa/storage/storageserver/statemanager.h>
#include <vespa/storage/bucketdb/bucketmanager.h>
-#include <vespa/vdslib/state/cluster_state_bundle.h>
#include <vespa/storage/persistence/persistencethread.h>
#include <vespa/storage/persistence/filestorage/filestormanager.h>
#include <vespa/storage/persistence/filestorage/modifiedbucketchecker.h>
#include <vespa/document/update/assignvalueupdate.h>
-#include <vespa/document/datatype/datatype.h>
-#include <vespa/document/fieldvalue/document.h>
-#include <vespa/document/datatype/documenttype.h>
-#include <vespa/document/update/documentupdate.h>
-#include <vespa/document/fieldvalue/rawfieldvalue.h>
-#include <vespa/document/fieldvalue/stringfieldvalue.h>
#include <vespa/document/select/parser.h>
#include <vespa/vdslib/state/random.h>
#include <vespa/storageapi/message/bucketsplitting.h>
@@ -76,8 +69,6 @@ struct FileStorManagerTest : public CppUnit::TestFixture {
void testFlush();
void testRemapSplit();
void testHandlerPriority();
- void testHandlerPriorityBlocking();
- void testHandlerPriorityPreempt();
void testHandlerMulti();
void testHandlerTimeout();
void testHandlerPause();
@@ -102,7 +93,6 @@ struct FileStorManagerTest : public CppUnit::TestFixture {
void testMergeBucketImplicitCreateBucket();
void testNewlyCreatedBucketIsReady();
void testCreateBucketSetsActiveFlagInDatabaseAndReply();
- void testFileStorThreadLockingStressTest();
void testStateChange();
void testRepairNotifiesDistributorOnChange();
void testDiskMove();
@@ -113,8 +103,6 @@ struct FileStorManagerTest : public CppUnit::TestFixture {
CPPUNIT_TEST(testFlush);
CPPUNIT_TEST(testRemapSplit);
CPPUNIT_TEST(testHandlerPriority);
- CPPUNIT_TEST(testHandlerPriorityBlocking);
- CPPUNIT_TEST(testHandlerPriorityPreempt);
CPPUNIT_TEST(testHandlerMulti);
CPPUNIT_TEST(testHandlerTimeout);
CPPUNIT_TEST(testHandlerPause);
@@ -146,21 +134,17 @@ struct FileStorManagerTest : public CppUnit::TestFixture {
void createBucket(document::BucketId bid, uint16_t disk)
{
- spi::Context context(defaultLoadType, spi::Priority(0),
- spi::Trace::TraceLevel(0));
- _node->getPersistenceProvider().createBucket(
- makeSpiBucket(bid, spi::PartitionId(disk)), context);
+ spi::Context context(defaultLoadType, spi::Priority(0), spi::Trace::TraceLevel(0));
+ _node->getPersistenceProvider().createBucket(makeSpiBucket(bid, spi::PartitionId(disk)), context);
StorBucketDatabase::WrappedEntry entry(
- _node->getStorageBucketDatabase().get(bid, "foo",
- StorBucketDatabase::CREATE_IF_NONEXISTING));
+ _node->getStorageBucketDatabase().get(bid, "foo", StorBucketDatabase::CREATE_IF_NONEXISTING));
entry->disk = disk;
entry->info = api::BucketInfo(0, 0, 0, 0, 0, true, false);
entry.write();
}
- document::Document::UP createDocument(
- const std::string& content, const std::string& id)
+ document::Document::UP createDocument(const std::string& content, const std::string& id)
{
return _node->getTestDocMan().createDocument(content, id);
}
@@ -628,19 +612,16 @@ FileStorManagerTest::testHandlerPriority()
FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1);
- FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(),
- _node->getComponentRegister(), 255, 0);
+ FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(), _node->getComponentRegister());
filestorHandler.setGetNextMessageTimeout(50);
std::string content("Here is some content which is in all documents");
std::ostringstream uri;
- Document::SP doc(createDocument(
- content, "userdoc:footype:1234:bar").release());
+ Document::SP doc(createDocument(content, "userdoc:footype:1234:bar").release());
document::BucketIdFactory factory;
- document::BucketId bucket(16, factory.getBucketId(
- doc->getId()).getRawId());
+ document::BucketId bucket(16, factory.getBucketId(doc->getId()).getRawId());
// Populate bucket with the given data
for (uint32_t i = 1; i < 6; i++) {
@@ -739,8 +720,7 @@ FileStorManagerTest::testHandlerPausedMultiThread()
FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1);
- FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(),
- _node->getComponentRegister(), 255, 0);
+ FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(), _node->getComponentRegister());
filestorHandler.setGetNextMessageTimeout(50);
std::string content("Here is some content which is in all documents");
@@ -791,8 +771,7 @@ FileStorManagerTest::testHandlerPause()
FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1);
- FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(),
- _node->getComponentRegister(), 255, 0);
+ FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(), _node->getComponentRegister());
filestorHandler.setGetNextMessageTimeout(50);
std::string content("Here is some content which is in all documents");
@@ -802,15 +781,12 @@ FileStorManagerTest::testHandlerPause()
document::BucketIdFactory factory;
document::BucketId bucket(16, factory.getBucketId(
- doc->getId()).getRawId());
+ doc->getId()).getRawId());
// Populate bucket with the given data
for (uint32_t i = 1; i < 6; i++) {
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bucket), doc, 100));
- std::unique_ptr<api::StorageMessageAddress> address(
- new api::StorageMessageAddress(
- "storage", lib::NodeType::STORAGE, 3));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bucket), doc, 100);
+ auto address = std::make_unique<api::StorageMessageAddress>("storage", lib::NodeType::STORAGE, 3);
cmd->setAddress(*address);
cmd->setPriority(i * 15);
filestorHandler.schedule(cmd, 0);
@@ -847,8 +823,7 @@ FileStorManagerTest::testRemapSplit()
// Setup a filestorthread to test
DummyStorageLink top;
DummyStorageLink *dummyManager;
- top.push_back(std::unique_ptr<StorageLink>(
- dummyManager = new DummyStorageLink));
+ top.push_back(std::unique_ptr<StorageLink>(dummyManager = new DummyStorageLink));
top.open();
ForwardingMessageSender messageSender(*dummyManager);
// Since we fake time with small numbers, we need to make sure we dont
@@ -859,7 +834,7 @@ FileStorManagerTest::testRemapSplit()
metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1);
FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(),
- _node->getComponentRegister(), 255, 0);
+ _node->getComponentRegister());
filestorHandler.setGetNextMessageTimeout(50);
std::string content("Here is some content which is in all documents");
@@ -874,10 +849,8 @@ FileStorManagerTest::testRemapSplit()
// Populate bucket with the given data
for (uint32_t i = 1; i < 4; i++) {
- filestorHandler.schedule(
- api::StorageMessage::SP(new api::PutCommand(makeDocumentBucket(bucket1), doc1, i)), 0);
- filestorHandler.schedule(
- api::StorageMessage::SP(new api::PutCommand(makeDocumentBucket(bucket2), doc2, i + 10)), 0);
+ filestorHandler.schedule(std::make_shared<api::PutCommand>(makeDocumentBucket(bucket1), doc1, i), 0);
+ filestorHandler.schedule(std::make_shared<api::PutCommand>(makeDocumentBucket(bucket2), doc2, i + 10), 0);
}
CPPUNIT_ASSERT_EQUAL(std::string("BucketId(0x40000000000004d2): Put(BucketId(0x40000000000004d2), userdoc:footype:1234:bar, timestamp 1, size 108) (priority: 127)\n"
@@ -925,7 +898,7 @@ FileStorManagerTest::testHandlerMulti()
metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1);
FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(),
- _node->getComponentRegister(), 255, 0);
+ _node->getComponentRegister());
filestorHandler.setGetNextMessageTimeout(50);
std::string content("Here is some content which is in all documents");
@@ -935,10 +908,8 @@ FileStorManagerTest::testHandlerMulti()
Document::SP doc2(createDocument(content, "userdoc:footype:4567:bar").release());
document::BucketIdFactory factory;
- document::BucketId bucket1(16, factory.getBucketId(
- doc1->getId()).getRawId());
- document::BucketId bucket2(16, factory.getBucketId(
- doc2->getId()).getRawId());
+ document::BucketId bucket1(16, factory.getBucketId(doc1->getId()).getRawId());
+ document::BucketId bucket2(16, factory.getBucketId(doc2->getId()).getRawId());
// Populate bucket with the given data
for (uint32_t i = 1; i < 10; i++) {
@@ -988,7 +959,7 @@ FileStorManagerTest::testHandlerTimeout()
metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1);
FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(),
- _node->getComponentRegister(), 255, 0);
+ _node->getComponentRegister());
filestorHandler.setGetNextMessageTimeout(50);
std::string content("Here is some content which is in all documents");
@@ -1041,208 +1012,6 @@ FileStorManagerTest::testHandlerTimeout()
}
void
-FileStorManagerTest::testHandlerPriorityBlocking()
-{
- TestName testName("testHandlerPriorityBlocking");
- // Setup a filestorthread to test
- DummyStorageLink top;
- DummyStorageLink *dummyManager;
- top.push_back(std::unique_ptr<StorageLink>(
- dummyManager = new DummyStorageLink));
- top.open();
- ForwardingMessageSender messageSender(*dummyManager);
- // Since we fake time with small numbers, we need to make sure we dont
- // compact them away, as they will seem to be from 1970
-
- documentapi::LoadTypeSet loadTypes("raw:");
- FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
- metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1);
-
- FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(),
- _node->getComponentRegister(), 21, 21);
- filestorHandler.setGetNextMessageTimeout(50);
-
- std::string content("Here is some content which is in all documents");
- std::ostringstream uri;
-
- document::BucketIdFactory factory;
-
- // Populate bucket with the given data
- for (uint32_t i = 1; i < 6; i++) {
- Document::SP doc(createDocument(content, vespalib::make_string("doc:foo:%d",i)).release());
- document::BucketId bucket(16, factory.getBucketId(
- doc->getId()).getRawId());
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bucket), doc, 100));
- std::unique_ptr<api::StorageMessageAddress> address(
- new api::StorageMessageAddress(
- "storage", lib::NodeType::STORAGE, 3));
- cmd->setAddress(*address);
- cmd->setPriority(i * 15);
- filestorHandler.schedule(cmd, 0);
- }
-
- {
- FileStorHandler::LockedMessage lock1 = filestorHandler.getNextMessage(0);
- CPPUNIT_ASSERT_EQUAL(15, (int)lock1.second->getPriority());
-
- LOG(debug, "Waiting for request that should time out");
- FileStorHandler::LockedMessage lock2 = filestorHandler.getNextMessage(0);
- LOG(debug, "Got request that should time out");
- CPPUNIT_ASSERT(lock2.second.get() == NULL);
- }
-
- {
- FileStorHandler::LockedMessage lock1 = filestorHandler.getNextMessage(0);
- CPPUNIT_ASSERT_EQUAL(30, (int)lock1.second->getPriority());
-
- // New high-pri message comes in
- Document::SP doc(createDocument(content, vespalib::make_string("doc:foo:%d", 100)).release());
- document::BucketId bucket(16, factory.getBucketId(
- doc->getId()).getRawId());
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bucket), doc, 100));
- std::unique_ptr<api::StorageMessageAddress> address(
- new api::StorageMessageAddress(
- "storage", lib::NodeType::STORAGE, 3));
- cmd->setAddress(*address);
- cmd->setPriority(15);
- filestorHandler.schedule(cmd, 0);
-
- FileStorHandler::LockedMessage lock2 = filestorHandler.getNextMessage(0);
- CPPUNIT_ASSERT_EQUAL(15, (int)lock2.second->getPriority());
-
- LOG(debug, "Waiting for request that should time out");
- FileStorHandler::LockedMessage lock3 = filestorHandler.getNextMessage(0);
- LOG(debug, "Got request that should time out");
- CPPUNIT_ASSERT(lock3.second.get() == NULL);
- }
-
- {
- FileStorHandler::LockedMessage lock1 = filestorHandler.getNextMessage(0);
- CPPUNIT_ASSERT_EQUAL(45, (int)lock1.second->getPriority());
-
- FileStorHandler::LockedMessage lock = filestorHandler.getNextMessage(0);
- CPPUNIT_ASSERT_EQUAL(60, (int)lock.second->getPriority());
- }
- LOG(debug, "Test done");
-}
-
-class PausedThread : public document::Runnable {
-private:
- FileStorHandler& _handler;
-
-public:
- bool pause;
- bool done;
- bool gotoperation;
-
- PausedThread(FileStorHandler& handler)
- : _handler(handler), pause(false), done(false), gotoperation(false) {}
-
- void run() override {
- FileStorHandler::LockedMessage msg = _handler.getNextMessage(0);
- gotoperation = true;
-
- while (!done) {
- if (pause) {
- _handler.pause(0, msg.second->getPriority());
- pause = false;
- }
- FastOS_Thread::Sleep(10);
- }
-
- done = false;
- };
-};
-
-void
-FileStorManagerTest::testHandlerPriorityPreempt()
-{
- TestName testName("testHandlerPriorityPreempt");
- // Setup a filestorthread to test
- DummyStorageLink top;
- DummyStorageLink *dummyManager;
- top.push_back(std::unique_ptr<StorageLink>(
- dummyManager = new DummyStorageLink));
- top.open();
- ForwardingMessageSender messageSender(*dummyManager);
- // Since we fake time with small numbers, we need to make sure we dont
- // compact them away, as they will seem to be from 1970
-
- documentapi::LoadTypeSet loadTypes("raw:");
- FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
- metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1);
-
- FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(),
- _node->getComponentRegister(), 21, 21);
- filestorHandler.setGetNextMessageTimeout(50);
-
- std::string content("Here is some content which is in all documents");
- std::ostringstream uri;
-
- document::BucketIdFactory factory;
-
- {
- Document::SP doc(createDocument(content, "doc:foo:1").release());
- document::BucketId bucket(16, factory.getBucketId(
- doc->getId()).getRawId());
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bucket), doc, 100));
- std::unique_ptr<api::StorageMessageAddress> address(
- new api::StorageMessageAddress(
- "storage", lib::NodeType::STORAGE, 3));
- cmd->setAddress(*address);
- cmd->setPriority(60);
- filestorHandler.schedule(cmd, 0);
- }
-
- PausedThread thread(filestorHandler);
- FastOS_ThreadPool pool(512 * 1024);
- thread.start(pool);
-
- while (!thread.gotoperation) {
- FastOS_Thread::Sleep(10);
- }
-
- {
- Document::SP doc(createDocument(content, "doc:foo:2").release());
- document::BucketId bucket(16, factory.getBucketId(
- doc->getId()).getRawId());
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bucket), doc, 100));
- std::unique_ptr<api::StorageMessageAddress> address(
- new api::StorageMessageAddress(
- "storage", lib::NodeType::STORAGE, 3));
- cmd->setAddress(*address);
- cmd->setPriority(20);
- filestorHandler.schedule(cmd, 0);
- }
-
- {
- FileStorHandler::LockedMessage lock1 = filestorHandler.getNextMessage(0);
- CPPUNIT_ASSERT_EQUAL(20, (int)lock1.second->getPriority());
-
- thread.pause = true;
-
- for (uint32_t i = 0; i < 10; i++) {
- CPPUNIT_ASSERT(thread.pause);
- FastOS_Thread::Sleep(100);
- }
- }
-
- while (thread.pause) {
- FastOS_Thread::Sleep(10);
- }
-
- thread.done = true;
-
- while (thread.done) {
- FastOS_Thread::Sleep(10);
- }
-}
-
-void
FileStorManagerTest::testPriority()
{
TestName testName("testPriority");
@@ -1260,8 +1029,7 @@ FileStorManagerTest::testPriority()
FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 2);
- FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(),
- _node->getComponentRegister(), 255, 0);
+ FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(), _node->getComponentRegister());
std::unique_ptr<DiskThread> thread(createThread(
*config, *_node, _node->getPersistenceProvider(),
filestorHandler, *metrics.disks[0]->threads[0], 0));
@@ -1275,8 +1043,7 @@ FileStorManagerTest::testPriority()
std::string content("Here is some content which is in all documents");
std::ostringstream uri;
- uri << "userdoc:footype:" << (i % 3 == 0 ? 0x10001 : 0x0100001)
- << ":mydoc-" << i;
+ uri << "userdoc:footype:" << (i % 3 == 0 ? 0x10001 : 0x0100001)<< ":mydoc-" << i;
Document::SP doc(createDocument(content, uri.str()).release());
documents.push_back(doc);
}
@@ -1285,20 +1052,16 @@ FileStorManagerTest::testPriority()
// Create buckets in separate, initial pass to avoid races with puts
for (uint32_t i=0; i<documents.size(); ++i) {
- document::BucketId bucket(16, factory.getBucketId(
- documents[i]->getId()).getRawId());
+ document::BucketId bucket(16, factory.getBucketId(documents[i]->getId()).getRawId());
- spi::Context context(defaultLoadType, spi::Priority(0),
- spi::Trace::TraceLevel(0));
+ spi::Context context(defaultLoadType, spi::Priority(0), spi::Trace::TraceLevel(0));
- _node->getPersistenceProvider().createBucket(
- makeSpiBucket(bucket), context);
+ _node->getPersistenceProvider().createBucket(makeSpiBucket(bucket), context);
}
// Populate bucket with the given data
for (uint32_t i=0; i<documents.size(); ++i) {
- document::BucketId bucket(16, factory.getBucketId(
- documents[i]->getId()).getRawId());
+ document::BucketId bucket(16, factory.getBucketId(documents[i]->getId()).getRawId());
std::shared_ptr<api::PutCommand> cmd(
new api::PutCommand(makeDocumentBucket(bucket), documents[i], 100 + i));
@@ -1353,8 +1116,7 @@ FileStorManagerTest::testSplit1()
documentapi::LoadTypeSet loadTypes("raw:");
FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1);
- FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(),
- _node->getComponentRegister(), 255, 0);
+ FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(), _node->getComponentRegister());
std::unique_ptr<DiskThread> thread(createThread(
*config, *_node, _node->getPersistenceProvider(),
filestorHandler, *metrics.disks[0]->threads[0], 0));
@@ -1522,10 +1284,8 @@ FileStorManagerTest::testSplitSingleGroup()
documentapi::LoadTypeSet loadTypes("raw:");
FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1);
- FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(),
- _node->getComponentRegister(), 255, 0);
- spi::Context context(defaultLoadType, spi::Priority(0),
- spi::Trace::TraceLevel(0));
+ FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(), _node->getComponentRegister());
+ spi::Context context(defaultLoadType, spi::Priority(0), spi::Trace::TraceLevel(0));
for (uint32_t j=0; j<1; ++j) {
// Test this twice, once where all the data ends up in file with
// splitbit set, and once where all the data ends up in file with
@@ -1663,8 +1423,7 @@ FileStorManagerTest::testSplitEmptyTargetWithRemappedOps()
documentapi::LoadTypeSet loadTypes("raw:");
FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1);
- FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(),
- _node->getComponentRegister(), 255, 0);
+ FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(), _node->getComponentRegister());
std::unique_ptr<DiskThread> thread(createThread(
*config, *_node, _node->getPersistenceProvider(),
filestorHandler, *metrics.disks[0]->threads[0], 0));
@@ -1741,8 +1500,7 @@ FileStorManagerTest::testNotifyOnSplitSourceOwnershipChanged()
documentapi::LoadTypeSet loadTypes("raw:");
FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1);
- FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(),
- _node->getComponentRegister(), 255, 0);
+ FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(), _node->getComponentRegister());
std::unique_ptr<DiskThread> thread(createThread(
*config, *_node, _node->getPersistenceProvider(),
filestorHandler, *metrics.disks[0]->threads[0], 0));
@@ -1791,8 +1549,7 @@ FileStorManagerTest::testJoin()
documentapi::LoadTypeSet loadTypes("raw:");
FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1);
- FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(),
- _node->getComponentRegister(), 255, 0);
+ FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(), _node->getComponentRegister());
std::unique_ptr<DiskThread> thread(createThread(
*config, *_node, _node->getPersistenceProvider(),
filestorHandler, *metrics.disks[0]->threads[0], 0));
@@ -1802,10 +1559,8 @@ FileStorManagerTest::testJoin()
std::string content("Here is some content which is in all documents");
std::ostringstream uri;
- uri << "userdoc:footype:" << (i % 3 == 0 ? 0x10001 : 0x0100001)
- << ":mydoc-" << i;
- Document::SP doc(createDocument(
- content, uri.str()).release());
+ uri << "userdoc:footype:" << (i % 3 == 0 ? 0x10001 : 0x0100001) << ":mydoc-" << i;
+ Document::SP doc(createDocument(content, uri.str()).release());
documents.push_back(doc);
}
document::BucketIdFactory factory;
@@ -1816,36 +1571,26 @@ FileStorManagerTest::testJoin()
{
// Populate bucket with the given data
for (uint32_t i=0; i<documents.size(); ++i) {
- document::BucketId bucket(17, factory.getBucketId(
- documents[i]->getId()).getRawId());
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bucket), documents[i], 100 + i));
- std::unique_ptr<api::StorageMessageAddress> address(
- new api::StorageMessageAddress(
- "storage", lib::NodeType::STORAGE, 3));
+ document::BucketId bucket(17, factory.getBucketId(documents[i]->getId()).getRawId());
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bucket), documents[i], 100 + i);
+ auto address = std::make_unique<api::StorageMessageAddress>("storage", lib::NodeType::STORAGE, 3);
cmd->setAddress(*address);
filestorHandler.schedule(cmd, 0);
filestorHandler.flush(true);
CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- reply->getResult());
+ CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
top.reset();
// Delete every 5th document to have delete entries in file too
if (i % 5 == 0) {
- std::shared_ptr<api::RemoveCommand> rcmd(
- new api::RemoveCommand(
- makeDocumentBucket(bucket), documents[i]->getId(), 1000000 + 100 + i));
+ auto rcmd = std::make_shared<api::RemoveCommand>(makeDocumentBucket(bucket),
+ documents[i]->getId(), 1000000 + 100 + i);
rcmd->setAddress(*address);
filestorHandler.schedule(rcmd, 0);
filestorHandler.flush(true);
CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::RemoveReply> rreply(
- std::dynamic_pointer_cast<api::RemoveReply>(
- top.getReply(0)));
+ auto rreply = std::dynamic_pointer_cast<api::RemoveReply>(top.getReply(0));
CPPUNIT_ASSERT_MSG(top.getReply(0)->getType().toString(),
rreply.get());
CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
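
Most of the churn in this test file is mechanical modernization rather than behavior change: two-step shared_ptr construction on the '-' lines becomes std::make_shared / std::make_unique on the '+' lines, and wrapped statements are joined. A self-contained sketch of the idiom, using a hypothetical Command type rather than the Vespa classes:

    #include <memory>

    struct Command {
        Command(int bucket, int timestamp) : bucket(bucket), timestamp(timestamp) {}
        int bucket;
        int timestamp;
    };

    int main() {
        // Pattern on the '-' lines: explicit new, spelled-out type,
        // two allocations (the object plus the shared_ptr control block).
        std::shared_ptr<Command> oldStyle(new Command(16, 100));

        // Pattern on the '+' lines: one allocation, deduced type,
        // exception safe even inside larger full expressions.
        auto cmd = std::make_shared<Command>(16, 100);

        return (oldStyle->bucket == cmd->bucket) ? 0 : 1;
    }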
diff --git a/storage/src/tests/persistence/persistencequeuetest.cpp b/storage/src/tests/persistence/persistencequeuetest.cpp
index 8d18f75d3db..f5f190ad718 100644
--- a/storage/src/tests/persistence/persistencequeuetest.cpp
+++ b/storage/src/tests/persistence/persistencequeuetest.cpp
@@ -77,9 +77,7 @@ PersistenceQueueTest::testFetchNextUnlockedMessageIfBucketLocked()
metrics.initDiskMetrics(_node->getPartitions().size(),
loadTypes.getMetricLoadTypes(), 1);
- FileStorHandler filestorHandler(messageSender, metrics,
- _node->getPartitions(),
- _node->getComponentRegister(), 255, 0);
+ FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(), _node->getComponentRegister());
// Send 2 puts, 2 to the first bucket, 1 to the second. Calling
// getNextMessage 2 times should then return a lock on the first bucket,
diff --git a/storage/src/tests/persistence/persistencetestutils.cpp b/storage/src/tests/persistence/persistencetestutils.cpp
index e0769795bed..d46708b3aaa 100644
--- a/storage/src/tests/persistence/persistencetestutils.cpp
+++ b/storage/src/tests/persistence/persistencetestutils.cpp
@@ -52,15 +52,13 @@ PersistenceTestEnvironment::PersistenceTestEnvironment(DiskCount numDisks, const
_node.setupDummyPersistence();
_metrics.initDiskMetrics(
numDisks, _node.getLoadTypes()->getMetricLoadTypes(), 1);
- _handler.reset(new FileStorHandler(
- _messageKeeper, _metrics,
- _node.getPersistenceProvider().getPartitionStates().getList(),
- _node.getComponentRegister(), 255, 0));
+ _handler.reset(new FileStorHandler(_messageKeeper, _metrics,
+ _node.getPersistenceProvider().getPartitionStates().getList(),
+ _node.getComponentRegister()));
for (uint32_t i = 0; i < numDisks; i++) {
_diskEnvs.push_back(
- std::unique_ptr<PersistenceUtil>(
- new PersistenceUtil(_config.getConfigId(), _node.getComponentRegister(),
- *_handler, *_metrics.disks[i]->threads[0], i, _node.getPersistenceProvider())));
+ std::make_unique<PersistenceUtil>(_config.getConfigId(), _node.getComponentRegister(), *_handler,
+ *_metrics.disks[i]->threads[0], i, _node.getPersistenceProvider()));
}
}
diff --git a/storage/src/vespa/storage/persistence/filestorage/filestorhandler.cpp b/storage/src/vespa/storage/persistence/filestorage/filestorhandler.cpp
index b53a2a361b3..949ceb901e1 100644
--- a/storage/src/vespa/storage/persistence/filestorage/filestorhandler.cpp
+++ b/storage/src/vespa/storage/persistence/filestorage/filestorhandler.cpp
@@ -7,12 +7,8 @@ namespace storage {
FileStorHandler::FileStorHandler(MessageSender& sender,
FileStorMetrics& metrics,
const spi::PartitionStateList& partitions,
- ServiceLayerComponentRegister& compReg,
- uint8_t maxPriorityToBlock,
- uint8_t minPriorityToBeBlocking)
- : _impl(new FileStorHandlerImpl(
- sender, metrics, partitions, compReg,
- maxPriorityToBlock, minPriorityToBeBlocking))
+ ServiceLayerComponentRegister& compReg)
+ : _impl(new FileStorHandlerImpl(sender, metrics, partitions, compReg))
{
}
@@ -57,11 +53,6 @@ FileStorHandler::schedule(const api::StorageMessage::SP& msg, uint16_t thread)
return _impl->schedule(msg, thread);
}
-void
-FileStorHandler::pause(uint16_t disk, uint8_t priority) const {
- return _impl->pause(disk, priority);
-}
-
FileStorHandler::LockedMessage
FileStorHandler::getNextMessage(uint16_t thread)
{
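
As the forwarding bodies above show, FileStorHandler is a thin facade over FileStorHandlerImpl, so dropping the two constructor arguments and pause() touches the forwarding layer but no queueing logic. A minimal stand-alone sketch of that pimpl shape, with stand-in names rather than the Vespa types:

    #include <memory>

    // Stand-in for FileStorHandlerImpl: owns the queues, locks and metrics.
    class Impl {
    public:
        bool schedule(int msg, unsigned short disk) { lastDisk = disk; return msg >= 0; }
        unsigned short lastDisk = 0;
    };

    // Stand-in for FileStorHandler: constructs the impl, forwards each call.
    class Facade {
    public:
        Facade() : _impl(new Impl()) {}
        bool schedule(int msg, unsigned short disk) { return _impl->schedule(msg, disk); }
    private:
        std::unique_ptr<Impl> _impl;
    };

    int main() {
        Facade handler;
        return handler.schedule(42, 0) ? 0 : 1;
    }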
diff --git a/storage/src/vespa/storage/persistence/filestorage/filestorhandler.h b/storage/src/vespa/storage/persistence/filestorage/filestorhandler.h
index 8fc528b5ad7..2cfc97ca71b 100644
--- a/storage/src/vespa/storage/persistence/filestorage/filestorhandler.h
+++ b/storage/src/vespa/storage/persistence/filestorage/filestorhandler.h
@@ -15,7 +15,6 @@
#include "mergestatus.h"
#include <vespa/document/bucket/bucket.h>
-#include <ostream>
#include <vespa/storage/storageutil/resumeguard.h>
#include <vespa/storage/common/messagesender.h>
@@ -72,12 +71,7 @@ public:
CLOSED
};
- FileStorHandler(MessageSender&,
- FileStorMetrics&,
- const spi::PartitionStateList&,
- ServiceLayerComponentRegister&,
- uint8_t maxPriorityToBlock,
- uint8_t minPriorityToBeBlocking);
+ FileStorHandler(MessageSender&, FileStorMetrics&, const spi::PartitionStateList&, ServiceLayerComponentRegister&);
~FileStorHandler();
// Commands used by file stor manager
@@ -115,17 +109,7 @@ public:
* Schedule a storage message to be processed by the given disk
* @return True if we maanged to schedule operation. False if not
*/
- bool schedule(const std::shared_ptr<api::StorageMessage>&,
- uint16_t disk);
-
- // Commands used by file stor threads
-
- /**
- * When called, checks if any running operations have "preempting"
- * priority. If so, and the given priority is less than that, this call
- * will hang until the other operation is done.
- */
- void pause(uint16_t disk, uint8_t priority) const;
+ bool schedule(const std::shared_ptr<api::StorageMessage>&, uint16_t disk);
/**
* Used by file stor threads to get their next message to process.
@@ -216,8 +200,7 @@ public:
* Fail all operations towards a single bucket currently queued to the
* given thread with the given error code.
*/
- void failOperations(const document::Bucket&, uint16_t fromDisk,
- const api::ReturnCode&);
+ void failOperations(const document::Bucket&, uint16_t fromDisk, const api::ReturnCode&);
/**
* Add a new merge state to the registry.
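
For call sites, the whole change reduces to two fewer constructor arguments and one removed method. A before/after sketch using the names from the hunks above (it assumes the surrounding objects that the tests already construct, so it is not stand-alone):

    // Before this commit: blocking thresholds passed explicitly
    // (255 = max priority to block, 0 = min priority to be blocking).
    //   FileStorHandler filestorHandler(messageSender, metrics,
    //           _node->getPartitions(), _node->getComponentRegister(), 255, 0);
    //   filestorHandler.pause(disk, priority); // could stall until higher-pri work finished

    // After: no thresholds, and pause(disk, priority) no longer exists.
    FileStorHandler filestorHandler(messageSender, metrics,
                                    _node->getPartitions(), _node->getComponentRegister());
    filestorHandler.setGetNextMessageTimeout(50);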
diff --git a/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.cpp b/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.cpp
index 0793d23b862..75f389815df 100644
--- a/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.cpp
+++ b/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.cpp
@@ -23,20 +23,14 @@ using document::BucketSpace;
namespace storage {
-FileStorHandlerImpl::FileStorHandlerImpl(
- MessageSender& sender,
- FileStorMetrics& metrics,
- const spi::PartitionStateList& partitions,
- ServiceLayerComponentRegister& compReg,
- uint8_t maxPriorityToBlock,
- uint8_t minPriorityToBeBlocking)
+FileStorHandlerImpl::FileStorHandlerImpl(MessageSender& sender, FileStorMetrics& metrics,
+ const spi::PartitionStateList& partitions,
+ ServiceLayerComponentRegister& compReg)
: _partitions(partitions),
_component(compReg, "filestorhandlerimpl"),
_diskInfo(_component.getDiskCount()),
_messageSender(sender),
_bucketIdFactory(_component.getBucketIdFactory()),
- _maxPriorityToBlock(maxPriorityToBlock),
- _minPriorityToBeBlocking(minPriorityToBeBlocking),
_getNextMessageTimeout(100),
_paused(false)
{
@@ -46,23 +40,21 @@ FileStorHandlerImpl::FileStorHandlerImpl(
}
if (_diskInfo.size() == 0) {
- throw vespalib::IllegalArgumentException(
- "No disks configured", VESPA_STRLOC);
+ throw vespalib::IllegalArgumentException("No disks configured", VESPA_STRLOC);
}
// Add an update hook so we get callbacks every 5 seconds to update
// metrics.
_component.registerMetricUpdateHook(*this, framework::SecondTime(5));
}
-FileStorHandlerImpl::~FileStorHandlerImpl() { }
+FileStorHandlerImpl::~FileStorHandlerImpl() = default;
void
FileStorHandlerImpl::addMergeStatus(const document::Bucket& bucket, MergeStatus::SP status)
{
vespalib::LockGuard mlock(_mergeStatesLock);
if (_mergeStates.find(bucket) != _mergeStates.end()) {
- LOG(warning, "A merge status already existed for %s. Overwriting it.",
- bucket.toString().c_str());
+ LOG(warning, "A merge status already existed for %s. Overwriting it.", bucket.toString().c_str());
}
_mergeStates[bucket] = status;
}
@@ -73,8 +65,7 @@ FileStorHandlerImpl::editMergeStatus(const document::Bucket& bucket)
vespalib::LockGuard mlock(_mergeStatesLock);
MergeStatus::SP status = _mergeStates[bucket];
if (status.get() == 0) {
- throw vespalib::IllegalStateException(
- "No merge state exist for " + bucket.toString(), VESPA_STRLOC);
+ throw vespalib::IllegalStateException("No merge state exists for " + bucket.toString(), VESPA_STRLOC);
}
return *status;
}
@@ -94,8 +85,7 @@ FileStorHandlerImpl::getNumActiveMerges() const
}
void
-FileStorHandlerImpl::clearMergeStatus(const document::Bucket& bucket,
- const api::ReturnCode* code)
+FileStorHandlerImpl::clearMergeStatus(const document::Bucket& bucket, const api::ReturnCode* code)
{
vespalib::LockGuard mlock(_mergeStatesLock);
auto it = _mergeStates.find(bucket);
@@ -153,10 +143,9 @@ FileStorHandlerImpl::flush(bool killPendingMerges)
if (killPendingMerges) {
api::ReturnCode code(api::ReturnCode::ABORTED,
"Storage node is shutting down");
- for (std::map<document::Bucket, MergeStatus::SP>::iterator it
- = _mergeStates.begin(); it != _mergeStates.end(); ++it)
+ for (auto & entry : _mergeStates)
{
- MergeStatus& s(*it->second);
+ MergeStatus& s(*entry.second);
if (s.pendingGetDiff.get() != 0) {
s.pendingGetDiff->setResult(code);
_messageSender.sendReply(s.pendingGetDiff);
@@ -182,11 +171,9 @@ FileStorHandlerImpl::reply(api::StorageMessage& msg,
std::shared_ptr<api::StorageReply> rep(
static_cast<api::StorageCommand&>(msg).makeReply().release());
if (state == FileStorHandler::DISABLED) {
- rep->setResult(api::ReturnCode(
- api::ReturnCode::DISK_FAILURE, "Disk disabled"));
+ rep->setResult(api::ReturnCode(api::ReturnCode::DISK_FAILURE, "Disk disabled"));
} else {
- rep->setResult(api::ReturnCode(
- api::ReturnCode::ABORTED, "Shutting down storage node."));
+ rep->setResult(api::ReturnCode(api::ReturnCode::ABORTED, "Shutting down storage node."));
}
_messageSender.sendReply(rep);
}
@@ -270,29 +257,6 @@ FileStorHandlerImpl::schedule(const std::shared_ptr<api::StorageMessage>& msg,
return true;
}
-void
-FileStorHandlerImpl::pause(uint16_t disk, uint8_t priority) const {
- if (priority < _maxPriorityToBlock) {
- return;
- }
-
- assert(disk < _diskInfo.size());
- const Disk& t(_diskInfo[disk]);
- vespalib::MonitorGuard lockGuard(t.lock);
-
- bool paused = true;
- while (paused) {
- paused = false;
- for (auto& lockedBucket : t.lockedBuckets) {
- if (lockedBucket.second.priority <= _minPriorityToBeBlocking) {
- paused = true;
- lockGuard.wait();
- break;
- }
- }
- }
-}
-
bool
FileStorHandlerImpl::messageMayBeAborted(const api::StorageMessage& msg) const
{
@@ -395,18 +359,6 @@ FileStorHandlerImpl::abortQueuedOperations(
}
}
-bool
-FileStorHandlerImpl::hasBlockingOperations(const Disk& t) const
-{
- for (const auto & lockedBucket : t.lockedBuckets) {
- if (lockedBucket.second.priority <= _minPriorityToBeBlocking) {
- return true;
- }
- }
-
- return false;
-}
-
void
FileStorHandlerImpl::updateMetrics(const MetricLockGuard &)
{
@@ -423,10 +375,7 @@ FileStorHandlerImpl::getNextMessage(uint16_t disk, FileStorHandler::LockedMessag
{
document::Bucket bucket(lck.first->getBucket());
- LOG(spam,
- "Disk %d retrieving message for buffered bucket %s",
- disk,
- bucket.getBucketId().toString().c_str());
+ LOG(spam, "Disk %d retrieving message for buffered bucket %s", disk, bucket.getBucketId().toString().c_str());
assert(disk < _diskInfo.size());
Disk& t(_diskInfo[disk]);
@@ -449,22 +398,12 @@ FileStorHandlerImpl::getNextMessage(uint16_t disk, FileStorHandler::LockedMessag
api::StorageMessage & m(*range.first->_command);
mbus::Trace& trace = m.getTrace();
- // Priority is too low, not buffering any more.
- if (m.getPriority() >= _maxPriorityToBlock) {
- lck.second.reset();
- return lck;
- }
+ MBUS_TRACE(trace, 9, "FileStorHandler: Message identified by disk thread looking for more requests to active bucket.");
- MBUS_TRACE(trace, 9,
- "FileStorHandler: Message identified by disk thread looking for "
- "more requests to active bucket.");
-
- uint64_t waitTime(
- const_cast<metrics::MetricTimer&>(range.first->_timer).stop(
- t.metrics->averageQueueWaitingTime[m.getLoadType()]));
+ uint64_t waitTime(const_cast<metrics::MetricTimer&>(range.first->_timer).stop(
+ t.metrics->averageQueueWaitingTime[m.getLoadType()]));
- LOG(debug, "Message %s waited %" PRIu64 " ms in storage queue (bucket %s), "
- "timeout %d",
+ LOG(debug, "Message %s waited %" PRIu64 " ms in storage queue (bucket %s), timeout %d",
m.toString().c_str(), waitTime, bucket.getBucketId().toString().c_str(),
static_cast<api::StorageCommand&>(m).getTimeout());
@@ -478,15 +417,11 @@ FileStorHandlerImpl::getNextMessage(uint16_t disk, FileStorHandler::LockedMessag
lockGuard.unlock();
return lck;
} else {
- std::shared_ptr<api::StorageReply> msgReply(
- static_cast<api::StorageCommand&>(m)
- .makeReply().release());
+ std::shared_ptr<api::StorageReply> msgReply(static_cast<api::StorageCommand&>(m).makeReply().release());
idx.erase(range.first);
lockGuard.broadcast();
lockGuard.unlock();
- msgReply->setResult(api::ReturnCode(
- api::ReturnCode::TIMEOUT,
- "Message waited too long in storage queue"));
+ msgReply->setResult(api::ReturnCode(api::ReturnCode::TIMEOUT, "Message waited too long in storage queue"));
_messageSender.sendReply(msgReply);
lck.second.reset();
@@ -515,23 +450,13 @@ FileStorHandlerImpl::diskIsClosed(uint16_t disk) const
}
bool
-FileStorHandlerImpl::operationBlockedByHigherPriorityThread(
- const api::StorageMessage& msg,
- const Disk& disk) const
-{
- return ((msg.getPriority() >= _maxPriorityToBlock)
- && hasBlockingOperations(disk));
-}
-
-bool
FileStorHandlerImpl::messageTimedOutInQueue(const api::StorageMessage& msg,
uint64_t waitTime) const
{
if (msg.getType().isReply()) {
return false; // Replies must always be processed and cannot time out.
}
- return (waitTime >= static_cast<const api::StorageCommand&>(
- msg).getTimeout());
+ return (waitTime >= static_cast<const api::StorageCommand&>(msg).getTimeout());
}
std::unique_ptr<FileStorHandler::BucketLockInterface>
@@ -541,8 +466,7 @@ FileStorHandlerImpl::takeDiskBucketLockOwnership(
const document::Bucket &bucket,
const api::StorageMessage& msg)
{
- return std::unique_ptr<FileStorHandler::BucketLockInterface>(
- new BucketLock(guard, disk, bucket, msg.getPriority(), msg.getSummary()));
+ return std::make_unique<BucketLock>(guard, disk, bucket, msg.getPriority(), msg.getSummary());
}
std::unique_ptr<api::StorageReply>
@@ -587,11 +511,7 @@ FileStorHandlerImpl::getNextMessage(uint16_t disk)
iter++;
}
if (iter != end) {
- api::StorageMessage &m(*iter->_command);
-
- if (! operationBlockedByHigherPriorityThread(m, t)
- && ! isPaused())
- {
+ if (! isPaused()) {
return getMessage(lockGuard, t, idx, iter);
}
}
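After these hunks the only gate left in front of getMessage() is the global isPaused() check; the per-priority pair operationBlockedByHigherPriorityThread()/hasBlockingOperations() is gone entirely. As far as this file shows, the surviving mechanism amounts to a monitor-protected flag; a rough sketch (the wait helper is hypothetical, only _paused and _pauseMonitor are visible in the patch):

    // Disk threads consult the global flag before taking work.
    bool FileStorHandlerImpl::isPaused() const { return _paused; }

    // Hypothetical helper showing the monitor-based wait the flag implies.
    void FileStorHandlerImpl::waitWhilePaused() const {
        vespalib::MonitorGuard guard(_pauseMonitor);
        while (_paused) {
            guard.wait(); // woken when the pauser broadcasts on resume
        }
    }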
diff --git a/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.h b/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.h
index 0b1ab5661e2..6b6d154e149 100644
--- a/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.h
+++ b/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.h
@@ -142,12 +142,8 @@ public:
document::Bucket _bucket;
};
- FileStorHandlerImpl(MessageSender&,
- FileStorMetrics&,
- const spi::PartitionStateList&,
- ServiceLayerComponentRegister&,
- uint8_t maxPriorityToBlock,
- uint8_t minPriorityToBeBlocking);
+ FileStorHandlerImpl(MessageSender&, FileStorMetrics&,
+ const spi::PartitionStateList&, ServiceLayerComponentRegister&);
~FileStorHandlerImpl();
void setGetNextMessageTimeout(uint32_t timeout) { _getNextMessageTimeout = timeout; }
@@ -158,7 +154,6 @@ public:
void close();
bool schedule(const std::shared_ptr<api::StorageMessage>&, uint16_t disk);
- void pause(uint16_t disk, uint8_t priority) const;
FileStorHandler::LockedMessage getNextMessage(uint16_t disk);
FileStorHandler::LockedMessage getMessage(vespalib::MonitorGuard & guard, Disk & t, PriorityIdx & idx, PriorityIdx::iterator iter);
@@ -203,8 +198,6 @@ private:
std::map<document::Bucket, MergeStatus::SP> _mergeStates;
- uint8_t _maxPriorityToBlock;
- uint8_t _minPriorityToBeBlocking;
uint32_t _getNextMessageTimeout;
vespalib::Monitor _pauseMonitor;
@@ -236,12 +229,6 @@ private:
bool diskIsClosed(uint16_t disk) const;
/**
- * Return whether an already running high priority operation pre-empts
- * (blocks) the operation in msg from even starting in the current thread.
- */
- bool operationBlockedByHigherPriorityThread(const api::StorageMessage& msg, const Disk& disk) const;
-
- /**
* Return whether msg has timed out based on waitTime and the message's
* specified timeout.
*/
@@ -261,7 +248,6 @@ private:
*/
std::unique_ptr<api::StorageReply> makeQueueTimeoutReply(api::StorageMessage& msg) const;
bool messageMayBeAborted(const api::StorageMessage& msg) const;
- bool hasBlockingOperations(const Disk& t) const;
void abortQueuedCommandsForBuckets(Disk& disk, const AbortBucketOperationsCommand& cmd);
bool diskHasActiveOperationForAbortedBucket(const Disk& disk, const AbortBucketOperationsCommand& cmd) const;
void waitUntilNoActiveOperationsForAbortedBuckets(Disk& disk, const AbortBucketOperationsCommand& cmd);
@@ -286,4 +272,3 @@ private:
};
} // storage
-
diff --git a/storage/src/vespa/storage/persistence/filestorage/filestormanager.cpp b/storage/src/vespa/storage/persistence/filestorage/filestormanager.cpp
index 20c08acc01c..5d4cae19b59 100644
--- a/storage/src/vespa/storage/persistence/filestorage/filestormanager.cpp
+++ b/storage/src/vespa/storage/persistence/filestorage/filestormanager.cpp
@@ -116,17 +116,12 @@ FileStorManager::configure(std::unique_ptr<vespa::config::content::StorFilestorC
_disks.resize(_component.getDiskCount());
- _metrics->initDiskMetrics(
- _disks.size(),
- _component.getLoadTypes()->getMetricLoadTypes(),
- (_config->threads.size() > 0) ? (_config->threads.size()) : 6);
-
- _filestorHandler.reset(new FileStorHandler(
- *this, *_metrics, _partitions, _compReg,
- _config->maxPriorityToBlock, _config->minPriorityToBeBlocking));
+ size_t numThreads = (_config->threads.size() == 0) ? 6 : _config->threads.size();
+ _metrics->initDiskMetrics(_disks.size(), _component.getLoadTypes()->getMetricLoadTypes(), numThreads);
+
+ _filestorHandler.reset(new FileStorHandler(*this, *_metrics, _partitions, _compReg));
for (uint32_t i=0; i<_component.getDiskCount(); ++i) {
if (_partitions[i].isUp()) {
- size_t numThreads = (_config->threads.size() == 0) ? 6 : _config->threads.size();
LOG(spam, "Setting up disk %u", i);
for (uint32_t j = 0; j < numThreads; j++) {
_disks[i].push_back(DiskThread::SP(
diff --git a/storage/src/vespa/storage/persistence/filestorage/pausehandler.h b/storage/src/vespa/storage/persistence/filestorage/pausehandler.h
deleted file mode 100644
index 59c543b0067..00000000000
--- a/storage/src/vespa/storage/persistence/filestorage/pausehandler.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-/**
- * @class PauseHandler
- * @ingroup persistence
- *
- * @brief Object that can be used to possibly pause running operation
- */
-#pragma once
-
-#include <vespa/storage/persistence/filestorage/filestorhandler.h>
-
-namespace storage {
-
-class PauseHandler {
- FileStorHandler* _handler;
- uint16_t _disk;
- uint8_t _priority;
-
-public:
- PauseHandler() : _handler(0), _disk(0), _priority(0) {}
- PauseHandler(FileStorHandler& handler, uint16_t disk)
- : _handler(&handler),
- _disk(disk),
- _priority(0)
- {
- }
-
- void setPriority(uint8_t priority) { _priority = priority; }
-
- void pause() const { if (_handler != 0) _handler->pause(_disk, _priority); }
-};
-
-} // storage
-
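For reference, this is the calling pattern the deleted class supported, next to what remains; the "after" comment assumes only the global pause survives, as the filestorhandlerimpl.cpp hunks above indicate:

    // Before: each persistence thread carried a PauseHandler and could be
    // held back behind a higher-priority operation on the same disk.
    PauseHandler pauseHandler(handler, diskId);
    pauseHandler.setPriority(msg.getPriority());
    pauseHandler.pause(); // blocked while blocking-priority ops ran

    // After: no per-operation pausing. Threads only observe the global
    // pause flag in FileStorHandler before picking up new work.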
diff --git a/storage/src/vespa/storage/persistence/persistencethread.cpp b/storage/src/vespa/storage/persistence/persistencethread.cpp
index 13d9d0ffbb6..fded9c87978 100644
--- a/storage/src/vespa/storage/persistence/persistencethread.cpp
+++ b/storage/src/vespa/storage/persistence/persistencethread.cpp
@@ -947,28 +947,23 @@ PersistenceThread::processMessage(api::StorageMessage& msg)
++_env._metrics.operations;
if (msg.getType().isReply()) {
try{
- _env._pauseHandler.setPriority(msg.getPriority());
LOG(debug, "Handling reply: %s", msg.toString().c_str());
LOG(spam, "Message content: %s", msg.toString(true).c_str());
handleReply(static_cast<api::StorageReply&>(msg));
} catch (std::exception& e) {
// It's a reply, so nothing we can do.
- LOG(debug, "Caught exception for %s: %s",
- msg.toString().c_str(),
- e.what());
+ LOG(debug, "Caught exception for %s: %s", msg.toString().c_str(), e.what());
}
} else {
api::StorageCommand& initiatingCommand =
static_cast<api::StorageCommand&>(msg);
try {
- int64_t startTime(
- _component->getClock().getTimeInMillis().getTime());
+ int64_t startTime(_component->getClock().getTimeInMillis().getTime());
LOG(debug, "Handling command: %s", msg.toString().c_str());
LOG(spam, "Message content: %s", msg.toString(true).c_str());
- std::unique_ptr<MessageTracker> tracker(
- handleCommand(initiatingCommand));
+ auto tracker(handleCommand(initiatingCommand));
if (!tracker.get()) {
LOG(debug, "Received unsupported command %s",
msg.getType().getName().c_str());
diff --git a/storage/src/vespa/storage/persistence/persistenceutil.cpp b/storage/src/vespa/storage/persistence/persistenceutil.cpp
index ec666925148..c2dcb8e2a29 100644
--- a/storage/src/vespa/storage/persistence/persistenceutil.cpp
+++ b/storage/src/vespa/storage/persistence/persistenceutil.cpp
@@ -76,7 +76,6 @@ PersistenceUtil::PersistenceUtil(
_metrics(metrics),
_bucketFactory(_component.getBucketIdFactory()),
_repo(_component.getTypeRepo()),
- _pauseHandler(),
_spi(provider)
{
}
@@ -87,9 +86,8 @@ void
PersistenceUtil::updateBucketDatabase(const document::Bucket &bucket, const api::BucketInfo& i)
{
// Update bucket database
- StorBucketDatabase::WrappedEntry entry(getBucketDatabase(bucket.getBucketSpace()).get(
- bucket.getBucketId(),
- "env::updatebucketdb"));
+ StorBucketDatabase::WrappedEntry entry(getBucketDatabase(bucket.getBucketSpace()).get(bucket.getBucketId(),
+ "env::updatebucketdb"));
if (entry.exist()) {
api::BucketInfo info = i;
@@ -101,9 +99,7 @@ PersistenceUtil::updateBucketDatabase(const document::Bucket &bucket, const api:
entry->setBucketInfo(info);
entry.write();
} else {
- LOG(debug,
- "Bucket(%s).getBucketInfo: Bucket does not exist.",
- bucket.getBucketId().toString().c_str());
+ LOG(debug, "Bucket(%s).getBucketInfo: Bucket does not exist.", bucket.getBucketId().toString().c_str());
}
}
diff --git a/storage/src/vespa/storage/persistence/persistenceutil.h b/storage/src/vespa/storage/persistence/persistenceutil.h
index c3bd706f4b7..bf347ccb10a 100644
--- a/storage/src/vespa/storage/persistence/persistenceutil.h
+++ b/storage/src/vespa/storage/persistence/persistenceutil.h
@@ -1,12 +1,11 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
+#include "types.h"
#include <vespa/persistence/spi/persistenceprovider.h>
#include <vespa/storage/common/servicelayercomponent.h>
#include <vespa/storage/persistence/filestorage/filestorhandler.h>
#include <vespa/storage/persistence/filestorage/filestormetrics.h>
-#include <vespa/storage/persistence/filestorage/pausehandler.h>
-#include <vespa/storage/persistence/types.h>
#include <vespa/vespalib/io/fileutil.h>
#include <vespa/storage/storageutil/utils.h>
#include <vespa/config-stor-filestor.h>
@@ -70,7 +69,6 @@ struct PersistenceUtil {
FileStorThreadMetrics& _metrics;
const document::BucketIdFactory& _bucketFactory;
const std::shared_ptr<document::DocumentTypeRepo> _repo;
- PauseHandler _pauseHandler;
spi::PersistenceProvider& _spi;
PersistenceUtil(
diff --git a/storage/src/vespa/storage/storageserver/storagenode.h b/storage/src/vespa/storage/storageserver/storagenode.h
index 38a48ac2eed..c8739442c13 100644
--- a/storage/src/vespa/storage/storageserver/storagenode.h
+++ b/storage/src/vespa/storage/storageserver/storagenode.h
@@ -115,20 +115,20 @@ private:
bool _attemptedStopped;
vespalib::string _pidFile;
- // First components that doesn't depend on others
+ // First components that don't depend on others
std::unique_ptr<StatusWebServer> _statusWebServer;
std::shared_ptr<StorageMetricSet> _metrics;
std::unique_ptr<metrics::MetricManager> _metricManager;
- // Depends on bucket databases and stop() functionality
+ // Depends on bucket databases and stop() functionality
std::unique_ptr<DeadLockDetector> _deadLockDetector;
- // Depends on metric manager
+ // Depends on metric manager
std::unique_ptr<StatusMetricConsumer> _statusMetrics;
- // Depends on metric manager
+ // Depends on metric manager
std::unique_ptr<StateReporter> _stateReporter;
std::unique_ptr<StateManager> _stateManager;
- // The storage chain can depend on anything.
+ // The storage chain can depend on anything.
std::unique_ptr<StorageLink> _chain;
/** Implementation of config callbacks. */
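The ordering comments in this header are load-bearing: C++ constructs non-static data members in declaration order and destroys them in reverse, so later members may safely depend on earlier ones, and the dependent _chain is torn down first. A small self-contained illustration (the types are invented for the example):

    #include <memory>

    struct Metrics { };
    struct Chain {
        explicit Chain(Metrics& m) : metrics(m) { }
        Metrics& metrics;
    };

    struct Node {
        // Declared (and constructed) first, destroyed last.
        std::unique_ptr<Metrics> metrics = std::make_unique<Metrics>();
        // May reference *metrics safely; destroyed before metrics.
        std::unique_ptr<Chain> chain = std::make_unique<Chain>(*metrics);
    };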
diff --git a/storageserver/src/tests/storageservertest.cpp b/storageserver/src/tests/storageservertest.cpp
index 36d72bdc2b4..03b4dfd80da 100644
--- a/storageserver/src/tests/storageservertest.cpp
+++ b/storageserver/src/tests/storageservertest.cpp
@@ -222,8 +222,7 @@ StorageServerTest::setUp()
storConfig.reset(new vdstestlib::DirConfig(getStandardConfig(true)));
addSlobrokConfig(*distConfig, *slobrok);
addSlobrokConfig(*storConfig, *slobrok);
- storConfig->getConfig("stor-filestor")
- .set("fail_disk_after_error_count", "1");
+ storConfig->getConfig("stor-filestor").set("fail_disk_after_error_count", "1");
system("mkdir -p vdsroot/disks/d0");
system("mkdir -p vdsroot.distributor");
slobrokMirror.reset(new SlobrokMirror(slobrok->config()));
diff --git a/storageserver/src/tests/testhelper.cpp b/storageserver/src/tests/testhelper.cpp
index bcc5967215b..b245a6500bd 100644
--- a/storageserver/src/tests/testhelper.cpp
+++ b/storageserver/src/tests/testhelper.cpp
@@ -7,23 +7,6 @@ LOG_SETUP(".testhelper");
namespace storage {
-namespace {
- bool useNewStorageCore() {
- if ( // Unit test directory
- vespalib::fileExists("use_new_storage_core") ||
- // src/cpp directory
- vespalib::fileExists("../use_new_storage_core") ||
- // Top build directory where storage-HEAD remains
- vespalib::fileExists("../../../../../use_new_storage_core"))
- {
- std::cerr << "Using new storage core for unit tests\n";
- return true;
- }
- return false;
- }
- bool newStorageCore(useNewStorageCore());
-}
-
void addStorageDistributionConfig(vdstestlib::DirConfig& dc)
{
vdstestlib::DirConfig::Config* config;
@@ -72,18 +55,13 @@ vdstestlib::DirConfig getStandardConfig(bool storagenode) {
config->set("threads[0].lowestpri 255");
config->set("dir_spread", "4");
config->set("dir_levels", "0");
- config->set("use_new_core", newStorageCore ? "true" : "false");
config->set("maximum_versions_of_single_document_stored", "0");
//config->set("enable_slotfile_cache", "false");
// Unit tests typically use fake low time values, so don't complain
// about them or compact/delete them by default. Override in tests testing that
// behavior
- config->set("time_future_limit", "5");
- config->set("time_past_limit", "2000000000");
config->set("keep_remove_time_period", "2000000000");
config->set("revert_time_period", "2000000000");
- // Don't want test to call exit()
- config->set("fail_disk_after_error_count", "0");
config = &dc.addConfig("stor-memfilepersistence");
// Easier to see what goes wrong with only 1 thread per disk.
config->set("minimum_file_meta_slots", "2");
@@ -94,6 +72,7 @@ vdstestlib::DirConfig getStandardConfig(bool storagenode) {
config = &dc.addConfig("persistence");
config->set("keep_remove_time_period", "2000000000");
config->set("revert_time_period", "2000000000");
+ config->set("fail_disk_after_error_count", "0");
config = &dc.addConfig("stor-bouncer");
config = &dc.addConfig("stor-integritychecker");
config = &dc.addConfig("stor-bucketmover");