about summary refs log tree commit diff stats
path: root/searchcore
diff options
context:
space:
mode:
author	Tor Egge <Tor.Egge@broadpark.no>	2018-09-21 13:47:00 +0200
committer	gjoranv <gv@oath.com>	2019-01-21 15:09:22 +0100
commit	c30e067851e194606158ea76da3e100893674d3c (patch)
tree	1bf4e0c7d4afd12fb1462ab4f6945e925ee8c6cd /searchcore
parent	be6b0026a8b54c02244bc73382096dc9ed41b27a (diff)
Remove deprecated settings from proton.def.
Diffstat (limited to 'searchcore')
-rw-r--r--	searchcore/src/vespa/searchcore/config/proton.def	| 17
-rw-r--r--	searchcore/src/vespa/searchcore/proton/server/proton.cpp	| 4
2 files changed, 2 insertions, 19 deletions
diff --git a/searchcore/src/vespa/searchcore/config/proton.def b/searchcore/src/vespa/searchcore/config/proton.def
index 8740a91bab7..2888a3999a6 100644
--- a/searchcore/src/vespa/searchcore/config/proton.def
+++ b/searchcore/src/vespa/searchcore/config/proton.def
@@ -66,10 +66,6 @@ flush.memory.each.diskbloatfactor double default=0.2
## Unit is seconds with 1 day being the default.
flush.memory.maxage.time double default=86400.0
-## Max diff in serial number allowed before that takes precedence.
-## TODO Deprecated and ignored. Remove soon.
-flush.memory.maxage.serial long default=1000000
-
## When resource limit for memory is reached we choose a conservative mode for the flush strategy.
## In this case this factor is multiplied with 'maxmemory' and 'each.maxmemory' to calculate conservative values to use instead.
flush.memory.conservative.memorylimitfactor double default=0.5
@@ -255,10 +251,6 @@ summary.log.chunk.compression.level int default=9
## Max size in bytes per chunk.
summary.log.chunk.maxbytes int default=65536
-## Max number of documents in each chunk.
-## TODO Deprecated and ignored. Remove soon.
-summary.log.chunk.maxentries int default=256
-
## Skip crc32 check on read.
summary.log.chunk.skipcrconread bool default=false
@@ -269,10 +261,6 @@ summary.log.compact2activefile bool default=false
## Max size per summary file.
summary.log.maxfilesize long default=1000000000
-## Max number of removes per summary file.
-## TODO Deprecated and ignored. Remove soon.
-summary.log.maxentriesperfile long default=20000000
-
## Max disk bloat factor. This will trigger compacting.
summary.log.maxdiskbloatfactor double default=0.1
@@ -283,11 +271,6 @@ summary.log.maxbucketspread double default=2.5
## Value in the range [0.0, 1.0]
summary.log.minfilesizefactor double default=0.2
-## Number of threads used for compressing incoming documents/compacting.
-## Deprecated. Use feeding.concurrency instead.
-## TODO Remove
-summary.log.numthreads int default=8 restart
-
## Control io options during flush of stored documents.
summary.write.io enum {NORMAL, OSYNC, DIRECTIO} default=DIRECTIO
diff --git a/searchcore/src/vespa/searchcore/proton/server/proton.cpp b/searchcore/src/vespa/searchcore/proton/server/proton.cpp
index 2174412bedf..cf40050537d 100644
--- a/searchcore/src/vespa/searchcore/proton/server/proton.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/proton.cpp
@@ -90,11 +90,11 @@ size_t
deriveCompactionCompressionThreads(const ProtonConfig &proton,
const HwInfo::Cpu &cpuInfo) {
size_t scaledCores = (size_t)std::ceil(cpuInfo.cores() * proton.feeding.concurrency);
- size_t threads = std::max(scaledCores, size_t(proton.summary.log.numthreads));
+ size_t threads = std::max(scaledCores, 8ul);
// We need at least 1 guaranteed free worker in order to ensure progress so #documentsdbs + 1 should suffice,
// but we will not be cheap and give #documentsdbs * 2
- return std::max(threads, proton.documentdb.size() * 2);;
+ return std::max(threads, proton.documentdb.size() * 2);
}
const vespalib::string CUSTOM_COMPONENT_API_PATH = "/state/v1/custom/component";