summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorHenning Baldersheim <balder@yahoo-inc.com>2020-04-04 23:08:03 +0000
committerHenning Baldersheim <balder@yahoo-inc.com>2020-04-04 23:08:03 +0000
commit14443fdcab31276ae11684981bf4bb055e3bffdc (patch)
tree0c95dfce00a12073fe39412bd6d5bc3c231f6056
parent416ff1764ce98954b3b15fcae0f6a50d76b38323 (diff)
Also allow for testing of the adaptive task executor.
-rw-r--r--searchcore/src/vespa/searchcore/config/proton.def2
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/threading_service_config.cpp1
-rw-r--r--staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.cpp21
-rw-r--r--storage/src/vespa/storage/config/stor-communicationmanager.def2
-rw-r--r--vespalib/src/vespa/vespalib/util/executor.h2
5 files changed, 17 insertions, 11 deletions
diff --git a/searchcore/src/vespa/searchcore/config/proton.def b/searchcore/src/vespa/searchcore/config/proton.def
index 6040fc651c2..9434fe7d7ff 100644
--- a/searchcore/src/vespa/searchcore/config/proton.def
+++ b/searchcore/src/vespa/searchcore/config/proton.def
@@ -119,7 +119,7 @@ indexing.threads int default=1 restart
## Option to specify what is most important during indexing.
## This is experimental and will most likely be temporary.
-indexing.optimize enum {LATENCY, THROUGHPUT} default=LATENCY restart
+indexing.optimize enum {LATENCY, THROUGHPUT, ADAPTIVE} default=LATENCY restart
## Maximum number of pending operations for each of the internal
## indexing threads. Only used when visibility delay is zero.
diff --git a/searchcore/src/vespa/searchcore/proton/server/threading_service_config.cpp b/searchcore/src/vespa/searchcore/proton/server/threading_service_config.cpp
index 5bc6ef543f3..e95920ca606 100644
--- a/searchcore/src/vespa/searchcore/proton/server/threading_service_config.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/threading_service_config.cpp
@@ -39,6 +39,7 @@ selectOptimization(ProtonConfig::Indexing::Optimize optimize) {
switch (optimize) {
case CfgOptimize::LATENCY: return OptimizeFor::LATENCY;
case CfgOptimize::THROUGHPUT: return OptimizeFor::THROUGHPUT;
+ case CfgOptimize::ADAPTIVE: return OptimizeFor::ADAPTIVE;
}
return OptimizeFor::LATENCY;
}
diff --git a/staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.cpp b/staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.cpp
index aa43bfaae7d..2688a35fc6c 100644
--- a/staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.cpp
+++ b/staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.cpp
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "sequencedtaskexecutor.h"
+#include "adaptive_sequenced_executor.h"
#include "singleexecutor.h"
#include <vespa/vespalib/util/blockingthreadstackexecutor.h>
@@ -16,16 +17,20 @@ constexpr uint32_t stackSize = 128 * 1024;
std::unique_ptr<ISequencedTaskExecutor>
SequencedTaskExecutor::create(uint32_t threads, uint32_t taskLimit, OptimizeFor optimize)
{
- auto executors = std::make_unique<std::vector<std::unique_ptr<vespalib::SyncableThreadExecutor>>>();
- executors->reserve(threads);
- for (uint32_t id = 0; id < threads; ++id) {
- if (optimize == OptimizeFor::THROUGHPUT) {
- executors->push_back(std::make_unique<SingleExecutor>(taskLimit));
- } else {
- executors->push_back(std::make_unique<BlockingThreadStackExecutor>(1, stackSize, taskLimit));
+ if (optimize == OptimizeFor::ADAPTIVE) {
+ return std::make_unique<AdaptiveSequencedExecutor>(threads, threads, taskLimit/100, taskLimit);
+ } else {
+ auto executors = std::make_unique<std::vector<std::unique_ptr<SyncableThreadExecutor>>>();
+ executors->reserve(threads);
+ for (uint32_t id = 0; id < threads; ++id) {
+ if (optimize == OptimizeFor::THROUGHPUT) {
+ executors->push_back(std::make_unique<SingleExecutor>(taskLimit, taskLimit/100, 1ms));
+ } else {
+ executors->push_back(std::make_unique<BlockingThreadStackExecutor>(1, stackSize, taskLimit));
+ }
}
+ return std::unique_ptr<ISequencedTaskExecutor>(new SequencedTaskExecutor(std::move(executors)));
}
- return std::unique_ptr<ISequencedTaskExecutor>(new SequencedTaskExecutor(std::move(executors)));
}
SequencedTaskExecutor::~SequencedTaskExecutor()
diff --git a/storage/src/vespa/storage/config/stor-communicationmanager.def b/storage/src/vespa/storage/config/stor-communicationmanager.def
index e0f2e89fa9d..52dce733321 100644
--- a/storage/src/vespa/storage/config/stor-communicationmanager.def
+++ b/storage/src/vespa/storage/config/stor-communicationmanager.def
@@ -37,7 +37,7 @@ mbus.num_network_threads int default=2
## Any value below 1 will be 1.
mbus.num_threads int default=1
-mbus.optimize_for enum {LATENCY, THROUGHPUT} default = THROUGHPUT
+mbus.optimize_for enum {LATENCY, THROUGHPUT, ADAPTIVE} default = THROUGHPUT
## Enable to use above thread pool for encoding replies
## False will use network(fnet) thread
diff --git a/vespalib/src/vespa/vespalib/util/executor.h b/vespalib/src/vespa/vespalib/util/executor.h
index 97cde4ffbe2..a412832ecaf 100644
--- a/vespalib/src/vespa/vespalib/util/executor.h
+++ b/vespalib/src/vespa/vespalib/util/executor.h
@@ -23,7 +23,7 @@ public:
virtual ~Task() {}
};
- enum class OptimizeFor {LATENCY, THROUGHPUT};
+ enum class OptimizeFor {LATENCY, THROUGHPUT, ADAPTIVE};
/**
* Execute the given task using one of the internal threads some