From 798f1affa2f5be0bfb0b4c00519b6e935b674c4d Mon Sep 17 00:00:00 2001
From: Henning Baldersheim
Date: Sat, 4 Apr 2020 20:34:22 +0000
Subject: Adhere to latency versus throughput settings.

---
 messagebus/src/vespa/messagebus/network/rpcnetwork.cpp | 17 +++++++++++++----
 messagebus/src/vespa/messagebus/network/rpcnetwork.h   |  8 +++++---
 2 files changed, 18 insertions(+), 7 deletions(-)

(limited to 'messagebus')

diff --git a/messagebus/src/vespa/messagebus/network/rpcnetwork.cpp b/messagebus/src/vespa/messagebus/network/rpcnetwork.cpp
index e049e436d03..bbc9fc94d14 100644
--- a/messagebus/src/vespa/messagebus/network/rpcnetwork.cpp
+++ b/messagebus/src/vespa/messagebus/network/rpcnetwork.cpp
@@ -81,7 +81,7 @@ struct TargetPoolTask : public FNET_Task {
 };
 
 std::unique_ptr
-createExecutor(RPCNetworkParams::OptimizeFor optimizeFor) {
+createSingleExecutor(RPCNetworkParams::OptimizeFor optimizeFor) {
     switch (optimizeFor) {
         case RPCNetworkParams::OptimizeFor::LATENCY:
             return std::make_unique(1, 0x10000);
@@ -91,6 +91,14 @@ createExecutor(RPCNetworkParams::OptimizeFor optimizeFor) {
     }
 }
 
+std::unique_ptr
+createExecutor(RPCNetworkParams::OptimizeFor optimizeFor, uint32_t numThreads) {
+    if ((optimizeFor == RPCNetworkParams::OptimizeFor::LATENCY) || (numThreads >= 2)) {
+        return std::make_unique(numThreads, 0x10000);
+    }
+    return std::make_unique(1000, 10, 1ms);
+}
+
 }
 
 RPCNetwork::SendContext::SendContext(RPCNetwork &net, const Message &msg,
@@ -147,9 +155,10 @@ RPCNetwork::RPCNetwork(const RPCNetworkParams &params) :
     _targetPool(std::make_unique(params.getConnectionExpireSecs())),
     _targetPoolTask(std::make_unique(_scheduler, *_targetPool)),
     _servicePool(std::make_unique(*_mirror, 4096)),
-    _singleEncodeExecutor(createExecutor(params.getOptimizeFor())),
-    _singleDecodeExecutor(createExecutor(params.getOptimizeFor())),
-    _executor(std::make_unique(params.getNumThreads(), 128000)),
+    _singleEncodeExecutor(createSingleExecutor(params.getOptimizeFor())),
+    _singleDecodeExecutor(createSingleExecutor(params.getOptimizeFor())),
+    _encodeExecutor(createExecutor(params.getOptimizeFor(), std::max(1u, params.getNumThreads()/4))),
+    _decodeExecutor(createExecutor(params.getOptimizeFor(), std::max(1u, params.getNumThreads()/4))),
     _sendV1(std::make_unique()),
     _sendV2(std::make_unique()),
     _sendAdapters(),
diff --git a/messagebus/src/vespa/messagebus/network/rpcnetwork.h b/messagebus/src/vespa/messagebus/network/rpcnetwork.h
index a37cf1d9176..625efd7ae27 100644
--- a/messagebus/src/vespa/messagebus/network/rpcnetwork.h
+++ b/messagebus/src/vespa/messagebus/network/rpcnetwork.h
@@ -65,9 +65,11 @@ private:
     std::unique_ptr _targetPool;
     std::unique_ptr _targetPoolTask;
     std::unique_ptr _servicePool;
+    // TODO Instead of having 4 different executors, we should move the SequencedTaskExecutor into vespalib and use it there.
     std::unique_ptr _singleEncodeExecutor;
     std::unique_ptr _singleDecodeExecutor;
-    std::unique_ptr _executor;
+    std::unique_ptr _encodeExecutor;
+    std::unique_ptr _decodeExecutor;
     std::unique_ptr _sendV1;
     std::unique_ptr _sendV2;
     SendAdapterMap _sendAdapters;
@@ -225,8 +227,8 @@ public:
     const slobrok::api::IMirrorAPI &getMirror() const override;
     CompressionConfig getCompressionConfig() { return _compressionConfig; }
     void invoke(FRT_RPCRequest *req);
-    vespalib::Executor & getEncodeExecutor(bool requireSequencing) const { return requireSequencing ? *_singleEncodeExecutor : *_executor; }
-    vespalib::Executor & getDecodeExecutor(bool requireSequencing) const { return requireSequencing ? *_singleDecodeExecutor : *_executor; }
+    vespalib::Executor & getEncodeExecutor(bool requireSequencing) const { return requireSequencing ? *_singleEncodeExecutor : *_encodeExecutor; }
+    vespalib::Executor & getDecodeExecutor(bool requireSequencing) const { return requireSequencing ? *_singleDecodeExecutor : *_decodeExecutor; }
     bool allowDispatchForEncode() const { return _allowDispatchForEncode; }
     bool allowDispatchForDecode() const { return _allowDispatchForDecode; }
 
-- 
cgit v1.2.3
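
Note on the change above: createSingleExecutor() keeps the previous behaviour for the sequenced (single-threaded) encode/decode paths, while the new two-argument createExecutor() chooses the executor for the unsequenced paths. A latency-optimized network, or any configuration that gets two or more threads per codec, receives a plain thread-pool executor sized to numThreads; a throughput-optimized configuration left with a single thread falls back to a batching executor. The RPCNetwork constructor then gives each of the new _encodeExecutor/_decodeExecutor pools a quarter of params.getNumThreads(), but never fewer than one thread. The sketch below only illustrates that selection policy; the executor classes (ThreadPoolExecutor, BatchingExecutor) are hypothetical stand-ins, not the vespalib types the patch actually instantiates.

// Minimal, self-contained sketch of the selection policy introduced in
// createExecutor() above. The executor types are hypothetical placeholders.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <memory>

enum class OptimizeFor { LATENCY, THROUGHPUT };

struct Executor {                       // stand-in for the common executor interface
    virtual ~Executor() = default;
    virtual const char *describe() const = 0;
};

struct ThreadPoolExecutor : Executor {  // hypothetical: N workers, dispatch ASAP (low latency)
    explicit ThreadPoolExecutor(uint32_t threads) : _threads(threads) {}
    const char *describe() const override { return "thread pool (latency-oriented)"; }
    uint32_t _threads;
};

struct BatchingExecutor : Executor {    // hypothetical: single thread, batches work (throughput-oriented)
    const char *describe() const override { return "single-threaded batching (throughput-oriented)"; }
};

// Mirrors the branch added in the patch: latency, or >= 2 threads, picks the
// thread pool; otherwise use the batching executor.
std::unique_ptr<Executor> createExecutor(OptimizeFor optimizeFor, uint32_t numThreads) {
    if ((optimizeFor == OptimizeFor::LATENCY) || (numThreads >= 2)) {
        return std::make_unique<ThreadPoolExecutor>(numThreads);
    }
    return std::make_unique<BatchingExecutor>();
}

int main() {
    // As in the RPCNetwork constructor: encode and decode each get a quarter of
    // the configured thread count, but never less than one thread.
    uint32_t configuredThreads = 4;
    uint32_t perCodecThreads = std::max<uint32_t>(1u, configuredThreads / 4);
    auto encode = createExecutor(OptimizeFor::THROUGHPUT, perCodecThreads);
    auto decode = createExecutor(OptimizeFor::LATENCY, perCodecThreads);
    std::printf("encode: %s\ndecode: %s\n", encode->describe(), decode->describe());
    return 0;
}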