author    Henning Baldersheim <balder@yahoo-inc.com>    2020-04-04 22:20:35 +0000
committer Henning Baldersheim <balder@yahoo-inc.com>    2020-04-04 22:35:17 +0000
commit    416ff1764ce98954b3b15fcae0f6a50d76b38323 (patch)
tree      8974071929be2d3723db0a14567dcbeb2f7a1797 /staging_vespalib/src/tests
parent    130d4607a359ae2740bdeeb0179a731751f979a0 (diff)
Move sequenced task executors to staging vespalib
Diffstat (limited to 'staging_vespalib/src/tests')
-rw-r--r--  staging_vespalib/src/tests/sequencedtaskexecutor/.gitignore                               4
-rw-r--r--  staging_vespalib/src/tests/sequencedtaskexecutor/CMakeLists.txt                           31
-rw-r--r--  staging_vespalib/src/tests/sequencedtaskexecutor/adaptive_sequenced_executor_test.cpp    250
-rw-r--r--  staging_vespalib/src/tests/sequencedtaskexecutor/foregroundtaskexecutor_test.cpp         120
-rw-r--r--  staging_vespalib/src/tests/sequencedtaskexecutor/sequencedtaskexecutor_benchmark.cpp      70
-rw-r--r--  staging_vespalib/src/tests/sequencedtaskexecutor/sequencedtaskexecutor_test.cpp          250
6 files changed, 725 insertions, 0 deletions
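
The tests added below all exercise the same core contract of ISequencedTaskExecutor: tasks submitted with the same component id run one at a time, in submission order, while tasks with different component ids may run concurrently. A minimal stand-alone sketch of that contract, assuming the headers land under vespa/vespalib/util/ as in the includes below (the main() wrapper and the 'order' vector are illustrative, not part of this commit):

    #include <vespa/vespalib/util/sequencedtaskexecutor.h>
    #include <vector>

    int main() {
        // Two worker strands, as in the Fixture used by sequencedtaskexecutor_test.cpp.
        auto executor = vespalib::SequencedTaskExecutor::create(2);
        std::vector<int> order;
        // Same component id (0): the two tasks cannot overlap, so 'order'
        // needs no extra locking and always ends up as {1, 2}.
        executor->execute(0, [&] { order.push_back(1); });
        executor->execute(0, [&] { order.push_back(2); });
        executor->sync(); // wait for all queued tasks to complete
        return (order.size() == 2 && order[0] == 1 && order[1] == 2) ? 0 : 1;
    }
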
diff --git a/staging_vespalib/src/tests/sequencedtaskexecutor/.gitignore b/staging_vespalib/src/tests/sequencedtaskexecutor/.gitignore
new file mode 100644
index 00000000000..523cfe5e3e1
--- /dev/null
+++ b/staging_vespalib/src/tests/sequencedtaskexecutor/.gitignore
@@ -0,0 +1,4 @@
+staging_vespalib_sequencedtaskexecutor_test_app
+staging_vespalib_sequencedtaskexecutor_benchmark_app
+staging_vespalib_adaptive_sequenced_executor_test_app
+staging_vespalib_foregroundtaskexecutor_test_app
diff --git a/staging_vespalib/src/tests/sequencedtaskexecutor/CMakeLists.txt b/staging_vespalib/src/tests/sequencedtaskexecutor/CMakeLists.txt
new file mode 100644
index 00000000000..6895eafd94a
--- /dev/null
+++ b/staging_vespalib/src/tests/sequencedtaskexecutor/CMakeLists.txt
@@ -0,0 +1,31 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(staging_vespalib_sequencedtaskexecutor_benchmark_app TEST
+ SOURCES
+ sequencedtaskexecutor_benchmark.cpp
+ DEPENDS
+ staging_vespalib
+)
+
+vespa_add_executable(staging_vespalib_sequencedtaskexecutor_test_app TEST
+ SOURCES
+ sequencedtaskexecutor_test.cpp
+ DEPENDS
+ staging_vespalib
+)
+vespa_add_test(NAME staging_vespalib_sequencedtaskexecutor_test_app COMMAND staging_vespalib_sequencedtaskexecutor_test_app)
+
+vespa_add_executable(staging_vespalib_adaptive_sequenced_executor_test_app TEST
+ SOURCES
+ adaptive_sequenced_executor_test.cpp
+ DEPENDS
+ staging_vespalib
+)
+vespa_add_test(NAME staging_vespalib_adaptive_sequenced_executor_test_app COMMAND staging_vespalib_adaptive_sequenced_executor_test_app)
+
+vespa_add_executable(staging_vespalib_foregroundtaskexecutor_test_app TEST
+ SOURCES
+ foregroundtaskexecutor_test.cpp
+ DEPENDS
+ staging_vespalib
+)
+vespa_add_test(NAME staging_vespalib_foregroundtaskexecutor_test_app COMMAND staging_vespalib_foregroundtaskexecutor_test_app)
diff --git a/staging_vespalib/src/tests/sequencedtaskexecutor/adaptive_sequenced_executor_test.cpp b/staging_vespalib/src/tests/sequencedtaskexecutor/adaptive_sequenced_executor_test.cpp
new file mode 100644
index 00000000000..10f3f6089e3
--- /dev/null
+++ b/staging_vespalib/src/tests/sequencedtaskexecutor/adaptive_sequenced_executor_test.cpp
@@ -0,0 +1,250 @@
+// Copyright 2020 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/util/adaptive_sequenced_executor.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/test/insertion_operators.h>
+
+#include <condition_variable>
+#include <unistd.h>
+
+#include <vespa/log/log.h>
+LOG_SETUP("adaptive_sequenced_executor_test");
+
+namespace vespalib {
+
+
+class Fixture
+{
+public:
+ AdaptiveSequencedExecutor _threads;
+
+ Fixture() : _threads(2, 2, 0, 1000) { }
+};
+
+
+class TestObj
+{
+public:
+ std::mutex _m;
+ std::condition_variable _cv;
+ int _done;
+ int _fail;
+ int _val;
+
+ TestObj()
+ : _m(),
+ _cv(),
+ _done(0),
+ _fail(0),
+ _val(0)
+ {
+ }
+
+ void
+ modify(int oldValue, int newValue)
+ {
+ {
+ std::lock_guard<std::mutex> guard(_m);
+ if (_val == oldValue) {
+ _val = newValue;
+ } else {
+ ++_fail;
+ }
+ ++_done;
+ }
+ _cv.notify_all();
+ }
+
+ void
+ wait(int wantDone)
+ {
+ std::unique_lock<std::mutex> guard(_m);
+ _cv.wait(guard, [&] { return this->_done >= wantDone; });
+ }
+};
+
+TEST_F("testExecute", Fixture) {
+ std::shared_ptr<TestObj> tv(std::make_shared<TestObj>());
+ EXPECT_EQUAL(0, tv->_val);
+ f._threads.execute(1, [&]() { tv->modify(0, 42); });
+ tv->wait(1);
+ EXPECT_EQUAL(0, tv->_fail);
+ EXPECT_EQUAL(42, tv->_val);
+ f._threads.sync();
+ EXPECT_EQUAL(0, tv->_fail);
+ EXPECT_EQUAL(42, tv->_val);
+}
+
+
+TEST_F("require that task with same component id are serialized", Fixture)
+{
+ std::shared_ptr<TestObj> tv(std::make_shared<TestObj>());
+ EXPECT_EQUAL(0, tv->_val);
+ f._threads.execute(0, [&]() { usleep(2000); tv->modify(0, 14); });
+ f._threads.execute(0, [&]() { tv->modify(14, 42); });
+ tv->wait(2);
+ EXPECT_EQUAL(0, tv->_fail);
+ EXPECT_EQUAL(42, tv->_val);
+ f._threads.sync();
+ EXPECT_EQUAL(0, tv->_fail);
+ EXPECT_EQUAL(42, tv->_val);
+}
+
+TEST_F("require that task with different component ids are not serialized", Fixture)
+{
+ int tryCnt = 0;
+ for (tryCnt = 0; tryCnt < 100; ++tryCnt) {
+ std::shared_ptr<TestObj> tv(std::make_shared<TestObj>());
+ EXPECT_EQUAL(0, tv->_val);
+ f._threads.execute(0, [&]() { usleep(2000); tv->modify(0, 14); });
+ f._threads.execute(2, [&]() { tv->modify(14, 42); });
+ tv->wait(2);
+ if (tv->_fail != 1) {
+ continue;
+ }
+ EXPECT_EQUAL(1, tv->_fail);
+ EXPECT_EQUAL(14, tv->_val);
+ f._threads.sync();
+ EXPECT_EQUAL(1, tv->_fail);
+ EXPECT_EQUAL(14, tv->_val);
+ break;
+ }
+ EXPECT_TRUE(tryCnt < 100);
+}
+
+
+TEST_F("require that task with same string component id are serialized", Fixture)
+{
+ std::shared_ptr<TestObj> tv(std::make_shared<TestObj>());
+ EXPECT_EQUAL(0, tv->_val);
+ auto test2 = [&]() { tv->modify(14, 42); };
+ f._threads.execute(f._threads.getExecutorId("0"), [&]() { usleep(2000); tv->modify(0, 14); });
+ f._threads.execute(f._threads.getExecutorId("0"), test2);
+ tv->wait(2);
+ EXPECT_EQUAL(0, tv->_fail);
+ EXPECT_EQUAL(42, tv->_val);
+ f._threads.sync();
+ EXPECT_EQUAL(0, tv->_fail);
+ EXPECT_EQUAL(42, tv->_val);
+}
+
+namespace {
+
+int detectSerializeFailure(Fixture &f, vespalib::stringref altComponentId, int tryLimit)
+{
+ int tryCnt = 0;
+ for (tryCnt = 0; tryCnt < tryLimit; ++tryCnt) {
+ std::shared_ptr<TestObj> tv(std::make_shared<TestObj>());
+ EXPECT_EQUAL(0, tv->_val);
+ f._threads.execute(f._threads.getExecutorId("0"), [&]() { usleep(2000); tv->modify(0, 14); });
+ f._threads.execute(f._threads.getExecutorId(altComponentId), [&]() { tv->modify(14, 42); });
+ tv->wait(2);
+ if (tv->_fail != 1) {
+ continue;
+ }
+ EXPECT_EQUAL(1, tv->_fail);
+ EXPECT_EQUAL(14, tv->_val);
+ f._threads.sync();
+ EXPECT_EQUAL(1, tv->_fail);
+ EXPECT_EQUAL(14, tv->_val);
+ break;
+ }
+ return tryCnt;
+}
+
+vespalib::string makeAltComponentId(Fixture &f)
+{
+ int tryCnt = 0;
+ char altComponentId[20];
+ ISequencedTaskExecutor::ExecutorId executorId0 = f._threads.getExecutorId("0");
+ for (tryCnt = 1; tryCnt < 100; ++tryCnt) {
+ sprintf(altComponentId, "%d", tryCnt);
+ if (f._threads.getExecutorId(altComponentId) == executorId0) {
+ break;
+ }
+ }
+ EXPECT_TRUE(tryCnt < 100);
+ return altComponentId;
+}
+
+}
+
+TEST_F("require that task with different string component ids are not serialized", Fixture)
+{
+ int tryCnt = detectSerializeFailure(f, "2", 100);
+ EXPECT_TRUE(tryCnt < 100);
+}
+
+
+TEST_F("require that task with different string component ids mapping to the same executor id are serialized",
+ Fixture)
+{
+ vespalib::string altComponentId = makeAltComponentId(f);
+ LOG(info, "second string component id is \"%s\"", altComponentId.c_str());
+ int tryCnt = detectSerializeFailure(f, altComponentId, 100);
+ EXPECT_TRUE(tryCnt == 100);
+}
+
+
+TEST_F("require that execute works with const lambda", Fixture)
+{
+ int i = 5;
+ std::vector<int> res;
+ const auto lambda = [i, &res]() mutable
+ { res.push_back(i--); res.push_back(i--); };
+ f._threads.execute(0, lambda);
+ f._threads.execute(0, lambda);
+ f._threads.sync();
+ std::vector<int> exp({5, 4, 5, 4});
+ EXPECT_EQUAL(exp, res);
+ EXPECT_EQUAL(5, i);
+}
+
+TEST_F("require that execute works with reference to lambda", Fixture)
+{
+ int i = 5;
+ std::vector<int> res;
+ auto lambda = [i, &res]() mutable
+ { res.push_back(i--); res.push_back(i--); };
+ auto &lambdaref = lambda;
+ f._threads.execute(0, lambdaref);
+ f._threads.execute(0, lambdaref);
+ f._threads.sync();
+ std::vector<int> exp({5, 4, 5, 4});
+ EXPECT_EQUAL(exp, res);
+ EXPECT_EQUAL(5, i);
+}
+
+TEST_F("require that executeLambda works", Fixture)
+{
+ int i = 5;
+ std::vector<int> res;
+ const auto lambda = [i, &res]() mutable
+ { res.push_back(i--); res.push_back(i--); };
+ f._threads.executeLambda(ISequencedTaskExecutor::ExecutorId(0), lambda);
+ f._threads.sync();
+ std::vector<int> exp({5, 4});
+ EXPECT_EQUAL(exp, res);
+ EXPECT_EQUAL(5, i);
+}
+
+TEST("require that you get correct number of executors") {
+ AdaptiveSequencedExecutor seven(7, 1, 0, 10);
+ EXPECT_EQUAL(7u, seven.getNumExecutors());
+}
+
+TEST("require that you distribute well") {
+ AdaptiveSequencedExecutor seven(7, 1, 0, 10);
+ EXPECT_EQUAL(7u, seven.getNumExecutors());
+ EXPECT_EQUAL(97u, seven.getComponentHashSize());
+ EXPECT_EQUAL(0u, seven.getComponentEffectiveHashSize());
+ for (uint32_t id=0; id < 1000; id++) {
+ EXPECT_EQUAL((id%97)%7, seven.getExecutorId(id).getId());
+ }
+ EXPECT_EQUAL(97u, seven.getComponentHashSize());
+ EXPECT_EQUAL(97u, seven.getComponentEffectiveHashSize());
+}
+
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
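
The two distribution tests at the end of this file pin down how component ids map to executors: the assertions show a 97-slot component hash table (getComponentHashSize()), with the slot index modulo the number of executors selecting the strand. As a worked example for 7 executors, component id 100 lands in slot 100 % 97 = 3 and is then assigned executor 3 % 7 = 3, which is exactly the (id % 97) % 7 value the loop asserts. The same check is repeated for the non-adaptive executor in sequencedtaskexecutor_test.cpp further down.
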
diff --git a/staging_vespalib/src/tests/sequencedtaskexecutor/foregroundtaskexecutor_test.cpp b/staging_vespalib/src/tests/sequencedtaskexecutor/foregroundtaskexecutor_test.cpp
new file mode 100644
index 00000000000..a2671bb81a7
--- /dev/null
+++ b/staging_vespalib/src/tests/sequencedtaskexecutor/foregroundtaskexecutor_test.cpp
@@ -0,0 +1,120 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/util/foregroundtaskexecutor.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+#include <condition_variable>
+#include <unistd.h>
+
+#include <vespa/log/log.h>
+LOG_SETUP("foregroundtaskexecutor_test");
+
+namespace vespalib {
+
+
+class Fixture
+{
+public:
+ ForegroundTaskExecutor _threads;
+
+ Fixture()
+ : _threads()
+ {
+ }
+};
+
+
+class TestObj
+{
+public:
+ std::mutex _m;
+ std::condition_variable _cv;
+ int _done;
+ int _fail;
+ int _val;
+
+ TestObj()
+ : _m(),
+ _cv(),
+ _done(0),
+ _fail(0),
+ _val(0)
+ {
+ }
+
+ void
+ modify(int oldValue, int newValue)
+ {
+ {
+ std::lock_guard<std::mutex> guard(_m);
+ if (_val == oldValue) {
+ _val = newValue;
+ } else {
+ ++_fail;
+ }
+ ++_done;
+ }
+ _cv.notify_all();
+ }
+
+ void
+ wait(int wantDone)
+ {
+ std::unique_lock<std::mutex> guard(_m);
+ _cv.wait(guard, [=] { return this->_done >= wantDone; });
+ }
+};
+
+TEST_F("testExecute", Fixture) {
+ std::shared_ptr<TestObj> tv(std::make_shared<TestObj>());
+ EXPECT_EQUAL(0, tv->_val);
+ f._threads.execute(1, [=]() { tv->modify(0, 42); });
+ tv->wait(1);
+ EXPECT_EQUAL(0, tv->_fail);
+ EXPECT_EQUAL(42, tv->_val);
+ f._threads.sync();
+ EXPECT_EQUAL(0, tv->_fail);
+ EXPECT_EQUAL(42, tv->_val);
+}
+
+
+TEST_F("require that task with same id are serialized", Fixture)
+{
+ std::shared_ptr<TestObj> tv(std::make_shared<TestObj>());
+ EXPECT_EQUAL(0, tv->_val);
+ f._threads.execute(0, [=]() { usleep(2000); tv->modify(0, 14); });
+ f._threads.execute(0, [=]() { tv->modify(14, 42); });
+ tv->wait(2);
+ EXPECT_EQUAL(0, tv->_fail);
+ EXPECT_EQUAL(42, tv->_val);
+ f._threads.sync();
+ EXPECT_EQUAL(0, tv->_fail);
+ EXPECT_EQUAL(42, tv->_val);
+}
+
+TEST_F("require that task with different ids are serialized", Fixture)
+{
+ int tryCnt = 0;
+ for (tryCnt = 0; tryCnt < 100; ++tryCnt) {
+ std::shared_ptr<TestObj> tv(std::make_shared<TestObj>());
+ EXPECT_EQUAL(0, tv->_val);
+ f._threads.execute(0, [=]() { usleep(2000); tv->modify(0, 14); });
+ f._threads.execute(1, [=]() { tv->modify(14, 42); });
+ tv->wait(2);
+ if (tv->_fail != 1) {
+ continue;
+ }
+ EXPECT_EQUAL(1, tv->_fail);
+ EXPECT_EQUAL(14, tv->_val);
+ f._threads.sync();
+ EXPECT_EQUAL(1, tv->_fail);
+ EXPECT_EQUAL(14, tv->_val);
+ break;
+ }
+ EXPECT_TRUE(tryCnt >= 100);
+}
+
+
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
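
Note the inverted expectation in the last test above: EXPECT_TRUE(tryCnt >= 100) requires that the loop never observes a serialization failure, even though the two tasks use different ids. That matches the intent of ForegroundTaskExecutor as a test double that, as its name suggests, runs each task directly in the submitting thread, so everything is serialized regardless of id; the corresponding tests for the real executors expect the opposite (tryCnt < 100).
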
diff --git a/staging_vespalib/src/tests/sequencedtaskexecutor/sequencedtaskexecutor_benchmark.cpp b/staging_vespalib/src/tests/sequencedtaskexecutor/sequencedtaskexecutor_benchmark.cpp
new file mode 100644
index 00000000000..042408d439f
--- /dev/null
+++ b/staging_vespalib/src/tests/sequencedtaskexecutor/sequencedtaskexecutor_benchmark.cpp
@@ -0,0 +1,70 @@
+// Copyright 2020 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/util/sequencedtaskexecutor.h>
+#include <vespa/vespalib/util/adaptive_sequenced_executor.h>
+#include <vespa/vespalib/util/lambdatask.h>
+#include <vespa/vespalib/util/time.h>
+#include <atomic>
+
+using vespalib::ISequencedTaskExecutor;
+using vespalib::SequencedTaskExecutor;
+using vespalib::AdaptiveSequencedExecutor;
+using ExecutorId = vespalib::ISequencedTaskExecutor::ExecutorId;
+
+size_t do_work(size_t size) {
+ size_t ret = 0;
+ for (size_t i = 0; i < size; ++i) {
+ for (size_t j = 0; j < 128; ++j) {
+ ret = (ret + i) * j;
+ }
+ }
+ return ret;
+}
+
+struct SimpleParams {
+ int argc;
+ char **argv;
+ int idx;
+ SimpleParams(int argc_in, char **argv_in) : argc(argc_in), argv(argv_in), idx(0) {}
+ int next(const char *name, int fallback) {
+ ++idx;
+ int value = 0;
+ if (argc > idx) {
+ value = atoi(argv[idx]);
+ } else {
+ value = fallback;
+ }
+ fprintf(stderr, "param %s: %d\n", name, value);
+ return value;
+ }
+};
+
+int main(int argc, char **argv) {
+ SimpleParams params(argc, argv);
+ bool use_adaptive_executor = params.next("use_adaptive_executor", 0);
+ bool optimize_for_throughput = params.next("optimize_for_throughput", 0);
+ size_t num_tasks = params.next("num_tasks", 1000000);
+ size_t num_strands = params.next("num_strands", 4);
+ size_t task_limit = params.next("task_limit", 1000);
+ size_t num_threads = params.next("num_threads", num_strands);
+ size_t max_waiting = params.next("max_waiting", optimize_for_throughput ? 32 : 0);
+ size_t work_size = params.next("work_size", 0);
+ std::atomic<long> counter(0);
+ std::unique_ptr<ISequencedTaskExecutor> executor;
+ if (use_adaptive_executor) {
+ executor = std::make_unique<AdaptiveSequencedExecutor>(num_strands, num_threads, max_waiting, task_limit);
+ } else {
+ auto optimize = optimize_for_throughput
+ ? vespalib::Executor::OptimizeFor::THROUGHPUT
+ : vespalib::Executor::OptimizeFor::LATENCY;
+ executor = SequencedTaskExecutor::create(num_strands, task_limit, optimize);
+ }
+ vespalib::Timer timer;
+ for (size_t task_id = 0; task_id < num_tasks; ++task_id) {
+ executor->executeTask(ExecutorId(task_id % num_strands),
+ vespalib::makeLambdaTask([&counter,work_size] { (void) do_work(work_size); counter++; }));
+ }
+ executor.reset();
+ fprintf(stderr, "\ntotal time: %zu ms\n", vespalib::count_ms(timer.elapsed()));
+ return (size_t(counter) == num_tasks) ? 0 : 1;
+}
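
The benchmark takes its configuration as positional arguments, read in the order SimpleParams declares them: use_adaptive_executor, optimize_for_throughput, num_tasks, num_strands, task_limit, num_threads, max_waiting, work_size, each falling back to the default shown above when omitted. A hypothetical invocation (binary name taken from the CMakeLists.txt in this commit, values purely illustrative) benchmarking the adaptive executor in throughput mode would be:

    ./staging_vespalib_sequencedtaskexecutor_benchmark_app 1 1 1000000 4 1000 4 32 5
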
diff --git a/staging_vespalib/src/tests/sequencedtaskexecutor/sequencedtaskexecutor_test.cpp b/staging_vespalib/src/tests/sequencedtaskexecutor/sequencedtaskexecutor_test.cpp
new file mode 100644
index 00000000000..f5f04738e92
--- /dev/null
+++ b/staging_vespalib/src/tests/sequencedtaskexecutor/sequencedtaskexecutor_test.cpp
@@ -0,0 +1,250 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/util/sequencedtaskexecutor.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/test/insertion_operators.h>
+
+#include <condition_variable>
+#include <unistd.h>
+
+#include <vespa/log/log.h>
+LOG_SETUP("sequencedtaskexecutor_test");
+
+namespace vespalib {
+
+
+class Fixture
+{
+public:
+ std::unique_ptr<ISequencedTaskExecutor> _threads;
+
+ Fixture() : _threads(SequencedTaskExecutor::create(2)) { }
+};
+
+
+class TestObj
+{
+public:
+ std::mutex _m;
+ std::condition_variable _cv;
+ int _done;
+ int _fail;
+ int _val;
+
+ TestObj()
+ : _m(),
+ _cv(),
+ _done(0),
+ _fail(0),
+ _val(0)
+ {
+ }
+
+ void
+ modify(int oldValue, int newValue)
+ {
+ {
+ std::lock_guard<std::mutex> guard(_m);
+ if (_val == oldValue) {
+ _val = newValue;
+ } else {
+ ++_fail;
+ }
+ ++_done;
+ }
+ _cv.notify_all();
+ }
+
+ void
+ wait(int wantDone)
+ {
+ std::unique_lock<std::mutex> guard(_m);
+ _cv.wait(guard, [=] { return this->_done >= wantDone; });
+ }
+};
+
+TEST_F("testExecute", Fixture) {
+ std::shared_ptr<TestObj> tv(std::make_shared<TestObj>());
+ EXPECT_EQUAL(0, tv->_val);
+ f._threads->execute(1, [=]() { tv->modify(0, 42); });
+ tv->wait(1);
+ EXPECT_EQUAL(0, tv->_fail);
+ EXPECT_EQUAL(42, tv->_val);
+ f._threads->sync();
+ EXPECT_EQUAL(0, tv->_fail);
+ EXPECT_EQUAL(42, tv->_val);
+}
+
+
+TEST_F("require that task with same component id are serialized", Fixture)
+{
+ std::shared_ptr<TestObj> tv(std::make_shared<TestObj>());
+ EXPECT_EQUAL(0, tv->_val);
+ f._threads->execute(0, [=]() { usleep(2000); tv->modify(0, 14); });
+ f._threads->execute(0, [=]() { tv->modify(14, 42); });
+ tv->wait(2);
+ EXPECT_EQUAL(0, tv->_fail);
+ EXPECT_EQUAL(42, tv->_val);
+ f._threads->sync();
+ EXPECT_EQUAL(0, tv->_fail);
+ EXPECT_EQUAL(42, tv->_val);
+}
+
+TEST_F("require that task with different component ids are not serialized", Fixture)
+{
+ int tryCnt = 0;
+ for (tryCnt = 0; tryCnt < 100; ++tryCnt) {
+ std::shared_ptr<TestObj> tv(std::make_shared<TestObj>());
+ EXPECT_EQUAL(0, tv->_val);
+ f._threads->execute(0, [=]() { usleep(2000); tv->modify(0, 14); });
+ f._threads->execute(2, [=]() { tv->modify(14, 42); });
+ tv->wait(2);
+ if (tv->_fail != 1) {
+ continue;
+ }
+ EXPECT_EQUAL(1, tv->_fail);
+ EXPECT_EQUAL(14, tv->_val);
+ f._threads->sync();
+ EXPECT_EQUAL(1, tv->_fail);
+ EXPECT_EQUAL(14, tv->_val);
+ break;
+ }
+ EXPECT_TRUE(tryCnt < 100);
+}
+
+
+TEST_F("require that task with same string component id are serialized", Fixture)
+{
+ std::shared_ptr<TestObj> tv(std::make_shared<TestObj>());
+ EXPECT_EQUAL(0, tv->_val);
+ auto test2 = [=]() { tv->modify(14, 42); };
+ f._threads->execute(f._threads->getExecutorId("0"), [=]() { usleep(2000); tv->modify(0, 14); });
+ f._threads->execute(f._threads->getExecutorId("0"), test2);
+ tv->wait(2);
+ EXPECT_EQUAL(0, tv->_fail);
+ EXPECT_EQUAL(42, tv->_val);
+ f._threads->sync();
+ EXPECT_EQUAL(0, tv->_fail);
+ EXPECT_EQUAL(42, tv->_val);
+}
+
+namespace {
+
+int detectSerializeFailure(Fixture &f, vespalib::stringref altComponentId, int tryLimit)
+{
+ int tryCnt = 0;
+ for (tryCnt = 0; tryCnt < tryLimit; ++tryCnt) {
+ std::shared_ptr<TestObj> tv(std::make_shared<TestObj>());
+ EXPECT_EQUAL(0, tv->_val);
+ f._threads->execute(f._threads->getExecutorId("0"), [=]() { usleep(2000); tv->modify(0, 14); });
+ f._threads->execute(f._threads->getExecutorId(altComponentId), [=]() { tv->modify(14, 42); });
+ tv->wait(2);
+ if (tv->_fail != 1) {
+ continue;
+ }
+ EXPECT_EQUAL(1, tv->_fail);
+ EXPECT_EQUAL(14, tv->_val);
+ f._threads->sync();
+ EXPECT_EQUAL(1, tv->_fail);
+ EXPECT_EQUAL(14, tv->_val);
+ break;
+ }
+ return tryCnt;
+}
+
+vespalib::string makeAltComponentId(Fixture &f)
+{
+ int tryCnt = 0;
+ char altComponentId[20];
+ ISequencedTaskExecutor::ExecutorId executorId0 = f._threads->getExecutorId("0");
+ for (tryCnt = 1; tryCnt < 100; ++tryCnt) {
+ sprintf(altComponentId, "%d", tryCnt);
+ if (f._threads->getExecutorId(altComponentId) == executorId0) {
+ break;
+ }
+ }
+ EXPECT_TRUE(tryCnt < 100);
+ return altComponentId;
+}
+
+}
+
+TEST_F("require that task with different string component ids are not serialized", Fixture)
+{
+ int tryCnt = detectSerializeFailure(f, "2", 100);
+ EXPECT_TRUE(tryCnt < 100);
+}
+
+
+TEST_F("require that task with different string component ids mapping to the same executor id are serialized",
+ Fixture)
+{
+ vespalib::string altComponentId = makeAltComponentId(f);
+ LOG(info, "second string component id is \"%s\"", altComponentId.c_str());
+ int tryCnt = detectSerializeFailure(f, altComponentId, 100);
+ EXPECT_TRUE(tryCnt == 100);
+}
+
+
+TEST_F("require that execute works with const lambda", Fixture)
+{
+ int i = 5;
+ std::vector<int> res;
+ const auto lambda = [i, &res]() mutable
+ { res.push_back(i--); res.push_back(i--); };
+ f._threads->execute(0, lambda);
+ f._threads->execute(0, lambda);
+ f._threads->sync();
+ std::vector<int> exp({5, 4, 5, 4});
+ EXPECT_EQUAL(exp, res);
+ EXPECT_EQUAL(5, i);
+}
+
+TEST_F("require that execute works with reference to lambda", Fixture)
+{
+ int i = 5;
+ std::vector<int> res;
+ auto lambda = [i, &res]() mutable
+ { res.push_back(i--); res.push_back(i--); };
+ auto &lambdaref = lambda;
+ f._threads->execute(0, lambdaref);
+ f._threads->execute(0, lambdaref);
+ f._threads->sync();
+ std::vector<int> exp({5, 4, 5, 4});
+ EXPECT_EQUAL(exp, res);
+ EXPECT_EQUAL(5, i);
+}
+
+TEST_F("require that executeLambda works", Fixture)
+{
+ int i = 5;
+ std::vector<int> res;
+ const auto lambda = [i, &res]() mutable
+ { res.push_back(i--); res.push_back(i--); };
+ f._threads->executeLambda(ISequencedTaskExecutor::ExecutorId(0), lambda);
+ f._threads->sync();
+ std::vector<int> exp({5, 4});
+ EXPECT_EQUAL(exp, res);
+ EXPECT_EQUAL(5, i);
+}
+
+TEST("require that you get correct number of executors") {
+ auto seven = SequencedTaskExecutor::create(7);
+ EXPECT_EQUAL(7u, seven->getNumExecutors());
+}
+
+TEST("require that you distribute well") {
+ auto seven = SequencedTaskExecutor::create(7);
+ EXPECT_EQUAL(7u, seven->getNumExecutors());
+ EXPECT_EQUAL(97u, seven->getComponentHashSize());
+ EXPECT_EQUAL(0u, seven->getComponentEffectiveHashSize());
+ for (uint32_t id=0; id < 1000; id++) {
+ EXPECT_EQUAL((id%97)%7, seven->getExecutorId(id).getId());
+ }
+ EXPECT_EQUAL(97u, seven->getComponentHashSize());
+ EXPECT_EQUAL(97u, seven->getComponentEffectiveHashSize());
+}
+
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }