diff options
author | Henning Baldersheim <balder@yahoo-inc.com> | 2022-05-18 11:05:54 +0000 |
---|---|---|
committer | Henning Baldersheim <balder@yahoo-inc.com> | 2022-05-18 11:05:54 +0000 |
commit | 39443ba7ffe7966fb06555ef832f4eff3756c076 (patch) | |
tree | 5e0a2fd6ab79aa6be435551ea307be9750e69227 /vespalib/src/tests/sequencedtaskexecutor | |
parent | 36df8bd3d9fd4ee60aadd04af89199a8bc504e68 (diff) |
Move state_server, metrics and all executors from staging_vespalib to vespalib.
Diffstat (limited to 'vespalib/src/tests/sequencedtaskexecutor')
6 files changed, 827 insertions, 0 deletions
diff --git a/vespalib/src/tests/sequencedtaskexecutor/.gitignore b/vespalib/src/tests/sequencedtaskexecutor/.gitignore new file mode 100644 index 00000000000..3b6f7c74a67 --- /dev/null +++ b/vespalib/src/tests/sequencedtaskexecutor/.gitignore @@ -0,0 +1,4 @@ +vespalib_sequencedtaskexecutor_test_app +vespalib_sequencedtaskexecutor_benchmark_app +vespalib_adaptive_sequenced_executor_test_app +vespalib_foregroundtaskexecutor_test_app diff --git a/vespalib/src/tests/sequencedtaskexecutor/CMakeLists.txt b/vespalib/src/tests/sequencedtaskexecutor/CMakeLists.txt new file mode 100644 index 00000000000..6a488b3c716 --- /dev/null +++ b/vespalib/src/tests/sequencedtaskexecutor/CMakeLists.txt @@ -0,0 +1,31 @@ +# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +vespa_add_executable(vespalib_sequencedtaskexecutor_benchmark_app TEST + SOURCES + sequencedtaskexecutor_benchmark.cpp + DEPENDS + vespalib +) + +vespa_add_executable(vespalib_sequencedtaskexecutor_test_app TEST + SOURCES + sequencedtaskexecutor_test.cpp + DEPENDS + vespalib +) +vespa_add_test(NAME vespalib_sequencedtaskexecutor_test_app COMMAND vespalib_sequencedtaskexecutor_test_app) + +vespa_add_executable(vespalib_adaptive_sequenced_executor_test_app TEST + SOURCES + adaptive_sequenced_executor_test.cpp + DEPENDS + vespalib +) +vespa_add_test(NAME vespalib_adaptive_sequenced_executor_test_app COMMAND vespalib_adaptive_sequenced_executor_test_app) + +vespa_add_executable(vespalib_foregroundtaskexecutor_test_app TEST + SOURCES + foregroundtaskexecutor_test.cpp + DEPENDS + vespalib +) +vespa_add_test(NAME vespalib_foregroundtaskexecutor_test_app COMMAND vespalib_foregroundtaskexecutor_test_app) diff --git a/vespalib/src/tests/sequencedtaskexecutor/adaptive_sequenced_executor_test.cpp b/vespalib/src/tests/sequencedtaskexecutor/adaptive_sequenced_executor_test.cpp new file mode 100644 index 00000000000..1a458f86232 --- /dev/null +++ 
b/vespalib/src/tests/sequencedtaskexecutor/adaptive_sequenced_executor_test.cpp @@ -0,0 +1,248 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#include <vespa/vespalib/util/adaptive_sequenced_executor.h> +#include <vespa/vespalib/testkit/testapp.h> +#include <vespa/vespalib/test/insertion_operators.h> + +#include <condition_variable> +#include <unistd.h> + +#include <vespa/log/log.h> +LOG_SETUP("adaptive_sequenced_executor_test"); + +namespace vespalib { + + +class Fixture +{ +public: + AdaptiveSequencedExecutor _threads; + + Fixture(bool is_max_pending_hard=true) : _threads(2, 2, 0, 1000, is_max_pending_hard) { } +}; + + +class TestObj +{ +public: + std::mutex _m; + std::condition_variable _cv; + int _done; + int _fail; + int _val; + + TestObj() noexcept + : _m(), + _cv(), + _done(0), + _fail(0), + _val(0) + { + } + + void + modify(int oldValue, int newValue) + { + { + std::lock_guard<std::mutex> guard(_m); + if (_val == oldValue) { + _val = newValue; + } else { + ++_fail; + } + ++_done; + _cv.notify_all(); + } + } + + void + wait(int wantDone) + { + std::unique_lock<std::mutex> guard(_m); + _cv.wait(guard, [&] { return this->_done >= wantDone; }); + } +}; + +vespalib::stringref ZERO("0"); + +TEST_F("testExecute", Fixture) { + std::shared_ptr<TestObj> tv(std::make_shared<TestObj>()); + EXPECT_EQUAL(0, tv->_val); + f._threads.execute(1, [&]() { tv->modify(0, 42); }); + tv->wait(1); + EXPECT_EQUAL(0, tv->_fail); + EXPECT_EQUAL(42, tv->_val); + f._threads.sync_all(); + EXPECT_EQUAL(0, tv->_fail); + EXPECT_EQUAL(42, tv->_val); +} + + +TEST_F("require that task with same component id are serialized", Fixture) +{ + std::shared_ptr<TestObj> tv(std::make_shared<TestObj>()); + EXPECT_EQUAL(0, tv->_val); + f._threads.execute(0, [&]() { usleep(2000); tv->modify(0, 14); }); + f._threads.execute(0, [&]() { tv->modify(14, 42); }); + tv->wait(2); + EXPECT_EQUAL(0, tv->_fail); + EXPECT_EQUAL(42, tv->_val); + 
f._threads.sync_all(); + EXPECT_EQUAL(0, tv->_fail); + EXPECT_EQUAL(42, tv->_val); +} + +TEST_F("require that task with different component ids are not serialized", Fixture) +{ + int tryCnt = 0; + for (tryCnt = 0; tryCnt < 100; ++tryCnt) { + std::shared_ptr<TestObj> tv(std::make_shared<TestObj>()); + EXPECT_EQUAL(0, tv->_val); + f._threads.execute(0, [&]() { usleep(2000); tv->modify(0, 14); }); + f._threads.execute(1, [&]() { tv->modify(14, 42); }); + tv->wait(2); + if (tv->_fail != 1) { + continue; + } + EXPECT_EQUAL(1, tv->_fail); + EXPECT_EQUAL(14, tv->_val); + f._threads.sync_all(); + EXPECT_EQUAL(1, tv->_fail); + EXPECT_EQUAL(14, tv->_val); + break; + } + EXPECT_TRUE(tryCnt < 100); +} + + +TEST_F("require that task with same string component id are serialized", Fixture) +{ + std::shared_ptr<TestObj> tv(std::make_shared<TestObj>()); + EXPECT_EQUAL(0, tv->_val); + auto test2 = [&]() { tv->modify(14, 42); }; + f._threads.execute(f._threads.getExecutorIdFromName(ZERO), [&]() { usleep(2000); tv->modify(0, 14); }); + f._threads.execute(f._threads.getExecutorIdFromName(ZERO), test2); + tv->wait(2); + EXPECT_EQUAL(0, tv->_fail); + EXPECT_EQUAL(42, tv->_val); + f._threads.sync_all(); + EXPECT_EQUAL(0, tv->_fail); + EXPECT_EQUAL(42, tv->_val); +} + +namespace { + +int detectSerializeFailure(Fixture &f, vespalib::stringref altComponentId, int tryLimit) +{ + int tryCnt = 0; + for (tryCnt = 0; tryCnt < tryLimit; ++tryCnt) { + std::shared_ptr<TestObj> tv(std::make_shared<TestObj>()); + EXPECT_EQUAL(0, tv->_val); + f._threads.execute(f._threads.getExecutorIdFromName(ZERO), [&]() { usleep(2000); tv->modify(0, 14); }); + f._threads.execute(f._threads.getExecutorIdFromName(altComponentId), [&]() { tv->modify(14, 42); }); + tv->wait(2); + if (tv->_fail != 1) { + continue; + } + EXPECT_EQUAL(1, tv->_fail); + EXPECT_EQUAL(14, tv->_val); + f._threads.sync_all(); + EXPECT_EQUAL(1, tv->_fail); + EXPECT_EQUAL(14, tv->_val); + break; + } + return tryCnt; +} + +vespalib::string 
makeAltComponentId(Fixture &f) +{ + int tryCnt = 0; + char altComponentId[20]; + ISequencedTaskExecutor::ExecutorId executorId0 = f._threads.getExecutorIdFromName(ZERO); + for (tryCnt = 1; tryCnt < 100; ++tryCnt) { + sprintf(altComponentId, "%d", tryCnt); + if (f._threads.getExecutorIdFromName(altComponentId) == executorId0) { + break; + } + } + EXPECT_TRUE(tryCnt < 100); + return altComponentId; +} + +} + +TEST_F("require that task with different string component ids are not serialized", Fixture) +{ + int tryCnt = detectSerializeFailure(f, "2", 100); + EXPECT_TRUE(tryCnt < 100); +} + + +TEST_F("require that task with different string component ids mapping to the same executor id are serialized", + Fixture) +{ + vespalib::string altComponentId = makeAltComponentId(f); + LOG(info, "second string component id is \"%s\"", altComponentId.c_str()); + int tryCnt = detectSerializeFailure(f, altComponentId, 100); + EXPECT_TRUE(tryCnt == 100); +} + + +TEST_F("require that execute works with const lambda", Fixture) +{ + int i = 5; + std::vector<int> res; + const auto lambda = [i, &res]() mutable + { res.push_back(i--); res.push_back(i--); }; + f._threads.execute(0, lambda); + f._threads.execute(0, lambda); + f._threads.sync_all(); + std::vector<int> exp({5, 4, 5, 4}); + EXPECT_EQUAL(exp, res); + EXPECT_EQUAL(5, i); +} + +TEST_F("require that execute works with reference to lambda", Fixture) +{ + int i = 5; + std::vector<int> res; + auto lambda = [i, &res]() mutable + { res.push_back(i--); res.push_back(i--); }; + auto &lambdaref = lambda; + f._threads.execute(0, lambdaref); + f._threads.execute(0, lambdaref); + f._threads.sync_all(); + std::vector<int> exp({5, 4, 5, 4}); + EXPECT_EQUAL(exp, res); + EXPECT_EQUAL(5, i); +} + +TEST_F("require that executeLambda works", Fixture) +{ + int i = 5; + std::vector<int> res; + const auto lambda = [i, &res]() mutable + { res.push_back(i--); res.push_back(i--); }; + f._threads.executeLambda(ISequencedTaskExecutor::ExecutorId(0), lambda); 
+ f._threads.sync_all(); + std::vector<int> exp({5, 4}); + EXPECT_EQUAL(exp, res); + EXPECT_EQUAL(5, i); +} + +TEST("require that you get correct number of executors") { + AdaptiveSequencedExecutor seven(7, 1, 0, 10, true); + EXPECT_EQUAL(7u, seven.getNumExecutors()); +} + +TEST("require that you distribute well") { + AdaptiveSequencedExecutor seven(7, 1, 0, 10, true); + EXPECT_EQUAL(7u, seven.getNumExecutors()); + for (uint32_t id=0; id < 1000; id++) { + EXPECT_EQUAL(id%7, seven.getExecutorId(id).getId()); + } +} + +} + +TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/vespalib/src/tests/sequencedtaskexecutor/foregroundtaskexecutor_test.cpp b/vespalib/src/tests/sequencedtaskexecutor/foregroundtaskexecutor_test.cpp new file mode 100644 index 00000000000..56fb570209c --- /dev/null +++ b/vespalib/src/tests/sequencedtaskexecutor/foregroundtaskexecutor_test.cpp @@ -0,0 +1,120 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#include <vespa/vespalib/util/foregroundtaskexecutor.h> +#include <vespa/vespalib/testkit/testapp.h> + +#include <condition_variable> +#include <unistd.h> + +#include <vespa/log/log.h> +LOG_SETUP("foregroundtaskexecutor_test"); + +namespace vespalib { + + +class Fixture +{ +public: + ForegroundTaskExecutor _threads; + + Fixture() + : _threads() + { + } +}; + + +class TestObj +{ +public: + std::mutex _m; + std::condition_variable _cv; + int _done; + int _fail; + int _val; + + TestObj() noexcept + : _m(), + _cv(), + _done(0), + _fail(0), + _val(0) + { + } + + void + modify(int oldValue, int newValue) + { + { + std::lock_guard<std::mutex> guard(_m); + if (_val == oldValue) { + _val = newValue; + } else { + ++_fail; + } + ++_done; + } + _cv.notify_all(); + } + + void + wait(int wantDone) + { + std::unique_lock<std::mutex> guard(_m); + _cv.wait(guard, [this, wantDone] { return this->_done >= wantDone; }); + } +}; + +TEST_F("testExecute", Fixture) { + std::shared_ptr<TestObj> 
tv(std::make_shared<TestObj>()); + EXPECT_EQUAL(0, tv->_val); + f._threads.execute(1, [=]() { tv->modify(0, 42); }); + tv->wait(1); + EXPECT_EQUAL(0, tv->_fail); + EXPECT_EQUAL(42, tv->_val); + f._threads.sync_all(); + EXPECT_EQUAL(0, tv->_fail); + EXPECT_EQUAL(42, tv->_val); +} + + +TEST_F("require that task with same id are serialized", Fixture) +{ + std::shared_ptr<TestObj> tv(std::make_shared<TestObj>()); + EXPECT_EQUAL(0, tv->_val); + f._threads.execute(0, [=]() { usleep(2000); tv->modify(0, 14); }); + f._threads.execute(0, [=]() { tv->modify(14, 42); }); + tv->wait(2); + EXPECT_EQUAL(0, tv->_fail); + EXPECT_EQUAL(42, tv->_val); + f._threads.sync_all(); + EXPECT_EQUAL(0, tv->_fail); + EXPECT_EQUAL(42, tv->_val); +} + +TEST_F("require that task with different ids are serialized", Fixture) +{ + int tryCnt = 0; + for (tryCnt = 0; tryCnt < 100; ++tryCnt) { + std::shared_ptr<TestObj> tv(std::make_shared<TestObj>()); + EXPECT_EQUAL(0, tv->_val); + f._threads.execute(0, [=]() { usleep(2000); tv->modify(0, 14); }); + f._threads.execute(1, [=]() { tv->modify(14, 42); }); + tv->wait(2); + if (tv->_fail != 1) { + continue; + } + EXPECT_EQUAL(1, tv->_fail); + EXPECT_EQUAL(14, tv->_val); + f._threads.sync_all(); + EXPECT_EQUAL(1, tv->_fail); + EXPECT_EQUAL(14, tv->_val); + break; + } + EXPECT_TRUE(tryCnt >= 100); +} + + +} + +TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/vespalib/src/tests/sequencedtaskexecutor/sequencedtaskexecutor_benchmark.cpp b/vespalib/src/tests/sequencedtaskexecutor/sequencedtaskexecutor_benchmark.cpp new file mode 100644 index 00000000000..0f7c82ef988 --- /dev/null +++ b/vespalib/src/tests/sequencedtaskexecutor/sequencedtaskexecutor_benchmark.cpp @@ -0,0 +1,73 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#include <vespa/vespalib/util/sequencedtaskexecutor.h> +#include <vespa/vespalib/util/adaptive_sequenced_executor.h> +#include <vespa/vespalib/util/lambdatask.h> +#include <vespa/vespalib/util/time.h> +#include <atomic> +#include <cinttypes> + +using vespalib::ISequencedTaskExecutor; +using vespalib::SequencedTaskExecutor; +using vespalib::AdaptiveSequencedExecutor; +using ExecutorId = vespalib::ISequencedTaskExecutor::ExecutorId; + +size_t do_work(size_t size) { + size_t ret = 0; + for (size_t i = 0; i < size; ++i) { + for (size_t j = 0; j < 128; ++j) { + ret = (ret + i) * j; + } + } + return ret; +} + +struct SimpleParams { + int argc; + char **argv; + int idx; + SimpleParams(int argc_in, char **argv_in) : argc(argc_in), argv(argv_in), idx(0) {} + int next(const char *name, int fallback) { + ++idx; + int value = 0; + if (argc > idx) { + value = atoi(argv[idx]); + } else { + value = fallback; + } + fprintf(stderr, "param %s: %d\n", name, value); + return value; + } +}; + +VESPA_THREAD_STACK_TAG(sequenced_executor) + +int main(int argc, char **argv) { + SimpleParams params(argc, argv); + bool use_adaptive_executor = params.next("use_adaptive_executor", 0); + bool optimize_for_throughput = params.next("optimize_for_throughput", 0); + size_t num_tasks = params.next("num_tasks", 1000000); + size_t num_strands = params.next("num_strands", 4); + size_t task_limit = params.next("task_limit", 1000); + size_t num_threads = params.next("num_threads", num_strands); + size_t max_waiting = params.next("max_waiting", optimize_for_throughput ? 32 : 0); + size_t work_size = params.next("work_size", 0); + std::atomic<long> counter(0); + std::unique_ptr<ISequencedTaskExecutor> executor; + if (use_adaptive_executor) { + executor = std::make_unique<AdaptiveSequencedExecutor>(num_strands, num_threads, max_waiting, task_limit, true); + } else { + auto optimize = optimize_for_throughput + ? 
vespalib::Executor::OptimizeFor::THROUGHPUT + : vespalib::Executor::OptimizeFor::LATENCY; + executor = SequencedTaskExecutor::create(sequenced_executor, num_strands, task_limit, true, optimize); + } + vespalib::Timer timer; + for (size_t task_id = 0; task_id < num_tasks; ++task_id) { + executor->executeTask(ExecutorId(task_id % num_strands), + vespalib::makeLambdaTask([&counter,work_size] { (void) do_work(work_size); counter++; })); + } + executor.reset(); + fprintf(stderr, "\ntotal time: %" PRId64 " ms\n", vespalib::count_ms(timer.elapsed())); + return (size_t(counter) == num_tasks) ? 0 : 1; +} diff --git a/vespalib/src/tests/sequencedtaskexecutor/sequencedtaskexecutor_test.cpp b/vespalib/src/tests/sequencedtaskexecutor/sequencedtaskexecutor_test.cpp new file mode 100644 index 00000000000..705d6346e8c --- /dev/null +++ b/vespalib/src/tests/sequencedtaskexecutor/sequencedtaskexecutor_test.cpp @@ -0,0 +1,351 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#include <vespa/vespalib/util/sequencedtaskexecutor.h> +#include <vespa/vespalib/util/adaptive_sequenced_executor.h> +#include <vespa/vespalib/util/blockingthreadstackexecutor.h> +#include <vespa/vespalib/util/singleexecutor.h> +#include <vespa/vespalib/testkit/testapp.h> +#include <vespa/vespalib/test/insertion_operators.h> + +#include <condition_variable> +#include <unistd.h> + +#include <vespa/log/log.h> +LOG_SETUP("sequencedtaskexecutor_test"); + +VESPA_THREAD_STACK_TAG(sequenced_executor) + +namespace vespalib { + + +class Fixture +{ +public: + std::unique_ptr<ISequencedTaskExecutor> _threads; + + Fixture(bool is_task_limit_hard = true) : + _threads(SequencedTaskExecutor::create(sequenced_executor, 2, 1000, is_task_limit_hard, + Executor::OptimizeFor::LATENCY)) + { } +}; + + +class TestObj +{ +public: + std::mutex _m; + std::condition_variable _cv; + int _done; + int _fail; + int _val; + + TestObj() noexcept + : _m(), + _cv(), + _done(0), + _fail(0), + _val(0) + { + } + + void + modify(int oldValue, int newValue) + { + { + std::lock_guard<std::mutex> guard(_m); + if (_val == oldValue) { + _val = newValue; + } else { + ++_fail; + } + ++_done; + } + _cv.notify_all(); + } + + void + wait(int wantDone) + { + std::unique_lock<std::mutex> guard(_m); + _cv.wait(guard, [this, wantDone] { return this->_done >= wantDone; }); + } +}; + +vespalib::stringref ZERO("0"); + +TEST_F("testExecute", Fixture) { + std::shared_ptr<TestObj> tv(std::make_shared<TestObj>()); + EXPECT_EQUAL(0, tv->_val); + f._threads->execute(1, [=]() { tv->modify(0, 42); }); + tv->wait(1); + EXPECT_EQUAL(0, tv->_fail); + EXPECT_EQUAL(42, tv->_val); + f._threads->sync_all(); + EXPECT_EQUAL(0, tv->_fail); + EXPECT_EQUAL(42, tv->_val); +} + + +TEST_F("require that task with same component id are serialized", Fixture) +{ + std::shared_ptr<TestObj> tv(std::make_shared<TestObj>()); + EXPECT_EQUAL(0, tv->_val); + f._threads->execute(0, [=]() { usleep(2000); tv->modify(0, 14); }); + f._threads->execute(0, 
[=]() { tv->modify(14, 42); }); + tv->wait(2); + EXPECT_EQUAL(0, tv->_fail); + EXPECT_EQUAL(42, tv->_val); + f._threads->sync_all(); + EXPECT_EQUAL(0, tv->_fail); + EXPECT_EQUAL(42, tv->_val); +} + +TEST_F("require that task with same component id are serialized when executed with list", Fixture) +{ + std::shared_ptr<TestObj> tv(std::make_shared<TestObj>()); + EXPECT_EQUAL(0, tv->_val); + ISequencedTaskExecutor::ExecutorId executorId = f._threads->getExecutorId(0); + ISequencedTaskExecutor::TaskList list; + list.template emplace_back(executorId, makeLambdaTask([=]() { usleep(2000); tv->modify(0, 14); })); + list.template emplace_back(executorId, makeLambdaTask([=]() { tv->modify(14, 42); })); + f._threads->executeTasks(std::move(list)); + tv->wait(2); + EXPECT_EQUAL(0, tv->_fail); + EXPECT_EQUAL(42, tv->_val); + f._threads->sync_all(); + EXPECT_EQUAL(0, tv->_fail); + EXPECT_EQUAL(42, tv->_val); +} + +TEST_F("require that task with different component ids are not serialized", Fixture) +{ + int tryCnt = 0; + for (tryCnt = 0; tryCnt < 100; ++tryCnt) { + std::shared_ptr<TestObj> tv(std::make_shared<TestObj>()); + EXPECT_EQUAL(0, tv->_val); + f._threads->execute(0, [=]() { usleep(2000); tv->modify(0, 14); }); + f._threads->execute(2, [=]() { tv->modify(14, 42); }); + tv->wait(2); + if (tv->_fail != 1) { + continue; + } + EXPECT_EQUAL(1, tv->_fail); + EXPECT_EQUAL(14, tv->_val); + f._threads->sync_all(); + EXPECT_EQUAL(1, tv->_fail); + EXPECT_EQUAL(14, tv->_val); + break; + } + EXPECT_TRUE(tryCnt < 100); +} + + +TEST_F("require that task with same string component id are serialized", Fixture) +{ + std::shared_ptr<TestObj> tv(std::make_shared<TestObj>()); + EXPECT_EQUAL(0, tv->_val); + auto test2 = [=]() { tv->modify(14, 42); }; + f._threads->execute(f._threads->getExecutorIdFromName(ZERO), [=]() { usleep(2000); tv->modify(0, 14); }); + f._threads->execute(f._threads->getExecutorIdFromName(ZERO), test2); + tv->wait(2); + EXPECT_EQUAL(0, tv->_fail); + EXPECT_EQUAL(42, 
tv->_val); + f._threads->sync_all(); + EXPECT_EQUAL(0, tv->_fail); + EXPECT_EQUAL(42, tv->_val); +} + +namespace { + +int +detectSerializeFailure(Fixture &f, vespalib::stringref altComponentId, int tryLimit) +{ + int tryCnt = 0; + for (tryCnt = 0; tryCnt < tryLimit; ++tryCnt) { + std::shared_ptr<TestObj> tv(std::make_shared<TestObj>()); + EXPECT_EQUAL(0, tv->_val); + f._threads->execute(f._threads->getExecutorIdFromName(ZERO), [=]() { usleep(2000); tv->modify(0, 14); }); + f._threads->execute(f._threads->getExecutorIdFromName(altComponentId), [=]() { tv->modify(14, 42); }); + tv->wait(2); + if (tv->_fail != 1) { + continue; + } + EXPECT_EQUAL(1, tv->_fail); + EXPECT_EQUAL(14, tv->_val); + f._threads->sync_all(); + EXPECT_EQUAL(1, tv->_fail); + EXPECT_EQUAL(14, tv->_val); + break; + } + return tryCnt; +} + +vespalib::string +makeAltComponentId(Fixture &f) +{ + int tryCnt = 0; + char altComponentId[20]; + ISequencedTaskExecutor::ExecutorId executorId0 = f._threads->getExecutorIdFromName(ZERO); + for (tryCnt = 1; tryCnt < 100; ++tryCnt) { + sprintf(altComponentId, "%d", tryCnt); + if (f._threads->getExecutorIdFromName(altComponentId) == executorId0) { + break; + } + } + EXPECT_TRUE(tryCnt < 100); + return altComponentId; +} + +} + +TEST_F("require that task with different string component ids are not serialized", Fixture) +{ + int tryCnt = detectSerializeFailure(f, "2", 100); + EXPECT_TRUE(tryCnt < 100); +} + + +TEST_F("require that task with different string component ids mapping to the same executor id are serialized", + Fixture) +{ + vespalib::string altComponentId = makeAltComponentId(f); + LOG(info, "second string component id is \"%s\"", altComponentId.c_str()); + int tryCnt = detectSerializeFailure(f, altComponentId, 100); + EXPECT_TRUE(tryCnt == 100); +} + + +TEST_F("require that execute works with const lambda", Fixture) +{ + int i = 5; + std::vector<int> res; + const auto lambda = [i, &res]() mutable + { res.push_back(i--); res.push_back(i--); }; + 
f._threads->execute(0, lambda); + f._threads->execute(0, lambda); + f._threads->sync_all(); + std::vector<int> exp({5, 4, 5, 4}); + EXPECT_EQUAL(exp, res); + EXPECT_EQUAL(5, i); +} + +TEST_F("require that execute works with reference to lambda", Fixture) +{ + int i = 5; + std::vector<int> res; + auto lambda = [i, &res]() mutable + { res.push_back(i--); res.push_back(i--); }; + auto &lambdaref = lambda; + f._threads->execute(0, lambdaref); + f._threads->execute(0, lambdaref); + f._threads->sync_all(); + std::vector<int> exp({5, 4, 5, 4}); + EXPECT_EQUAL(exp, res); + EXPECT_EQUAL(5, i); +} + +TEST_F("require that executeLambda works", Fixture) +{ + int i = 5; + std::vector<int> res; + const auto lambda = [i, &res]() mutable + { res.push_back(i--); res.push_back(i--); }; + f._threads->executeLambda(ISequencedTaskExecutor::ExecutorId(0), lambda); + f._threads->sync_all(); + std::vector<int> exp({5, 4}); + EXPECT_EQUAL(exp, res); + EXPECT_EQUAL(5, i); +} + +TEST("require that you get correct number of executors") { + auto seven = SequencedTaskExecutor::create(sequenced_executor, 7); + EXPECT_EQUAL(7u, seven->getNumExecutors()); +} + +void verifyHardLimitForLatency(bool expect_hard) { + auto sequenced = SequencedTaskExecutor::create(sequenced_executor, 1, 100, expect_hard, Executor::OptimizeFor::LATENCY); + const SequencedTaskExecutor & seq = dynamic_cast<const SequencedTaskExecutor &>(*sequenced); + EXPECT_EQUAL(expect_hard,nullptr != dynamic_cast<const BlockingThreadStackExecutor *>(seq.first_executor())); +} + +void verifyHardLimitForThroughput(bool expect_hard) { + auto sequenced = SequencedTaskExecutor::create(sequenced_executor, 1, 100, expect_hard, Executor::OptimizeFor::THROUGHPUT); + const SequencedTaskExecutor & seq = dynamic_cast<const SequencedTaskExecutor &>(*sequenced); + const SingleExecutor * first = dynamic_cast<const SingleExecutor *>(seq.first_executor()); + EXPECT_TRUE(first != nullptr); + EXPECT_EQUAL(expect_hard, first->isBlocking()); +} + 
+TEST("require that you can get executor with both hard and soft limit") { + verifyHardLimitForLatency(true); + verifyHardLimitForLatency(false); + verifyHardLimitForThroughput(true); + verifyHardLimitForThroughput(false); +} + + +TEST("require that you distribute well") { + auto seven = SequencedTaskExecutor::create(sequenced_executor, 7); + const SequencedTaskExecutor & seq = dynamic_cast<const SequencedTaskExecutor &>(*seven); + const uint32_t NUM_EXACT = 8 * seven->getNumExecutors(); + EXPECT_EQUAL(7u, seven->getNumExecutors()); + EXPECT_EQUAL(97u, seq.getComponentHashSize()); + EXPECT_EQUAL(0u, seq.getComponentEffectiveHashSize()); + for (uint32_t id=0; id < 1000; id++) { + if (id < NUM_EXACT) { + EXPECT_EQUAL(id % seven->getNumExecutors(), seven->getExecutorId(id).getId()); + } else { + EXPECT_EQUAL(((id - NUM_EXACT) % 97) % seven->getNumExecutors(), seven->getExecutorId(id).getId()); + } + } + EXPECT_EQUAL(97u, seq.getComponentHashSize()); + EXPECT_EQUAL(97u, seq.getComponentEffectiveHashSize()); +} + +TEST("require that similar names get perfect distribution with 4 executors") { + auto four = SequencedTaskExecutor::create(sequenced_executor, 4); + EXPECT_EQUAL(0u, four->getExecutorIdFromName("f1").getId()); + EXPECT_EQUAL(1u, four->getExecutorIdFromName("f2").getId()); + EXPECT_EQUAL(2u, four->getExecutorIdFromName("f3").getId()); + EXPECT_EQUAL(3u, four->getExecutorIdFromName("f4").getId()); + EXPECT_EQUAL(0u, four->getExecutorIdFromName("f5").getId()); + EXPECT_EQUAL(1u, four->getExecutorIdFromName("f6").getId()); + EXPECT_EQUAL(2u, four->getExecutorIdFromName("f7").getId()); + EXPECT_EQUAL(3u, four->getExecutorIdFromName("f8").getId()); +} + +TEST("require that similar names get perfect distribution with 8 executors") { + auto four = SequencedTaskExecutor::create(sequenced_executor, 8); + EXPECT_EQUAL(0u, four->getExecutorIdFromName("f1").getId()); + EXPECT_EQUAL(1u, four->getExecutorIdFromName("f2").getId()); + EXPECT_EQUAL(2u, 
four->getExecutorIdFromName("f3").getId()); + EXPECT_EQUAL(3u, four->getExecutorIdFromName("f4").getId()); + EXPECT_EQUAL(4u, four->getExecutorIdFromName("f5").getId()); + EXPECT_EQUAL(5u, four->getExecutorIdFromName("f6").getId()); + EXPECT_EQUAL(6u, four->getExecutorIdFromName("f7").getId()); + EXPECT_EQUAL(7u, four->getExecutorIdFromName("f8").getId()); +} + +TEST("Test creation of different types") { + auto iseq = SequencedTaskExecutor::create(sequenced_executor, 1); + + EXPECT_EQUAL(1u, iseq->getNumExecutors()); + auto * seq = dynamic_cast<SequencedTaskExecutor *>(iseq.get()); + ASSERT_TRUE(seq != nullptr); + + iseq = SequencedTaskExecutor::create(sequenced_executor, 1, 1000, true, Executor::OptimizeFor::LATENCY); + seq = dynamic_cast<SequencedTaskExecutor *>(iseq.get()); + ASSERT_TRUE(seq != nullptr); + + iseq = SequencedTaskExecutor::create(sequenced_executor, 1, 1000, true, Executor::OptimizeFor::THROUGHPUT); + seq = dynamic_cast<SequencedTaskExecutor *>(iseq.get()); + ASSERT_TRUE(seq != nullptr); + + iseq = SequencedTaskExecutor::create(sequenced_executor, 1, 1000, true, Executor::OptimizeFor::ADAPTIVE, 17); + auto aseq = dynamic_cast<AdaptiveSequencedExecutor *>(iseq.get()); + ASSERT_TRUE(aseq != nullptr); +} + +} + +TEST_MAIN() { TEST_RUN_ALL(); } |