From 1b3e34605eba38778deaa09f81998c9b8c80acc7 Mon Sep 17 00:00:00 2001
From: Tor Brede Vekterli
Date: Mon, 27 May 2019 10:40:48 +0000
Subject: Move datastore and btree code from searchlib to vespalib

The namespace is still `search` and not `vespalib`, since changing it would
require modifying a massive amount of code.

Other changes:

- Move `BufferWriter` from searchlib to vespalib
- Move assertion and rand48 utilities from staging_vespalib to vespalib
- Move gtest utility code from staging_vespalib to vespalib
---
 searchlib/CMakeLists.txt | 8 -
 searchlib/src/apps/tests/btreestress_test.cpp | 28 +-
 .../tests/attribute/comparator/comparator_test.cpp | 8 +-
 .../enumeratedsave/enumeratedsave_test.cpp | 2 +-
 .../tests/attribute/postinglist/postinglist.cpp | 15 +-
 searchlib/src/tests/btree/.gitignore | 5 -
 searchlib/src/tests/btree/CMakeLists.txt | 29 -
 searchlib/src/tests/btree/btree_test.cpp | 1526 --------------------
 .../src/tests/btree/btreeaggregation_test.cpp | 1157 ---------------
 searchlib/src/tests/btree/frozenbtree_test.cpp | 469 ------
 searchlib/src/tests/btree/iteratespeed.cpp | 212 ---
 .../src/tests/datastore/array_store/CMakeLists.txt | 8 -
 .../datastore/array_store/array_store_test.cpp | 360 -----
 .../datastore/array_store_config/CMakeLists.txt | 8 -
 .../array_store_config/array_store_config_test.cpp | 84 --
 .../src/tests/datastore/buffer_type/CMakeLists.txt | 8 -
 .../datastore/buffer_type/buffer_type_test.cpp | 116 --
 .../src/tests/datastore/datastore/CMakeLists.txt | 9 -
 .../tests/datastore/datastore/datastore_test.cpp | 584 --------
 .../tests/datastore/unique_store/CMakeLists.txt | 8 -
 .../datastore/unique_store/unique_store_test.cpp | 267 ----
 .../src/tests/diskindex/fusion/fusion_test.cpp | 6 +-
 .../compact_words_store_test.cpp | 2 +-
 .../memoryindex/field_index/field_index_test.cpp | 4 +-
 .../predicate_bounds_posting_list_test.cpp | 4 +-
 .../src/tests/predicate/predicate_index_test.cpp | 4 +-
 .../predicate_interval_posting_list_test.cpp | 4 +-
 ...redicate_zstar_compressed_posting_list_test.cpp | 4 +-
 .../src/tests/predicate/simple_index_test.cpp | 10 +-
 .../tests/util/bufferwriter/bufferwriter_test.cpp | 2 +-
 searchlib/src/tests/util/bufferwriter/work.cpp | 2 +-
 searchlib/src/vespa/searchlib/CMakeLists.txt | 2 -
 .../attribute/attributefilebufferwriter.h | 2 +-
 .../searchlib/attribute/attributeiterators.hpp | 4 +-
 .../src/vespa/searchlib/attribute/dociditerator.h | 2 +-
 .../searchlib/attribute/enumattributesaver.cpp | 2 +-
 .../src/vespa/searchlib/attribute/enumstore.h | 10 +-
 .../src/vespa/searchlib/attribute/enumstore.hpp | 16 +-
 .../vespa/searchlib/attribute/enumstorebase.cpp | 14 +-
 .../src/vespa/searchlib/attribute/enumstorebase.h | 4 +-
 .../src/vespa/searchlib/attribute/loadedvalue.h | 2 +-
 .../searchlib/attribute/multi_value_mapping.h | 2 +-
 .../searchlib/attribute/multi_value_mapping.hpp | 2 +-
 .../searchlib/attribute/multi_value_mapping_base.h | 2 +-
 .../attribute/multinumericattributesaver.cpp | 2 +-
 .../searchlib/attribute/multistringattribute.hpp | 2 +-
 .../attribute/multivalueattributesaverutils.cpp | 2 +-
 .../attribute/multivalueattributesaverutils.h | 2 +-
 .../searchlib/attribute/posting_list_merger.h | 2 +-
 .../src/vespa/searchlib/attribute/postingdata.h | 2 +-
 .../searchlib/attribute/postinglistattribute.h | 4 +-
 .../attribute/postinglistsearchcontext.cpp | 2 +-
 .../searchlib/attribute/postinglisttraits.cpp | 10 +-
 .../vespa/searchlib/attribute/postinglisttraits.h | 2 +-
 .../src/vespa/searchlib/attribute/postingstore.cpp | 5 +-
 .../src/vespa/searchlib/attribute/reference.h | 2 +-
 .../searchlib/attribute/reference_attribute.cpp | 6 +-
 .../searchlib/attribute/reference_attribute.h | 2 +-
 .../attribute/reference_attribute_saver.cpp | 2 +-
 .../attribute/reference_attribute_saver.h | 4 +-
 .../searchlib/attribute/reference_mappings.cpp | 4 +-
 .../vespa/searchlib/attribute/reference_mappings.h | 2 +-
 .../attribute/singleenumattributesaver.cpp | 2 +-
 .../searchlib/attribute/singlestringattribute.hpp | 2 +-
 searchlib/src/vespa/searchlib/btree/CMakeLists.txt | 17 -
 searchlib/src/vespa/searchlib/btree/OWNERS | 2 -
 searchlib/src/vespa/searchlib/btree/btree.h | 167 ---
 searchlib/src/vespa/searchlib/btree/btree.hpp | 30 -
 .../src/vespa/searchlib/btree/btree_key_data.cpp | 12 -
 .../src/vespa/searchlib/btree/btree_key_data.h | 85 --
 .../src/vespa/searchlib/btree/btreeaggregator.cpp | 15 -
 .../src/vespa/searchlib/btree/btreeaggregator.h | 42 -
 .../src/vespa/searchlib/btree/btreeaggregator.hpp | 92 --
 .../src/vespa/searchlib/btree/btreebuilder.cpp | 19 -
 searchlib/src/vespa/searchlib/btree/btreebuilder.h | 70 -
 .../src/vespa/searchlib/btree/btreebuilder.hpp | 449 ------
 .../src/vespa/searchlib/btree/btreeinserter.cpp | 21 -
 .../src/vespa/searchlib/btree/btreeinserter.h | 67 -
 .../src/vespa/searchlib/btree/btreeinserter.hpp | 184 ---
 .../src/vespa/searchlib/btree/btreeiterator.cpp | 21 -
 .../src/vespa/searchlib/btree/btreeiterator.h | 884 ------------
 .../src/vespa/searchlib/btree/btreeiterator.hpp | 1361 -----------------
 searchlib/src/vespa/searchlib/btree/btreenode.cpp | 28 -
 searchlib/src/vespa/searchlib/btree/btreenode.h | 508 -------
 searchlib/src/vespa/searchlib/btree/btreenode.hpp | 384 -----
 .../vespa/searchlib/btree/btreenodeallocator.cpp | 20 -
 .../src/vespa/searchlib/btree/btreenodeallocator.h | 196 ---
 .../vespa/searchlib/btree/btreenodeallocator.hpp | 434 ------
 .../src/vespa/searchlib/btree/btreenodestore.cpp | 21 -
 .../src/vespa/searchlib/btree/btreenodestore.h | 222 ---
 .../src/vespa/searchlib/btree/btreenodestore.hpp | 83 --
 .../src/vespa/searchlib/btree/btreeremover.cpp | 18 -
 searchlib/src/vespa/searchlib/btree/btreeremover.h | 104 --
 .../src/vespa/searchlib/btree/btreeremover.hpp | 185 ---
 searchlib/src/vespa/searchlib/btree/btreeroot.cpp | 18 -
 searchlib/src/vespa/searchlib/btree/btreeroot.h | 217 ---
 searchlib/src/vespa/searchlib/btree/btreeroot.hpp | 489 -------
 .../src/vespa/searchlib/btree/btreerootbase.cpp | 18 -
 .../src/vespa/searchlib/btree/btreerootbase.h | 95 --
 .../src/vespa/searchlib/btree/btreerootbase.hpp | 85 --
 searchlib/src/vespa/searchlib/btree/btreestore.cpp | 13 -
 searchlib/src/vespa/searchlib/btree/btreestore.h | 509 -------
 searchlib/src/vespa/searchlib/btree/btreestore.hpp | 957 ------------
 searchlib/src/vespa/searchlib/btree/btreetraits.h | 19 -
 .../src/vespa/searchlib/btree/minmaxaggrcalc.h | 50 -
 .../src/vespa/searchlib/btree/minmaxaggregated.h | 105 --
 searchlib/src/vespa/searchlib/btree/noaggrcalc.h | 94 --
 searchlib/src/vespa/searchlib/btree/noaggregated.h | 15 -
 .../src/vespa/searchlib/datastore/CMakeLists.txt | 11 -
 .../src/vespa/searchlib/datastore/allocator.h | 36 -
 .../src/vespa/searchlib/datastore/allocator.hpp | 75 -
 .../src/vespa/searchlib/datastore/array_store.h | 111 --
 .../src/vespa/searchlib/datastore/array_store.hpp | 203 ---
 .../searchlib/datastore/array_store_config.cpp | 65 -
 .../vespa/searchlib/datastore/array_store_config.h | 72 -
 .../src/vespa/searchlib/datastore/buffer_type.cpp | 140 --
 .../src/vespa/searchlib/datastore/buffer_type.h | 162 ---
 .../src/vespa/searchlib/datastore/bufferstate.cpp | 295 ----
 .../src/vespa/searchlib/datastore/bufferstate.h | 191 ---
 .../src/vespa/searchlib/datastore/datastore.cpp | 17 -
 .../src/vespa/searchlib/datastore/datastore.h | 120 --
 .../src/vespa/searchlib/datastore/datastore.hpp | 187 ---
 .../vespa/searchlib/datastore/datastorebase.cpp | 500 -------
 .../src/vespa/searchlib/datastore/datastorebase.h | 360 -----
 .../src/vespa/searchlib/datastore/entryref.cpp | 17 -
 searchlib/src/vespa/searchlib/datastore/entryref.h | 64 -
 .../src/vespa/searchlib/datastore/entryref.hpp | 18 -
 .../searchlib/datastore/free_list_allocator.h | 35 -
 .../searchlib/datastore/free_list_allocator.hpp | 104 --
 .../searchlib/datastore/free_list_raw_allocator.h | 33 -
 .../datastore/free_list_raw_allocator.hpp | 35 -
 searchlib/src/vespa/searchlib/datastore/handle.h | 25 -
 .../searchlib/datastore/i_compaction_context.h | 21 -
 .../src/vespa/searchlib/datastore/raw_allocator.h | 34 -
 .../vespa/searchlib/datastore/raw_allocator.hpp | 44 -
 .../src/vespa/searchlib/datastore/unique_store.h | 118 --
 .../src/vespa/searchlib/datastore/unique_store.hpp | 254 ----
 .../searchlib/datastore/unique_store_builder.h | 46 -
 .../searchlib/datastore/unique_store_builder.hpp | 59 -
 .../vespa/searchlib/datastore/unique_store_saver.h | 51 -
 .../searchlib/datastore/unique_store_saver.hpp | 47 -
 .../searchlib/memoryindex/compact_words_store.cpp | 2 +-
 .../searchlib/memoryindex/compact_words_store.h | 4 +-
 .../vespa/searchlib/memoryindex/feature_store.cpp | 2 +-
 .../vespa/searchlib/memoryindex/feature_store.h | 2 +-
 .../vespa/searchlib/memoryindex/field_index.cpp | 14 +-
 .../src/vespa/searchlib/memoryindex/field_index.h | 8 +-
 .../memoryindex/field_index_collection.cpp | 14 +-
 .../memoryindex/i_field_index_insert_listener.h | 2 +-
 .../vespa/searchlib/memoryindex/memory_index.cpp | 2 +-
 .../memoryindex/ordered_field_index_inserter.cpp | 14 +-
 .../searchlib/memoryindex/posting_iterator.cpp | 10 +-
 .../src/vespa/searchlib/memoryindex/word_store.cpp | 2 +-
 .../src/vespa/searchlib/memoryindex/word_store.h | 2 +-
 searchlib/src/vespa/searchlib/predicate/common.h | 2 +-
 .../predicate/document_features_store.cpp | 6 +-
 .../searchlib/predicate/document_features_store.h | 2 +-
 .../vespa/searchlib/predicate/predicate_index.cpp | 10 +-
 .../predicate/predicate_interval_store.cpp | 2 +-
 .../searchlib/predicate/predicate_interval_store.h | 4 +-
 .../src/vespa/searchlib/predicate/simple_index.cpp | 10 +-
 .../src/vespa/searchlib/predicate/simple_index.h | 2 +-
 .../searchlib/queryeval/predicate_blueprint.cpp | 10 +-
 .../tensor/dense_tensor_attribute_saver.cpp | 2 +-
 .../vespa/searchlib/tensor/dense_tensor_store.cpp | 2 +-
 .../tensor/generic_tensor_attribute_saver.cpp | 2 +-
 .../searchlib/tensor/generic_tensor_store.cpp | 6 +-
 .../src/vespa/searchlib/tensor/tensor_store.cpp | 2 +-
 .../src/vespa/searchlib/tensor/tensor_store.h | 4 +-
 .../searchlib/test/btree/aggregated_printer.h | 26 -
 .../src/vespa/searchlib/test/btree/btree_printer.h | 103 --
 .../src/vespa/searchlib/test/btree/data_printer.h | 28 -
 .../src/vespa/searchlib/test/datastore/memstats.h | 37 -
 .../searchlib/test/fakedata/fakememtreeocc.cpp | 10 +-
 searchlib/src/vespa/searchlib/util/CMakeLists.txt | 1 -
 .../src/vespa/searchlib/util/bufferwriter.cpp | 36 -
 searchlib/src/vespa/searchlib/util/bufferwriter.h | 56 -
 .../vespa/searchlib/util/drainingbufferwriter.h | 2 +-
 178 files changed, 187 insertions(+), 17309 deletions(-)
 delete mode 100644 searchlib/src/tests/btree/.gitignore
 delete mode 100644 searchlib/src/tests/btree/CMakeLists.txt
 delete mode 100644 searchlib/src/tests/btree/btree_test.cpp
 delete mode 100644 searchlib/src/tests/btree/btreeaggregation_test.cpp
 delete mode 100644 searchlib/src/tests/btree/frozenbtree_test.cpp
 delete mode 100644 searchlib/src/tests/btree/iteratespeed.cpp
 delete mode 100644 searchlib/src/tests/datastore/array_store/CMakeLists.txt
 delete mode 100644 searchlib/src/tests/datastore/array_store/array_store_test.cpp
 delete mode 100644 searchlib/src/tests/datastore/array_store_config/CMakeLists.txt
 delete mode 100644 searchlib/src/tests/datastore/array_store_config/array_store_config_test.cpp
 delete mode 100644 searchlib/src/tests/datastore/buffer_type/CMakeLists.txt
 delete mode 100644 searchlib/src/tests/datastore/buffer_type/buffer_type_test.cpp
 delete mode 100644 searchlib/src/tests/datastore/datastore/CMakeLists.txt
 delete mode 100644 searchlib/src/tests/datastore/datastore/datastore_test.cpp
 delete mode 100644 searchlib/src/tests/datastore/unique_store/CMakeLists.txt
 delete mode 100644 searchlib/src/tests/datastore/unique_store/unique_store_test.cpp
 delete mode 100644 searchlib/src/vespa/searchlib/btree/CMakeLists.txt
 delete mode 100644 searchlib/src/vespa/searchlib/btree/OWNERS
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btree.h
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btree.hpp
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btree_key_data.cpp
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btree_key_data.h
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreeaggregator.cpp
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreeaggregator.h
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreeaggregator.hpp
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreebuilder.cpp
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreebuilder.h
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreebuilder.hpp
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreeinserter.cpp
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreeinserter.h
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreeinserter.hpp
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreeiterator.cpp
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreeiterator.h
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreeiterator.hpp
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreenode.cpp
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreenode.h
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreenode.hpp
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreenodeallocator.cpp
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreenodeallocator.h
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreenodeallocator.hpp
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreenodestore.cpp
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreenodestore.h
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreenodestore.hpp
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreeremover.cpp
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreeremover.h
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreeremover.hpp
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreeroot.cpp
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreeroot.h
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreeroot.hpp
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreerootbase.cpp
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreerootbase.h
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreerootbase.hpp
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreestore.cpp
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreestore.h
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreestore.hpp
 delete mode 100644 searchlib/src/vespa/searchlib/btree/btreetraits.h
 delete mode 100644 searchlib/src/vespa/searchlib/btree/minmaxaggrcalc.h
 delete mode 100644 searchlib/src/vespa/searchlib/btree/minmaxaggregated.h
 delete mode 100644 searchlib/src/vespa/searchlib/btree/noaggrcalc.h
 delete mode 100644 searchlib/src/vespa/searchlib/btree/noaggregated.h
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/CMakeLists.txt
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/allocator.h
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/allocator.hpp
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/array_store.h
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/array_store.hpp
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/array_store_config.cpp
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/array_store_config.h
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/buffer_type.cpp
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/buffer_type.h
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/bufferstate.cpp
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/bufferstate.h
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/datastore.cpp
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/datastore.h
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/datastore.hpp
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/datastorebase.cpp
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/datastorebase.h
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/entryref.cpp
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/entryref.h
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/entryref.hpp
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/free_list_allocator.h
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/free_list_allocator.hpp
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.h
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.hpp
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/handle.h
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/i_compaction_context.h
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/raw_allocator.h
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/raw_allocator.hpp
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/unique_store.h
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/unique_store.hpp
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/unique_store_builder.h
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/unique_store_builder.hpp
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/unique_store_saver.h
 delete mode 100644 searchlib/src/vespa/searchlib/datastore/unique_store_saver.hpp
 delete mode 100644 searchlib/src/vespa/searchlib/test/btree/aggregated_printer.h
 delete mode 100644 searchlib/src/vespa/searchlib/test/btree/btree_printer.h
 delete mode 100644 searchlib/src/vespa/searchlib/test/btree/data_printer.h
 delete mode 100644 searchlib/src/vespa/searchlib/test/datastore/memstats.h
 delete mode 100644 searchlib/src/vespa/searchlib/util/bufferwriter.cpp
 delete mode 100644 searchlib/src/vespa/searchlib/util/bufferwriter.h

(limited to 'searchlib')

diff --git a/searchlib/CMakeLists.txt b/searchlib/CMakeLists.txt
index bde467087da..bae47872d6c 100644
--- a/searchlib/CMakeLists.txt
+++ b/searchlib/CMakeLists.txt
@@ -23,10 +23,8 @@ vespa_define_module(
     src/vespa/searchlib/aggregation
     src/vespa/searchlib/attribute
     src/vespa/searchlib/bitcompression
-    src/vespa/searchlib/btree
     src/vespa/searchlib/common
     src/vespa/searchlib/config
-    src/vespa/searchlib/datastore
     src/vespa/searchlib/diskindex
     src/vespa/searchlib/docstore
     src/vespa/searchlib/engine
@@ -97,7 +95,6 @@ vespa_define_module(
     src/tests/attribute/tensorattribute
     src/tests/bitcompression/expgolomb
     src/tests/bitvector
-    src/tests/btree
    src/tests/bytecomplens
    src/tests/common/bitvector
    src/tests/common/foregroundtaskexecutor
@@ -106,11 +103,6 @@ vespa_define_module(
    src/tests/common/resultset
    src/tests/common/sequencedtaskexecutor
    src/tests/common/summaryfeatures
-    src/tests/datastore/array_store
-    src/tests/datastore/array_store_config
-    src/tests/datastore/buffer_type
-    src/tests/datastore/datastore
-    src/tests/datastore/unique_store
    src/tests/diskindex/bitvector
    src/tests/diskindex/diskindex
    src/tests/diskindex/fieldwriter
diff --git a/searchlib/src/apps/tests/btreestress_test.cpp b/searchlib/src/apps/tests/btreestress_test.cpp
index ca92ad4865b..e6ef0740fc3 100644
--- a/searchlib/src/apps/tests/btreestress_test.cpp
+++ b/searchlib/src/apps/tests/btreestress_test.cpp
@@ -1,22 +1,22 @@
 // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
 #include
-#include
-#include
-#include
-#include
-#include
+#include
+#include
+#include
+#include
+#include
 #include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
 #include
 #include
diff --git a/searchlib/src/tests/attribute/comparator/comparator_test.cpp b/searchlib/src/tests/attribute/comparator/comparator_test.cpp
index 9493d3d5ca9..6f16cbbe391 100644
--- a/searchlib/src/tests/attribute/comparator/comparator_test.cpp
+++ b/searchlib/src/tests/attribute/comparator/comparator_test.cpp
@@ -3,12 +3,12 @@ LOG_SETUP("comparator_test");
 #include
 #include
-#include
+#include
 #include
-#include
-#include
-#include
+#include
+#include
+#include
 namespace search {
diff --git a/searchlib/src/tests/attribute/enumeratedsave/enumeratedsave_test.cpp b/searchlib/src/tests/attribute/enumeratedsave/enumeratedsave_test.cpp
index 9ede6e63c2e..d4dc3adfcea 100644
--- a/searchlib/src/tests/attribute/enumeratedsave/enumeratedsave_test.cpp
+++ b/searchlib/src/tests/attribute/enumeratedsave/enumeratedsave_test.cpp
@@ -14,10 +14,10 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/searchlib/src/tests/attribute/postinglist/postinglist.cpp b/searchlib/src/tests/attribute/postinglist/postinglist.cpp
index fe79db163f8..78b93e2b78b 100644
--- a/searchlib/src/tests/attribute/postinglist/postinglist.cpp
+++ b/searchlib/src/tests/attribute/postinglist/postinglist.cpp
@@ -1,15 +1,16 @@
 // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include +#include #include #include #include +#include #include LOG_SETUP("postinglist_test"); diff --git a/searchlib/src/tests/btree/.gitignore b/searchlib/src/tests/btree/.gitignore deleted file mode 100644 index 0cc4519cf9c..00000000000 --- a/searchlib/src/tests/btree/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -iteratespeed -searchlib_btree_test_app -searchlib_btreeaggregation_test_app -searchlib_frozenbtree_test_app -searchlib_iteratespeed_app diff --git a/searchlib/src/tests/btree/CMakeLists.txt b/searchlib/src/tests/btree/CMakeLists.txt deleted file mode 100644 index 9817f2b9079..00000000000 --- a/searchlib/src/tests/btree/CMakeLists.txt +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -vespa_add_executable(searchlib_btree_test_app TEST - SOURCES - btree_test.cpp - DEPENDS - searchlib -) -vespa_add_test(NAME searchlib_btree_test_app COMMAND searchlib_btree_test_app) -vespa_add_executable(searchlib_frozenbtree_test_app TEST - SOURCES - frozenbtree_test.cpp - DEPENDS - searchlib -) -vespa_add_test(NAME searchlib_frozenbtree_test_app COMMAND searchlib_frozenbtree_test_app) -vespa_add_executable(searchlib_btreeaggregation_test_app TEST - SOURCES - btreeaggregation_test.cpp - DEPENDS - searchlib -) -vespa_add_test(NAME searchlib_btreeaggregation_test_app COMMAND searchlib_btreeaggregation_test_app) -vespa_add_executable(searchlib_iteratespeed_app - SOURCES - iteratespeed.cpp - DEPENDS - searchlib -) -vespa_add_test(NAME searchlib_iteratespeed_app COMMAND searchlib_iteratespeed_app BENCHMARK) diff --git a/searchlib/src/tests/btree/btree_test.cpp b/searchlib/src/tests/btree/btree_test.cpp deleted file mode 100644 index 32c3f952e47..00000000000 --- a/searchlib/src/tests/btree/btree_test.cpp +++ /dev/null @@ -1,1526 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -#include -LOG_SETUP("btree_test"); -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -using vespalib::GenerationHandler; -using search::datastore::EntryRef; - -namespace search { -namespace btree { - -namespace { - -template -std::string -toStr(const T & v) -{ - std::stringstream ss; - ss << v; - return ss.str(); -} - -} - -typedef BTreeTraits<4, 4, 31, false> MyTraits; - -#define KEYWRAP - -#ifdef KEYWRAP - -// Force use of functor to compare keys. 
-class WrapInt -{ -public: - int _val; - WrapInt(int val) : _val(val) {} - WrapInt() : _val(0) {} - bool operator==(const WrapInt & rhs) const { return _val == rhs._val; } -}; - -std::ostream & -operator<<(std::ostream &s, const WrapInt &i) -{ - s << i._val; - return s; -} - -typedef WrapInt MyKey; -class MyComp -{ -public: - bool - operator()(const WrapInt &a, const WrapInt &b) const - { - return a._val < b._val; - } -}; - -#define UNWRAP(key) (key._val) -#else -typedef int MyKey; -typedef std::less MyComp; -#define UNWRAP(key) (key) -#endif - -typedef BTree MyTree; -typedef BTreeStore MyTreeStore; -typedef MyTree::Builder MyTreeBuilder; -typedef MyTree::LeafNodeType MyLeafNode; -typedef MyTree::InternalNodeType MyInternalNode; -typedef MyTree::NodeAllocatorType MyNodeAllocator; -typedef std::pair LeafPair; -typedef MyTreeStore::KeyDataType MyKeyData; -typedef MyTreeStore::KeyDataTypeRefPair MyKeyDataRefPair; - -typedef BTree SetTreeB; - -typedef BTreeTraits<16, 16, 10, false> LSeekTraits; -typedef BTree, LSeekTraits> SetTreeL; - -struct LeafPairLess { - bool operator()(const LeafPair & lhs, const LeafPair & rhs) const { - return UNWRAP(lhs.first) < UNWRAP(rhs.first); - } -}; - -template -void -cleanup(GenerationHandler & g, ManagerType & m) -{ - m.freeze(); - m.transferHoldLists(g.getCurrentGeneration()); - g.incGeneration(); - m.trimHoldLists(g.getFirstUsedGeneration()); -} - -template -void -cleanup(GenerationHandler & g, - ManagerType & m, - BTreeNode::Ref n1Ref, NodeType * n1, - BTreeNode::Ref n2Ref = BTreeNode::Ref(), NodeType * n2 = NULL) -{ - assert(ManagerType::isValidRef(n1Ref)); - m.holdNode(n1Ref, n1); - if (n2 != NULL) { - assert(ManagerType::isValidRef(n2Ref)); - m.holdNode(n2Ref, n2); - } else { - assert(!ManagerType::isValidRef(n2Ref)); - } - cleanup(g, m); -} - -template -bool -assertTree(const std::string &exp, const Tree &t) -{ - std::stringstream ss; - test::BTreePrinter printer(ss, t.getAllocator()); - printer.print(t.getRoot()); - if (!EXPECT_EQUAL(exp, ss.str())) return false; - return true; -} - -template -void -populateTree(Tree &t, uint32_t count, uint32_t delta) -{ - uint32_t key = 1; - int32_t value = 101; - for (uint32_t i = 0; i < count; ++i) { - t.insert(key, value); - key += delta; - value += delta; - } -} - -template -void -populateLeafNode(Tree &t) -{ - populateTree(t, 4, 2); -} - - -class Test : public vespalib::TestApp { -private: - template - bool assertLeafNode(const std::string & exp, const LeafNodeType & n); - bool assertSeek(int skey, int ekey, const MyTree & tree); - bool assertSeek(int skey, int ekey, MyTree::Iterator & itr); - bool assertMemoryUsage(const vespalib::MemoryUsage & exp, const vespalib::MemoryUsage & act); - - void - buildSubTree(const std::vector &sub, - size_t numEntries); - - void requireThatNodeInsertWorks(); - void requireThatTreeInsertWorks(); - void requireThatNodeSplitInsertWorks(); - void requireThatNodeStealWorks(); - void requireThatTreeRemoveStealWorks(); - void requireThatNodeRemoveWorks(); - void requireThatNodeLowerBoundWorks(); - void requireThatWeCanInsertAndRemoveFromTree(); - void requireThatSortedTreeInsertWorks(); - void requireThatCornerCaseTreeFindWorks(); - void requireThatBasicTreeIteratorWorks(); - void requireThatTreeIteratorSeekWorks(); - void requireThatTreeIteratorAssignWorks(); - void requireThatMemoryUsageIsCalculated(); - template - void requireThatLowerBoundWorksT(); - void requireThatLowerBoundWorks(); - template - void requireThatUpperBoundWorksT(); - void requireThatUpperBoundWorks(); - void 
requireThatUpdateOfKeyWorks(); - - void - requireThatSmallNodesWorks(); - - void - requireThatApplyWorks(); - - void - requireThatIteratorDistanceWorks(int numEntries); - - void - requireThatIteratorDistanceWorks(); -public: - int Main() override; -}; - -template -bool -Test::assertLeafNode(const std::string & exp, const LeafNodeType & n) -{ - std::stringstream ss; - ss << "["; - for (uint32_t i = 0; i < n.validSlots(); ++i) { - if (i > 0) ss << ","; - ss << n.getKey(i) << ":" << n.getData(i); - } - ss << "]"; - if (!EXPECT_EQUAL(exp, ss.str())) return false; - return true; -} - -bool -Test::assertSeek(int skey, int ekey, const MyTree & tree) -{ - MyTree::Iterator itr = tree.begin(); - return assertSeek(skey, ekey, itr); -} - -bool -Test::assertSeek(int skey, int ekey, MyTree::Iterator & itr) -{ - MyTree::Iterator bseekItr = itr; - MyTree::Iterator lseekItr = itr; - bseekItr.binarySeek(skey); - lseekItr.linearSeek(skey); - if (!EXPECT_EQUAL(ekey, UNWRAP(bseekItr.getKey()))) return false; - if (!EXPECT_EQUAL(ekey, UNWRAP(lseekItr.getKey()))) return false; - itr = bseekItr; - return true; -} - -bool -Test::assertMemoryUsage(const vespalib::MemoryUsage & exp, const vespalib::MemoryUsage & act) -{ - if (!EXPECT_EQUAL(exp.allocatedBytes(), act.allocatedBytes())) return false; - if (!EXPECT_EQUAL(exp.usedBytes(), act.usedBytes())) return false; - if (!EXPECT_EQUAL(exp.deadBytes(), act.deadBytes())) return false; - if (!EXPECT_EQUAL(exp.allocatedBytesOnHold(), act.allocatedBytesOnHold())) return false; - return true; -} - -void -Test::requireThatNodeInsertWorks() -{ - GenerationHandler g; - MyNodeAllocator m; - MyLeafNode::RefPair nPair = m.allocLeafNode(); - MyLeafNode *n = nPair.data; - EXPECT_TRUE(n->isLeaf()); - EXPECT_EQUAL(0u, n->validSlots()); - n->insert(0, 20, "b"); - EXPECT_TRUE(!n->isFull()); - EXPECT_TRUE(!n->isAtLeastHalfFull()); - EXPECT_TRUE(assertLeafNode("[20:b]", *n)); - n->insert(0, 10, "a"); - EXPECT_TRUE(!n->isFull()); - EXPECT_TRUE(n->isAtLeastHalfFull()); - EXPECT_TRUE(assertLeafNode("[10:a,20:b]", *n)); - EXPECT_EQUAL(20, UNWRAP(n->getLastKey())); - EXPECT_EQUAL("b", n->getLastData()); - n->insert(2, 30, "c"); - EXPECT_TRUE(!n->isFull()); - n->insert(3, 40, "d"); - EXPECT_TRUE(n->isFull()); - EXPECT_TRUE(n->isAtLeastHalfFull()); - EXPECT_TRUE(assertLeafNode("[10:a,20:b,30:c,40:d]", *n)); - cleanup(g, m, nPair.ref, n); -} - -void -Test::requireThatTreeInsertWorks() -{ - using Tree = BTree; - { - Tree t; - EXPECT_TRUE(assertTree("{}", t)); - t.insert(20, 102); - EXPECT_TRUE(assertTree("{{20:102}}", t)); - t.insert(10, 101); - EXPECT_TRUE(assertTree("{{10:101,20:102}}", t)); - t.insert(30, 103); - t.insert(40, 104); - EXPECT_TRUE(assertTree("{{10:101,20:102,30:103,40:104}}", t)); - } - { // new entry in current node - Tree t; - populateLeafNode(t); - t.insert(4, 104); - EXPECT_TRUE(assertTree("{{4,7}} -> " - "{{1:101,3:103,4:104}," - "{5:105,7:107}}", t)); - } - { // new entry in split node - Tree t; - populateLeafNode(t); - t.insert(6, 106); - EXPECT_TRUE(assertTree("{{5,7}} -> " - "{{1:101,3:103,5:105}," - "{6:106,7:107}}", t)); - } - { // new entry at end - Tree t; - populateLeafNode(t); - t.insert(8, 108); - EXPECT_TRUE(assertTree("{{5,8}} -> " - "{{1:101,3:103,5:105}," - "{7:107,8:108}}", t)); - } - { // multi level node split - Tree t; - populateTree(t, 16, 2); - EXPECT_TRUE(assertTree("{{7,15,23,31}} -> " - "{{1:101,3:103,5:105,7:107}," - "{9:109,11:111,13:113,15:115}," - "{17:117,19:119,21:121,23:123}," - "{25:125,27:127,29:129,31:131}}", t)); - t.insert(33, 133); - 
EXPECT_TRUE(assertTree("{{23,33}} -> " - "{{7,15,23},{29,33}} -> " - "{{1:101,3:103,5:105,7:107}," - "{9:109,11:111,13:113,15:115}," - "{17:117,19:119,21:121,23:123}," - "{25:125,27:127,29:129}," - "{31:131,33:133}}", t)); - } - { // give to left node to avoid split - Tree t; - populateTree(t, 8, 2); - t.remove(5); - EXPECT_TRUE(assertTree("{{7,15}} -> " - "{{1:101,3:103,7:107}," - "{9:109,11:111,13:113,15:115}}", t)); - t.insert(10, 110); - EXPECT_TRUE(assertTree("{{9,15}} -> " - "{{1:101,3:103,7:107,9:109}," - "{10:110,11:111,13:113,15:115}}", t)); - } - { // give to left node to avoid split, and move to left node - Tree t; - populateTree(t, 8, 2); - t.remove(3); - t.remove(5); - EXPECT_TRUE(assertTree("{{7,15}} -> " - "{{1:101,7:107}," - "{9:109,11:111,13:113,15:115}}", t)); - t.insert(8, 108); - EXPECT_TRUE(assertTree("{{9,15}} -> " - "{{1:101,7:107,8:108,9:109}," - "{11:111,13:113,15:115}}", t)); - } - { // not give to left node to avoid split, but insert at end at left node - Tree t; - populateTree(t, 8, 2); - t.remove(5); - EXPECT_TRUE(assertTree("{{7,15}} -> " - "{{1:101,3:103,7:107}," - "{9:109,11:111,13:113,15:115}}", t)); - t.insert(8, 108); - EXPECT_TRUE(assertTree("{{8,15}} -> " - "{{1:101,3:103,7:107,8:108}," - "{9:109,11:111,13:113,15:115}}", t)); - } - { // give to right node to avoid split - Tree t; - populateTree(t, 8, 2); - t.remove(13); - EXPECT_TRUE(assertTree("{{7,15}} -> " - "{{1:101,3:103,5:105,7:107}," - "{9:109,11:111,15:115}}", t)); - t.insert(4, 104); - EXPECT_TRUE(assertTree("{{5,15}} -> " - "{{1:101,3:103,4:104,5:105}," - "{7:107,9:109,11:111,15:115}}", t)); - } - { // give to right node to avoid split and move to right node - using MyTraits6 = BTreeTraits<6, 6, 31, false>; - using Tree6 = BTree; - - Tree6 t; - populateTree(t, 12, 2); - t.remove(19); - t.remove(21); - t.remove(23); - EXPECT_TRUE(assertTree("{{11,17}} -> " - "{{1:101,3:103,5:105,7:107,9:109,11:111}," - "{13:113,15:115,17:117}}", t)); - t.insert(10, 110); - EXPECT_TRUE(assertTree("{{7,17}} -> " - "{{1:101,3:103,5:105,7:107}," - "{9:109,10:110,11:111,13:113,15:115,17:117}}", t)); - } -} - -MyLeafNode::RefPair -getLeafNode(MyNodeAllocator &allocator) -{ - MyLeafNode::RefPair nPair = allocator.allocLeafNode(); - MyLeafNode *n = nPair.data; - n->insert(0, 1, "a"); - n->insert(1, 3, "c"); - n->insert(2, 5, "e"); - n->insert(3, 7, "g"); - return nPair; -} - -void -Test::requireThatNodeSplitInsertWorks() -{ - { // new entry in current node - GenerationHandler g; - MyNodeAllocator m; - MyLeafNode::RefPair nPair = getLeafNode(m); - MyLeafNode *n = nPair.data; - MyLeafNode::RefPair sPair = m.allocLeafNode(); - MyLeafNode *s = sPair.data; - n->splitInsert(s, 2, 4, "d"); - EXPECT_TRUE(assertLeafNode("[1:a,3:c,4:d]", *n)); - EXPECT_TRUE(assertLeafNode("[5:e,7:g]", *s)); - cleanup(g, m, nPair.ref, n, sPair.ref, s); - } - { // new entry in split node - GenerationHandler g; - MyNodeAllocator m; - MyLeafNode::RefPair nPair = getLeafNode(m); - MyLeafNode *n = nPair.data; - MyLeafNode::RefPair sPair = m.allocLeafNode(); - MyLeafNode *s = sPair.data; - n->splitInsert(s, 3, 6, "f"); - EXPECT_TRUE(assertLeafNode("[1:a,3:c,5:e]", *n)); - EXPECT_TRUE(assertLeafNode("[6:f,7:g]", *s)); - cleanup(g, m, nPair.ref, n, sPair.ref, s); - } - { // new entry at end - GenerationHandler g; - MyNodeAllocator m; - MyLeafNode::RefPair nPair = getLeafNode(m); - MyLeafNode *n = nPair.data; - MyLeafNode::RefPair sPair = m.allocLeafNode(); - MyLeafNode *s = sPair.data; - n->splitInsert(s, 4, 8, "h"); - 
EXPECT_TRUE(assertLeafNode("[1:a,3:c,5:e]", *n)); - EXPECT_TRUE(assertLeafNode("[7:g,8:h]", *s)); - cleanup(g, m, nPair.ref, n, sPair.ref, s); - } -} - -struct BTreeStealTraits -{ - static const size_t LEAF_SLOTS = 6; - static const size_t INTERNAL_SLOTS = 6; - static const size_t PATH_SIZE = 20; - static const bool BINARY_SEEK = true; -}; - -void -Test::requireThatNodeStealWorks() -{ - typedef BTreeLeafNode MyStealNode; - typedef BTreeNodeAllocator - MyStealManager; - { // steal all from left - GenerationHandler g; - MyStealManager m; - MyStealNode::RefPair nPair = m.allocLeafNode(); - MyStealNode *n = nPair.data; - n->insert(0, 4, "d"); - n->insert(1, 5, "e"); - EXPECT_TRUE(!n->isAtLeastHalfFull()); - MyStealNode::RefPair vPair = m.allocLeafNode(); - MyStealNode *v = vPair.data; - v->insert(0, 1, "a"); - v->insert(1, 2, "b"); - v->insert(2, 3, "c"); - n->stealAllFromLeftNode(v); - EXPECT_TRUE(n->isAtLeastHalfFull()); - EXPECT_TRUE(assertLeafNode("[1:a,2:b,3:c,4:d,5:e]", *n)); - cleanup(g, m, nPair.ref, n, vPair.ref, v); - } - { // steal all from right - GenerationHandler g; - MyStealManager m; - MyStealNode::RefPair nPair = m.allocLeafNode(); - MyStealNode *n = nPair.data; - n->insert(0, 1, "a"); - n->insert(1, 2, "b"); - EXPECT_TRUE(!n->isAtLeastHalfFull()); - MyStealNode::RefPair vPair = m.allocLeafNode(); - MyStealNode *v = vPair.data; - v->insert(0, 3, "c"); - v->insert(1, 4, "d"); - v->insert(2, 5, "e"); - n->stealAllFromRightNode(v); - EXPECT_TRUE(n->isAtLeastHalfFull()); - EXPECT_TRUE(assertLeafNode("[1:a,2:b,3:c,4:d,5:e]", *n)); - cleanup(g, m, nPair.ref, n, vPair.ref, v); - } - { // steal some from left - GenerationHandler g; - MyStealManager m; - MyStealNode::RefPair nPair = m.allocLeafNode(); - MyStealNode *n = nPair.data; - n->insert(0, 5, "e"); - n->insert(1, 6, "f"); - EXPECT_TRUE(!n->isAtLeastHalfFull()); - MyStealNode::RefPair vPair = m.allocLeafNode(); - MyStealNode *v = vPair.data; - v->insert(0, 1, "a"); - v->insert(1, 2, "b"); - v->insert(2, 3, "c"); - v->insert(3, 4, "d"); - n->stealSomeFromLeftNode(v); - EXPECT_TRUE(n->isAtLeastHalfFull()); - EXPECT_TRUE(v->isAtLeastHalfFull()); - EXPECT_TRUE(assertLeafNode("[4:d,5:e,6:f]", *n)); - EXPECT_TRUE(assertLeafNode("[1:a,2:b,3:c]", *v)); - cleanup(g, m, nPair.ref, n, vPair.ref, v); - } - { // steal some from right - GenerationHandler g; - MyStealManager m; - MyStealNode::RefPair nPair = m.allocLeafNode(); - MyStealNode *n = nPair.data; - n->insert(0, 1, "a"); - n->insert(1, 2, "b"); - EXPECT_TRUE(!n->isAtLeastHalfFull()); - MyStealNode::RefPair vPair = m.allocLeafNode(); - MyStealNode *v = vPair.data; - v->insert(0, 3, "c"); - v->insert(1, 4, "d"); - v->insert(2, 5, "e"); - v->insert(3, 6, "f"); - n->stealSomeFromRightNode(v); - EXPECT_TRUE(n->isAtLeastHalfFull()); - EXPECT_TRUE(v->isAtLeastHalfFull()); - EXPECT_TRUE(assertLeafNode("[1:a,2:b,3:c]", *n)); - EXPECT_TRUE(assertLeafNode("[4:d,5:e,6:f]", *v)); - cleanup(g, m, nPair.ref, n, vPair.ref, v); - } -} - -void -Test::requireThatTreeRemoveStealWorks() -{ - using MyStealTree = BTree; - { // steal all from left - MyStealTree t; - t.insert(10, 110); - t.insert(20, 120); - t.insert(30, 130); - t.insert(40, 140); - t.insert(50, 150); - t.insert(60, 160); - t.insert(35, 135); - t.remove(35); - EXPECT_TRUE(assertTree("{{30,60}} -> " - "{{10:110,20:120,30:130}," - "{40:140,50:150,60:160}}", t)); - t.remove(50); - EXPECT_TRUE(assertTree("{{10:110,20:120,30:130,40:140,60:160}}", t)); - } - { // steal all from right - MyStealTree t; - t.insert(10, 110); - t.insert(20, 120); - 
t.insert(30, 130); - t.insert(40, 140); - t.insert(50, 150); - t.insert(60, 160); - t.insert(35, 135); - t.remove(35); - EXPECT_TRUE(assertTree("{{30,60}} -> " - "{{10:110,20:120,30:130}," - "{40:140,50:150,60:160}}", t)); - t.remove(20); - EXPECT_TRUE(assertTree("{{10:110,30:130,40:140,50:150,60:160}}", t)); - } - { // steal some from left - MyStealTree t; - t.insert(10, 110); - t.insert(20, 120); - t.insert(30, 130); - t.insert(60, 160); - t.insert(70, 170); - t.insert(80, 180); - t.insert(50, 150); - t.insert(40, 140); - EXPECT_TRUE(assertTree("{{50,80}} -> " - "{{10:110,20:120,30:130,40:140,50:150}," - "{60:160,70:170,80:180}}", t)); - t.remove(60); - EXPECT_TRUE(assertTree("{{30,80}} -> " - "{{10:110,20:120,30:130}," - "{40:140,50:150,70:170,80:180}}", t)); - } - { // steal some from right - MyStealTree t; - t.insert(10, 110); - t.insert(20, 120); - t.insert(30, 130); - t.insert(40, 140); - t.insert(50, 150); - t.insert(60, 160); - t.insert(70, 170); - t.insert(80, 180); - t.insert(90, 190); - t.remove(40); - EXPECT_TRUE(assertTree("{{30,90}} -> " - "{{10:110,20:120,30:130}," - "{50:150,60:160,70:170,80:180,90:190}}", t)); - t.remove(20); - EXPECT_TRUE(assertTree("{{60,90}} -> " - "{{10:110,30:130,50:150,60:160}," - "{70:170,80:180,90:190}}", t)); - } -} - -void -Test::requireThatNodeRemoveWorks() -{ - GenerationHandler g; - MyNodeAllocator m; - MyLeafNode::RefPair nPair = getLeafNode(m); - MyLeafNode *n = nPair.data; - n->remove(1); - EXPECT_TRUE(assertLeafNode("[1:a,5:e,7:g]", *n)); - cleanup(g, m, nPair.ref, n); -} - -void -Test::requireThatNodeLowerBoundWorks() -{ - GenerationHandler g; - MyNodeAllocator m; - MyLeafNode::RefPair nPair = getLeafNode(m); - MyLeafNode *n = nPair.data; - EXPECT_EQUAL(1u, n->lower_bound(3, MyComp())); - EXPECT_FALSE(MyComp()(3, n->getKey(1u))); - EXPECT_EQUAL(0u, n->lower_bound(0, MyComp())); - EXPECT_TRUE(MyComp()(0, n->getKey(0u))); - EXPECT_EQUAL(1u, n->lower_bound(2, MyComp())); - EXPECT_TRUE(MyComp()(2, n->getKey(1u))); - EXPECT_EQUAL(3u, n->lower_bound(6, MyComp())); - EXPECT_TRUE(MyComp()(6, n->getKey(3u))); - EXPECT_EQUAL(4u, n->lower_bound(8, MyComp())); - cleanup(g, m, nPair.ref, n); -} - -void -generateData(std::vector & data, size_t numEntries) -{ - data.reserve(numEntries); - Rand48 rnd; - rnd.srand48(10); - for (size_t i = 0; i < numEntries; ++i) { - int num = rnd.lrand48() % 10000000; - std::string str = toStr(num); - data.push_back(std::make_pair(num, str)); - } -} - - -void -Test::buildSubTree(const std::vector &sub, - size_t numEntries) -{ - GenerationHandler g; - MyTree tree; - MyTreeBuilder builder(tree.getAllocator()); - - std::vector sorted(sub.begin(), sub.begin() + numEntries); - std::sort(sorted.begin(), sorted.end(), LeafPairLess()); - for (size_t i = 0; i < numEntries; ++i) { - int num = UNWRAP(sorted[i].first); - const std::string & str = sorted[i].second; - builder.insert(num, str); - } - tree.assign(builder); - assert(numEntries == tree.size()); - assert(tree.isValid()); - EXPECT_EQUAL(numEntries, tree.size()); - EXPECT_TRUE(tree.isValid()); - MyTree::Iterator itr = tree.begin(); - MyTree::Iterator ritr = itr; - if (numEntries > 0) { - EXPECT_TRUE(ritr.valid()); - EXPECT_EQUAL(0u, ritr.position()); - --ritr; - EXPECT_TRUE(!ritr.valid()); - EXPECT_EQUAL(numEntries, ritr.position()); - --ritr; - EXPECT_TRUE(ritr.valid()); - EXPECT_EQUAL(numEntries - 1, ritr.position()); - } else { - EXPECT_TRUE(!ritr.valid()); - EXPECT_EQUAL(0u, ritr.position()); - --ritr; - EXPECT_TRUE(!ritr.valid()); - EXPECT_EQUAL(0u, ritr.position()); - 
} - for (size_t i = 0; i < numEntries; ++i) { - EXPECT_TRUE(itr.valid()); - EXPECT_EQUAL(sorted[i].first, itr.getKey()); - EXPECT_EQUAL(sorted[i].second, itr.getData()); - ++itr; - } - EXPECT_TRUE(!itr.valid()); - ritr = itr; - EXPECT_TRUE(!ritr.valid()); - --ritr; - for (size_t i = 0; i < numEntries; ++i) { - EXPECT_TRUE(ritr.valid()); - EXPECT_EQUAL(sorted[numEntries - 1 - i].first, ritr.getKey()); - EXPECT_EQUAL(sorted[numEntries - 1 - i].second, ritr.getData()); - --ritr; - } - EXPECT_TRUE(!ritr.valid()); -} - -void -Test::requireThatWeCanInsertAndRemoveFromTree() -{ - GenerationHandler g; - MyTree tree; - std::vector exp; - std::vector sorted; - size_t numEntries = 1000; - generateData(exp, numEntries); - sorted = exp; - std::sort(sorted.begin(), sorted.end(), LeafPairLess()); - // insert entries - for (size_t i = 0; i < numEntries; ++i) { - int num = UNWRAP(exp[i].first); - const std::string & str = exp[i].second; - EXPECT_TRUE(!tree.find(num).valid()); - //LOG(info, "insert[%zu](%d, %s)", i, num, str.c_str()); - EXPECT_TRUE(tree.insert(num, str)); - EXPECT_TRUE(!tree.insert(num, str)); - for (size_t j = 0; j <= i; ++j) { - //LOG(info, "find[%zu](%d)", j, exp[j].first._val); - MyTree::Iterator itr = tree.find(exp[j].first); - EXPECT_TRUE(itr.valid()); - EXPECT_EQUAL(exp[j].first, itr.getKey()); - EXPECT_EQUAL(exp[j].second, itr.getData()); - } - EXPECT_EQUAL(i + 1u, tree.size()); - EXPECT_TRUE(tree.isValid()); - buildSubTree(exp, i + 1); - } - //std::cout << "tree: " << tree.toString() << std::endl; - - { - MyTree::Iterator itr = tree.begin(); - MyTree::Iterator itre = itr; - MyTree::Iterator itre2; - MyTree::Iterator ritr = itr; - while (itre.valid()) - ++itre; - if (numEntries > 0) { - EXPECT_TRUE(ritr.valid()); - EXPECT_EQUAL(0u, ritr.position()); - --ritr; - EXPECT_TRUE(!ritr.valid()); - EXPECT_EQUAL(numEntries, ritr.position()); - --ritr; - EXPECT_TRUE(ritr.valid()); - EXPECT_EQUAL(numEntries - 1, ritr.position()); - } else { - EXPECT_TRUE(!ritr.valid()); - EXPECT_EQUAL(0u, ritr.position()); - --ritr; - EXPECT_TRUE(!ritr.valid()); - EXPECT_EQUAL(0u, ritr.position()); - } - MyTree::Iterator pitr = itr; - for (size_t i = 0; i < numEntries; ++i) { - ssize_t si = i; - ssize_t sileft = numEntries - i; - EXPECT_TRUE(itr.valid()); - EXPECT_EQUAL(i, itr.position()); - EXPECT_EQUAL(sileft, itre - itr); - EXPECT_EQUAL(-sileft, itr - itre); - EXPECT_EQUAL(sileft, itre2 - itr); - EXPECT_EQUAL(-sileft, itr - itre2); - EXPECT_EQUAL(si, itr - tree.begin()); - EXPECT_EQUAL(-si, tree.begin() - itr); - EXPECT_EQUAL(i != 0, itr - pitr); - EXPECT_EQUAL(-(i != 0), pitr - itr); - EXPECT_EQUAL(sorted[i].first, itr.getKey()); - EXPECT_EQUAL(sorted[i].second, itr.getData()); - pitr = itr; - ++itr; - ritr = itr; - --ritr; - EXPECT_TRUE(ritr.valid()); - EXPECT_TRUE(ritr == pitr); - } - EXPECT_TRUE(!itr.valid()); - EXPECT_EQUAL(numEntries, itr.position()); - ssize_t sNumEntries = numEntries; - EXPECT_EQUAL(sNumEntries, itr - tree.begin()); - EXPECT_EQUAL(-sNumEntries, tree.begin() - itr); - EXPECT_EQUAL(1, itr - pitr); - EXPECT_EQUAL(-1, pitr - itr); - } - // compact full tree by calling incremental compaction methods in a loop - { - MyTree::NodeAllocatorType &manager = tree.getAllocator(); - std::vector toHold = manager.startCompact(); - MyTree::Iterator itr = tree.begin(); - tree.setRoot(itr.moveFirstLeafNode(tree.getRoot())); - while (itr.valid()) { - // LOG(info, "Leaf moved to %d", UNWRAP(itr.getKey())); - itr.moveNextLeafNode(); - } - manager.finishCompact(toHold); - manager.freeze(); - 
manager.transferHoldLists(g.getCurrentGeneration()); - g.incGeneration(); - manager.trimHoldLists(g.getFirstUsedGeneration()); - } - // remove entries - for (size_t i = 0; i < numEntries; ++i) { - int num = UNWRAP(exp[i].first); - //LOG(info, "remove[%zu](%d)", i, num); - //std::cout << "tree: " << tree.toString() << std::endl; - EXPECT_TRUE(tree.remove(num)); - EXPECT_TRUE(!tree.find(num).valid()); - EXPECT_TRUE(!tree.remove(num)); - EXPECT_TRUE(tree.isValid()); - for (size_t j = i + 1; j < numEntries; ++j) { - MyTree::Iterator itr = tree.find(exp[j].first); - EXPECT_TRUE(itr.valid()); - EXPECT_EQUAL(exp[j].first, itr.getKey()); - EXPECT_EQUAL(exp[j].second, itr.getData()); - } - EXPECT_EQUAL(numEntries - 1 - i, tree.size()); - } -} - -void -Test::requireThatSortedTreeInsertWorks() -{ - { - GenerationHandler g; - MyTree tree; - for (int i = 0; i < 1000; ++i) { - EXPECT_TRUE(tree.insert(i, toStr(i))); - MyTree::Iterator itr = tree.find(i); - EXPECT_TRUE(itr.valid()); - EXPECT_EQUAL(toStr(i), itr.getData()); - EXPECT_TRUE(tree.isValid()); - } - } - { - GenerationHandler g; - MyTree tree; - for (int i = 1000; i > 0; --i) { - EXPECT_TRUE(tree.insert(i, toStr(i))); - MyTree::Iterator itr = tree.find(i); - EXPECT_TRUE(itr.valid()); - EXPECT_EQUAL(toStr(i), itr.getData()); - EXPECT_TRUE(tree.isValid()); - } - } -} - -void -Test::requireThatCornerCaseTreeFindWorks() -{ - GenerationHandler g; - MyTree tree; - for (int i = 1; i < 100; ++i) { - tree.insert(i, toStr(i)); - } - EXPECT_TRUE(!tree.find(0).valid()); // lower than lowest - EXPECT_TRUE(!tree.find(1000).valid()); // higher than highest -} - -void -Test::requireThatBasicTreeIteratorWorks() -{ - GenerationHandler g; - MyTree tree; - EXPECT_TRUE(!tree.begin().valid()); - std::vector exp; - size_t numEntries = 1000; - generateData(exp, numEntries); - for (size_t i = 0; i < numEntries; ++i) { - tree.insert(exp[i].first, exp[i].second); - } - std::sort(exp.begin(), exp.end(), LeafPairLess()); - size_t ei = 0; - MyTree::Iterator itr = tree.begin(); - MyTree::Iterator ritr; - EXPECT_EQUAL(1000u, itr.size()); - for (; itr.valid(); ++itr) { - //LOG(info, "itr(%d, %s)", itr.getKey(), itr.getData().c_str()); - EXPECT_EQUAL(UNWRAP(exp[ei].first), UNWRAP(itr.getKey())); - EXPECT_EQUAL(exp[ei].second, itr.getData()); - ei++; - ritr = itr; - } - EXPECT_EQUAL(numEntries, ei); - for (; ritr.valid(); --ritr) { - --ei; - //LOG(info, "itr(%d, %s)", itr.getKey(), itr.getData().c_str()); - EXPECT_EQUAL(UNWRAP(exp[ei].first), UNWRAP(ritr.getKey())); - EXPECT_EQUAL(exp[ei].second, ritr.getData()); - } -} - -void -Test::requireThatTreeIteratorSeekWorks() -{ - GenerationHandler g; - MyTree tree; - for (int i = 0; i < 40; i += 2) { - tree.insert(i, toStr(i)); - } - //std::cout << tree.toString() << std::endl; - EXPECT_TRUE(assertSeek(2, 2, tree)); // next key - EXPECT_TRUE(assertSeek(10, 10, tree)); // skip to existing - EXPECT_TRUE(assertSeek(26, 26, tree)); // skip to existing - EXPECT_TRUE(assertSeek(11, 12, tree)); // skip to non-existing - EXPECT_TRUE(assertSeek(23, 24, tree)); // skip to non-existing - { - MyTree::Iterator itr = tree.begin(); - EXPECT_TRUE(assertSeek(4, 4, itr)); - EXPECT_TRUE(assertSeek(14, 14, itr)); - EXPECT_TRUE(assertSeek(18, 18, itr)); - EXPECT_TRUE(assertSeek(36, 36, itr)); - } - { - MyTree::Iterator itr = tree.begin(); - EXPECT_TRUE(assertSeek(3, 4, itr)); - EXPECT_TRUE(assertSeek(13, 14, itr)); - EXPECT_TRUE(assertSeek(17, 18, itr)); - EXPECT_TRUE(assertSeek(35, 36, itr)); - } - { - MyTree::Iterator itr = tree.begin(); - 
MyTree::Iterator itr2 = tree.begin(); - itr.binarySeek(40); // outside - itr2.linearSeek(40); // outside - EXPECT_TRUE(!itr.valid()); - EXPECT_TRUE(!itr2.valid()); - } - { - MyTree::Iterator itr = tree.begin(); - EXPECT_TRUE(assertSeek(8, 8, itr)); - for (int i = 10; i < 40; i += 2) { - ++itr; - EXPECT_EQUAL(i, UNWRAP(itr.getKey())); - } - } - { - MyTree::Iterator itr = tree.begin(); - EXPECT_TRUE(assertSeek(26, 26, itr)); - for (int i = 28; i < 40; i += 2) { - ++itr; - EXPECT_EQUAL(i, UNWRAP(itr.getKey())); - } - } - GenerationHandler g2; - MyTree tree2; // only leaf node - tree2.insert(0, "0"); - tree2.insert(2, "2"); - tree2.insert(4, "4"); - EXPECT_TRUE(assertSeek(1, 2, tree2)); - EXPECT_TRUE(assertSeek(2, 2, tree2)); - { - MyTree::Iterator itr = tree2.begin(); - MyTree::Iterator itr2 = tree2.begin(); - itr.binarySeek(5); // outside - itr2.linearSeek(5); // outside - EXPECT_TRUE(!itr.valid()); - EXPECT_TRUE(!itr2.valid()); - } -} - -void -Test::requireThatTreeIteratorAssignWorks() -{ - GenerationHandler g; - MyTree tree; - for (int i = 0; i < 1000; ++i) { - tree.insert(i, toStr(i)); - } - for (int i = 0; i < 1000; ++i) { - MyTree::Iterator itr = tree.find(i); - MyTree::Iterator itr2 = itr; - EXPECT_TRUE(itr == itr2); - int expNum = i; - for (; itr2.valid(); ++itr2) { - EXPECT_EQUAL(expNum++, UNWRAP(itr2.getKey())); - } - EXPECT_EQUAL(1000, expNum); - } -} - -size_t -adjustAllocatedBytes(size_t nodeCount, size_t nodeSize) -{ - // Note: Sizes of underlying data store buffers are power of 2. - size_t allocatedBytes = vespalib::roundUp2inN(nodeCount * nodeSize); - size_t adjustedNodeCount = allocatedBytes / nodeSize; - return adjustedNodeCount * nodeSize; -} - -void -Test::requireThatMemoryUsageIsCalculated() -{ - typedef BTreeNodeAllocator NodeAllocator; - typedef NodeAllocator::InternalNodeType INode; - typedef NodeAllocator::LeafNodeType LNode; - typedef NodeAllocator::InternalNodeTypeRefPair IRef; - typedef NodeAllocator::LeafNodeTypeRefPair LRef; - LOG(info, "sizeof(BTreeNode)=%zu, sizeof(INode)=%zu, sizeof(LNode)=%zu", - sizeof(BTreeNode), sizeof(INode), sizeof(LNode)); - EXPECT_GREATER(sizeof(INode), sizeof(LNode)); - GenerationHandler gh; - gh.incGeneration(); - NodeAllocator tm; - vespalib::MemoryUsage mu; - const uint32_t initialInternalNodes = 128u; - const uint32_t initialLeafNodes = 128u; - mu.incAllocatedBytes(adjustAllocatedBytes(initialInternalNodes, sizeof(INode))); - mu.incAllocatedBytes(adjustAllocatedBytes(initialLeafNodes, sizeof(LNode))); - mu.incUsedBytes(sizeof(INode)); - mu.incDeadBytes(sizeof(INode)); - EXPECT_TRUE(assertMemoryUsage(mu, tm.getMemoryUsage())); - - // add internal node - IRef ir = tm.allocInternalNode(1); - mu.incUsedBytes(sizeof(INode)); - EXPECT_TRUE(assertMemoryUsage(mu, tm.getMemoryUsage())); - - // add leaf node - LRef lr = tm.allocLeafNode(); - mu.incUsedBytes(sizeof(LNode)); - EXPECT_TRUE(assertMemoryUsage(mu, tm.getMemoryUsage())); - - // move nodes to hold list - tm.freeze(); // mark allocated nodes as frozen so we can hold them later on - tm.holdNode(ir.ref, ir.data); - mu.incAllocatedBytesOnHold(sizeof(INode)); - EXPECT_TRUE(assertMemoryUsage(mu, tm.getMemoryUsage())); - tm.holdNode(lr.ref, lr.data); - mu.incAllocatedBytesOnHold(sizeof(LNode)); - EXPECT_TRUE(assertMemoryUsage(mu, tm.getMemoryUsage())); - - // trim hold lists - tm.transferHoldLists(gh.getCurrentGeneration()); - gh.incGeneration(); - tm.trimHoldLists(gh.getFirstUsedGeneration()); - mu = vespalib::MemoryUsage(); - 
mu.incAllocatedBytes(adjustAllocatedBytes(initialInternalNodes, sizeof(INode))); - mu.incAllocatedBytes(adjustAllocatedBytes(initialLeafNodes, sizeof(LNode))); - mu.incUsedBytes(sizeof(INode) * 2); - mu.incDeadBytes(sizeof(INode) * 2); - mu.incUsedBytes(sizeof(LNode)); - mu.incDeadBytes(sizeof(LNode)); - EXPECT_TRUE(assertMemoryUsage(mu, tm.getMemoryUsage())); -} - -template -void -Test::requireThatLowerBoundWorksT() -{ - GenerationHandler g; - TreeType t; - EXPECT_TRUE(t.insert(10, BTreeNoLeafData())); - EXPECT_TRUE(t.insert(20, BTreeNoLeafData())); - EXPECT_TRUE(t.insert(30, BTreeNoLeafData())); - EXPECT_EQUAL(10, t.lowerBound(9).getKey()); - EXPECT_EQUAL(20, t.lowerBound(20).getKey()); - EXPECT_EQUAL(30, t.lowerBound(21).getKey()); - EXPECT_EQUAL(30, t.lowerBound(30).getKey()); - EXPECT_TRUE(!t.lowerBound(31).valid()); - for (int i = 40; i < 1000; i+=10) { - EXPECT_TRUE(t.insert(i, BTreeNoLeafData())); - } - for (int i = 9; i < 990; i+=10) { - EXPECT_EQUAL(i + 1, t.lowerBound(i).getKey()); - EXPECT_EQUAL(i + 1, t.lowerBound(i + 1).getKey()); - } - EXPECT_TRUE(!t.lowerBound(991).valid()); -} - -void -Test::requireThatLowerBoundWorks() -{ - requireThatLowerBoundWorksT(); - requireThatLowerBoundWorksT(); -} - -template -void -Test::requireThatUpperBoundWorksT() -{ - GenerationHandler g; - TreeType t; - EXPECT_TRUE(t.insert(10, BTreeNoLeafData())); - EXPECT_TRUE(t.insert(20, BTreeNoLeafData())); - EXPECT_TRUE(t.insert(30, BTreeNoLeafData())); - EXPECT_EQUAL(10, t.upperBound(9).getKey()); - EXPECT_EQUAL(30, t.upperBound(20).getKey()); - EXPECT_EQUAL(30, t.upperBound(21).getKey()); - EXPECT_TRUE(!t.upperBound(30).valid()); - for (int i = 40; i < 1000; i+=10) { - EXPECT_TRUE(t.insert(i, BTreeNoLeafData())); - } - for (int i = 9; i < 980; i+=10) { - EXPECT_EQUAL(i + 1, t.upperBound(i).getKey()); - EXPECT_EQUAL(i + 11, t.upperBound(i + 1).getKey()); - } - EXPECT_TRUE(!t.upperBound(990).valid()); -} - -void -Test::requireThatUpperBoundWorks() -{ - requireThatUpperBoundWorksT(); - requireThatUpperBoundWorksT(); -} - -struct UpdKeyComp { - int _remainder; - mutable size_t _numErrors; - UpdKeyComp(int remainder) : _remainder(remainder), _numErrors(0) {} - bool operator() (const int & lhs, const int & rhs) const { - if (lhs % 2 != _remainder) ++_numErrors; - if (rhs % 2 != _remainder) ++_numErrors; - return lhs < rhs; - } -}; - -void -Test::requireThatUpdateOfKeyWorks() -{ - typedef BTree UpdKeyTree; - typedef UpdKeyTree::Iterator UpdKeyTreeIterator; - GenerationHandler g; - UpdKeyTree t; - UpdKeyComp cmp1(0); - for (int i = 0; i < 1000; i+=2) { - EXPECT_TRUE(t.insert(i, BTreeNoLeafData(), cmp1)); - } - EXPECT_EQUAL(0u, cmp1._numErrors); - for (int i = 0; i < 1000; i+=2) { - UpdKeyTreeIterator itr = t.find(i, cmp1); - itr.writeKey(i + 1); - } - UpdKeyComp cmp2(1); - for (int i = 1; i < 1000; i+=2) { - UpdKeyTreeIterator itr = t.find(i, cmp2); - EXPECT_TRUE(itr.valid()); - } - EXPECT_EQUAL(0u, cmp2._numErrors); -} - - -void -Test::requireThatSmallNodesWorks() -{ - typedef BTreeStore TreeStore; - GenerationHandler g; - TreeStore s; - - EntryRef root; - EXPECT_EQUAL(0u, s.size(root)); - EXPECT_TRUE(s.isSmallArray(root)); - EXPECT_TRUE(s.insert(root, 40, "fourty")); - EXPECT_TRUE(!s.insert(root, 40, "fourty.not")); - EXPECT_EQUAL(1u, s.size(root)); - EXPECT_TRUE(s.isSmallArray(root)); - EXPECT_TRUE(s.insert(root, 20, "twenty")); - EXPECT_TRUE(!s.insert(root, 20, "twenty.not")); - EXPECT_TRUE(!s.insert(root, 40, "fourty.not")); - EXPECT_EQUAL(2u, s.size(root)); - EXPECT_TRUE(s.isSmallArray(root)); - 
EXPECT_TRUE(s.insert(root, 60, "sixty")); - EXPECT_TRUE(!s.insert(root, 60, "sixty.not")); - EXPECT_TRUE(!s.insert(root, 20, "twenty.not")); - EXPECT_TRUE(!s.insert(root, 40, "fourty.not")); - EXPECT_EQUAL(3u, s.size(root)); - EXPECT_TRUE(s.isSmallArray(root)); - EXPECT_TRUE(s.insert(root, 50, "fifty")); - EXPECT_TRUE(!s.insert(root, 50, "fifty.not")); - EXPECT_TRUE(!s.insert(root, 60, "sixty.not")); - EXPECT_TRUE(!s.insert(root, 20, "twenty.not")); - EXPECT_TRUE(!s.insert(root, 40, "fourty.not")); - EXPECT_EQUAL(4u, s.size(root)); - EXPECT_TRUE(s.isSmallArray(root)); - - for (uint32_t i = 0; i < 100; ++i) { - EXPECT_TRUE(s.insert(root, 1000 + i, "big")); - if (i > 0) { - EXPECT_TRUE(!s.insert(root, 1000 + i - 1, "big")); - } - EXPECT_EQUAL(5u + i, s.size(root)); - EXPECT_EQUAL(5u + i <= 8u, s.isSmallArray(root)); - } - EXPECT_TRUE(s.remove(root, 40)); - EXPECT_TRUE(!s.remove(root, 40)); - EXPECT_EQUAL(103u, s.size(root)); - EXPECT_TRUE(!s.isSmallArray(root)); - EXPECT_TRUE(s.remove(root, 20)); - EXPECT_TRUE(!s.remove(root, 20)); - EXPECT_EQUAL(102u, s.size(root)); - EXPECT_TRUE(!s.isSmallArray(root)); - EXPECT_TRUE(s.remove(root, 50)); - EXPECT_TRUE(!s.remove(root, 50)); - EXPECT_EQUAL(101u, s.size(root)); - EXPECT_TRUE(!s.isSmallArray(root)); - for (uint32_t i = 0; i < 100; ++i) { - EXPECT_TRUE(s.remove(root, 1000 + i)); - if (i > 0) { - EXPECT_TRUE(!s.remove(root, 1000 + i - 1)); - } - EXPECT_EQUAL(100 - i, s.size(root)); - EXPECT_EQUAL(100 - i <= 8u, s.isSmallArray(root)); - } - EXPECT_EQUAL(1u, s.size(root)); - EXPECT_TRUE(s.isSmallArray(root)); - - s.clear(root); - s.clearBuilder(); - s.freeze(); - s.transferHoldLists(g.getCurrentGeneration()); - g.incGeneration(); - s.trimHoldLists(g.getFirstUsedGeneration()); -} - - -void -Test::requireThatApplyWorks() -{ - typedef BTreeStore TreeStore; - typedef TreeStore::KeyType KeyType; - typedef TreeStore::KeyDataType KeyDataType; - GenerationHandler g; - TreeStore s; - std::vector additions; - std::vector removals; - - EntryRef root; - EXPECT_EQUAL(0u, s.size(root)); - EXPECT_TRUE(s.isSmallArray(root)); - - additions.clear(); - removals.clear(); - additions.push_back(KeyDataType(40, "fourty")); - s.apply(root, &additions[0], &additions[0] + additions.size(), - &removals[0], &removals[0] + removals.size()); - EXPECT_EQUAL(1u, s.size(root)); - EXPECT_TRUE(s.isSmallArray(root)); - - additions.clear(); - removals.clear(); - additions.push_back(KeyDataType(20, "twenty")); - s.apply(root, &additions[0], &additions[0] + additions.size(), - &removals[0], &removals[0] + removals.size()); - EXPECT_EQUAL(2u, s.size(root)); - EXPECT_TRUE(s.isSmallArray(root)); - - additions.clear(); - removals.clear(); - additions.push_back(KeyDataType(60, "sixty")); - s.apply(root, &additions[0], &additions[0] + additions.size(), - &removals[0], &removals[0] + removals.size()); - EXPECT_EQUAL(3u, s.size(root)); - EXPECT_TRUE(s.isSmallArray(root)); - - additions.clear(); - removals.clear(); - additions.push_back(KeyDataType(50, "fifty")); - s.apply(root, &additions[0], &additions[0] + additions.size(), - &removals[0], &removals[0] + removals.size()); - EXPECT_EQUAL(4u, s.size(root)); - EXPECT_TRUE(s.isSmallArray(root)); - - for (uint32_t i = 0; i < 100; ++i) { - additions.clear(); - removals.clear(); - additions.push_back(KeyDataType(1000 + i, "big")); - s.apply(root, &additions[0], &additions[0] + additions.size(), - &removals[0], &removals[0] + removals.size()); - EXPECT_EQUAL(5u + i, s.size(root)); - EXPECT_EQUAL(5u + i <= 8u, s.isSmallArray(root)); - } - - 
additions.clear(); - removals.clear(); - removals.push_back(40); - s.apply(root, &additions[0], &additions[0] + additions.size(), - &removals[0], &removals[0] + removals.size()); - EXPECT_EQUAL(103u, s.size(root)); - EXPECT_TRUE(!s.isSmallArray(root)); - - additions.clear(); - removals.clear(); - removals.push_back(20); - s.apply(root, &additions[0], &additions[0] + additions.size(), - &removals[0], &removals[0] + removals.size()); - EXPECT_EQUAL(102u, s.size(root)); - EXPECT_TRUE(!s.isSmallArray(root)); - - additions.clear(); - removals.clear(); - removals.push_back(50); - s.apply(root, &additions[0], &additions[0] + additions.size(), - &removals[0], &removals[0] + removals.size()); - EXPECT_EQUAL(101u, s.size(root)); - EXPECT_TRUE(!s.isSmallArray(root)); - for (uint32_t i = 0; i < 100; ++i) { - additions.clear(); - removals.clear(); - removals.push_back(1000 +i); - s.apply(root, &additions[0], &additions[0] + additions.size(), - &removals[0], &removals[0] + removals.size()); - EXPECT_EQUAL(100 - i, s.size(root)); - EXPECT_EQUAL(100 - i <= 8u, s.isSmallArray(root)); - } - EXPECT_EQUAL(1u, s.size(root)); - EXPECT_TRUE(s.isSmallArray(root)); - - additions.clear(); - removals.clear(); - for (uint32_t i = 0; i < 20; ++i) - additions.push_back(KeyDataType(1000 + i, "big")); - removals.push_back(60); - removals.push_back(1002); - s.apply(root, &additions[0], &additions[0] + additions.size(), - &removals[0], &removals[0] + removals.size()); - EXPECT_EQUAL(20u, s.size(root)); - EXPECT_TRUE(!s.isSmallArray(root)); - - additions.clear(); - s.apply(root, &additions[0], &additions[0] + additions.size(), - &removals[0], &removals[0] + removals.size()); - EXPECT_EQUAL(19u, s.size(root)); - EXPECT_TRUE(!s.isSmallArray(root)); - - additions.clear(); - removals.clear(); - for (uint32_t i = 0; i < 20; ++i) - additions.push_back(KeyDataType(1100 + i, "big")); - for (uint32_t i = 0; i < 10; ++i) - removals.push_back(1000 + i); - s.apply(root, &additions[0], &additions[0] + additions.size(), - &removals[0], &removals[0] + removals.size()); - EXPECT_EQUAL(30u, s.size(root)); - EXPECT_TRUE(!s.isSmallArray(root)); - - s.clear(root); - s.clearBuilder(); - s.freeze(); - s.transferHoldLists(g.getCurrentGeneration()); - g.incGeneration(); - s.trimHoldLists(g.getFirstUsedGeneration()); -} - -class MyTreeTestIterator : public MyTree::Iterator -{ -public: - MyTreeTestIterator(const MyTree::Iterator &rhs) - : MyTree::Iterator(rhs) - { - } - - int getPathSize() const { return _pathSize; } -}; - - -void -Test::requireThatIteratorDistanceWorks(int numEntries) -{ - GenerationHandler g; - MyTree tree; - typedef MyTree::Iterator Iterator; - for (int i = 0; i < numEntries; ++i) { - tree.insert(i, toStr(i)); - } - MyTreeTestIterator tit = tree.begin(); - LOG(info, - "numEntries=%d, iterator pathSize=%d", - numEntries, tit.getPathSize()); - Iterator it = tree.begin(); - for (int i = 0; i <= numEntries; ++i) { - Iterator iit = tree.lowerBound(i); - Iterator iitn = tree.lowerBound(i + 1); - Iterator iitu = tree.upperBound(i); - Iterator iitls = tree.begin(); - Iterator iitbs = tree.begin(); - Iterator iitlsp = tree.begin(); - Iterator iitbsp = tree.begin(); - Iterator iitlb(tree.getRoot(), tree.getAllocator()); - iitlb.lower_bound(i); - Iterator iitlb2(BTreeNode::Ref(), tree.getAllocator()); - iitlb2.lower_bound(tree.getRoot(), i); - if (i > 0) { - iitls.linearSeek(i); - iitbs.binarySeek(i); - ++it; - } - iitlsp.linearSeekPast(i); - iitbsp.binarySeekPast(i); - Iterator iitlsp2 = iitls; - Iterator iitbsp2 = iitbs; - Iterator iitnr 
= i < numEntries ? iitn : tree.begin(); - --iitnr; - if (i < numEntries) { - iitlsp2.linearSeekPast(i); - iitbsp2.binarySeekPast(i); - } - EXPECT_EQUAL(i, static_cast(iit.position())); - EXPECT_EQUAL(i < numEntries, iit.valid()); - EXPECT_TRUE(iit.identical(it)); - EXPECT_TRUE(iit.identical(iitls)); - EXPECT_TRUE(iit.identical(iitbs)); - EXPECT_TRUE(iit.identical(iitnr)); - EXPECT_TRUE(iit.identical(iitlb)); - EXPECT_TRUE(iit.identical(iitlb2)); - EXPECT_TRUE(iitn.identical(iitu)); - EXPECT_TRUE(iitn.identical(iitlsp)); - EXPECT_TRUE(iitn.identical(iitbsp)); - EXPECT_TRUE(iitn.identical(iitlsp2)); - EXPECT_TRUE(iitn.identical(iitbsp2)); - if (i < numEntries) { - EXPECT_EQUAL(i + 1, static_cast(iitn.position())); - EXPECT_EQUAL(i + 1 < numEntries, iitn.valid()); - } - for (int j = 0; j <= numEntries; ++j) { - Iterator jit = tree.lowerBound(j); - EXPECT_EQUAL(j, static_cast(jit.position())); - EXPECT_EQUAL(j < numEntries, jit.valid()); - EXPECT_EQUAL(i - j, iit - jit); - EXPECT_EQUAL(j - i, jit - iit); - - Iterator jit2 = jit; - jit2.setupEnd(); - EXPECT_EQUAL(numEntries - j, jit2 - jit); - EXPECT_EQUAL(numEntries - i, jit2 - iit); - EXPECT_EQUAL(j - numEntries, jit - jit2); - EXPECT_EQUAL(i - numEntries, iit - jit2); - } - } -} - - -void -Test::requireThatIteratorDistanceWorks() -{ - requireThatIteratorDistanceWorks(1); - requireThatIteratorDistanceWorks(3); - requireThatIteratorDistanceWorks(8); - requireThatIteratorDistanceWorks(20); - requireThatIteratorDistanceWorks(100); - requireThatIteratorDistanceWorks(400); -} - - -int -Test::Main() -{ - TEST_INIT("btree_test"); - - requireThatNodeInsertWorks(); - requireThatTreeInsertWorks(); - requireThatNodeSplitInsertWorks(); - requireThatNodeStealWorks(); - requireThatTreeRemoveStealWorks(); - requireThatNodeRemoveWorks(); - requireThatNodeLowerBoundWorks(); - requireThatWeCanInsertAndRemoveFromTree(); - requireThatSortedTreeInsertWorks(); - requireThatCornerCaseTreeFindWorks(); - requireThatBasicTreeIteratorWorks(); - requireThatTreeIteratorSeekWorks(); - requireThatTreeIteratorAssignWorks(); - requireThatMemoryUsageIsCalculated(); - requireThatLowerBoundWorks(); - requireThatUpperBoundWorks(); - requireThatUpdateOfKeyWorks(); - requireThatSmallNodesWorks(); - requireThatApplyWorks(); - requireThatIteratorDistanceWorks(); - - TEST_DONE(); -} - -} -} - -TEST_APPHOOK(search::btree::Test); diff --git a/searchlib/src/tests/btree/btreeaggregation_test.cpp b/searchlib/src/tests/btree/btreeaggregation_test.cpp deleted file mode 100644 index ef91c2c868a..00000000000 --- a/searchlib/src/tests/btree/btreeaggregation_test.cpp +++ /dev/null @@ -1,1157 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -#include -LOG_SETUP("btreeaggregation_test"); -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -using vespalib::GenerationHandler; -using search::datastore::EntryRef; - -namespace search { -namespace btree { - -namespace { - -int32_t -toVal(uint32_t key) -{ - return key + 1000; -} - -int32_t -toHighVal(uint32_t key) -{ - return toVal(key) + 1000; -} - -int32_t -toLowVal(uint32_t key) -{ - return toVal(key) - 1000000; -} - -int32_t -toNotVal(uint32_t key) -{ - return key + 2000; -} - -} - -typedef BTreeTraits<4, 4, 31, false> MyTraits; - -#define KEYWRAP - -#ifdef KEYWRAP - -// Force use of functor to compare keys. 
-class WrapInt -{ -public: - int _val; - WrapInt(int val) : _val(val) {} - WrapInt() : _val(0) {} - bool operator==(const WrapInt & rhs) const { return _val == rhs._val; } -}; - -std::ostream & -operator<<(std::ostream &s, const WrapInt &i) -{ - s << i._val; - return s; -} - -typedef WrapInt MyKey; -class MyComp -{ -public: - bool - operator()(const WrapInt &a, const WrapInt &b) const - { - return a._val < b._val; - } -}; - -#define UNWRAP(key) (key._val) -#else -typedef int MyKey; -typedef std::less MyComp; -#define UNWRAP(key) (key) -#endif - -typedef BTree MyTree; -typedef BTreeStore MyTreeStore; -typedef MyTree::Builder MyTreeBuilder; -typedef MyTree::LeafNodeType MyLeafNode; -typedef MyTree::InternalNodeType MyInternalNode; -typedef MyTree::NodeAllocatorType MyNodeAllocator; -typedef MyTree::Builder::Aggregator MyAggregator; -typedef MyTree::AggrCalcType MyAggrCalc; -typedef std::pair LeafPair; -typedef MyTreeStore::KeyDataType MyKeyData; -typedef MyTreeStore::KeyDataTypeRefPair MyKeyDataRefPair; - -typedef BTree SetTreeB; - -typedef BTreeTraits<16, 16, 10, false> LSeekTraits; -typedef BTree, LSeekTraits> SetTreeL; - -struct LeafPairLess { - bool operator()(const LeafPair & lhs, const LeafPair & rhs) const { - return UNWRAP(lhs.first) < UNWRAP(rhs.first); - } -}; - - -class MockTree -{ -public: - typedef std::map MTree; - typedef std::map > MRTree; - MTree _tree; - MRTree _rtree; - - MockTree(); - ~MockTree(); - - - void - erase(uint32_t key) - { - MTree::iterator it(_tree.find(key)); - if (it == _tree.end()) - return; - int32_t oval = it->second; - MRTree::iterator rit(_rtree.find(oval)); - assert(rit != _rtree.end()); - size_t ecount = rit->second.erase(key); - assert(ecount == 1); - (void) ecount; - if (rit->second.empty()) { - _rtree.erase(oval); - } - _tree.erase(key); - } - - void - insert(uint32_t key, int32_t val) - { - erase(key); - _tree[key] = val; - _rtree[val].insert(key); - } -}; - - -MockTree::MockTree() - : _tree(), - _rtree() -{} -MockTree::~MockTree() {} - -class MyTreeForceApplyStore : public MyTreeStore -{ -public: - typedef MyComp CompareT; - - bool - insert(EntryRef &ref, const KeyType &key, const DataType &data, - CompareT comp = CompareT()); - - bool - remove(EntryRef &ref, const KeyType &key, CompareT comp = CompareT()); -}; - - -bool -MyTreeForceApplyStore::insert(EntryRef &ref, - const KeyType &key, const DataType &data, - CompareT comp) -{ - bool retVal = true; - if (ref.valid()) { - RefType iRef(ref); - uint32_t clusterSize = getClusterSize(iRef); - if (clusterSize == 0) { - const BTreeType *tree = getTreeEntry(iRef); - const NodeAllocatorType &allocator = getAllocator(); - Iterator itr = tree->find(key, allocator, comp); - if (itr.valid()) - retVal = false; - } else { - const KeyDataType *old = getKeyDataEntry(iRef, clusterSize); - const KeyDataType *olde = old + clusterSize; - const KeyDataType *oldi = lower_bound(old, olde, key, comp); - if (oldi < olde && !comp(key, oldi->_key)) - retVal = false; // key already present - } - } - KeyDataType addition(key, data); - if (retVal) { - apply(ref, &addition, &addition+1, NULL, NULL, comp); - } - return retVal; -} - - -bool -MyTreeForceApplyStore::remove(EntryRef &ref, const KeyType &key, - CompareT comp) -{ - bool retVal = true; - if (!ref.valid()) - retVal = false; // not found - else { - RefType iRef(ref); - uint32_t clusterSize = getClusterSize(iRef); - if (clusterSize == 0) { - const BTreeType *tree = getTreeEntry(iRef); - const NodeAllocatorType &allocator = getAllocator(); - Iterator itr = 
tree->find(key, allocator, comp); - if (!itr.valid()) - retVal = false; - } else { - const KeyDataType *old = getKeyDataEntry(iRef, clusterSize); - const KeyDataType *olde = old + clusterSize; - const KeyDataType *oldi = lower_bound(old, olde, key, comp); - if (oldi == olde || comp(key, oldi->_key)) - retVal = false; // not found - } - } - std::vector additions; - std::vector removals; - removals.push_back(key); - apply(ref, - &additions[0], &additions[additions.size()], - &removals[0], &removals[removals.size()], - comp); - return retVal; -} - - -template -void -freezeTree(GenerationHandler &g, ManagerType &m) -{ - m.freeze(); - m.transferHoldLists(g.getCurrentGeneration()); - g.incGeneration(); - m.trimHoldLists(g.getFirstUsedGeneration()); -} - -template -void -cleanup(GenerationHandler &g, ManagerType &m) -{ - freezeTree(g, m); -} - -template -void -cleanup(GenerationHandler & g, - ManagerType & m, - BTreeNode::Ref n1Ref, NodeType * n1, - BTreeNode::Ref n2Ref = BTreeNode::Ref(), NodeType * n2 = NULL) -{ - assert(ManagerType::isValidRef(n1Ref)); - m.holdNode(n1Ref, n1); - if (n2 != NULL) { - assert(ManagerType::isValidRef(n2Ref)); - m.holdNode(n2Ref, n2); - } else { - assert(!ManagerType::isValidRef(n2Ref)); - } - cleanup(g, m); -} - -class Test : public vespalib::TestApp { -private: - template - bool - assertTree(const std::string & exp, const Tree &t); - - template - bool - assertAggregated(const MockTree &m, const Tree &t); - - template - bool - assertAggregated(const MockTree &m, const TreeStore &s, EntryRef ref); - - void - buildSubTree(const std::vector &sub, - size_t numEntries); - - void requireThatNodeInsertWorks(); - void requireThatNodeSplitInsertWorks(); - void requireThatTreeInsertWorks(); - void requireThatNodeStealWorks(); - void requireThatNodeRemoveWorks(); - void requireThatWeCanInsertAndRemoveFromTree(); - void requireThatSortedTreeInsertWorks(); - void requireThatCornerCaseTreeFindWorks(); - void requireThatBasicTreeIteratorWorks(); - void requireThatTreeIteratorAssignWorks(); - void requireThatUpdateOfKeyWorks(); - void requireThatUpdateOfDataWorks(); - - template - void - requireThatSmallNodesWorks(); -public: - int Main() override; -}; - - -template -bool -Test::assertTree(const std::string &exp, const Tree &t) -{ - std::stringstream ss; - test::BTreePrinter printer(ss, t.getAllocator()); - printer.print(t.getRoot()); - if (!EXPECT_EQUAL(exp, ss.str())) return false; - return true; -} - - -template -bool -Test::assertAggregated(const MockTree &m, const Tree &t) -{ - const MinMaxAggregated &ta(t.getAggregated()); - if (t.getRoot().valid()) { - return - EXPECT_FALSE(m._rtree.empty()) && - EXPECT_EQUAL(m._rtree.rbegin()->first, - ta.getMax()) && - EXPECT_EQUAL(m._rtree.begin()->first, - ta.getMin()); - } else { - return EXPECT_TRUE(m._rtree.empty()) && - EXPECT_EQUAL(std::numeric_limits::min(), - ta.getMax()) && - EXPECT_EQUAL(std::numeric_limits::max(), - ta.getMin()); - } -} - -template -bool -Test::assertAggregated(const MockTree &m, const TreeStore &s, EntryRef ref) -{ - typename TreeStore::Iterator i(s.begin(ref)); - MinMaxAggregated sa(s.getAggregated(ref)); - const MinMaxAggregated &ia(i.getAggregated()); - if (ref.valid()) { - return - EXPECT_FALSE(m._rtree.empty()) && - EXPECT_EQUAL(m._rtree.rbegin()->first, - ia.getMax()) && - EXPECT_EQUAL(m._rtree.begin()->first, - ia.getMin()) && - EXPECT_EQUAL(m._rtree.rbegin()->first, - sa.getMax()) && - EXPECT_EQUAL(m._rtree.begin()->first, - sa.getMin()); - } else { - return EXPECT_TRUE(m._rtree.empty()) && - 
EXPECT_EQUAL(std::numeric_limits::min(), - ia.getMax()) && - EXPECT_EQUAL(std::numeric_limits::max(), - ia.getMin()) && - EXPECT_EQUAL(std::numeric_limits::min(), - sa.getMax()) && - EXPECT_EQUAL(std::numeric_limits::max(), - sa.getMin()); - } -} - - -void -Test::requireThatNodeInsertWorks() -{ - MyTree t; - t.insert(20, 102); - EXPECT_TRUE(assertTree("{{20:102[min=102,max=102]}}", t)); - t.insert(10, 101); - EXPECT_TRUE(assertTree("{{10:101,20:102[min=101,max=102]}}", t)); - t.insert(30, 103); - t.insert(40, 104); - EXPECT_TRUE(assertTree("{{10:101,20:102,30:103,40:104[min=101,max=104]}}", t)); -} - -template -void -populateTree(Tree &t, uint32_t count, uint32_t delta) -{ - uint32_t key = 1; - int32_t value = 101; - for (uint32_t i = 0; i < count; ++i) { - t.insert(key, value); - key += delta; - value += delta; - } -} - -void -populateLeafNode(MyTree &t) -{ - populateTree(t, 4, 2); -} - -void -Test::requireThatNodeSplitInsertWorks() -{ - { // new entry in current node - MyTree t; - populateLeafNode(t); - t.insert(4, 104); - EXPECT_TRUE(assertTree("{{4,7[min=101,max=107]}} -> " - "{{1:101,3:103,4:104[min=101,max=104]}," - "{5:105,7:107[min=105,max=107]}}", t)); - } - { // new entry in split node - MyTree t; - populateLeafNode(t); - t.insert(6, 106); - EXPECT_TRUE(assertTree("{{5,7[min=101,max=107]}} -> " - "{{1:101,3:103,5:105[min=101,max=105]}," - "{6:106,7:107[min=106,max=107]}}", t)); - } - { // new entry at end - MyTree t; - populateLeafNode(t); - t.insert(8, 108); - EXPECT_TRUE(assertTree("{{5,8[min=101,max=108]}} -> " - "{{1:101,3:103,5:105[min=101,max=105]}," - "{7:107,8:108[min=107,max=108]}}", t)); - } -} - -void -Test::requireThatTreeInsertWorks() -{ - { // multi level node split - MyTree t; - populateTree(t, 16, 2); - EXPECT_TRUE(assertTree("{{7,15,23,31[min=101,max=131]}} -> " - "{{1:101,3:103,5:105,7:107[min=101,max=107]}," - "{9:109,11:111,13:113,15:115[min=109,max=115]}," - "{17:117,19:119,21:121,23:123[min=117,max=123]}," - "{25:125,27:127,29:129,31:131[min=125,max=131]}}", t)); - t.insert(33, 133); - EXPECT_TRUE(assertTree("{{23,33[min=101,max=133]}} -> " - "{{7,15,23[min=101,max=123]},{29,33[min=125,max=133]}} -> " - "{{1:101,3:103,5:105,7:107[min=101,max=107]}," - "{9:109,11:111,13:113,15:115[min=109,max=115]}," - "{17:117,19:119,21:121,23:123[min=117,max=123]}," - "{25:125,27:127,29:129[min=125,max=129]}," - "{31:131,33:133[min=131,max=133]}}", t)); - } - { // give to left node to avoid split - MyTree t; - populateTree(t, 8, 2); - t.remove(5); - EXPECT_TRUE(assertTree("{{7,15[min=101,max=115]}} -> " - "{{1:101,3:103,7:107[min=101,max=107]}," - "{9:109,11:111,13:113,15:115[min=109,max=115]}}", t)); - t.insert(10, 110); - EXPECT_TRUE(assertTree("{{9,15[min=101,max=115]}} -> " - "{{1:101,3:103,7:107,9:109[min=101,max=109]}," - "{10:110,11:111,13:113,15:115[min=110,max=115]}}", t)); - } - { // give to left node to avoid split, and move to left node - MyTree t; - populateTree(t, 8, 2); - t.remove(3); - t.remove(5); - EXPECT_TRUE(assertTree("{{7,15[min=101,max=115]}} -> " - "{{1:101,7:107[min=101,max=107]}," - "{9:109,11:111,13:113,15:115[min=109,max=115]}}", t)); - t.insert(8, 108); - EXPECT_TRUE(assertTree("{{9,15[min=101,max=115]}} -> " - "{{1:101,7:107,8:108,9:109[min=101,max=109]}," - "{11:111,13:113,15:115[min=111,max=115]}}", t)); - } - { // not give to left node to avoid split, but insert at end at left node - MyTree t; - populateTree(t, 8, 2); - t.remove(5); - EXPECT_TRUE(assertTree("{{7,15[min=101,max=115]}} -> " - "{{1:101,3:103,7:107[min=101,max=107]}," - 
"{9:109,11:111,13:113,15:115[min=109,max=115]}}", t)); - t.insert(8, 108); - EXPECT_TRUE(assertTree("{{8,15[min=101,max=115]}} -> " - "{{1:101,3:103,7:107,8:108[min=101,max=108]}," - "{9:109,11:111,13:113,15:115[min=109,max=115]}}", t)); - } - { // give to right node to avoid split - MyTree t; - populateTree(t, 8, 2); - t.remove(13); - EXPECT_TRUE(assertTree("{{7,15[min=101,max=115]}} -> " - "{{1:101,3:103,5:105,7:107[min=101,max=107]}," - "{9:109,11:111,15:115[min=109,max=115]}}", t)); - t.insert(4, 104); - EXPECT_TRUE(assertTree("{{5,15[min=101,max=115]}} -> " - "{{1:101,3:103,4:104,5:105[min=101,max=105]}," - "{7:107,9:109,11:111,15:115[min=107,max=115]}}", t)); - } - { // give to right node to avoid split and move to right node - using MyTraits6 = BTreeTraits<6, 6, 31, false>; - using Tree6 = BTree; - - Tree6 t; - populateTree(t, 12, 2); - t.remove(19); - t.remove(21); - t.remove(23); - EXPECT_TRUE(assertTree("{{11,17[min=101,max=117]}} -> " - "{{1:101,3:103,5:105,7:107,9:109,11:111[min=101,max=111]}," - "{13:113,15:115,17:117[min=113,max=117]}}", t)); - t.insert(10, 110); - EXPECT_TRUE(assertTree("{{7,17[min=101,max=117]}} -> " - "{{1:101,3:103,5:105,7:107[min=101,max=107]}," - "{9:109,10:110,11:111,13:113,15:115,17:117[min=109,max=117]}}", t)); - } -} - -struct BTreeStealTraits -{ - static const size_t LEAF_SLOTS = 6; - static const size_t INTERNAL_SLOTS = 6; - static const size_t PATH_SIZE = 20; - static const bool BINARY_SEEK = true; -}; - -void -Test::requireThatNodeStealWorks() -{ - typedef BTree MyStealTree; - { // steal all from left - MyStealTree t; - t.insert(10, 110); - t.insert(20, 120); - t.insert(30, 130); - t.insert(40, 140); - t.insert(50, 150); - t.insert(60, 160); - t.insert(35, 135); - t.remove(35); - EXPECT_TRUE(assertTree("{{30,60[min=110,max=160]}} -> " - "{{10:110,20:120,30:130[min=110,max=130]}," - "{40:140,50:150,60:160[min=140,max=160]}}", t)); - t.remove(50); - EXPECT_TRUE(assertTree("{{10:110,20:120,30:130,40:140,60:160[min=110,max=160]}}", t)); - } - { // steal all from right - MyStealTree t; - t.insert(10, 110); - t.insert(20, 120); - t.insert(30, 130); - t.insert(40, 140); - t.insert(50, 150); - t.insert(60, 160); - t.insert(35, 135); - t.remove(35); - EXPECT_TRUE(assertTree("{{30,60[min=110,max=160]}} -> " - "{{10:110,20:120,30:130[min=110,max=130]}," - "{40:140,50:150,60:160[min=140,max=160]}}", t)); - t.remove(20); - EXPECT_TRUE(assertTree("{{10:110,30:130,40:140,50:150,60:160[min=110,max=160]}}", t)); - } - { // steal some from left - MyStealTree t; - t.insert(10, 110); - t.insert(20, 120); - t.insert(30, 130); - t.insert(60, 160); - t.insert(70, 170); - t.insert(80, 180); - t.insert(50, 150); - t.insert(40, 140); - EXPECT_TRUE(assertTree("{{50,80[min=110,max=180]}} -> " - "{{10:110,20:120,30:130,40:140,50:150[min=110,max=150]}," - "{60:160,70:170,80:180[min=160,max=180]}}", t)); - t.remove(60); - EXPECT_TRUE(assertTree("{{30,80[min=110,max=180]}} -> " - "{{10:110,20:120,30:130[min=110,max=130]}," - "{40:140,50:150,70:170,80:180[min=140,max=180]}}", t)); - } - { // steal some from right - MyStealTree t; - t.insert(10, 110); - t.insert(20, 120); - t.insert(30, 130); - t.insert(40, 140); - t.insert(50, 150); - t.insert(60, 160); - t.insert(70, 170); - t.insert(80, 180); - t.insert(90, 190); - t.remove(40); - EXPECT_TRUE(assertTree("{{30,90[min=110,max=190]}} -> " - "{{10:110,20:120,30:130[min=110,max=130]}," - "{50:150,60:160,70:170,80:180,90:190[min=150,max=190]}}", t)); - t.remove(20); - EXPECT_TRUE(assertTree("{{60,90[min=110,max=190]}} -> " - 
"{{10:110,30:130,50:150,60:160[min=110,max=160]}," - "{70:170,80:180,90:190[min=170,max=190]}}", t)); - } -} - -void -Test::requireThatNodeRemoveWorks() -{ - MyTree t; - populateLeafNode(t); - t.remove(3); - EXPECT_TRUE(assertTree("{{1:101,5:105,7:107[min=101,max=107]}}", t)); - t.remove(1); - EXPECT_TRUE(assertTree("{{5:105,7:107[min=105,max=107]}}", t)); - t.remove(7); - EXPECT_TRUE(assertTree("{{5:105[min=105,max=105]}}", t)); -} - -void -generateData(std::vector & data, size_t numEntries) -{ - data.reserve(numEntries); - Rand48 rnd; - rnd.srand48(10); - for (size_t i = 0; i < numEntries; ++i) { - int num = rnd.lrand48() % 10000000; - uint32_t val = toVal(num); - data.push_back(std::make_pair(num, val)); - } -} - -void -Test::buildSubTree(const std::vector &sub, - size_t numEntries) -{ - GenerationHandler g; - MyTree tree; - MyTreeBuilder builder(tree.getAllocator()); - MockTree mock; - - std::vector sorted(sub.begin(), sub.begin() + numEntries); - std::sort(sorted.begin(), sorted.end(), LeafPairLess()); - for (size_t i = 0; i < numEntries; ++i) { - int num = UNWRAP(sorted[i].first); - const uint32_t & val = sorted[i].second; - builder.insert(num, val); - mock.insert(num, val); - } - tree.assign(builder); - assert(numEntries == tree.size()); - assert(tree.isValid()); - - TEST_DO(EXPECT_TRUE(assertAggregated(mock, tree))); - EXPECT_EQUAL(numEntries, tree.size()); - EXPECT_TRUE(tree.isValid()); - MyTree::Iterator itr = tree.begin(); - MyTree::Iterator ritr = itr; - if (numEntries > 0) { - EXPECT_TRUE(ritr.valid()); - EXPECT_EQUAL(0u, ritr.position()); - --ritr; - EXPECT_TRUE(!ritr.valid()); - EXPECT_EQUAL(numEntries, ritr.position()); - --ritr; - EXPECT_TRUE(ritr.valid()); - EXPECT_EQUAL(numEntries - 1, ritr.position()); - } else { - EXPECT_TRUE(!ritr.valid()); - EXPECT_EQUAL(0u, ritr.position()); - --ritr; - EXPECT_TRUE(!ritr.valid()); - EXPECT_EQUAL(0u, ritr.position()); - } - for (size_t i = 0; i < numEntries; ++i) { - EXPECT_TRUE(itr.valid()); - EXPECT_EQUAL(sorted[i].first, itr.getKey()); - EXPECT_EQUAL(sorted[i].second, itr.getData()); - ++itr; - } - EXPECT_TRUE(!itr.valid()); - ritr = itr; - EXPECT_TRUE(!ritr.valid()); - --ritr; - for (size_t i = 0; i < numEntries; ++i) { - EXPECT_TRUE(ritr.valid()); - EXPECT_EQUAL(sorted[numEntries - 1 - i].first, ritr.getKey()); - EXPECT_EQUAL(sorted[numEntries - 1 - i].second, ritr.getData()); - --ritr; - } - EXPECT_TRUE(!ritr.valid()); -} - -void -Test::requireThatWeCanInsertAndRemoveFromTree() -{ - GenerationHandler g; - MyTree tree; - MockTree mock; - std::vector exp; - std::vector sorted; - TEST_DO(EXPECT_TRUE(assertAggregated(mock, tree))); - size_t numEntries = 1000; - generateData(exp, numEntries); - sorted = exp; - std::sort(sorted.begin(), sorted.end(), LeafPairLess()); - // insert entries - for (size_t i = 0; i < numEntries; ++i) { - int num = UNWRAP(exp[i].first); - const uint32_t & val = exp[i].second; - EXPECT_TRUE(!tree.find(num).valid()); - //LOG(info, "insert[%zu](%d, %s)", i, num, str.c_str()); - EXPECT_TRUE(tree.insert(num, val)); - EXPECT_TRUE(!tree.insert(num, val)); - mock.insert(num, val); - TEST_DO(EXPECT_TRUE(assertAggregated(mock, tree))); - for (size_t j = 0; j <= i; ++j) { - //LOG(info, "find[%zu](%d)", j, exp[j].first._val); - MyTree::Iterator itr = tree.find(exp[j].first); - EXPECT_TRUE(itr.valid()); - EXPECT_EQUAL(exp[j].first, itr.getKey()); - EXPECT_EQUAL(exp[j].second, itr.getData()); - } - EXPECT_EQUAL(i + 1u, tree.size()); - EXPECT_TRUE(tree.isValid()); - buildSubTree(exp, i + 1); - } - //std::cout << "tree: " 
<< tree.toString() << std::endl; - - { - MyTree::Iterator itr = tree.begin(); - MyTree::Iterator itre = itr; - MyTree::Iterator itre2; - MyTree::Iterator ritr = itr; - while (itre.valid()) - ++itre; - if (numEntries > 0) { - EXPECT_TRUE(ritr.valid()); - EXPECT_EQUAL(0u, ritr.position()); - --ritr; - EXPECT_TRUE(!ritr.valid()); - EXPECT_EQUAL(numEntries, ritr.position()); - --ritr; - EXPECT_TRUE(ritr.valid()); - EXPECT_EQUAL(numEntries - 1, ritr.position()); - } else { - EXPECT_TRUE(!ritr.valid()); - EXPECT_EQUAL(0u, ritr.position()); - --ritr; - EXPECT_TRUE(!ritr.valid()); - EXPECT_EQUAL(0u, ritr.position()); - } - MyTree::Iterator pitr = itr; - for (size_t i = 0; i < numEntries; ++i) { - ssize_t si = i; - ssize_t sileft = numEntries - i; - EXPECT_TRUE(itr.valid()); - EXPECT_EQUAL(i, itr.position()); - EXPECT_EQUAL(sileft, itre - itr); - EXPECT_EQUAL(-sileft, itr - itre); - EXPECT_EQUAL(sileft, itre2 - itr); - EXPECT_EQUAL(-sileft, itr - itre2); - EXPECT_EQUAL(si, itr - tree.begin()); - EXPECT_EQUAL(-si, tree.begin() - itr); - EXPECT_EQUAL(i != 0, itr - pitr); - EXPECT_EQUAL(-(i != 0), pitr - itr); - EXPECT_EQUAL(sorted[i].first, itr.getKey()); - EXPECT_EQUAL(sorted[i].second, itr.getData()); - pitr = itr; - ++itr; - ritr = itr; - --ritr; - EXPECT_TRUE(ritr.valid()); - EXPECT_TRUE(ritr == pitr); - } - EXPECT_TRUE(!itr.valid()); - EXPECT_EQUAL(numEntries, itr.position()); - ssize_t sNumEntries = numEntries; - EXPECT_EQUAL(sNumEntries, itr - tree.begin()); - EXPECT_EQUAL(-sNumEntries, tree.begin() - itr); - EXPECT_EQUAL(1, itr - pitr); - EXPECT_EQUAL(-1, pitr - itr); - } - // compact full tree by calling incremental compaction methods in a loop - { - MyTree::NodeAllocatorType &manager = tree.getAllocator(); - std::vector toHold = manager.startCompact(); - MyTree::Iterator itr = tree.begin(); - tree.setRoot(itr.moveFirstLeafNode(tree.getRoot())); - while (itr.valid()) { - // LOG(info, "Leaf moved to %d", UNWRAP(itr.getKey())); - itr.moveNextLeafNode(); - } - manager.finishCompact(toHold); - manager.freeze(); - manager.transferHoldLists(g.getCurrentGeneration()); - g.incGeneration(); - manager.trimHoldLists(g.getFirstUsedGeneration()); - } - // remove entries - for (size_t i = 0; i < numEntries; ++i) { - int num = UNWRAP(exp[i].first); - //LOG(info, "remove[%zu](%d)", i, num); - //std::cout << "tree: " << tree.toString() << std::endl; - EXPECT_TRUE(tree.remove(num)); - EXPECT_TRUE(!tree.find(num).valid()); - EXPECT_TRUE(!tree.remove(num)); - EXPECT_TRUE(tree.isValid()); - mock.erase(num); - TEST_DO(EXPECT_TRUE(assertAggregated(mock, tree))); - for (size_t j = i + 1; j < numEntries; ++j) { - MyTree::Iterator itr = tree.find(exp[j].first); - EXPECT_TRUE(itr.valid()); - EXPECT_EQUAL(exp[j].first, itr.getKey()); - EXPECT_EQUAL(exp[j].second, itr.getData()); - } - EXPECT_EQUAL(numEntries - 1 - i, tree.size()); - } -} - -void -Test::requireThatSortedTreeInsertWorks() -{ - { - MyTree tree; - MockTree mock; - TEST_DO(EXPECT_TRUE(assertAggregated(mock, tree))); - for (int i = 0; i < 1000; ++i) { - EXPECT_TRUE(tree.insert(i, toVal(i))); - mock.insert(i, toVal(i)); - MyTree::Iterator itr = tree.find(i); - EXPECT_TRUE(itr.valid()); - EXPECT_EQUAL(toVal(i), itr.getData()); - EXPECT_TRUE(tree.isValid()); - TEST_DO(EXPECT_TRUE(assertAggregated(mock, tree))); - } - } - { - MyTree tree; - MockTree mock; - TEST_DO(EXPECT_TRUE(assertAggregated(mock, tree))); - for (int i = 1000; i > 0; --i) { - EXPECT_TRUE(tree.insert(i, toVal(i))); - mock.insert(i, toVal(i)); - MyTree::Iterator itr = tree.find(i); - 
EXPECT_TRUE(itr.valid()); - EXPECT_EQUAL(toVal(i), itr.getData()); - EXPECT_TRUE(tree.isValid()); - TEST_DO(EXPECT_TRUE(assertAggregated(mock, tree))); - } - } -} - -void -Test::requireThatCornerCaseTreeFindWorks() -{ - GenerationHandler g; - MyTree tree; - for (int i = 1; i < 100; ++i) { - tree.insert(i, toVal(i)); - } - EXPECT_TRUE(!tree.find(0).valid()); // lower than lowest - EXPECT_TRUE(!tree.find(1000).valid()); // higher than highest -} - -void -Test::requireThatBasicTreeIteratorWorks() -{ - GenerationHandler g; - MyTree tree; - EXPECT_TRUE(!tree.begin().valid()); - std::vector exp; - size_t numEntries = 1000; - generateData(exp, numEntries); - for (size_t i = 0; i < numEntries; ++i) { - tree.insert(exp[i].first, exp[i].second); - } - std::sort(exp.begin(), exp.end(), LeafPairLess()); - size_t ei = 0; - MyTree::Iterator itr = tree.begin(); - MyTree::Iterator ritr; - EXPECT_EQUAL(1000u, itr.size()); - for (; itr.valid(); ++itr) { - //LOG(info, "itr(%d, %s)", itr.getKey(), itr.getData().c_str()); - EXPECT_EQUAL(UNWRAP(exp[ei].first), UNWRAP(itr.getKey())); - EXPECT_EQUAL(exp[ei].second, itr.getData()); - ei++; - ritr = itr; - } - EXPECT_EQUAL(numEntries, ei); - for (; ritr.valid(); --ritr) { - --ei; - //LOG(info, "itr(%d, %s)", itr.getKey(), itr.getData().c_str()); - EXPECT_EQUAL(UNWRAP(exp[ei].first), UNWRAP(ritr.getKey())); - EXPECT_EQUAL(exp[ei].second, ritr.getData()); - } -} - - - -void -Test::requireThatTreeIteratorAssignWorks() -{ - GenerationHandler g; - MyTree tree; - for (int i = 0; i < 1000; ++i) { - tree.insert(i, toVal(i)); - } - for (int i = 0; i < 1000; ++i) { - MyTree::Iterator itr = tree.find(i); - MyTree::Iterator itr2 = itr; - EXPECT_TRUE(itr == itr2); - int expNum = i; - for (; itr2.valid(); ++itr2) { - EXPECT_EQUAL(expNum++, UNWRAP(itr2.getKey())); - } - EXPECT_EQUAL(1000, expNum); - } -} - -struct UpdKeyComp { - int _remainder; - mutable size_t _numErrors; - UpdKeyComp(int remainder) : _remainder(remainder), _numErrors(0) {} - bool operator() (const int & lhs, const int & rhs) const { - if (lhs % 2 != _remainder) ++_numErrors; - if (rhs % 2 != _remainder) ++_numErrors; - return lhs < rhs; - } -}; - -void -Test::requireThatUpdateOfKeyWorks() -{ - typedef BTree UpdKeyTree; - typedef UpdKeyTree::Iterator UpdKeyTreeIterator; - GenerationHandler g; - UpdKeyTree t; - UpdKeyComp cmp1(0); - for (int i = 0; i < 1000; i+=2) { - EXPECT_TRUE(t.insert(i, BTreeNoLeafData(), cmp1)); - } - EXPECT_EQUAL(0u, cmp1._numErrors); - for (int i = 0; i < 1000; i+=2) { - UpdKeyTreeIterator itr = t.find(i, cmp1); - itr.writeKey(i + 1); - } - UpdKeyComp cmp2(1); - for (int i = 1; i < 1000; i+=2) { - UpdKeyTreeIterator itr = t.find(i, cmp2); - EXPECT_TRUE(itr.valid()); - } - EXPECT_EQUAL(0u, cmp2._numErrors); -} - - -void -Test::requireThatUpdateOfDataWorks() -{ - // typedef MyTree::Iterator Iterator; - GenerationHandler g; - MyTree t; - MockTree mock; - MyAggrCalc ac; - MyTree::NodeAllocatorType &manager = t.getAllocator(); - TEST_DO(EXPECT_TRUE(assertAggregated(mock, t))); - for (int i = 0; i < 1000; i+=2) { - EXPECT_TRUE(t.insert(i, toVal(i))); - mock.insert(i, toVal(i)); - TEST_DO(EXPECT_TRUE(assertAggregated(mock, t))); - } - freezeTree(g, manager); - for (int i = 0; i < 1000; i+=2) { - MyTree::Iterator itr = t.find(i); - MyTree::Iterator itr2 = itr; - t.thaw(itr); - itr.updateData(toHighVal(i), ac); - EXPECT_EQUAL(toHighVal(i), itr.getData()); - EXPECT_EQUAL(toVal(i), itr2.getData()); - mock.erase(i); - mock.insert(i, toHighVal(i)); - TEST_DO(EXPECT_TRUE(assertAggregated(mock, t))); - 
freezeTree(g, manager); - itr = t.find(i); - itr2 = itr; - t.thaw(itr); - itr.updateData(toLowVal(i), ac); - EXPECT_EQUAL(toLowVal(i), itr.getData()); - EXPECT_EQUAL(toHighVal(i), itr2.getData()); - mock.erase(i); - mock.insert(i, toLowVal(i)); - TEST_DO(EXPECT_TRUE(assertAggregated(mock, t))); - freezeTree(g, manager); - itr = t.find(i); - itr2 = itr; - t.thaw(itr); - itr.updateData(toVal(i), ac); - EXPECT_EQUAL(toVal(i), itr.getData()); - EXPECT_EQUAL(toLowVal(i), itr2.getData()); - mock.erase(i); - mock.insert(i, toVal(i)); - TEST_DO(EXPECT_TRUE(assertAggregated(mock, t))); - freezeTree(g, manager); - } -} - - -template -void -Test::requireThatSmallNodesWorks() -{ - GenerationHandler g; - TreeStore s; - MockTree mock; - - EntryRef root; - EXPECT_EQUAL(0u, s.size(root)); - EXPECT_TRUE(s.isSmallArray(root)); - TEST_DO(EXPECT_TRUE(assertAggregated(mock, s, root))); - EXPECT_TRUE(s.insert(root, 40, toVal(40))); - mock.insert(40, toVal(40)); - EXPECT_TRUE(!s.insert(root, 40, toNotVal(40))); - EXPECT_EQUAL(1u, s.size(root)); - EXPECT_TRUE(s.isSmallArray(root)); - TEST_DO(EXPECT_TRUE(assertAggregated(mock, s, root))); - EXPECT_TRUE(s.insert(root, 20, toVal(20))); - mock.insert(20, toVal(20)); - EXPECT_TRUE(!s.insert(root, 20, toNotVal(20))); - EXPECT_TRUE(!s.insert(root, 40, toNotVal(40))); - EXPECT_EQUAL(2u, s.size(root)); - EXPECT_TRUE(s.isSmallArray(root)); - TEST_DO(EXPECT_TRUE(assertAggregated(mock, s, root))); - EXPECT_TRUE(s.insert(root, 60, toVal(60))); - mock.insert(60, toVal(60)); - EXPECT_TRUE(!s.insert(root, 60, toNotVal(60))); - EXPECT_TRUE(!s.insert(root, 20, toNotVal(20))); - EXPECT_TRUE(!s.insert(root, 40, toNotVal(40))); - EXPECT_EQUAL(3u, s.size(root)); - EXPECT_TRUE(s.isSmallArray(root)); - TEST_DO(EXPECT_TRUE(assertAggregated(mock, s, root))); - EXPECT_TRUE(s.insert(root, 50, toVal(50))); - mock.insert(50, toVal(50)); - EXPECT_TRUE(!s.insert(root, 50, toNotVal(50))); - EXPECT_TRUE(!s.insert(root, 60, toNotVal(60))); - EXPECT_TRUE(!s.insert(root, 20, toNotVal(20))); - EXPECT_TRUE(!s.insert(root, 40, toNotVal(40))); - EXPECT_EQUAL(4u, s.size(root)); - EXPECT_TRUE(s.isSmallArray(root)); - TEST_DO(EXPECT_TRUE(assertAggregated(mock, s, root))); - - for (uint32_t i = 0; i < 100; ++i) { - EXPECT_TRUE(s.insert(root, 1000 + i, 42)); - mock.insert(1000 + i, 42); - if (i > 0) { - EXPECT_TRUE(!s.insert(root, 1000 + i - 1, 42)); - } - EXPECT_EQUAL(5u + i, s.size(root)); - EXPECT_EQUAL(5u + i <= 8u, s.isSmallArray(root)); - TEST_DO(EXPECT_TRUE(assertAggregated(mock, s, root))); - } - EXPECT_TRUE(s.remove(root, 40)); - mock.erase(40); - EXPECT_TRUE(!s.remove(root, 40)); - EXPECT_EQUAL(103u, s.size(root)); - EXPECT_TRUE(!s.isSmallArray(root)); - TEST_DO(EXPECT_TRUE(assertAggregated(mock, s, root))); - EXPECT_TRUE(s.remove(root, 20)); - mock.erase(20); - EXPECT_TRUE(!s.remove(root, 20)); - EXPECT_EQUAL(102u, s.size(root)); - EXPECT_TRUE(!s.isSmallArray(root)); - TEST_DO(EXPECT_TRUE(assertAggregated(mock, s, root))); - EXPECT_TRUE(s.remove(root, 50)); - mock.erase(50); - EXPECT_TRUE(!s.remove(root, 50)); - EXPECT_EQUAL(101u, s.size(root)); - EXPECT_TRUE(!s.isSmallArray(root)); - TEST_DO(EXPECT_TRUE(assertAggregated(mock, s, root))); - for (uint32_t i = 0; i < 100; ++i) { - EXPECT_TRUE(s.remove(root, 1000 + i)); - mock.erase(1000 + i); - if (i > 0) { - EXPECT_TRUE(!s.remove(root, 1000 + i - 1)); - } - EXPECT_EQUAL(100 - i, s.size(root)); - EXPECT_EQUAL(100 - i <= 8u, s.isSmallArray(root)); - TEST_DO(EXPECT_TRUE(assertAggregated(mock, s, root))); - } - EXPECT_EQUAL(1u, s.size(root)); - 
EXPECT_TRUE(s.isSmallArray(root)); - - s.clear(root); - s.clearBuilder(); - s.freeze(); - s.transferHoldLists(g.getCurrentGeneration()); - g.incGeneration(); - s.trimHoldLists(g.getFirstUsedGeneration()); -} - - -int -Test::Main() -{ - TEST_INIT("btreeaggregation_test"); - - requireThatNodeInsertWorks(); - requireThatNodeSplitInsertWorks(); - requireThatTreeInsertWorks(); - requireThatNodeStealWorks(); - requireThatNodeRemoveWorks(); - requireThatWeCanInsertAndRemoveFromTree(); - requireThatSortedTreeInsertWorks(); - requireThatCornerCaseTreeFindWorks(); - requireThatBasicTreeIteratorWorks(); - requireThatTreeIteratorAssignWorks(); - requireThatUpdateOfKeyWorks(); - requireThatUpdateOfDataWorks(); - TEST_DO(requireThatSmallNodesWorks()); - TEST_DO(requireThatSmallNodesWorks()); - - TEST_DONE(); -} - -} -} - -TEST_APPHOOK(search::btree::Test); diff --git a/searchlib/src/tests/btree/frozenbtree_test.cpp b/searchlib/src/tests/btree/frozenbtree_test.cpp deleted file mode 100644 index 988239a5438..00000000000 --- a/searchlib/src/tests/btree/frozenbtree_test.cpp +++ /dev/null @@ -1,469 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#define DEBUG_FROZENBTREE -#define LOG_FROZENBTREEXX -#include -#include -#include -#include -#include -#include -#include - -#include -LOG_SETUP("frozenbtree_test"); - -using search::btree::BTreeRoot; -using search::btree::BTreeNode; -using search::btree::BTreeInternalNode; -using search::btree::BTreeLeafNode; -using search::btree::BTreeDefaultTraits; -using vespalib::GenerationHandler; - -namespace search { - - -class FrozenBTreeTest : public vespalib::TestApp -{ -public: - typedef int KeyType; -private: - std::vector _randomValues; - std::vector _sortedRandomValues; - -public: - typedef int DataType; - typedef BTreeRoot, - BTreeDefaultTraits> Tree; - typedef Tree::NodeAllocatorType NodeAllocator; - typedef Tree::InternalNodeType InternalNodeType; - typedef Tree::LeafNodeType LeafNodeType; - typedef Tree::Iterator Iterator; - typedef Tree::ConstIterator ConstIterator; -private: - GenerationHandler *_generationHandler; - NodeAllocator *_allocator; - Tree *_tree; - - Rand48 _randomGenerator; - - void allocTree(); - void freeTree(bool verbose); - void fillRandomValues(unsigned int count); - void insertRandomValues(Tree &tree, NodeAllocator &allocator, const std::vector &values); - void removeRandomValues(Tree &tree, NodeAllocator &allocator, const std::vector &values); - void lookupRandomValues(const Tree &tree, NodeAllocator &allocator, const std::vector &values); - void lookupGoneRandomValues(const Tree &tree, NodeAllocator &allocator, const std::vector &values); - void lookupFrozenRandomValues(const Tree &tree, NodeAllocator &allocator, const std::vector &values); - void sortRandomValues(); - void traverseTreeIterator(const Tree &tree, NodeAllocator &allocator, - const std::vector &sorted, bool frozen); - - void printSubEnumTree(BTreeNode::Ref node, NodeAllocator &allocator, int indent) const; - void printEnumTree(const Tree *tree, NodeAllocator &allocator); - - static const char *frozenName(bool frozen) { - return frozen ? 
"frozen" : "thawed"; - } -public: - FrozenBTreeTest(); - ~FrozenBTreeTest(); - - int Main() override; -}; - -FrozenBTreeTest::FrozenBTreeTest() - : vespalib::TestApp(), - _randomValues(), - _sortedRandomValues(), - _generationHandler(NULL), - _allocator(NULL), - _tree(NULL), - _randomGenerator() -{} -FrozenBTreeTest::~FrozenBTreeTest() {} - -void -FrozenBTreeTest::allocTree() -{ - assert(_generationHandler == NULL); - assert(_allocator == NULL); - assert(_tree == NULL); - _generationHandler = new GenerationHandler; - _allocator = new NodeAllocator(); - _tree = new Tree; -} - - -void -FrozenBTreeTest::freeTree(bool verbose) -{ -#if 0 - LOG(info, - "freeTree before clear: %" PRIu64 " (%" PRIu64 " held)" - ", %" PRIu32 " leaves", - static_cast(_intTree->getUsedMemory()), - static_cast(_intTree->getHeldMemory()), - _intTree->validLeaves()); - _intTree->clear(); - LOG(info, - "freeTree before unhold: %" PRIu64 " (%" PRIu64 " held)", - static_cast(_intTree->getUsedMemory()), - static_cast(_intTree->getHeldMemory())); - _intTree->dropFrozen(); - _intTree->removeOldGenerations(_intTree->getGeneration() + 1); - LOG(info, - "freeTree after unhold: %" PRIu64 " (%" PRIu64 " held)", - static_cast(_intTree->getUsedMemory()), - static_cast(_intTree->getHeldMemory())); - if (verbose) - LOG(info, - "%d+%d leftover tree nodes", - _intTree->getNumInternalNodes(), - _intTree->getNumLeafNodes()); - EXPECT_TRUE(_intTree->getNumInternalNodes() == 0 && - _intTree->getNumLeafNodes() == 0); - delete _intTree; - _intTree = NULL; - delete _intKeyStore; - _intKeyStore = NULL; -#endif - (void) verbose; - _tree->clear(*_allocator); - _allocator->freeze(); - _allocator->transferHoldLists(_generationHandler->getCurrentGeneration()); - _generationHandler->incGeneration(); - _allocator->trimHoldLists(_generationHandler->getFirstUsedGeneration()); - delete _tree; - _tree = NULL; - delete _allocator; - _allocator = NULL; - delete _generationHandler; - _generationHandler = NULL; -} - - -void -FrozenBTreeTest::fillRandomValues(unsigned int count) -{ - unsigned int i; - - LOG(info, "Filling %u random values", count); - _randomValues.clear(); - _randomValues.reserve(count); - _randomGenerator.srand48(42); - for (i = 0; i &values) -{ - std::vector::const_iterator i(values.begin()); - std::vector::const_iterator ie(values.end()); - Iterator p; - - LOG(info, "insertRandomValues start"); - for (; i != ie; ++i) { -#ifdef LOG_FROZENBTREE - LOG(info, "Try lookup %d before insert", *i); -#endif - p = tree.find(*i, allocator); - if (!p.valid()) { - DataType val = *i + 42; - if (tree.insert(*i, val, allocator)) - p = tree.find(*i, allocator); - } - ASSERT_TRUE(p.valid() && p.getKey() == *i && p.getData() == *i + 42); -#ifdef DEBUG_FROZENBTREEX - printEnumTree(&tree); -#endif - } - ASSERT_TRUE(tree.isValid(allocator)); - ASSERT_TRUE(tree.isValidFrozen(allocator)); - LOG(info, "insertRandomValues done"); -} - - -void -FrozenBTreeTest:: -removeRandomValues(Tree &tree, - NodeAllocator &allocator, - const std::vector & values) -{ - std::vector::const_iterator i(values.begin()); - std::vector::const_iterator ie(values.end()); - Iterator p; - - LOG(info, "removeRandomValues start"); - for (; i != ie; ++i) { -#ifdef LOG_FROZENBTREE - LOG(info, "Try lookup %d before remove", *i); -#endif - p = tree.find(*i, allocator); - if (p.valid()) { - if (tree.remove(*i, allocator)) - p = tree.find(*i, allocator); - } - ASSERT_TRUE(!p.valid()); -#ifdef DEBUG_FROZENBTREEX - tree.printTree(); -#endif - } - ASSERT_TRUE(tree.isValid(allocator)); - 
ASSERT_TRUE(tree.isValidFrozen(allocator)); - LOG(info, "removeRandomValues done"); -} - - -void -FrozenBTreeTest:: -lookupRandomValues(const Tree &tree, - NodeAllocator &allocator, - const std::vector &values) -{ - std::vector::const_iterator i(values.begin()); - std::vector::const_iterator ie(values.end()); - Iterator p; - - LOG(info, "lookupRandomValues start"); - for (; i != ie; ++i) { - p = tree.find(*i, allocator); - ASSERT_TRUE(p.valid() && p.getKey() == *i); - } - LOG(info, "lookupRandomValues done"); -} - - -void -FrozenBTreeTest:: -lookupGoneRandomValues(const Tree &tree, - NodeAllocator &allocator, - const std::vector &values) -{ - std::vector::const_iterator i(values.begin()); - std::vector::const_iterator ie(values.end()); - Iterator p; - - LOG(info, "lookupGoneRandomValues start"); - for (; i != ie; ++i) { - p = tree.find(*i, allocator); - ASSERT_TRUE(!p.valid()); - } - LOG(info, "lookupGoneRandomValues done"); -} - - -void -FrozenBTreeTest:: -lookupFrozenRandomValues(const Tree &tree, - NodeAllocator &allocator, - const std::vector &values) -{ - std::vector::const_iterator i(values.begin()); - std::vector::const_iterator ie(values.end()); - ConstIterator p; - - LOG(info, "lookupFrozenRandomValues start"); - for (; i != ie; ++i) { - p = tree.getFrozenView(allocator).find(*i, std::less()); - ASSERT_TRUE(p.valid() && p.getKey() == *i && p.getData() == *i + 42); - } - LOG(info, "lookupFrozenRandomValues done"); -} - - -void -FrozenBTreeTest::sortRandomValues() -{ - std::vector::iterator i; - std::vector::iterator ie; - uint32_t okcnt; - int prevVal; - std::vector sorted; - - LOG(info, "sortRandomValues start"); - sorted = _randomValues; - std::sort(sorted.begin(), sorted.end()); - _sortedRandomValues.clear(); - _sortedRandomValues.reserve(sorted.size()); - - okcnt = 0; - prevVal = 0; - ie = sorted.end(); - for (i = sorted.begin(); i != ie; ++i) { - if (i == _sortedRandomValues.begin() || *i > prevVal) { - okcnt++; - _sortedRandomValues.push_back(*i); - } else if (*i == prevVal) - okcnt++; - else - LOG_ABORT("should not be reached"); - prevVal = *i; - } - EXPECT_TRUE(okcnt == sorted.size()); - LOG(info, "sortRandomValues done"); -} - - -void -FrozenBTreeTest:: -traverseTreeIterator(const Tree &tree, - NodeAllocator &allocator, - const std::vector &sorted, - bool frozen) -{ - LOG(info, - "traverseTreeIterator %s start", - frozenName(frozen)); - - std::vector::const_iterator i; - - i = sorted.begin(); - if (frozen) { - ConstIterator ai; - ai = tree.getFrozenView(allocator).begin(); - for (;ai.valid(); ++ai, ++i) - { - ASSERT_TRUE(ai.getKey() == *i); - } - } else { - Iterator ai; - ai = tree.begin(allocator); - for (;ai.valid(); ++ai, ++i) - { - ASSERT_TRUE(ai.getKey() == *i); - } - } - - - ASSERT_TRUE(i == sorted.end()); - - LOG(info, - "traverseTreeIterator %s done", - frozenName(frozen)); -} - - -void -FrozenBTreeTest:: -printSubEnumTree(BTreeNode::Ref node, - NodeAllocator &allocator, - int indent) const -{ - // typedef BTreeNode Node; - typedef LeafNodeType LeafNode; - typedef InternalNodeType InternalNode; - BTreeNode::Ref subNode; - unsigned int i; - - if (allocator.isLeafRef(node)) { - const LeafNode *lnode = allocator.mapLeafRef(node); - printf("%*s LeafNode %s valid=%d\n", - indent, "", - lnode->getFrozen() ? 
"frozen" : "thawed", - lnode->validSlots()); - for (i = 0; i < lnode->validSlots(); i++) { - - KeyType k = lnode->getKey(i); - DataType d = lnode->getData(i); - printf("leaf value %3d %d %d\n", - (int) i, - (int) k, - (int) d); - } - return; - } - const InternalNode *inode = allocator.mapInternalRef(node); - printf("%*s IntermediteNode %s valid=%d\n", - indent, "", - inode->getFrozen() ? "frozen" : "thawed", - inode->validSlots()); - for (i = 0; i < inode->validSlots(); i++) { - subNode = inode->getChild(i); - assert(subNode != BTreeNode::Ref()); - printSubEnumTree(subNode, allocator, indent + 4); - } -} - - -void -FrozenBTreeTest::printEnumTree(const Tree *tree, - NodeAllocator &allocator) -{ - printf("Tree Dump start\n"); - if (!NodeAllocator::isValidRef(tree->getRoot())) { - printf("EMPTY\n"); - } else { - printSubEnumTree(tree->getRoot(), allocator, 0); - } - printf("Tree Dump done\n"); -} - - - -int -FrozenBTreeTest::Main() -{ - TEST_INIT("frozenbtree_test"); - - fillRandomValues(1000); - sortRandomValues(); - - allocTree(); - insertRandomValues(*_tree, *_allocator, _randomValues); - lookupRandomValues(*_tree, *_allocator, _randomValues); - _allocator->freeze(); - _allocator->transferHoldLists(_generationHandler->getCurrentGeneration()); - lookupFrozenRandomValues(*_tree, *_allocator, _randomValues); - traverseTreeIterator(*_tree, - *_allocator, - _sortedRandomValues, - false); - traverseTreeIterator(*_tree, - *_allocator, - _sortedRandomValues, - true); - traverseTreeIterator(*_tree, - *_allocator, - _sortedRandomValues, - false); - traverseTreeIterator(*_tree, - *_allocator, - _sortedRandomValues, - true); - removeRandomValues(*_tree, *_allocator, _randomValues); - lookupGoneRandomValues(*_tree, *_allocator, _randomValues); - lookupFrozenRandomValues(*_tree, *_allocator,_randomValues); - traverseTreeIterator(*_tree, - *_allocator, - _sortedRandomValues, - true); - insertRandomValues(*_tree, *_allocator, _randomValues); - freeTree(true); - - fillRandomValues(1000000); - sortRandomValues(); - - allocTree(); - insertRandomValues(*_tree, *_allocator, _randomValues); - traverseTreeIterator(*_tree, - *_allocator, - _sortedRandomValues, - false); - freeTree(false); - - TEST_DONE(); -} - -} - -TEST_APPHOOK(search::FrozenBTreeTest); diff --git a/searchlib/src/tests/btree/iteratespeed.cpp b/searchlib/src/tests/btree/iteratespeed.cpp deleted file mode 100644 index 2f60b06d675..00000000000 --- a/searchlib/src/tests/btree/iteratespeed.cpp +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
- -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include -LOG_SETUP("iteratespeed"); - -namespace search { -namespace btree { - -enum class IterateMethod -{ - FORWARD, - BACKWARDS, - LAMBDA -}; - -class IterateSpeed : public FastOS_Application -{ - template - void - workLoop(int loops, bool enableForward, bool enableBackwards, - bool enableLambda, int leafSlots); - void usage(); - int Main() override; -}; - - -namespace { - -const char *iterateMethodName(IterateMethod iterateMethod) -{ - switch (iterateMethod) { - case IterateMethod::FORWARD: - return "forward"; - case IterateMethod::BACKWARDS: - return "backwards"; - default: - return "lambda"; - } -} - -} - -template -void -IterateSpeed::workLoop(int loops, bool enableForward, bool enableBackwards, - bool enableLambda, int leafSlots) -{ - if ((iterateMethod == IterateMethod::FORWARD && !enableForward) || - (iterateMethod == IterateMethod::BACKWARDS && !enableBackwards) || - (iterateMethod == IterateMethod::LAMBDA && !enableLambda) || - (leafSlots != 0 && - leafSlots != static_cast(Traits::LEAF_SLOTS))) - return; - vespalib::GenerationHandler g; - using Tree = BTree, Traits>; - using Builder = typename Tree::Builder; - using ConstIterator = typename Tree::ConstIterator; - Tree tree; - Builder builder(tree.getAllocator()); - size_t numEntries = 1000000; - size_t numInnerLoops = 1000; - for (size_t i = 0; i < numEntries; ++i) { - builder.insert(i, 0); - } - tree.assign(builder); - assert(numEntries == tree.size()); - assert(tree.isValid()); - for (int l = 0; l < loops; ++l) { - fastos::TimeStamp before = fastos::ClockSystem::now(); - uint64_t sum = 0; - for (size_t innerl = 0; innerl < numInnerLoops; ++innerl) { - if (iterateMethod == IterateMethod::FORWARD) { - ConstIterator itr(BTreeNode::Ref(), tree.getAllocator()); - itr.begin(tree.getRoot()); - while (itr.valid()) { - sum += itr.getKey(); - ++itr; - } - } else if (iterateMethod == IterateMethod::BACKWARDS) { - ConstIterator itr(BTreeNode::Ref(), tree.getAllocator()); - itr.end(tree.getRoot()); - --itr; - while (itr.valid()) { - sum += itr.getKey(); - --itr; - } - } else { - tree.getAllocator().foreach_key(tree.getRoot(), - [&](int key) { sum += key; } ); - } - } - fastos::TimeStamp after = fastos::ClockSystem::now(); - double used = after.sec() - before.sec(); - printf("Elapsed time for iterating %ld steps is %8.5f, " - "direction=%s, fanout=%u,%u, sum=%" PRIu64 "\n", - numEntries * numInnerLoops, - used, - iterateMethodName(iterateMethod), - static_cast(Traits::LEAF_SLOTS), - static_cast(Traits::INTERNAL_SLOTS), - sum); - fflush(stdout); - } -} - - -void -IterateSpeed::usage() -{ - printf("iteratspeed " - "[-F ] " - "[-b] " - "[-c ] " - "[-f] " - "[-l]\n"); -} - -int -IterateSpeed::Main() -{ - int argi; - char c; - const char *optArg; - argi = 1; - int loops = 1; - bool backwards = false; - bool forwards = false; - bool lambda = false; - int leafSlots = 0; - while ((c = GetOpt("F:bc:fl", optArg, argi)) != -1) { - switch (c) { - case 'F': - leafSlots = atoi(optArg); - break; - case 'b': - backwards = true; - break; - case 'c': - loops = atoi(optArg); - break; - case 'f': - forwards = true; - break; - case 'l': - lambda = true; - break; - default: - usage(); - return 1; - } - } - if (!backwards && !forwards && !lambda) { - backwards = true; - forwards = true; - lambda = true; - } - - using SmallTraits = BTreeTraits<4, 4, 31, false>; - using DefTraits = 
BTreeDefaultTraits; - using LargeTraits = BTreeTraits<32, 16, 10, true>; - using HugeTraits = BTreeTraits<64, 16, 10, true>; - workLoop(loops, forwards, backwards, - lambda, leafSlots); - workLoop(loops, forwards, backwards, - lambda, leafSlots); - workLoop(loops, forwards, backwards, - lambda, leafSlots); - workLoop(loops, forwards, backwards, - lambda, leafSlots); - workLoop(loops, forwards, backwards, - lambda, leafSlots); - workLoop(loops, forwards, backwards, - lambda, leafSlots); - workLoop(loops, forwards, backwards, - lambda, leafSlots); - workLoop(loops, forwards, backwards, - lambda, leafSlots); - workLoop(loops, forwards, backwards, - lambda, leafSlots); - workLoop(loops, forwards, backwards, - lambda, leafSlots); - workLoop(loops, forwards, backwards, - lambda, leafSlots); - workLoop(loops, forwards, backwards, - lambda, leafSlots); - return 0; -} - -} -} - -FASTOS_MAIN(search::btree::IterateSpeed); - - diff --git a/searchlib/src/tests/datastore/array_store/CMakeLists.txt b/searchlib/src/tests/datastore/array_store/CMakeLists.txt deleted file mode 100644 index c99759b7350..00000000000 --- a/searchlib/src/tests/datastore/array_store/CMakeLists.txt +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -vespa_add_executable(searchlib_array_store_test_app TEST - SOURCES - array_store_test.cpp - DEPENDS - searchlib -) -vespa_add_test(NAME searchlib_array_store_test_app COMMAND searchlib_array_store_test_app) diff --git a/searchlib/src/tests/datastore/array_store/array_store_test.cpp b/searchlib/src/tests/datastore/array_store/array_store_test.cpp deleted file mode 100644 index 0af9002ae75..00000000000 --- a/searchlib/src/tests/datastore/array_store/array_store_test.cpp +++ /dev/null @@ -1,360 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
- -#include -#include -#include -#include -#include -#include - -using namespace search::datastore; -using vespalib::MemoryUsage; -using vespalib::ArrayRef; -using generation_t = vespalib::GenerationHandler::generation_t; -using MemStats = search::datastore::test::MemStats; - -constexpr float ALLOC_GROW_FACTOR = 0.2; - -template > -struct Fixture -{ - using EntryRefType = RefT; - using ArrayStoreType = ArrayStore; - using LargeArray = typename ArrayStoreType::LargeArray; - using ConstArrayRef = typename ArrayStoreType::ConstArrayRef; - using EntryVector = std::vector; - using value_type = EntryT; - using ReferenceStore = std::map; - - ArrayStoreType store; - ReferenceStore refStore; - generation_t generation; - Fixture(uint32_t maxSmallArraySize) - : store(ArrayStoreConfig(maxSmallArraySize, - ArrayStoreConfig::AllocSpec(16, RefT::offsetSize(), 8 * 1024, - ALLOC_GROW_FACTOR))), - refStore(), - generation(1) - {} - Fixture(const ArrayStoreConfig &storeCfg) - : store(storeCfg), - refStore(), - generation(1) - {} - void assertAdd(const EntryVector &input) { - EntryRef ref = add(input); - assertGet(ref, input); - } - EntryRef add(const EntryVector &input) { - EntryRef result = store.add(ConstArrayRef(input)); - ASSERT_EQUAL(0u, refStore.count(result)); - refStore.insert(std::make_pair(result, input)); - return result; - } - void assertGet(EntryRef ref, const EntryVector &exp) const { - ConstArrayRef act = store.get(ref); - EXPECT_EQUAL(exp, EntryVector(act.begin(), act.end())); - } - void remove(EntryRef ref) { - ASSERT_EQUAL(1u, refStore.count(ref)); - store.remove(ref); - refStore.erase(ref); - } - void remove(const EntryVector &input) { - remove(getEntryRef(input)); - } - uint32_t getBufferId(EntryRef ref) const { - return EntryRefType(ref).bufferId(); - } - void assertBufferState(EntryRef ref, const MemStats expStats) const { - EXPECT_EQUAL(expStats._used, store.bufferState(ref).size()); - EXPECT_EQUAL(expStats._hold, store.bufferState(ref).getHoldElems()); - EXPECT_EQUAL(expStats._dead, store.bufferState(ref).getDeadElems()); - } - void assertMemoryUsage(const MemStats expStats) const { - MemoryUsage act = store.getMemoryUsage(); - EXPECT_EQUAL(expStats._used, act.usedBytes()); - EXPECT_EQUAL(expStats._hold, act.allocatedBytesOnHold()); - EXPECT_EQUAL(expStats._dead, act.deadBytes()); - } - void assertStoreContent() const { - for (const auto &elem : refStore) { - TEST_DO(assertGet(elem.first, elem.second)); - } - } - EntryRef getEntryRef(const EntryVector &input) { - for (auto itr = refStore.begin(); itr != refStore.end(); ++itr) { - if (itr->second == input) { - return itr->first; - } - } - return EntryRef(); - } - void trimHoldLists() { - store.transferHoldLists(generation++); - store.trimHoldLists(generation); - } - void compactWorst(bool compactMemory, bool compactAddressSpace) { - ICompactionContext::UP ctx = store.compactWorst(compactMemory, compactAddressSpace); - std::vector refs; - for (auto itr = refStore.begin(); itr != refStore.end(); ++itr) { - refs.push_back(itr->first); - } - std::vector compactedRefs = refs; - ctx->compact(ArrayRef(compactedRefs)); - ReferenceStore compactedRefStore; - for (size_t i = 0; i < refs.size(); ++i) { - ASSERT_EQUAL(0u, compactedRefStore.count(compactedRefs[i])); - ASSERT_EQUAL(1u, refStore.count(refs[i])); - compactedRefStore.insert(std::make_pair(compactedRefs[i], refStore[refs[i]])); - } - refStore = compactedRefStore; - } - size_t entrySize() const { return sizeof(EntryT); } - size_t largeArraySize() const { return sizeof(LargeArray); } -}; - 
-using NumberFixture = Fixture;
-using StringFixture = Fixture;
-using SmallOffsetNumberFixture = Fixture>;
-using ByteFixture = Fixture;
-
-
-
-TEST("require that we test with trivial and non-trivial types")
-{
-    EXPECT_TRUE(vespalib::can_skip_destruction::value);
-    EXPECT_FALSE(vespalib::can_skip_destruction::value);
-}
-
-TEST_F("require that we can add and get small arrays of trivial type", NumberFixture(3))
-{
-    TEST_DO(f.assertAdd({}));
-    TEST_DO(f.assertAdd({1}));
-    TEST_DO(f.assertAdd({2,3}));
-    TEST_DO(f.assertAdd({3,4,5}));
-}
-
-TEST_F("require that we can add and get small arrays of non-trivial type", StringFixture(3))
-{
-    TEST_DO(f.assertAdd({}));
-    TEST_DO(f.assertAdd({"aa"}));
-    TEST_DO(f.assertAdd({"bbb", "ccc"}));
-    TEST_DO(f.assertAdd({"ddd", "eeee", "fffff"}));
-}
-
-TEST_F("require that we can add and get large arrays of simple type", NumberFixture(3))
-{
-    TEST_DO(f.assertAdd({1,2,3,4}));
-    TEST_DO(f.assertAdd({2,3,4,5,6}));
-}
-
-TEST_F("require that we can add and get large arrays of non-trivial type", StringFixture(3))
-{
-    TEST_DO(f.assertAdd({"aa", "bb", "cc", "dd"}));
-    TEST_DO(f.assertAdd({"ddd", "eee", "ffff", "gggg", "hhhh"}));
-}
-
-TEST_F("require that elements are put on hold when a small array is removed", NumberFixture(3))
-{
-    EntryRef ref = f.add({1,2,3});
-    TEST_DO(f.assertBufferState(ref, MemStats().used(3).hold(0)));
-    f.store.remove(ref);
-    TEST_DO(f.assertBufferState(ref, MemStats().used(3).hold(3)));
-}
-
-TEST_F("require that elements are put on hold when a large array is removed", NumberFixture(3))
-{
-    EntryRef ref = f.add({1,2,3,4});
-    // Note: The first buffer has the first element reserved -> we expect 2 elements used here.
-    TEST_DO(f.assertBufferState(ref, MemStats().used(2).hold(0).dead(1)));
-    f.store.remove(ref);
-    TEST_DO(f.assertBufferState(ref, MemStats().used(2).hold(1).dead(1)));
-}
-
-TEST_F("require that new underlying buffer is allocated when current is full", SmallOffsetNumberFixture(3))
-{
-    uint32_t firstBufferId = f.getBufferId(f.add({1,1}));
-    for (uint32_t i = 0; i < (F1::EntryRefType::offsetSize() - 1); ++i) {
-        uint32_t bufferId = f.getBufferId(f.add({i, i+1}));
-        EXPECT_EQUAL(firstBufferId, bufferId);
-    }
-    TEST_DO(f.assertStoreContent());
-
-    uint32_t secondBufferId = f.getBufferId(f.add({2,2}));
-    EXPECT_NOT_EQUAL(firstBufferId, secondBufferId);
-    for (uint32_t i = 0; i < 10u; ++i) {
-        uint32_t bufferId = f.getBufferId(f.add({i+2,i}));
-        EXPECT_EQUAL(secondBufferId, bufferId);
-    }
-    TEST_DO(f.assertStoreContent());
-}
-
-TEST_F("require that the buffer with most dead space is compacted", NumberFixture(2))
-{
-    EntryRef size1Ref = f.add({1});
-    EntryRef size2Ref = f.add({2,2});
-    EntryRef size3Ref = f.add({3,3,3});
-    f.remove(f.add({5,5}));
-    f.trimHoldLists();
-    TEST_DO(f.assertBufferState(size1Ref, MemStats().used(1).dead(0)));
-    TEST_DO(f.assertBufferState(size2Ref, MemStats().used(4).dead(2)));
-    TEST_DO(f.assertBufferState(size3Ref, MemStats().used(2).dead(1))); // Note: First element is reserved
-    uint32_t size1BufferId = f.getBufferId(size1Ref);
-    uint32_t size2BufferId = f.getBufferId(size2Ref);
-    uint32_t size3BufferId = f.getBufferId(size3Ref);
-
-    EXPECT_EQUAL(3u, f.refStore.size());
-    f.compactWorst(true, false);
-    EXPECT_EQUAL(3u, f.refStore.size());
-    f.assertStoreContent();
-
-    EXPECT_EQUAL(size1BufferId, f.getBufferId(f.getEntryRef({1})));
-    EXPECT_EQUAL(size3BufferId, f.getBufferId(f.getEntryRef({3,3,3})));
-    // Buffer for size 2 arrays has been compacted
-    EXPECT_NOT_EQUAL(size2BufferId,
f.getBufferId(f.getEntryRef({2,2}))); - f.assertGet(size2Ref, {2,2}); // Old ref should still point to data. - EXPECT_TRUE(f.store.bufferState(size2Ref).isOnHold()); - f.trimHoldLists(); - EXPECT_TRUE(f.store.bufferState(size2Ref).isFree()); -} - -namespace { - -void testCompaction(NumberFixture &f, bool compactMemory, bool compactAddressSpace) -{ - EntryRef size1Ref = f.add({1}); - EntryRef size2Ref = f.add({2,2}); - EntryRef size3Ref = f.add({3,3,3}); - f.remove(f.add({5,5,5})); - f.remove(f.add({6})); - f.remove(f.add({7})); - f.trimHoldLists(); - TEST_DO(f.assertBufferState(size1Ref, MemStats().used(3).dead(2))); - TEST_DO(f.assertBufferState(size2Ref, MemStats().used(2).dead(0))); - TEST_DO(f.assertBufferState(size3Ref, MemStats().used(6).dead(3))); - uint32_t size1BufferId = f.getBufferId(size1Ref); - uint32_t size2BufferId = f.getBufferId(size2Ref); - uint32_t size3BufferId = f.getBufferId(size3Ref); - - EXPECT_EQUAL(3u, f.refStore.size()); - f.compactWorst(compactMemory, compactAddressSpace); - EXPECT_EQUAL(3u, f.refStore.size()); - f.assertStoreContent(); - - if (compactMemory) { - EXPECT_NOT_EQUAL(size3BufferId, f.getBufferId(f.getEntryRef({3,3,3}))); - } else { - EXPECT_EQUAL(size3BufferId, f.getBufferId(f.getEntryRef({3,3,3}))); - } - if (compactAddressSpace) { - EXPECT_NOT_EQUAL(size1BufferId, f.getBufferId(f.getEntryRef({1}))); - } else { - EXPECT_EQUAL(size1BufferId, f.getBufferId(f.getEntryRef({1}))); - } - EXPECT_EQUAL(size2BufferId, f.getBufferId(f.getEntryRef({2,2}))); - f.assertGet(size1Ref, {1}); // Old ref should still point to data. - f.assertGet(size3Ref, {3,3,3}); // Old ref should still point to data. - if (compactMemory) { - EXPECT_TRUE(f.store.bufferState(size3Ref).isOnHold()); - } else { - EXPECT_FALSE(f.store.bufferState(size3Ref).isOnHold()); - } - if (compactAddressSpace) { - EXPECT_TRUE(f.store.bufferState(size1Ref).isOnHold()); - } else { - EXPECT_FALSE(f.store.bufferState(size1Ref).isOnHold()); - } - EXPECT_FALSE(f.store.bufferState(size2Ref).isOnHold()); - f.trimHoldLists(); - if (compactMemory) { - EXPECT_TRUE(f.store.bufferState(size3Ref).isFree()); - } else { - EXPECT_FALSE(f.store.bufferState(size3Ref).isFree()); - } - if (compactAddressSpace) { - EXPECT_TRUE(f.store.bufferState(size1Ref).isFree()); - } else { - EXPECT_FALSE(f.store.bufferState(size1Ref).isFree()); - } - EXPECT_FALSE(f.store.bufferState(size2Ref).isFree()); -} - -} - -TEST_F("require that compactWorst selects on only memory", NumberFixture(3)) { - testCompaction(f, true, false); -} - -TEST_F("require that compactWorst selects on only address space", NumberFixture(3)) { - testCompaction(f, false, true); -} - -TEST_F("require that compactWorst selects on both memory and address space", NumberFixture(3)) { - testCompaction(f, true, true); -} - -TEST_F("require that compactWorst selects on neither memory nor address space", NumberFixture(3)) { - testCompaction(f, false, false); -} - -TEST_F("require that used, onHold and dead memory usage is tracked for small arrays", NumberFixture(2)) -{ - MemStats exp(f.store.getMemoryUsage()); - f.add({2,2}); - TEST_DO(f.assertMemoryUsage(exp.used(f.entrySize() * 2))); - f.remove({2,2}); - TEST_DO(f.assertMemoryUsage(exp.hold(f.entrySize() * 2))); - f.trimHoldLists(); - TEST_DO(f.assertMemoryUsage(exp.holdToDead(f.entrySize() * 2))); -} - -TEST_F("require that used, onHold and dead memory usage is tracked for large arrays", NumberFixture(2)) -{ - MemStats exp(f.store.getMemoryUsage()); - f.add({3,3,3}); - 
TEST_DO(f.assertMemoryUsage(exp.used(f.largeArraySize() + f.entrySize() * 3))); - f.remove({3,3,3}); - TEST_DO(f.assertMemoryUsage(exp.hold(f.largeArraySize() + f.entrySize() * 3))); - f.trimHoldLists(); - TEST_DO(f.assertMemoryUsage(exp.decHold(f.largeArraySize() + f.entrySize() * 3). - dead(f.largeArraySize()))); -} - -TEST_F("require that address space usage is ratio between used arrays and number of possible arrays", NumberFixture(3)) -{ - f.add({2,2}); - f.add({3,3,3}); - // 1 array is reserved (buffer 0, offset 0). - EXPECT_EQUAL(3u, f.store.addressSpaceUsage().used()); - EXPECT_EQUAL(1u, f.store.addressSpaceUsage().dead()); - size_t fourgig = (1ull << 32); - /* - * Expected limit is sum of allocated arrays for active buffers and - * potentially allocated arrays for free buffers. If all buffers were - * free then the limit would be 4 Gi. - * Then we subtract arrays for 4 buffers that are not free (arraySize=1,2,3 + largeArray), - * and add their actual number of allocated arrays (16 arrays per buffer). - * Note: arraySize=3 has 21 arrays as allocated buffer is rounded up to power of 2: - * 16 * 3 * sizeof(int) = 192 -> 256. - * allocated elements = 256 / sizeof(int) = 64. - * limit = 64 / 3 = 21. - */ - size_t expLimit = fourgig - 4 * F1::EntryRefType::offsetSize() + 3 * 16 + 21; - EXPECT_EQUAL(static_cast(2)/ expLimit, f.store.addressSpaceUsage().usage()); - EXPECT_EQUAL(expLimit, f.store.addressSpaceUsage().limit()); -} - -TEST_F("require that offset in EntryRefT is within bounds when allocating memory buffers where wanted number of bytes is not a power of 2 and less than huge page size", - ByteFixture(ByteFixture::ArrayStoreType::optimizedConfigForHugePage(1023, vespalib::alloc::MemoryAllocator::HUGEPAGE_SIZE, - 4 * 1024, 8 * 1024, ALLOC_GROW_FACTOR))) -{ - // The array store config used in this test is equivalent to the one multi-value attribute uses when initializing multi-value mapping. - // See similar test in datastore_test.cpp for more details on what happens during memory allocation. - for (size_t i = 0; i < 1000000; ++i) { - f.add({1, 2, 3}); - } - f.assertStoreContent(); -} - -TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/searchlib/src/tests/datastore/array_store_config/CMakeLists.txt b/searchlib/src/tests/datastore/array_store_config/CMakeLists.txt deleted file mode 100644 index 7ae62767586..00000000000 --- a/searchlib/src/tests/datastore/array_store_config/CMakeLists.txt +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -vespa_add_executable(searchlib_array_store_config_test_app TEST - SOURCES - array_store_config_test.cpp - DEPENDS - searchlib -) -vespa_add_test(NAME searchlib_array_store_config_test_app COMMAND searchlib_array_store_config_test_app) diff --git a/searchlib/src/tests/datastore/array_store_config/array_store_config_test.cpp b/searchlib/src/tests/datastore/array_store_config/array_store_config_test.cpp deleted file mode 100644 index e0f93761b13..00000000000 --- a/searchlib/src/tests/datastore/array_store_config/array_store_config_test.cpp +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
- -#include -#include -#include - -using namespace search::datastore; -using AllocSpec = ArrayStoreConfig::AllocSpec; - -constexpr float ALLOC_GROW_FACTOR = 0.2; - -struct Fixture -{ - using EntryRefType = EntryRefT<18>; - ArrayStoreConfig cfg; - - Fixture(uint32_t maxSmallArraySize, - const AllocSpec &defaultSpec) - : cfg(maxSmallArraySize, defaultSpec) {} - - Fixture(uint32_t maxSmallArraySize, - size_t hugePageSize, - size_t smallPageSize, - size_t minNumArraysForNewBuffer) - : cfg(ArrayStoreConfig::optimizeForHugePage(maxSmallArraySize, hugePageSize, smallPageSize, - sizeof(int), EntryRefType::offsetSize(), - minNumArraysForNewBuffer, - ALLOC_GROW_FACTOR)) { } - void assertSpec(size_t arraySize, uint32_t numArraysForNewBuffer) { - assertSpec(arraySize, AllocSpec(0, EntryRefType::offsetSize(), - numArraysForNewBuffer, ALLOC_GROW_FACTOR)); - } - void assertSpec(size_t arraySize, const AllocSpec &expSpec) { - const ArrayStoreConfig::AllocSpec &actSpec = cfg.specForSize(arraySize); - EXPECT_EQUAL(expSpec.minArraysInBuffer, actSpec.minArraysInBuffer); - EXPECT_EQUAL(expSpec.maxArraysInBuffer, actSpec.maxArraysInBuffer); - EXPECT_EQUAL(expSpec.numArraysForNewBuffer, actSpec.numArraysForNewBuffer); - EXPECT_EQUAL(expSpec.allocGrowFactor, actSpec.allocGrowFactor); - } -}; - -AllocSpec -makeSpec(size_t minArraysInBuffer, - size_t maxArraysInBuffer, - size_t numArraysForNewBuffer) -{ - return AllocSpec(minArraysInBuffer, maxArraysInBuffer, numArraysForNewBuffer, ALLOC_GROW_FACTOR); -} - -constexpr size_t KB = 1024; -constexpr size_t MB = KB * KB; - -TEST_F("require that default allocation spec is given for all array sizes", Fixture(3, makeSpec(4, 32, 8))) -{ - EXPECT_EQUAL(3u, f.cfg.maxSmallArraySize()); - TEST_DO(f.assertSpec(0, makeSpec(4, 32, 8))); - TEST_DO(f.assertSpec(1, makeSpec(4, 32, 8))); - TEST_DO(f.assertSpec(2, makeSpec(4, 32, 8))); - TEST_DO(f.assertSpec(3, makeSpec(4, 32, 8))); -} - -TEST_F("require that we can generate config optimized for a given huge page", Fixture(1024, - 2 * MB, - 4 * KB, - 8 * KB)) -{ - EXPECT_EQUAL(1024u, f.cfg.maxSmallArraySize()); - TEST_DO(f.assertSpec(0, 8 * KB)); // large arrays - TEST_DO(f.assertSpec(1, 256 * KB)); - TEST_DO(f.assertSpec(2, 256 * KB)); - TEST_DO(f.assertSpec(3, 168 * KB)); - TEST_DO(f.assertSpec(4, 128 * KB)); - TEST_DO(f.assertSpec(5, 100 * KB)); - TEST_DO(f.assertSpec(6, 84 * KB)); - - TEST_DO(f.assertSpec(32, 16 * KB)); - TEST_DO(f.assertSpec(33, 12 * KB)); - TEST_DO(f.assertSpec(42, 12 * KB)); - TEST_DO(f.assertSpec(43, 8 * KB)); - TEST_DO(f.assertSpec(1022, 8 * KB)); - TEST_DO(f.assertSpec(1023, 8 * KB)); -} - -TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/searchlib/src/tests/datastore/buffer_type/CMakeLists.txt b/searchlib/src/tests/datastore/buffer_type/CMakeLists.txt deleted file mode 100644 index 3c3a6eb6f87..00000000000 --- a/searchlib/src/tests/datastore/buffer_type/CMakeLists.txt +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-vespa_add_executable(searchlib_buffer_type_test_app TEST - SOURCES - buffer_type_test.cpp - DEPENDS - searchlib -) -vespa_add_test(NAME searchlib_buffer_type_test_app COMMAND searchlib_buffer_type_test_app) diff --git a/searchlib/src/tests/datastore/buffer_type/buffer_type_test.cpp b/searchlib/src/tests/datastore/buffer_type/buffer_type_test.cpp deleted file mode 100644 index 2ea615bea08..00000000000 --- a/searchlib/src/tests/datastore/buffer_type/buffer_type_test.cpp +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#include -#include - -using namespace search::datastore; - -using IntBufferType = BufferType; -constexpr uint32_t ARRAYS_SIZE(4); -constexpr uint32_t MAX_ARRAYS(128); -constexpr uint32_t NUM_ARRAYS_FOR_NEW_BUFFER(0); - -struct Setup { - uint32_t _minArrays; - size_t _usedElems; - size_t _neededElems; - uint32_t _bufferId; - float _allocGrowFactor; - bool _resizing; - Setup() - : _minArrays(0), - _usedElems(0), - _neededElems(0), - _bufferId(1), - _allocGrowFactor(0.5), - _resizing(false) - {} - Setup &minArrays(uint32_t value) { _minArrays = value; return *this; } - Setup &used(size_t value) { _usedElems = value; return *this; } - Setup &needed(size_t value) { _neededElems = value; return *this; } - Setup &bufferId(uint32_t value) { _bufferId = value; return *this; } - Setup &resizing(bool value) { _resizing = value; return *this; } -}; - -struct Fixture { - Setup setup; - IntBufferType bufferType; - size_t deadElems; - int buffer; - Fixture(const Setup &setup_) - : setup(setup_), - bufferType(ARRAYS_SIZE, setup._minArrays, MAX_ARRAYS, NUM_ARRAYS_FOR_NEW_BUFFER, setup._allocGrowFactor), - deadElems(0), - buffer(0) - {} - ~Fixture() { - bufferType.onHold(&setup._usedElems); - bufferType.onFree(setup._usedElems); - } - void onActive() { - bufferType.onActive(setup._bufferId, &setup._usedElems, deadElems, &buffer); - } - size_t arraysToAlloc() { - return bufferType.calcArraysToAlloc(setup._bufferId, setup._neededElems, setup._resizing); - } -}; - -void -assertArraysToAlloc(size_t exp, const Setup &setup) -{ - Fixture f(setup); - f.onActive(); - EXPECT_EQUAL(exp, f.arraysToAlloc()); -} - -TEST("require that complete arrays are allocated") -{ - TEST_DO(assertArraysToAlloc(1, Setup().needed(1))); - TEST_DO(assertArraysToAlloc(1, Setup().needed(2))); - TEST_DO(assertArraysToAlloc(1, Setup().needed(3))); - TEST_DO(assertArraysToAlloc(1, Setup().needed(4))); - TEST_DO(assertArraysToAlloc(2, Setup().needed(5))); -} - -TEST("require that reserved elements are taken into account when not resizing") -{ - TEST_DO(assertArraysToAlloc(2, Setup().needed(1).bufferId(0))); - TEST_DO(assertArraysToAlloc(2, Setup().needed(4).bufferId(0))); - TEST_DO(assertArraysToAlloc(3, Setup().needed(5).bufferId(0))); -} - -TEST("require that arrays to alloc is based on currently used elements (no resizing)") -{ - TEST_DO(assertArraysToAlloc(2, Setup().used(4 * 4).needed(4))); - TEST_DO(assertArraysToAlloc(4, Setup().used(8 * 4).needed(4))); -} - -TEST("require that arrays to alloc is based on currently used elements (with resizing)") -{ - TEST_DO(assertArraysToAlloc(4 + 2, Setup().used(4 * 4).needed(4).resizing(true))); - TEST_DO(assertArraysToAlloc(8 + 4, Setup().used(8 * 4).needed(4).resizing(true))); - TEST_DO(assertArraysToAlloc(4 + 3, Setup().used(4 * 4).needed(3 * 4).resizing(true))); -} - -TEST("require that arrays to alloc always contain elements needed") -{ - TEST_DO(assertArraysToAlloc(2, 
Setup().used(4 * 4).needed(2 * 4))); - TEST_DO(assertArraysToAlloc(3, Setup().used(4 * 4).needed(3 * 4))); - TEST_DO(assertArraysToAlloc(4, Setup().used(4 * 4).needed(4 * 4))); -} - -TEST("require that arrays to alloc is capped to max arrays") -{ - TEST_DO(assertArraysToAlloc(127, Setup().used(254 * 4).needed(4))); - TEST_DO(assertArraysToAlloc(128, Setup().used(256 * 4).needed(4))); - TEST_DO(assertArraysToAlloc(128, Setup().used(258 * 4).needed(8))); -} - -TEST("require that arrays to alloc is capped to min arrays") -{ - TEST_DO(assertArraysToAlloc(16, Setup().used(30 * 4).needed(4).minArrays(16))); - TEST_DO(assertArraysToAlloc(16, Setup().used(32 * 4).needed(4).minArrays(16))); - TEST_DO(assertArraysToAlloc(17, Setup().used(34 * 4).needed(4).minArrays(16))); -} - -TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/searchlib/src/tests/datastore/datastore/CMakeLists.txt b/searchlib/src/tests/datastore/datastore/CMakeLists.txt deleted file mode 100644 index 1bc6210a13f..00000000000 --- a/searchlib/src/tests/datastore/datastore/CMakeLists.txt +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -vespa_add_executable(searchlib_datastore_test_app TEST - SOURCES - datastore_test.cpp - DEPENDS - searchlib - gtest -) -vespa_add_test(NAME searchlib_datastore_test_app COMMAND searchlib_datastore_test_app) diff --git a/searchlib/src/tests/datastore/datastore/datastore_test.cpp b/searchlib/src/tests/datastore/datastore/datastore_test.cpp deleted file mode 100644 index 281e5d52365..00000000000 --- a/searchlib/src/tests/datastore/datastore/datastore_test.cpp +++ /dev/null @@ -1,584 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
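The expected values in the buffer_type test above follow from a growth rule that is easier to see with concrete numbers: grow by the number of live arrays times the grow factor, never allocate less than what is needed, clamp to the configured min/max arrays, and when resizing allocate on top of the current size (buffer 0 additionally reserves one array). That rule is inferred from the expectations, not spelled out in the patch; the sketch below checks two of the asserted values.

// Inferred growth rule, checked against two expectations from the test above
// (ARRAYS_SIZE = 4, allocGrowFactor = 0.5, not resizing, bufferId != 0).
#include <algorithm>
#include <cstddef>

constexpr size_t ARRAY_SIZE = 4;

constexpr size_t arraysToAlloc(size_t usedElems, size_t neededElems)
{
    size_t liveArrays   = usedElems / ARRAY_SIZE;
    size_t neededArrays = (neededElems + ARRAY_SIZE - 1) / ARRAY_SIZE;
    size_t growArrays   = liveArrays / 2;          // allocGrowFactor = 0.5
    return std::max(neededArrays, growArrays);     // clamping to min/max arrays omitted here
}

static_assert(arraysToAlloc(4 * 4, 4) == 2, "matches assertArraysToAlloc(2, used(4*4).needed(4))");
static_assert(arraysToAlloc(8 * 4, 4) == 4, "matches assertArraysToAlloc(4, used(8*4).needed(4))");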
- -#include -#include -#include -#include - -#include -LOG_SETUP("datastore_test"); - -namespace search::datastore { - -using vespalib::alloc::MemoryAllocator; - -struct IntReclaimer -{ - static void reclaim(int *) {} -}; - -class MyStore : public DataStore > { -private: - using ParentType = DataStore >; - using ParentType::_activeBufferIds; -public: - MyStore() {} - - void - holdBuffer(uint32_t bufferId) - { - ParentType::holdBuffer(bufferId); - } - - void - holdElem(EntryRef ref, uint64_t len) - { - ParentType::holdElem(ref, len); - } - - void - transferHoldLists(generation_t generation) - { - ParentType::transferHoldLists(generation); - } - - void trimElemHoldList(generation_t usedGen) override { - ParentType::trimElemHoldList(usedGen); - } - void incDead(EntryRef ref, uint64_t dead) { - ParentType::incDead(ref, dead); - } - void ensureBufferCapacity(size_t sizeNeeded) { - ParentType::ensureBufferCapacity(0, sizeNeeded); - } - void enableFreeLists() { - ParentType::enableFreeLists(); - } - - void - switchActiveBuffer() - { - ParentType::switchActiveBuffer(0, 0u); - } - size_t activeBufferId() const { return _activeBufferIds[0]; } -}; - - -using GrowthStats = std::vector; - -constexpr float ALLOC_GROW_FACTOR = 0.4; -constexpr size_t HUGE_PAGE_ARRAY_SIZE = (MemoryAllocator::HUGEPAGE_SIZE / sizeof(int)); - -template -class GrowStore -{ - using Store = DataStoreT; - Store _store; - BufferType _firstType; - BufferType _type; - uint32_t _typeId; -public: - GrowStore(size_t arraySize, size_t minArrays, size_t maxArrays, size_t numArraysForNewBuffer) - : _store(), - _firstType(1, 1, maxArrays, 0, ALLOC_GROW_FACTOR), - _type(arraySize, minArrays, maxArrays, numArraysForNewBuffer, ALLOC_GROW_FACTOR), - _typeId(0) - { - (void) _store.addType(&_firstType); - _typeId = _store.addType(&_type); - _store.initActiveBuffers(); - } - ~GrowStore() { _store.dropBuffers(); } - - Store &store() { return _store; } - uint32_t typeId() const { return _typeId; } - - GrowthStats getGrowthStats(size_t bufs) { - GrowthStats sizes; - int prevBufferId = -1; - while (sizes.size() < bufs) { - RefType iRef = (_type.getArraySize() == 1) ? 
- (_store.template allocator(_typeId).alloc().ref) : - (_store.template allocator(_typeId).allocArray(_type.getArraySize()).ref); - int bufferId = iRef.bufferId(); - if (bufferId != prevBufferId) { - if (prevBufferId >= 0) { - const auto &state = _store.getBufferState(prevBufferId); - sizes.push_back(state.capacity()); - } - prevBufferId = bufferId; - } - } - return sizes; - } - GrowthStats getFirstBufGrowStats() { - GrowthStats sizes; - int i = 0; - int prevBuffer = -1; - size_t prevAllocated = _store.getMemoryUsage().allocatedBytes(); - for (;;) { - RefType iRef = _store.template allocator(_typeId).alloc().ref; - size_t allocated = _store.getMemoryUsage().allocatedBytes(); - if (allocated != prevAllocated) { - sizes.push_back(i); - prevAllocated = allocated; - } - int buffer = iRef.bufferId(); - if (buffer != prevBuffer) { - if (prevBuffer >= 0) { - return sizes; - } - prevBuffer = buffer; - } - ++i; - } - } - vespalib::MemoryUsage getMemoryUsage() const { return _store.getMemoryUsage(); } -}; - -using MyRef = MyStore::RefType; - -void -assertMemStats(const DataStoreBase::MemStats &exp, - const DataStoreBase::MemStats &act) -{ - EXPECT_EQ(exp._allocElems, act._allocElems); - EXPECT_EQ(exp._usedElems, act._usedElems); - EXPECT_EQ(exp._deadElems, act._deadElems); - EXPECT_EQ(exp._holdElems, act._holdElems); - EXPECT_EQ(exp._freeBuffers, act._freeBuffers); - EXPECT_EQ(exp._activeBuffers, act._activeBuffers); - EXPECT_EQ(exp._holdBuffers, act._holdBuffers); -} - -TEST(DataStoreTest, require_that_entry_ref_is_working) -{ - using MyRefType = EntryRefT<22>; - EXPECT_EQ(4194304u, MyRefType::offsetSize()); - EXPECT_EQ(1024u, MyRefType::numBuffers()); - { - MyRefType r(0, 0); - EXPECT_EQ(0u, r.offset()); - EXPECT_EQ(0u, r.bufferId()); - } - { - MyRefType r(237, 13); - EXPECT_EQ(237u, r.offset()); - EXPECT_EQ(13u, r.bufferId()); - } - { - MyRefType r(4194303, 1023); - EXPECT_EQ(4194303u, r.offset()); - EXPECT_EQ(1023u, r.bufferId()); - } - { - MyRefType r1(6498, 76); - MyRefType r2(r1); - EXPECT_EQ(r1.offset(), r2.offset()); - EXPECT_EQ(r1.bufferId(), r2.bufferId()); - } -} - -TEST(DataStoreTest, require_that_aligned_entry_ref_is_working) -{ - using MyRefType = AlignedEntryRefT<22, 2>; // 4 byte alignement - EXPECT_EQ(4 * 4194304u, MyRefType::offsetSize()); - EXPECT_EQ(1024u, MyRefType::numBuffers()); - EXPECT_EQ(0u, MyRefType::align(0)); - EXPECT_EQ(4u, MyRefType::align(1)); - EXPECT_EQ(4u, MyRefType::align(2)); - EXPECT_EQ(4u, MyRefType::align(3)); - EXPECT_EQ(4u, MyRefType::align(4)); - EXPECT_EQ(8u, MyRefType::align(5)); - { - MyRefType r(0, 0); - EXPECT_EQ(0u, r.offset()); - EXPECT_EQ(0u, r.bufferId()); - } - { - MyRefType r(237, 13); - EXPECT_EQ(MyRefType::align(237), r.offset()); - EXPECT_EQ(13u, r.bufferId()); - } - { - MyRefType r(MyRefType::offsetSize() - 4, 1023); - EXPECT_EQ(MyRefType::align(MyRefType::offsetSize() - 4), r.offset()); - EXPECT_EQ(1023u, r.bufferId()); - } -} - -TEST(DataStoreTest, require_that_entries_can_be_added_and_retrieved) -{ - using IntStore = DataStore; - IntStore ds; - EntryRef r1 = ds.addEntry(10); - EntryRef r2 = ds.addEntry(20); - EntryRef r3 = ds.addEntry(30); - EXPECT_EQ(1u, IntStore::RefType(r1).offset()); - EXPECT_EQ(2u, IntStore::RefType(r2).offset()); - EXPECT_EQ(3u, IntStore::RefType(r3).offset()); - EXPECT_EQ(0u, IntStore::RefType(r1).bufferId()); - EXPECT_EQ(0u, IntStore::RefType(r2).bufferId()); - EXPECT_EQ(0u, IntStore::RefType(r3).bufferId()); - EXPECT_EQ(10, ds.getEntry(r1)); - EXPECT_EQ(20, ds.getEntry(r2)); - EXPECT_EQ(30, ds.getEntry(r3)); -} 
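The EntryRefT<22> expectations above (offsetSize 4194304, numBuffers 1024) are the two halves of a 32-bit reference split into 22 offset bits and 10 buffer-id bits. The sketch below shows that split; the sizes come straight from the test, while the exact layout (offset in the low bits) is an assumption.

// A sketch of the 22/10 bit split behind EntryRefT<22>; the layout is assumed,
// the derived sizes match the assertions above.
#include <cassert>
#include <cstdint>

constexpr uint32_t OFFSET_BITS = 22;
constexpr uint32_t OFFSET_SIZE = 1u << OFFSET_BITS;        // 4194304 addressable offsets
constexpr uint32_t NUM_BUFFERS = 1u << (32 - OFFSET_BITS); // 1024 addressable buffers

constexpr uint32_t pack(uint32_t offset, uint32_t bufferId) {
    return offset | (bufferId << OFFSET_BITS);   // assumed layout: offset in the low bits
}

void entry_ref_sketch()
{
    static_assert(OFFSET_SIZE == 4194304u && NUM_BUFFERS == 1024u, "matches the test above");
    uint32_t ref = pack(237, 13);                // mirrors MyRefType r(237, 13) above
    assert((ref & (OFFSET_SIZE - 1)) == 237u);   // offset()
    assert((ref >> OFFSET_BITS) == 13u);         // bufferId()
    (void) ref;
}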
- -TEST(DataStoreTest, require_that_add_entry_triggers_change_of_buffer) -{ - using Store = DataStore >; - Store s; - uint64_t num = 0; - uint32_t lastId = 0; - uint64_t lastNum = 0; - for (;;++num) { - EntryRef r = s.addEntry(num); - EXPECT_EQ(num, s.getEntry(r)); - uint32_t bufferId = Store::RefType(r).bufferId(); - if (bufferId > lastId) { - LOG(info, "Changed to bufferId %u after %" PRIu64 " nums", bufferId, num); - EXPECT_EQ(Store::RefType::offsetSize() - (lastId == 0), num - lastNum); - lastId = bufferId; - lastNum = num; - } - if (bufferId == 2) { - break; - } - } - EXPECT_EQ(Store::RefType::offsetSize() * 2 - 1, num); - LOG(info, "Added %" PRIu64 " nums in 2 buffers", num); -} - -TEST(DataStoreTest, require_that_we_can_hold_and_trim_buffers) -{ - MyStore s; - EXPECT_EQ(0u, MyRef(s.addEntry(1)).bufferId()); - s.switchActiveBuffer(); - EXPECT_EQ(1u, s.activeBufferId()); - s.holdBuffer(0); // hold last buffer - s.transferHoldLists(10); - - EXPECT_EQ(1u, MyRef(s.addEntry(2)).bufferId()); - s.switchActiveBuffer(); - EXPECT_EQ(2u, s.activeBufferId()); - s.holdBuffer(1); // hold last buffer - s.transferHoldLists(20); - - EXPECT_EQ(2u, MyRef(s.addEntry(3)).bufferId()); - s.switchActiveBuffer(); - EXPECT_EQ(3u, s.activeBufferId()); - s.holdBuffer(2); // hold last buffer - s.transferHoldLists(30); - - EXPECT_EQ(3u, MyRef(s.addEntry(4)).bufferId()); - s.holdBuffer(3); // hold current buffer - s.transferHoldLists(40); - - EXPECT_TRUE(s.getBufferState(0).size() != 0); - EXPECT_TRUE(s.getBufferState(1).size() != 0); - EXPECT_TRUE(s.getBufferState(2).size() != 0); - EXPECT_TRUE(s.getBufferState(3).size() != 0); - s.trimHoldLists(11); - EXPECT_TRUE(s.getBufferState(0).size() == 0); - EXPECT_TRUE(s.getBufferState(1).size() != 0); - EXPECT_TRUE(s.getBufferState(2).size() != 0); - EXPECT_TRUE(s.getBufferState(3).size() != 0); - - s.switchActiveBuffer(); - EXPECT_EQ(0u, s.activeBufferId()); - EXPECT_EQ(0u, MyRef(s.addEntry(5)).bufferId()); - s.trimHoldLists(41); - EXPECT_TRUE(s.getBufferState(0).size() != 0); - EXPECT_TRUE(s.getBufferState(1).size() == 0); - EXPECT_TRUE(s.getBufferState(2).size() == 0); - EXPECT_TRUE(s.getBufferState(3).size() == 0); -} - -TEST(DataStoreTest, require_that_we_can_hold_and_trim_elements) -{ - MyStore s; - MyRef r1 = s.addEntry(1); - s.holdElem(r1, 1); - s.transferHoldLists(10); - MyRef r2 = s.addEntry(2); - s.holdElem(r2, 1); - s.transferHoldLists(20); - MyRef r3 = s.addEntry(3); - s.holdElem(r3, 1); - s.transferHoldLists(30); - EXPECT_EQ(1, s.getEntry(r1)); - EXPECT_EQ(2, s.getEntry(r2)); - EXPECT_EQ(3, s.getEntry(r3)); - s.trimElemHoldList(11); - EXPECT_EQ(0, s.getEntry(r1)); - EXPECT_EQ(2, s.getEntry(r2)); - EXPECT_EQ(3, s.getEntry(r3)); - s.trimElemHoldList(31); - EXPECT_EQ(0, s.getEntry(r1)); - EXPECT_EQ(0, s.getEntry(r2)); - EXPECT_EQ(0, s.getEntry(r3)); -} - -using IntHandle = Handle; - -MyRef -to_ref(IntHandle handle) -{ - return MyRef(handle.ref); -} - -std::ostream& -operator<<(std::ostream &os, const IntHandle &rhs) -{ - MyRef ref(rhs.ref); - os << "{ref.bufferId=" << ref.bufferId() << ", ref.offset=" << ref.offset() << ", data=" << rhs.data << "}"; - return os; -} - -void -expect_successive_handles(const IntHandle &first, const IntHandle &second) -{ - EXPECT_EQ(to_ref(first).offset() + 1, to_ref(second).offset()); -} - -TEST(DataStoreTest, require_that_we_can_use_free_lists) -{ - MyStore s; - s.enableFreeLists(); - auto allocator = s.freeListAllocator(); - auto h1 = allocator.alloc(1); - s.holdElem(h1.ref, 1); - s.transferHoldLists(10); - auto h2 = 
allocator.alloc(2); - expect_successive_handles(h1, h2); - s.holdElem(h2.ref, 1); - s.transferHoldLists(20); - s.trimElemHoldList(11); - auto h3 = allocator.alloc(3); // reuse h1.ref - EXPECT_EQ(h1, h3); - auto h4 = allocator.alloc(4); - expect_successive_handles(h2, h4); - s.trimElemHoldList(21); - auto h5 = allocator.alloc(5); // reuse h2.ref - EXPECT_EQ(h2, h5); - auto h6 = allocator.alloc(6); - expect_successive_handles(h4, h6); - EXPECT_EQ(3, s.getEntry(h1.ref)); - EXPECT_EQ(5, s.getEntry(h2.ref)); - EXPECT_EQ(3, s.getEntry(h3.ref)); - EXPECT_EQ(4, s.getEntry(h4.ref)); - EXPECT_EQ(5, s.getEntry(h5.ref)); - EXPECT_EQ(6, s.getEntry(h6.ref)); -} - -TEST(DataStoreTest, require_that_we_can_use_free_lists_with_raw_allocator) -{ - GrowStore grow_store(3, 64, 64, 64); - auto &s = grow_store.store(); - s.enableFreeLists(); - auto allocator = s.freeListRawAllocator(grow_store.typeId()); - - auto h1 = allocator.alloc(3); - auto h2 = allocator.alloc(3); - expect_successive_handles(h1, h2); - s.holdElem(h1.ref, 3); - s.holdElem(h2.ref, 3); - s.transferHoldLists(10); - s.trimElemHoldList(11); - - auto h3 = allocator.alloc(3); // reuse h2.ref from free list - EXPECT_EQ(h2, h3); - - auto h4 = allocator.alloc(3); // reuse h1.ref from free list - EXPECT_EQ(h1, h4); - - auto h5 = allocator.alloc(3); - expect_successive_handles(h2, h5); - expect_successive_handles(h3, h5); -} - -TEST(DataStoreTest, require_that_memory_stats_are_calculated) -{ - MyStore s; - DataStoreBase::MemStats m; - m._allocElems = MyRef::offsetSize(); - m._usedElems = 1; // ref = 0 is reserved - m._deadElems = 1; // ref = 0 is reserved - m._holdElems = 0; - m._activeBuffers = 1; - m._freeBuffers = MyRef::numBuffers() - 1; - m._holdBuffers = 0; - assertMemStats(m, s.getMemStats()); - - // add entry - MyRef r = s.addEntry(10); - m._usedElems++; - assertMemStats(m, s.getMemStats()); - - // inc dead - s.incDead(r, 1); - m._deadElems++; - assertMemStats(m, s.getMemStats()); - - // hold buffer - s.addEntry(20); - s.addEntry(30); - s.holdBuffer(r.bufferId()); - s.transferHoldLists(100); - m._usedElems += 2; - m._holdElems += 2; // used - dead - m._activeBuffers--; - m._holdBuffers++; - assertMemStats(m, s.getMemStats()); - - // new active buffer - s.switchActiveBuffer(); - s.addEntry(40); - m._allocElems += MyRef::offsetSize(); - m._usedElems++; - m._activeBuffers++; - m._freeBuffers--; - - // trim hold buffer - s.trimHoldLists(101); - m._allocElems -= MyRef::offsetSize(); - m._usedElems = 1; - m._deadElems = 0; - m._holdElems = 0; - m._freeBuffers = MyRef::numBuffers() - 1; - m._holdBuffers = 0; - assertMemStats(m, s.getMemStats()); -} - -TEST(DataStoreTest, require_that_memory_usage_is_calculated) -{ - MyStore s; - MyRef r = s.addEntry(10); - s.addEntry(20); - s.addEntry(30); - s.addEntry(40); - s.incDead(r, 1); - s.holdBuffer(r.bufferId()); - s.transferHoldLists(100); - vespalib::MemoryUsage m = s.getMemoryUsage(); - EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes()); - EXPECT_EQ(5 * sizeof(int), m.usedBytes()); - EXPECT_EQ(2 * sizeof(int), m.deadBytes()); - EXPECT_EQ(3 * sizeof(int), m.allocatedBytesOnHold()); - s.trimHoldLists(101); -} - -TEST(DataStoreTest, require_that_we_can_disable_elemement_hold_list) -{ - MyStore s; - MyRef r1 = s.addEntry(10); - MyRef r2 = s.addEntry(20); - MyRef r3 = s.addEntry(30); - (void) r3; - vespalib::MemoryUsage m = s.getMemoryUsage(); - EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes()); - EXPECT_EQ(4 * sizeof(int), m.usedBytes()); - EXPECT_EQ(1 * sizeof(int), m.deadBytes()); 
- EXPECT_EQ(0 * sizeof(int), m.allocatedBytesOnHold()); - s.holdElem(r1, 1); - m = s.getMemoryUsage(); - EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes()); - EXPECT_EQ(4 * sizeof(int), m.usedBytes()); - EXPECT_EQ(1 * sizeof(int), m.deadBytes()); - EXPECT_EQ(1 * sizeof(int), m.allocatedBytesOnHold()); - s.disableElemHoldList(); - s.holdElem(r2, 1); - m = s.getMemoryUsage(); - EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes()); - EXPECT_EQ(4 * sizeof(int), m.usedBytes()); - EXPECT_EQ(2 * sizeof(int), m.deadBytes()); - EXPECT_EQ(1 * sizeof(int), m.allocatedBytesOnHold()); - s.transferHoldLists(100); - s.trimHoldLists(101); -} - -using IntGrowStore = GrowStore>; - -namespace { - -void assertGrowStats(GrowthStats expSizes, - GrowthStats expFirstBufSizes, - size_t expInitMemUsage, - size_t minArrays, size_t numArraysForNewBuffer, size_t maxArrays = 128) -{ - EXPECT_EQ(expSizes, IntGrowStore(1, minArrays, maxArrays, numArraysForNewBuffer).getGrowthStats(expSizes.size())); - EXPECT_EQ(expFirstBufSizes, IntGrowStore(1, minArrays, maxArrays, numArraysForNewBuffer).getFirstBufGrowStats()); - EXPECT_EQ(expInitMemUsage, IntGrowStore(1, minArrays, maxArrays, numArraysForNewBuffer).getMemoryUsage().allocatedBytes()); -} - -} - -TEST(DataStoreTest, require_that_buffer_growth_works) -{ - // Always switch to new buffer, min size 4 - assertGrowStats({ 4, 4, 4, 4, 8, 16, 16, 32, 64, 64 }, - { 4 }, 20, 4, 0); - // Resize if buffer size is less than 4, min size 0 - assertGrowStats({ 4, 4, 4, 4, 8, 16, 16, 32, 64, 64 }, - { 0, 1, 2, 4 }, 4, 0, 4); - // Always switch to new buffer, min size 16 - assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 }, - { 16 }, 68, 16, 0); - // Resize if buffer size is less than 16, min size 0 - assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 }, - { 0, 1, 2, 4, 8, 16 }, 4, 0, 16); - // Resize if buffer size is less than 16, min size 4 - assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 }, - { 4, 8, 16 }, 20, 4, 16); - // Always switch to new buffer, min size 0 - assertGrowStats({ 1, 1, 1, 1, 1, 2, 2, 4, 8, 8, 16, 32 }, - { 0, 1 }, 4, 0, 0); - - // Buffers with sizes larger than the huge page size of the mmap allocator. - ASSERT_EQ(524288u, HUGE_PAGE_ARRAY_SIZE); - assertGrowStats({ 262144, 262144, 262144, 524288, 524288, 524288 * 2, 524288 * 3, 524288 * 4, 524288 * 5, 524288 * 5 }, - { 0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144 }, - 4, 0, HUGE_PAGE_ARRAY_SIZE / 2, HUGE_PAGE_ARRAY_SIZE * 5); -} - -using RefType15 = EntryRefT<15>; // offsetSize=32768 - -namespace { - -template -void assertGrowStats(GrowthStats expSizes, uint32_t arraySize) -{ - uint32_t minArrays = 2048; - uint32_t maxArrays = RefType15::offsetSize(); - uint32_t numArraysForNewBuffer = 2048; - GrowStore store(arraySize, minArrays, maxArrays, numArraysForNewBuffer); - EXPECT_EQ(expSizes, store.getGrowthStats(expSizes.size())); -} - -} - -TEST(DataStoreTest, require_that_offset_in_EntryRefT_is_within_bounds_when_allocating_memory_buffers_where_wanted_number_of_bytes_is_not_a_power_of_2_and_less_than_huge_page_size) -{ - /* - * When allocating new memory buffers for the data store the following happens (ref. calcAllocation() in bufferstate.cpp): - * 1) Calculate how many arrays to alloc. - * In this case we alloc a minimum of 2048 and a maximum of 32768. - * 2) Calculate how many bytes to alloc: arraysToAlloc * arraySize * elementSize. - * In this case elementSize is (1 or 4) and arraySize varies (3, 5, 7). 
- * 3) Round up bytes to alloc to match the underlying allocator (power of 2 if less than huge page size): - * After this we might end up with more bytes than the offset in EntryRef can handle. In this case this is 32768. - * 4) Cap bytes to alloc to the max offset EntryRef can handle. - * The max bytes to alloc is: maxArrays * arraySize * elementSize. - */ - assertGrowStats({8192,8192,8192,16384,16384,32768,65536,65536,98304,98304,98304,98304}, 3); - assertGrowStats({16384,16384,16384,32768,32768,65536,131072,131072,163840,163840,163840,163840}, 5); - assertGrowStats({16384,16384,16384,32768,32768,65536,131072,131072,229376,229376,229376,229376}, 7); - assertGrowStats({8192,8192,8192,16384,16384,32768,65536,65536,98304,98304,98304,98304}, 3); - assertGrowStats({16384,16384,16384,32768,32768,65536,131072,131072,163840,163840,163840,163840}, 5); - assertGrowStats({16384,16384,16384,32768,32768,65536,131072,131072,229376,229376,229376,229376}, 7); -} - -} - -GTEST_MAIN_RUN_ALL_TESTS() diff --git a/searchlib/src/tests/datastore/unique_store/CMakeLists.txt b/searchlib/src/tests/datastore/unique_store/CMakeLists.txt deleted file mode 100644 index 0db25d3129b..00000000000 --- a/searchlib/src/tests/datastore/unique_store/CMakeLists.txt +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -vespa_add_executable(searchlib_unique_store_test_app TEST - SOURCES - unique_store_test.cpp - DEPENDS - searchlib -) -vespa_add_test(NAME searchlib_unique_store_test_app COMMAND searchlib_unique_store_test_app) diff --git a/searchlib/src/tests/datastore/unique_store/unique_store_test.cpp b/searchlib/src/tests/datastore/unique_store/unique_store_test.cpp deleted file mode 100644 index d60144c18f1..00000000000 --- a/searchlib/src/tests/datastore/unique_store/unique_store_test.cpp +++ /dev/null @@ -1,267 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
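The numbered comment above compresses a fair bit of arithmetic. The sketch below spells it out for the arraySize = 3 case and reproduces both the first value (8192) and the capped plateau (98304) in the expected growth stats; sizeof(int) == 4 is assumed, everything else comes from the comment and the assertions.

// Worked numbers for arraySize = 3 (elementSize = 4, minArrays = 2048, maxArrays = 32768).
#include <cstddef>

constexpr size_t ELEM_SIZE  = 4;       // sizeof(int), assumed
constexpr size_t ARRAY_SIZE = 3;
constexpr size_t MIN_ARRAYS = 2048;
constexpr size_t MAX_ARRAYS = 32768;   // EntryRefT<15>::offsetSize()

// Step 2: bytes wanted for the first buffer: 2048 * 3 * 4 = 24576 bytes.
constexpr size_t wantedBytes = MIN_ARRAYS * ARRAY_SIZE * ELEM_SIZE;
// Step 3: round up to a power of 2 (still below the huge page size): 32768 bytes,
// which is 8192 ints, the first entries in the expected growth stats.
constexpr size_t roundedBytes = 32768;
static_assert(wantedBytes <= roundedBytes, "24576 bytes round up to 32768 bytes");
static_assert(roundedBytes / ELEM_SIZE == 8192, "first expected buffer size in elements");
// Step 4: the cap is maxArrays * arraySize * elementSize = 393216 bytes = 98304 ints,
// the plateau at the end of the expected growth stats.
constexpr size_t cappedBytes = MAX_ARRAYS * ARRAY_SIZE * ELEM_SIZE;
static_assert(cappedBytes / ELEM_SIZE == 98304, "capped buffer size in elements");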
-#include -LOG_SETUP("unique_store_test"); -#include -#include -#include -#include -#include -#include - -using namespace search::datastore; -using vespalib::MemoryUsage; -using vespalib::ArrayRef; -using generation_t = vespalib::GenerationHandler::generation_t; -using MemStats = search::datastore::test::MemStats; - -template > -struct Fixture -{ - using EntryRefType = RefT; - using UniqueStoreType = UniqueStore; - using UniqueStoreAddResult = typename UniqueStoreType::AddResult; - using value_type = EntryT; - using ReferenceStore = std::map>; - - UniqueStoreType store; - ReferenceStore refStore; - generation_t generation; - Fixture() - : store(), - refStore(), - generation(1) - {} - void assertAdd(const EntryT &input) { - EntryRef ref = add(input); - assertGet(ref, input); - } - EntryRef add(const EntryT &input) { - UniqueStoreAddResult addResult = store.add(input); - EntryRef result = addResult.ref(); - auto insres = refStore.insert(std::make_pair(result, std::make_pair(input, 1u))); - EXPECT_EQUAL(insres.second, addResult.inserted()); - if (!insres.second) { - ++insres.first->second.second; - } - return result; - } - void alignRefStore(EntryRef ref, const EntryT &input, uint32_t refcnt) { - if (refcnt > 0) { - auto insres = refStore.insert(std::make_pair(ref, std::make_pair(input, refcnt))); - if (!insres.second) { - insres.first->second.second = refcnt; - } - } else { - refStore.erase(ref); - } - } - void assertGet(EntryRef ref, const EntryT &exp) const { - EntryT act = store.get(ref); - EXPECT_EQUAL(exp, act); - } - void remove(EntryRef ref) { - ASSERT_EQUAL(1u, refStore.count(ref)); - store.remove(ref); - if (refStore[ref].second > 1) { - --refStore[ref].second; - } else { - refStore.erase(ref); - } - } - void remove(const EntryT &input) { - remove(getEntryRef(input)); - } - uint32_t getBufferId(EntryRef ref) const { - return EntryRefType(ref).bufferId(); - } - void assertBufferState(EntryRef ref, const MemStats expStats) const { - EXPECT_EQUAL(expStats._used, store.bufferState(ref).size()); - EXPECT_EQUAL(expStats._hold, store.bufferState(ref).getHoldElems()); - EXPECT_EQUAL(expStats._dead, store.bufferState(ref).getDeadElems()); - } - void assertMemoryUsage(const MemStats expStats) const { - MemoryUsage act = store.getMemoryUsage(); - EXPECT_EQUAL(expStats._used, act.usedBytes()); - EXPECT_EQUAL(expStats._hold, act.allocatedBytesOnHold()); - EXPECT_EQUAL(expStats._dead, act.deadBytes()); - } - void assertStoreContent() const { - for (const auto &elem : refStore) { - TEST_DO(assertGet(elem.first, elem.second.first)); - } - } - EntryRef getEntryRef(const EntryT &input) { - for (const auto &elem : refStore) { - if (elem.second.first == input) { - return elem.first; - } - } - return EntryRef(); - } - void trimHoldLists() { - store.freeze(); - store.transferHoldLists(generation++); - store.trimHoldLists(generation); - } - void compactWorst() { - ICompactionContext::UP ctx = store.compactWorst(); - std::vector refs; - for (const auto &elem : refStore) { - refs.push_back(elem.first); - } - refs.push_back(EntryRef()); - std::vector compactedRefs = refs; - ctx->compact(ArrayRef(compactedRefs)); - ASSERT_FALSE(refs.back().valid()); - refs.pop_back(); - ReferenceStore compactedRefStore; - for (size_t i = 0; i < refs.size(); ++i) { - ASSERT_EQUAL(0u, compactedRefStore.count(compactedRefs[i])); - ASSERT_EQUAL(1u, refStore.count(refs[i])); - compactedRefStore.insert(std::make_pair(compactedRefs[i], refStore[refs[i]])); - } - refStore = compactedRefStore; - } - size_t entrySize() const { return 
sizeof(EntryT); } - auto getBuilder(uint32_t uniqueValuesHint) { return store.getBuilder(uniqueValuesHint); } - auto getSaver() { return store.getSaver(); } -}; - -using NumberFixture = Fixture; -using StringFixture = Fixture; -using SmallOffsetNumberFixture = Fixture>; - -TEST("require that we test with trivial and non-trivial types") -{ - EXPECT_TRUE(vespalib::can_skip_destruction::value); - EXPECT_FALSE(vespalib::can_skip_destruction::value); -} - -TEST_F("require that we can add and get values of trivial type", NumberFixture) -{ - TEST_DO(f.assertAdd(1)); - TEST_DO(f.assertAdd(2)); - TEST_DO(f.assertAdd(3)); - TEST_DO(f.assertAdd(1)); -} - -TEST_F("require that we can add and get values of non-trivial type", StringFixture) -{ - TEST_DO(f.assertAdd("aa")); - TEST_DO(f.assertAdd("bbb")); - TEST_DO(f.assertAdd("ccc")); - TEST_DO(f.assertAdd("aa")); -} - -TEST_F("require that elements are put on hold when value is removed", NumberFixture) -{ - EntryRef ref = f.add(1); - // Note: The first buffer have the first element reserved -> we expect 2 elements used here. - TEST_DO(f.assertBufferState(ref, MemStats().used(2).hold(0).dead(1))); - f.store.remove(ref); - TEST_DO(f.assertBufferState(ref, MemStats().used(2).hold(1).dead(1))); -} - -TEST_F("require that elements are reference counted", NumberFixture) -{ - EntryRef ref = f.add(1); - EntryRef ref2 = f.add(1); - EXPECT_EQUAL(ref.ref(), ref2.ref()); - // Note: The first buffer have the first element reserved -> we expect 2 elements used here. - TEST_DO(f.assertBufferState(ref, MemStats().used(2).hold(0).dead(1))); - f.store.remove(ref); - TEST_DO(f.assertBufferState(ref, MemStats().used(2).hold(0).dead(1))); - f.store.remove(ref); - TEST_DO(f.assertBufferState(ref, MemStats().used(2).hold(1).dead(1))); -} - -TEST_F("require that new underlying buffer is allocated when current is full", SmallOffsetNumberFixture) -{ - uint32_t firstBufferId = f.getBufferId(f.add(1)); - for (uint32_t i = 0; i < (F1::EntryRefType::offsetSize() - 2); ++i) { - uint32_t bufferId = f.getBufferId(f.add(i + 2)); - EXPECT_EQUAL(firstBufferId, bufferId); - } - TEST_DO(f.assertStoreContent()); - - uint32_t bias = F1::EntryRefType::offsetSize(); - uint32_t secondBufferId = f.getBufferId(f.add(bias + 1)); - EXPECT_NOT_EQUAL(firstBufferId, secondBufferId); - for (uint32_t i = 0; i < 10u; ++i) { - uint32_t bufferId = f.getBufferId(f.add(bias + i + 2)); - EXPECT_EQUAL(secondBufferId, bufferId); - } - TEST_DO(f.assertStoreContent()); -} - -TEST_F("require that compaction works", NumberFixture) -{ - EntryRef val1Ref = f.add(1); - EntryRef val2Ref = f.add(2); - f.remove(f.add(4)); - f.trimHoldLists(); - TEST_DO(f.assertBufferState(val1Ref, MemStats().used(4).dead(2))); // Note: First element is reserved - uint32_t val1BufferId = f.getBufferId(val1Ref); - - EXPECT_EQUAL(2u, f.refStore.size()); - f.compactWorst(); - EXPECT_EQUAL(2u, f.refStore.size()); - TEST_DO(f.assertStoreContent()); - - // Buffer has been compacted - EXPECT_NOT_EQUAL(val1BufferId, f.getBufferId(f.getEntryRef(1))); - // Old ref should still point to data. 
- f.assertGet(val1Ref, 1); - f.assertGet(val2Ref, 2); - EXPECT_TRUE(f.store.bufferState(val1Ref).isOnHold()); - f.trimHoldLists(); - EXPECT_TRUE(f.store.bufferState(val1Ref).isFree()); - TEST_DO(f.assertStoreContent()); -} - -TEST_F("require that builder works", NumberFixture) -{ - auto builder = f.getBuilder(2); - builder.add(10); - builder.add(20); - builder.setupRefCounts(); - EntryRef val10Ref = builder.mapEnumValueToEntryRef(1); - EntryRef val20Ref = builder.mapEnumValueToEntryRef(2); - TEST_DO(f.assertBufferState(val10Ref, MemStats().used(3).dead(1))); // Note: First element is reserved - EXPECT_TRUE(val10Ref.valid()); - EXPECT_TRUE(val20Ref.valid()); - EXPECT_NOT_EQUAL(val10Ref.ref(), val20Ref.ref()); - f.assertGet(val10Ref, 10); - f.assertGet(val20Ref, 20); - builder.makeDictionary(); - // Align refstore with the two entries added by builder. - f.alignRefStore(val10Ref, 10, 1); - f.alignRefStore(val20Ref, 20, 1); - EXPECT_EQUAL(val10Ref.ref(), f.add(10).ref()); - EXPECT_EQUAL(val20Ref.ref(), f.add(20).ref()); -} - -TEST_F("require that saver works", NumberFixture) -{ - EntryRef val10Ref = f.add(10); - EntryRef val20Ref = f.add(20); - f.remove(f.add(40)); - f.trimHoldLists(); - - auto saver = f.getSaver(); - std::vector refs; - saver.foreach_key([&](EntryRef ref) { refs.push_back(ref.ref()); }); - std::vector expRefs; - expRefs.push_back(val10Ref.ref()); - expRefs.push_back(val20Ref.ref()); - EXPECT_EQUAL(expRefs, refs); - saver.enumerateValues(); - uint32_t invalidEnum = saver.mapEntryRefToEnumValue(EntryRef()); - uint32_t enumValue10 = saver.mapEntryRefToEnumValue(val10Ref); - uint32_t enumValue20 = saver.mapEntryRefToEnumValue(val20Ref); - EXPECT_EQUAL(0u, invalidEnum); - EXPECT_EQUAL(1u, enumValue10); - EXPECT_EQUAL(2u, enumValue20); -} - -TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/searchlib/src/tests/diskindex/fusion/fusion_test.cpp b/searchlib/src/tests/diskindex/fusion/fusion_test.cpp index c46869392f7..e8e780b4376 100644 --- a/searchlib/src/tests/diskindex/fusion/fusion_test.cpp +++ b/searchlib/src/tests/diskindex/fusion/fusion_test.cpp @@ -1,8 +1,8 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -#include -#include -#include +#include +#include +#include #include #include #include diff --git a/searchlib/src/tests/memoryindex/compact_words_store/compact_words_store_test.cpp b/searchlib/src/tests/memoryindex/compact_words_store/compact_words_store_test.cpp index a78e4dead86..bda29115db6 100644 --- a/searchlib/src/tests/memoryindex/compact_words_store/compact_words_store_test.cpp +++ b/searchlib/src/tests/memoryindex/compact_words_store/compact_words_store_test.cpp @@ -1,8 +1,8 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. #include -#include #include +#include #include #include #include diff --git a/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp b/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp index 2b9b77d32a3..4075a06f882 100644 --- a/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp +++ b/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp @@ -1,7 +1,7 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-#include -#include +#include +#include #include #include #include diff --git a/searchlib/src/tests/predicate/predicate_bounds_posting_list_test.cpp b/searchlib/src/tests/predicate/predicate_bounds_posting_list_test.cpp index 368804b93d8..7c77dc1883c 100644 --- a/searchlib/src/tests/predicate/predicate_bounds_posting_list_test.cpp +++ b/searchlib/src/tests/predicate/predicate_bounds_posting_list_test.cpp @@ -4,8 +4,8 @@ #include #include #include -#include -#include +#include +#include #include #include diff --git a/searchlib/src/tests/predicate/predicate_index_test.cpp b/searchlib/src/tests/predicate/predicate_index_test.cpp index 78827c96d2f..12bc192079a 100644 --- a/searchlib/src/tests/predicate/predicate_index_test.cpp +++ b/searchlib/src/tests/predicate/predicate_index_test.cpp @@ -7,8 +7,8 @@ #include #include #include -#include -#include +#include +#include #include LOG_SETUP("predicate_index_test"); diff --git a/searchlib/src/tests/predicate/predicate_interval_posting_list_test.cpp b/searchlib/src/tests/predicate/predicate_interval_posting_list_test.cpp index 5b3f213b698..d91d50bdc8f 100644 --- a/searchlib/src/tests/predicate/predicate_interval_posting_list_test.cpp +++ b/searchlib/src/tests/predicate/predicate_interval_posting_list_test.cpp @@ -4,8 +4,8 @@ #include #include #include -#include -#include +#include +#include #include #include diff --git a/searchlib/src/tests/predicate/predicate_zstar_compressed_posting_list_test.cpp b/searchlib/src/tests/predicate/predicate_zstar_compressed_posting_list_test.cpp index e20895b559f..2e5001a1adb 100644 --- a/searchlib/src/tests/predicate/predicate_zstar_compressed_posting_list_test.cpp +++ b/searchlib/src/tests/predicate/predicate_zstar_compressed_posting_list_test.cpp @@ -3,8 +3,8 @@ #include #include -#include -#include +#include +#include #include #include diff --git a/searchlib/src/tests/predicate/simple_index_test.cpp b/searchlib/src/tests/predicate/simple_index_test.cpp index c9cce14784f..84495dbdbe2 100644 --- a/searchlib/src/tests/predicate/simple_index_test.cpp +++ b/searchlib/src/tests/predicate/simple_index_test.cpp @@ -4,11 +4,11 @@ #include #include #include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include #include #include diff --git a/searchlib/src/tests/util/bufferwriter/bufferwriter_test.cpp b/searchlib/src/tests/util/bufferwriter/bufferwriter_test.cpp index 1830d25fc72..bafb4105996 100644 --- a/searchlib/src/tests/util/bufferwriter/bufferwriter_test.cpp +++ b/searchlib/src/tests/util/bufferwriter/bufferwriter_test.cpp @@ -2,7 +2,7 @@ #include #include -#include +#include #include #include diff --git a/searchlib/src/tests/util/bufferwriter/work.cpp b/searchlib/src/tests/util/bufferwriter/work.cpp index b496bbc8888..bd5bf4a9d81 100644 --- a/searchlib/src/tests/util/bufferwriter/work.cpp +++ b/searchlib/src/tests/util/bufferwriter/work.cpp @@ -1,7 +1,7 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include "work.h" -#include +#include namespace search { diff --git a/searchlib/src/vespa/searchlib/CMakeLists.txt b/searchlib/src/vespa/searchlib/CMakeLists.txt index 2ec3cabaf4a..e4e1f92898f 100644 --- a/searchlib/src/vespa/searchlib/CMakeLists.txt +++ b/searchlib/src/vespa/searchlib/CMakeLists.txt @@ -4,9 +4,7 @@ vespa_add_library(searchlib $ $ $ - $ $ - $ $ $ $ diff --git a/searchlib/src/vespa/searchlib/attribute/attributefilebufferwriter.h b/searchlib/src/vespa/searchlib/attribute/attributefilebufferwriter.h index a6052f5f6b6..c4a26ff79c5 100644 --- a/searchlib/src/vespa/searchlib/attribute/attributefilebufferwriter.h +++ b/searchlib/src/vespa/searchlib/attribute/attributefilebufferwriter.h @@ -3,7 +3,7 @@ #pragma once #include "iattributefilewriter.h" -#include +#include namespace search { diff --git a/searchlib/src/vespa/searchlib/attribute/attributeiterators.hpp b/searchlib/src/vespa/searchlib/attribute/attributeiterators.hpp index 131d580c671..c51267c9f80 100644 --- a/searchlib/src/vespa/searchlib/attribute/attributeiterators.hpp +++ b/searchlib/src/vespa/searchlib/attribute/attributeiterators.hpp @@ -3,8 +3,8 @@ #pragma once #include "attributeiterators.h" -#include -#include +#include +#include #include #include #include diff --git a/searchlib/src/vespa/searchlib/attribute/dociditerator.h b/searchlib/src/vespa/searchlib/attribute/dociditerator.h index 1eb1829f3fd..0e336dc634f 100644 --- a/searchlib/src/vespa/searchlib/attribute/dociditerator.h +++ b/searchlib/src/vespa/searchlib/attribute/dociditerator.h @@ -3,7 +3,7 @@ #pragma once #include "postingdata.h" -#include +#include #include namespace search { diff --git a/searchlib/src/vespa/searchlib/attribute/enumattributesaver.cpp b/searchlib/src/vespa/searchlib/attribute/enumattributesaver.cpp index 69ec066176a..7f0422ce78d 100644 --- a/searchlib/src/vespa/searchlib/attribute/enumattributesaver.cpp +++ b/searchlib/src/vespa/searchlib/attribute/enumattributesaver.cpp @@ -2,7 +2,7 @@ #include "enumattributesaver.h" #include "iattributesavetarget.h" -#include +#include namespace search { diff --git a/searchlib/src/vespa/searchlib/attribute/enumstore.h b/searchlib/src/vespa/searchlib/attribute/enumstore.h index fd075504916..2af1fdc1b9d 100644 --- a/searchlib/src/vespa/searchlib/attribute/enumstore.h +++ b/searchlib/src/vespa/searchlib/attribute/enumstore.h @@ -4,15 +4,15 @@ #include "enumstorebase.h" #include +#include +#include +#include +#include +#include #include #include #include #include -#include -#include -#include -#include -#include namespace search { diff --git a/searchlib/src/vespa/searchlib/attribute/enumstore.hpp b/searchlib/src/vespa/searchlib/attribute/enumstore.hpp index 2cddd5b0286..c1b35b5a5fe 100644 --- a/searchlib/src/vespa/searchlib/attribute/enumstore.hpp +++ b/searchlib/src/vespa/searchlib/attribute/enumstore.hpp @@ -6,15 +6,15 @@ #include "enumcomparator.h" #include -#include -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include +#include #include +#include namespace search { diff --git a/searchlib/src/vespa/searchlib/attribute/enumstorebase.cpp b/searchlib/src/vespa/searchlib/attribute/enumstorebase.cpp index 47f11b88115..94c431368cb 100644 --- a/searchlib/src/vespa/searchlib/attribute/enumstorebase.cpp +++ b/searchlib/src/vespa/searchlib/attribute/enumstorebase.cpp @@ -2,13 +2,13 @@ #include "enumstorebase.h" #include "enumstore.h" -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include 
+#include +#include +#include +#include #include #include #include diff --git a/searchlib/src/vespa/searchlib/attribute/enumstorebase.h b/searchlib/src/vespa/searchlib/attribute/enumstorebase.h index 4b895542e59..48bf4a56874 100644 --- a/searchlib/src/vespa/searchlib/attribute/enumstorebase.h +++ b/searchlib/src/vespa/searchlib/attribute/enumstorebase.h @@ -3,12 +3,12 @@ #pragma once #include -#include +#include +#include #include #include #include #include -#include #include #include diff --git a/searchlib/src/vespa/searchlib/attribute/loadedvalue.h b/searchlib/src/vespa/searchlib/attribute/loadedvalue.h index 69a6c948d13..38baed6b3ea 100644 --- a/searchlib/src/vespa/searchlib/attribute/loadedvalue.h +++ b/searchlib/src/vespa/searchlib/attribute/loadedvalue.h @@ -4,7 +4,7 @@ #include #include -#include +#include namespace search { diff --git a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.h b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.h index 7bb0253d626..6246a656bf6 100644 --- a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.h +++ b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.h @@ -3,7 +3,7 @@ #pragma once #include "multi_value_mapping_base.h" -#include +#include #include namespace search::attribute { diff --git a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.hpp b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.hpp index 13f0a42346b..eb4d5fadd2a 100644 --- a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.hpp +++ b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.hpp @@ -3,7 +3,7 @@ #pragma once #include "multi_value_mapping.h" -#include +#include #include namespace search::attribute { diff --git a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.h b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.h index f1e9b804016..d3ceb948d6a 100644 --- a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.h +++ b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.h @@ -2,7 +2,7 @@ #pragma once -#include +#include #include #include #include diff --git a/searchlib/src/vespa/searchlib/attribute/multinumericattributesaver.cpp b/searchlib/src/vespa/searchlib/attribute/multinumericattributesaver.cpp index fe19bf236aa..51821389a92 100644 --- a/searchlib/src/vespa/searchlib/attribute/multinumericattributesaver.cpp +++ b/searchlib/src/vespa/searchlib/attribute/multinumericattributesaver.cpp @@ -3,7 +3,7 @@ #include "multinumericattributesaver.h" #include "multivalueattributesaverutils.h" #include "multivalue.h" -#include +#include using vespalib::GenerationHandler; using search::multivalueattributesaver::CountWriter; diff --git a/searchlib/src/vespa/searchlib/attribute/multistringattribute.hpp b/searchlib/src/vespa/searchlib/attribute/multistringattribute.hpp index 6c459465b51..7ce887f9292 100644 --- a/searchlib/src/vespa/searchlib/attribute/multistringattribute.hpp +++ b/searchlib/src/vespa/searchlib/attribute/multistringattribute.hpp @@ -6,10 +6,10 @@ #include "multistringattribute.h" #include "enumattribute.hpp" #include "multienumattribute.hpp" -#include #include #include #include +#include #include namespace search { diff --git a/searchlib/src/vespa/searchlib/attribute/multivalueattributesaverutils.cpp b/searchlib/src/vespa/searchlib/attribute/multivalueattributesaverutils.cpp index 16acf70eb59..697dbcfd27b 100644 --- a/searchlib/src/vespa/searchlib/attribute/multivalueattributesaverutils.cpp +++ 
b/searchlib/src/vespa/searchlib/attribute/multivalueattributesaverutils.cpp @@ -1,7 +1,7 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. #include "multivalueattributesaverutils.h" -#include +#include namespace search::multivalueattributesaver { diff --git a/searchlib/src/vespa/searchlib/attribute/multivalueattributesaverutils.h b/searchlib/src/vespa/searchlib/attribute/multivalueattributesaverutils.h index e34f43deb0b..213fd4c4777 100644 --- a/searchlib/src/vespa/searchlib/attribute/multivalueattributesaverutils.h +++ b/searchlib/src/vespa/searchlib/attribute/multivalueattributesaverutils.h @@ -3,7 +3,7 @@ #pragma once #include "iattributesavetarget.h" -#include +#include #include namespace search::multivalueattributesaver { diff --git a/searchlib/src/vespa/searchlib/attribute/posting_list_merger.h b/searchlib/src/vespa/searchlib/attribute/posting_list_merger.h index 8568661dfdd..6d7693da30e 100644 --- a/searchlib/src/vespa/searchlib/attribute/posting_list_merger.h +++ b/searchlib/src/vespa/searchlib/attribute/posting_list_merger.h @@ -2,7 +2,7 @@ #pragma once -#include +#include #include #include diff --git a/searchlib/src/vespa/searchlib/attribute/postingdata.h b/searchlib/src/vespa/searchlib/attribute/postingdata.h index 001e1015d19..db8522d0ce5 100644 --- a/searchlib/src/vespa/searchlib/attribute/postingdata.h +++ b/searchlib/src/vespa/searchlib/attribute/postingdata.h @@ -2,7 +2,7 @@ #pragma once -#include +#include namespace search { diff --git a/searchlib/src/vespa/searchlib/attribute/postinglistattribute.h b/searchlib/src/vespa/searchlib/attribute/postinglistattribute.h index c180672e798..829afeb9388 100644 --- a/searchlib/src/vespa/searchlib/attribute/postinglistattribute.h +++ b/searchlib/src/vespa/searchlib/attribute/postinglistattribute.h @@ -6,8 +6,8 @@ #include #include #include -#include -#include +#include +#include #include "dociditerator.h" #include "postinglistsearchcontext.h" #include "postingchange.h" diff --git a/searchlib/src/vespa/searchlib/attribute/postinglistsearchcontext.cpp b/searchlib/src/vespa/searchlib/attribute/postinglistsearchcontext.cpp index b520b53551c..3c3242ce47c 100644 --- a/searchlib/src/vespa/searchlib/attribute/postinglistsearchcontext.cpp +++ b/searchlib/src/vespa/searchlib/attribute/postinglistsearchcontext.cpp @@ -4,7 +4,7 @@ #include "postinglistsearchcontext.hpp" #include "attributeiterators.hpp" #include "diversity.hpp" -#include +#include namespace search::attribute { diff --git a/searchlib/src/vespa/searchlib/attribute/postinglisttraits.cpp b/searchlib/src/vespa/searchlib/attribute/postinglisttraits.cpp index 28a4a51c84c..6af4bec6a88 100644 --- a/searchlib/src/vespa/searchlib/attribute/postinglisttraits.cpp +++ b/searchlib/src/vespa/searchlib/attribute/postinglisttraits.cpp @@ -1,11 +1,11 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include "postinglisttraits.h" -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include namespace search { diff --git a/searchlib/src/vespa/searchlib/attribute/postinglisttraits.h b/searchlib/src/vespa/searchlib/attribute/postinglisttraits.h index 3a03a03641f..d07bdd3100d 100644 --- a/searchlib/src/vespa/searchlib/attribute/postinglisttraits.h +++ b/searchlib/src/vespa/searchlib/attribute/postinglisttraits.h @@ -2,7 +2,7 @@ #pragma once -#include +#include namespace search::attribute { diff --git a/searchlib/src/vespa/searchlib/attribute/postingstore.cpp b/searchlib/src/vespa/searchlib/attribute/postingstore.cpp index 3f5223118a7..848e4055fdf 100644 --- a/searchlib/src/vespa/searchlib/attribute/postingstore.cpp +++ b/searchlib/src/vespa/searchlib/attribute/postingstore.cpp @@ -1,12 +1,11 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. #include "postingstore.h" -#include -#include #include #include #include - +#include +#include namespace search::attribute { diff --git a/searchlib/src/vespa/searchlib/attribute/reference.h b/searchlib/src/vespa/searchlib/attribute/reference.h index 8d482c7c3c9..bc7f3615fa4 100644 --- a/searchlib/src/vespa/searchlib/attribute/reference.h +++ b/searchlib/src/vespa/searchlib/attribute/reference.h @@ -3,7 +3,7 @@ #pragma once #include -#include +#include namespace search::attribute { diff --git a/searchlib/src/vespa/searchlib/attribute/reference_attribute.cpp b/searchlib/src/vespa/searchlib/attribute/reference_attribute.cpp index 6cb9def9d8d..f91108d066b 100644 --- a/searchlib/src/vespa/searchlib/attribute/reference_attribute.cpp +++ b/searchlib/src/vespa/searchlib/attribute/reference_attribute.cpp @@ -4,11 +4,11 @@ #include "reference_attribute_saver.h" #include "attributesaver.h" #include "readerbase.h" -#include -#include -#include #include #include +#include +#include +#include #include #include diff --git a/searchlib/src/vespa/searchlib/attribute/reference_attribute.h b/searchlib/src/vespa/searchlib/attribute/reference_attribute.h index 21fb2c7337b..87d5a5c27bb 100644 --- a/searchlib/src/vespa/searchlib/attribute/reference_attribute.h +++ b/searchlib/src/vespa/searchlib/attribute/reference_attribute.h @@ -5,7 +5,7 @@ #include "not_implemented_attribute.h" #include "reference_mappings.h" #include "reference.h" -#include +#include #include namespace search { class IGidToLidMapperFactory; } diff --git a/searchlib/src/vespa/searchlib/attribute/reference_attribute_saver.cpp b/searchlib/src/vespa/searchlib/attribute/reference_attribute_saver.cpp index 19e4f69b4f6..cb8bb6661d6 100644 --- a/searchlib/src/vespa/searchlib/attribute/reference_attribute_saver.cpp +++ b/searchlib/src/vespa/searchlib/attribute/reference_attribute_saver.cpp @@ -1,8 +1,8 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include "reference_attribute_saver.h" -#include #include +#include #include "iattributesavetarget.h" diff --git a/searchlib/src/vespa/searchlib/attribute/reference_attribute_saver.h b/searchlib/src/vespa/searchlib/attribute/reference_attribute_saver.h index c86e43d5d95..42a26911669 100644 --- a/searchlib/src/vespa/searchlib/attribute/reference_attribute_saver.h +++ b/searchlib/src/vespa/searchlib/attribute/reference_attribute_saver.h @@ -4,8 +4,8 @@ #include "attributesaver.h" #include -#include -#include +#include +#include #include #include "reference_attribute.h" #include "reference.h" diff --git a/searchlib/src/vespa/searchlib/attribute/reference_mappings.cpp b/searchlib/src/vespa/searchlib/attribute/reference_mappings.cpp index 5bba0196d5c..f0207828d0f 100644 --- a/searchlib/src/vespa/searchlib/attribute/reference_mappings.cpp +++ b/searchlib/src/vespa/searchlib/attribute/reference_mappings.cpp @@ -2,8 +2,8 @@ #include "reference_mappings.h" #include "reference.h" -#include -#include +#include +#include namespace search::attribute { diff --git a/searchlib/src/vespa/searchlib/attribute/reference_mappings.h b/searchlib/src/vespa/searchlib/attribute/reference_mappings.h index f9119c6aa02..8ec997b307e 100644 --- a/searchlib/src/vespa/searchlib/attribute/reference_mappings.h +++ b/searchlib/src/vespa/searchlib/attribute/reference_mappings.h @@ -2,7 +2,7 @@ #pragma once -#include +#include #include #include diff --git a/searchlib/src/vespa/searchlib/attribute/singleenumattributesaver.cpp b/searchlib/src/vespa/searchlib/attribute/singleenumattributesaver.cpp index 3aae6b32dad..dd92421301c 100644 --- a/searchlib/src/vespa/searchlib/attribute/singleenumattributesaver.cpp +++ b/searchlib/src/vespa/searchlib/attribute/singleenumattributesaver.cpp @@ -1,8 +1,8 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. #include "singleenumattributesaver.h" -#include #include +#include #include "iattributesavetarget.h" diff --git a/searchlib/src/vespa/searchlib/attribute/singlestringattribute.hpp b/searchlib/src/vespa/searchlib/attribute/singlestringattribute.hpp index 0842d91c174..4925cee023c 100644 --- a/searchlib/src/vespa/searchlib/attribute/singlestringattribute.hpp +++ b/searchlib/src/vespa/searchlib/attribute/singlestringattribute.hpp @@ -6,10 +6,10 @@ #include "stringattribute.h" #include "singleenumattribute.hpp" #include "attributevector.hpp" -#include #include #include #include +#include #include namespace search { diff --git a/searchlib/src/vespa/searchlib/btree/CMakeLists.txt b/searchlib/src/vespa/searchlib/btree/CMakeLists.txt deleted file mode 100644 index d72f5c97dec..00000000000 --- a/searchlib/src/vespa/searchlib/btree/CMakeLists.txt +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-vespa_add_library(searchlib_btree OBJECT - SOURCES - btree_key_data.cpp - btreeaggregator.cpp - btreebuilder.cpp - btreeinserter.cpp - btreeiterator.cpp - btreenode.cpp - btreenodeallocator.cpp - btreenodestore.cpp - btreeremover.cpp - btreeroot.cpp - btreerootbase.cpp - btreestore.cpp - DEPENDS -) diff --git a/searchlib/src/vespa/searchlib/btree/OWNERS b/searchlib/src/vespa/searchlib/btree/OWNERS deleted file mode 100644 index b7b549c6058..00000000000 --- a/searchlib/src/vespa/searchlib/btree/OWNERS +++ /dev/null @@ -1,2 +0,0 @@ -toregge -geirst diff --git a/searchlib/src/vespa/searchlib/btree/btree.h b/searchlib/src/vespa/searchlib/btree/btree.h deleted file mode 100644 index 5d20964e169..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btree.h +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "btreeroot.h" -#include "noaggrcalc.h" -#include - -namespace search::btree { - -/** - * Class that wraps a btree root and an allocator and that provides the same API as - * a standalone btree root without needing to pass the allocator to all functions. - **/ -template , - typename TraitsT = BTreeDefaultTraits, - class AggrCalcT = NoAggrCalc> -class BTree -{ -public: - typedef BTreeRoot TreeType; - typedef BTreeNodeAllocator NodeAllocatorType; - typedef BTreeBuilder Builder; - typedef typename TreeType::InternalNodeType InternalNodeType; - typedef typename TreeType::LeafNodeType LeafNodeType; - typedef typename TreeType::KeyType KeyType; - typedef typename TreeType::DataType DataType; - typedef typename TreeType::Iterator Iterator; - typedef typename TreeType::ConstIterator ConstIterator; - typedef typename TreeType::FrozenView FrozenView; - typedef typename TreeType::AggrCalcType AggrCalcType; -private: - NodeAllocatorType _alloc; - TreeType _tree; - - BTree(const BTree &rhs); - - BTree & - operator=(BTree &rhs); - -public: - BTree(); - ~BTree(); - - const NodeAllocatorType &getAllocator() const { return _alloc; } - NodeAllocatorType &getAllocator() { return _alloc; } - - void - disableFreeLists() { - _alloc.disableFreeLists(); - } - - void - disableElemHoldList() - { - _alloc.disableElemHoldList(); - } - - // Inherit doc from BTreeRoot - void clear() { - _tree.clear(_alloc); - } - void assign(Builder & rhs) { - _tree.assign(rhs, _alloc); - } - bool insert(const KeyType & key, const DataType & data, CompareT comp = CompareT()) { - return _tree.insert(key, data, _alloc, comp); - } - - void - insert(Iterator &itr, - const KeyType &key, const DataType &data) - { - _tree.insert(itr, key, data); - } - - Iterator find(const KeyType & key, CompareT comp = CompareT()) const { - return _tree.find(key, _alloc, comp); - } - Iterator lowerBound(const KeyType & key, CompareT comp = CompareT()) const { - return _tree.lowerBound(key, _alloc, comp); - } - Iterator upperBound(const KeyType & key, CompareT comp = CompareT()) const { - return _tree.upperBound(key, _alloc, comp); - } - bool remove(const KeyType & key, CompareT comp = CompareT()) { - return _tree.remove(key, _alloc, comp); - } - - void - remove(Iterator &itr) - { - _tree.remove(itr); - } - - Iterator begin() const { - return _tree.begin(_alloc); - } - FrozenView getFrozenView() const { - return _tree.getFrozenView(_alloc); - } - size_t size() const { - return _tree.size(_alloc); - } - vespalib::string toString() const { - return _tree.toString(_alloc); - } - bool isValid(CompareT comp = CompareT()) const { - return 
_tree.isValid(_alloc, comp); - } - bool isValidFrozen(CompareT comp = CompareT()) const { - return _tree.isValidFrozen(_alloc, comp); - } - size_t bitSize() const { - return _tree.bitSize(_alloc); - } - size_t bitSize(BTreeNode::Ref node) const { - return _tree.bitSize(node, _alloc); - } - void setRoot(BTreeNode::Ref newRoot) { - _tree.setRoot(newRoot, _alloc); - } - BTreeNode::Ref getRoot() const { - return _tree.getRoot(); - } - vespalib::MemoryUsage getMemoryUsage() const { - return _alloc.getMemoryUsage(); - } - - const AggrT & - getAggregated() const - { - return _tree.getAggregated(_alloc); - } - - void - thaw(Iterator &itr) - { - assert(&itr.getAllocator() == &getAllocator()); - _tree.thaw(itr); - } - - template - void - foreach_key(FunctionType func) const - { - _alloc.getNodeStore().foreach_key(_tree.getRoot(), func); - } - - template - void - foreach(FunctionType func) const - { - _alloc.getNodeStore().foreach(_tree.getRoot(), func); - } -}; - -} diff --git a/searchlib/src/vespa/searchlib/btree/btree.hpp b/searchlib/src/vespa/searchlib/btree/btree.hpp deleted file mode 100644 index 928d8d6cfcd..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btree.hpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "btree.h" - -namespace search { -namespace btree { - -template -BTree::BTree() - : _alloc(), - _tree() -{ -} - -template -BTree::~BTree() -{ - clear(); - _alloc.freeze(); - _alloc.clearHoldLists(); -} - - -} // namespace search::btree -} // namespace search - diff --git a/searchlib/src/vespa/searchlib/btree/btree_key_data.cpp b/searchlib/src/vespa/searchlib/btree/btree_key_data.cpp deleted file mode 100644 index f30855e0589..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btree_key_data.cpp +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#include "btree_key_data.h" - -namespace search::btree { - -BTreeNoLeafData BTreeNoLeafData::_instance; - -template class BTreeKeyData; -template class BTreeKeyData; - -} // namespace search::btree diff --git a/searchlib/src/vespa/searchlib/btree/btree_key_data.h b/searchlib/src/vespa/searchlib/btree/btree_key_data.h deleted file mode 100644 index 737651b755d..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btree_key_data.h +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include - -namespace search::btree { - -/** - * Empty class to use as DataT template parameter for BTree classes to - * indicate that leaf nodes have no data (similar to std::set having less - * information than std::map). - */ -class BTreeNoLeafData -{ -public: - static BTreeNoLeafData _instance; -}; - - -template -class BTreeKeyData -{ -public: - using KeyType = KeyT; - using DataType = DataT; - - KeyT _key; - DataT _data; - - BTreeKeyData() - : _key(), - _data() - {} - - BTreeKeyData(const KeyT &key, const DataT &data) - : _key(key), - _data(data) - {} - - void setData(const DataT &data) { _data = data; } - const DataT &getData() const { return _data; } - - /** - * This operator only works when using direct keys. References to - * externally stored keys will not be properly sorted. 
- */ - bool operator<(const BTreeKeyData &rhs) const { - return _key < rhs._key; - } -}; - - -template -class BTreeKeyData -{ -public: - using KeyType = KeyT; - using DataType = BTreeNoLeafData; - - KeyT _key; - - BTreeKeyData() : _key() {} - - BTreeKeyData(const KeyT &key, const BTreeNoLeafData &) - : _key(key) - { - } - - void setData(const BTreeNoLeafData &) { } - const BTreeNoLeafData &getData() const { return BTreeNoLeafData::_instance; } - - /** - * This operator only works when using direct keys. References to - * externally stored keys will not be properly sorted. - */ - bool operator<(const BTreeKeyData &rhs) const { - return _key < rhs._key; - } -}; - -extern template class BTreeKeyData; -extern template class BTreeKeyData; - -} // namespace search::btree diff --git a/searchlib/src/vespa/searchlib/btree/btreeaggregator.cpp b/searchlib/src/vespa/searchlib/btree/btreeaggregator.cpp deleted file mode 100644 index 2eb627192dc..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreeaggregator.cpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#include "btreeaggregator.hpp" -#include "minmaxaggrcalc.h" - -namespace search::btree { - -template class BTreeAggregator; -template class BTreeAggregator; -template class BTreeAggregator; - -} diff --git a/searchlib/src/vespa/searchlib/btree/btreeaggregator.h b/searchlib/src/vespa/searchlib/btree/btreeaggregator.h deleted file mode 100644 index 38c6f579f53..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreeaggregator.h +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "btreenode.h" -#include "btreenodeallocator.h" -#include "btreetraits.h" -#include "noaggrcalc.h" - -namespace search::btree { - -template -class BTreeAggregator -{ -public: - using NodeAllocatorType = BTreeNodeAllocator; - using InternalNodeType = BTreeInternalNode; - using LeafNodeType = BTreeLeafNode; - using AggregatedType = AggrT; - - static AggrT aggregate(const LeafNodeType &node, AggrCalcT aggrCalc); - static AggrT aggregate(const InternalNodeType &node, const NodeAllocatorType &allocator, AggrCalcT aggrCalc); - - static void recalc(LeafNodeType &node, const AggrCalcT &aggrCalc); - - static void recalc(LeafNodeType &node, const NodeAllocatorType &, const AggrCalcT &aggrCalc) { - recalc(node, aggrCalc); - } - - static void recalc(InternalNodeType &node, const NodeAllocatorType &allocator, const AggrCalcT &aggrCalc); - static AggregatedType recalc(LeafNodeType &node, LeafNodeType &splitNode, const AggrCalcT &aggrCalc); - - static AggregatedType recalc(InternalNodeType &node, InternalNodeType &splitNode, - const NodeAllocatorType &allocator, const AggrCalcT &aggrCalc); -}; - -} diff --git a/searchlib/src/vespa/searchlib/btree/btreeaggregator.hpp b/searchlib/src/vespa/searchlib/btree/btreeaggregator.hpp deleted file mode 100644 index e1318ab5a66..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreeaggregator.hpp +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
- -#pragma once - -#include "btreeaggregator.h" - -namespace search::btree { - -template -AggrT -BTreeAggregator::aggregate(const LeafNodeType &node, AggrCalcT aggrCalc) -{ - AggrT a; - for (uint32_t i = 0, ie = node.validSlots(); i < ie; ++i) { - aggrCalc.add(a, aggrCalc.getVal(node.getData(i))); - } - return a; -} - -template -AggrT -BTreeAggregator::aggregate(const InternalNodeType &node, const NodeAllocatorType &allocator, AggrCalcT aggrCalc) -{ - AggrT a; - for (uint32_t i = 0, ie = node.validSlots(); i < ie; ++i) { - const BTreeNode::Ref childRef = node.getChild(i); - const AggrT &ca(allocator.getAggregated(childRef)); - aggrCalc.add(a, ca); - } - return a; -} - -template -void -BTreeAggregator:: -recalc(LeafNodeType &node, const AggrCalcT &aggrCalc) -{ - node.getAggregated() = aggregate(node, aggrCalc); -} - -template -void -BTreeAggregator:: -recalc(InternalNodeType &node, - const NodeAllocatorType &allocator, - const AggrCalcT &aggrCalc) -{ - node.getAggregated() = aggregate(node, allocator, aggrCalc); -} - - -template -typename BTreeAggregator::AggregatedType -BTreeAggregator:: -recalc(LeafNodeType &node, - LeafNodeType &splitNode, - const AggrCalcT &aggrCalc) -{ - AggrT a; - recalc(node, aggrCalc); - recalc(splitNode, aggrCalc); - a = node.getAggregated(); - aggrCalc.add(a, splitNode.getAggregated()); - return a; -} - - -template -typename BTreeAggregator::AggregatedType -BTreeAggregator:: - recalc(InternalNodeType &node, - InternalNodeType &splitNode, - const NodeAllocatorType &allocator, - const AggrCalcT &aggrCalc) -{ - AggrT a; - recalc(node, allocator, aggrCalc); - recalc(splitNode, allocator, aggrCalc); - a = node.getAggregated(); - aggrCalc.add(a, splitNode.getAggregated()); - return a; -} - -} diff --git a/searchlib/src/vespa/searchlib/btree/btreebuilder.cpp b/searchlib/src/vespa/searchlib/btree/btreebuilder.cpp deleted file mode 100644 index 133c5d245c9..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreebuilder.cpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#include "btreenode.hpp" -#include "btreebuilder.hpp" - -namespace search::btree { - -template class BTreeBuilder; -template class BTreeBuilder; -template class BTreeBuilder; - -} diff --git a/searchlib/src/vespa/searchlib/btree/btreebuilder.h b/searchlib/src/vespa/searchlib/btree/btreebuilder.h deleted file mode 100644 index 767f02d03ee..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreebuilder.h +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
- -#pragma once - -#include "btreenode.h" -#include "btreerootbase.h" -#include "btreenodeallocator.h" -#include "noaggrcalc.h" -#include "minmaxaggrcalc.h" -#include "btreeaggregator.h" - -namespace search::btree { - -template -class BTreeBuilder -{ -public: - using NodeAllocatorType = BTreeNodeAllocator; - using BTreeRootBaseType = typename NodeAllocatorType::BTreeRootBaseType; - using InternalNodeType = typename NodeAllocatorType::InternalNodeType; - using LeafNodeType = typename NodeAllocatorType::LeafNodeType; - using Aggregator = BTreeAggregator; -private: - using KeyType = KeyT; - using DataType = DataT; - using InternalNodeTypeRefPair = typename InternalNodeType::RefPair; - using LeafNodeTypeRefPair = typename LeafNodeType::RefPair; - using NodeRef = BTreeNode::Ref; - - NodeAllocatorType &_allocator; - int _numInternalNodes; - int _numLeafNodes; - uint32_t _numInserts; - std::vector _inodes; - LeafNodeTypeRefPair _leaf; - AggrCalcT _defaultAggrCalc; - const AggrCalcT &_aggrCalc; - - void normalize(); - void allocNewLeafNode(); - InternalNodeType *createInternalNode(); -public: - BTreeBuilder(NodeAllocatorType &allocator); - BTreeBuilder(NodeAllocatorType &allocator, const AggrCalcT &aggrCalc); - ~BTreeBuilder(); - - void recursiveDelete(NodeRef node); - void insert(const KeyT &key, const DataT &data); - NodeRef handover(); - void reuse(); - void clear(); -}; - -extern template class BTreeBuilder; -extern template class BTreeBuilder; -extern template class BTreeBuilder; - -} diff --git a/searchlib/src/vespa/searchlib/btree/btreebuilder.hpp b/searchlib/src/vespa/searchlib/btree/btreebuilder.hpp deleted file mode 100644 index fb912499c6c..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreebuilder.hpp +++ /dev/null @@ -1,449 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
- -#pragma once - -#include "btreebuilder.h" - -namespace search::btree { - -template -BTreeBuilder:: -BTreeBuilder(NodeAllocatorType &allocator) - : _allocator(allocator), - _numInternalNodes(0), - _numLeafNodes(0), - _numInserts(0), - _inodes(), - _leaf(), - _defaultAggrCalc(), - _aggrCalc(_defaultAggrCalc) -{ - _leaf = _allocator.allocLeafNode(); - ++_numLeafNodes; -} - - -template -BTreeBuilder:: -BTreeBuilder(NodeAllocatorType &allocator, const AggrCalcT &aggrCalc) - : _allocator(allocator), - _numInternalNodes(0), - _numLeafNodes(0), - _numInserts(0), - _inodes(), - _leaf(), - _defaultAggrCalc(), - _aggrCalc(aggrCalc) -{ - _leaf = _allocator.allocLeafNode(); - ++_numLeafNodes; -} - - -template -BTreeBuilder:: -~BTreeBuilder() -{ - clear(); -} - - -template -void -BTreeBuilder:: -recursiveDelete(NodeRef node) -{ - assert(_allocator.isValidRef(node)); - if (_allocator.isLeafRef(node)) { - _allocator.holdNode(node, _allocator.mapLeafRef(node)); - _numLeafNodes--; - return; - } - InternalNodeType *inode = _allocator.mapInternalRef(node); - for (unsigned int i = 0; i < inode->validSlots(); ++i) { - recursiveDelete(inode->getChild(i)); - } - _allocator.holdNode(node, inode); - _numInternalNodes--; -} - - -template -void -BTreeBuilder:: -normalize() -{ - std::vector leftInodes; // left to rightmost nodes in tree - LeafNodeType *leftLeaf; - NodeRef child; - unsigned int level; - LeafNodeType *leafNode = _leaf.data; - - if (_inodes.size() == 0) { - if (leafNode->validSlots() == 0) { - assert(_numLeafNodes == 1); - assert(_numInserts == 0); - _allocator.holdNode(_leaf.ref, _leaf.data); - _numLeafNodes--; - _leaf = LeafNodeTypeRefPair(NodeRef(), static_cast(nullptr)); - - } - if (AggrCalcT::hasAggregated()) { - Aggregator::recalc(*leafNode, _aggrCalc); - } - assert(_numInserts == leafNode->validSlots()); - return; - } - - if (AggrCalcT::hasAggregated()) { - Aggregator::recalc(*leafNode, _aggrCalc); - } - /* Adjust validLeaves for rightmost nodes */ - for (level = 0; level < _inodes.size(); level++) { - InternalNodeType *inode = _inodes[level].data; - NodeRef lcRef(inode->getLastChild()); - assert(NodeAllocatorType::isValidRef(lcRef)); - assert((level == 0) == _allocator.isLeafRef(lcRef)); - inode->incValidLeaves(_allocator.validLeaves(inode->getLastChild())); - inode->update(inode->validSlots() - 1, - level == 0 ? 
- _allocator.mapLeafRef(lcRef)->getLastKey() : - _allocator.mapInternalRef(lcRef)->getLastKey(), - lcRef); - if (AggrCalcT::hasAggregated()) { - Aggregator::recalc(*inode, _allocator, _aggrCalc); - } - } - for (level = 0; level + 1 < _inodes.size(); level++) { - leftInodes.push_back(NodeRef()); - } - /* Build vector of left to rightmost internal nodes (except root level) */ - level = _inodes.size() - 1; - for (;;) { - NodeRef iRef = _inodes[level].ref; - InternalNodeType *inode = _inodes[level].data; - if (inode->validSlots() < 2) { - /* Use last child of left to rightmost node on level */ - assert(level + 1 < _inodes.size()); - iRef = leftInodes[level]; - inode = _allocator.mapInternalRef(iRef); - assert(inode != nullptr); - assert(inode->validSlots() >= 1); - child = inode->getLastChild(); - } else { - /* Use next to last child of rightmost node on level */ - child = inode->getChild(inode->validSlots() - 2); - } - if (level == 0) - break; - level--; - assert(!_allocator.isLeafRef(child)); - leftInodes[level] = child; - } - /* Remember left to rightmost leaf node */ - assert(_allocator.isLeafRef(child)); - leftLeaf = _allocator.mapLeafRef(child); - - /* Check fanout on rightmost leaf node */ - if (leafNode->validSlots() < LeafNodeType::minSlots()) { - InternalNodeType *pnode = _inodes[0].data; - if (leftLeaf->validSlots() + leafNode->validSlots() < - 2 * LeafNodeType::minSlots()) { - leftLeaf->stealAllFromRightNode(leafNode); - if (pnode->validSlots() == 1) { - InternalNodeType *lpnode = - _allocator.mapInternalRef(leftInodes[0]); - lpnode->incValidLeaves(pnode->validLeaves()); - pnode->setValidLeaves(0); - } - /* Unlink from parent node */ - pnode->remove(pnode->validSlots() - 1); - _allocator.holdNode(_leaf.ref, leafNode); - _numLeafNodes--; - _leaf = LeafNodeTypeRefPair(child, leftLeaf); - if (AggrCalcT::hasAggregated()) { - Aggregator::recalc(*leftLeaf, _aggrCalc); - } - } else { - leafNode->stealSomeFromLeftNode(leftLeaf); - if (AggrCalcT::hasAggregated()) { - Aggregator::recalc(*leftLeaf, _aggrCalc); - Aggregator::recalc(*leafNode, _aggrCalc); - } - if (pnode->validSlots() == 1) { - InternalNodeType *lpnode = - _allocator.mapInternalRef(leftInodes[0]); - uint32_t steal = leafNode->validLeaves() - - pnode->validLeaves(); - pnode->incValidLeaves(steal); - lpnode->decValidLeaves(steal); - if (AggrCalcT::hasAggregated()) { - Aggregator::recalc(*lpnode, _allocator, _aggrCalc); - Aggregator::recalc(*pnode, _allocator, _aggrCalc); - } - } - } - if (pnode->validSlots() > 0) { - uint32_t s = pnode->validSlots() - 1; - LeafNodeType *l = _allocator.mapLeafRef(pnode->getChild(s)); - pnode->writeKey(s, l->getLastKey()); - if (s > 0) { - --s; - l = _allocator.mapLeafRef(pnode->getChild(s)); - pnode->writeKey(s, l->getLastKey()); - } - } - if (!leftInodes.empty() && _allocator.isValidRef(leftInodes[0])) { - InternalNodeType *lpnode = - _allocator.mapInternalRef(leftInodes[0]); - uint32_t s = lpnode->validSlots() - 1; - LeafNodeType *l = _allocator.mapLeafRef(lpnode->getChild(s)); - lpnode->writeKey(s, l->getLastKey()); - } - } - - /* Check fanout on rightmost internal nodes except root node */ - for (level = 0; level + 1 < _inodes.size(); level++) { - InternalNodeType *inode = _inodes[level].data; - NodeRef leftInodeRef = leftInodes[level]; - assert(NodeAllocatorType::isValidRef(leftInodeRef)); - InternalNodeType *leftInode = _allocator.mapInternalRef(leftInodeRef); - - InternalNodeType *pnode = _inodes[level + 1].data; - if (inode->validSlots() < InternalNodeType::minSlots()) { - if 
(leftInode->validSlots() + inode->validSlots() < - 2 * InternalNodeType::minSlots()) { - leftInode->stealAllFromRightNode(inode); - if (pnode->validSlots() == 1) { - InternalNodeType *lpnode = - _allocator.mapInternalRef(leftInodes[level + 1]); - lpnode->incValidLeaves(pnode->validLeaves()); - pnode->setValidLeaves(0); - } - /* Unlink from parent node */ - pnode->remove(pnode->validSlots() - 1); - _allocator.holdNode(_inodes[level].ref, inode); - _numInternalNodes--; - _inodes[level] = InternalNodeTypeRefPair(leftInodeRef, leftInode); - if (AggrCalcT::hasAggregated()) { - Aggregator::recalc(*leftInode, _allocator, _aggrCalc); - } - } else { - inode->stealSomeFromLeftNode(leftInode, _allocator); - if (AggrCalcT::hasAggregated()) { - Aggregator::recalc(*leftInode, _allocator, _aggrCalc); - Aggregator::recalc(*inode, _allocator, _aggrCalc); - } - if (pnode->validSlots() == 1) { - InternalNodeType *lpnode = - _allocator.mapInternalRef(leftInodes[level + 1]); - uint32_t steal = inode->validLeaves() - - pnode->validLeaves(); - pnode->incValidLeaves(steal); - lpnode->decValidLeaves(steal); - if (AggrCalcT::hasAggregated()) { - Aggregator::recalc(*lpnode, _allocator, _aggrCalc); - Aggregator::recalc(*pnode, _allocator, _aggrCalc); - } - } - } - } - if (pnode->validSlots() > 0) { - uint32_t s = pnode->validSlots() - 1; - InternalNodeType *n = - _allocator.mapInternalRef(pnode->getChild(s)); - pnode->writeKey(s, n->getLastKey()); - if (s > 0) { - --s; - n = _allocator.mapInternalRef(pnode->getChild(s)); - pnode->writeKey(s, n->getLastKey()); - } - } - if (level + 1 < leftInodes.size() && - _allocator.isValidRef(leftInodes[level + 1])) { - InternalNodeType *lpnode = - _allocator.mapInternalRef(leftInodes[level + 1]); - uint32_t s = lpnode->validSlots() - 1; - InternalNodeType *n = - _allocator.mapInternalRef(lpnode->getChild(s)); - lpnode->writeKey(s, n->getLastKey()); - } - } - /* Check fanout on root node */ - assert(level < _inodes.size()); - InternalNodeType *inode = _inodes[level].data; - assert(inode != nullptr); - assert(inode->validSlots() >= 1); - if (inode->validSlots() == 1) { - /* Remove top level from proposed tree since fanout is 1 */ - NodeRef iRef = _inodes[level].ref; - _inodes.pop_back(); - _allocator.holdNode(iRef, inode); - _numInternalNodes--; - } - if (!_inodes.empty()) { - assert(_numInserts == _inodes.back().data->validLeaves()); - } else { - assert(_numInserts == _leaf.data->validLeaves()); - } -} - - -template -void -BTreeBuilder:: -allocNewLeafNode() -{ - InternalNodeType *inode; - NodeRef child; - - if (AggrCalcT::hasAggregated()) { - Aggregator::recalc(*_leaf.data, _aggrCalc); - } - LeafNodeTypeRefPair lPair(_allocator.allocLeafNode()); - _numLeafNodes++; - - child = lPair.ref; - - unsigned int level = 0; - for (;;) { - if (level >= _inodes.size()) { - InternalNodeTypeRefPair iPair( - _allocator.allocInternalNode(level + 1)); - inode = iPair.data; - _numInternalNodes++; - if (level > 0) { - InternalNodeType *cnode = _inodes[level - 1].data; - inode->insert(0, cnode->getLastKey(), - _inodes[level - 1].ref); - inode->setValidLeaves(cnode->validLeaves()); - } else { - inode->insert(0, _leaf.data->getLastKey(), _leaf.ref); - inode->setValidLeaves(_leaf.data->validLeaves()); - } - inode->insert(1, KeyType(), child); - _inodes.push_back(iPair); - break; - } - inode = _inodes[level].data; - assert(inode->validSlots() > 0); - NodeRef lcRef(inode->getLastChild()); - inode->incValidLeaves(_allocator.validLeaves(lcRef)); - inode->update(inode->validSlots() - 1, - level == 0 ? 
- _allocator.mapLeafRef(lcRef)->getLastKey() : - _allocator.mapInternalRef(lcRef)->getLastKey(), - lcRef); - if (inode->validSlots() >= InternalNodeType::maxSlots()) { - if (AggrCalcT::hasAggregated()) { - Aggregator::recalc(*inode, _allocator, _aggrCalc); - } - InternalNodeTypeRefPair iPair( - _allocator.allocInternalNode(level + 1)); - inode = iPair.data; - _numInternalNodes++; - inode->insert(0, KeyType(), child); - child = iPair.ref; - level++; - continue; - } - inode->insert(inode->validSlots(), KeyType(), child); - break; - } - while (level > 0) { - assert(inode->validSlots() > 0); - child = inode->getLastChild(); - assert(!_allocator.isLeafRef(child)); - inode = _allocator.mapInternalRef(child); - level--; - _inodes[level] = InternalNodeTypeRefPair(child, inode); - } - _leaf = lPair; -} - - -template -void -BTreeBuilder:: -insert(const KeyT &key, - const DataT &data) -{ - if (_leaf.data->validSlots() >= LeafNodeType::maxSlots()) - allocNewLeafNode(); - LeafNodeType *leaf = _leaf.data; - leaf->insert(leaf->validSlots(), key, data); - ++_numInserts; -} - - -template -typename BTreeBuilder::NodeRef -BTreeBuilder:: -handover() -{ - NodeRef ret; - - normalize(); - - if (!_inodes.empty()) { - ret = _inodes.back().ref; - } else { - ret = _leaf.ref; - } - - _leaf = LeafNodeTypeRefPair(NodeRef(), static_cast(nullptr)); - - _inodes.clear(); - _numInternalNodes = 0; - _numLeafNodes = 0; - return ret; -} - - -template -void -BTreeBuilder:: -reuse() -{ - clear(); - _leaf = _allocator.allocLeafNode(); - ++_numLeafNodes; - _numInserts = 0u; -} - - -template -void -BTreeBuilder:: -clear() -{ - if (!_inodes.empty()) { - recursiveDelete(_inodes.back().ref); - _leaf = LeafNodeTypeRefPair(NodeRef(), static_cast(nullptr)); - _inodes.clear(); - } - if (NodeAllocatorType::isValidRef(_leaf.ref)) { - assert(_leaf.data != nullptr); - assert(_numLeafNodes == 1); - _allocator.holdNode(_leaf.ref, _leaf.data); - --_numLeafNodes; - _leaf = LeafNodeTypeRefPair(NodeRef(), static_cast(nullptr)); - } else { - assert(_leaf.data == nullptr); - } - assert(_numLeafNodes == 0); - assert(_numInternalNodes == 0); -} - -} diff --git a/searchlib/src/vespa/searchlib/btree/btreeinserter.cpp b/searchlib/src/vespa/searchlib/btree/btreeinserter.cpp deleted file mode 100644 index f307c474f90..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreeinserter.cpp +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#include "btreeinserter.h" -#include "btreenodeallocator.h" -#include "btreerootbase.hpp" -#include "btreeinserter.hpp" -#include "btreenode.hpp" - -#include -LOG_SETUP(".searchlib.btree.btreeinserter"); - -namespace search::btree { - -template class BTreeInserter; -template class BTreeInserter; -template class BTreeInserter, - BTreeDefaultTraits, - MinMaxAggrCalc>; - -} diff --git a/searchlib/src/vespa/searchlib/btree/btreeinserter.h b/searchlib/src/vespa/searchlib/btree/btreeinserter.h deleted file mode 100644 index a3fa2916a88..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreeinserter.h +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
- -#pragma once - -#include "btreenode.h" -#include "btreenodeallocator.h" -#include "btreerootbase.h" -#include "btreeaggregator.h" -#include "noaggrcalc.h" -#include "minmaxaggrcalc.h" -#include "btreeiterator.h" - -namespace search -{ - -namespace btree -{ - -template , - typename TraitsT = BTreeDefaultTraits, - class AggrCalcT = NoAggrCalc> -class BTreeInserter -{ -public: - typedef BTreeNodeAllocator NodeAllocatorType; - typedef BTreeAggregator Aggregator; - typedef BTreeIterator Iterator; - typedef BTreeInternalNode - InternalNodeType; - typedef BTreeLeafNode - LeafNodeType; - typedef KeyT KeyType; - typedef DataT DataType; - typedef typename InternalNodeType::RefPair InternalNodeTypeRefPair; - typedef typename LeafNodeType::RefPair LeafNodeTypeRefPair; - using Inserter = BTreeInserter; - -private: - static void rebalanceLeafEntries(LeafNodeType *leafNode, Iterator &itr, AggrCalcT aggrCalc); - -public: - static void - insert(BTreeNode::Ref &root, - Iterator &itr, - const KeyType &key, const DataType &data, - const AggrCalcT &aggrCalc); -}; - -extern template class BTreeInserter; -extern template class BTreeInserter; -extern template class BTreeInserter, - BTreeDefaultTraits, - MinMaxAggrCalc>; - -} // namespace search::btree -} // namespace search - diff --git a/searchlib/src/vespa/searchlib/btree/btreeinserter.hpp b/searchlib/src/vespa/searchlib/btree/btreeinserter.hpp deleted file mode 100644 index d1da94c1b17..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreeinserter.hpp +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "btreeinserter.h" -#include "btreerootbase.hpp" -#include "btreeiterator.hpp" -#include - -namespace search { -namespace btree { - -namespace { - -template -void -considerThawNode(NodeType *&node, BTreeNode::Ref &ref, NodeAllocatorType &allocator) -{ - if (node->getFrozen()) { - auto thawed = allocator.thawNode(ref, node); - ref = thawed.ref; - node = thawed.data; - } -} - -} - -template -void -BTreeInserter::rebalanceLeafEntries(LeafNodeType *leafNode, Iterator &itr, AggrCalcT aggrCalc) -{ - NodeAllocatorType &allocator(itr.getAllocator()); - auto &pathElem = itr.getPath(0); - InternalNodeType *parentNode = pathElem.getWNode(); - uint32_t parentIdx = pathElem.getIdx(); - BTreeNode::Ref leafRef = parentNode->getChild(parentIdx); - BTreeNode::Ref leftRef = BTreeNode::Ref(); - LeafNodeType *leftNode = nullptr; - BTreeNode::Ref rightRef = BTreeNode::Ref(); - LeafNodeType *rightNode = nullptr; - if (parentIdx > 0) { - leftRef = parentNode->getChild(parentIdx - 1); - leftNode = allocator.mapLeafRef(leftRef); - } - if (parentIdx + 1 < parentNode->validSlots()) { - rightRef = parentNode->getChild(parentIdx + 1); - rightNode = allocator.mapLeafRef(rightRef); - } - if (leftNode != nullptr && leftNode->validSlots() < LeafNodeType::maxSlots() && - (rightNode == nullptr || leftNode->validSlots() < rightNode->validSlots())) { - considerThawNode(leftNode, leftRef, allocator); - uint32_t oldLeftValid = leftNode->validSlots(); - if (itr.getLeafNodeIdx() == 0 && (oldLeftValid + 1 == LeafNodeType::maxSlots())) { - parentNode->update(parentIdx - 1, leftNode->getLastKey(), leftRef); - itr.adjustGivenNoEntriesToLeftLeafNode(); - } else { - leftNode->stealSomeFromRightNode(leafNode, allocator); - uint32_t given = leftNode->validSlots() - oldLeftValid; - parentNode->update(parentIdx, leafNode->getLastKey(), leafRef); - parentNode->update(parentIdx - 
1, leftNode->getLastKey(), leftRef); - if (AggrCalcT::hasAggregated()) { - Aggregator::recalc(*leftNode, allocator, aggrCalc); - Aggregator::recalc(*leafNode, allocator, aggrCalc); - } - itr.adjustGivenEntriesToLeftLeafNode(given); - } - } else if (rightNode != nullptr && rightNode->validSlots() < LeafNodeType::maxSlots()) { - considerThawNode(rightNode, rightRef, allocator); - rightNode->stealSomeFromLeftNode(leafNode, allocator); - parentNode->update(parentIdx, leafNode->getLastKey(), leafRef); - parentNode->update(parentIdx + 1, rightNode->getLastKey(), rightRef); - if (AggrCalcT::hasAggregated()) { - Aggregator::recalc(*rightNode, allocator, aggrCalc); - Aggregator::recalc(*leafNode, allocator, aggrCalc); - } - itr.adjustGivenEntriesToRightLeafNode(); - } -} - -template -void -BTreeInserter:: -insert(BTreeNode::Ref &root, - Iterator &itr, - const KeyType &key, const DataType &data, - const AggrCalcT &aggrCalc) -{ - if (!NodeAllocatorType::isValidRef(root)) { - root = itr.insertFirst(key, data, aggrCalc); - return; - } - NodeAllocatorType &allocator(itr.getAllocator()); - bool inRange = itr.valid(); - if (!inRange) { - --itr; - } - root = itr.thaw(root); - LeafNodeType *lnode = itr.getLeafNode(); - if (lnode->isFull() && itr.getPathSize() > 0) { - rebalanceLeafEntries(lnode, itr, aggrCalc); - lnode = itr.getLeafNode(); - } - uint32_t idx = itr.getLeafNodeIdx() + (inRange ? 0 : 1); - BTreeNode::Ref splitNodeRef; - const KeyT *splitLastKey = nullptr; - bool inRightSplit = false; - AggrT oldca(AggrCalcT::hasAggregated() ? lnode->getAggregated() : AggrT()); - AggrT ca; - if (lnode->isFull()) { - LeafNodeTypeRefPair splitNode = allocator.allocLeafNode(); - lnode->splitInsert(splitNode.data, idx, key, data); - if (AggrCalcT::hasAggregated()) { - ca = Aggregator::recalc(*lnode, *splitNode.data, aggrCalc); - } - splitNodeRef = splitNode.ref; // to signal that a split occured - splitLastKey = &splitNode.data->getLastKey(); - inRightSplit = itr.setLeafNodeIdx(idx, splitNode.data); - } else { - lnode->insert(idx, key, data); - itr.setLeafNodeIdx(idx); - if (AggrCalcT::hasAggregated()) { - aggrCalc.add(lnode->getAggregated(), aggrCalc.getVal(data)); - ca = lnode->getAggregated(); - } - } - const KeyT *lastKey = &lnode->getLastKey(); - uint32_t level = 0; - uint32_t levels = itr.getPathSize(); - for (; level < levels; ++level) { - typename Iterator::PathElement &pe = itr.getPath(level); - InternalNodeType *node(pe.getWNode()); - idx = pe.getIdx(); - AggrT olda(AggrCalcT::hasAggregated() ? 
- node->getAggregated() : AggrT()); - BTreeNode::Ref subNode = node->getChild(idx); - node->update(idx, *lastKey, subNode); - node->incValidLeaves(1); - if (NodeAllocatorType::isValidRef(splitNodeRef)) { - idx++; // the extra node is inserted in the next slot - if (node->isFull()) { - InternalNodeTypeRefPair splitNode = - allocator.allocInternalNode(level + 1); - node->splitInsert(splitNode.data, idx, - *splitLastKey, splitNodeRef, allocator); - inRightSplit = pe.adjustSplit(inRightSplit, splitNode.data); - if (AggrCalcT::hasAggregated()) { - ca = Aggregator::recalc(*node, *splitNode.data, - allocator, aggrCalc); - } - splitNodeRef = splitNode.ref; - splitLastKey = &splitNode.data->getLastKey(); - } else { - node->insert(idx, *splitLastKey, splitNodeRef); - pe.adjustSplit(inRightSplit); - inRightSplit = false; - if (AggrCalcT::hasAggregated()) { - aggrCalc.add(node->getAggregated(), oldca, ca); - ca = node->getAggregated(); - } - splitNodeRef = BTreeNode::Ref(); - splitLastKey = nullptr; - } - } else { - if (AggrCalcT::hasAggregated()) { - aggrCalc.add(node->getAggregated(), oldca, ca); - ca = node->getAggregated(); - } - } - if (AggrCalcT::hasAggregated()) { - oldca = olda; - } - lastKey = &node->getLastKey(); - } - if (NodeAllocatorType::isValidRef(splitNodeRef)) { - root = itr.addLevel(root, splitNodeRef, inRightSplit, aggrCalc); - } -} - - -} // namespace search::btree -} // namespace search - diff --git a/searchlib/src/vespa/searchlib/btree/btreeiterator.cpp b/searchlib/src/vespa/searchlib/btree/btreeiterator.cpp deleted file mode 100644 index 9444cee975d..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreeiterator.cpp +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#include "btreeiterator.h" -#include "btreeroot.h" -#include "btreenodeallocator.h" -#include "btreeiterator.hpp" -#include "btreenode.hpp" - -namespace search::btree { - -template class BTreeIteratorBase; -template class BTreeIteratorBase; -template class BTreeIteratorBase; -template class BTreeConstIterator; -template class BTreeConstIterator; -template class BTreeConstIterator; -template class BTreeIterator; -template class BTreeIterator; -template class BTreeIterator; - -} diff --git a/searchlib/src/vespa/searchlib/btree/btreeiterator.h b/searchlib/src/vespa/searchlib/btree/btreeiterator.h deleted file mode 100644 index de9637c00f1..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreeiterator.h +++ /dev/null @@ -1,884 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "btreenode.h" -#include "btreenodeallocator.h" -#include "btreetraits.h" -#include - -namespace search::btree { - -template -class BTreeInserter; -template -class BTreeRemoverBase; -template -class BTreeRemover; -template -class BTreeIterator; - -/** - * Helper class to provide internal or leaf node and position within node. 
- */ -template -class NodeElement -{ - template - friend class BTreeInserter; - template - friend class BTreeRemoverBase; - template - friend class BTreeRemover; - template - friend class BTreeIterator; - - typedef NodeT NodeType; - typedef typename NodeType::KeyType KeyType; - typedef typename NodeType::DataType DataType; - const NodeType *_node; - uint32_t _idx; - - NodeType * - getWNode() const - { - return const_cast(_node); - } - -public: - NodeElement() - : _node(nullptr), - _idx(0u) - { - } - - NodeElement(const NodeType *node, uint32_t idx) - : _node(node), - _idx(idx) - { - } - - void - setNode(const NodeType *node) - { - _node = node; - } - - const NodeType * - getNode() const - { - return _node; - } - - void - setIdx(uint32_t idx) - { - _idx = idx; - } - - uint32_t - getIdx() const - { - return _idx; - } - - void - incIdx() - { - ++_idx; - } - - void - decIdx() - { - --_idx; - } - - void - setNodeAndIdx(const NodeType *node, uint32_t idx) - { - _node = node; - _idx = idx; - } - - const KeyType & - getKey() const - { - return _node->getKey(_idx); - } - - const DataType & - getData() const - { - return _node->getData(_idx); - } - - bool - valid() const - { - return _node != nullptr; - } - - void - adjustLeftVictimKilled() - { - assert(_idx > 0); - --_idx; - } - - void - adjustSteal(uint32_t stolen) - { - assert(_idx + stolen < _node->validSlots()); - _idx += stolen; - } - - void - adjustSplit(bool inRightSplit) - { - if (inRightSplit) - ++_idx; - } - - bool - adjustSplit(bool inRightSplit, const NodeType *splitNode) - { - adjustSplit(inRightSplit); - if (_idx >= _node->validSlots()) { - _idx -= _node->validSlots(); - _node = splitNode; - return true; - } - return false; - } - - void - swap(NodeElement &rhs) - { - std::swap(_node, rhs._node); - std::swap(_idx, rhs._idx); - } - - bool - operator!=(const NodeElement &rhs) const - { - return _node != rhs._node || - _idx != rhs._idx; - } -}; - - -/** - * Base class for B-tree iterators. It defines all members needed - * for the iterator and methods that don't depend on tree ordering. - */ -template -class BTreeIteratorBase -{ -protected: - typedef BTreeNodeAllocator NodeAllocatorType; - typedef BTreeInternalNode InternalNodeType; - typedef BTreeLeafNode LeafNodeType; - typedef typename InternalNodeType::RefPair InternalNodeTypeRefPair; - typedef typename LeafNodeType::RefPair LeafNodeTypeRefPair; - typedef BTreeLeafNodeTemp LeafNodeTempType; - typedef BTreeKeyData KeyDataType; - typedef KeyT KeyType; - typedef DataT DataType; - template - friend class BTreeInserter; - template - friend class BTreeRemoverBase; - template - friend class BTreeRemover; - - typedef NodeElement LeafElement; - - /** - * Current leaf node and current index within it. - */ - LeafElement _leaf; - /** - * Pointer to internal node and index to the child used to - * traverse down the tree - */ - typedef NodeElement PathElement; - /** - * Path from current leaf node up to the root (path[0] is the - * parent of the leaf node) - */ - PathElement _path[PATH_SIZE]; - size_t _pathSize; - - const NodeAllocatorType *_allocator; - - const LeafNodeType *_leafRoot; // Root node for small tree/array - - // Temporary leaf node when iterating over short arrays - std::unique_ptr _compatLeafNode; - -private: - /* - * Find the next leaf node, called by operator++() as needed. - */ - void findNextLeafNode(); - - /* - * Find the previous leaf node, called by operator--() as needed. - */ - VESPA_DLL_LOCAL void findPrevLeafNode(); - -protected: - /* - * Report current position in tree. 
- * - * @param pidx Number of levels above leaf nodes to take into account. - */ - size_t - position(uint32_t pidx) const; - - /** - * Create iterator pointing to first element in the tree referenced - * by root. - * - * @param root Reference to root of tree - * @param allocator B-tree node allocator helper class. - */ - BTreeIteratorBase(BTreeNode::Ref root, const NodeAllocatorType &allocator); - - /** - * Compability constructor, creating a temporary tree with only a - * temporary leaf node owned by the iterator. - */ - template - BTreeIteratorBase(const KeyDataType *shortArray, - uint32_t arraySize, - const NodeAllocatorType &allocator, - const AggrCalcT &aggrCalc); - - /** - * Default constructor. Iterator is not associated with a tree. - */ - BTreeIteratorBase(); - - /** - * Step iterator forwards. If at end then leave it at end. - */ - BTreeIteratorBase & - operator++() { - if (_leaf.getNode() == nullptr) { - return *this; - } - _leaf.incIdx(); - if (_leaf.getIdx() < _leaf.getNode()->validSlots()) { - return *this; - } - findNextLeafNode(); - return *this; - } - - /** - * Step iterator backwards. If at end then place it at last valid - * position in tree (cf. rbegin()) - */ - BTreeIteratorBase & - operator--(); - - ~BTreeIteratorBase(); - BTreeIteratorBase(const BTreeIteratorBase &other); - BTreeIteratorBase &operator=(const BTreeIteratorBase &other); - - - /** - * Set new tree height and clear portions of path that are now - * beyond new tree height. For internal use only. - * - * @param pathSize New tree height (number of levels of internal nodes) - */ - void - clearPath(uint32_t pathSize); -public: - - bool - operator==(const BTreeIteratorBase & rhs) const { - if (_leaf.getNode() != rhs._leaf.getNode() || - _leaf.getIdx() != rhs._leaf.getIdx()) { - return false; - } - return true; - } - - bool - operator!=(const BTreeIteratorBase & rhs) const - { - return !operator==(rhs); - } - - /** - * Swap iterator with the other. - * - * @param rhs Other iterator. - */ - void - swap(BTreeIteratorBase & rhs); - - /** - * Get key at current iterator location. - */ - const KeyType & - getKey() const - { - return _leaf.getKey(); - } - - /** - * Get data at current iterator location. - */ - const DataType & - getData() const - { - return _leaf.getData(); - } - - /** - * Check if iterator is at a valid element, i.e. not at end. - */ - bool - valid() const - { - return _leaf.valid(); - } - - /** - * Return the number of elements in the tree. - */ - size_t - size() const; - - - /** - * Return the current position in the tree. - */ - size_t - position() const - { - return position(_pathSize); - } - - /** - * Return the distance between two positions in the tree. - */ - ssize_t - operator-(const BTreeIteratorBase &rhs) const; - - /** - * Return if the tree has data or not (e.g. keys and data or only keys). - */ - static bool - hasData() - { - return LeafNodeType::hasData(); - } - - /** - * Move the iterator directly to end. Used by findHelper method in BTree. - */ - void - setupEnd(); - - /** - * Setup iterator to be empty and not be associated with any tree. - */ - void - setupEmpty(); - - /** - * Move iterator to beyond last element in the current tree. - */ - void - end() __attribute__((noinline)); - - /** - * Move iterator to beyond last element in the given tree. - * - * @param rootRef Reference to root of tree. - */ - void - end(BTreeNode::Ref rootRef); - - /** - * Move iterator to first element in the current tree. 
- */ - void - begin(); - - /** - * Move iterator to first element in the given tree. - * - * @param rootRef Reference to root of tree. - */ - void - begin(BTreeNode::Ref rootRef); - - /** - * Move iterator to last element in the current tree. - */ - void - rbegin(); - - /* - * Get aggregated values for the current tree. - */ - const AggrT & - getAggregated() const; - - bool - identical(const BTreeIteratorBase &rhs) const; - - template - void - foreach_key(FunctionType func) const - { - if (_pathSize > 0) { - _path[_pathSize - 1].getNode()-> - foreach_key(_allocator->getNodeStore(), func); - } else if (_leafRoot != nullptr) { - _leafRoot->foreach_key(func); - } - } -}; - - -/** - * Iterator class for read access to B-trees. It defines methods to - * navigate in the tree, useable for implementing search iterators and - * for positioning in preparation for tree changes (cf. BTreeInserter and - * BTreeRemover). - */ -template , - typename TraitsT = BTreeDefaultTraits> -class BTreeConstIterator : public BTreeIteratorBase -{ -protected: - typedef BTreeIteratorBase ParentType; - typedef typename ParentType::NodeAllocatorType NodeAllocatorType; - typedef typename ParentType::InternalNodeType InternalNodeType; - typedef typename ParentType::LeafNodeType LeafNodeType; - typedef typename ParentType::InternalNodeTypeRefPair - InternalNodeTypeRefPair; - typedef typename ParentType::LeafNodeTypeRefPair LeafNodeTypeRefPair; - typedef typename ParentType::LeafNodeTempType LeafNodeTempType; - typedef typename ParentType::KeyDataType KeyDataType; - typedef typename ParentType::KeyType KeyType; - typedef typename ParentType::DataType DataType; - typedef typename ParentType::PathElement PathElement; - - using ParentType::_leaf; - using ParentType::_path; - using ParentType::_pathSize; - using ParentType::_allocator; - using ParentType::_leafRoot; - using ParentType::_compatLeafNode; - using ParentType::clearPath; - using ParentType::setupEmpty; -public: - using ParentType::end; - -protected: - /** Pointer to seek node and path index to the parent node **/ - typedef std::pair SeekNode; - -public: - /** - * Create iterator pointing to first element in the tree referenced - * by root. - * - * @param root Reference to root of tree - * @param allocator B-tree node allocator helper class. - */ - BTreeConstIterator(BTreeNode::Ref root, const NodeAllocatorType &allocator) - : ParentType(root, allocator) - { - } - - /** - * Compability constructor, creating a temporary tree with only a - * temporary leaf node owned by the iterator. - */ - template - BTreeConstIterator(const KeyDataType *shortArray, - uint32_t arraySize, - const NodeAllocatorType &allocator, - const AggrCalcT &aggrCalc) - : ParentType(shortArray, arraySize, allocator, aggrCalc) - { - } - - /** - * Default constructor. Iterator is not associated with a tree. - */ - BTreeConstIterator() - : ParentType() - { - } - - /** - * Step iterator forwards. If at end then leave it at end. - */ - BTreeConstIterator & - operator++() - { - ParentType::operator++(); - return *this; - } - - /** - * Step iterator backwards. If at end then place it at last valid - * position in tree (cf. rbegin()) - */ - BTreeConstIterator & - operator--() - { - ParentType::operator--(); - return *this; - } - - /** - * Position iterator at first position with a key that is greater - * than or equal to the key argument. The iterator must be set up - * for the same tree before this method is called. - * - * @param key Key to search for - * @param comp Comparator for the tree ordering. 
- */ - void - lower_bound(const KeyType & key, CompareT comp = CompareT()); - - /** - * Position iterator at first position with a key that is greater - * than or equal to the key argument in the tree referenced by rootRef. - * - * @param key Key to search for - * @param comp Comparator for the tree ordering. - */ - void - lower_bound(BTreeNode::Ref rootRef, - const KeyType & key, CompareT comp = CompareT()); - - /** - * Step iterator forwards until it is at a position with a key - * that is greater than or equal to the key argument. Original - * position must be valid with a key that is less than the key argument. - * - * Tree traits determine if binary or linear search is performed within - * each tree node. - * - * @param key Key to search for - * @param comp Comparator for the tree ordering. - */ - void - seek(const KeyType &key, CompareT comp = CompareT()); - - /** - * Step iterator forwards until it is at a position with a key - * that is greater than or equal to the key argument. Original - * position must be valid with a key that is less than the key argument. - * - * Binary search is performed within each tree node. - * - * @param key Key to search for - * @param comp Comparator for the tree ordering. - */ - void - binarySeek(const KeyType &key, CompareT comp = CompareT()); - - /** - * Step iterator forwards until it is at a position with a key - * that is greater than or equal to the key argument. Original - * position must be valid with a key that is less than the key argument. - * - * Linear search is performed within each tree node. - * - * @param key Key to search for - * @param comp Comparator for the tree ordering. - */ - void - linearSeek(const KeyType &key, CompareT comp = CompareT()); - - /** - * Step iterator forwards until it is at a position with a key - * that is greater than the key argument. Original position must - * be valid with a key that is less than or equal to the key argument. - * - * Tree traits determine if binary or linear search is performed within - * each tree node. - * - * @param key Key to search for - * @param comp Comparator for the tree ordering. - */ - void - seekPast(const KeyType &key, CompareT comp = CompareT()); - - /** - * Step iterator forwards until it is at a position with a key - * that is greater than the key argument. Original position must - * be valid with a key that is less than or equal to the key argument. - * - * Binary search is performed within each tree node. - * - * @param key Key to search for - * @param comp Comparator for the tree ordering. - */ - void - binarySeekPast(const KeyType &key, CompareT comp = CompareT()); - - /** - * Step iterator forwards until it is at a position with a key - * that is greater than the key argument. Original position must - * be valid with a key that is less than or equal to the key argument. - * - * Linear search is performed within each tree node. - * - * @param key Key to search for - * @param comp Comparator for the tree ordering. - */ - void - linearSeekPast(const KeyType &key, CompareT comp = CompareT()); - - /** - * Validate the iterator as a valid iterator or positioned at - * end in the tree referenced by rootRef. Validation failure - * triggers asserts. This method is for internal debugging use only. - * - * @param rootRef Reference to root of tree to operate on - * @param comp Comparator for the tree ordering. - */ - void - validate(BTreeNode::Ref rootRef, CompareT comp = CompareT()); -}; - - -/** - * Iterator class for write access to B-trees. 
It contains some helper - * methods used by BTreeInserter and BTreeRemover when modifying a tree. - */ -template , - typename TraitsT = BTreeDefaultTraits> -class BTreeIterator : public BTreeConstIterator -{ -public: - typedef BTreeConstIterator ParentType; - typedef typename ParentType::NodeAllocatorType NodeAllocatorType; - typedef typename ParentType::InternalNodeType InternalNodeType; - typedef typename ParentType::LeafNodeType LeafNodeType; - typedef typename ParentType::InternalNodeTypeRefPair - InternalNodeTypeRefPair; - typedef typename ParentType::LeafNodeTypeRefPair LeafNodeTypeRefPair; - typedef typename ParentType::LeafNodeTempType LeafNodeTempType; - typedef typename ParentType::KeyDataType KeyDataType; - typedef typename ParentType::KeyType KeyType; - typedef typename ParentType::DataType DataType; - typedef typename ParentType::PathElement PathElement; - template - friend class BTreeInserter; - template - friend class BTreeRemoverBase; - template - friend class BTreeRemover; - - using ParentType::_leaf; - using ParentType::_path; - using ParentType::_pathSize; - using ParentType::_allocator; - using ParentType::_leafRoot; - using ParentType::_compatLeafNode; - using ParentType::end; - using EntryRef = datastore::EntryRef; - - BTreeIterator(BTreeNode::Ref root, const NodeAllocatorType &allocator) - : ParentType(root, allocator) - { - } - - template - BTreeIterator(const KeyDataType *shortArray, - uint32_t arraySize, - const NodeAllocatorType &allocator, - const AggrCalcT &aggrCalc) - : ParentType(shortArray, arraySize, allocator, aggrCalc) - { - } - - BTreeIterator() - : ParentType() - { - } - - BTreeIterator & - operator++() - { - ParentType::operator++(); - return *this; - } - - BTreeIterator & - operator--() - { - ParentType::operator--(); - return *this; - } - - NodeAllocatorType & - getAllocator() const - { - return const_cast(*_allocator); - } - - BTreeNode::Ref - moveFirstLeafNode(BTreeNode::Ref rootRef); - - void - moveNextLeafNode(); - - void - writeData(const DataType &data) - { - _leaf.getWNode()->writeData(_leaf.getIdx(), data); - } - - /** - * Set a new key for the current iterator position. - * The new key must have the same semantic meaning as the old key. - * Typically used when compacting data store containing keys. - */ - void - writeKey(const KeyType &key); - - /** - * Updata data at the current iterator position. The tree should - * have been thawed. - * - * @param data New data value - * @param aggrCalc Calculator for updating aggregated information. - */ - template - void - updateData(const DataType &data, const AggrCalcT &aggrCalc); - - /** - * Thaw a path from the root node down the the current leaf node in - * the current tree, allowing for updates to be performed without - * disturbing the frozen version of the tree. 
- */ - BTreeNode::Ref - thaw(BTreeNode::Ref rootRef); - -private: - /* Insert into empty tree */ - template - BTreeNode::Ref - insertFirst(const KeyType &key, const DataType &data, - const AggrCalcT &aggrCalc); - - LeafNodeType * - getLeafNode() const - { - return _leaf.getWNode(); - } - - bool - setLeafNodeIdx(uint32_t idx, const LeafNodeType *splitLeafNode); - - void - setLeafNodeIdx(uint32_t idx) - { - _leaf.setIdx(idx); - } - - uint32_t - getLeafNodeIdx() const - { - return _leaf.getIdx(); - } - - uint32_t - getPathSize() const - { - return _pathSize; - } - - PathElement & - getPath(uint32_t pidx) - { - return _path[pidx]; - } - - template - BTreeNode::Ref - addLevel(BTreeNode::Ref rootRef, BTreeNode::Ref splitNodeRef, - bool inRightSplit, const AggrCalcT &aggrCalc); - - BTreeNode::Ref - removeLevel(BTreeNode::Ref rootRef, InternalNodeType *rootNode); - - void - removeLast(BTreeNode::Ref rootRef); - - void - adjustSteal(uint32_t level, bool leftVictimKilled, uint32_t stolen) - { - assert(_pathSize > level); - if (leftVictimKilled) { - _path[level].adjustLeftVictimKilled(); - } - if (stolen != 0) { - if (level > 0) - _path[level - 1].adjustSteal(stolen); - else - _leaf.adjustSteal(stolen); - } - } - - void adjustGivenNoEntriesToLeftLeafNode(); - void adjustGivenEntriesToLeftLeafNode(uint32_t given); - void adjustGivenEntriesToRightLeafNode(); -}; - -extern template class BTreeIteratorBase; -extern template class BTreeIteratorBase; -extern template class BTreeIteratorBase; -extern template class BTreeConstIterator; -extern template class BTreeConstIterator; -extern template class BTreeConstIterator; -extern template class BTreeIterator; -extern template class BTreeIterator; -extern template class BTreeIterator; - -} - diff --git a/searchlib/src/vespa/searchlib/btree/btreeiterator.hpp b/searchlib/src/vespa/searchlib/btree/btreeiterator.hpp deleted file mode 100644 index b26f249c51b..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreeiterator.hpp +++ /dev/null @@ -1,1361 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
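/*
 * Illustrative sketch (not from the original sources): the positioning
 * contract documented in btreeiterator.h above -- lower_bound() may start
 * from anywhere, seek() assumes the current key is smaller than the target,
 * and seekPast() ends up strictly past all keys equal to the target --
 * demonstrated on a flat sorted array instead of a real B-tree.  FlatCursor
 * and its members are made-up names, not part of the searchlib API.
 */
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

struct FlatCursor {
    const std::vector<int>* keys = nullptr;
    size_t pos = 0;

    void lower_bound(int key) {
        pos = std::lower_bound(keys->begin(), keys->end(), key) - keys->begin();
    }
    // Precondition (as documented for the iterator): (*keys)[pos] < key.
    void seek(int key) {
        pos = std::lower_bound(keys->begin() + pos + 1, keys->end(), key) - keys->begin();
    }
    // Precondition: (*keys)[pos] <= key; afterwards the key at pos is > key.
    void seekPast(int key) {
        pos = std::upper_bound(keys->begin() + pos, keys->end(), key) - keys->begin();
    }
    bool valid() const { return pos < keys->size(); }
};

int main() {
    std::vector<int> sorted{1, 3, 3, 7, 9, 12};
    FlatCursor it;
    it.keys = &sorted;
    it.lower_bound(3);   assert(it.pos == 1);          // first key >= 3
    it.seek(8);          assert(sorted[it.pos] == 9);  // first key >= 8
    it.seekPast(9);      assert(sorted[it.pos] == 12); // first key > 9
    return it.valid() ? 0 : 1;
}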
- -#pragma once - -#include "btreeiterator.h" -#include "btreeaggregator.h" -#include "btreenode.hpp" -#include - -namespace search::btree { - -#define STRICT_BTREE_ITERATOR_SEEK - -template -BTreeIteratorBase:: -BTreeIteratorBase(const BTreeIteratorBase &other) - : _leaf(other._leaf), - _pathSize(other._pathSize), - _allocator(other._allocator), - _leafRoot(other._leafRoot), - _compatLeafNode() -{ - for (size_t i = 0; i < _pathSize; ++i) { - _path[i] = other._path[i]; - } - if (other._compatLeafNode.get()) { - _compatLeafNode.reset( new LeafNodeTempType(*other._compatLeafNode)); - } - if (other._leaf.getNode() == other._compatLeafNode.get()) { - _leaf.setNode(_compatLeafNode.get()); - } - if (other._leafRoot == other._compatLeafNode.get()) { - _leafRoot = _compatLeafNode.get(); - } -} - -template -void -BTreeIteratorBase:: -swap(BTreeIteratorBase & other) -{ - std::swap(_leaf, other._leaf); - std::swap(_pathSize, other._pathSize); - std::swap(_path, other._path); - std::swap(_allocator, other._allocator); - std::swap(_leafRoot, other._leafRoot); - std::swap(_compatLeafNode, other._compatLeafNode); -} - - -template -void -BTreeIteratorBase:: -clearPath(uint32_t pathSize) -{ - uint32_t level = _pathSize; - while (level > pathSize) { - --level; - _path[level].setNodeAndIdx(nullptr, 0u); - } - _pathSize = pathSize; -} - - -template -BTreeIteratorBase & -BTreeIteratorBase:: -operator=(const BTreeIteratorBase &other) -{ - if (&other == this) { - return *this; - } - BTreeIteratorBase tmp(other); - swap(tmp); - return *this; -} - -template -BTreeIteratorBase:: -~BTreeIteratorBase() -{ -} - -template -void -BTreeIteratorBase:: -setupEnd() -{ - _leaf.setNodeAndIdx(nullptr, 0u); -} - - -template -void -BTreeIteratorBase:: -setupEmpty() -{ - clearPath(0u); - _leaf.setNodeAndIdx(nullptr, 0u); - _leafRoot = nullptr; -} - - -template -void -BTreeIteratorBase:: -end() -{ - if (_pathSize == 0) { - if (_leafRoot == nullptr) - return; - _leaf.setNodeAndIdx(nullptr, 0u); - return; - } - uint32_t level = _pathSize - 1; - PathElement &pe = _path[level]; - const InternalNodeType *inode = pe.getNode(); - uint32_t idx = inode->validSlots(); - pe.setIdx(idx); - BTreeNode::Ref childRef = inode->getChild(idx - 1); - while (level > 0) { - --level; - assert(!_allocator->isLeafRef(childRef)); - inode = _allocator->mapInternalRef(childRef); - idx = inode->validSlots(); - _path[level].setNodeAndIdx(inode, idx); - childRef = inode->getChild(idx - 1); - assert(childRef.valid()); - } - assert(_allocator->isLeafRef(childRef)); - _leaf.setNodeAndIdx(nullptr, 0u); -} - - -template -void -BTreeIteratorBase:: -end(BTreeNode::Ref rootRef) -{ - if (!rootRef.valid()) { - setupEmpty(); - return; - } - if (_allocator->isLeafRef(rootRef)) { - clearPath(0u); - const LeafNodeType *lnode = _allocator->mapLeafRef(rootRef); - _leafRoot = lnode; - _leaf.setNodeAndIdx(nullptr, 0u); - return; - } - _leafRoot = nullptr; - const InternalNodeType *inode = _allocator->mapInternalRef(rootRef); - uint32_t idx = inode->validSlots(); - uint32_t pidx = inode->getLevel(); - clearPath(pidx); - --pidx; - assert(pidx < PATH_SIZE); - _path[pidx].setNodeAndIdx(inode, idx); - BTreeNode::Ref childRef = inode->getChild(idx - 1); - assert(childRef.valid()); - while (pidx != 0) { - --pidx; - inode = _allocator->mapInternalRef(childRef); - idx = inode->validSlots(); - assert(idx > 0u); - _path[pidx].setNodeAndIdx(inode, idx); - childRef = inode->getChild(idx - 1); - assert(childRef.valid()); - } - _leaf.setNodeAndIdx(nullptr, 0u); -} - - -template -void 
-BTreeIteratorBase:: -findNextLeafNode() -{ - uint32_t pidx; - for (pidx = 0; pidx < _pathSize; ++pidx) { - PathElement & elem = _path[pidx]; - const InternalNodeType * inode = elem.getNode(); - elem.incIdx(); // advance to the next child - if (elem.getIdx() < inode->validSlots()) { - BTreeNode::Ref node = inode->getChild(elem.getIdx()); - while (pidx > 0) { - // find the first leaf node under this child and update path - inode = _allocator->mapInternalRef(node); - pidx--; - _path[pidx].setNodeAndIdx(inode, 0u); - node = inode->getChild(0); - } - _leaf.setNodeAndIdx(_allocator->mapLeafRef(node), 0u); - return; - } - } - _leaf.setNodeAndIdx(nullptr, 0u); -} - - -template -void -BTreeIteratorBase:: -findPrevLeafNode() -{ - uint32_t pidx; - for (pidx = 0; pidx < _pathSize; ++pidx) { - PathElement & elem = _path[pidx]; - const InternalNodeType * inode = elem.getNode(); - if (elem.getIdx() > 0u) { - elem.decIdx(); // advance to the previous child - BTreeNode::Ref node = inode->getChild(elem.getIdx()); - while (pidx > 0) { - // find the last leaf node under this child and update path - inode = _allocator->mapInternalRef(node); - uint16_t slot = inode->validSlots() - 1; - pidx--; - _path[pidx].setNodeAndIdx(inode, slot); - node = inode->getChild(slot); - } - const LeafNodeType *lnode(_allocator->mapLeafRef(node)); - _leaf.setNodeAndIdx(lnode, lnode->validSlots() - 1); - return; - } - } - // XXX: position wraps around for now, to end of list. - end(); -} - - -template -void -BTreeIteratorBase:: -begin() -{ - uint32_t pidx = _pathSize; - if (pidx > 0u) { - --pidx; - PathElement &elem = _path[pidx]; - elem.setIdx(0); - BTreeNode::Ref node = elem.getNode()->getChild(0); - while (pidx > 0) { - // find the first leaf node under this child and update path - const InternalNodeType * inode = _allocator->mapInternalRef(node); - pidx--; - _path[pidx].setNodeAndIdx(inode, 0u); - node = inode->getChild(0); - } - _leaf.setNodeAndIdx(_allocator->mapLeafRef(node), 0u); - } else { - _leaf.setNodeAndIdx(_leafRoot, 0u); - } -} - - -template -void -BTreeIteratorBase:: -begin(BTreeNode::Ref rootRef) -{ - if (!rootRef.valid()) { - setupEmpty(); - return; - } - if (_allocator->isLeafRef(rootRef)) { - clearPath(0u); - const LeafNodeType *lnode = _allocator->mapLeafRef(rootRef); - _leafRoot = lnode; - _leaf.setNodeAndIdx(lnode, 0u); - return; - } - _leafRoot = nullptr; - const InternalNodeType *inode = _allocator->mapInternalRef(rootRef); - uint32_t pidx = inode->getLevel(); - clearPath(pidx); - --pidx; - assert(pidx < PATH_SIZE); - _path[pidx].setNodeAndIdx(inode, 0); - BTreeNode::Ref childRef = inode->getChild(0); - assert(childRef.valid()); - while (pidx != 0) { - --pidx; - inode = _allocator->mapInternalRef(childRef); - _path[pidx].setNodeAndIdx(inode, 0); - childRef = inode->getChild(0); - assert(childRef.valid()); - } - _leaf.setNodeAndIdx(_allocator->mapLeafRef(childRef), 0u); -} - - -template -void -BTreeIteratorBase:: -rbegin() -{ - uint32_t pidx = _pathSize; - if (pidx > 0u) { - --pidx; - PathElement &elem = _path[pidx]; - const InternalNodeType * inode = elem.getNode(); - uint16_t slot = inode->validSlots() - 1; - elem.setIdx(slot); - BTreeNode::Ref node = inode->getChild(slot); - while (pidx > 0) { - // find the last leaf node under this child and update path - inode = _allocator->mapInternalRef(node); - slot = inode->validSlots() - 1; - pidx--; - _path[pidx].setNodeAndIdx(inode, slot); - node = inode->getChild(slot); - } - const LeafNodeType *lnode(_allocator->mapLeafRef(node)); - _leaf.setNodeAndIdx(lnode, 
lnode->validSlots() - 1); - } else { - _leaf.setNodeAndIdx(_leafRoot, - (_leafRoot != nullptr) ? - _leafRoot->validSlots() - 1 : - 0u); - } -} - - -template -const AggrT & -BTreeIteratorBase:: -getAggregated() const -{ - // XXX: Undefined behavior if tree is empty. - uint32_t pidx = _pathSize; - if (pidx > 0u) { - return _path[pidx - 1].getNode()->getAggregated(); - } else if (_leafRoot != nullptr) { - return _leafRoot->getAggregated(); - } else { - return LeafNodeType::getEmptyAggregated(); - } -} - - -template -size_t -BTreeIteratorBase:: -position(uint32_t levels) const -{ - assert(_pathSize >= levels); - if (_leaf.getNode() == nullptr) - return size(); - size_t res = _leaf.getIdx(); - if (levels == 0) - return res; - { - const PathElement & elem = _path[0]; - const InternalNodeType * inode = elem.getNode(); - uint32_t slots = inode->validSlots(); - if (elem.getIdx() * 2 > slots) { - res += inode->validLeaves(); - for (uint32_t c = elem.getIdx(); c < slots; ++c) { - BTreeNode::Ref node = inode->getChild(c); - const LeafNodeType *lnode = _allocator->mapLeafRef(node); - res -= lnode->validSlots(); - } - } else { - for (uint32_t c = 0; c < elem.getIdx(); ++c) { - BTreeNode::Ref node = inode->getChild(c); - const LeafNodeType *lnode = _allocator->mapLeafRef(node); - res += lnode->validSlots(); - } - } - } - for (uint32_t pidx = 1; pidx < levels; ++pidx) { - const PathElement & elem = _path[pidx]; - const InternalNodeType * inode = elem.getNode(); - uint32_t slots = inode->validSlots(); - if (elem.getIdx() * 2 > slots) { - res += inode->validLeaves(); - for (uint32_t c = elem.getIdx(); c < slots; ++c) { - BTreeNode::Ref node = inode->getChild(c); - const InternalNodeType *jnode = - _allocator->mapInternalRef(node); - res -= jnode->validLeaves(); - } - } else { - for (uint32_t c = 0; c < elem.getIdx(); ++c) { - BTreeNode::Ref node = inode->getChild(c); - const InternalNodeType *jnode = - _allocator->mapInternalRef(node); - res += jnode->validLeaves(); - } - } - } - return res; -} - - -template -BTreeIteratorBase:: -BTreeIteratorBase(BTreeNode::Ref root, - const NodeAllocatorType &allocator) - : _leaf(nullptr, 0u), - _path(), - _pathSize(0), - _allocator(&allocator), - _leafRoot(nullptr), - _compatLeafNode() -{ - begin(root); -} - - -template -template -BTreeIteratorBase:: -BTreeIteratorBase(const KeyDataType *shortArray, - uint32_t arraySize, - const NodeAllocatorType &allocator, - const AggrCalcT &aggrCalc) - : _leaf(nullptr, 0u), - _path(), - _pathSize(0), - _allocator(&allocator), - _leafRoot(nullptr), - _compatLeafNode() -{ - if(arraySize > 0) { - _compatLeafNode.reset(new LeafNodeTempType(shortArray, arraySize)); - _leaf.setNode(_compatLeafNode.get()); - _leafRoot = _leaf.getNode(); - typedef BTreeAggregator Aggregator; - if (AggrCalcT::hasAggregated()) { - Aggregator::recalc(const_cast(*_leaf.getNode()), - aggrCalc); - } - } -} - - -template -BTreeIteratorBase:: -BTreeIteratorBase() - : _leaf(nullptr, 0u), - _path(), - _pathSize(0), - _allocator(nullptr), - _leafRoot(nullptr), - _compatLeafNode() -{ -} - - -template -BTreeIteratorBase & -BTreeIteratorBase:: -operator--() -{ - if (_leaf.getNode() == nullptr) { - rbegin(); - return *this; - } - if (_leaf.getIdx() > 0u) { - _leaf.decIdx(); - return *this; - } - findPrevLeafNode(); - return *this; -} - - -template -size_t -BTreeIteratorBase:: -size() const -{ - if (_pathSize > 0) { - return _path[_pathSize - 1].getNode()->validLeaves(); - } - if (_leafRoot != nullptr) { - return _leafRoot->validSlots(); - } - return 0u; -} - - -template 
-ssize_t -BTreeIteratorBase:: -operator-(const BTreeIteratorBase &rhs) const -{ - if (_leaf.getNode() == nullptr) { - if (rhs._leaf.getNode() == nullptr) - return 0; - // *this might not be normalized (i.e. default constructor) - return rhs.size() - rhs.position(rhs._pathSize); - } else if (rhs._leaf.getNode() == nullptr) { - // rhs might not be normalized (i.e. default constructor) - return position(_pathSize) - size(); - } - assert(_pathSize == rhs._pathSize); - if (_pathSize != 0) { - uint32_t pidx = _pathSize; - while (pidx > 0) { - assert(_path[pidx - 1].getNode() == rhs._path[pidx - 1].getNode()); - if (_path[pidx - 1].getIdx() != rhs._path[pidx - 1].getIdx()) - break; - --pidx; - } - return position(pidx) - rhs.position(pidx); - } else { - assert(_leaf.getNode() == nullptr || rhs._leaf.getNode() == nullptr || - _leaf.getNode() == rhs._leaf.getNode()); - return position(0) - rhs.position(0); - } -} - - -template -bool -BTreeIteratorBase:: -identical(const BTreeIteratorBase &rhs) const -{ - if (_pathSize != rhs._pathSize || _leaf != rhs._leaf) { - HDR_ABORT("should not be reached"); - } - for (uint32_t level = 0; level < _pathSize; ++level) { - if (_path[level] != rhs._path[level]) { - HDR_ABORT("should not be reached"); - } - } - if (_leafRoot != rhs._leafRoot) { - HDR_ABORT("should not be reached"); - } - return true; -} - - -template -void -BTreeConstIterator:: -lower_bound(const KeyType & key, CompareT comp) -{ - if (_pathSize == 0) { - if (_leafRoot == nullptr) - return; - uint32_t idx = _leafRoot->template lower_bound(key, comp); - if (idx >= _leafRoot->validSlots()) { - _leaf.setNodeAndIdx(nullptr, 0u); - } else { - _leaf.setNodeAndIdx(_leafRoot, idx); - } - return; - } - uint32_t level = _pathSize - 1; - PathElement &pe = _path[level]; - const InternalNodeType *inode = pe.getNode(); - uint32_t idx = inode->template lower_bound(key, comp); - if (__builtin_expect(idx >= inode->validSlots(), false)) { - end(); - return; - } - pe.setIdx(idx); - BTreeNode::Ref childRef = inode->getChild(idx); - while (level > 0) { - --level; - assert(!_allocator->isLeafRef(childRef)); - inode = _allocator->mapInternalRef(childRef); - idx = inode->template lower_bound(key, comp); - assert(idx < inode->validSlots()); - _path[level].setNodeAndIdx(inode, idx); - childRef = inode->getChild(idx); - assert(childRef.valid()); - } - assert(_allocator->isLeafRef(childRef)); - const LeafNodeType *lnode = _allocator->mapLeafRef(childRef); - idx = lnode->template lower_bound(key, comp); - assert(idx < lnode->validSlots()); - _leaf.setNodeAndIdx(lnode, idx); -} - - -template -void -BTreeConstIterator:: -lower_bound(BTreeNode::Ref rootRef, const KeyType & key, CompareT comp) -{ - if (!rootRef.valid()) { - setupEmpty(); - return; - } - if (_allocator->isLeafRef(rootRef)) { - clearPath(0u); - const LeafNodeType *lnode = _allocator->mapLeafRef(rootRef); - _leafRoot = lnode; - uint32_t idx = lnode->template lower_bound(key, comp); - if (idx >= lnode->validSlots()) { - _leaf.setNodeAndIdx(nullptr, 0u); - } else { - _leaf.setNodeAndIdx(lnode, idx); - } - return; - } - _leafRoot = nullptr; - const InternalNodeType *inode = _allocator->mapInternalRef(rootRef); - uint32_t idx = inode->template lower_bound(key, comp); - if (idx >= inode->validSlots()) { - end(rootRef); - return; - } - uint32_t pidx = inode->getLevel(); - clearPath(pidx); - --pidx; - assert(pidx < TraitsT::PATH_SIZE); - _path[pidx].setNodeAndIdx(inode, idx); - BTreeNode::Ref childRef = inode->getChild(idx); - assert(childRef.valid()); - while (pidx != 0) { 
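/*
 * Sketch of the same top-down lower_bound() descent, reduced to a single
 * internal level with heap-allocated toy nodes instead of datastore
 * references.  Each internal slot carries the largest key of its child, so
 * std::lower_bound over the slot keys selects the only subtree that can
 * contain the target.  ToyLeaf/ToyInternal are illustrative names only.
 */
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

struct ToyLeaf {
    std::vector<int> keys;              // sorted
};

struct ToyInternal {
    std::vector<int> lastKeys;          // lastKeys[i] == child i's largest key
    std::vector<const ToyLeaf*> child;  // one child per slot
};

// Returns (leaf, index) of the first element >= key, or (nullptr, 0) if none.
std::pair<const ToyLeaf*, size_t>
lowerBound(const ToyInternal& root, int key) {
    auto it = std::lower_bound(root.lastKeys.begin(), root.lastKeys.end(), key);
    if (it == root.lastKeys.end()) {
        return {nullptr, 0};            // past the last key: the "end" position
    }
    const ToyLeaf* leaf = root.child[it - root.lastKeys.begin()];
    auto lit = std::lower_bound(leaf->keys.begin(), leaf->keys.end(), key);
    assert(lit != leaf->keys.end());    // guaranteed by the parent's last key
    return {leaf, static_cast<size_t>(lit - leaf->keys.begin())};
}

int main() {
    ToyLeaf a{{1, 4}}, b{{7, 9, 10}};
    ToyInternal root{{4, 10}, {&a, &b}};
    auto hit = lowerBound(root, 8);
    assert(hit.first == &b && hit.first->keys[hit.second] == 9);
    assert(lowerBound(root, 11).first == nullptr);
    return 0;
}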
- --pidx; - inode = _allocator->mapInternalRef(childRef); - idx = inode->template lower_bound(key, comp); - assert(idx < inode->validSlots()); - _path[pidx].setNodeAndIdx(inode, idx); - childRef = inode->getChild(idx); - assert(childRef.valid()); - } - const LeafNodeType *lnode = _allocator->mapLeafRef(childRef); - idx = lnode->template lower_bound(key, comp); - assert(idx < lnode->validSlots()); - _leaf.setNodeAndIdx(lnode, idx); -} - - -template -void -BTreeConstIterator:: -seek(const KeyType & key, CompareT comp) -{ - if (TraitsT::BINARY_SEEK) { - binarySeek(key, comp); - } else { - linearSeek(key, comp); - } -} - -template -void -BTreeConstIterator:: -binarySeek(const KeyType & key, CompareT comp) -{ - const LeafNodeType *lnode = _leaf.getNode(); - uint32_t lidx = _leaf.getIdx(); -#ifdef STRICT_BTREE_ITERATOR_SEEK - assert(_leaf.valid() && comp(lnode->getKey(lidx), key)); -#endif - ++lidx; - if (lidx < lnode->validSlots()) { - if (!comp(lnode->getKey(lidx), key)) { - _leaf.setIdx(lidx); - return; - } else { - ++lidx; - } - } - if (comp(lnode->getLastKey(), key)) { - uint32_t level = 0; - uint32_t levels = _pathSize; - while (level < levels && - comp(_path[level].getNode()->getLastKey(), key)) - ++level; - if (__builtin_expect(level >= levels, false)) { - end(); - return; - } else { - const InternalNodeType *node = _path[level].getNode(); - uint32_t idx = _path[level].getIdx(); - idx = node->template lower_bound(idx + 1, key, comp); - _path[level].setIdx(idx); - while (level > 0) { - --level; - node = _allocator->mapInternalRef(node->getChild(idx)); - idx = node->template lower_bound(0, key, comp); - _path[level].setNodeAndIdx(node, idx); - } - lnode = _allocator->mapLeafRef(node->getChild(idx)); - _leaf.setNode(lnode); - lidx = 0; - } - } - lidx = lnode->template lower_bound(lidx, key, comp); - _leaf.setIdx(lidx); -} - -template -void -BTreeConstIterator:: -linearSeek(const KeyType & key, CompareT comp) -{ - const LeafNodeType *lnode = _leaf.getNode(); - uint32_t lidx = _leaf.getIdx(); -#ifdef STRICT_BTREE_ITERATOR_SEEK - assert(_leaf.valid() && comp(lnode->getKey(lidx), key)); -#endif - ++lidx; - if (lidx < lnode->validSlots()) { - if (!comp(lnode->getKey(lidx), key)) { - _leaf.setIdx(lidx); - return; - } else { - ++lidx; - } - } - if (comp(lnode->getLastKey(), key)) { - uint32_t level = 0; - uint32_t levels = _pathSize; - while (level < levels && - comp(_path[level].getNode()->getLastKey(), key)) - ++level; - if (__builtin_expect(level >= levels, false)) { - end(); - return; - } else { - const InternalNodeType *node = _path[level].getNode(); - uint32_t idx = _path[level].getIdx(); - do { - ++idx; - } while (comp(node->getKey(idx), key)); - _path[level].setIdx(idx); - while (level > 0) { - --level; - node = _allocator->mapInternalRef(node->getChild(idx)); - idx = 0; - while (comp(node->getKey(idx), key)) { - ++idx; - } - _path[level].setNodeAndIdx(node, idx); - } - lnode = _allocator->mapLeafRef(node->getChild(idx)); - _leaf.setNode(lnode); - lidx = 0; - } - } - while (comp(lnode->getKey(lidx), key)) { - ++lidx; - } - _leaf.setIdx(lidx); -} - -template -void -BTreeConstIterator:: -seekPast(const KeyType & key, CompareT comp) -{ - if (TraitsT::BINARY_SEEK) { - binarySeekPast(key, comp); - } else { - linearSeekPast(key, comp); - } -} - -template -void -BTreeConstIterator:: -binarySeekPast(const KeyType & key, CompareT comp) -{ - const LeafNodeType *lnode = _leaf.getNode(); - uint32_t lidx = _leaf.getIdx(); -#ifdef STRICT_BTREE_ITERATOR_SEEK - assert(_leaf.valid() && !comp(key, 
lnode->getKey(lidx))); -#endif - ++lidx; - if (lidx < lnode->validSlots()) { - if (comp(key, lnode->getKey(lidx))) { - _leaf.setIdx(lidx); - return; - } else { - ++lidx; - } - } - if (!comp(key, lnode->getLastKey())) { - uint32_t level = 0; - uint32_t levels = _pathSize; - while (level < levels && - !comp(key, _path[level].getNode()->getLastKey())) - ++level; - if (__builtin_expect(level >= levels, false)) { - end(); - return; - } else { - const InternalNodeType *node = _path[level].getNode(); - uint32_t idx = _path[level].getIdx(); - idx = node->template upper_bound(idx + 1, key, comp); - _path[level].setIdx(idx); - while (level > 0) { - --level; - node = _allocator->mapInternalRef(node->getChild(idx)); - idx = node->template upper_bound(0, key, comp); - _path[level].setNodeAndIdx(node, idx); - } - lnode = _allocator->mapLeafRef(node->getChild(idx)); - _leaf.setNode(lnode); - lidx = 0; - } - } - lidx = lnode->template upper_bound(lidx, key, comp); - _leaf.setIdx(lidx); -} - -template -void -BTreeConstIterator:: -linearSeekPast(const KeyType & key, CompareT comp) -{ - const LeafNodeType *lnode = _leaf.getNode(); - uint32_t lidx = _leaf.getIdx(); -#ifdef STRICT_BTREE_ITERATOR_SEEK - assert(_leaf.valid() && !comp(key, lnode->getKey(lidx))); -#endif - ++lidx; - if (lidx < lnode->validSlots()) { - if (comp(key, lnode->getKey(lidx))) { - _leaf.setIdx(lidx); - return; - } else { - ++lidx; - } - } - if (!comp(key, lnode->getLastKey())) { - uint32_t level = 0; - uint32_t levels = _pathSize; - while (level < levels && - !comp(key, _path[level].getNode()->getLastKey())) - ++level; - if (__builtin_expect(level >= levels, false)) { - end(); - return; - } else { - const InternalNodeType *node = _path[level].getNode(); - uint32_t idx = _path[level].getIdx(); - do { - ++idx; - } while (!comp(key, node->getKey(idx))); - _path[level].setIdx(idx); - while (level > 0) { - --level; - node = _allocator->mapInternalRef(node->getChild(idx)); - idx = 0; - while (!comp(key, node->getKey(idx))) { - ++idx; - } - _path[level].setNodeAndIdx(node, idx); - } - lnode = _allocator->mapLeafRef(node->getChild(idx)); - _leaf.setNode(lnode); - lidx = 0; - } - } - while (!comp(key, lnode->getKey(lidx))) { - ++lidx; - } - _leaf.setIdx(lidx); -} - - -template -void -BTreeConstIterator:: -validate(BTreeNode::Ref rootRef, CompareT comp) -{ - bool frozen = false; - if (!rootRef.valid()) { - assert(_pathSize == 0u); - assert(_leafRoot == nullptr); - assert(_leaf.getNode() == nullptr); - return; - } - uint32_t level = _pathSize; - BTreeNode::Ref nodeRef = rootRef; - const KeyT *parentKey = nullptr; - const KeyT *leafKey = nullptr; - if (_leaf.getNode() != nullptr) { - leafKey = &_leaf.getNode()->getKey(_leaf.getIdx()); - } - while (level > 0) { - --level; - assert(!_allocator->isLeafRef(nodeRef)); - const PathElement &pe = _path[level]; - assert(pe.getNode() == _allocator->mapInternalRef(nodeRef)); - uint32_t idx = pe.getIdx(); - if (leafKey == nullptr) { - assert(idx == 0 || - idx == pe.getNode()->validSlots()); - if (idx == pe.getNode()->validSlots()) - --idx; - } - assert(idx < pe.getNode()->validSlots()); - assert(!frozen || pe.getNode()->getFrozen()); - (void) frozen; - frozen = pe.getNode()->getFrozen(); - if (parentKey != nullptr) { - assert(idx + 1 == pe.getNode()->validSlots() || - comp(pe.getNode()->getKey(idx), *parentKey)); - assert(!comp(*parentKey, pe.getNode()->getKey(idx))); - (void) comp; - } - if (leafKey != nullptr) { - assert(idx == 0 || - comp(pe.getNode()->getKey(idx - 1), *leafKey)); - assert(idx + 1 == 
pe.getNode()->validSlots() || - comp(*leafKey, pe.getNode()->getKey(idx + 1))); - assert(!comp(pe.getNode()->getKey(idx), *leafKey)); - (void) comp; - } - parentKey = &pe.getNode()->getKey(idx); - nodeRef = pe.getNode()->getChild(idx); - assert(nodeRef.valid()); - } - assert(_allocator->isLeafRef(nodeRef)); - if (_pathSize == 0) { - assert(_leafRoot == _allocator->mapLeafRef(nodeRef)); - assert(_leaf.getNode() == nullptr || _leaf.getNode() == _leafRoot); - } else { - assert(_leafRoot == nullptr); - assert(_leaf.getNode() == _allocator->mapLeafRef(nodeRef) || - _leaf.getNode() == nullptr); - } -} - - -template -BTreeNode::Ref -BTreeIterator:: -moveFirstLeafNode(BTreeNode::Ref rootRef) -{ - if (!NodeAllocatorType::isValidRef(rootRef)) { - assert(_pathSize == 0); - assert(_leaf.getNode() == nullptr); - return rootRef; - } - - assert(_leaf.getNode() != nullptr); - NodeAllocatorType &allocator = getAllocator(); - - if (_pathSize == 0) { - BTreeNode::Ref newRootRef = rootRef; - assert(_leaf.getNode() == allocator.mapLeafRef(rootRef)); - if (allocator.getCompacting(rootRef)) { - LeafNodeTypeRefPair lPair(allocator.moveLeafNode(_leaf.getNode())); - _leaf.setNode(lPair.data); - // Before updating root - std::atomic_thread_fence(std::memory_order_release); - newRootRef = lPair.ref; - } - _leaf.setIdx(_leaf.getNode()->validSlots() - 1); - return newRootRef; - } - - uint32_t level = _pathSize; - BTreeNode::Ref newRootRef = rootRef; - - --level; - InternalNodeType *node = _path[level].getWNode(); - assert(node == allocator.mapInternalRef(rootRef)); - bool moved = allocator.getCompacting(rootRef); - if (moved) { - InternalNodeTypeRefPair iPair(allocator.moveInternalNode(node)); - newRootRef = iPair.ref; - node = iPair.data; - } - _path[level].setNodeAndIdx(node, 0u); - while (level > 0) { - --level; - EntryRef nodeRef = node->getChild(0); - InternalNodeType *pnode = node; - node = allocator.mapInternalRef(nodeRef); - if (allocator.getCompacting(nodeRef)) { - InternalNodeTypeRefPair iPair = allocator.moveInternalNode(node); - nodeRef = iPair.ref; - node = iPair.data; - pnode->setChild(0, nodeRef); - moved = true; - } - _path[level].setNodeAndIdx(node, 0u); - } - EntryRef nodeRef = node->getChild(0); - _leaf.setNode(allocator.mapLeafRef(nodeRef)); - if (allocator.getCompacting(nodeRef)) { - LeafNodeTypeRefPair - lPair(allocator.moveLeafNode(_leaf.getNode())); - _leaf.setNode(lPair.data); - node->setChild(0, lPair.ref); - moved = true; - } - if (moved) { - // Before updating root - std::atomic_thread_fence(std::memory_order_release); - } - _leaf.setIdx(_leaf.getNode()->validSlots() - 1); - return newRootRef; -} - - -template -void -BTreeIterator:: -moveNextLeafNode() -{ - uint32_t level = 0; - uint32_t levels = _pathSize; - while (level < levels && - _path[level].getNode()->validSlots() <= _path[level].getIdx() + 1) - ++level; - if (__builtin_expect(level >= levels, false)) { - end(); - return; - } else { - NodeAllocatorType &allocator = getAllocator(); - InternalNodeType *node = _path[level].getWNode(); - uint32_t idx = _path[level].getIdx(); - ++idx; - _path[level].setIdx(idx); - while (level > 0) { - --level; - EntryRef nodeRef = node->getChild(idx); - InternalNodeType *pnode = node; - node = allocator.mapInternalRef(nodeRef); - if (allocator.getCompacting(nodeRef)) { - InternalNodeTypeRefPair iPair(allocator.moveInternalNode(node)); - nodeRef = iPair.ref; - node = iPair.data; - std::atomic_thread_fence(std::memory_order_release); - pnode->setChild(idx, nodeRef); - } - idx = 0; - 
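/*
 * Sketch of the compaction-aware move pattern used here: when the allocator
 * reports that a node lives in a buffer being compacted, the node is copied
 * into a fresh location and the parent's child reference is updated, with a
 * release fence issued before the new reference is published to lock-free
 * readers.  ToyArena is a made-up stand-in for the datastore-backed
 * allocator, not its real interface.
 */
#include <atomic>
#include <cassert>
#include <cstdint>
#include <vector>

struct MovableLeaf { std::vector<int> keys; };

struct ToyArena {
    std::vector<MovableLeaf> slots;
    std::vector<bool> compacting;            // per-slot "buffer is being compacted"

    uint32_t move(uint32_t ref) {            // copy the node into a fresh slot
        MovableLeaf copy = slots[ref];
        slots.push_back(std::move(copy));
        compacting.push_back(false);
        return static_cast<uint32_t>(slots.size() - 1);
    }
    bool getCompacting(uint32_t ref) const { return compacting[ref]; }
};

// Returns the child reference the parent should store afterwards.
uint32_t moveIfCompacting(ToyArena& arena, uint32_t childRef) {
    if (!arena.getCompacting(childRef)) {
        return childRef;                     // node already lives in a good buffer
    }
    uint32_t newRef = arena.move(childRef);
    // Make the copied node contents visible before the reference is rewired
    // into the parent (readers follow the reference without locking).
    std::atomic_thread_fence(std::memory_order_release);
    return newRef;
}

int main() {
    ToyArena arena;
    arena.slots.push_back(MovableLeaf{{1, 2, 3}});
    arena.compacting.push_back(true);
    uint32_t childRef = 0;
    childRef = moveIfCompacting(arena, childRef);   // parent would then setChild()
    assert(childRef == 1);
    assert(arena.slots[childRef].keys == (std::vector<int>{1, 2, 3}));
    return 0;
}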
_path[level].setNodeAndIdx(node, idx); - } - EntryRef nodeRef = node->getChild(idx); - _leaf.setNode(allocator.mapLeafRef(nodeRef)); - if (allocator.getCompacting(nodeRef)) { - LeafNodeTypeRefPair lPair(allocator.moveLeafNode(_leaf.getNode())); - _leaf.setNode(lPair.data); - std::atomic_thread_fence(std::memory_order_release); - node->setChild(idx, lPair.ref); - } - _leaf.setIdx(_leaf.getNode()->validSlots() - 1); - } -} - - -template -void -BTreeIterator:: -writeKey(const KeyType & key) -{ - LeafNodeType * lnode = getLeafNode(); - lnode->writeKey(_leaf.getIdx(), key); - // must also update the key towards the root as long as the key is - // the last one in the current node - if (_leaf.getIdx() + 1 == lnode->validSlots()) { - for (uint32_t i = 0; i < _pathSize; ++i) { - const PathElement & pe = _path[i]; - InternalNodeType *inode = pe.getWNode(); - uint32_t childIdx = pe.getIdx(); - inode->writeKey(childIdx, key); - if (childIdx + 1 != inode->validSlots()) { - break; - } - } - } -} - - -template -template -void -BTreeIterator:: -updateData(const DataType & data, const AggrCalcT &aggrCalc) -{ - LeafNodeType * lnode = getLeafNode(); - if (AggrCalcT::hasAggregated()) { - AggrT oldca(lnode->getAggregated()); - typedef BTreeAggregator Aggregator; - if (aggrCalc.update(lnode->getAggregated(), - aggrCalc.getVal(lnode->getData(_leaf.getIdx())), - aggrCalc.getVal(data))) { - lnode->writeData(_leaf.getIdx(), data); - Aggregator::recalc(*lnode, aggrCalc); - } else { - lnode->writeData(_leaf.getIdx(), data); - } - AggrT ca(lnode->getAggregated()); - // must also update aggregated values towards the root. - for (uint32_t i = 0; i < _pathSize; ++i) { - const PathElement & pe = _path[i]; - InternalNodeType * inode = pe.getWNode(); - AggrT oldpa(inode->getAggregated()); - if (aggrCalc.update(inode->getAggregated(), - oldca, ca)) { - Aggregator::recalc(*inode, *_allocator, aggrCalc); - } - AggrT pa(inode->getAggregated()); - oldca = oldpa; - ca = pa; - } - } else { - lnode->writeData(_leaf.getIdx(), data); - } -} - - -template -BTreeNode::Ref -BTreeIterator:: -thaw(BTreeNode::Ref rootRef) -{ - assert(_leaf.getNode() != nullptr && _compatLeafNode.get() == nullptr); - if (!_leaf.getNode()->getFrozen()) - return rootRef; - NodeAllocatorType &allocator = getAllocator(); - if (_pathSize == 0) { - LeafNodeType *leafNode = allocator.mapLeafRef(rootRef); - assert(leafNode == _leaf.getNode()); - assert(leafNode == _leafRoot); - LeafNodeTypeRefPair thawedLeaf = allocator.thawNode(rootRef, - leafNode); - _leaf.setNode(thawedLeaf.data); - _leafRoot = thawedLeaf.data; - return thawedLeaf.ref; - } - assert(_leafRoot == nullptr); - assert(_path[_pathSize - 1].getNode() == - allocator.mapInternalRef(rootRef)); - BTreeNode::Ref childRef(_path[0].getNode()->getChild(_path[0].getIdx())); - LeafNodeType *leafNode = allocator.mapLeafRef(childRef); - assert(leafNode == _leaf.getNode()); - LeafNodeTypeRefPair thawedLeaf = allocator.thawNode(childRef, - leafNode); - _leaf.setNode(thawedLeaf.data); - childRef = thawedLeaf.ref; - uint32_t level = 0; - uint32_t levels = _pathSize; - while (level < levels) { - PathElement &pe = _path[level]; - InternalNodeType *node(pe.getWNode()); - BTreeNode::Ref nodeRef = level + 1 < levels ? 
- _path[level + 1].getNode()-> - getChild(_path[level + 1].getIdx()) : - rootRef; - assert(node == allocator.mapInternalRef(nodeRef)); - if (!node->getFrozen()) { - node->setChild(pe.getIdx(), childRef); - return rootRef; - } - InternalNodeTypeRefPair thawed = allocator.thawNode(nodeRef, node); - node = thawed.data; - pe.setNode(node); - node->setChild(pe.getIdx(), childRef); - childRef = thawed.ref; - ++level; - } - return childRef; // Root node was thawed -} - - -template -template -BTreeNode::Ref -BTreeIterator:: -insertFirst(const KeyType &key, const DataType &data, - const AggrCalcT &aggrCalc) -{ - assert(_pathSize == 0); - assert(_leafRoot == nullptr); - NodeAllocatorType &allocator = getAllocator(); - LeafNodeTypeRefPair lnode = allocator.allocLeafNode(); - lnode.data->insert(0, key, data); - if (AggrCalcT::hasAggregated()) { - AggrT a; - aggrCalc.add(a, aggrCalc.getVal(data)); - lnode.data->getAggregated() = a; - } - _leafRoot = lnode.data; - _leaf.setNodeAndIdx(lnode.data, 0u); - return lnode.ref; -} - - -template -bool -BTreeIterator:: -setLeafNodeIdx(uint32_t idx, const LeafNodeType *splitLeafNode) -{ - uint32_t leafSlots = _leaf.getNode()->validSlots(); - if (idx >= leafSlots) { - _leaf.setNodeAndIdx(splitLeafNode, - idx - leafSlots); - if (_pathSize == 0) { - _leafRoot = splitLeafNode; - } - return true; - } else { - _leaf.setIdx(idx); - return false; - } -} - - -template -template -BTreeNode::Ref -BTreeIterator:: -addLevel(BTreeNode::Ref rootRef, BTreeNode::Ref splitNodeRef, - bool inRightSplit, const AggrCalcT &aggrCalc) -{ - typedef BTreeAggregator Aggregator; - - NodeAllocatorType &allocator(getAllocator()); - - InternalNodeTypeRefPair inodePair(allocator.allocInternalNode(_pathSize + 1)); - InternalNodeType *inode = inodePair.data; - inode->setValidLeaves(allocator.validLeaves(rootRef) + - allocator.validLeaves(splitNodeRef)); - inode->insert(0, allocator.getLastKey(rootRef), rootRef); - inode->insert(1, allocator.getLastKey(splitNodeRef), splitNodeRef); - if (AggrCalcT::hasAggregated()) { - Aggregator::recalc(*inode, allocator, aggrCalc); - } - _path[_pathSize].setNodeAndIdx(inode, inRightSplit ? 
1u : 0u); - if (_pathSize == 0) { - _leafRoot = nullptr; - } - ++_pathSize; - return inodePair.ref; -} - - -template -BTreeNode::Ref -BTreeIterator:: -removeLevel(BTreeNode::Ref rootRef, InternalNodeType *rootNode) -{ - BTreeNode::Ref newRoot = rootNode->getChild(0); - NodeAllocatorType &allocator(getAllocator()); - allocator.holdNode(rootRef, rootNode); - --_pathSize; - _path[_pathSize].setNodeAndIdx(nullptr, 0u); - if (_pathSize == 0) { - _leafRoot = _leaf.getNode(); - } - return newRoot; -} - - -template -void -BTreeIterator:: -removeLast(BTreeNode::Ref rootRef) -{ - NodeAllocatorType &allocator(getAllocator()); - allocator.holdNode(rootRef, getLeafNode()); - _leafRoot = nullptr; - _leaf.setNode(nullptr); -} - -template -void -BTreeIterator::adjustGivenNoEntriesToLeftLeafNode() -{ - auto &pathElem = _path[0]; - uint32_t parentIdx = pathElem.getIdx() - 1; - BTreeNode::Ref leafRef = pathElem.getNode()->getChild(parentIdx); - const LeafNodeType *leafNode = _allocator->mapLeafRef(leafRef); - pathElem.setIdx(parentIdx); - _leaf.setNodeAndIdx(leafNode, leafNode->validSlots()); -} - -template -void -BTreeIterator::adjustGivenEntriesToLeftLeafNode(uint32_t given) -{ - uint32_t leafIdx = _leaf.getIdx(); - if (leafIdx >= given) { - _leaf.setIdx(leafIdx - given); - } else { - auto &pathElem = _path[0]; - uint32_t parentIdx = pathElem.getIdx() - 1; - BTreeNode::Ref leafRef = pathElem.getNode()->getChild(parentIdx); - const LeafNodeType *leafNode = _allocator->mapLeafRef(leafRef); - leafIdx += leafNode->validSlots(); - assert(given <= leafIdx); - pathElem.setIdx(parentIdx); - _leaf.setNodeAndIdx(leafNode, leafIdx - given); - } -} - -template -void -BTreeIterator::adjustGivenEntriesToRightLeafNode() -{ - uint32_t leafIdx = _leaf.getIdx(); - const LeafNodeType *leafNode = _leaf.getNode(); - if (leafIdx > leafNode->validSlots()) { - auto &pathElem = _path[0]; - const InternalNodeType *parentNode = pathElem.getNode(); - uint32_t parentIdx = pathElem.getIdx() + 1; - leafIdx -= leafNode->validSlots(); - BTreeNode::Ref leafRef = parentNode->getChild(parentIdx); - leafNode = _allocator->mapLeafRef(leafRef); - assert(leafIdx <= leafNode->validSlots()); - pathElem.setIdx(parentIdx); - _leaf.setNodeAndIdx(leafNode, leafIdx); - } -} - -} diff --git a/searchlib/src/vespa/searchlib/btree/btreenode.cpp b/searchlib/src/vespa/searchlib/btree/btreenode.cpp deleted file mode 100644 index b3d7b60adb6..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreenode.cpp +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
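/*
 * Sketch of the copy-on-write behaviour implemented by thaw() in
 * btreeiterator.hpp above: frozen nodes on the path from the leaf towards
 * the root are replaced by writable copies, each copy is hooked into its
 * (possibly also copied) parent, and the walk stops at the first ancestor
 * that is already writable.  Two levels only, shared_ptr instead of
 * datastore refs, illustrative names -- not the searchlib implementation.
 */
#include <cassert>
#include <memory>
#include <vector>

struct CowLeaf {
    std::vector<int> keys;
    bool frozen = true;
};

struct CowRoot {
    std::vector<std::shared_ptr<CowLeaf>> children;
    bool frozen = true;
};

// Returns the root to use afterwards (a copy if the old root was frozen).
std::shared_ptr<CowRoot>
thawLeaf(std::shared_ptr<CowRoot> root, size_t childIdx) {
    auto leaf = root->children[childIdx];
    if (!leaf->frozen) {
        return root;                              // nothing frozen on the path
    }
    auto newLeaf = std::make_shared<CowLeaf>(*leaf);
    newLeaf->frozen = false;
    if (!root->frozen) {
        root->children[childIdx] = newLeaf;       // patch the writable parent
        return root;
    }
    auto newRoot = std::make_shared<CowRoot>(*root);
    newRoot->frozen = false;
    newRoot->children[childIdx] = newLeaf;        // frozen tree stays untouched
    return newRoot;
}

int main() {
    auto root = std::make_shared<CowRoot>();
    auto leaf = std::make_shared<CowLeaf>();
    leaf->keys = {1, 2};
    root->children.push_back(leaf);
    auto thawed = thawLeaf(root, 0);
    assert(thawed != root);                       // a writable root was returned
    assert(root->children[0]->frozen);            // frozen version left intact
    assert(!thawed->children[0]->frozen);
    thawed->children[0]->keys.push_back(3);       // safe to modify the copy
    assert(root->children[0]->keys.size() == 2);
    return 0;
}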
- -#include "btreenode.hpp" - -namespace search::btree { - -NoAggregated BTreeNodeAggregatedWrap::_instance; - -template <> -MinMaxAggregated BTreeNodeAggregatedWrap::_instance = MinMaxAggregated(); - -template class BTreeNodeDataWrap; -template class BTreeNodeDataWrap; -template class BTreeNodeT; -template class BTreeNodeTT; -template class BTreeNodeTT; -template class BTreeNodeTT; -template class BTreeNodeTT; -template class BTreeInternalNode; -template class BTreeInternalNode; -template class BTreeLeafNode; -template class BTreeLeafNode; -template class BTreeLeafNode; -template class BTreeLeafNodeTemp; -template class BTreeLeafNodeTemp; -template class BTreeLeafNodeTemp; - -} // namespace search::btree diff --git a/searchlib/src/vespa/searchlib/btree/btreenode.h b/searchlib/src/vespa/searchlib/btree/btreenode.h deleted file mode 100644 index 0b1f1e8612a..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreenode.h +++ /dev/null @@ -1,508 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "noaggregated.h" -#include "minmaxaggregated.h" -#include "btree_key_data.h" -#include -#include -#include -#include -#include - -namespace search::datastore { - -template class Allocator; -template class BufferType; - -namespace allocator { -template struct Assigner; -} - -} - -namespace search::btree { - -template class BTreeNodeAllocator; -template class BTreeNodeStore; - -class NoAggregated; - -class BTreeNode { -private: - uint8_t _level; - bool _isFrozen; -public: - static constexpr uint8_t EMPTY_LEVEL = 255; - static constexpr uint8_t LEAF_LEVEL = 0; -protected: - uint16_t _validSlots; - BTreeNode(uint8_t level) - : _level(level), - _isFrozen(false), - _validSlots(0) - {} - - BTreeNode(const BTreeNode &rhs) - : _level(rhs._level), - _isFrozen(rhs._isFrozen), - _validSlots(rhs._validSlots) - {} - - BTreeNode & - operator=(const BTreeNode &rhs) - { - assert(!_isFrozen); - _level = rhs._level; - _isFrozen = rhs._isFrozen; - _validSlots = rhs._validSlots; - return *this; - } - - ~BTreeNode() { assert(_isFrozen); } - -public: - typedef datastore::EntryRef Ref; - - bool isLeaf() const { return _level == 0u; } - bool getFrozen() const { return _isFrozen; } - void freeze() { _isFrozen = true; } - void unFreeze() { _isFrozen = false; } - void setLevel(uint8_t level) { _level = level; } - uint32_t getLevel() const { return _level; } - uint32_t validSlots() const { return _validSlots; } - void setValidSlots(uint16_t validSlots_) { _validSlots = validSlots_; } -}; - - -/** - * Use of BTreeNoLeafData class triggers the below partial - * specialization of BTreeNodeDataWrap to prevent unneeded storage - * overhead. 
- */ -template -class BTreeNodeDataWrap -{ -public: - DataT _data[NumSlots]; - - BTreeNodeDataWrap() : _data() {} - ~BTreeNodeDataWrap() { } - - void copyData(const BTreeNodeDataWrap &rhs, uint32_t validSlots) { - const DataT *rdata = rhs._data; - DataT *ldata = _data; - DataT *ldatae = _data + validSlots; - for (; ldata != ldatae; ++ldata, ++rdata) - *ldata = *rdata; - } - - const DataT &getData(uint32_t idx) const { return _data[idx]; } - void setData(uint32_t idx, const DataT &data) { _data[idx] = data; } - static bool hasData() { return true; } -}; - - -template -class BTreeNodeDataWrap -{ -public: - BTreeNodeDataWrap() {} - - void copyData(const BTreeNodeDataWrap &rhs, uint32_t validSlots) { - (void) rhs; - (void) validSlots; - } - - const BTreeNoLeafData &getData(uint32_t idx) const { - (void) idx; - return BTreeNoLeafData::_instance; - } - - void setData(uint32_t idx, const BTreeNoLeafData &data) { - (void) idx; - (void) data; - } - - static bool hasData() { return false; } -}; - - -template -class BTreeNodeAggregatedWrap -{ - typedef AggrT AggregatedType; - - AggrT _aggr; - static AggrT _instance; - -public: - BTreeNodeAggregatedWrap() - : _aggr() - {} - AggrT &getAggregated() { return _aggr; } - const AggrT &getAggregated() const { return _aggr; } - static const AggrT &getEmptyAggregated() { return _instance; } -}; - - -template <> -class BTreeNodeAggregatedWrap -{ - typedef NoAggregated AggregatedType; - - static NoAggregated _instance; -public: - BTreeNodeAggregatedWrap() {} - - NoAggregated &getAggregated() { return _instance; } - const NoAggregated &getAggregated() const { return _instance; } - static const NoAggregated &getEmptyAggregated() { return _instance; } -}; - - -template -class BTreeNodeT : public BTreeNode { -protected: - KeyT _keys[NumSlots]; - BTreeNodeT(uint8_t level) - : BTreeNode(level), - _keys() - {} - - ~BTreeNodeT() {} - - BTreeNodeT(const BTreeNodeT &rhs) - : BTreeNode(rhs) - { - const KeyT *rkeys = rhs._keys; - KeyT *lkeys = _keys; - KeyT *lkeyse = _keys + _validSlots; - for (; lkeys != lkeyse; ++lkeys, ++rkeys) - *lkeys = *rkeys; - } - - BTreeNodeT & - operator=(const BTreeNodeT &rhs) - { - BTreeNode::operator=(rhs); - const KeyT *rkeys = rhs._keys; - KeyT *lkeys = _keys; - KeyT *lkeyse = _keys + _validSlots; - for (; lkeys != lkeyse; ++lkeys, ++rkeys) - *lkeys = *rkeys; - return *this; - } - -public: - const KeyT & getKey(uint32_t idx) const { return _keys[idx]; } - const KeyT & getLastKey() const { return _keys[validSlots() - 1]; } - void writeKey(uint32_t idx, const KeyT & key) { _keys[idx] = key; } - - template - uint32_t lower_bound(uint32_t sidx, const KeyT & key, CompareT comp) const; - - template - uint32_t lower_bound(const KeyT & key, CompareT comp) const; - - template - uint32_t upper_bound(uint32_t sidx, const KeyT & key, CompareT comp) const; - - bool isFull() const { return validSlots() == NumSlots; } - bool isAtLeastHalfFull() const { return validSlots() >= minSlots(); } - static uint32_t maxSlots() { return NumSlots; } - static uint32_t minSlots() { return NumSlots / 2; } -}; - -template -class BTreeNodeTT : public BTreeNodeT, - public BTreeNodeDataWrap, - public BTreeNodeAggregatedWrap -{ -public: - typedef BTreeNodeT ParentType; - typedef BTreeNodeDataWrap DataWrapType; - typedef BTreeNodeAggregatedWrap AggrWrapType; - using ParentType::_validSlots; - using ParentType::validSlots; - using ParentType::getFrozen; - using ParentType::_keys; - using DataWrapType::getData; - using DataWrapType::setData; - using DataWrapType::copyData; 
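/*
 * Sketch of the storage trick the BTreeNodeDataWrap specialization above
 * relies on: a generic per-slot data array plus a specialization for an
 * empty "no data" tag type that stores nothing, so key-only trees pay no
 * per-slot data overhead.  NoData/SlotData are illustrative names, not the
 * searchlib types.
 */
#include <cassert>
#include <cstdint>

struct NoData {
    static NoData _instance;        // shared dummy returned for every slot
};
NoData NoData::_instance;

template <typename DataT, uint32_t NumSlots>
class SlotData {
    DataT _data[NumSlots];
public:
    const DataT& get(uint32_t idx) const { return _data[idx]; }
    void set(uint32_t idx, const DataT& d) { _data[idx] = d; }
    static constexpr bool hasData() { return true; }
};

template <uint32_t NumSlots>
class SlotData<NoData, NumSlots> {
public:
    const NoData& get(uint32_t) const { return NoData::_instance; }
    void set(uint32_t, const NoData&) {}
    static constexpr bool hasData() { return false; }
};

int main() {
    static_assert(sizeof(SlotData<NoData, 16>) < sizeof(SlotData<int, 16>),
                  "empty specialization carries no per-slot storage");
    SlotData<int, 16> d;
    d.set(3, 42);
    assert(d.get(3) == 42);
    assert(!SlotData<NoData, 16>::hasData());
    return 0;
}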
-protected: - BTreeNodeTT(uint8_t level) - : ParentType(level), - DataWrapType() - {} - - ~BTreeNodeTT() {} - - BTreeNodeTT(const BTreeNodeTT &rhs) - : ParentType(rhs), - DataWrapType(rhs), - AggrWrapType(rhs) - { - copyData(rhs, _validSlots); - } - - BTreeNodeTT &operator=(const BTreeNodeTT &rhs) { - ParentType::operator=(rhs); - AggrWrapType::operator=(rhs); - copyData(rhs, _validSlots); - return *this; - } - -public: - typedef BTreeNodeTT NodeType; - void insert(uint32_t idx, const KeyT & key, const DataT & data); - void update(uint32_t idx, const KeyT & key, const DataT & data) { - // assert(idx < NodeType::maxSlots()); - // assert(!getFrozen()); - _keys[idx] = key; - setData(idx, data); - } - void splitInsert(NodeType * splitNode, uint32_t idx, const KeyT & key, const DataT & data); - void remove(uint32_t idx); - void stealAllFromLeftNode(const NodeType * victim); - void stealAllFromRightNode(const NodeType * victim); - void stealSomeFromLeftNode(NodeType * victim); - void stealSomeFromRightNode(NodeType * victim); - void cleanRange(uint32_t from, uint32_t to); - void clean(); - void cleanFrozen(); -}; - -template -class BTreeInternalNode : public BTreeNodeTT -{ -public: - typedef BTreeNodeTT ParentType; - typedef BTreeInternalNode InternalNodeType; - template - friend class BTreeNodeAllocator; - template - friend class BTreeNodeStore; - template - friend class BTreeNodeDataWrap; - template - friend class datastore::BufferType; - template - friend class datastore::Allocator; - template - friend struct datastore::allocator::Assigner; - typedef BTreeNode::Ref Ref; - typedef datastore::Handle RefPair; - using ParentType::_keys; - using ParentType::validSlots; - using ParentType::_validSlots; - using ParentType::getFrozen; - using ParentType::getData; - using ParentType::setData; - using ParentType::setLevel; - using ParentType::EMPTY_LEVEL; - typedef KeyT KeyType; - typedef Ref DataType; -private: - uint32_t _validLeaves; - - BTreeInternalNode() - : ParentType(EMPTY_LEVEL), - _validLeaves(0u) - {} - - BTreeInternalNode(const BTreeInternalNode &rhs) - : ParentType(rhs), - _validLeaves(rhs._validLeaves) - {} - - ~BTreeInternalNode() {} - - BTreeInternalNode &operator=(const BTreeInternalNode &rhs) { - ParentType::operator=(rhs); - _validLeaves = rhs._validLeaves; - return *this; - } - - template - uint32_t countValidLeaves(uint32_t start, uint32_t end, NodeAllocatorType &allocator); - -public: - BTreeNode::Ref getChild(uint32_t idx) const { return getData(idx); } - void setChild(uint32_t idx, BTreeNode::Ref child) { setData(idx, child); } - BTreeNode::Ref getLastChild() const { return getChild(validSlots() - 1); } - uint32_t validLeaves() const { return _validLeaves; } - void setValidLeaves(uint32_t newValidLeaves) { _validLeaves = newValidLeaves; } - void incValidLeaves(uint32_t delta) { _validLeaves += delta; } - void decValidLeaves(uint32_t delta) { _validLeaves -= delta; } - - template - void splitInsert(BTreeInternalNode *splitNode, uint32_t idx, const KeyT &key, - const BTreeNode::Ref &data, NodeAllocatorType &allocator); - - void stealAllFromLeftNode(const BTreeInternalNode *victim); - void stealAllFromRightNode(const BTreeInternalNode *victim); - - template - void stealSomeFromLeftNode(BTreeInternalNode *victim, NodeAllocatorType &allocator); - - template - void stealSomeFromRightNode(BTreeInternalNode *victim, NodeAllocatorType &allocator); - - void clean(); - void cleanFrozen(); - - template - void foreach_key(NodeStoreType &store, FunctionType func) const { - const 
BTreeNode::Ref *it = this->_data; - const BTreeNode::Ref *ite = it + _validSlots; - if (this->getLevel() > 1u) { - for (; it != ite; ++it) { - store.mapInternalRef(*it)->foreach_key(store, func); - } - } else { - for (; it != ite; ++it) { - store.mapLeafRef(*it)->foreach_key(func); - } - } - } - - template - void foreach(NodeStoreType &store, FunctionType func) const { - const BTreeNode::Ref *it = this->_data; - const BTreeNode::Ref *ite = it + _validSlots; - if (this->getLevel() > 1u) { - for (; it != ite; ++it) { - store.mapInternalRef(*it)->foreach(store, func); - } - } else { - for (; it != ite; ++it) { - store.mapLeafRef(*it)->foreach(func); - } - } - } -}; - -template -class BTreeLeafNode : public BTreeNodeTT -{ -public: - typedef BTreeNodeTT ParentType; - typedef BTreeLeafNode LeafNodeType; - template - friend class BTreeNodeAllocator; - template - friend class BTreeNodeStore; - template - friend class datastore::BufferType; - template - friend class datastore::Allocator; - template - friend struct datastore::allocator::Assigner; - typedef BTreeNode::Ref Ref; - typedef datastore::Handle RefPair; - using ParentType::validSlots; - using ParentType::_validSlots; - using ParentType::_keys; - using ParentType::freeze; - using ParentType::stealSomeFromLeftNode; - using ParentType::stealSomeFromRightNode; - using ParentType::LEAF_LEVEL; - typedef BTreeKeyData KeyDataType; - typedef KeyT KeyType; - typedef DataT DataType; -private: - BTreeLeafNode() : ParentType(LEAF_LEVEL) {} - -protected: - BTreeLeafNode(const BTreeLeafNode &rhs) - : ParentType(rhs) - {} - - BTreeLeafNode(const KeyDataType *smallArray, uint32_t arraySize); - - ~BTreeLeafNode() {} - - BTreeLeafNode &operator=(const BTreeLeafNode &rhs) { - ParentType::operator=(rhs); - return *this; - } - -public: - template - void stealSomeFromLeftNode(BTreeLeafNode *victim, NodeAllocatorType &allocator) - { - (void) allocator; - stealSomeFromLeftNode(victim); - } - - template - void stealSomeFromRightNode(BTreeLeafNode *victim, NodeAllocatorType &allocator) { - (void) allocator; - stealSomeFromRightNode(victim); - } - - const DataT &getLastData() const { return this->getData(validSlots() - 1); } - void writeData(uint32_t idx, const DataT &data) { this->setData(idx, data); } - uint32_t validLeaves() const { return validSlots(); } - - template - void foreach_key(FunctionType func) const { - const KeyT *it = _keys; - const KeyT *ite = it + _validSlots; - for (; it != ite; ++it) { - func(*it); - } - } - - template - void foreach(FunctionType func) const { - const KeyT *it = _keys; - const KeyT *ite = it + _validSlots; - uint32_t idx = 0; - for (; it != ite; ++it) { - func(*it, this->getData(idx++)); - } - } -}; - - -template -class BTreeLeafNodeTemp : public BTreeLeafNode -{ -public: - typedef BTreeLeafNode ParentType; - typedef typename ParentType::KeyDataType KeyDataType; - - BTreeLeafNodeTemp(const KeyDataType *smallArray, - uint32_t arraySize) - : ParentType(smallArray, arraySize) - {} - - ~BTreeLeafNodeTemp() {} -}; - -extern template class BTreeNodeDataWrap; -extern template class BTreeNodeDataWrap; -extern template class BTreeNodeT; -extern template class BTreeNodeTT; -extern template class BTreeNodeTT; -extern template class BTreeNodeTT; -extern template class BTreeNodeTT; -extern template class BTreeInternalNode; -extern template class BTreeInternalNode; -extern template class BTreeLeafNode; -extern template class BTreeLeafNode; -extern template class BTreeLeafNode; -extern template class BTreeLeafNodeTemp; -extern template class 
BTreeLeafNodeTemp; -extern template class BTreeLeafNodeTemp; - -} diff --git a/searchlib/src/vespa/searchlib/btree/btreenode.hpp b/searchlib/src/vespa/searchlib/btree/btreenode.hpp deleted file mode 100644 index 421e83a8440..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreenode.hpp +++ /dev/null @@ -1,384 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "btreenode.h" -#include - -namespace search::btree { - -namespace { - -class SplitInsertHelper { -private: - uint32_t _idx; - uint32_t _median; - bool _medianBumped; -public: - SplitInsertHelper(uint32_t idx, uint32_t validSlots) : - _idx(idx), - _median(validSlots / 2), - _medianBumped(false) - { - if (idx > _median) { - _median++; - _medianBumped = true; - } - } - uint32_t getMedian() const { return _median; } - bool insertInSplitNode() const { - if (_median >= _idx && !_medianBumped) { - return false; - } - return true; - } -}; - - -} - -template -template -uint32_t -BTreeNodeT:: -lower_bound(uint32_t sidx, const KeyT & key, CompareT comp) const -{ - const KeyT * itr = std::lower_bound - (_keys + sidx, _keys + validSlots(), key, comp); - return itr - _keys; -} - -template -template -uint32_t -BTreeNodeT::lower_bound(const KeyT & key, CompareT comp) const -{ - - const KeyT * itr = std::lower_bound - (_keys, _keys + validSlots(), key, comp); - return itr - _keys; -} - - -template -template -uint32_t -BTreeNodeT:: -upper_bound(uint32_t sidx, const KeyT & key, CompareT comp) const -{ - const KeyT * itr = std::upper_bound - (_keys + sidx, _keys + validSlots(), key, comp); - return itr - _keys; -} - - -template -void -BTreeNodeTT::insert(uint32_t idx, - const KeyT &key, - const DataT &data) -{ - assert(validSlots() < NodeType::maxSlots()); - assert(!getFrozen()); - for (uint32_t i = validSlots(); i > idx; --i) { -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Warray-bounds" // This dirty one is due a suspected bug in gcc 6.2 - _keys[i] = _keys[i - 1]; -#pragma GCC diagnostic pop - setData(i, getData(i - 1)); - } - _keys[idx] = key; - setData(idx, data); - _validSlots++; -} - -template -void -BTreeNodeTT::splitInsert(NodeType *splitNode, - uint32_t idx, - const KeyT &key, - const DataT &data) -{ - assert(!getFrozen()); - assert(!splitNode->getFrozen()); - SplitInsertHelper sih(idx, validSlots()); - splitNode->_validSlots = validSlots() - sih.getMedian(); - for (uint32_t i = sih.getMedian(); i < validSlots(); ++i) { - splitNode->_keys[i - sih.getMedian()] = _keys[i]; - splitNode->setData(i - sih.getMedian(), getData(i)); - } - cleanRange(sih.getMedian(), validSlots()); - _validSlots = sih.getMedian(); - if (sih.insertInSplitNode()) { - splitNode->insert(idx - sih.getMedian(), key, data); - } else { - insert(idx, key, data); - } -} - -template -void -BTreeNodeTT::remove(uint32_t idx) -{ - assert(!getFrozen()); - for (uint32_t i = idx + 1; i < validSlots(); ++i) { - _keys[i - 1] = _keys[i]; - setData(i - 1, getData(i)); - } - _validSlots--; - _keys[validSlots()] = KeyT(); - setData(validSlots(), DataT()); -} - -template -void -BTreeNodeTT:: -stealAllFromLeftNode(const NodeType *victim) -{ - assert(validSlots() + victim->validSlots() <= NodeType::maxSlots()); - assert(!getFrozen()); - for (int i = validSlots() - 1; i >= 0; --i) { - _keys[i + victim->validSlots()] = _keys[i]; - setData(i + victim->validSlots(), getData(i)); - } - for (uint32_t i = 0; i < victim->validSlots(); ++i) { - _keys[i] = victim->_keys[i]; - 
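/*
 * Sketch of the SplitInsertHelper policy used by splitInsert() above: when
 * a full node is split, slots [median, validSlots) move to the split node,
 * and the median is nudged one slot to the right when the new element
 * itself belongs in the upper half, so the two halves stay balanced after
 * the insert.  splitPlan() is a made-up free-function restatement.
 */
#include <cassert>
#include <cstdint>
#include <utility>

// Returns {median, insertGoesToSplitNode} for an insert at `idx` into a
// node currently holding `validSlots` elements.
std::pair<uint32_t, bool> splitPlan(uint32_t idx, uint32_t validSlots) {
    uint32_t median = validSlots / 2;
    bool medianBumped = false;
    if (idx > median) {
        ++median;
        medianBumped = true;
    }
    bool insertInSplitNode = !(median >= idx && !medianBumped);
    return {median, insertInSplitNode};
}

int main() {
    // Splitting a full 8-slot node: the lower node keeps `median` old slots,
    // the split node gets the rest, and the new element goes to whichever
    // side the second member of the pair names.
    auto low = splitPlan(2, 8);    // insert lands in the lower half
    assert(low.first == 4 && !low.second);
    auto mid = splitPlan(4, 8);    // insert exactly at the median stays low
    assert(mid.first == 4 && !mid.second);
    auto high = splitPlan(6, 8);   // insert lands in the split (upper) node
    assert(high.first == 5 && high.second);
    return 0;
}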
setData(i, victim->getData(i)); - } - _validSlots += victim->validSlots(); -} - -template -void -BTreeNodeTT:: -stealAllFromRightNode(const NodeType *victim) -{ - assert(validSlots() + victim->validSlots() <= NodeType::maxSlots()); - assert(!getFrozen()); - for (uint32_t i = 0; i < victim->validSlots(); ++i) { - _keys[validSlots() + i] = victim->_keys[i]; - setData(validSlots() + i, victim->getData(i)); - } - _validSlots += victim->validSlots(); -} - -template -void -BTreeNodeTT:: -stealSomeFromLeftNode(NodeType *victim) -{ - assert(validSlots() + victim->validSlots() >= NodeType::minSlots()); - assert(!getFrozen()); - assert(!victim->getFrozen()); - uint32_t median = (validSlots() + victim->validSlots() + 1) / 2; - uint32_t steal = median - validSlots(); - _validSlots += steal; - for (int32_t i = validSlots() - 1; i >= static_cast(steal); --i) { - _keys[i] = _keys[i - steal]; - setData(i, getData(i - steal)); - } - for (uint32_t i = 0; i < steal; ++i) { - _keys[i] = victim->_keys[victim->validSlots() - steal + i]; - setData(i, victim->getData(victim->validSlots() - steal + i)); - } - victim->cleanRange(victim->validSlots() - steal, victim->validSlots()); - victim->_validSlots -= steal; -} - -template -void -BTreeNodeTT:: -stealSomeFromRightNode(NodeType *victim) -{ - assert(validSlots() + victim->validSlots() >= NodeType::minSlots()); - assert(!getFrozen()); - assert(!victim->getFrozen()); - uint32_t median = (validSlots() + victim->validSlots() + 1) / 2; - uint32_t steal = median - validSlots(); - for (uint32_t i = 0; i < steal; ++i) { - _keys[validSlots() + i] = victim->_keys[i]; - setData(validSlots() + i, victim->getData(i)); - } - _validSlots += steal; - for (uint32_t i = steal; i < victim->validSlots(); ++i) { - victim->_keys[i - steal] = victim->_keys[i]; - victim->setData(i - steal, victim->getData(i)); - } - victim->cleanRange(victim->validSlots() - steal, victim->validSlots()); - victim->_validSlots -= steal; -} - - -template -void -BTreeNodeTT::cleanRange(uint32_t from, - uint32_t to) -{ - assert(from < to); - assert(to <= validSlots()); - assert(validSlots() <= NodeType::maxSlots()); - assert(!getFrozen()); - KeyT emptyKey = KeyT(); - for (KeyT *k = _keys + from, *ke = _keys + to; k != ke; ++k) - *k = emptyKey; - DataT emptyData = DataT(); - for (uint32_t i = from; i != to; ++i) - setData(i, emptyData); -} - - -template -void -BTreeNodeTT::clean() -{ - if (validSlots() == 0) - return; - cleanRange(0, validSlots()); - _validSlots = 0; -} - - -template -void -BTreeNodeTT::cleanFrozen() -{ - assert(validSlots() <= NodeType::maxSlots()); - assert(getFrozen()); - if (validSlots() == 0) - return; - KeyT emptyKey = KeyT(); - for (KeyT *k = _keys, *ke = _keys + validSlots(); k != ke; ++k) - *k = emptyKey; - DataT emptyData = DataT(); - for (uint32_t i = 0, ie = validSlots(); i != ie; ++i) - setData(i, emptyData); - _validSlots = 0; -} - - -template -template -void -BTreeInternalNode:: -splitInsert(BTreeInternalNode *splitNode, uint32_t idx, const KeyT &key, - const BTreeNode::Ref &data, - NodeAllocatorType &allocator) -{ - assert(!getFrozen()); - assert(!splitNode->getFrozen()); - SplitInsertHelper sih(idx, validSlots()); - splitNode->_validSlots = validSlots() - sih.getMedian(); - uint32_t splitLeaves = 0; - uint32_t newLeaves = allocator.validLeaves(data); - for (uint32_t i = sih.getMedian(); i < validSlots(); ++i) { - splitNode->_keys[i - sih.getMedian()] = _keys[i]; - splitNode->setData(i - sih.getMedian(), getData(i)); - splitLeaves += allocator.validLeaves(getData(i)); - } - 
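/*
 * Sketch of the stealSomeFromRightNode() rebalancing above: a node that has
 * fallen below the minimum fill takes enough of its right sibling's leading
 * entries that both nodes end up holding roughly half of their combined
 * total.  std::vector stands in for the fixed-size slot arrays; keys only,
 * no data or aggregates.
 */
#include <cassert>
#include <cstddef>
#include <vector>

void stealSomeFromRight(std::vector<int>& node, std::vector<int>& rightSibling) {
    size_t total = node.size() + rightSibling.size();
    size_t median = (total + 1) / 2;            // same median rule as above
    size_t steal = median - node.size();        // assumes node is the smaller one
    node.insert(node.end(), rightSibling.begin(), rightSibling.begin() + steal);
    rightSibling.erase(rightSibling.begin(), rightSibling.begin() + steal);
}

int main() {
    std::vector<int> node{1, 2};                // under-filled node
    std::vector<int> sibling{3, 4, 5, 6, 7, 8};
    stealSomeFromRight(node, sibling);
    assert(node.size() == 4 && sibling.size() == 4);
    assert(node.back() == 4 && sibling.front() == 5);
    return 0;
}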
splitNode->_validLeaves = splitLeaves; - this->cleanRange(sih.getMedian(), validSlots()); - _validLeaves -= splitLeaves + newLeaves; - _validSlots = sih.getMedian(); - if (sih.insertInSplitNode()) { - splitNode->insert(idx - sih.getMedian(), key, data); - splitNode->_validLeaves += newLeaves; - } else { - this->insert(idx, key, data); - _validLeaves += newLeaves; - } -} - - -template -void -BTreeInternalNode:: -stealAllFromLeftNode(const BTreeInternalNode *victim) -{ - ParentType::stealAllFromLeftNode(victim); - _validLeaves += victim->_validLeaves; -} - -template -void -BTreeInternalNode:: -stealAllFromRightNode(const BTreeInternalNode *victim) -{ - ParentType::stealAllFromRightNode(victim); - _validLeaves += victim->_validLeaves; -} - -template -template -uint32_t -BTreeInternalNode::countValidLeaves(uint32_t start, uint32_t end, NodeAllocatorType &allocator) -{ - assert(start <= end); - assert(end <= validSlots()); - uint32_t leaves = 0; - for (uint32_t i = start; i < end; ++i) { - leaves += allocator.validLeaves(getData(i)); - } - return leaves; -} - -template -template -void -BTreeInternalNode:: -stealSomeFromLeftNode(BTreeInternalNode *victim, NodeAllocatorType &allocator) -{ - uint16_t oldValidSlots = validSlots(); - ParentType::stealSomeFromLeftNode(victim); - uint32_t stolenLeaves = countValidLeaves(0, validSlots() - oldValidSlots, allocator); - incValidLeaves(stolenLeaves); - victim->decValidLeaves(stolenLeaves); -} - - -template -template -void -BTreeInternalNode:: -stealSomeFromRightNode(BTreeInternalNode *victim, NodeAllocatorType &allocator) -{ - uint16_t oldValidSlots = validSlots(); - ParentType::stealSomeFromRightNode(victim); - uint32_t stolenLeaves = countValidLeaves(oldValidSlots, validSlots(), allocator); - incValidLeaves(stolenLeaves); - victim->decValidLeaves(stolenLeaves); -} - - -template -void -BTreeInternalNode::clean() -{ - ParentType::clean(); - _validLeaves = 0; -} - - -template -void -BTreeInternalNode::cleanFrozen() -{ - ParentType::cleanFrozen(); - _validLeaves = 0; -} - - -template -BTreeLeafNode:: -BTreeLeafNode(const KeyDataType *smallArray, uint32_t arraySize) - : ParentType(LEAF_LEVEL) -{ - assert(arraySize <= BTreeLeafNode::maxSlots()); - _validSlots = arraySize; - for (uint32_t idx = 0; idx < arraySize; ++idx) { - _keys[idx] = smallArray[idx]._key; - this->setData(idx, smallArray[idx].getData()); - } - freeze(); -} - -} diff --git a/searchlib/src/vespa/searchlib/btree/btreenodeallocator.cpp b/searchlib/src/vespa/searchlib/btree/btreenodeallocator.cpp deleted file mode 100644 index 1a05d68b04f..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreenodeallocator.cpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#include "btreenodeallocator.hpp" -#include - -template class vespalib::Array; - -namespace search::btree { - -template class BTreeNodeAllocator; -template class BTreeNodeAllocator; -template class BTreeNodeAllocator; - -} diff --git a/searchlib/src/vespa/searchlib/btree/btreenodeallocator.h b/searchlib/src/vespa/searchlib/btree/btreenodeallocator.h deleted file mode 100644 index d2d2cf44a46..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreenodeallocator.h +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
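The splitInsert paths removed above all funnel through SplitInsertHelper: the median is bumped by one when the insert index lands in the upper half, which decides both where the node is cut and which half receives the new element. A minimal standalone sketch of just that rule, with a hypothetical SplitDecision struct standing in for the real helper:

#include <cassert>
#include <cstdint>

// Simplified model of SplitInsertHelper: given the insert index and the
// number of valid slots, decide where to cut the node and which half the
// new element goes into.
struct SplitDecision {
    uint32_t median;          // first slot that moves to the split node
    bool insertInSplitNode;   // true -> new element goes to the right half
};

SplitDecision decide_split(uint32_t idx, uint32_t validSlots) {
    uint32_t median = validSlots / 2;
    bool medianBumped = false;
    if (idx > median) {       // insert position is in the upper half
        ++median;
        medianBumped = true;
    }
    return {median, !(median >= idx && !medianBumped)};
}

int main() {
    // Splitting a full 16-slot node while inserting at slot 3: the new
    // element stays in the original (left) node.
    SplitDecision d = decide_split(3, 16);
    assert(d.median == 8 && !d.insertInSplitNode);
    // Inserting at slot 12 bumps the median and sends the element right.
    d = decide_split(12, 16);
    assert(d.median == 9 && d.insertInSplitNode);
    return 0;
}

Either way, both halves end up with roughly half of the slots, which is what keeps the tree balanced after a split.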
- -#pragma once - -#include "btreenode.h" -#include "btreenodestore.h" -#include -#include -#include -#include -#include - -namespace search::btree { - -template class BTreeRootBase; - -template -class BTreeNodeAllocator -{ -public: - using InternalNodeType = BTreeInternalNode; - using LeafNodeType = BTreeLeafNode; - using InternalNodeTypeRefPair = typename InternalNodeType::RefPair; - using LeafNodeTypeRefPair = typename LeafNodeType::RefPair; - using BTreeRootBaseType = BTreeRootBase; - using generation_t = vespalib::GenerationHandler::generation_t; - using NodeStore = BTreeNodeStore; - using EntryRef = datastore::EntryRef; - using DataStoreBase = datastore::DataStoreBase; - -private: - BTreeNodeAllocator(const BTreeNodeAllocator &rhs); - - BTreeNodeAllocator & operator=(const BTreeNodeAllocator &rhs); - - NodeStore _nodeStore; - - typedef vespalib::Array RefVector; - typedef vespalib::Array BTreeRootBaseTypeVector; - - // Nodes that might not be frozen. - RefVector _internalToFreeze; - RefVector _leafToFreeze; - BTreeRootBaseTypeVector _treeToFreeze; - - // Nodes held until freeze is performed - RefVector _internalHoldUntilFreeze; - RefVector _leafHoldUntilFreeze; - -public: - BTreeNodeAllocator(); - ~BTreeNodeAllocator(); - - void disableFreeLists() { - _nodeStore.disableFreeLists(); - } - - void disableElemHoldList() { - _nodeStore.disableElemHoldList(); - } - - /** - * Allocate internal node. - */ - InternalNodeTypeRefPair allocInternalNode(uint8_t level); - - /* - * Allocate leaf node. - */ - LeafNodeTypeRefPair allocLeafNode(); - InternalNodeTypeRefPair thawNode(BTreeNode::Ref nodeRef, InternalNodeType *node); - LeafNodeTypeRefPair thawNode(BTreeNode::Ref nodeRef, LeafNodeType *node); - BTreeNode::Ref thawNode(BTreeNode::Ref node); - - /** - * hold internal node until freeze/generation constraint is satisfied. - */ - void holdNode(BTreeNode::Ref nodeRef, InternalNodeType *node); - - /** - * hold leaf node until freeze/generation constraint is satisfied. - */ - void holdNode(BTreeNode::Ref nodeRef, LeafNodeType *node); - - /** - * Mark that tree needs to be frozen. Tree must be kept alive until - * freeze operation has completed. - */ - void needFreeze(BTreeRootBaseType *tree); - - /** - * Freeze all nodes that are not already frozen. - */ - void freeze(); - - /** - * Try to free held nodes if nobody can be referencing them. - */ - void trimHoldLists(generation_t usedGen); - - /** - * Transfer nodes from hold1 lists to hold2 lists, they are no - * longer referenced by new frozen structures, but readers accessing - * older versions of the frozen structure must leave before elements - * can be unheld. 
- */ - void transferHoldLists(generation_t generation); - - void clearHoldLists(); - - static bool isValidRef(BTreeNode::Ref ref) { return NodeStore::isValidRef(ref); } - - bool isLeafRef(BTreeNode::Ref ref) const { - if (!isValidRef(ref)) - return false; - return _nodeStore.isLeafRef(ref); - } - - const InternalNodeType *mapInternalRef(BTreeNode::Ref ref) const { - return _nodeStore.mapInternalRef(ref); - } - - InternalNodeType *mapInternalRef(BTreeNode::Ref ref) { - return _nodeStore.mapInternalRef(ref); - } - - const LeafNodeType *mapLeafRef(BTreeNode::Ref ref) const { - return _nodeStore.mapLeafRef(ref); - } - - LeafNodeType *mapLeafRef(BTreeNode::Ref ref) { - return _nodeStore.mapLeafRef(ref); - } - - template - const NodeType *mapRef(BTreeNode::Ref ref) const { - return _nodeStore.template mapRef(ref); - } - - template - NodeType *mapRef(BTreeNode::Ref ref) { - return _nodeStore.template mapRef(ref); - } - - InternalNodeTypeRefPair moveInternalNode(const InternalNodeType *node); - LeafNodeTypeRefPair moveLeafNode(const LeafNodeType *node); - uint32_t validLeaves(BTreeNode::Ref ref) const; - - /* - * Extract level from ref. - */ - uint32_t getLevel(BTreeNode::Ref ref) const; - const KeyT &getLastKey(BTreeNode::Ref node) const; - const AggrT &getAggregated(BTreeNode::Ref node) const; - - vespalib::MemoryUsage getMemoryUsage() const; - - vespalib::string toString(BTreeNode::Ref ref) const; - vespalib::string toString(const BTreeNode * node) const; - - bool getCompacting(EntryRef ref) const { return _nodeStore.getCompacting(ref); } - std::vector startCompact() { return _nodeStore.startCompact(); } - - void finishCompact(const std::vector &toHold) { - return _nodeStore.finishCompact(toHold); - } - - template - void foreach_key(EntryRef ref, FunctionType func) const { - _nodeStore.foreach_key(ref, func); - } - - template - void foreach(EntryRef ref, FunctionType func) const { - _nodeStore.foreach(ref, func); - } - - const NodeStore &getNodeStore() const { return _nodeStore; } -}; - -extern template class BTreeNodeAllocator; -extern template class BTreeNodeAllocator; -extern template class BTreeNodeAllocator; - -} - diff --git a/searchlib/src/vespa/searchlib/btree/btreenodeallocator.hpp b/searchlib/src/vespa/searchlib/btree/btreenodeallocator.hpp deleted file mode 100644 index 197869a7c71..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreenodeallocator.hpp +++ /dev/null @@ -1,434 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
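The hold-list members and transferHoldLists/trimHoldLists declared above follow the usual generation-based reclamation pattern: retired nodes are parked with the generation that was current when they were retired, and freed only once every reader has moved past that generation. A toy model of that life cycle, with hypothetical HoldList/transfer/trim names rather than the vespalib GenerationHandler API:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <deque>
#include <functional>

using generation_t = uint64_t;

// Toy hold list: entries become reclaimable once the oldest generation still
// used by any reader is newer than the generation they were held at.
class HoldList {
    struct Held { generation_t gen; std::function<void()> reclaim; };
    std::deque<Held> _held;
public:
    void transfer(generation_t current, std::function<void()> reclaim) {
        _held.push_back({current, std::move(reclaim)});
    }
    void trim(generation_t oldest_used) {
        while (!_held.empty() && _held.front().gen < oldest_used) {
            _held.front().reclaim();
            _held.pop_front();
        }
    }
    std::size_t size() const { return _held.size(); }
};

int main() {
    HoldList hold;
    int freed = 0;
    hold.transfer(5, [&] { ++freed; });   // retired while generation 5 was current
    hold.trim(5);                         // a reader may still be at generation 5
    assert(freed == 0 && hold.size() == 1);
    hold.trim(6);                         // all readers have moved past generation 5
    assert(freed == 1 && hold.size() == 0);
    return 0;
}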
- -#pragma once - -#include "btreenodeallocator.h" -#include "btreerootbase.h" -#include -#include -#include "btreenodestore.hpp" - -namespace search::btree { - -template -BTreeNodeAllocator:: -BTreeNodeAllocator() - : _nodeStore(), - _internalToFreeze(), - _leafToFreeze(), - _treeToFreeze(), - _internalHoldUntilFreeze(), - _leafHoldUntilFreeze() -{ -} - - -template -BTreeNodeAllocator:: -~BTreeNodeAllocator() -{ - assert(_internalToFreeze.empty()); - assert(_leafToFreeze.empty()); - assert(_treeToFreeze.empty()); - assert(_internalHoldUntilFreeze.empty()); - assert(_leafHoldUntilFreeze.empty()); - DataStoreBase::MemStats stats = _nodeStore.getMemStats(); - assert(stats._usedBytes == stats._deadBytes); - assert(stats._holdBytes == 0); - (void) stats; -} - - -template -typename BTreeNodeAllocator:: -InternalNodeTypeRefPair -BTreeNodeAllocator:: -allocInternalNode(uint8_t level) -{ - if (_internalHoldUntilFreeze.empty()) { - InternalNodeTypeRefPair nodeRef = _nodeStore.allocInternalNode(); - assert(nodeRef.ref.valid()); - _internalToFreeze.push_back(nodeRef.ref); - nodeRef.data->setLevel(level); - return nodeRef; - } - BTreeNode::Ref nodeRef = _internalHoldUntilFreeze.back(); - _internalHoldUntilFreeze.pop_back(); - InternalNodeType *node = mapInternalRef(nodeRef); - assert(!node->getFrozen()); - node->setLevel(level); - return InternalNodeTypeRefPair(nodeRef, node); -} - - -template -typename BTreeNodeAllocator:: -LeafNodeTypeRefPair -BTreeNodeAllocator:: -allocLeafNode() -{ - if (_leafHoldUntilFreeze.empty()) { - LeafNodeTypeRefPair nodeRef = _nodeStore.allocLeafNode(); - _leafToFreeze.push_back(nodeRef.ref); - return nodeRef; - } - BTreeNode::Ref nodeRef = _leafHoldUntilFreeze.back(); - _leafHoldUntilFreeze.pop_back(); - LeafNodeType *node = mapLeafRef(nodeRef); - assert(!node->getFrozen()); - return LeafNodeTypeRefPair(nodeRef, node); -} - - - -template -typename BTreeNodeAllocator:: -InternalNodeTypeRefPair -BTreeNodeAllocator:: -thawNode(BTreeNode::Ref nodeRef, InternalNodeType *node) -{ - if (_internalHoldUntilFreeze.empty()) { - InternalNodeTypeRefPair retNodeRef = - _nodeStore.allocInternalNodeCopy(*node); - assert(retNodeRef.data->getFrozen()); - retNodeRef.data->unFreeze(); - assert(retNodeRef.ref.valid()); - _internalToFreeze.push_back(retNodeRef.ref); - holdNode(nodeRef, node); - return retNodeRef; - } - BTreeNode::Ref retNodeRef = _internalHoldUntilFreeze.back(); - InternalNodeType *retNode = mapInternalRef(retNodeRef); - _internalHoldUntilFreeze.pop_back(); - assert(!retNode->getFrozen()); - *retNode = static_cast(*node); - assert(retNode->getFrozen()); - retNode->unFreeze(); - holdNode(nodeRef, node); - return InternalNodeTypeRefPair(retNodeRef, retNode); -} - - -template -typename BTreeNodeAllocator:: -LeafNodeTypeRefPair -BTreeNodeAllocator:: -thawNode(BTreeNode::Ref nodeRef, LeafNodeType *node) -{ - if (_leafHoldUntilFreeze.empty()) { - LeafNodeTypeRefPair retNodeRef = - _nodeStore.allocLeafNodeCopy(*node); - assert(retNodeRef.data->getFrozen()); - retNodeRef.data->unFreeze(); - _leafToFreeze.push_back(retNodeRef.ref); - holdNode(nodeRef, node); - return retNodeRef; - } - BTreeNode::Ref retNodeRef = _leafHoldUntilFreeze.back(); - LeafNodeType *retNode = mapLeafRef(retNodeRef); - _leafHoldUntilFreeze.pop_back(); - assert(!retNode->getFrozen()); - *retNode = static_cast(*node); - assert(retNode->getFrozen()); - retNode->unFreeze(); - holdNode(nodeRef, node); - return LeafNodeTypeRefPair(retNodeRef, retNode); -} - -template -BTreeNode::Ref -BTreeNodeAllocator:: 
-thawNode(BTreeNode::Ref node) -{ - if (isLeafRef(node)) - return thawNode(node, mapLeafRef(node)).ref; - else - return thawNode(node, mapInternalRef(node)).ref; -} - - -template -void -BTreeNodeAllocator:: -holdNode(BTreeNode::Ref nodeRef, - InternalNodeType *node) -{ - if (node->getFrozen()) { - _nodeStore.holdElem(nodeRef); - } else { - node->clean(); - _internalHoldUntilFreeze.push_back(nodeRef); - } -} - - -template -void -BTreeNodeAllocator:: -holdNode(BTreeNode::Ref nodeRef, - LeafNodeType *node) -{ - if (node->getFrozen()) { - _nodeStore.holdElem(nodeRef); - } else { - node->clean(); - _leafHoldUntilFreeze.push_back(nodeRef); - } -} - - -template -void -BTreeNodeAllocator:: -freeze() -{ - // Freeze nodes. - - if (!_internalToFreeze.empty() || !_leafToFreeze.empty()) { - { - for (auto &i : _internalToFreeze) { - assert(i.valid()); - mapInternalRef(i)->freeze(); - } - _internalToFreeze.clear(); - } - { - for (auto &i : _leafToFreeze) { - assert(i.valid()); - mapLeafRef(i)->freeze(); - } - _leafToFreeze.clear(); - } - - // Tree node freezes must be visible before tree freezes to - // ensure that readers see a frozen world - std::atomic_thread_fence(std::memory_order_release); - } - - // Freeze trees. - - if (!_treeToFreeze.empty()) { - for (auto &i : _treeToFreeze) { - i->freeze(*this); - } - _treeToFreeze.clear(); - // Tree freezes must be visible before held nodes are freed - std::atomic_thread_fence(std::memory_order_release); - } - - - // Free nodes that were only held due to freezing. - - { - for (auto &i : _internalHoldUntilFreeze) { - assert(!isLeafRef(i)); - InternalNodeType *inode = mapInternalRef(i); - (void) inode; - assert(inode->getFrozen()); - _nodeStore.freeElem(i); - } - _internalHoldUntilFreeze.clear(); - } - { - for (auto &i : _leafHoldUntilFreeze) { - assert(isLeafRef(i)); - LeafNodeType *lnode = mapLeafRef(i); - (void) lnode; - assert(lnode->getFrozen()); - _nodeStore.freeElem(i); - } - _leafHoldUntilFreeze.clear(); - } -} - - -template -void -BTreeNodeAllocator:: -needFreeze(BTreeRootBaseType *tree) -{ - _treeToFreeze.push_back(tree); -} - - -template -void -BTreeNodeAllocator:: -trimHoldLists(generation_t usedGen) -{ - _nodeStore.trimHoldLists(usedGen); -} - -template -void -BTreeNodeAllocator:: -transferHoldLists(generation_t generation) -{ - _nodeStore.transferHoldLists(generation); -} - - -template -void -BTreeNodeAllocator:: -clearHoldLists() -{ - _nodeStore.clearHoldLists(); -} - - -template -typename BTreeNodeAllocator:: -InternalNodeTypeRefPair -BTreeNodeAllocator:: -moveInternalNode(const InternalNodeType *node) -{ - InternalNodeTypeRefPair iPair; - iPair = _nodeStore.allocNewInternalNodeCopy(*node); - assert(iPair.ref.valid()); - _internalToFreeze.push_back(iPair.ref); - return iPair; -} - - -template -typename BTreeNodeAllocator:: -LeafNodeTypeRefPair -BTreeNodeAllocator:: -moveLeafNode(const LeafNodeType *node) -{ - LeafNodeTypeRefPair lPair; - lPair = _nodeStore.allocNewLeafNodeCopy(*node); - _leafToFreeze.push_back(lPair.ref); - return lPair; -} - - -template -uint32_t -BTreeNodeAllocator:: -validLeaves(BTreeNode::Ref ref) const -{ - if (isLeafRef(ref)) - return mapLeafRef(ref)->validSlots(); - else - return mapInternalRef(ref)->validLeaves(); -} - - -template -uint32_t -BTreeNodeAllocator:: -getLevel(BTreeNode::Ref ref) const -{ - if (isLeafRef(ref)) - return BTreeNode::LEAF_LEVEL; - else - return mapInternalRef(ref)->getLevel(); -} - - -template -const KeyT & -BTreeNodeAllocator:: -getLastKey(BTreeNode::Ref node) const -{ - if (isLeafRef(node)) - 
return mapLeafRef(node)->getLastKey(); - else - return mapInternalRef(node)->getLastKey(); -} - - -template -const AggrT & -BTreeNodeAllocator:: -getAggregated(BTreeNode::Ref node) const -{ - if (!node.valid()) - return LeafNodeType::getEmptyAggregated(); - if (isLeafRef(node)) - return mapLeafRef(node)->getAggregated(); - else - return mapInternalRef(node)->getAggregated(); -} - - -template -vespalib::MemoryUsage -BTreeNodeAllocator:: -getMemoryUsage() const -{ - vespalib::MemoryUsage usage = _nodeStore.getMemoryUsage(); - return usage; -} - -template -vespalib::string -BTreeNodeAllocator:: -toString(BTreeNode::Ref ref) const -{ - if (!isValidRef(ref)) { - return "NULL"; - } - if (isLeafRef(ref)) - return toString(mapLeafRef(ref)); - else - return toString(mapInternalRef(ref)); -} - -template -vespalib::string -BTreeNodeAllocator:: -toString(const BTreeNode * node) const -{ - if (node == nullptr) { - return "NULL"; - } - vespalib::asciistream ss; - if (node->isLeaf()) { - const LeafNodeType * lnode = static_cast(node); - ss << "L: keys(" << lnode->validSlots() << ")["; - for (uint32_t i = 0; i < lnode->validSlots(); ++i) { - if (i > 0) ss << ","; - ss << lnode->getKey(i); - } - ss << "]"; - } else { - const InternalNodeType * inode = - static_cast(node); - ss << "I: validLeaves(" << inode->validLeaves() << - "), keys(" << inode->validSlots() << ")["; - for (uint32_t i = 0; i < inode->validSlots(); ++i) { - if (i > 0) ss << ","; - ss << inode->getKey(i); - } - ss << "]"; - } - return ss.str(); -} - -} diff --git a/searchlib/src/vespa/searchlib/btree/btreenodestore.cpp b/searchlib/src/vespa/searchlib/btree/btreenodestore.cpp deleted file mode 100644 index bfb1e2fa6a1..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreenodestore.cpp +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#include "btreenodestore.hpp" -#include "btreerootbase.h" -#include "btreeroot.h" -#include "btreenodeallocator.h" -#include - -namespace search::btree { - -template class BTreeNodeStore; -template class BTreeNodeStore; -template class BTreeNodeStore; - -} diff --git a/searchlib/src/vespa/searchlib/btree/btreenodestore.h b/searchlib/src/vespa/searchlib/btree/btreenodestore.h deleted file mode 100644 index d8eae945602..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreenodestore.h +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
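BTreeNodeAllocator::freeze() above separates its work with release fences so that frozen node contents become visible before the frozen roots that point to them. A reduced publish/observe sketch of that ordering, using hypothetical writer/reader functions and a single Node rather than the allocator itself:

#include <atomic>
#include <cassert>
#include <thread>

struct Node { int key = 0; bool frozen = false; };

Node g_node;
std::atomic<Node*> g_frozen_root{nullptr};

void writer() {
    g_node.key = 42;
    g_node.frozen = true;                                     // freeze the node first
    std::atomic_thread_fence(std::memory_order_release);      // node freeze visible before ...
    g_frozen_root.store(&g_node, std::memory_order_relaxed);  // ... the root is published
}

void reader() {
    Node *root;
    while ((root = g_frozen_root.load(std::memory_order_acquire)) == nullptr) {
        std::this_thread::yield();
    }
    // Having observed the published root with acquire semantics, the frozen
    // node contents written before the release fence are visible here.
    assert(root->frozen && root->key == 42);
}

int main() {
    std::thread r(reader), w(writer);
    w.join();
    r.join();
    return 0;
}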
- -#pragma once - -#include "btreenode.h" -#include "btreetraits.h" -#include - -namespace search::btree { - -class BTreeNodeReclaimer -{ -public: - static void reclaim(BTreeNode * node) { - node->unFreeze(); - } -}; - - -template -class BTreeNodeBufferType : public datastore::BufferType -{ - typedef datastore::BufferType ParentType; - using ParentType::_emptyEntry; - using ParentType::_arraySize; - using CleanContext = typename ParentType::CleanContext; -public: - BTreeNodeBufferType(uint32_t minArrays, uint32_t maxArrays) - : ParentType(1, minArrays, maxArrays) - { - _emptyEntry.freeze(); - } - - void initializeReservedElements(void *buffer, size_t reservedElements) override; - - void cleanHold(void *buffer, size_t offset, size_t numElems, CleanContext cleanCtx) override; -}; - - -template -class BTreeNodeStore -{ -public: - typedef datastore::DataStoreT > DataStoreType; - typedef DataStoreType::RefType RefType; - typedef BTreeInternalNode InternalNodeType; - typedef BTreeLeafNode LeafNodeType; - typedef typename InternalNodeType::RefPair InternalNodeTypeRefPair; - typedef typename LeafNodeType::RefPair LeafNodeTypeRefPair; - typedef vespalib::GenerationHandler::generation_t generation_t; - using EntryRef = datastore::EntryRef; - - enum NodeTypes - { - NODETYPE_INTERNAL = 0, - NODETYPE_LEAF = 1 - }; - - -private: - static constexpr size_t MIN_BUFFER_ARRAYS = 128u; - DataStoreType _store; - BTreeNodeBufferType _internalNodeType; - BTreeNodeBufferType _leafNodeType; - -public: - BTreeNodeStore(); - - ~BTreeNodeStore(); - - void disableFreeLists() { _store.disableFreeLists(); } - void disableElemHoldList() { _store.disableElemHoldList(); } - - static bool isValidRef(EntryRef ref) { return ref.valid(); } - - bool isLeafRef(EntryRef ref) const { - RefType iRef(ref); - return _store.getTypeId(iRef.bufferId()) == NODETYPE_LEAF; - } - - const InternalNodeType *mapInternalRef(EntryRef ref) const { - RefType iRef(ref); - return _store.getEntry(iRef); - } - - InternalNodeType *mapInternalRef(EntryRef ref) { - RefType iRef(ref); - return _store.getEntry(iRef); - } - - const LeafNodeType *mapLeafRef(EntryRef ref) const { - RefType iRef(ref); - return _store.getEntry(iRef); - } - - LeafNodeType *mapLeafRef(EntryRef ref) { - RefType iRef(ref); - return _store.getEntry(iRef); - } - - template - const NodeType *mapRef(EntryRef ref) const { - RefType iRef(ref); - return _store.getEntry(iRef); - } - - template - NodeType *mapRef(EntryRef ref) { - RefType iRef(ref); - return _store.getEntry(iRef); - } - - LeafNodeTypeRefPair allocNewLeafNode() { - return _store.allocator(NODETYPE_LEAF).alloc(); - } - - LeafNodeTypeRefPair allocLeafNode() { - return _store.freeListAllocator(NODETYPE_LEAF).alloc(); - } - - LeafNodeTypeRefPair allocNewLeafNodeCopy(const LeafNodeType &rhs) { - return _store.allocator(NODETYPE_LEAF).alloc(rhs); - } - - LeafNodeTypeRefPair allocLeafNodeCopy(const LeafNodeType &rhs) { - return _store.freeListAllocator(NODETYPE_LEAF).alloc(rhs); - } - - InternalNodeTypeRefPair allocNewInternalNode() { - return _store.allocator(NODETYPE_INTERNAL).alloc(); - } - - InternalNodeTypeRefPair allocInternalNode() { - return _store.freeListAllocator(NODETYPE_INTERNAL).alloc(); - } - - InternalNodeTypeRefPair allocNewInternalNodeCopy(const InternalNodeType &rhs) { - return _store.allocator(NODETYPE_INTERNAL).alloc(rhs); - } - - InternalNodeTypeRefPair allocInternalNodeCopy(const InternalNodeType &rhs) { - return _store.freeListAllocator(NODETYPE_INTERNAL).alloc(rhs); - } - - void holdElem(EntryRef ref) { - 
_store.holdElem(ref, 1); - } - - void freeElem(EntryRef ref) { - _store.freeElem(ref, 1); - } - - std::vector startCompact(); - - void finishCompact(const std::vector &toHold); - - void transferHoldLists(generation_t generation) { - _store.transferHoldLists(generation); - } - - // Inherit doc from DataStoreBase - datastore::DataStoreBase::MemStats getMemStats() const { - return _store.getMemStats(); - } - - // Inherit doc from DataStoreBase - void trimHoldLists(generation_t usedGen) { - _store.trimHoldLists(usedGen); - } - - void clearHoldLists() { - _store.clearHoldLists(); - } - - // Inherit doc from DataStoreBase - vespalib::MemoryUsage getMemoryUsage() const { - return _store.getMemoryUsage(); - } - - // Inherit doc from DataStoreT - bool getCompacting(EntryRef ref) const { - return _store.getCompacting(ref); - } - - template - void foreach_key(EntryRef ref, FunctionType func) const { - if (!ref.valid()) - return; - if (isLeafRef(ref)) { - mapLeafRef(ref)->foreach_key(func); - } else { - mapInternalRef(ref)->foreach_key(*this, func); - } - } - - template - void foreach(EntryRef ref, FunctionType func) const { - if (!ref.valid()) - return; - if (isLeafRef(ref)) { - mapLeafRef(ref)->foreach(func); - } else { - mapInternalRef(ref)->foreach(*this, func); - } - } -}; - -extern template class BTreeNodeStore; -extern template class BTreeNodeStore; -extern template class BTreeNodeStore; - -} diff --git a/searchlib/src/vespa/searchlib/btree/btreenodestore.hpp b/searchlib/src/vespa/searchlib/btree/btreenodestore.hpp deleted file mode 100644 index cf2889c42af..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreenodestore.hpp +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
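BTreeNodeStore above registers two buffer types, NODETYPE_INTERNAL and NODETYPE_LEAF, and derives the node kind from the buffer's type id in isLeafRef() instead of tagging every node. A toy store modelling the same idea with an index-encoded reference (the one-bit encoding is an assumption for illustration, not the real EntryRef layout):

#include <cassert>
#include <cstdint>
#include <vector>

struct InternalNode { uint32_t level = 1; };
struct LeafNode     { uint32_t level = 0; };

// One bit of the reference selects the "buffer type", the rest is the offset.
struct Ref {
    uint32_t bits;
    static Ref internal(uint32_t idx) { return {idx << 1}; }
    static Ref leaf(uint32_t idx)     { return {(idx << 1) | 1u}; }
    bool isLeaf() const    { return (bits & 1u) != 0; }
    uint32_t index() const { return bits >> 1; }
};

class ToyNodeStore {
    std::vector<InternalNode> _internal;
    std::vector<LeafNode> _leaf;
public:
    Ref allocInternal() { _internal.emplace_back(); return Ref::internal(_internal.size() - 1); }
    Ref allocLeaf()     { _leaf.emplace_back();     return Ref::leaf(_leaf.size() - 1); }
    bool isLeafRef(Ref r) const { return r.isLeaf(); }
    InternalNode &mapInternal(Ref r) { return _internal[r.index()]; }
    LeafNode &mapLeaf(Ref r)         { return _leaf[r.index()]; }
};

int main() {
    ToyNodeStore store;
    Ref i = store.allocInternal();
    Ref l = store.allocLeaf();
    assert(!store.isLeafRef(i) && store.isLeafRef(l));
    assert(store.mapInternal(i).level == 1 && store.mapLeaf(l).level == 0);
    return 0;
}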
- -#pragma once - -#include "btreenodestore.h" -#include - -namespace search::btree { - -template -void -BTreeNodeBufferType::initializeReservedElements(void *buffer, size_t reservedElements) -{ - ParentType::initializeReservedElements(buffer, reservedElements); - EntryType *e = static_cast(buffer); - for (size_t j = reservedElements; j != 0; --j) { - e->freeze(); - ++e; - } -} - - -template -void -BTreeNodeBufferType::cleanHold(void *buffer, size_t offset, size_t numElems, CleanContext) -{ - EntryType *e = static_cast(buffer) + offset; - for (size_t j = numElems; j != 0; --j) { - e->cleanFrozen(); - ++e; - } -} - - - - -template -BTreeNodeStore:: -BTreeNodeStore() - : _store(), - _internalNodeType(MIN_BUFFER_ARRAYS, RefType::offsetSize()), - _leafNodeType(MIN_BUFFER_ARRAYS, RefType::offsetSize()) -{ - _store.addType(&_internalNodeType); - _store.addType(&_leafNodeType); - _store.initActiveBuffers(); - _store.enableFreeLists(); -} - -template -BTreeNodeStore:: -~BTreeNodeStore() -{ - _store.dropBuffers(); // Drop buffers before type handlers are dropped -} - - -template -std::vector -BTreeNodeStore:: -startCompact() -{ - std::vector iToHold = _store.startCompact(NODETYPE_INTERNAL); - std::vector lToHold = _store.startCompact(NODETYPE_LEAF); - std::vector ret = iToHold; - ret.insert(ret.end(), lToHold.begin(), lToHold.end()); - return ret; -} - - -template -void -BTreeNodeStore:: -finishCompact(const std::vector &toHold) -{ - _store.finishCompact(toHold); -} - -} diff --git a/searchlib/src/vespa/searchlib/btree/btreeremover.cpp b/searchlib/src/vespa/searchlib/btree/btreeremover.cpp deleted file mode 100644 index 2322eebf784..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreeremover.cpp +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#include "btreeremover.h" -#include "btreenodeallocator.h" -#include "btreerootbase.hpp" -#include "btreeremover.hpp" -#include "btreenode.hpp" - -namespace search::btree { - -template class BTreeRemover; -template class BTreeRemover; -template class BTreeRemover, - BTreeDefaultTraits, - MinMaxAggrCalc>; - -} diff --git a/searchlib/src/vespa/searchlib/btree/btreeremover.h b/searchlib/src/vespa/searchlib/btree/btreeremover.h deleted file mode 100644 index 87355aa4ce7..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreeremover.h +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
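startCompact() above hands back the buffer ids of both node types so the caller can move live nodes into fresh buffers before finishCompact() retires the old ones. A compacted-copy round trip in miniature, using a hypothetical ToyBuffer rather than the datastore API:

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

// Minimal stand-in for a data-store buffer holding nodes by value.
struct ToyBuffer {
    std::vector<std::string> entries;
    std::vector<bool> dead;      // entries no longer referenced by any tree
};

// "startCompact": pick the buffer to drain. "finishCompact": retire it after
// every live entry has been moved to the active buffer.
std::vector<std::string> compact(const ToyBuffer &old, ToyBuffer &active) {
    std::vector<std::string> moved;
    for (std::size_t i = 0; i < old.entries.size(); ++i) {
        if (!old.dead[i]) {
            active.entries.push_back(old.entries[i]);  // move live node
            moved.push_back(old.entries[i]);
        }
    }
    return moved;   // old buffer can now be held and later freed
}

int main() {
    ToyBuffer old{{"leaf:1", "leaf:2", "internal:3"}, {false, true, false}};
    ToyBuffer active;
    auto moved = compact(old, active);
    assert(moved.size() == 2 && active.entries.size() == 2);
    assert(active.entries[1] == "internal:3");
    return 0;
}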
- -#pragma once - -#include "btreenode.h" -#include "btreenodeallocator.h" -#include "btreerootbase.h" -#include "btreeaggregator.h" -#include "noaggrcalc.h" -#include "minmaxaggrcalc.h" -#include "btreeiterator.h" - -namespace search -{ - -namespace btree -{ - -template -class BTreeRemoverBase -{ -public: - typedef BTreeNodeAllocator NodeAllocatorType; - typedef BTreeAggregator Aggregator; - typedef BTreeInternalNode InternalNodeType; - typedef BTreeLeafNode LeafNodeType; - typedef typename InternalNodeType::RefPair InternalNodeTypeRefPair; - typedef typename LeafNodeType::RefPair LeafNodeTypeRefPair; - - template - static void - steal(InternalNodeType *pNode, - BTreeNode::Ref sNodeRef, - NodeType *sNode, - uint32_t idx, - NodeAllocatorType &allocator, - const AggrCalcT &aggrCalc, - Iterator &itr, - uint32_t level); -}; - -template , - typename TraitsT = BTreeDefaultTraits, - class AggrCalcT = NoAggrCalc> -class BTreeRemover : public BTreeRemoverBase - -{ -public: - typedef BTreeRemoverBase ParentType; - typedef BTreeNodeAllocator NodeAllocatorType; - typedef BTreeAggregator Aggregator; - typedef BTreeInternalNode - InternalNodeType; - typedef BTreeLeafNode - LeafNodeType; - typedef KeyT KeyType; - typedef DataT DataType; - typedef typename InternalNodeType::RefPair InternalNodeTypeRefPair; - typedef typename LeafNodeType::RefPair LeafNodeTypeRefPair; - typedef BTreeIterator Iterator; - - static void - remove(BTreeNode::Ref &root, - Iterator &itr, - const AggrCalcT &aggrCalc); -}; - -extern template class BTreeRemover; -extern template class BTreeRemover; -extern template class BTreeRemover, - BTreeDefaultTraits, - MinMaxAggrCalc>; - -} // namespace search::btree -} // namespace search - diff --git a/searchlib/src/vespa/searchlib/btree/btreeremover.hpp b/searchlib/src/vespa/searchlib/btree/btreeremover.hpp deleted file mode 100644 index c304ea13016..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreeremover.hpp +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
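The steal helper declared above repairs an underflowed node after removal: merge with a sibling when the combined contents fit in one node, otherwise steal slots from the fuller sibling. A compact model of only that decision, with a hypothetical Plan enum naming the four branches of the real code:

#include <cassert>
#include <cstdint>

enum class Plan { MergeLeft, MergeRight, StealFromLeft, StealFromRight };

// maxSlots: node capacity. left/right: valid slots in the siblings (0 = no sibling).
Plan choose_rebalance(uint32_t self, uint32_t left, uint32_t right, uint32_t maxSlots) {
    if (left > 0 && left + self <= maxSlots) return Plan::MergeLeft;
    if (right > 0 && right + self <= maxSlots) return Plan::MergeRight;
    if (left > 0 && (right == 0 || left > right)) return Plan::StealFromLeft;
    return Plan::StealFromRight;
}

int main() {
    // Underflowed node with 3 slots, siblings with 5 and 14 slots, capacity 16:
    // merging with the left sibling (3 + 5 <= 16) is preferred.
    assert(choose_rebalance(3, 5, 14, 16) == Plan::MergeLeft);
    // Both siblings too full to merge with: steal from the fuller (right) one.
    assert(choose_rebalance(7, 14, 15, 16) == Plan::StealFromRight);
    return 0;
}

Merging is tried first, mirroring the branch order in the removed code, since it eliminates a node outright while stealing only rebalances two survivors.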
- -#pragma once - -#include "btreeremover.h" -#include "btreerootbase.hpp" -#include - -namespace search -{ - -namespace btree -{ - -template -template -void -BTreeRemoverBase:: -steal(InternalNodeType *pNode, - BTreeNode::Ref sNodeRef, - NodeType * sNode, uint32_t idx, NodeAllocatorType &allocator, - const AggrCalcT &aggrCalc, - Iterator &itr, - uint32_t level) -{ - BTreeNode::Ref leftVictimRef = BTreeNode::Ref(); - NodeType * leftVictim = nullptr; - BTreeNode::Ref rightVictimRef = BTreeNode::Ref(); - NodeType * rightVictim = nullptr; - if (idx > 0) { - leftVictimRef = pNode->getChild(idx - 1); - leftVictim = allocator.template mapRef(leftVictimRef); - } - if (idx + 1 < pNode->validSlots()) { - rightVictimRef = pNode->getChild(idx + 1); - rightVictim = allocator.template mapRef(rightVictimRef); - } - if (leftVictim != nullptr && - leftVictim->validSlots() + sNode->validSlots() <= - NodeType::maxSlots()) - { - uint32_t stolen = leftVictim->validSlots(); - sNode->stealAllFromLeftNode(leftVictim); - pNode->update(idx, sNode->getLastKey(), sNodeRef); - pNode->remove(idx - 1); - allocator.holdNode(leftVictimRef, leftVictim); - itr.adjustSteal(level, true, stolen); - } else if (rightVictim != nullptr && - rightVictim->validSlots() + sNode->validSlots() <= - NodeType::maxSlots()) - { - sNode->stealAllFromRightNode(rightVictim); - pNode->update(idx, sNode->getLastKey(), sNodeRef); - pNode->remove(idx + 1); - allocator.holdNode(rightVictimRef, rightVictim); - } else if (leftVictim != nullptr && - (rightVictim == nullptr || - leftVictim->validSlots() > rightVictim->validSlots())) - { - if (leftVictim->getFrozen()) { - NodeTypeRefPair thawed = - allocator.thawNode(leftVictimRef, leftVictim); - leftVictimRef = thawed.ref; - leftVictim = thawed.data; - } - uint32_t oldLeftValid = leftVictim->validSlots(); - sNode->stealSomeFromLeftNode(leftVictim, allocator); - uint32_t stolen = oldLeftValid - leftVictim->validSlots(); - pNode->update(idx, sNode->getLastKey(), sNodeRef); - pNode->update(idx - 1, leftVictim->getLastKey(), leftVictimRef); - if (AggrCalcT::hasAggregated()) { - Aggregator::recalc(*leftVictim, allocator, aggrCalc); - } - itr.adjustSteal(level, false, stolen); - } else if (rightVictim != nullptr) { - if (rightVictim->getFrozen()) { - NodeTypeRefPair thawed = - allocator.thawNode(rightVictimRef, rightVictim); - rightVictimRef = thawed.ref; - rightVictim = thawed.data; - } - sNode->stealSomeFromRightNode(rightVictim, allocator); - pNode->update(idx, sNode->getLastKey(), sNodeRef); - pNode->update(idx + 1, rightVictim->getLastKey(), rightVictimRef); - if (AggrCalcT::hasAggregated()) { - Aggregator::recalc(*rightVictim, allocator, aggrCalc); - } - } - if (AggrCalcT::hasAggregated()) { - Aggregator::recalc(*sNode, allocator, aggrCalc); - } -} - - -template -void -BTreeRemover:: -remove(BTreeNode::Ref &root, - Iterator &itr, - const AggrCalcT &aggrCalc) -{ - assert(itr.valid()); - root = itr.thaw(root); - - uint32_t idx = itr.getLeafNodeIdx(); - LeafNodeType * lnode = itr.getLeafNode(); - if (lnode->validSlots() == 1u) { - itr.removeLast(root); - root = BTreeNode::Ref(); - return; - } - NodeAllocatorType &allocator(itr.getAllocator()); - AggrT oldca(AggrCalcT::hasAggregated() ? 
lnode->getAggregated() : AggrT()); - AggrT ca; - if (AggrCalcT::hasAggregated() && - aggrCalc.remove(lnode->getAggregated(), - aggrCalc.getVal(lnode->getData(idx)))) { - lnode->remove(idx); - Aggregator::recalc(*lnode, aggrCalc); - } else { - lnode->remove(idx); - } - if (AggrCalcT::hasAggregated()) { - ca = lnode->getAggregated(); - } - bool steppedBack = idx >= lnode->validSlots(); - if (steppedBack) { - itr.setLeafNodeIdx(itr.getLeafNodeIdx() - 1); - --idx; - } - uint32_t level = 0; - uint32_t levels = itr.getPathSize(); - InternalNodeType *node = nullptr; - for (; level < levels; ++level) { - typename Iterator::PathElement &pe = itr.getPath(level); - node = pe.getWNode(); - idx = pe.getIdx(); - AggrT olda(AggrCalcT::hasAggregated() ? - node->getAggregated() : AggrT()); - BTreeNode::Ref subNode = node->getChild(idx); - node->update(idx, allocator.getLastKey(subNode), subNode); - node->decValidLeaves(1); - if (level == 0) { - LeafNodeType * sNode = allocator.mapLeafRef(subNode); - assert(sNode == lnode); - if (!sNode->isAtLeastHalfFull()) { - // too few elements in sub node, steal from left or - // right sibling - ParentType::template steal - (node, subNode, sNode, idx, allocator, aggrCalc, - itr, level); - } - } else { - InternalNodeType * sNode = allocator.mapInternalRef(subNode); - if (!sNode->isAtLeastHalfFull()) { - // too few elements in sub node, steal from left or - // right sibling - ParentType::template steal - (node, subNode, sNode, idx, allocator, aggrCalc, - itr, level); - } - } - if (AggrCalcT::hasAggregated()) { - if (aggrCalc.remove(node->getAggregated(), oldca, ca)) { - Aggregator::recalc(*node, allocator, aggrCalc); - } - ca = node->getAggregated(); - oldca = olda; - } - } - if (level > 0 && node->validSlots() == 1) { - root = itr.removeLevel(root, node); - } - if (steppedBack) - ++itr; -} - - -} // namespace search::btree -} // namespace search - diff --git a/searchlib/src/vespa/searchlib/btree/btreeroot.cpp b/searchlib/src/vespa/searchlib/btree/btreeroot.cpp deleted file mode 100644 index a576b6ce1e0..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreeroot.cpp +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#include "btreeroot.h" -#include "btreenodeallocator.h" -#include "btreeiterator.hpp" -#include "btreeroot.hpp" -#include "btreenode.hpp" - -namespace search::btree { - -template class BTreeRootT; -template class BTreeRootT; -template class BTreeRootT; -template class BTreeRoot; -template class BTreeRoot; -template class BTreeRoot, BTreeDefaultTraits, MinMaxAggrCalc>; - -} diff --git a/searchlib/src/vespa/searchlib/btree/btreeroot.h b/searchlib/src/vespa/searchlib/btree/btreeroot.h deleted file mode 100644 index b5759a6a341..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreeroot.h +++ /dev/null @@ -1,217 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
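In remove() above, aggrCalc.remove(...) reports whether the removed value may have carried the node's aggregate, in which case the aggregate is recomputed from the remaining slots. A self-contained min/max flavour of that contract, with hypothetical MinMax/remove_invalidates names mirroring the idea rather than MinMaxAggrCalc itself:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

struct MinMax { int32_t min, max; };

// Returns true when removing `val` can invalidate the aggregate, meaning the
// caller must recalculate it from the remaining entries.
bool remove_invalidates(const MinMax &aggr, int32_t val) {
    return val == aggr.min || val == aggr.max;
}

MinMax recalc(const std::vector<int32_t> &vals) {
    auto [mn, mx] = std::minmax_element(vals.begin(), vals.end());
    return {*mn, *mx};
}

int main() {
    std::vector<int32_t> node{3, 7, 11};
    MinMax aggr = recalc(node);                  // {3, 11}
    // Removing 7 cannot change min/max, so no recalculation is needed.
    assert(!remove_invalidates(aggr, 7));
    // Removing 11 invalidates the max: drop it and recompute.
    assert(remove_invalidates(aggr, 11));
    node.pop_back();
    aggr = recalc(node);
    assert(aggr.min == 3 && aggr.max == 7);
    return 0;
}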
- -#pragma once - -#include "btreeiterator.h" -#include "btreenode.h" -#include "btreenodeallocator.h" -#include "btreerootbase.h" -#include "noaggrcalc.h" -#include "minmaxaggrcalc.h" - -namespace search::btree { - -template -class BTreeNodeAllocator; -template class -BTreeBuilder; -template class -BTreeAggregator; - -template , - typename TraitsT = BTreeDefaultTraits> -class BTreeRootT : public BTreeRootBase -{ -public: - typedef BTreeRootBase ParentType; - typedef typename ParentType::NodeAllocatorType NodeAllocatorType; - typedef BTreeKeyData KeyDataType; - typedef typename ParentType::InternalNodeType InternalNodeType; - typedef typename ParentType::LeafNodeType LeafNodeType; - typedef BTreeLeafNodeTemp - LeafNodeTempType; - typedef BTreeIterator Iterator; - typedef BTreeConstIterator - ConstIterator; - - typedef typename ParentType::KeyType KeyType; - typedef typename ParentType::DataType DataType; -protected: - typedef typename ParentType::BTreeRootBaseType BTreeRootBaseType; - typedef BTreeRootT BTreeRootTType; - typedef typename InternalNodeType::RefPair InternalNodeTypeRefPair; - typedef typename LeafNodeType::RefPair LeafNodeTypeRefPair; - using ParentType::_root; - using ParentType::getFrozenRoot; - using ParentType::getFrozenRootRelaxed; - using ParentType::isFrozen; - - vespalib::string toString(BTreeNode::Ref node, const NodeAllocatorType &allocator) const; -public: - /** - * Read view of the frozen version of the tree. - * Should be used by reader threads. - **/ - class FrozenView { - private: - BTreeNode::Ref _frozenRoot; - const NodeAllocatorType & _allocator; - public: - typedef ConstIterator Iterator; - FrozenView(BTreeNode::Ref frozenRoot, - const NodeAllocatorType & allocator); - ConstIterator find(const KeyType& key, - CompareT comp = CompareT()) const; - ConstIterator lowerBound(const KeyType &key, - CompareT comp = CompareT()) const; - ConstIterator upperBound(const KeyType &key, - CompareT comp = CompareT()) const; - ConstIterator begin() const { - return ConstIterator(_frozenRoot, _allocator); - } - void begin(std::vector &where) const { - where.emplace_back(_frozenRoot, _allocator); - } - - BTreeNode::Ref getRoot() const { return _frozenRoot; } - size_t size() const; - const NodeAllocatorType &getAllocator() const { return _allocator; } - - template - void foreach_key(FunctionType func) const { - _allocator.getNodeStore().foreach_key(_frozenRoot, func); - } - - template - void foreach(FunctionType func) const { - _allocator.getNodeStore().foreach(_frozenRoot, func); - } - }; - -private: - - static Iterator findHelper(BTreeNode::Ref root, const KeyType & key, - const NodeAllocatorType & allocator, CompareT comp = CompareT()); - - static Iterator lowerBoundHelper(BTreeNode::Ref root, const KeyType & key, - const NodeAllocatorType & allocator, CompareT comp = CompareT()); - - static Iterator upperBoundHelper(BTreeNode::Ref root, const KeyType & key, - const NodeAllocatorType & allocator, CompareT comp = CompareT()); - -public: - BTreeRootT(); - ~BTreeRootT(); - - void clear(NodeAllocatorType &allocator); - - Iterator find(const KeyType & key, const NodeAllocatorType &allocator, CompareT comp = CompareT()) const; - - Iterator lowerBound(const KeyType & key, const NodeAllocatorType & allocator, CompareT comp = CompareT()) const; - Iterator upperBound(const KeyType & key, const NodeAllocatorType & allocator, CompareT comp = CompareT()) const; - - Iterator begin(const NodeAllocatorType &allocator) const { - return Iterator(_root, allocator); - } - - FrozenView 
getFrozenView(const NodeAllocatorType & allocator) const { - return FrozenView(getFrozenRoot(), allocator); - } - - size_t size(const NodeAllocatorType &allocator) const; - size_t frozenSize(const NodeAllocatorType &allocator) const; - vespalib::string toString(const NodeAllocatorType &allocator) const; - size_t bitSize(const NodeAllocatorType &allocator) const; - size_t bitSize(BTreeNode::Ref node, const NodeAllocatorType &allocator) const; - void thaw(Iterator &itr); -}; - - -template , - typename TraitsT = BTreeDefaultTraits, - class AggrCalcT = NoAggrCalc> -class BTreeRoot : public BTreeRootT -{ -public: - typedef BTreeRootT ParentType; - typedef typename ParentType::ParentType Parent2Type; - typedef typename ParentType::NodeAllocatorType NodeAllocatorType; - typedef typename ParentType::KeyType KeyType; - typedef typename ParentType::DataType DataType; - typedef typename ParentType::LeafNodeType LeafNodeType; - typedef typename ParentType::InternalNodeType InternalNodeType; - typedef typename ParentType::LeafNodeTypeRefPair LeafNodeTypeRefPair; - typedef typename ParentType::InternalNodeTypeRefPair - InternalNodeTypeRefPair; - typedef typename ParentType::Iterator Iterator; - typedef BTreeBuilder Builder; - typedef BTreeAggregator Aggregator; - typedef AggrCalcT AggrCalcType; - using Parent2Type::_root; - using Parent2Type::getFrozenRoot; - using Parent2Type::getFrozenRootRelaxed; - using Parent2Type::isFrozen; - -protected: - bool isValid(BTreeNode::Ref node, bool ignoreMinSlots, uint32_t level, - const NodeAllocatorType &allocator, CompareT comp, AggrCalcT aggrCalc) const; - -public: - /** - * Create a tree from a tree builder. This is a destructive - * assignment, old content of tree is destroyed and tree - * builder is emptied when tree grabs ownership of nodes. - */ - void - assign(Builder &rhs, NodeAllocatorType &allocator); - - bool - insert(const KeyType & key, const DataType & data, - NodeAllocatorType &allocator, CompareT comp = CompareT(), - const AggrCalcT &aggrCalc = AggrCalcT()); - - void - insert(Iterator &itr, - const KeyType &key, const DataType &data, - const AggrCalcT &aggrCalc = AggrCalcT()); - - bool - remove(const KeyType & key, - NodeAllocatorType &allocator, CompareT comp = CompareT(), - const AggrCalcT &aggrCalc = AggrCalcT()); - - void - remove(Iterator &itr, - const AggrCalcT &aggrCalc = AggrCalcT()); - - bool isValid(const NodeAllocatorType &allocator, CompareT comp = CompareT()) const; - - bool isValidFrozen(const NodeAllocatorType &allocator, CompareT comp = CompareT()) const; -}; - - - -extern template class BTreeRootT; -extern template class BTreeRootT; -extern template class BTreeRootT; -extern template class BTreeRoot; -extern template class BTreeRoot; -extern template class BTreeRoot, BTreeDefaultTraits, MinMaxAggrCalc>; - -} diff --git a/searchlib/src/vespa/searchlib/btree/btreeroot.hpp b/searchlib/src/vespa/searchlib/btree/btreeroot.hpp deleted file mode 100644 index 22703f2dfd2..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreeroot.hpp +++ /dev/null @@ -1,489 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
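The FrozenView above lets readers capture the frozen root once and iterate it with no further synchronization while the writer keeps mutating the live root. The same usage pattern, modelled with a copy-on-write std::map snapshot instead of the real frozen B-tree (purely an illustrative assumption):

#include <cassert>
#include <map>
#include <memory>

using Tree = std::map<int, int>;
using Snapshot = std::shared_ptr<const Tree>;

// The writer owns a mutable "live" tree; freeze() publishes an immutable
// snapshot that readers can iterate without locking. (The real code publishes
// the frozen root atomically with release/acquire ordering; this toy is
// single-threaded and skips that.)
class ToyFrozenRoot {
    Tree _live;
    Snapshot _frozen = std::make_shared<const Tree>();
public:
    void insert(int k, int v) { _live[k] = v; }
    void freeze() { _frozen = std::make_shared<const Tree>(_live); }
    Snapshot frozenView() const { return _frozen; }
};

int main() {
    ToyFrozenRoot root;
    root.insert(1, 10);
    root.freeze();
    Snapshot view = root.frozenView();   // reader grabs a stable view
    root.insert(2, 20);                  // writer keeps mutating; the view is unaffected
    assert(view->size() == 1 && view->at(1) == 10);
    root.freeze();
    assert(root.frozenView()->size() == 2);
    return 0;
}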
- -#pragma once - -#include "btreeroot.h" -#include "btreebuilder.h" -#include "btreerootbase.hpp" -#include "btreeinserter.hpp" -#include "btreeremover.hpp" -#include "btreeaggregator.hpp" -#include - -namespace search::btree { - -//----------------------- BTreeRoot ------------------------------------------// - -template -vespalib::string -BTreeRootT:: -toString(BTreeNode::Ref node, - const NodeAllocatorType &allocator) const -{ - if (allocator.isLeafRef(node)) { - vespalib::asciistream ss; - ss << "{" << allocator.toString(node) << "}"; - return ss.str(); - } else { - const InternalNodeType * inode = allocator.mapInternalRef(node); - vespalib::asciistream ss; - ss << "{" << allocator.toString(inode) << ",children(" << inode->validSlots() << ")["; - for (size_t i = 0; i < inode->validSlots(); ++i) { - if (i > 0) ss << ","; - ss << "c[" << i << "]" << toString(inode->getChild(i), allocator); - } - ss << "]}"; - return ss.str(); - } -} - -template -bool -BTreeRoot:: -isValid(BTreeNode::Ref node, - bool ignoreMinSlots, uint32_t level, const NodeAllocatorType &allocator, - CompareT comp, AggrCalcT aggrCalc) const -{ - if (allocator.isLeafRef(node)) { - if (level != 0) { - return false; - } - const LeafNodeType * lnode = allocator.mapLeafRef(node); - if (level != lnode->getLevel()) { - return false; - } - if (lnode->validSlots() > LeafNodeType::maxSlots()) - return false; - if (lnode->validSlots() < LeafNodeType::minSlots() && !ignoreMinSlots) - return false; - for (size_t i = 1; i < lnode->validSlots(); ++i) { - if (!comp(lnode->getKey(i - 1), lnode->getKey(i))) { - return false; - } - } - if (AggrCalcT::hasAggregated()) { - AggrT aggregated = Aggregator::aggregate(*lnode, aggrCalc); - if (aggregated != lnode->getAggregated()) { - return false; - } - } - } else { - if (level == 0) { - return false; - } - const InternalNodeType * inode = allocator.mapInternalRef(node); - if (level != inode->getLevel()) { - return false; - } - if (inode->validSlots() > InternalNodeType::maxSlots()) - return false; - if (inode->validSlots() < InternalNodeType::minSlots() && - !ignoreMinSlots) - return false; - size_t lChildren = 0; - size_t iChildren = 0; - uint32_t validLeaves = 0; - for (size_t i = 0; i < inode->validSlots(); ++i) { - if (i > 0 && !comp(inode->getKey(i - 1), inode->getKey(i))) { - return false; - } - const BTreeNode::Ref childRef = inode->getChild(i); - if (!allocator.isValidRef(childRef)) - return false; - validLeaves += allocator.validLeaves(childRef); - if (allocator.isLeafRef(childRef)) - lChildren++; - else - iChildren++; - if (comp(inode->getKey(i), allocator.getLastKey(childRef))) { - return false; - } - if (comp(allocator.getLastKey(childRef), inode->getKey(i))) { - return false; - } - if (!isValid(childRef, false, level - 1, allocator, comp, aggrCalc)) { - return false; - } - } - if (validLeaves != inode->validLeaves()) { - return false; - } - if (lChildren < inode->validSlots() && iChildren < inode->validSlots()) { - return false; - } - if (AggrCalcT::hasAggregated()) { - AggrT aggregated = Aggregator::aggregate(*inode, allocator, aggrCalc); - if (aggregated != inode->getAggregated()) { - return false; - } - } - } - return true; -} - -template -typename BTreeRootT::Iterator -BTreeRootT:: -findHelper(BTreeNode::Ref root, const KeyType & key, - const NodeAllocatorType & allocator, CompareT comp) -{ - Iterator itr(BTreeNode::Ref(), allocator); - itr.lower_bound(root, key, comp); - if (itr.valid() && comp(key, itr.getKey())) { - itr.setupEnd(); - } - return itr; -} - -template -typename 
BTreeRootT::Iterator -BTreeRootT:: -lowerBoundHelper(BTreeNode::Ref root, const KeyType & key, - const NodeAllocatorType & allocator, CompareT comp) -{ - Iterator itr(BTreeNode::Ref(), allocator); - itr.lower_bound(root, key, comp); - return itr; -} - -template -typename BTreeRootT::Iterator -BTreeRootT:: -upperBoundHelper(BTreeNode::Ref root, const KeyType & key, - const NodeAllocatorType & allocator, CompareT comp) -{ - Iterator itr(root, allocator); - if (itr.valid() && !comp(key, itr.getKey())) { - itr.seekPast(key, comp); - } - return itr; -} - - -//----------------------- BTreeRoot::FrozenView ----------------------------------// - -template -BTreeRootT:: -FrozenView::FrozenView(BTreeNode::Ref frozenRoot, - const NodeAllocatorType & allocator) : - _frozenRoot(frozenRoot), - _allocator(allocator) -{ -} - -template -typename BTreeRootT::ConstIterator -BTreeRootT:: -FrozenView::find(const KeyType & key, - CompareT comp) const -{ - ConstIterator itr(BTreeNode::Ref(), _allocator); - itr.lower_bound(_frozenRoot, key, comp); - if (itr.valid() && comp(key, itr.getKey())) { - itr.setupEnd(); - } - return itr; -} - -template -typename BTreeRootT::ConstIterator -BTreeRootT:: -FrozenView::lowerBound(const KeyType & key, - CompareT comp) const -{ - ConstIterator itr(BTreeNode::Ref(), _allocator); - itr.lower_bound(_frozenRoot, key, comp); - return itr; -} - -template -typename BTreeRootT::ConstIterator -BTreeRootT:: -FrozenView::upperBound(const KeyType & key, - CompareT comp) const -{ - ConstIterator itr(_frozenRoot, _allocator); - if (itr.valid() && !comp(key, itr.getKey())) { - itr.seekPast(key, comp); - } - return itr; -} - -template -size_t -BTreeRootT:: -FrozenView::size() const -{ - if (NodeAllocatorType::isValidRef(_frozenRoot)) { - return _allocator.validLeaves(_frozenRoot); - } - return 0u; -} - -//----------------------- BTreeRoot ----------------------------------------------// - -template -BTreeRootT::BTreeRootT() = default; - -template -BTreeRootT::~BTreeRootT() = default; - -template -void -BTreeRootT:: -clear(NodeAllocatorType &allocator) -{ - if (NodeAllocatorType::isValidRef(_root)) { - this->recursiveDelete(_root, allocator); - _root = BTreeNode::Ref(); - if (NodeAllocatorType::isValidRef(getFrozenRootRelaxed())) - allocator.needFreeze(this); - } -} - -template -typename BTreeRootT::Iterator -BTreeRootT:: -find(const KeyType & key, const NodeAllocatorType & allocator, - CompareT comp) const -{ - return findHelper(_root, key, allocator, comp); -} - -template -typename BTreeRootT::Iterator -BTreeRootT:: -lowerBound(const KeyType & key, const NodeAllocatorType & allocator, - CompareT comp) const -{ - return lowerBoundHelper(_root, key, allocator, comp); -} - -template -typename BTreeRootT::Iterator -BTreeRootT:: -upperBound(const KeyType & key, const NodeAllocatorType & allocator, - CompareT comp) const -{ - return upperBoundHelper(_root, key, allocator, comp); -} - - -template -size_t -BTreeRootT:: -size(const NodeAllocatorType &allocator) const -{ - if (NodeAllocatorType::isValidRef(_root)) { - return allocator.validLeaves(_root); - } - return 0u; -} - - -template -size_t -BTreeRootT:: -frozenSize(const NodeAllocatorType &allocator) const -{ - BTreeNode::Ref frozenRoot = getFrozenRoot(); - if (NodeAllocatorType::isValidRef(frozenRoot)) { - return allocator.validLeaves(frozenRoot); - } - return 0u; -} - - -template -vespalib::string -BTreeRootT:: -toString(const NodeAllocatorType &allocator) const -{ - vespalib::asciistream ss; - if (NodeAllocatorType::isValidRef(_root)) { - ss 
<< "root(" << toString(_root, allocator) << ")"; - } - return ss.str(); -} - -template -bool -BTreeRoot:: -isValid(const NodeAllocatorType &allocator, - CompareT comp) const -{ - if (NodeAllocatorType::isValidRef(_root)) { - uint32_t level = allocator.getLevel(_root); - return isValid(_root, true, level, allocator, comp, AggrCalcT()); - } - return true; -} - - -template -bool -BTreeRoot:: -isValidFrozen(const NodeAllocatorType &allocator, - CompareT comp) const -{ - BTreeNode::Ref frozenRoot = getFrozenRoot(); - if (NodeAllocatorType::isValidRef(frozenRoot)) { - uint32_t level = allocator.getLevel(frozenRoot); - return isValid(frozenRoot, true, level, allocator, comp, AggrCalcT()); - } - return true; -} - - -template -size_t -BTreeRootT:: -bitSize(const NodeAllocatorType &allocator) const -{ - size_t ret = sizeof(BTreeRootT) * 8; - if (NodeAllocatorType::isValidRef(_root)) - ret += bitSize(_root, allocator); - return ret; -} - - -template -size_t -BTreeRootT:: -bitSize(BTreeNode::Ref node, - const NodeAllocatorType &allocator) const -{ - if (allocator.isLeafRef(node)) { - return sizeof(LeafNodeType) * 8; - } else { - size_t ret = sizeof(InternalNodeType) * 8; - const InternalNodeType * inode = allocator.mapInternalRef(node); - size_t slots = inode->validSlots(); - for (size_t i = 0; i < slots; ++i) { - ret += bitSize(inode->getChild(i), allocator); - } - return ret; - } -} - - -template -void -BTreeRootT:: -thaw(Iterator &itr) -{ - bool oldFrozen = isFrozen(); - _root = itr.thaw(_root); - if (oldFrozen && !isFrozen()) - itr.getAllocator().needFreeze(this); -} - - -template -void -BTreeRoot:: -assign(Builder &rhs, - NodeAllocatorType &allocator) -{ - this->clear(allocator); - - bool oldFrozen = isFrozen(); - _root = rhs.handover(); - if (oldFrozen && !isFrozen()) - allocator.needFreeze(this); -} - - -template -bool -BTreeRoot:: -insert(const KeyType & key, const DataType & data, - NodeAllocatorType &allocator, CompareT comp, - const AggrCalcT &aggrCalc) -{ - Iterator itr(BTreeNode::Ref(), allocator); - itr.lower_bound(_root, key, comp); - if (itr.valid() && !comp(key, itr.getKey())) - return false; // Element already exists - insert(itr, key, data, aggrCalc); - return true; -} - - -template -void -BTreeRoot:: -insert(Iterator &itr, - const KeyType &key, const DataType &data, - const AggrCalcT &aggrCalc) -{ - typedef BTreeInserter Inserter; - bool oldFrozen = isFrozen(); - Inserter::insert(_root, itr, key, data, - aggrCalc); - if (oldFrozen && !isFrozen()) - itr.getAllocator().needFreeze(this); -} - - -template -bool -BTreeRoot:: -remove(const KeyType & key, - NodeAllocatorType &allocator, CompareT comp, - const AggrCalcT &aggrCalc) -{ - Iterator itr(BTreeNode::Ref(), allocator); - itr.lower_bound(_root, key, comp); - if (!itr.valid() || comp(key, itr.getKey())) - return false; - remove(itr, aggrCalc); - return true; -} - - -template -void -BTreeRoot:: -remove(Iterator &itr, - const AggrCalcT &aggrCalc) -{ - typedef BTreeRemover - Remover; - bool oldFrozen = isFrozen(); - Remover::remove(_root, itr, aggrCalc); - if (oldFrozen && !isFrozen()) - itr.getAllocator().needFreeze(this); -} - -} diff --git a/searchlib/src/vespa/searchlib/btree/btreerootbase.cpp b/searchlib/src/vespa/searchlib/btree/btreerootbase.cpp deleted file mode 100644 index 12394761bf9..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreerootbase.cpp +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
- -#include "btreerootbase.h" -#include "btreerootbase.hpp" - -namespace search::btree { - -template class BTreeRootBase; -template class BTreeRootBase; -template class BTreeRootBase; - -} diff --git a/searchlib/src/vespa/searchlib/btree/btreerootbase.h b/searchlib/src/vespa/searchlib/btree/btreerootbase.h deleted file mode 100644 index ed4889214ca..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreerootbase.h +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "btreetraits.h" -#include "btreenode.h" -#include "btreenodeallocator.h" -#include - -namespace search::btree { - -template -class BTreeRootBase -{ -protected: - typedef KeyT KeyType; - typedef DataT DataType; - typedef AggrT AggregatedType; - typedef BTreeRootBase - BTreeRootBaseType; - typedef BTreeInternalNode InternalNodeType; - typedef BTreeLeafNode LeafNodeType; - typedef BTreeNodeAllocator NodeAllocatorType; - - BTreeNode::Ref _root; - std::atomic _frozenRoot; - - static_assert(sizeof(_root) == sizeof(_frozenRoot), - "BTree root reference size mismatch"); - - BTreeRootBase(); - BTreeRootBase(const BTreeRootBase &rhs); - BTreeRootBase &operator=(const BTreeRootBase &rhs); - ~BTreeRootBase(); - -public: - void freeze(NodeAllocatorType &allocator); - - bool isFrozen() const { - return (_root.ref() == _frozenRoot.load(std::memory_order_relaxed)); - } - - void setRoot(BTreeNode::Ref newRoot, NodeAllocatorType &allocator) { - bool oldFrozen = isFrozen(); - _root = newRoot; - if (oldFrozen && !isFrozen()) - allocator.needFreeze(this); - } - - void setRoots(BTreeNode::Ref newRoot) { - _root = newRoot; - _frozenRoot = newRoot.ref(); - } - - BTreeNode::Ref getRoot() const { - return _root; - } - - BTreeNode::Ref getFrozenRoot() const { - return BTreeNode::Ref(_frozenRoot.load(std::memory_order_acquire)); - } - - BTreeNode::Ref getFrozenRootRelaxed() const { - return BTreeNode::Ref(_frozenRoot.load(std::memory_order_relaxed)); - } - - const AggrT &getAggregated(const NodeAllocatorType &allocator) const { - return allocator.getAggregated(_root); - } - - void recycle() { - _root = BTreeNode::Ref(); - _frozenRoot = BTreeNode::Ref().ref(); - } - -protected: - void recursiveDelete(BTreeNode::Ref node, NodeAllocatorType &allocator); -}; - -extern template class BTreeRootBase; -extern template class BTreeRootBase; -extern template class BTreeRootBase; - -} diff --git a/searchlib/src/vespa/searchlib/btree/btreerootbase.hpp b/searchlib/src/vespa/searchlib/btree/btreerootbase.hpp deleted file mode 100644 index 0b4ef18aad9..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreerootbase.hpp +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
- -#pragma once - -#include "btreerootbase.h" - -namespace search::btree { - -template -BTreeRootBase::BTreeRootBase() - : _root(BTreeNode::Ref()), - _frozenRoot(BTreeNode::Ref().ref()) -{ -} - - -template -BTreeRootBase:: -BTreeRootBase(const BTreeRootBase &rhs) - : _root(rhs._root), - _frozenRoot(rhs._frozenRoot.load()) -{ -} - - -template -BTreeRootBase::~BTreeRootBase() -{ - assert(!_root.valid()); -#if 0 - assert(!_frozenRoot.valid()); -#endif -} - - -template -BTreeRootBase & -BTreeRootBase:: -operator=(const BTreeRootBase &rhs) -{ - _root = rhs._root; - _frozenRoot.store(rhs._frozenRoot.load(), std::memory_order_release); - return *this; -} - - -template -void -BTreeRootBase:: -freeze(NodeAllocatorType &allocator) -{ - if (NodeAllocatorType::isValidRef(_root)) { - if (allocator.isLeafRef(_root)) - assert(allocator.mapLeafRef(_root)->getFrozen()); - else - assert(allocator.mapInternalRef(_root)->getFrozen()); - } - _frozenRoot.store(_root.ref(), std::memory_order_release); -} - - -template -void -BTreeRootBase:: -recursiveDelete(BTreeNode::Ref node, NodeAllocatorType &allocator) -{ - assert(allocator.isValidRef(node)); - if (!allocator.isLeafRef(node)) { - InternalNodeType * inode = allocator.mapInternalRef(node); - for (size_t i = 0; i < inode->validSlots(); ++i) { - recursiveDelete(inode->getChild(i), allocator); - } - allocator.holdNode(node, inode); - } else { - allocator.holdNode(node, allocator.mapLeafRef(node)); - } -} - -} diff --git a/searchlib/src/vespa/searchlib/btree/btreestore.cpp b/searchlib/src/vespa/searchlib/btree/btreestore.cpp deleted file mode 100644 index bead11295b3..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreestore.cpp +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#include "btreestore.h" -#include "btreestore.hpp" -#include "btreeiterator.hpp" - -namespace search::btree { - -template class BTreeStore, BTreeDefaultTraits>; -template class BTreeStore, BTreeDefaultTraits>; -template class BTreeStore, BTreeDefaultTraits, MinMaxAggrCalc>; - -} diff --git a/searchlib/src/vespa/searchlib/btree/btreestore.h b/searchlib/src/vespa/searchlib/btree/btreestore.h deleted file mode 100644 index c2242bd6b63..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreestore.h +++ /dev/null @@ -1,509 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
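recursiveDelete above tears a tree down bottom-up, handing every child to the hold mechanism before the parent that referenced it. A minimal recursive sketch over a heap-allocated toy node, with unique_ptr children instead of data-store references, purely for illustration:

#include <cassert>
#include <memory>
#include <utility>
#include <vector>

struct ToyNode {
    std::vector<std::unique_ptr<ToyNode>> children;   // empty -> leaf
};

// Post-order teardown: every child is "held" (here: counted and released)
// before the node that referenced it.
void recursive_delete(std::unique_ptr<ToyNode> node, int &held) {
    for (auto &child : node->children) {
        recursive_delete(std::move(child), held);
    }
    ++held;        // stand-in for allocator.holdNode(...)
}

int main() {
    auto root = std::make_unique<ToyNode>();
    root->children.push_back(std::make_unique<ToyNode>());
    root->children.push_back(std::make_unique<ToyNode>());
    root->children[0]->children.push_back(std::make_unique<ToyNode>());
    int held = 0;
    recursive_delete(std::move(root), held);
    assert(held == 4);   // two children, one grandchild, plus the root itself
    return 0;
}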
- -#pragma once - -#include "btreenode.h" -#include "btreebuilder.h" -#include "btreeroot.h" -#include "noaggrcalc.h" -#include "minmaxaggrcalc.h" -#include -#include - -namespace search::btree { - -template -class BTreeStore -{ -public: - typedef KeyT KeyType; - typedef DataT DataType; - typedef AggrT AggregatedType; - typedef datastore::DataStoreT > DataStoreType; - typedef DataStoreType::RefType RefType; - typedef BTreeKeyData KeyDataType; - - typedef BTreeRoot BTreeType; - typedef BTreeInternalNode InternalNodeType; - typedef BTreeLeafNode - LeafNodeType; - typedef datastore::Handle BTreeTypeRefPair; - typedef datastore::Handle KeyDataTypeRefPair; - typedef typename InternalNodeType::RefPair InternalNodeTypeRefPair; - typedef typename LeafNodeType::RefPair LeafNodeTypeRefPair; - typedef vespalib::GenerationHandler::generation_t generation_t; - typedef BTreeNodeAllocator NodeAllocatorType; - typedef typename BTreeType::Iterator Iterator; - typedef typename BTreeType::ConstIterator ConstIterator; - typedef const KeyDataType * AddIter; - typedef const KeyType * RemoveIter; - typedef BTreeBuilder Builder; - using EntryRef = datastore::EntryRef; - template - using BufferType = datastore::BufferType; - using BufferState = datastore::BufferState; - - static constexpr uint32_t clusterLimit = 8; - - enum BufferTypes - { - BUFFERTYPE_ARRAY1 = 0, - BUFFERTYPE_ARRAY2 = 1, - BUFFERTYPE_ARRAY3 = 2, - BUFFERTYPE_ARRAY4 = 3, - BUFFERTYPE_ARRAY5 = 4, - BUFFERTYPE_ARRAY6 = 5, - BUFFERTYPE_ARRAY7 = 6, - BUFFERTYPE_ARRAY8 = 7, - BUFFERTYPE_BTREE = 8 - }; -protected: - struct TreeReclaimer { - static void reclaim(BTreeType * tree) { - tree->recycle(); - } - }; - - DataStoreType _store; - BufferType _treeType; - BufferType _small1Type; - BufferType _small2Type; - BufferType _small3Type; - BufferType _small4Type; - BufferType _small5Type; - BufferType _small6Type; - BufferType _small7Type; - BufferType _small8Type; - NodeAllocatorType _allocator; - AggrCalcT _aggrCalc; - Builder _builder; - - BTreeType * getWTreeEntry(RefType ref) { - return _store.getEntry(ref); - } - -public: - BTreeStore(); - - BTreeStore(bool init); - - ~BTreeStore(); - - const NodeAllocatorType &getAllocator() const { return _allocator; } - - void - disableFreeLists() { - _store.disableFreeLists(); - _allocator.disableFreeLists(); - } - - void - disableElemHoldList() - { - _store.disableElemHoldList(); - _allocator.disableElemHoldList(); - } - - BTreeTypeRefPair - allocNewBTree() { - return _store.allocator(BUFFERTYPE_BTREE).alloc(); - } - - BTreeTypeRefPair - allocBTree() { - return _store.freeListAllocator(BUFFERTYPE_BTREE).alloc(); - } - - BTreeTypeRefPair - allocNewBTreeCopy(const BTreeType &rhs) { - return _store.allocator(BUFFERTYPE_BTREE).alloc(rhs); - } - - BTreeTypeRefPair - allocBTreeCopy(const BTreeType &rhs) { - return _store.freeListAllocator >(BUFFERTYPE_BTREE).alloc(rhs); - } - - KeyDataTypeRefPair - allocNewKeyData(uint32_t clusterSize); - - KeyDataTypeRefPair - allocKeyData(uint32_t clusterSize); - - KeyDataTypeRefPair - allocNewKeyDataCopy(const KeyDataType *rhs, uint32_t clusterSize); - - KeyDataTypeRefPair - allocKeyDataCopy(const KeyDataType *rhs, uint32_t clusterSize); - - std::vector - startCompact(); - - void - finishCompact(const std::vector &toHold); - - - const KeyDataType * - lower_bound(const KeyDataType *b, const KeyDataType *e, - const KeyType &key, CompareT comp); - - void - makeTree(EntryRef &ref, - const KeyDataType *array, uint32_t clusterSize); - - void - makeArray(EntryRef &ref, EntryRef leafRef, 
LeafNodeType *leafNode); - - bool - insert(EntryRef &ref, - const KeyType &key, const DataType &data, - CompareT comp = CompareT()); - - bool - remove(EntryRef &ref, - const KeyType &key, - CompareT comp = CompareT()); - - uint32_t - getNewClusterSize(const KeyDataType *o, - const KeyDataType *oe, - AddIter a, - AddIter ae, - RemoveIter r, - RemoveIter re, - CompareT comp); - - void - applyCluster(const KeyDataType *o, - const KeyDataType *oe, - KeyDataType *d, - const KeyDataType *de, - AddIter a, - AddIter ae, - RemoveIter r, - RemoveIter re, - CompareT comp); - - - void - applyModifyTree(BTreeType *tree, - AddIter a, - AddIter ae, - RemoveIter r, - RemoveIter re, - CompareT comp); - - void - applyBuildTree(BTreeType *tree, - AddIter a, - AddIter ae, - RemoveIter r, - RemoveIter re, - CompareT comp); - - void - applyNewArray(EntryRef &ref, - AddIter aOrg, - AddIter ae); - - void - applyNewTree(EntryRef &ref, - AddIter a, - AddIter ae, - CompareT comp); - - void - applyNew(EntryRef &ref, - AddIter a, - AddIter ae, - CompareT comp); - - - bool - applyCluster(EntryRef &ref, - uint32_t clusterSize, - AddIter a, - AddIter ae, - RemoveIter r, - RemoveIter re, - CompareT comp); - - void - applyTree(BTreeType *tree, - AddIter a, - AddIter ae, - RemoveIter r, - RemoveIter re, - CompareT comp); - - void - normalizeTree(EntryRef &ref, - BTreeType *tree, - bool wasArray); - /** - * Apply multiple changes at once. - * - * additions and removals should be sorted on key without duplicates. - * Overlap between additions and removals indicates updates. - */ - void - apply(EntryRef &ref, - AddIter a, - AddIter ae, - RemoveIter r, - RemoveIter re, - CompareT comp = CompareT()); - - void - clear(const EntryRef ref); - - size_t - size(const EntryRef ref) const; - - size_t - frozenSize(const EntryRef ref) const; - - Iterator - begin(const EntryRef ref) const; - - ConstIterator - beginFrozen(const EntryRef ref) const; - - void - beginFrozen(const EntryRef ref, std::vector &where) const; - - uint32_t - getTypeId(RefType ref) const - { - return _store.getBufferState(ref.bufferId()).getTypeId(); - } - - static bool - isSmallArray(uint32_t typeId) - { - return typeId < clusterLimit; - } - - bool - isSmallArray(const EntryRef ref) const; - - /** - * Returns the cluster size for the type id. - * Cluster size == 0 means we have a tree for the given reference. - * The reference must be valid. - **/ - static uint32_t - getClusterSize(uint32_t typeId) - { - return (typeId < clusterLimit) ? typeId + 1 : 0; - } - - /** - * Returns the cluster size for the entry pointed to by the given reference. - * Cluster size == 0 means we have a tree for the given reference. - * The reference must be valid. 
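BTreeStore keeps each posting list either as a small sorted array (at most clusterLimit = 8 entries) or as a full B-tree, and reuses the datastore buffer type id as the tag: type ids 0..7 mean array sizes 1..8, while the dedicated tree type yields cluster size 0, meaning "follow the reference to a BTreeRoot". A standalone sketch of that mapping; the constants mirror the enum above, everything else is illustrative.

#include <cassert>
#include <cstdint>
#include <cstdio>

constexpr uint32_t clusterLimit = 8;       // BUFFERTYPE_ARRAY1..ARRAY8
constexpr uint32_t BUFFERTYPE_BTREE = 8;   // larger posting lists live in a tree

// 0 means "the reference points at a tree", otherwise the array length.
uint32_t getClusterSize(uint32_t typeId) {
    return (typeId < clusterLimit) ? typeId + 1 : 0;
}

bool isSmallArray(uint32_t typeId) { return typeId < clusterLimit; }

int main() {
    for (uint32_t typeId = 0; typeId <= BUFFERTYPE_BTREE; ++typeId) {
        printf("typeId %u -> clusterSize %u (%s)\n", typeId, getClusterSize(typeId),
               isSmallArray(typeId) ? "small array" : "btree");
    }
    assert(getClusterSize(3) == 4);
    assert(getClusterSize(BUFFERTYPE_BTREE) == 0);
}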
- **/ - uint32_t - getClusterSize(RefType ref) const - { - return getClusterSize(getTypeId(ref)); - } - - const BTreeType * getTreeEntry(RefType ref) const { - return _store.getEntry(ref); - } - - const KeyDataType * getKeyDataEntry(RefType ref, uint32_t arraySize) const { - return _store.getEntryArray(ref, arraySize); - } - - void freeze() { - _allocator.freeze(); - } - - // Inherit doc from DataStoreBase - void - trimHoldLists(generation_t usedGen) - { - _allocator.trimHoldLists(usedGen); - _store.trimHoldLists(usedGen); - } - - // Inherit doc from DataStoreBase - void - transferHoldLists(generation_t generation) - { - _allocator.transferHoldLists(generation); - _store.transferHoldLists(generation); - } - - void - clearHoldLists() - { - _allocator.clearHoldLists(); - _store.clearHoldLists(); - } - - - // Inherit doc from DataStoreBase - vespalib::MemoryUsage getMemoryUsage() const { - vespalib::MemoryUsage usage; - usage.merge(_allocator.getMemoryUsage()); - usage.merge(_store.getMemoryUsage()); - return usage; - } - - void - clearBuilder() - { - _builder.clear(); - } - - AggregatedType - getAggregated(const EntryRef ref) const; - - template - void - foreach_unfrozen_key(EntryRef ref, FunctionType func) const; - - template - void - foreach_frozen_key(EntryRef ref, FunctionType func) const; - - template - void - foreach_unfrozen(EntryRef ref, FunctionType func) const; - - template - void - foreach_frozen(EntryRef ref, FunctionType func) const; - -private: - static constexpr size_t MIN_BUFFER_ARRAYS = 128u; - template - void - foreach_key(EntryRef ref, FunctionType func) const; - - template - void - foreach(EntryRef ref, FunctionType func) const; -}; - -template -template -void -BTreeStore:: -foreach_unfrozen_key(EntryRef ref, FunctionType func) const { - foreach_key(ref, func); -} - -template -template -void -BTreeStore:: -foreach_frozen_key(EntryRef ref, FunctionType func) const -{ - foreach_key(ref, func); -} - -template -template -void -BTreeStore:: -foreach_unfrozen(EntryRef ref, FunctionType func) const -{ - foreach(ref, func); -} - - -template -template -void -BTreeStore:: -foreach_frozen(EntryRef ref, FunctionType func) const -{ - foreach(ref, func); -} - -template -template -void -BTreeStore:: -foreach_key(EntryRef ref, FunctionType func) const -{ - if (!ref.valid()) - return; - RefType iRef(ref); - uint32_t clusterSize = getClusterSize(iRef); - if (clusterSize == 0) { - const BTreeType *tree = getTreeEntry(iRef); - _allocator.getNodeStore().foreach_key(Frozen ? tree->getFrozenRoot() : tree->getRoot(), func); - } else { - const KeyDataType *p = getKeyDataEntry(iRef, clusterSize); - const KeyDataType *pe = p + clusterSize; - for (; p != pe; ++p) { - func(p->_key); - } - } -} - -template -template -void -BTreeStore:: -foreach(EntryRef ref, FunctionType func) const -{ - if (!ref.valid()) - return; - RefType iRef(ref); - uint32_t clusterSize = getClusterSize(iRef); - if (clusterSize == 0) { - const BTreeType *tree = getTreeEntry(iRef); - _allocator.getNodeStore().foreach(Frozen ? 
tree->getFrozenRoot() : tree->getRoot(), func); - } else { - const KeyDataType *p = getKeyDataEntry(iRef, clusterSize); - const KeyDataType *pe = p + clusterSize; - for (; p != pe; ++p) { - func(p->_key, p->getData()); - } - } -} - - -extern template class BTreeStore, - BTreeDefaultTraits>; - -extern template class BTreeStore, - BTreeDefaultTraits>; - -extern template class BTreeStore, - BTreeDefaultTraits, - MinMaxAggrCalc>; - -} - - diff --git a/searchlib/src/vespa/searchlib/btree/btreestore.hpp b/searchlib/src/vespa/searchlib/btree/btreestore.hpp deleted file mode 100644 index ae9a67b4e8b..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreestore.hpp +++ /dev/null @@ -1,957 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "btreestore.h" -#include "btreebuilder.h" -#include "btreebuilder.hpp" -#include -#include - -namespace search::btree { - -template -BTreeStore:: -BTreeStore() - : BTreeStore(true) -{ -} - -template -BTreeStore:: -BTreeStore(bool init) - : _store(), - _treeType(1, MIN_BUFFER_ARRAYS, RefType::offsetSize()), - _small1Type(1, MIN_BUFFER_ARRAYS, RefType::offsetSize()), - _small2Type(2, MIN_BUFFER_ARRAYS, RefType::offsetSize()), - _small3Type(3, MIN_BUFFER_ARRAYS, RefType::offsetSize()), - _small4Type(4, MIN_BUFFER_ARRAYS, RefType::offsetSize()), - _small5Type(5, MIN_BUFFER_ARRAYS, RefType::offsetSize()), - _small6Type(6, MIN_BUFFER_ARRAYS, RefType::offsetSize()), - _small7Type(7, MIN_BUFFER_ARRAYS, RefType::offsetSize()), - _small8Type(8, MIN_BUFFER_ARRAYS, RefType::offsetSize()), - _allocator(), - _aggrCalc(), - _builder(_allocator, _aggrCalc) -{ - // XXX: order here makes typeId + 1 == clusterSize for small arrays, - // code elsewhere depends on it. - _store.addType(&_small1Type); - _store.addType(&_small2Type); - _store.addType(&_small3Type); - _store.addType(&_small4Type); - _store.addType(&_small5Type); - _store.addType(&_small6Type); - _store.addType(&_small7Type); - _store.addType(&_small8Type); - _store.addType(&_treeType); - if (init) { - _store.initActiveBuffers(); - _store.enableFreeLists(); - } -} - - -template -BTreeStore::~BTreeStore() -{ - _builder.clear(); - _store.dropBuffers(); // Drop buffers before type handlers are dropped -} - - -template -typename BTreeStore:: -KeyDataTypeRefPair -BTreeStore:: -allocNewKeyData(uint32_t clusterSize) -{ - assert(clusterSize >= 1 && clusterSize <= clusterLimit); - uint32_t typeId = clusterSize - 1; - return _store.allocator(typeId).allocArray(clusterSize); -} - - -template -typename BTreeStore:: -KeyDataTypeRefPair -BTreeStore:: -allocKeyData(uint32_t clusterSize) -{ - assert(clusterSize >= 1 && clusterSize <= clusterLimit); - uint32_t typeId = clusterSize - 1; - return _store.freeListAllocator>(typeId).allocArray(clusterSize); -} - - -template -typename BTreeStore:: -KeyDataTypeRefPair -BTreeStore:: -allocNewKeyDataCopy(const KeyDataType *rhs, uint32_t clusterSize) -{ - assert(clusterSize >= 1 && clusterSize <= clusterLimit); - uint32_t typeId = clusterSize - 1; - return _store.allocator(typeId).allocArray(vespalib::ConstArrayRef(rhs, clusterSize)); -} - - -template -typename BTreeStore:: -KeyDataTypeRefPair -BTreeStore:: -allocKeyDataCopy(const KeyDataType *rhs, uint32_t clusterSize) -{ - assert(clusterSize >= 1 && clusterSize <= clusterLimit); - uint32_t typeId = clusterSize - 1; - return _store.freeListAllocator>(typeId). 
- allocArray(vespalib::ConstArrayRef(rhs, clusterSize)); -} - - -template -std::vector -BTreeStore::startCompact() -{ - std::vector ret = _store.startCompact(clusterLimit); - for (uint32_t clusterSize = 1; clusterSize <= clusterLimit; ++clusterSize) { - uint32_t typeId = clusterSize - 1; - std::vector toHold = _store.startCompact(typeId); - for (auto i : toHold) { - ret.push_back(i); - } - } - return ret; -} - - -template -void -BTreeStore:: -finishCompact(const std::vector &toHold) -{ - _store.finishCompact(toHold); -} - - -template -const typename BTreeStore:: -KeyDataType * -BTreeStore:: -lower_bound(const KeyDataType *b, const KeyDataType *e, - const KeyType &key, CompareT comp) -{ - const KeyDataType *i = b; - for (; i != e; ++i) { - if (!comp(i->_key, key)) - break; - } - return i; -} - - -template -void -BTreeStore:: -makeTree(EntryRef &ref, - const KeyDataType *array, uint32_t clusterSize) -{ - LeafNodeTypeRefPair lPair(_allocator.allocLeafNode()); - LeafNodeType *lNode = lPair.data; - lNode->setValidSlots(clusterSize); - const KeyDataType *o = array; - for (uint32_t idx = 0; idx < clusterSize; ++idx, ++o) { - lNode->update(idx, o->_key, o->getData()); - } - typedef BTreeAggregator Aggregator; - if (AggrCalcT::hasAggregated()) { - Aggregator::recalc(*lNode, _aggrCalc); - } - lNode->freeze(); - BTreeTypeRefPair tPair(allocBTree()); - tPair.data->setRoots(lPair.ref); - _store.holdElem(ref, clusterSize); - ref = tPair.ref; -} - - -template -void -BTreeStore:: -makeArray(EntryRef &ref, EntryRef root, LeafNodeType *leafNode) -{ - uint32_t clusterSize = leafNode->validSlots(); - KeyDataTypeRefPair kPair(allocKeyData(clusterSize)); - KeyDataType *kd = kPair.data; - // Copy whole leaf node - for (uint32_t idx = 0; idx < clusterSize; ++idx, ++kd) { - kd->_key = leafNode->getKey(idx); - kd->setData(leafNode->getData(idx)); - } - assert(kd == kPair.data + clusterSize); - _store.holdElem(ref, 1); - if (!leafNode->getFrozen()) { - leafNode->freeze(); - } - _allocator.holdNode(root, leafNode); - ref = kPair.ref; -} - - -template -bool -BTreeStore:: -insert(EntryRef &ref, - const KeyType &key, const DataType &data, - CompareT comp) -{ -#ifdef FORCE_APPLY - bool retVal = true; - if (ref.valid()) { - RefType iRef(ref); - uint32_t clusterSize = getClusterSize(iRef); - if (clusterSize == 0) { - const BTreeType *tree = getTreeEntry(iRef); - Iterator itr = tree->find(key, _allocator, comp); - if (itr.valid()) - retVal = false; - } else { - const KeyDataType *old = getKeyDataEntry(iRef, clusterSize); - const KeyDataType *olde = old + clusterSize; - const KeyDataType *oldi = lower_bound(old, olde, key, comp); - if (oldi < olde && !comp(key, oldi->_key)) - retVal = false; // key already present - } - } - KeyDataType addition(key, data); - if (retVal) { - apply(ref, &addition, &addition+1, nullptr, nullptr, comp); - } - return retVal; -#else - if (!ref.valid()) { - KeyDataTypeRefPair kPair(allocKeyData(1)); - KeyDataType *kd = kPair.data; - kd->_key = key; - kd->setData(data); - ref = kPair.ref; - return true; - } - RefType iRef(ref); - uint32_t clusterSize = getClusterSize(iRef); - if (clusterSize == 0) { - BTreeType *tree = getWTreeEntry(iRef); - return tree->insert(key, data, _allocator, comp, _aggrCalc); - } - const KeyDataType *old = getKeyDataEntry(iRef, clusterSize); - const KeyDataType *olde = old + clusterSize; - const KeyDataType *oldi = lower_bound(old, olde, key, comp); - if (oldi < olde && !comp(key, oldi->_key)) - return false; // key already present - if (clusterSize < clusterLimit) { - // 
Grow array - KeyDataTypeRefPair kPair(allocKeyData(clusterSize + 1)); - KeyDataType *kd = kPair.data; - // Copy data before key - for (const KeyDataType *i = old; i != oldi; ++i, ++kd) { - kd->_key = i->_key; - kd->setData(i->getData()); - } - // Copy key - kd->_key = key; - kd->setData(data); - ++kd; - // Copy data after key - for (const KeyDataType *i = oldi; i != olde; ++i, ++kd) { - kd->_key = i->_key; - kd->setData(i->getData()); - } - assert(kd == kPair.data + clusterSize + 1); - _store.holdElem(ref, clusterSize); - ref = kPair.ref; - return true; - } - // Convert from short array to tree - LeafNodeTypeRefPair lPair(_allocator.allocLeafNode()); - LeafNodeType *lNode = lPair.data; - uint32_t idx = 0; - lNode->setValidSlots(clusterSize + 1); - // Copy data before key - for (const KeyDataType *i = old; i != oldi; ++i, ++idx) { - lNode->update(idx, i->_key, i->getData()); - } - // Copy key - lNode->update(idx, key, data); - ++idx; - // Copy data after key - for (const KeyDataType *i = oldi; i != olde; ++i, ++idx) { - lNode->update(idx, i->_key, i->getData()); - } - assert(idx == clusterSize + 1); - typedef BTreeAggregator Aggregator; - if (AggrCalcT::hasAggregated()) { - Aggregator::recalc(*lNode, _aggrCalc); - } - lNode->freeze(); - BTreeTypeRefPair tPair(allocBTree()); - tPair.data->setRoots(lPair.ref); // allow immediate access to readers - _store.holdElem(ref, clusterSize); - ref = tPair.ref; - return true; -#endif -} - - -template -bool -BTreeStore:: -remove(EntryRef &ref, - const KeyType &key, - CompareT comp) -{ -#ifdef FORCE_APPLY - bool retVal = true; - if (!ref.valid()) - retVal = false; // not found - else { - RefType iRef(ref); - uint32_t clusterSize = getClusterSize(iRef); - if (clusterSize == 0) { - const BTreeType *tree = getTreeEntry(iRef); - Iterator itr = tree->find(key, _allocator, comp); - if (!itr.valid()) - retVal = false; - } else { - const KeyDataType *old = getKeyDataEntry(iRef, clusterSize); - const KeyDataType *olde = old + clusterSize; - const KeyDataType *oldi = lower_bound(old, olde, key, comp); - if (oldi == olde || comp(key, oldi->_key)) - retVal = false; // not found - } - } - std::vector additions; - std::vector removals; - removals.push_back(key); - apply(ref, - &additions[0], &additions[additions.size()], - &removals[0], &removals[removals.size()], - comp); - return retVal; -#else - if (!ref.valid()) - return false; // not found - RefType iRef(ref); - uint32_t clusterSize = getClusterSize(iRef); - if (clusterSize != 0) { - const KeyDataType *old = getKeyDataEntry(iRef, clusterSize); - const KeyDataType *olde = old + clusterSize; - const KeyDataType *oldi = lower_bound(old, olde, key, comp); - if (oldi == olde || comp(key, oldi->_key)) - return false; // not found - if (clusterSize == 1) { - _store.holdElem(ref, 1); - ref = EntryRef(); - return true; - } - // Copy to smaller array - KeyDataTypeRefPair kPair(allocKeyData(clusterSize - 1)); - KeyDataType *kd = kPair.data; - // Copy data before key - for (const KeyDataType *i = old; i != oldi; ++i, ++kd) { - kd->_key = i->_key; - kd->setData(i->getData()); - } - // Copy data after key - for (const KeyDataType *i = oldi + 1; i != olde; ++i, ++kd) { - kd->_key = i->_key; - kd->setData(i->getData()); - } - assert(kd == kPair.data + clusterSize - 1); - _store.holdElem(ref, clusterSize); - ref = kPair.ref; - return true; - } - BTreeType *tree = getWTreeEntry(iRef); - if (!tree->remove(key, _allocator, comp, _aggrCalc)) - return false; // not found - EntryRef root = tree->getRoot(); - 
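insert() and remove() above switch representation at the clusterLimit boundary: inserts grow the sorted short array until it would exceed the limit and then build a leaf node and tree, while removes convert a leaf-only tree back to a short array once it fits again. A behavioural sketch of that promote/demote rule, with std::vector standing in for the short array and std::set for the tree; none of the datastore or hold-list handling is shown and TinyPostingList is an illustrative name.

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <set>
#include <vector>

// Toy posting list that mimics the representation switch: sorted vector for
// <= clusterLimit keys, ordered set ("tree") above that.
class TinyPostingList {
    static constexpr size_t clusterLimit = 8;
    std::vector<int> _array;   // used while small
    std::set<int> _tree;       // used while large
    bool _isTree = false;
public:
    void insert(int key) {
        if (_isTree) { _tree.insert(key); return; }
        auto pos = std::lower_bound(_array.begin(), _array.end(), key);
        if (pos != _array.end() && *pos == key) return;    // already present
        if (_array.size() < clusterLimit) {
            _array.insert(pos, key);
            return;
        }
        _tree.insert(_array.begin(), _array.end());        // promote to "tree"
        _tree.insert(key);
        _array.clear();
        _isTree = true;
    }
    void remove(int key) {
        if (!_isTree) {
            auto pos = std::lower_bound(_array.begin(), _array.end(), key);
            if (pos != _array.end() && *pos == key) _array.erase(pos);
            return;
        }
        _tree.erase(key);
        if (_tree.size() <= clusterLimit) {                 // demote back to array
            _array.assign(_tree.begin(), _tree.end());
            _tree.clear();
            _isTree = false;
        }
    }
    bool isSmallArray() const { return !_isTree; }
    size_t size() const { return _isTree ? _tree.size() : _array.size(); }
};

int main() {
    TinyPostingList list;
    for (int key = 1; key <= 9; ++key) list.insert(key);
    printf("after 9 inserts: %zu keys, small=%d\n", list.size(), list.isSmallArray()); // tree
    list.remove(9);
    printf("after 1 remove:  %zu keys, small=%d\n", list.size(), list.isSmallArray()); // array again
}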
assert(NodeAllocatorType::isValidRef(root)); - if (!_allocator.isLeafRef(root)) - return true; - LeafNodeType *lNode = _allocator.mapLeafRef(root); - clusterSize = lNode->validSlots(); - assert(clusterSize > 0); - if (clusterSize > clusterLimit) - return true; - // Convert from tree to short array - makeArray(ref, root, lNode); - return true; -#endif -} - - -template -uint32_t -BTreeStore:: -getNewClusterSize(const KeyDataType *o, - const KeyDataType *oe, - AddIter a, - AddIter ae, - RemoveIter r, - RemoveIter re, - CompareT comp) -{ - uint32_t d = 0u; - if (o == oe && a == ae) - return 0u; - while (a != ae || r != re) { - if (r != re && (a == ae || comp(*r, a->_key))) { - // remove - while (o != oe && comp(o->_key, *r)) { - ++d; - ++o; - } - if (o != oe && !comp(*r, o->_key)) - ++o; - ++r; - } else { - // add or update - while (o != oe && comp(o->_key, a->_key)) { - ++d; - ++o; - } - if (o != oe && !comp(a->_key, o->_key)) - ++o; - ++d; - if (r != re && !comp(a->_key, *r)) - ++r; - ++a; - } - } - while (o != oe) { - ++d; - ++o; - } - return d; -} - - -template -void -BTreeStore:: -applyCluster(const KeyDataType *o, - const KeyDataType *oe, - KeyDataType *d, - const KeyDataType *de, - AddIter a, - AddIter ae, - RemoveIter r, - RemoveIter re, - CompareT comp) -{ - while (a != ae || r != re) { - if (r != re && (a == ae || comp(*r, a->_key))) { - // remove - while (o != oe && comp(o->_key, *r)) { - d->_key = o->_key; - d->setData(o->getData()); - ++d; - ++o; - } - if (o != oe && !comp(*r, o->_key)) - ++o; - ++r; - } else { - // add or update - while (o != oe && comp(o->_key, a->_key)) { - d->_key = o->_key; - d->setData(o->getData()); - ++d; - ++o; - } - if (o != oe && !comp(a->_key, o->_key)) - ++o; - d->_key = a->_key; - d->setData(a->getData()); - ++d; - if (r != re && !comp(a->_key, *r)) - ++r; - ++a; - } - } - while (o != oe) { - d->_key = o->_key; - d->setData(o->getData()); - ++d; - ++o; - } - assert(d == de); - (void) de; -} - - -template -void -BTreeStore:: -applyModifyTree(BTreeType *tree, - AddIter a, - AddIter ae, - RemoveIter r, - RemoveIter re, - CompareT comp) -{ - if (a == ae && r == re) - return; - Iterator itr(BTreeNode::Ref(), _allocator); - itr.lower_bound(tree->getRoot(), - (a != ae && r != re) ? (comp(a->_key, *r) ? a->_key : *r) : - ((a != ae) ? 
a->_key : *r), - comp); - while (a != ae || r != re) { - if (r != re && (a == ae || comp(*r, a->_key))) { - // remove - if (itr.valid() && comp(itr.getKey(), *r)) { - itr.binarySeek(*r, comp); - } - if (itr.valid() && !comp(*r, itr.getKey())) { - tree->remove(itr, _aggrCalc); - } - ++r; - } else { - // update or add - if (itr.valid() && comp(itr.getKey(), a->_key)) { - itr.binarySeek(a->_key, comp); - } - if (itr.valid() && !comp(a->_key, itr.getKey())) { - tree->thaw(itr); - itr.updateData(a->getData(), _aggrCalc); - } else { - tree->insert(itr, a->_key, a->getData(), _aggrCalc); - } - if (r != re && !comp(a->_key, *r)) { - ++r; - } - ++a; - } - } -} - - -template -void -BTreeStore:: -applyBuildTree(BTreeType *tree, - AddIter a, - AddIter ae, - RemoveIter r, - RemoveIter re, - CompareT comp) -{ - Iterator itr = tree->begin(_allocator); - Builder &builder = _builder; - builder.reuse(); - while (a != ae || r != re) { - if (r != re && (a == ae || comp(*r, a->_key))) { - // remove - while (itr.valid() && comp(itr.getKey(), *r)) { - builder.insert(itr.getKey(), itr.getData()); - ++itr; - } - if (itr.valid() && !comp(*r, itr.getKey())) - ++itr; - ++r; - } else { - // add or update - while (itr.valid() && comp(itr.getKey(), a->_key)) { - builder.insert(itr.getKey(), itr.getData()); - ++itr; - } - if (itr.valid() && !comp(a->_key, itr.getKey())) - ++itr; - builder.insert(a->_key, a->getData()); - if (r != re && !comp(a->_key, *r)) - ++r; - ++a; - } - } - while (itr.valid()) { - builder.insert(itr.getKey(), itr.getData()); - ++itr; - } - tree->assign(builder, _allocator); -} - - -template -void -BTreeStore:: -applyNewArray(EntryRef &ref, - AddIter aOrg, - AddIter ae) -{ - assert(!ref.valid()); - if (aOrg == ae) { - // No new data - return; - } - size_t additionSize(ae - aOrg); - uint32_t clusterSize = additionSize; - assert(clusterSize <= clusterLimit); - KeyDataTypeRefPair kPair(allocKeyData(clusterSize)); - KeyDataType *kd = kPair.data; - AddIter a = aOrg; - for (;a != ae; ++a, ++kd) { - kd->_key = a->_key; - kd->setData(a->getData()); - } - assert(kd == kPair.data + clusterSize); - assert(a == ae); - ref = kPair.ref; - } - - -template -void -BTreeStore:: -applyNewTree(EntryRef &ref, - AddIter a, - AddIter ae, - CompareT comp) -{ - assert(!ref.valid()); - size_t additionSize(ae - a); - BTreeTypeRefPair tPair(allocBTree()); - BTreeType *tree = tPair.data; - applyBuildTree(tree, a, ae, nullptr, nullptr, comp); - assert(tree->size(_allocator) == additionSize); - (void) additionSize; - ref = tPair.ref; -} - - -template -void -BTreeStore:: -applyNew(EntryRef &ref, - AddIter a, - AddIter ae, - CompareT comp) -{ - // No old data - assert(!ref.valid()); - size_t additionSize(ae - a); - uint32_t clusterSize = additionSize; - if (clusterSize <= clusterLimit) { - applyNewArray(ref, a, ae); - } else { - applyNewTree(ref, a, ae, comp); - } -} - - -template -bool -BTreeStore:: -applyCluster(EntryRef &ref, - uint32_t clusterSize, - AddIter a, - AddIter ae, - RemoveIter r, - RemoveIter re, - CompareT comp) -{ - size_t additionSize(ae - a); - size_t removeSize(re - r); - uint32_t newSizeMin = - std::max(clusterSize, - static_cast(additionSize)) - - std::min(clusterSize, static_cast(removeSize)); - RefType iRef(ref); - const KeyDataType *ob = getKeyDataEntry(iRef, clusterSize); - const KeyDataType *oe = ob + clusterSize; - if (newSizeMin <= clusterLimit) { - uint32_t newSize = getNewClusterSize(ob, oe, a, ae, r, re, comp); - if (newSize == 0) { - _store.holdElem(ref, clusterSize); - ref = EntryRef(); - return 
true; - } - if (newSize <= clusterLimit) { - KeyDataTypeRefPair kPair(allocKeyData(newSize)); - applyCluster(ob, oe, kPair.data, kPair.data + newSize, - a, ae, r, re, comp); - _store.holdElem(ref, clusterSize); - ref = kPair.ref; - return true; - } - } - // Convert from short array to tree - makeTree(ref, ob, clusterSize); - return false; -} - - -template -void -BTreeStore:: -applyTree(BTreeType *tree, - AddIter a, - AddIter ae, - RemoveIter r, - RemoveIter re, - CompareT comp) -{ - // Old data was tree or has been converted to a tree - uint32_t treeSize = tree->size(_allocator); - size_t additionSize(ae - a); - size_t removeSize(re - r); - uint64_t buildCost = treeSize * 2 + additionSize; - typedef bitcompression::EncodeContext64BE EC; - uint64_t modifyCost = (EC::asmlog2(treeSize + additionSize) + 1) * - (additionSize + removeSize); - if (modifyCost < buildCost) - applyModifyTree(tree, a, ae, r, re, comp); - else - applyBuildTree(tree, a, ae, r, re, comp); -} - - -template -void -BTreeStore:: -normalizeTree(EntryRef &ref, - BTreeType *tree, - bool wasArray) -{ - EntryRef root = tree->getRoot(); - if (!NodeAllocatorType::isValidRef(root)) { - _store.holdElem(ref, 1); - ref = EntryRef(); - return; - } - if (!_allocator.isLeafRef(root)) - return; - LeafNodeType *lNode = _allocator.mapLeafRef(root); - uint32_t treeSize = lNode->validSlots(); - assert(treeSize > 0); - if (treeSize > clusterLimit) - return; - assert(!wasArray); // Should never have used tree - (void) wasArray; - // Convert from tree to short array - makeArray(ref, root, lNode); -} - - -template -void -BTreeStore:: -apply(EntryRef &ref, - AddIter a, - AddIter ae, - RemoveIter r, - RemoveIter re, - CompareT comp) -{ - if (!ref.valid()) { - // No old data - applyNew(ref, a, ae, comp); - return; - } - RefType iRef(ref); - bool wasArray = false; - uint32_t clusterSize = getClusterSize(iRef); - if (clusterSize != 0) { - wasArray = true; - if (applyCluster(ref, clusterSize, a, ae, r, re, comp)) - return; - iRef = ref; - } - // Old data was tree or has been converted to a tree - BTreeType *tree = getWTreeEntry(iRef); - applyTree(tree, a, ae, r, re, comp); - normalizeTree(ref, tree, wasArray); -} - - -template -void -BTreeStore:: -clear(const EntryRef ref) -{ - if (!ref.valid()) - return; - RefType iRef(ref); - uint32_t clusterSize = getClusterSize(iRef); - if (clusterSize == 0) { - BTreeType *tree = getWTreeEntry(iRef); - tree->clear(_allocator); - _store.holdElem(ref, 1); - } else { - _store.holdElem(ref, clusterSize); - } -} - - -template -size_t -BTreeStore:: -size(const EntryRef ref) const -{ - if (!ref.valid()) - return 0; - RefType iRef(ref); - uint32_t clusterSize = getClusterSize(iRef); - if (clusterSize == 0) { - const BTreeType *tree = getTreeEntry(iRef); - return tree->size(_allocator); - } - return clusterSize; -} - - -template -size_t -BTreeStore:: -frozenSize(const EntryRef ref) const -{ - if (!ref.valid()) - return 0; - RefType iRef(ref); - uint32_t clusterSize = getClusterSize(iRef); - if (clusterSize == 0) { - const BTreeType *tree = getTreeEntry(iRef); - return tree->frozenSize(_allocator); - } - return clusterSize; -} - - -template -bool -BTreeStore:: -isSmallArray(const EntryRef ref) const -{ - if (!ref.valid()) - return true; - RefType iRef(ref); - uint32_t typeId(_store.getBufferState(iRef.bufferId()).getTypeId()); - return typeId < clusterLimit; -} - - -template -typename BTreeStore:: -Iterator -BTreeStore:: -begin(const EntryRef ref) const -{ - if (!ref.valid()) - return Iterator(); - RefType iRef(ref); - 
uint32_t clusterSize = getClusterSize(iRef); - if (clusterSize == 0) { - const BTreeType *tree = getTreeEntry(iRef); - return tree->begin(_allocator); - } - const KeyDataType *shortArray = getKeyDataEntry(iRef, clusterSize); - return Iterator(shortArray, clusterSize, _allocator, _aggrCalc); -} - - -template -typename BTreeStore:: -ConstIterator -BTreeStore:: -beginFrozen(const EntryRef ref) const -{ - if (!ref.valid()) - return ConstIterator(); - RefType iRef(ref); - uint32_t clusterSize = getClusterSize(iRef); - if (clusterSize == 0) { - const BTreeType *tree = getTreeEntry(iRef); - return tree->getFrozenView(_allocator).begin(); - } - const KeyDataType *shortArray = getKeyDataEntry(iRef, clusterSize); - return ConstIterator(shortArray, clusterSize, _allocator, _aggrCalc); -} - -template -void -BTreeStore:: -beginFrozen(const EntryRef ref, std::vector &where) const -{ - if (!ref.valid()) { - where.emplace_back(); - return; - } - RefType iRef(ref); - uint32_t clusterSize = getClusterSize(iRef); - if (clusterSize == 0) { - const BTreeType *tree = getTreeEntry(iRef); - tree->getFrozenView(_allocator).begin(where); - return; - } - const KeyDataType *shortArray = getKeyDataEntry(iRef, clusterSize); - where.emplace_back(shortArray, clusterSize, _allocator, _aggrCalc); -} - -template -typename BTreeStore:: -AggregatedType -BTreeStore:: -getAggregated(const EntryRef ref) const -{ - if (!ref.valid()) - return AggregatedType(); - RefType iRef(ref); - uint32_t clusterSize = getClusterSize(iRef); - if (clusterSize == 0) { - const BTreeType *tree = getTreeEntry(iRef); - return tree->getAggregated(_allocator); - } - const KeyDataType *shortArray = getKeyDataEntry(iRef, clusterSize); - AggregatedType a; - for (uint32_t i = 0; i < clusterSize; ++i) { - _aggrCalc.add(a, _aggrCalc.getVal(shortArray[i].getData())); - } - return a; -} - -} diff --git a/searchlib/src/vespa/searchlib/btree/btreetraits.h b/searchlib/src/vespa/searchlib/btree/btreetraits.h deleted file mode 100644 index efa7cb4de34..00000000000 --- a/searchlib/src/vespa/searchlib/btree/btreetraits.h +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include - -namespace search::btree { - -template -struct BTreeTraits { - static const size_t LEAF_SLOTS = LS; - static const size_t INTERNAL_SLOTS = IS; - static const size_t PATH_SIZE = PS; - static const bool BINARY_SEEK = BS; -}; - -typedef BTreeTraits<16, 16, 10, true> BTreeDefaultTraits; - -} diff --git a/searchlib/src/vespa/searchlib/btree/minmaxaggrcalc.h b/searchlib/src/vespa/searchlib/btree/minmaxaggrcalc.h deleted file mode 100644 index b33422ec3e3..00000000000 --- a/searchlib/src/vespa/searchlib/btree/minmaxaggrcalc.h +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
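applyTree() above decides between modifying the tree in place and rebuilding it from scratch by comparing an estimated modify cost (each add/remove pays a logarithmic seek) with a build cost that is linear in the existing tree plus the additions. A standalone sketch of that decision rule, using std::log2 in place of the bitcompression asmlog2 helper:

#include <cmath>
#include <cstdint>
#include <cstdio>

// Rough cost model: rebuilding touches every existing entry plus the
// additions; modifying pays roughly log2(size) per individual change.
bool preferModify(uint64_t treeSize, uint64_t additions, uint64_t removals) {
    uint64_t buildCost = treeSize * 2 + additions;
    uint64_t logCost = static_cast<uint64_t>(std::log2(double(treeSize + additions))) + 1;
    uint64_t modifyCost = logCost * (additions + removals);
    return modifyCost < buildCost;
}

int main() {
    // A handful of changes against a big tree -> modify in place.
    printf("1M tree, 10 changes: %s\n", preferModify(1000000, 5, 5) ? "modify" : "rebuild");
    // Changes comparable to the tree size -> cheaper to rebuild from scratch.
    printf("1K tree, 5K adds:    %s\n", preferModify(1000, 5000, 0) ? "modify" : "rebuild");
}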
- -#pragma once - -#include "minmaxaggregated.h" - -namespace search::btree { - -class MinMaxAggrCalc -{ -public: - MinMaxAggrCalc() { } - static bool hasAggregated() { return true; } - static int32_t getVal(int32_t val) { return val; } - static void add(MinMaxAggregated &a, int32_t val) { a.add(val); } - static void add(MinMaxAggregated &a, const MinMaxAggregated &ca) { a.add(ca); } - static void add(MinMaxAggregated &a, const MinMaxAggregated &oldca, const MinMaxAggregated &ca) { a.add(oldca, ca); } - - /* Returns true if recalculation is needed */ - static bool - remove(MinMaxAggregated &a, int32_t val) - { - return a.remove(val); - } - - /* Returns true if recalculation is needed */ - static bool - remove(MinMaxAggregated &a, const MinMaxAggregated &oldca, - const MinMaxAggregated &ca) - { - return a.remove(oldca, ca); - } - - /* Returns true if recalculation is needed */ - static bool - update(MinMaxAggregated &a, int32_t oldVal, int32_t val) - { - return a.update(oldVal, val); - } - - /* Returns true if recalculation is needed */ - static bool - update(MinMaxAggregated &a, const MinMaxAggregated &oldca, - const MinMaxAggregated &ca) - { - return a.update(oldca, ca); - } -}; - -} diff --git a/searchlib/src/vespa/searchlib/btree/minmaxaggregated.h b/searchlib/src/vespa/searchlib/btree/minmaxaggregated.h deleted file mode 100644 index add570a6e6b..00000000000 --- a/searchlib/src/vespa/searchlib/btree/minmaxaggregated.h +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include -#include - -namespace search::btree { - -class MinMaxAggregated -{ - int32_t _min; - int32_t _max; - -public: - MinMaxAggregated() - : _min(std::numeric_limits::max()), - _max(std::numeric_limits::min()) - { } - - MinMaxAggregated(int32_t min, int32_t max) - : _min(min), - _max(max) - { } - - int32_t getMin() const { return _min; } - int32_t getMax() const { return _max; } - - bool operator==(const MinMaxAggregated &rhs) const { - return ((_min == rhs._min) && (_max == rhs._max)); - } - - bool operator!=(const MinMaxAggregated &rhs) const { - return ((_min != rhs._min) || (_max != rhs._max)); - } - - void - add(int32_t val) - { - if (_min > val) - _min = val; - if (_max < val) - _max = val; - } - - void - add(const MinMaxAggregated &ca) - { - if (_min > ca._min) - _min = ca._min; - if (_max < ca._max) - _max = ca._max; - } - - void - add(const MinMaxAggregated &oldca, - const MinMaxAggregated &ca) - { - (void) oldca; - add(ca); - } - - /* Returns true if recalculation is needed */ - bool - remove(int32_t val) - { - return (_min == val || _max == val); - } - - /* Returns true if recalculation is needed */ - bool - remove(const MinMaxAggregated &oldca, - const MinMaxAggregated &ca) - { - return (_min == oldca._min && _min != ca._min) || - (_max == oldca._max && _max != ca._max); - } - - /* Returns true if recalculation is needed */ - bool - update(int32_t oldVal, int32_t val) - { - if ((_min == oldVal && _min < val) || - (_max == oldVal && _max > val)) { - return true; - } - add(val); - return false; - } - - /* Returns true if recalculation is needed */ - bool - update(const MinMaxAggregated &oldca, - const MinMaxAggregated &ca) - { - if ((_min == oldca._min && _min < ca._min) || - (_max == oldca._max && _max > ca._max)) { - return true; - } - add(ca); - return false; - } -}; - -} diff --git a/searchlib/src/vespa/searchlib/btree/noaggrcalc.h b/searchlib/src/vespa/searchlib/btree/noaggrcalc.h 
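MinMaxAggregated above stores only the two extremes, so removing or shrinking the current min/max cannot be repaired locally; its remove()/update() therefore return true to tell the B-tree aggregator to recalculate from the children. A trimmed-down sketch of that contract, where MinMax is an illustrative stand-in:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <limits>

// Minimal min/max aggregate with the same "return true if the caller must
// recalculate" convention as MinMaxAggregated.
struct MinMax {
    int32_t min = std::numeric_limits<int32_t>::max();
    int32_t max = std::numeric_limits<int32_t>::min();
    void add(int32_t v) { min = std::min(min, v); max = std::max(max, v); }
    // Removing a value only matters if it was an extreme; then the new
    // extreme cannot be known without revisiting the remaining values.
    bool remove(int32_t v) const { return v == min || v == max; }
};

int main() {
    MinMax a;
    a.add(3); a.add(7); a.add(5);
    assert(a.min == 3 && a.max == 7);
    assert(!a.remove(5));  // interior value: aggregate still valid
    assert(a.remove(7));   // removing the max: caller must recalculate
}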
deleted file mode 100644 index e77e8bc204a..00000000000 --- a/searchlib/src/vespa/searchlib/btree/noaggrcalc.h +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "noaggregated.h" - -namespace search::btree { - -class NoAggrCalc -{ -public: - NoAggrCalc() - { - } - - static bool - hasAggregated() - { - return false; - } - - template - static inline int32_t - getVal(const DataT &val) - { - (void) val; - return 0; - } - - static void - add(NoAggregated &a, int32_t val) - { - (void) a; - (void) val; - } - - static void - add(NoAggregated &a, const NoAggregated &ca) - { - (void) a; - (void) ca; - } - - static void - add(NoAggregated &a, - const NoAggregated &oldca, - const NoAggregated &ca) - { - (void) a; - (void) oldca; - (void) ca; - } - - /* Returns true if recalculation is needed */ - static bool - remove(NoAggregated &a, int32_t val) - { - (void) a; - (void) val; - return false; - } - - /* Returns true if recalculation is needed */ - static bool - remove(NoAggregated &a, const NoAggregated &oldca, const NoAggregated &ca) - { - (void) a; - (void) oldca; - (void) ca; - return false; - } - - /* Returns true if recalculation is needed */ - static bool - update(NoAggregated &a, int32_t oldVal, int32_t val) - { - (void) a; - (void) oldVal; - (void) val; - return false; - } - - /* Returns true if recalculation is needed */ - static bool - update(NoAggregated &a, const NoAggregated &oldca, const NoAggregated &ca) - { - (void) a; - (void) oldca; - (void) ca; - return false; - } -}; - -} diff --git a/searchlib/src/vespa/searchlib/btree/noaggregated.h b/searchlib/src/vespa/searchlib/btree/noaggregated.h deleted file mode 100644 index e16465f5e0a..00000000000 --- a/searchlib/src/vespa/searchlib/btree/noaggregated.h +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -namespace search::btree { - -class NoAggregated -{ -public: - NoAggregated() { } - bool operator==(const NoAggregated &) const { return true; } - bool operator!=(const NoAggregated &) const { return false; } -}; - -} diff --git a/searchlib/src/vespa/searchlib/datastore/CMakeLists.txt b/searchlib/src/vespa/searchlib/datastore/CMakeLists.txt deleted file mode 100644 index 5af7bd21d78..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/CMakeLists.txt +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -vespa_add_library(searchlib_datastore OBJECT - SOURCES - array_store_config.cpp - buffer_type.cpp - bufferstate.cpp - datastore.cpp - datastorebase.cpp - entryref.cpp - DEPENDS -) diff --git a/searchlib/src/vespa/searchlib/datastore/allocator.h b/searchlib/src/vespa/searchlib/datastore/allocator.h deleted file mode 100644 index 8a522266c1a..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/allocator.h +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "datastorebase.h" -#include "entryref.h" -#include "handle.h" -#include - -namespace search::datastore { - -/** - * Allocator used to allocate entries of a specific type in an underlying data store. 
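NoAggrCalc/NoAggregated above are the do-nothing aggregation policy: hasAggregated() returns a constant-foldable false, so the aggregation branches in the B-tree templates cost nothing when no per-subtree aggregation is wanted. A small sketch of that policy-based design, with made-up SumAggr/NoAggr/TinyList types:

#include <cstdio>

struct NoAggr {
    static bool hasAggregated() { return false; }
    static void add(int &, int) {}
};
struct SumAggr {
    static bool hasAggregated() { return true; }
    static void add(int &aggr, int v) { aggr += v; }
};

// The container maintains an aggregate only if the policy asks for one;
// the branch is resolved at compile time in optimized builds.
template <typename AggrCalcT>
struct TinyList {
    int values[4] = {};
    int size = 0;
    int aggregate = 0;
    void push(int v) {
        values[size++] = v;
        if (AggrCalcT::hasAggregated()) {
            AggrCalcT::add(aggregate, v);
        }
    }
};

int main() {
    TinyList<SumAggr> withSum;
    TinyList<NoAggr> without;
    withSum.push(3); withSum.push(4);
    without.push(3); without.push(4);
    printf("sum policy aggregate:   %d\n", withSum.aggregate);  // 7
    printf("no-op policy aggregate: %d\n", without.aggregate);  // stays 0
}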
- */ -template -class Allocator -{ -public: - using ConstArrayRef = vespalib::ConstArrayRef; - using HandleType = Handle; - -protected: - DataStoreBase &_store; - uint32_t _typeId; - -public: - Allocator(DataStoreBase &store, uint32_t typeId); - - template - HandleType alloc(Args && ... args); - - HandleType allocArray(ConstArrayRef array); - HandleType allocArray(size_t size); -}; - -} diff --git a/searchlib/src/vespa/searchlib/datastore/allocator.hpp b/searchlib/src/vespa/searchlib/datastore/allocator.hpp deleted file mode 100644 index fa22ba0c3ed..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/allocator.hpp +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "allocator.h" -#include "bufferstate.h" - -namespace search::datastore { - -template -Allocator::Allocator(DataStoreBase &store, uint32_t typeId) - : _store(store), - _typeId(typeId) -{ -} - -template -template -typename Allocator::HandleType -Allocator::alloc(Args && ... args) -{ - _store.ensureBufferCapacity(_typeId, 1); - uint32_t activeBufferId = _store.getActiveBufferId(_typeId); - BufferState &state = _store.getBufferState(activeBufferId); - assert(state.isActive()); - size_t oldBufferSize = state.size(); - RefT ref(oldBufferSize, activeBufferId); - EntryT *entry = _store.getEntry(ref); - new (static_cast(entry)) EntryT(std::forward(args)...); - state.pushed_back(1); - return HandleType(ref, entry); -} - -template -typename Allocator::HandleType -Allocator::allocArray(ConstArrayRef array) -{ - _store.ensureBufferCapacity(_typeId, array.size()); - uint32_t activeBufferId = _store.getActiveBufferId(_typeId); - BufferState &state = _store.getBufferState(activeBufferId); - assert(state.isActive()); - assert(state.getArraySize() == array.size()); - size_t oldBufferSize = state.size(); - assert((oldBufferSize % array.size()) == 0); - RefT ref((oldBufferSize / array.size()), activeBufferId); - EntryT *buf = _store.template getEntryArray(ref, array.size()); - for (size_t i = 0; i < array.size(); ++i) { - new (static_cast(buf + i)) EntryT(array[i]); - } - state.pushed_back(array.size()); - return HandleType(ref, buf); -} - -template -typename Allocator::HandleType -Allocator::allocArray(size_t size) -{ - _store.ensureBufferCapacity(_typeId, size); - uint32_t activeBufferId = _store.getActiveBufferId(_typeId); - BufferState &state = _store.getBufferState(activeBufferId); - assert(state.isActive()); - assert(state.getArraySize() == size); - size_t oldBufferSize = state.size(); - assert((oldBufferSize % size) == 0); - RefT ref((oldBufferSize / size), activeBufferId); - EntryT *buf = _store.template getEntryArray(ref, size); - for (size_t i = 0; i < size; ++i) { - new (static_cast(buf + i)) EntryT(); - } - state.pushed_back(size); - return HandleType(ref, buf); -} - -} - diff --git a/searchlib/src/vespa/searchlib/datastore/array_store.h b/searchlib/src/vespa/searchlib/datastore/array_store.h deleted file mode 100644 index d9d5afcbd43..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/array_store.h +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
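Allocator::alloc() above reserves room in the active buffer and placement-constructs the entry there, handing back a (reference, pointer) handle. A standalone sketch of the placement-new part, with a plain byte buffer standing in for DataStoreBase and none of the buffer-state bookkeeping; TinyBufferAllocator is an illustrative name.

#include <cstddef>
#include <cstdio>
#include <new>
#include <string>
#include <utility>
#include <vector>

// Tiny stand-in for a datastore buffer: entries are placement-constructed
// into pre-allocated storage and addressed by their slot index (the "ref").
// Relies on operator new's default alignment; no capacity check for brevity.
template <typename EntryT>
class TinyBufferAllocator {
    std::vector<std::byte> _storage;
    size_t _used = 0;   // number of entries constructed so far
public:
    explicit TinyBufferAllocator(size_t capacity)
        : _storage(capacity * sizeof(EntryT)) {}
    ~TinyBufferAllocator() {
        for (size_t i = 0; i < _used; ++i) {
            get(i)->~EntryT();              // destroy what was constructed
        }
    }
    template <typename... Args>
    size_t alloc(Args &&... args) {         // returns the "ref" (slot index)
        void *slot = _storage.data() + _used * sizeof(EntryT);
        new (slot) EntryT(std::forward<Args>(args)...);
        return _used++;
    }
    EntryT *get(size_t ref) {
        return std::launder(reinterpret_cast<EntryT *>(_storage.data() + ref * sizeof(EntryT)));
    }
};

int main() {
    TinyBufferAllocator<std::string> allocator(8);
    size_t ref = allocator.alloc("hello datastore");
    printf("entry at ref %zu: %s\n", ref, allocator.get(ref)->c_str());
}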
- -#pragma once - -#include "array_store_config.h" -#include "buffer_type.h" -#include "bufferstate.h" -#include "datastore.h" -#include "entryref.h" -#include "i_compaction_context.h" -#include - -namespace search::datastore { - -/** - * Datastore for storing arrays of type EntryT that is accessed via a 32-bit EntryRef. - * - * The default EntryRef type uses 19 bits for offset (524288 values) and 13 bits for buffer id (8192 buffers). - * Arrays of size [1,maxSmallArraySize] are stored in buffers with arrays of equal size. - * Arrays of size >maxSmallArraySize are stored in buffers with vespalib::Array instances that are heap allocated. - * - * The max value of maxSmallArraySize is (2^bufferBits - 1). - */ -template > -class ArrayStore -{ -public: - using ConstArrayRef = vespalib::ConstArrayRef; - using DataStoreType = DataStoreT; - using SmallArrayType = BufferType; - using LargeArray = vespalib::Array; - using AllocSpec = ArrayStoreConfig::AllocSpec; - -private: - class LargeArrayType : public BufferType { - private: - using ParentType = BufferType; - using ParentType::_emptyEntry; - using CleanContext = typename ParentType::CleanContext; - public: - LargeArrayType(const AllocSpec &spec); - virtual void cleanHold(void *buffer, size_t offset, size_t numElems, CleanContext cleanCtx) override; - }; - - - uint32_t _largeArrayTypeId; - uint32_t _maxSmallArraySize; - DataStoreType _store; - std::vector> _smallArrayTypes; - LargeArrayType _largeArrayType; - using generation_t = vespalib::GenerationHandler::generation_t; - - void initArrayTypes(const ArrayStoreConfig &cfg); - // 1-to-1 mapping between type ids and sizes for small arrays is enforced during initialization. - uint32_t getTypeId(size_t arraySize) const { return arraySize; } - size_t getArraySize(uint32_t typeId) const { return typeId; } - EntryRef addSmallArray(const ConstArrayRef &array); - EntryRef addLargeArray(const ConstArrayRef &array); - ConstArrayRef getSmallArray(RefT ref, size_t arraySize) const { - const EntryT *buf = _store.template getEntryArray(ref, arraySize); - return ConstArrayRef(buf, arraySize); - } - ConstArrayRef getLargeArray(RefT ref) const { - const LargeArray *buf = _store.template getEntry(ref); - return ConstArrayRef(&(*buf)[0], buf->size()); - } - -public: - ArrayStore(const ArrayStoreConfig &cfg); - ~ArrayStore(); - EntryRef add(const ConstArrayRef &array); - ConstArrayRef get(EntryRef ref) const { - if (!ref.valid()) { - return ConstArrayRef(); - } - RefT internalRef(ref); - uint32_t typeId = _store.getTypeId(internalRef.bufferId()); - if (typeId != _largeArrayTypeId) { - size_t arraySize = getArraySize(typeId); - return getSmallArray(internalRef, arraySize); - } else { - return getLargeArray(internalRef); - } - } - void remove(EntryRef ref); - ICompactionContext::UP compactWorst(bool compactMemory, bool compactAddressSpace); - vespalib::MemoryUsage getMemoryUsage() const { return _store.getMemoryUsage(); } - - /** - * Returns the address space usage by this store as the ratio between active buffers - * and the total number available buffers. 
- */ - vespalib::AddressSpace addressSpaceUsage() const; - - // Pass on hold list management to underlying store - void transferHoldLists(generation_t generation) { _store.transferHoldLists(generation); } - void trimHoldLists(generation_t firstUsed) { _store.trimHoldLists(firstUsed); } - vespalib::GenerationHolder &getGenerationHolder() { return _store.getGenerationHolder(); } - void setInitializing(bool initializing) { _store.setInitializing(initializing); } - - // Should only be used for unit testing - const BufferState &bufferState(EntryRef ref) const; - - static ArrayStoreConfig optimizedConfigForHugePage(size_t maxSmallArraySize, - size_t hugePageSize, - size_t smallPageSize, - size_t minNumArraysForNewBuffer, - float allocGrowFactor); -}; - -} diff --git a/searchlib/src/vespa/searchlib/datastore/array_store.hpp b/searchlib/src/vespa/searchlib/datastore/array_store.hpp deleted file mode 100644 index 524013652c5..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/array_store.hpp +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "array_store.h" -#include "datastore.hpp" -#include -#include - -namespace search::datastore { - -template -ArrayStore::LargeArrayType::LargeArrayType(const AllocSpec &spec) - : BufferType(1, spec.minArraysInBuffer, spec.maxArraysInBuffer, spec.numArraysForNewBuffer, spec.allocGrowFactor) -{ -} - -template -void -ArrayStore::LargeArrayType::cleanHold(void *buffer, size_t offset, size_t numElems, CleanContext cleanCtx) -{ - LargeArray *elem = static_cast(buffer) + offset; - for (size_t i = 0; i < numElems; ++i) { - cleanCtx.extraBytesCleaned(sizeof(EntryT) * elem->size()); - *elem = _emptyEntry; - ++elem; - } -} - -template -void -ArrayStore::initArrayTypes(const ArrayStoreConfig &cfg) -{ - _largeArrayTypeId = _store.addType(&_largeArrayType); - assert(_largeArrayTypeId == 0); - for (uint32_t arraySize = 1; arraySize <= _maxSmallArraySize; ++arraySize) { - const AllocSpec &spec = cfg.specForSize(arraySize); - _smallArrayTypes.push_back(std::make_unique - (arraySize, spec.minArraysInBuffer, spec.maxArraysInBuffer, - spec.numArraysForNewBuffer, spec.allocGrowFactor)); - uint32_t typeId = _store.addType(_smallArrayTypes.back().get()); - assert(typeId == arraySize); // Enforce 1-to-1 mapping between type ids and sizes for small arrays - } -} - -template -ArrayStore::ArrayStore(const ArrayStoreConfig &cfg) - : _largeArrayTypeId(0), - _maxSmallArraySize(cfg.maxSmallArraySize()), - _store(), - _smallArrayTypes(), - _largeArrayType(cfg.specForSize(0)) -{ - initArrayTypes(cfg); - _store.initActiveBuffers(); -} - -template -ArrayStore::~ArrayStore() -{ - _store.clearHoldLists(); - _store.dropBuffers(); -} - -template -EntryRef -ArrayStore::add(const ConstArrayRef &array) -{ - if (array.size() == 0) { - return EntryRef(); - } - if (array.size() <= _maxSmallArraySize) { - return addSmallArray(array); - } else { - return addLargeArray(array); - } -} - -template -EntryRef -ArrayStore::addSmallArray(const ConstArrayRef &array) -{ - uint32_t typeId = getTypeId(array.size()); - return _store.template allocator(typeId).allocArray(array).ref; -} - -template -EntryRef -ArrayStore::addLargeArray(const ConstArrayRef &array) -{ - _store.ensureBufferCapacity(_largeArrayTypeId, 1); - uint32_t activeBufferId = _store.getActiveBufferId(_largeArrayTypeId); - BufferState &state = _store.getBufferState(activeBufferId); - assert(state.isActive()); - size_t 
oldBufferSize = state.size(); - RefT ref(oldBufferSize, activeBufferId); - LargeArray *buf = _store.template getEntry(ref); - new (static_cast(buf)) LargeArray(array.cbegin(), array.cend()); - state.pushed_back(1, sizeof(EntryT) * array.size()); - return ref; -} - -template -void -ArrayStore::remove(EntryRef ref) -{ - if (ref.valid()) { - RefT internalRef(ref); - uint32_t typeId = _store.getTypeId(internalRef.bufferId()); - if (typeId != _largeArrayTypeId) { - size_t arraySize = getArraySize(typeId); - _store.holdElem(ref, arraySize); - } else { - _store.holdElem(ref, 1, sizeof(EntryT) * get(ref).size()); - } - } -} - -namespace arraystore { - -template -class CompactionContext : public ICompactionContext { -private: - using ArrayStoreType = ArrayStore; - DataStoreBase &_dataStore; - ArrayStoreType &_store; - std::vector _bufferIdsToCompact; - - bool compactingBuffer(uint32_t bufferId) { - return std::find(_bufferIdsToCompact.begin(), _bufferIdsToCompact.end(), - bufferId) != _bufferIdsToCompact.end(); - } -public: - CompactionContext(DataStoreBase &dataStore, - ArrayStoreType &store, - std::vector bufferIdsToCompact) - : _dataStore(dataStore), - _store(store), - _bufferIdsToCompact(std::move(bufferIdsToCompact)) - {} - ~CompactionContext() override { - _dataStore.finishCompact(_bufferIdsToCompact); - } - void compact(vespalib::ArrayRef refs) override { - if (!_bufferIdsToCompact.empty()) { - for (auto &ref : refs) { - if (ref.valid()) { - RefT internalRef(ref); - if (compactingBuffer(internalRef.bufferId())) { - EntryRef newRef = _store.add(_store.get(ref)); - std::atomic_thread_fence(std::memory_order_release); - ref = newRef; - } - } - } - } - } -}; - -} - -template -ICompactionContext::UP -ArrayStore::compactWorst(bool compactMemory, bool compactAddressSpace) -{ - std::vector bufferIdsToCompact = _store.startCompactWorstBuffers(compactMemory, compactAddressSpace); - return std::make_unique> - (_store, *this, std::move(bufferIdsToCompact)); -} - -template -vespalib::AddressSpace -ArrayStore::addressSpaceUsage() const -{ - return _store.getAddressSpaceUsage(); -} - -template -const BufferState & -ArrayStore::bufferState(EntryRef ref) const -{ - RefT internalRef(ref); - return _store.getBufferState(internalRef.bufferId()); -} - -template -ArrayStoreConfig -ArrayStore::optimizedConfigForHugePage(size_t maxSmallArraySize, - size_t hugePageSize, - size_t smallPageSize, - size_t minNumArraysForNewBuffer, - float allocGrowFactor) -{ - return ArrayStoreConfig::optimizeForHugePage(maxSmallArraySize, - hugePageSize, - smallPageSize, - sizeof(EntryT), - RefT::offsetSize(), - minNumArraysForNewBuffer, - allocGrowFactor); -} - -} diff --git a/searchlib/src/vespa/searchlib/datastore/array_store_config.cpp b/searchlib/src/vespa/searchlib/datastore/array_store_config.cpp deleted file mode 100644 index 0581183f675..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/array_store_config.cpp +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
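ArrayStore above routes each array by size: arrays of up to maxSmallArraySize elements go into per-size buffers whose type id equals the array size, while larger arrays become individually heap-allocated entries behind the large-array type. A toy sketch of that routing, where TinyArrayStore is illustrative, plain vectors replace the datastore buffers, and empty arrays, hold lists and compaction are left out:

#include <cstddef>
#include <cstdio>
#include <vector>

// Toy version of the small/large split: small arrays share flat per-size
// pools, large arrays are individually heap-allocated copies.
class TinyArrayStore {
    static constexpr size_t maxSmallArraySize = 3;
    std::vector<int> _smallPools[maxSmallArraySize];   // pool[s]: arrays of size s+1, back to back
    std::vector<std::vector<int>> _large;
public:
    struct Ref { bool large; size_t arraySize; size_t idx; };

    Ref add(const std::vector<int> &array) {
        if (array.size() <= maxSmallArraySize) {
            std::vector<int> &pool = _smallPools[array.size() - 1];
            size_t idx = pool.size() / array.size();
            pool.insert(pool.end(), array.begin(), array.end());
            return Ref{false, array.size(), idx};
        }
        _large.push_back(array);                       // heap-allocated copy
        return Ref{true, 0, _large.size() - 1};
    }
    const int *get(Ref ref, size_t &size) const {
        if (!ref.large) {
            size = ref.arraySize;
            return _smallPools[ref.arraySize - 1].data() + ref.idx * ref.arraySize;
        }
        size = _large[ref.idx].size();
        return _large[ref.idx].data();
    }
};

int main() {
    TinyArrayStore store;
    TinyArrayStore::Ref small = store.add({1, 2});          // lands in the size-2 pool
    TinyArrayStore::Ref large = store.add({1, 2, 3, 4, 5}); // exceeds the limit -> heap array
    size_t n = 0;
    const int *p = store.get(small, n);
    printf("small: %zu elems, first=%d\n", n, p[0]);
    p = store.get(large, n);
    printf("large: %zu elems, last=%d\n", n, p[n - 1]);
}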
- -#include "array_store_config.h" -#include - -namespace search::datastore { - -ArrayStoreConfig::ArrayStoreConfig(size_t maxSmallArraySize, const AllocSpec &defaultSpec) - : _allocSpecs() -{ - for (size_t i = 0; i < (maxSmallArraySize + 1); ++i) { - _allocSpecs.push_back(defaultSpec); - } -} - -ArrayStoreConfig::ArrayStoreConfig(const AllocSpecVector &allocSpecs) - : _allocSpecs(allocSpecs) -{ -} - -const ArrayStoreConfig::AllocSpec & -ArrayStoreConfig::specForSize(size_t arraySize) const -{ - assert(arraySize < _allocSpecs.size()); - return _allocSpecs[arraySize]; -} - -namespace { - -size_t -capToLimits(size_t value, size_t minLimit, size_t maxLimit) -{ - size_t result = std::max(value, minLimit); - return std::min(result, maxLimit); -} - -size_t -alignToSmallPageSize(size_t value, size_t minLimit, size_t smallPageSize) -{ - return ((value - minLimit) / smallPageSize) * smallPageSize + minLimit; -} - -} - -ArrayStoreConfig -ArrayStoreConfig::optimizeForHugePage(size_t maxSmallArraySize, - size_t hugePageSize, - size_t smallPageSize, - size_t entrySize, - size_t maxEntryRefOffset, - size_t minNumArraysForNewBuffer, - float allocGrowFactor) -{ - AllocSpecVector allocSpecs; - allocSpecs.emplace_back(0, maxEntryRefOffset, minNumArraysForNewBuffer, allocGrowFactor); // large array spec; - for (size_t arraySize = 1; arraySize <= maxSmallArraySize; ++arraySize) { - size_t numArraysForNewBuffer = hugePageSize / (entrySize * arraySize); - numArraysForNewBuffer = capToLimits(numArraysForNewBuffer, minNumArraysForNewBuffer, maxEntryRefOffset); - numArraysForNewBuffer = alignToSmallPageSize(numArraysForNewBuffer, minNumArraysForNewBuffer, smallPageSize); - allocSpecs.emplace_back(0, maxEntryRefOffset, numArraysForNewBuffer, allocGrowFactor); - } - return ArrayStoreConfig(allocSpecs); -} - -} diff --git a/searchlib/src/vespa/searchlib/datastore/array_store_config.h b/searchlib/src/vespa/searchlib/datastore/array_store_config.h deleted file mode 100644 index a39c4454308..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/array_store_config.h +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include -#include - -namespace search::datastore { - -/** - * Config specifying layout and buffer allocation strategy for an array store. - */ -class ArrayStoreConfig -{ -public: - /** - * Specification of buffer allocation strategy for arrays of a given size. - */ - struct AllocSpec { - // Minimum number of arrays to allocate in a buffer. - size_t minArraysInBuffer; - // Maximum number of arrays to allocate in a buffer. - size_t maxArraysInBuffer; - // Number of arrays needed before allocating a new buffer instead of just resizing the first one. - size_t numArraysForNewBuffer; - // Grow factor used when allocating a new buffer. - float allocGrowFactor; - AllocSpec(size_t minArraysInBuffer_, - size_t maxArraysInBuffer_, - size_t numArraysForNewBuffer_, - float allocGrowFactor_) - : minArraysInBuffer(minArraysInBuffer_), - maxArraysInBuffer(maxArraysInBuffer_), - numArraysForNewBuffer(numArraysForNewBuffer_), - allocGrowFactor(allocGrowFactor_) {} - }; - - using AllocSpecVector = std::vector; - -private: - AllocSpecVector _allocSpecs; - - /** - * Setup an array store with arrays of size [1-(allocSpecs.size()-1)] allocated in buffers and - * larger arrays are heap allocated. The allocation spec for a given array size is found in the given vector. 
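optimizeForHugePage() above sizes each small-array buffer so that a fresh allocation is roughly one huge page: numArraysForNewBuffer = hugePageSize / (entrySize * arraySize), capped to [minNumArraysForNewBuffer, maxEntryRefOffset] and aligned down to the small page size. The following standalone program redoes that arithmetic under assumed parameters (4-byte entries, 2 MiB huge pages, 4 KiB small pages; the limit values are illustrative, not taken from the source):

#include <algorithm>
#include <cstddef>
#include <cstdio>

size_t capToLimits(size_t value, size_t minLimit, size_t maxLimit) {
    return std::min(std::max(value, minLimit), maxLimit);
}

// Align down onto a small-page grid that starts at minLimit, mirroring
// alignToSmallPageSize in array_store_config.cpp.
size_t alignToSmallPageSize(size_t value, size_t minLimit, size_t smallPageSize) {
    return ((value - minLimit) / smallPageSize) * smallPageSize + minLimit;
}

int main() {
    const size_t hugePageSize = 2 * 1024 * 1024;  // assumed 2 MiB
    const size_t smallPageSize = 4 * 1024;        // assumed 4 KiB
    const size_t entrySize = 4;                   // e.g. a 32-bit entry
    const size_t minNumArrays = 8 * 1024;         // illustrative lower bound
    const size_t maxEntryRefOffset = 512 * 1024;  // illustrative offsetSize()
    for (size_t arraySize = 1; arraySize <= 4; ++arraySize) {
        size_t n = hugePageSize / (entrySize * arraySize);
        n = capToLimits(n, minNumArrays, maxEntryRefOffset);
        n = alignToSmallPageSize(n, minNumArrays, smallPageSize);
        printf("arraySize %zu -> numArraysForNewBuffer %zu\n", arraySize, n);
    }
}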
- * Allocation spec for large arrays is located at position 0. - */ - ArrayStoreConfig(const AllocSpecVector &allocSpecs); - -public: - /** - * Setup an array store with arrays of size [1-maxSmallArraySize] allocated in buffers - * with the given default allocation spec. Larger arrays are heap allocated. - */ - ArrayStoreConfig(size_t maxSmallArraySize, const AllocSpec &defaultSpec); - - size_t maxSmallArraySize() const { return _allocSpecs.size() - 1; } - const AllocSpec &specForSize(size_t arraySize) const; - - /** - * Generate a config that is optimized for the given memory huge page size. - */ - static ArrayStoreConfig optimizeForHugePage(size_t maxSmallArraySize, - size_t hugePageSize, - size_t smallPageSize, - size_t entrySize, - size_t maxEntryRefOffset, - size_t minNumArraysForNewBuffer, - float allocGrowFactor); -}; - -} diff --git a/searchlib/src/vespa/searchlib/datastore/buffer_type.cpp b/searchlib/src/vespa/searchlib/datastore/buffer_type.cpp deleted file mode 100644 index 3955a6cb399..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/buffer_type.cpp +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#include "buffer_type.h" -#include -#include - -namespace search::datastore { - -namespace { - -constexpr float DEFAULT_ALLOC_GROW_FACTOR = 0.2; - -} - -void -BufferTypeBase::CleanContext::extraBytesCleaned(size_t value) -{ - assert(_extraBytes >= value); - _extraBytes -= value; -} - -BufferTypeBase::BufferTypeBase(uint32_t arraySize, - uint32_t minArrays, - uint32_t maxArrays, - uint32_t numArraysForNewBuffer, - float allocGrowFactor) - : _arraySize(arraySize), - _minArrays(std::min(minArrays, maxArrays)), - _maxArrays(maxArrays), - _numArraysForNewBuffer(std::min(numArraysForNewBuffer, maxArrays)), - _allocGrowFactor(allocGrowFactor), - _activeBuffers(0), - _holdBuffers(0), - _activeUsedElems(0), - _holdUsedElems(0), - _lastUsedElems(nullptr) -{ -} - -BufferTypeBase::BufferTypeBase(uint32_t arraySize, - uint32_t minArrays, - uint32_t maxArrays) - : BufferTypeBase(arraySize, minArrays, maxArrays, 0u, DEFAULT_ALLOC_GROW_FACTOR) -{ -} - -BufferTypeBase::~BufferTypeBase() -{ - assert(_activeBuffers == 0); - assert(_holdBuffers == 0); - assert(_activeUsedElems == 0); - assert(_holdUsedElems == 0); - assert(_lastUsedElems == nullptr); -} - -size_t -BufferTypeBase::getReservedElements(uint32_t bufferId) const -{ - return bufferId == 0 ? 
_arraySize : 0u; -} - -void -BufferTypeBase::flushLastUsed() -{ - if (_lastUsedElems != nullptr) { - _activeUsedElems += *_lastUsedElems; - _lastUsedElems = nullptr; - } -} - -void -BufferTypeBase::onActive(uint32_t bufferId, size_t *usedElems, size_t &deadElems, void *buffer) -{ - flushLastUsed(); - ++_activeBuffers; - _lastUsedElems = usedElems; - size_t reservedElems = getReservedElements(bufferId); - if (reservedElems != 0u) { - initializeReservedElements(buffer, reservedElems); - *usedElems = reservedElems; - deadElems = reservedElems; - } -} - -void -BufferTypeBase::onHold(const size_t *usedElems) -{ - if (usedElems == _lastUsedElems) { - flushLastUsed(); - } - --_activeBuffers; - ++_holdBuffers; - assert(_activeUsedElems >= *usedElems); - _activeUsedElems -= *usedElems; - _holdUsedElems += *usedElems; -} - -void -BufferTypeBase::onFree(size_t usedElems) -{ - --_holdBuffers; - assert(_holdUsedElems >= usedElems); - _holdUsedElems -= usedElems; -} - -void -BufferTypeBase::clampMaxArrays(uint32_t maxArrays) -{ - _maxArrays = std::min(_maxArrays, maxArrays); - _minArrays = std::min(_minArrays, _maxArrays); - _numArraysForNewBuffer = std::min(_numArraysForNewBuffer, _maxArrays); -} - -size_t -BufferTypeBase::calcArraysToAlloc(uint32_t bufferId, size_t elemsNeeded, bool resizing) const -{ - size_t reservedElems = getReservedElements(bufferId); - size_t usedElems = (resizing ? 0 : _activeUsedElems); - if (_lastUsedElems != nullptr) { - usedElems += *_lastUsedElems; - } - assert((usedElems % _arraySize) == 0); - size_t usedArrays = usedElems / _arraySize; - size_t neededArrays = (elemsNeeded + (resizing ? usedElems : reservedElems) + _arraySize - 1) / _arraySize; - size_t growArrays = (usedArrays * _allocGrowFactor); - size_t wantedArrays = std::max((resizing ? usedArrays : 0u) + growArrays, - static_cast(_minArrays)); - size_t result = wantedArrays; - if (result < neededArrays) { - result = neededArrays; - } - if (result > _maxArrays) { - result = _maxArrays; - } - assert(result >= neededArrays); - return result; -} - -} - diff --git a/searchlib/src/vespa/searchlib/datastore/buffer_type.h b/searchlib/src/vespa/searchlib/datastore/buffer_type.h deleted file mode 100644 index 116b45fe106..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/buffer_type.h +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include -#include - -namespace search::datastore { - -/** - * Abstract class used to manage allocation and de-allocation of a specific data type in underlying memory buffers in a data store. - * Each buffer is owned by an instance of BufferState. - * - * This class handles allocation of both single elements (_arraySize = 1) and array of elements (_arraySize > 1). - * The strategy for how to grow buffers is specified as well. 
- */ -class BufferTypeBase -{ -protected: - uint32_t _arraySize; // Number of elements in an allocation unit - uint32_t _minArrays; // Minimum number of arrays to allocate in a buffer - uint32_t _maxArrays; // Maximum number of arrays to allocate in a buffer - // Number of arrays needed before allocating a new buffer instead of just resizing the first one - uint32_t _numArraysForNewBuffer; - float _allocGrowFactor; - uint32_t _activeBuffers; - uint32_t _holdBuffers; - size_t _activeUsedElems; // used elements in all but last active buffer - size_t _holdUsedElems; // used elements in all held buffers - const size_t *_lastUsedElems; // used elements in last active buffer - -public: - class CleanContext { - private: - size_t &_extraBytes; - public: - CleanContext(size_t &extraBytes) : _extraBytes(extraBytes) {} - void extraBytesCleaned(size_t value); - }; - - BufferTypeBase(const BufferTypeBase &rhs) = delete; - BufferTypeBase & operator=(const BufferTypeBase &rhs) = delete; - BufferTypeBase(uint32_t arraySize, uint32_t minArrays, uint32_t maxArrays); - BufferTypeBase(uint32_t arraySize, uint32_t minArrays, uint32_t maxArrays, - uint32_t numArraysForNewBuffer, float allocGrowFactor); - virtual ~BufferTypeBase(); - virtual void destroyElements(void *buffer, size_t numElems) = 0; - virtual void fallbackCopy(void *newBuffer, const void *oldBuffer, size_t numElems) = 0; - // Return number of reserved elements at start of buffer, to avoid - // invalid reference and handle data at negative offset (alignment - // hacks) as used by dense tensor store. - virtual size_t getReservedElements(uint32_t bufferId) const; - // Initialize reserved elements at start of buffer. - virtual void initializeReservedElements(void *buffer, size_t reservedElements) = 0; - virtual size_t elementSize() const = 0; - virtual void cleanHold(void *buffer, size_t offset, size_t numElems, CleanContext cleanCtx) = 0; - size_t getArraySize() const { return _arraySize; } - void flushLastUsed(); - virtual void onActive(uint32_t bufferId, size_t *usedElems, size_t &deadElems, void *buffer); - void onHold(const size_t *usedElems); - virtual void onFree(size_t usedElems); - - /** - * Calculate number of arrays to allocate for new buffer given how many elements are needed. - */ - virtual size_t calcArraysToAlloc(uint32_t bufferId, size_t elementsNeeded, bool resizing) const; - - void clampMaxArrays(uint32_t maxArrays); - - uint32_t getActiveBuffers() const { return _activeBuffers; } - size_t getMaxArrays() const { return _maxArrays; } - uint32_t getNumArraysForNewBuffer() const { return _numArraysForNewBuffer; } -}; - -/** - * Concrete class used to manage allocation and de-allocation of elements of type EntryType in data store buffers. 
- */ -template -class BufferType : public BufferTypeBase -{ -protected: - EntryType _emptyEntry; - -public: - BufferType(const BufferType &rhs) = delete; - BufferType & operator=(const BufferType &rhs) = delete; - BufferType(uint32_t arraySize, uint32_t minArrays, uint32_t maxArrays); - BufferType(uint32_t arraySize, uint32_t minArrays, uint32_t maxArrays, - uint32_t numArraysForNewBuffer, float allocGrowFactor); - ~BufferType(); - void destroyElements(void *buffer, size_t numElems) override; - void fallbackCopy(void *newBuffer, const void *oldBuffer, size_t numElems) override; - void initializeReservedElements(void *buffer, size_t reservedElements) override; - void cleanHold(void *buffer, size_t offset, size_t numElems, CleanContext cleanCxt) override; - size_t elementSize() const override { return sizeof(EntryType); } -}; - -template -BufferType::BufferType(uint32_t arraySize, uint32_t minArrays, uint32_t maxArrays) - : BufferTypeBase(arraySize, minArrays, maxArrays), - _emptyEntry() -{ } - -template -BufferType::BufferType(uint32_t arraySize, uint32_t minArrays, uint32_t maxArrays, - uint32_t numArraysForNewBuffer, float allocGrowFactor) - : BufferTypeBase(arraySize, minArrays, maxArrays, numArraysForNewBuffer, allocGrowFactor), - _emptyEntry() -{ } - -template -BufferType::~BufferType() { } - -template -void -BufferType::destroyElements(void *buffer, size_t numElems) -{ - EntryType *e = static_cast(buffer); - for (size_t j = numElems; j != 0; --j) { - e->~EntryType(); - ++e; - } -} - -template -void -BufferType::fallbackCopy(void *newBuffer, - const void *oldBuffer, - size_t numElems) -{ - EntryType *d = static_cast(newBuffer); - const EntryType *s = static_cast(oldBuffer); - for (size_t j = numElems; j != 0; --j) { - new (static_cast(d)) EntryType(*s); - ++s; - ++d; - } -} - -template -void -BufferType::initializeReservedElements(void *buffer, size_t reservedElems) -{ - EntryType *e = static_cast(buffer); - for (size_t j = reservedElems; j != 0; --j) { - new (static_cast(e)) EntryType(_emptyEntry); - ++e; - } -} - -template -void -BufferType::cleanHold(void *buffer, size_t offset, size_t numElems, CleanContext) -{ - EntryType *e = static_cast(buffer) + offset; - for (size_t j = numElems; j != 0; --j) { - *e = _emptyEntry; - ++e; - } -} - -} diff --git a/searchlib/src/vespa/searchlib/datastore/bufferstate.cpp b/searchlib/src/vespa/searchlib/datastore/bufferstate.cpp deleted file mode 100644 index 638117c8c60..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/bufferstate.cpp +++ /dev/null @@ -1,295 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
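
Aside (not part of this patch): the growth policy in the buffer type code above (minArrays, maxArrays, numArraysForNewBuffer, allocGrowFactor) is easiest to see with concrete numbers. The standalone sketch below simply mirrors the arithmetic of the non-resizing branch of BufferTypeBase::calcArraysToAlloc(); all names and parameter values are local stand-ins chosen for illustration, not part of the real class.

// Standalone sketch: mirrors the non-resizing branch of
// BufferTypeBase::calcArraysToAlloc(). Values are illustrative only.
#include <algorithm>
#include <cstddef>
#include <cstdio>

size_t arraysToAllocSketch(size_t usedArrays, size_t neededArrays,
                           size_t minArrays, size_t maxArrays,
                           float allocGrowFactor)
{
    // Grow proportionally to what is already in use, but never below the
    // configured minimum, never below what is actually needed, and never
    // above the configured maximum.
    size_t growArrays = static_cast<size_t>(usedArrays * allocGrowFactor);
    size_t wantedArrays = std::max(growArrays, minArrays);
    size_t result = std::max(wantedArrays, neededArrays);
    return std::min(result, maxArrays);
}

int main()
{
    size_t used = 0;
    for (int round = 0; round < 6; ++round) {
        size_t alloc = arraysToAllocSketch(used, 1, 16, 1u << 20, 0.2f);
        std::printf("round %d: used=%zu -> allocate %zu arrays\n", round, used, alloc);
        used += alloc;
    }
    return 0;
}

With a grow factor of 0.2 each new buffer is roughly 20% of what is already allocated, which keeps the number of buffers logarithmic in the total number of stored arrays.
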
- -#include "bufferstate.h" -#include - -using vespalib::alloc::Alloc; -using vespalib::alloc::MemoryAllocator; - -namespace search::datastore { - -BufferState::FreeListList::~FreeListList() -{ - assert(_head == NULL); // Owner should have disabled free lists -} - - -BufferState::BufferState() - : _usedElems(0), - _allocElems(0), - _deadElems(0u), - _state(FREE), - _disableElemHoldList(false), - _holdElems(0u), - _extraUsedBytes(0), - _extraHoldBytes(0), - _freeList(), - _freeListList(NULL), - _nextHasFree(NULL), - _prevHasFree(NULL), - _typeHandler(NULL), - _typeId(0), - _arraySize(0), - _compacting(false), - _buffer(Alloc::alloc(0, MemoryAllocator::HUGEPAGE_SIZE)) -{ -} - - -BufferState::~BufferState() -{ - assert(_state == FREE); - assert(_freeListList == NULL); - assert(_nextHasFree == NULL); - assert(_prevHasFree == NULL); - assert(_holdElems == 0); - assert(_freeList.empty()); -} - -namespace { - -struct AllocResult { - size_t elements; - size_t bytes; - AllocResult(size_t elements_, size_t bytes_) : elements(elements_), bytes(bytes_) {} -}; - -size_t -roundUpToMatchAllocator(size_t sz) -{ - if (sz == 0) { - return 0; - } - // We round up the wanted number of bytes to allocate to match - // the underlying allocator to ensure little to no waste of allocated memory. - if (sz < MemoryAllocator::HUGEPAGE_SIZE) { - // Match heap allocator in vespamalloc. - return vespalib::roundUp2inN(sz); - } else { - // Match mmap allocator. - return MemoryAllocator::roundUpToHugePages(sz); - } -} - -AllocResult -calcAllocation(uint32_t bufferId, - BufferTypeBase &typeHandler, - size_t elementsNeeded, - bool resizing) -{ - size_t allocArrays = typeHandler.calcArraysToAlloc(bufferId, elementsNeeded, resizing); - size_t allocElements = allocArrays * typeHandler.getArraySize(); - size_t allocBytes = roundUpToMatchAllocator(allocElements * typeHandler.elementSize()); - size_t maxAllocBytes = typeHandler.getMaxArrays() * typeHandler.getArraySize() * typeHandler.elementSize(); - if (allocBytes > maxAllocBytes) { - // Ensure that allocated bytes does not exceed the maximum handled by this type. 
- allocBytes = maxAllocBytes; - } - size_t adjustedAllocElements = (allocBytes / typeHandler.elementSize()); - return AllocResult(adjustedAllocElements, allocBytes); -} - -} - -void -BufferState::onActive(uint32_t bufferId, uint32_t typeId, - BufferTypeBase *typeHandler, - size_t elementsNeeded, - void *&buffer) -{ - assert(buffer == NULL); - assert(_buffer.get() == NULL); - assert(_state == FREE); - assert(_typeHandler == NULL); - assert(_allocElems == 0); - assert(_usedElems == 0); - assert(_deadElems == 0u); - assert(_holdElems == 0); - assert(_extraUsedBytes == 0); - assert(_extraHoldBytes == 0); - assert(_freeList.empty()); - assert(_nextHasFree == NULL); - assert(_prevHasFree == NULL); - assert(_freeListList == NULL || _freeListList->_head != this); - - size_t reservedElements = typeHandler->getReservedElements(bufferId); - (void) reservedElements; - AllocResult alloc = calcAllocation(bufferId, *typeHandler, elementsNeeded, false); - assert(alloc.elements >= reservedElements + elementsNeeded); - _buffer.create(alloc.bytes).swap(_buffer); - buffer = _buffer.get(); - assert(buffer != NULL || alloc.elements == 0u); - _allocElems = alloc.elements; - _state = ACTIVE; - _typeHandler = typeHandler; - _typeId = typeId; - _arraySize = _typeHandler->getArraySize(); - typeHandler->onActive(bufferId, &_usedElems, _deadElems, buffer); -} - - -void -BufferState::onHold() -{ - assert(_state == ACTIVE); - assert(_typeHandler != NULL); - _state = HOLD; - _compacting = false; - assert(_deadElems <= _usedElems); - assert(_holdElems <= (_usedElems - _deadElems)); - _holdElems = _usedElems - _deadElems; // Put everyting not dead on hold - _typeHandler->onHold(&_usedElems); - if (!_freeList.empty()) { - removeFromFreeListList(); - FreeList().swap(_freeList); - } - assert(_nextHasFree == NULL); - assert(_prevHasFree == NULL); - assert(_freeListList == NULL || _freeListList->_head != this); - setFreeListList(NULL); -} - - -void -BufferState::onFree(void *&buffer) -{ - assert(buffer == _buffer.get()); - assert(_state == HOLD); - assert(_typeHandler != NULL); - assert(_deadElems <= _usedElems); - assert(_holdElems == _usedElems - _deadElems); - _typeHandler->destroyElements(buffer, _usedElems); - Alloc::alloc().swap(_buffer); - _typeHandler->onFree(_usedElems); - buffer = NULL; - _usedElems = 0; - _allocElems = 0; - _deadElems = 0u; - _holdElems = 0u; - _extraUsedBytes = 0; - _extraHoldBytes = 0; - _state = FREE; - _typeHandler = NULL; - _arraySize = 0; - assert(_freeList.empty()); - assert(_nextHasFree == NULL); - assert(_prevHasFree == NULL); - assert(_freeListList == NULL || _freeListList->_head != this); - setFreeListList(NULL); - _disableElemHoldList = false; -} - - -void -BufferState::dropBuffer(void *&buffer) -{ - if (_state == FREE) { - assert(buffer == NULL); - return; - } - assert(buffer != NULL || _allocElems == 0); - if (_state == ACTIVE) { - onHold(); - } - if (_state == HOLD) { - onFree(buffer); - } - assert(_state == FREE); - assert(buffer == NULL); -} - - -void -BufferState::setFreeListList(FreeListList *freeListList) -{ - if (_state == FREE && freeListList != NULL) { - return; - } - if (freeListList == _freeListList) { - return; // No change - } - if (_freeListList != NULL && !_freeList.empty()) { - removeFromFreeListList(); // Remove from old free list - } - _freeListList = freeListList; - if (!_freeList.empty()) { - if (freeListList != NULL) { - addToFreeListList(); // Changed free list list - } else { - FreeList().swap(_freeList); // Free lists have been disabled - } - } -} - - -void 
-BufferState::addToFreeListList() -{ - assert(_freeListList != NULL && _freeListList->_head != this); - assert(_nextHasFree == NULL); - assert(_prevHasFree == NULL); - if (_freeListList->_head != NULL) { - _nextHasFree = _freeListList->_head; - _prevHasFree = _nextHasFree->_prevHasFree; - _nextHasFree->_prevHasFree = this; - _prevHasFree->_nextHasFree = this; - } else { - _nextHasFree = this; - _prevHasFree = this; - } - _freeListList->_head = this; -} - - -void -BufferState::removeFromFreeListList() -{ - assert(_freeListList != NULL); - assert(_nextHasFree != NULL); - assert(_prevHasFree != NULL); - if (_nextHasFree == this) { - assert(_prevHasFree == this); - assert(_freeListList->_head == this); - _freeListList->_head = NULL; - } else { - assert(_prevHasFree != this); - _freeListList->_head = _nextHasFree; - _nextHasFree->_prevHasFree = _prevHasFree; - _prevHasFree->_nextHasFree = _nextHasFree; - } - _nextHasFree = NULL; - _prevHasFree = NULL; -} - - -void -BufferState::disableElemHoldList() -{ - _disableElemHoldList = true; -} - - -void -BufferState::fallbackResize(uint32_t bufferId, - size_t elementsNeeded, - void *&buffer, - Alloc &holdBuffer) -{ - assert(_state == ACTIVE); - assert(_typeHandler != NULL); - assert(holdBuffer.get() == NULL); - AllocResult alloc = calcAllocation(bufferId, *_typeHandler, elementsNeeded, true); - assert(alloc.elements >= _usedElems + elementsNeeded); - assert(alloc.elements > _allocElems); - Alloc newBuffer = _buffer.create(alloc.bytes); - _typeHandler->fallbackCopy(newBuffer.get(), buffer, _usedElems); - holdBuffer.swap(_buffer); - std::atomic_thread_fence(std::memory_order_release); - _buffer = std::move(newBuffer); - buffer = _buffer.get(); - _allocElems = alloc.elements; - std::atomic_thread_fence(std::memory_order_release); -} - -} - diff --git a/searchlib/src/vespa/searchlib/datastore/bufferstate.h b/searchlib/src/vespa/searchlib/datastore/bufferstate.h deleted file mode 100644 index 1cc26d8dd18..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/bufferstate.h +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "buffer_type.h" -#include "entryref.h" -#include -#include -#include - -namespace search::datastore { - -/** - * Represents a memory allocated buffer (used in a data store) with its state. - * - * This class has no direct knowledge of what kind of data is stored in the buffer. - * It uses a type handler (BufferTypeBase) to calculate how much memory to allocate, - * and how to destruct elements in a buffer. - * - * It also supports use of free lists, where previously allocated elements can be re-used. - * First the element is put on hold, then on the free list (counted as dead). - */ -class BufferState -{ -public: - typedef vespalib::alloc::Alloc Alloc; - - class FreeListList - { - public: - BufferState *_head; - - FreeListList() : _head(NULL) { } - ~FreeListList(); - }; - - typedef vespalib::Array FreeList; - - enum State { - FREE, - ACTIVE, - HOLD - }; - -private: - size_t _usedElems; - size_t _allocElems; - size_t _deadElems; - State _state; - bool _disableElemHoldList; - size_t _holdElems; - // Number of bytes that are heap allocated by elements that are stored in this buffer. - // For simple types this is 0. - size_t _extraUsedBytes; - // Number of bytes that are heap allocated by elements that are stored in this buffer and is now on hold. - // For simple types this is 0. 
- size_t _extraHoldBytes; - FreeList _freeList; - FreeListList *_freeListList; // non-NULL if free lists are enabled - - // NULL pointers if not on circular list of buffer states with free elems - BufferState *_nextHasFree; - BufferState *_prevHasFree; - - BufferTypeBase *_typeHandler; - uint32_t _typeId; - uint32_t _arraySize; - bool _compacting; - Alloc _buffer; - -public: - /* - * TODO: Check if per-buffer free lists are useful, or if - *compaction should always be used to free up whole buffers. - */ - - BufferState(); - ~BufferState(); - - /** - * Transition from FREE to ACTIVE state. - * - * @param bufferId Id of buffer to be active. - * @param typeId registered data type for buffer. - * @param typeHandler type handler for registered data type. - * @param elementsNeeded Number of elements needed to be free - * @param buffer start of buffer. - */ - void onActive(uint32_t bufferId, uint32_t typeId, BufferTypeBase *typeHandler, - size_t elementsNeeded, void *&buffer); - - /** - * Transition from ACTIVE to HOLD state. - */ - void onHold(); - - /** - * Transition from HOLD to FREE state. - */ - void onFree(void *&buffer); - - /** - * Set list of buffer states with nonempty free lists. - * - * @param freeListList List of buffer states. If NULL then free lists - * are disabled. - */ - void setFreeListList(FreeListList *freeListList); - - void disableFreeList() { setFreeListList(nullptr); } - - /** - * Add buffer state to list of buffer states with nonempty free lists. - */ - void addToFreeListList(); - - /** - * Remove buffer state from list of buffer states with nonempty free lists. - */ - void removeFromFreeListList(); - - /** - * Disable hold of elements, just mark then as dead without - * cleanup. Typically used when tearing down data structure in a - * controlled manner. - */ - void disableElemHoldList(); - - /** - * Pop element from free list. 
- */ - EntryRef popFreeList() { - EntryRef ret = _freeList.back(); - _freeList.pop_back(); - if (_freeList.empty()) { - removeFromFreeListList(); - } - _deadElems -= _arraySize; - return ret; - } - - size_t size() const { return _usedElems; } - size_t capacity() const { return _allocElems; } - size_t remaining() const { return _allocElems - _usedElems; } - void pushed_back(size_t numElems, size_t extraBytes = 0) { - _usedElems += numElems; - _extraUsedBytes += extraBytes; - } - void cleanHold(void *buffer, size_t offset, size_t numElems) { - _typeHandler->cleanHold(buffer, offset, numElems, BufferTypeBase::CleanContext(_extraHoldBytes)); - } - void dropBuffer(void *&buffer); - uint32_t getTypeId() const { return _typeId; } - uint32_t getArraySize() const { return _arraySize; } - size_t getDeadElems() const { return _deadElems; } - size_t getHoldElems() const { return _holdElems; } - size_t getExtraUsedBytes() const { return _extraUsedBytes; } - size_t getExtraHoldBytes() const { return _extraHoldBytes; } - bool getCompacting() const { return _compacting; } - void setCompacting() { _compacting = true; } - void fallbackResize(uint32_t bufferId, size_t elementsNeeded, void *&buffer, Alloc &holdBuffer); - - bool isActive(uint32_t typeId) const { - return ((_state == ACTIVE) && (_typeId == typeId)); - } - bool isActive() const { return (_state == ACTIVE); } - bool isOnHold() const { return (_state == HOLD); } - bool isFree() const { return (_state == FREE); } - State getState() const { return _state; } - const BufferTypeBase *getTypeHandler() const { return _typeHandler; } - BufferTypeBase *getTypeHandler() { return _typeHandler; } - - void incDeadElems(size_t value) { _deadElems += value; } - void incHoldElems(size_t value) { _holdElems += value; } - void decHoldElems(size_t value) { - assert(_holdElems >= value); - _holdElems -= value; - } - void incExtraHoldBytes(size_t value) { - _extraHoldBytes += value; - } - - bool hasDisabledElemHoldList() const { return _disableElemHoldList; } - const FreeList &freeList() const { return _freeList; } - FreeList &freeList() { return _freeList; } - const FreeListList *freeListList() const { return _freeListList; } - FreeListList *freeListList() { return _freeListList; } - -}; - -} diff --git a/searchlib/src/vespa/searchlib/datastore/datastore.cpp b/searchlib/src/vespa/searchlib/datastore/datastore.cpp deleted file mode 100644 index 308dc750113..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/datastore.cpp +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#include "datastore.h" -#include "datastore.hpp" -#include -#include - -namespace search::datastore { - -template class DataStoreT >; - -} - -template void vespalib::Array::increase(size_t); -template class vespalib::RcuVector; -template class vespalib::RcuVectorBase; -//template void vespalib::RcuVectorBase::expandAndInsert(const search::datastore::EntryRef &); diff --git a/searchlib/src/vespa/searchlib/datastore/datastore.h b/searchlib/src/vespa/searchlib/datastore/datastore.h deleted file mode 100644 index 6d7376f1b0c..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/datastore.h +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
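
Aside (not part of this patch): BufferState above is driven through a small FREE -> ACTIVE -> HOLD -> FREE life cycle. The standalone sketch below reduces the class to that state machine to make the allowed transition order explicit; the real transitions also move element counts and the underlying allocation, which is omitted here.

// Standalone sketch: the BufferState life cycle reduced to its state machine.
#include <cassert>
#include <cstdio>

enum class State { FREE, ACTIVE, HOLD };

struct BufferStateSketch {
    State state = State::FREE;

    void onActive() { assert(state == State::FREE);   state = State::ACTIVE; }
    void onHold()   { assert(state == State::ACTIVE); state = State::HOLD; }
    void onFree()   { assert(state == State::HOLD);   state = State::FREE; }

    // Mirrors BufferState::dropBuffer(): drive the buffer back to FREE no
    // matter where in the life cycle it currently is.
    void dropBuffer() {
        if (state == State::ACTIVE) { onHold(); }
        if (state == State::HOLD)   { onFree(); }
        assert(state == State::FREE);
    }
};

int main()
{
    BufferStateSketch s;
    s.onActive();
    s.dropBuffer();
    std::printf("state is FREE again: %s\n", (s.state == State::FREE) ? "yes" : "no");
    return 0;
}
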
- -#pragma once - -#include "allocator.h" -#include "datastorebase.h" -#include "free_list_allocator.h" -#include "free_list_raw_allocator.h" -#include "raw_allocator.h" - -namespace search::btree { - -template -struct DefaultReclaimer { - static void reclaim(EntryType *entry) { - (void) entry; - } -}; - -} - -namespace search::datastore { - -/** - * Concrete data store using the given EntryRef type to reference stored data. - */ -template > -class DataStoreT : public DataStoreBase -{ -private: -public: - typedef RefT RefType; - - DataStoreT(const DataStoreT &rhs) = delete; - DataStoreT &operator=(const DataStoreT &rhs) = delete; - DataStoreT(); - ~DataStoreT(); - - /** - * Increase number of dead elements in buffer. - * - * @param ref Reference to dead stored features - * @param dead Number of newly dead elements - */ - void incDead(EntryRef ref, size_t deadElems) { - RefType intRef(ref); - DataStoreBase::incDead(intRef.bufferId(), deadElems); - } - - /** - * Free element(s). - */ - void freeElem(EntryRef ref, size_t numElems); - - /** - * Hold element(s). - */ - void holdElem(EntryRef ref, size_t numElems, size_t extraBytes = 0); - - /** - * Trim elem hold list, freeing elements that no longer needs to be held. - * - * @param usedGen lowest generation that is still used. - */ - void trimElemHoldList(generation_t usedGen) override; - - void clearElemHoldList() override; - - bool getCompacting(EntryRef ref) const { - return getBufferState(RefType(ref).bufferId()).getCompacting(); - } - - template - Allocator allocator(uint32_t typeId); - - template - FreeListAllocator freeListAllocator(uint32_t typeId); - - template - RawAllocator rawAllocator(uint32_t typeId); - - template - FreeListRawAllocator freeListRawAllocator(uint32_t typeId); - -}; - -/** - * Concrete data store storing elements of type EntryType, using the given EntryRef type to reference stored data. - */ -template > -class DataStore : public DataStoreT -{ -protected: - typedef DataStoreT ParentType; - using ParentType::ensureBufferCapacity; - using ParentType::_activeBufferIds; - using ParentType::_freeListLists; - using ParentType::getEntry; - using ParentType::dropBuffers; - using ParentType::initActiveBuffers; - using ParentType::addType; - - BufferType _type; -public: - typedef typename ParentType::RefType RefType; - DataStore(const DataStore &rhs) = delete; - DataStore &operator=(const DataStore &rhs) = delete; - DataStore(); - ~DataStore(); - - EntryRef addEntry(const EntryType &e); - const EntryType &getEntry(EntryRef ref) const; - - template - FreeListAllocator freeListAllocator(); -}; - -extern template class DataStoreT >; - -} diff --git a/searchlib/src/vespa/searchlib/datastore/datastore.hpp b/searchlib/src/vespa/searchlib/datastore/datastore.hpp deleted file mode 100644 index 797cd75cb08..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/datastore.hpp +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
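
Aside (not part of this patch): datastore.h above is the typical entry point for users of this code. The usage sketch below assumes a build against this module, an include path that mirrors the file locations in this diff, and the single-type-parameter form DataStore<EntryType> with the default reference type; the generation numbers are arbitrary illustrative values.

// Usage sketch. The template definitions live in the .hpp header, so that is
// what a translation unit instantiating DataStore<int> would include.
#include <vespa/searchlib/datastore/datastore.hpp>
#include <cassert>

using search::datastore::DataStore;
using search::datastore::EntryRef;

int main()
{
    DataStore<int> store;                 // default EntryRefT reference type
    EntryRef ref = store.addEntry(42);
    assert(store.getEntry(ref) == 42);

    // Removal is generation based: hold the element, tag the hold with the
    // writer's current generation, and free it once no reader can still
    // observe that generation.
    store.holdElem(ref, 1);
    store.transferHoldLists(10);          // writer generation at removal time
    store.trimHoldLists(11);              // oldest generation still in use
    return 0;
}
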
- -#pragma once - -#include "allocator.hpp" -#include "datastore.h" -#include "free_list_allocator.hpp" -#include "free_list_raw_allocator.hpp" -#include "raw_allocator.hpp" -#include - -namespace search::datastore { - -template -DataStoreT::DataStoreT() - : DataStoreBase(RefType::numBuffers(), - RefType::offsetSize() / RefType::align(1)) -{ -} - -template -DataStoreT::~DataStoreT() -{ -} - -template -void -DataStoreT::freeElem(EntryRef ref, size_t numElems) -{ - RefType intRef(ref); - BufferState &state = getBufferState(intRef.bufferId()); - if (state.isActive()) { - if (state.freeListList() != NULL && numElems == state.getArraySize()) { - if (state.freeList().empty()) { - state.addToFreeListList(); - } - state.freeList().push_back(ref); - } - } else { - assert(state.isOnHold()); - } - state.incDeadElems(numElems); - state.cleanHold(getBuffer(intRef.bufferId()), - (intRef.offset() / RefType::align(1)) * - state.getArraySize(), numElems); -} - -template -void -DataStoreT::holdElem(EntryRef ref, size_t numElems, size_t extraBytes) -{ - RefType intRef(ref); - size_t alignedLen = RefType::align(numElems); - BufferState &state = getBufferState(intRef.bufferId()); - assert(state.isActive()); - if (state.hasDisabledElemHoldList()) { - state.incDeadElems(alignedLen); - return; - } - _elemHold1List.push_back(ElemHold1ListElem(ref, alignedLen)); - state.incHoldElems(alignedLen); - state.incExtraHoldBytes(extraBytes); -} - -template -void -DataStoreT::trimElemHoldList(generation_t usedGen) -{ - ElemHold2List &elemHold2List = _elemHold2List; - - ElemHold2List::iterator it(elemHold2List.begin()); - ElemHold2List::iterator ite(elemHold2List.end()); - uint32_t freed = 0; - for (; it != ite; ++it) { - if (static_cast(it->_generation - usedGen) >= 0) - break; - RefType intRef(it->_ref); - BufferState &state = getBufferState(intRef.bufferId()); - freeElem(it->_ref, it->_len); - state.decHoldElems(it->_len); - ++freed; - } - if (freed != 0) { - elemHold2List.erase(elemHold2List.begin(), it); - } -} - -template -void -DataStoreT::clearElemHoldList() -{ - ElemHold2List &elemHold2List = _elemHold2List; - - ElemHold2List::iterator it(elemHold2List.begin()); - ElemHold2List::iterator ite(elemHold2List.end()); - for (; it != ite; ++it) { - RefType intRef(it->_ref); - BufferState &state = getBufferState(intRef.bufferId()); - freeElem(it->_ref, it->_len); - state.decHoldElems(it->_len); - } - elemHold2List.clear(); -} - -template -template -Allocator -DataStoreT::allocator(uint32_t typeId) -{ - return Allocator(*this, typeId); -} - -template -template -FreeListAllocator -DataStoreT::freeListAllocator(uint32_t typeId) -{ - return FreeListAllocator(*this, typeId); -} - -template -template -RawAllocator -DataStoreT::rawAllocator(uint32_t typeId) -{ - return RawAllocator(*this, typeId); -} - -template -template -FreeListRawAllocator -DataStoreT::freeListRawAllocator(uint32_t typeId) -{ - return FreeListRawAllocator(*this, typeId); -} - -template -DataStore::DataStore() - : ParentType(), - _type(1, RefType::offsetSize(), RefType::offsetSize()) -{ - addType(&_type); - initActiveBuffers(); -} - -template -DataStore::~DataStore() -{ - dropBuffers(); // Drop buffers before type handlers are dropped -} - -template -EntryRef -DataStore::addEntry(const EntryType &e) -{ - ensureBufferCapacity(0, 1); - uint32_t activeBufferId = _activeBufferIds[0]; - BufferState &state = this->getBufferState(activeBufferId); - size_t oldSize = state.size(); - EntryType *be = static_cast(this->getBuffer(activeBufferId)) + oldSize; - new 
(static_cast(be)) EntryType(e); - RefType ref(oldSize, activeBufferId); - state.pushed_back(1); - return ref; -} - -template -const EntryType & -DataStore::getEntry(EntryRef ref) const -{ - RefType intRef(ref); - const EntryType *be = this->template getEntry(intRef); - return *be; -} - -template -template -FreeListAllocator -DataStore::freeListAllocator() -{ - return FreeListAllocator(*this, 0); -} - -extern template class DataStoreT >; - -} - diff --git a/searchlib/src/vespa/searchlib/datastore/datastorebase.cpp b/searchlib/src/vespa/searchlib/datastore/datastorebase.cpp deleted file mode 100644 index 222e820546e..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/datastorebase.cpp +++ /dev/null @@ -1,500 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#include "datastore.h" -#include -#include - -#include -LOG_SETUP(".searchlib.datastore.datastorebase"); - -using vespalib::GenerationHeldBase; - -namespace search::datastore { - -namespace { - -/* - * Minimum dead bytes in active write buffer before switching to new - * active write buffer even if another active buffer has more dead - * bytes due to considering the active write buffer as too dead. - */ -constexpr size_t TOODEAD_SLACK = 0x4000u; - -/* - * Check if active write buffer is too dead for further use, i.e. if it - * is likely to be the worst buffer at next compaction. If so, filling it - * up completely will be wasted work, as data will have to be moved again - * rather soon. - */ -bool -activeWriteBufferTooDead(const BufferState &state) -{ - size_t deadElems = state.getDeadElems(); - size_t deadBytes = deadElems * state.getArraySize(); - return ((deadBytes >= TOODEAD_SLACK) && (deadElems * 2 >= state.size())); -} - -} - -DataStoreBase::FallbackHold::FallbackHold(size_t bytesSize, - BufferState::Alloc &&buffer, - size_t usedElems, - BufferTypeBase *typeHandler, - uint32_t typeId) - : GenerationHeldBase(bytesSize), - _buffer(std::move(buffer)), - _usedElems(usedElems), - _typeHandler(typeHandler), - _typeId(typeId) -{ -} - -DataStoreBase::FallbackHold::~FallbackHold() -{ - _typeHandler->destroyElements(_buffer.get(), _usedElems); -} - -class DataStoreBase::BufferHold : public GenerationHeldBase { - DataStoreBase &_dsb; - uint32_t _bufferId; - -public: - BufferHold(size_t bytesSize, DataStoreBase &dsb, uint32_t bufferId) - : GenerationHeldBase(bytesSize), - _dsb(dsb), - _bufferId(bufferId) - { - } - - ~BufferHold() override - { - _dsb.doneHoldBuffer(_bufferId); - } -}; - -DataStoreBase::DataStoreBase(uint32_t numBuffers, size_t maxArrays) - : _buffers(numBuffers), - _activeBufferIds(), - _states(numBuffers), - _typeHandlers(), - _freeListLists(), - _freeListsEnabled(false), - _initializing(false), - _elemHold1List(), - _elemHold2List(), - _numBuffers(numBuffers), - _maxArrays(maxArrays), - _genHolder() -{ -} - -DataStoreBase::~DataStoreBase() -{ - disableFreeLists(); - - assert(_elemHold1List.empty()); - assert(_elemHold2List.empty()); -} - -void -DataStoreBase::switchActiveBuffer(uint32_t typeId, size_t elemsNeeded) -{ - size_t activeBufferId = _activeBufferIds[typeId]; - do { - // start using next buffer - activeBufferId = nextBufferId(activeBufferId); - } while (!_states[activeBufferId].isFree()); - onActive(activeBufferId, typeId, elemsNeeded); - _activeBufferIds[typeId] = activeBufferId; -} - -void -DataStoreBase::switchOrGrowActiveBuffer(uint32_t typeId, size_t elemsNeeded) -{ - auto typeHandler = _typeHandlers[typeId]; - uint32_t 
arraySize = typeHandler->getArraySize(); - size_t numArraysForNewBuffer = typeHandler->getNumArraysForNewBuffer(); - size_t numEntriesForNewBuffer = numArraysForNewBuffer * arraySize; - uint32_t bufferId = _activeBufferIds[typeId]; - if (elemsNeeded + _states[bufferId].size() >= numEntriesForNewBuffer) { - // Don't try to resize existing buffer, new buffer will be large enough - switchActiveBuffer(typeId, elemsNeeded); - } else { - fallbackResize(bufferId, elemsNeeded); - } -} - -void -DataStoreBase::initActiveBuffers() -{ - uint32_t numTypes = _activeBufferIds.size(); - for (uint32_t typeId = 0; typeId < numTypes; ++typeId) { - size_t activeBufferId = 0; - while (!_states[activeBufferId].isFree()) { - // start using next buffer - activeBufferId = nextBufferId(activeBufferId); - } - onActive(activeBufferId, typeId, 0u); - _activeBufferIds[typeId] = activeBufferId; - } -} - -uint32_t -DataStoreBase::addType(BufferTypeBase *typeHandler) -{ - uint32_t typeId = _activeBufferIds.size(); - assert(typeId == _typeHandlers.size()); - typeHandler->clampMaxArrays(_maxArrays); - _activeBufferIds.push_back(0); - _typeHandlers.push_back(typeHandler); - _freeListLists.push_back(BufferState::FreeListList()); - return typeId; -} - -void -DataStoreBase::transferElemHoldList(generation_t generation) -{ - ElemHold2List &elemHold2List = _elemHold2List; - for (const ElemHold1ListElem & elemHold1 : _elemHold1List) { - elemHold2List.push_back(ElemHold2ListElem(elemHold1, generation)); - } - _elemHold1List.clear(); -} - -void -DataStoreBase::transferHoldLists(generation_t generation) -{ - _genHolder.transferHoldLists(generation); - if (hasElemHold1()) { - transferElemHoldList(generation); - } -} - -void -DataStoreBase::doneHoldBuffer(uint32_t bufferId) -{ - _states[bufferId].onFree(_buffers[bufferId].getBuffer()); -} - -void -DataStoreBase::trimHoldLists(generation_t usedGen) -{ - trimElemHoldList(usedGen); // Trim entries before trimming buffers - _genHolder.trimHoldLists(usedGen); -} - -void -DataStoreBase::clearHoldLists() -{ - transferElemHoldList(0); - clearElemHoldList(); - _genHolder.clearHoldLists(); -} - -void -DataStoreBase::dropBuffers() -{ - uint32_t numBuffers = _buffers.size(); - for (uint32_t bufferId = 0; bufferId < numBuffers; ++bufferId) { - _states[bufferId].dropBuffer(_buffers[bufferId].getBuffer()); - } - _genHolder.clearHoldLists(); -} - -vespalib::MemoryUsage -DataStoreBase::getMemoryUsage() const -{ - MemStats stats = getMemStats(); - vespalib::MemoryUsage usage; - usage.setAllocatedBytes(stats._allocBytes); - usage.setUsedBytes(stats._usedBytes); - usage.setDeadBytes(stats._deadBytes); - usage.setAllocatedBytesOnHold(stats._holdBytes); - return usage; -} - -void -DataStoreBase::holdBuffer(uint32_t bufferId) -{ - _states[bufferId].onHold(); - size_t holdBytes = 0u; // getMemStats() still accounts held buffers - GenerationHeldBase::UP hold(new BufferHold(holdBytes, *this, bufferId)); - _genHolder.hold(std::move(hold)); -} - -void -DataStoreBase::enableFreeLists() -{ - for (BufferState & bState : _states) { - if (!bState.isActive() || bState.getCompacting()) { - continue; - } - bState.setFreeListList(&_freeListLists[bState.getTypeId()]); - } - _freeListsEnabled = true; -} - -void -DataStoreBase::disableFreeLists() -{ - for (BufferState & bState : _states) { - bState.setFreeListList(nullptr); - } - _freeListsEnabled = false; -} - -void -DataStoreBase::enableFreeList(uint32_t bufferId) -{ - BufferState &state = _states[bufferId]; - if (_freeListsEnabled && - state.isActive() && - 
!state.getCompacting()) { - state.setFreeListList(&_freeListLists[state.getTypeId()]); - } -} - -void -DataStoreBase::disableFreeList(uint32_t bufferId) -{ - _states[bufferId].setFreeListList(nullptr); -} - -void -DataStoreBase::disableElemHoldList() -{ - for (auto &state : _states) { - if (!state.isFree()) { - state.disableElemHoldList(); - } - } -} - - -DataStoreBase::MemStats -DataStoreBase::getMemStats() const -{ - MemStats stats; - - for (const BufferState & bState: _states) { - auto typeHandler = bState.getTypeHandler(); - BufferState::State state = bState.getState(); - if ((state == BufferState::FREE) || (typeHandler == nullptr)) { - ++stats._freeBuffers; - } else if (state == BufferState::ACTIVE) { - size_t elementSize = typeHandler->elementSize(); - ++stats._activeBuffers; - stats._allocElems += bState.capacity(); - stats._usedElems += bState.size(); - stats._deadElems += bState.getDeadElems(); - stats._holdElems += bState.getHoldElems(); - stats._allocBytes += bState.capacity() * elementSize; - stats._usedBytes += (bState.size() * elementSize) + bState.getExtraUsedBytes(); - stats._deadBytes += bState.getDeadElems() * elementSize; - stats._holdBytes += (bState.getHoldElems() * elementSize) + bState.getExtraHoldBytes(); - } else if (state == BufferState::HOLD) { - size_t elementSize = typeHandler->elementSize(); - ++stats._holdBuffers; - stats._allocElems += bState.capacity(); - stats._usedElems += bState.size(); - stats._deadElems += bState.getDeadElems(); - stats._holdElems += bState.getHoldElems(); - stats._allocBytes += bState.capacity() * elementSize; - stats._usedBytes += (bState.size() * elementSize) + bState.getExtraUsedBytes(); - stats._deadBytes += bState.getDeadElems() * elementSize; - stats._holdBytes += (bState.getHoldElems() * elementSize) + bState.getExtraHoldBytes(); - } else { - LOG_ABORT("should not be reached"); - } - } - size_t genHolderHeldBytes = _genHolder.getHeldBytes(); - stats._holdBytes += genHolderHeldBytes; - stats._allocBytes += genHolderHeldBytes; - stats._usedBytes += genHolderHeldBytes; - return stats; -} - -vespalib::AddressSpace -DataStoreBase::getAddressSpaceUsage() const -{ - size_t usedArrays = 0; - size_t deadArrays = 0; - size_t limitArrays = 0; - for (const BufferState & bState: _states) { - if (bState.isActive()) { - uint32_t arraySize = bState.getArraySize(); - usedArrays += bState.size() / arraySize; - deadArrays += bState.getDeadElems() / arraySize; - limitArrays += bState.capacity() / arraySize; - } else if (bState.isOnHold()) { - uint32_t arraySize = bState.getArraySize(); - usedArrays += bState.size() / arraySize; - limitArrays += bState.capacity() / arraySize; - } else if (bState.isFree()) { - limitArrays += _maxArrays; - } else { - LOG_ABORT("should not be reached"); - } - } - return vespalib::AddressSpace(usedArrays, deadArrays, limitArrays); -} - -void -DataStoreBase::onActive(uint32_t bufferId, uint32_t typeId, size_t elemsNeeded) -{ - assert(typeId < _typeHandlers.size()); - assert(bufferId < _numBuffers); - _buffers[bufferId].setTypeId(typeId); - BufferState &state = _states[bufferId]; - state.onActive(bufferId, typeId, - _typeHandlers[typeId], - elemsNeeded, - _buffers[bufferId].getBuffer()); - enableFreeList(bufferId); -} - -std::vector -DataStoreBase::startCompact(uint32_t typeId) -{ - std::vector toHold; - - for (uint32_t bufferId = 0; bufferId < _numBuffers; ++bufferId) { - BufferState &state = getBufferState(bufferId); - if (state.isActive() && - state.getTypeId() == typeId && - !state.getCompacting()) { - 
state.setCompacting(); - toHold.push_back(bufferId); - disableFreeList(bufferId); - } - } - switchActiveBuffer(typeId, 0u); - return toHold; -} - -void -DataStoreBase::finishCompact(const std::vector &toHold) -{ - for (uint32_t bufferId : toHold) { - holdBuffer(bufferId); - } -} - -void -DataStoreBase::fallbackResize(uint32_t bufferId, size_t elemsNeeded) -{ - BufferState &state = getBufferState(bufferId); - BufferState::Alloc toHoldBuffer; - size_t oldUsedElems = state.size(); - size_t oldAllocElems = state.capacity(); - size_t elementSize = state.getTypeHandler()->elementSize(); - state.fallbackResize(bufferId, elemsNeeded, - _buffers[bufferId].getBuffer(), - toHoldBuffer); - GenerationHeldBase::UP - hold(new FallbackHold(oldAllocElems * elementSize, - std::move(toHoldBuffer), - oldUsedElems, - state.getTypeHandler(), - state.getTypeId())); - if (!_initializing) { - _genHolder.hold(std::move(hold)); - } -} - -uint32_t -DataStoreBase::startCompactWorstBuffer(uint32_t typeId) -{ - uint32_t activeBufferId = getActiveBufferId(typeId); - const BufferTypeBase *typeHandler = _typeHandlers[typeId]; - assert(typeHandler->getActiveBuffers() >= 1u); - if (typeHandler->getActiveBuffers() == 1u) { - // Single active buffer for type, no need for scan - _states[activeBufferId].setCompacting(); - _states[activeBufferId].disableElemHoldList(); - disableFreeList(activeBufferId); - switchActiveBuffer(typeId, 0u); - return activeBufferId; - } - // Multiple active buffers for type, must perform full scan - return startCompactWorstBuffer(activeBufferId, - [=](const BufferState &state) { return state.isActive(typeId); }); -} - -template -uint32_t -DataStoreBase::startCompactWorstBuffer(uint32_t initWorstBufferId, BufferStateActiveFilter &&filterFunc) -{ - uint32_t worstBufferId = initWorstBufferId; - size_t worstDeadElems = 0; - for (uint32_t bufferId = 0; bufferId < _numBuffers; ++bufferId) { - const auto &state = getBufferState(bufferId); - if (filterFunc(state)) { - size_t deadElems = state.getDeadElems() - state.getTypeHandler()->getReservedElements(bufferId); - if (deadElems > worstDeadElems) { - worstBufferId = bufferId; - worstDeadElems = deadElems; - } - } - } - markCompacting(worstBufferId); - return worstBufferId; -} - -void -DataStoreBase::markCompacting(uint32_t bufferId) -{ - auto &state = getBufferState(bufferId); - uint32_t typeId = state.getTypeId(); - uint32_t activeBufferId = getActiveBufferId(typeId); - if ((bufferId == activeBufferId) || activeWriteBufferTooDead(getBufferState(activeBufferId))) { - switchActiveBuffer(typeId, 0u); - } - state.setCompacting(); - state.disableElemHoldList(); - state.setFreeListList(nullptr); -} - -std::vector -DataStoreBase::startCompactWorstBuffers(bool compactMemory, bool compactAddressSpace) -{ - constexpr uint32_t noBufferId = std::numeric_limits::max(); - uint32_t worstMemoryBufferId = noBufferId; - uint32_t worstAddressSpaceBufferId = noBufferId; - size_t worstDeadElems = 0; - size_t worstDeadArrays = 0; - for (uint32_t bufferId = 0; bufferId < _numBuffers; ++bufferId) { - const auto &state = getBufferState(bufferId); - if (state.isActive()) { - auto typeHandler = state.getTypeHandler(); - uint32_t arraySize = typeHandler->getArraySize(); - uint32_t reservedElements = typeHandler->getReservedElements(bufferId); - size_t deadElems = state.getDeadElems() - reservedElements; - if (compactMemory && deadElems > worstDeadElems) { - worstMemoryBufferId = bufferId; - worstDeadElems = deadElems; - } - if (compactAddressSpace) { - size_t deadArrays = deadElems / 
arraySize; - if (deadArrays > worstDeadArrays) { - worstAddressSpaceBufferId = bufferId; - worstDeadArrays = deadArrays; - } - } - } - } - std::vector result; - if (worstMemoryBufferId != noBufferId) { - markCompacting(worstMemoryBufferId); - result.emplace_back(worstMemoryBufferId); - } - if (worstAddressSpaceBufferId != noBufferId && - worstAddressSpaceBufferId != worstMemoryBufferId) { - markCompacting(worstAddressSpaceBufferId); - result.emplace_back(worstAddressSpaceBufferId); - } - return result; -} - -} - diff --git a/searchlib/src/vespa/searchlib/datastore/datastorebase.h b/searchlib/src/vespa/searchlib/datastore/datastorebase.h deleted file mode 100644 index 4886237194f..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/datastorebase.h +++ /dev/null @@ -1,360 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "bufferstate.h" -#include -#include -#include -#include -#include - -namespace search::datastore { - -/** - * Abstract class used to store data of potential different types in underlying memory buffers. - * - * Reference to stored data is via a 32-bit handle (EntryRef). - */ -class DataStoreBase -{ -public: - // Hold list before freeze, before knowing how long elements must be held - class ElemHold1ListElem - { - public: - EntryRef _ref; - size_t _len; // Aligned length - - ElemHold1ListElem(EntryRef ref, size_t len) - : _ref(ref), - _len(len) - { } - }; - -protected: - typedef vespalib::GenerationHandler::generation_t generation_t; - typedef vespalib::GenerationHandler::sgeneration_t sgeneration_t; - -private: - class BufferAndTypeId { - public: - using B = void *; - BufferAndTypeId() : BufferAndTypeId(nullptr, 0) { } - BufferAndTypeId(B buffer, uint32_t typeId) : _buffer(buffer), _typeId(typeId) { } - B getBuffer() const { return _buffer; } - B & getBuffer() { return _buffer; } - uint32_t getTypeId() const { return _typeId; } - void setTypeId(uint32_t typeId) { _typeId = typeId; } - private: - B _buffer; - uint32_t _typeId; - }; - std::vector _buffers; // For fast mapping with known types -protected: - std::vector _activeBufferIds; // typeId -> active buffer - - void * getBuffer(uint32_t bufferId) { return _buffers[bufferId].getBuffer(); } - // Hold list at freeze, when knowing how long elements must be held - class ElemHold2ListElem : public ElemHold1ListElem - { - public: - generation_t _generation; - - ElemHold2ListElem(const ElemHold1ListElem &hold1, generation_t generation) - : ElemHold1ListElem(hold1), - _generation(generation) - { } - }; - - typedef vespalib::Array ElemHold1List; - typedef std::deque ElemHold2List; - - class FallbackHold : public vespalib::GenerationHeldBase - { - public: - BufferState::Alloc _buffer; - size_t _usedElems; - BufferTypeBase *_typeHandler; - uint32_t _typeId; - - FallbackHold(size_t bytesSize, BufferState::Alloc &&buffer, size_t usedElems, - BufferTypeBase *typeHandler, uint32_t typeId); - - ~FallbackHold() override; - }; - - class BufferHold; - -public: - class MemStats - { - public: - size_t _allocElems; - size_t _usedElems; - size_t _deadElems; - size_t _holdElems; - size_t _allocBytes; - size_t _usedBytes; - size_t _deadBytes; - size_t _holdBytes; - uint32_t _freeBuffers; - uint32_t _activeBuffers; - uint32_t _holdBuffers; - - MemStats() - : _allocElems(0), - _usedElems(0), - _deadElems(0), - _holdElems(0), - _allocBytes(0), - _usedBytes(0), - _deadBytes(0), - _holdBytes(0), - _freeBuffers(0), - _activeBuffers(0), - 
_holdBuffers(0) - { } - - MemStats & - operator+=(const MemStats &rhs) - { - _allocElems += rhs._allocElems; - _usedElems += rhs._usedElems; - _deadElems += rhs._deadElems; - _holdElems += rhs._holdElems; - _allocBytes += rhs._allocBytes; - _usedBytes += rhs._usedBytes; - _deadBytes += rhs._deadBytes; - _holdBytes += rhs._holdBytes; - _freeBuffers += rhs._freeBuffers; - _activeBuffers += rhs._activeBuffers; - _holdBuffers += rhs._holdBuffers; - return *this; - } - }; - -private: - std::vector _states; -protected: - std::vector _typeHandlers; // TypeId -> handler - - std::vector _freeListLists; - bool _freeListsEnabled; - bool _initializing; - - ElemHold1List _elemHold1List; - ElemHold2List _elemHold2List; - - const uint32_t _numBuffers; - const size_t _maxArrays; - - vespalib::GenerationHolder _genHolder; - - DataStoreBase(uint32_t numBuffers, size_t maxArrays); - DataStoreBase(const DataStoreBase &) = delete; - DataStoreBase &operator=(const DataStoreBase &) = delete; - - virtual ~DataStoreBase(); - - /** - * Get next buffer id - * - * @param bufferId current buffer id - * @return next buffer id - */ - uint32_t nextBufferId(uint32_t bufferId) { - uint32_t ret = bufferId + 1; - if (ret == _numBuffers) - ret = 0; - return ret; - } - - /** - * Get active buffer - * - * @return active buffer - */ - void *activeBuffer(uint32_t typeId) { - return _buffers[_activeBufferIds[typeId]].getBuffer(); - } - - /** - * Trim elem hold list, freeing elements that no longer needs to be held. - * - * @param usedGen lowest generation that is still used. - */ - virtual void trimElemHoldList(generation_t usedGen) = 0; - - virtual void clearElemHoldList() = 0; - - template - uint32_t startCompactWorstBuffer(uint32_t initWorstBufferId, BufferStateActiveFilter &&filterFunc); - void markCompacting(uint32_t bufferId); -public: - uint32_t addType(BufferTypeBase *typeHandler); - void initActiveBuffers(); - - /** - * Ensure that active buffer has a given number of elements free at end. - * Switch to new buffer if current buffer is too full. - * - * @param typeId registered data type for buffer. - * @param elemsNeeded Number of elements needed to be free - */ - void ensureBufferCapacity(uint32_t typeId, size_t elemsNeeded) { - if (__builtin_expect(elemsNeeded > - _states[_activeBufferIds[typeId]].remaining(), - false)) { - switchOrGrowActiveBuffer(typeId, elemsNeeded); - } - } - - /** - * Put buffer on hold list, as part of compaction. - * - * @param bufferId Id of buffer to be held. - */ - void holdBuffer(uint32_t bufferId); - - /** - * Switch to new active buffer, typically in preparation for compaction - * or when current active buffer no longer has free space. - * - * @param typeId registered data type for buffer. - * @param elemsNeeded Number of elements needed to be free - */ - void switchActiveBuffer(uint32_t typeId, size_t elemsNeeded); - - void switchOrGrowActiveBuffer(uint32_t typeId, size_t elemsNeeded); - - vespalib::MemoryUsage getMemoryUsage() const; - - vespalib::AddressSpace getAddressSpaceUsage() const; - - /** - * Get active buffer id for the given type id. - */ - uint32_t getActiveBufferId(uint32_t typeId) const { return _activeBufferIds[typeId]; } - const BufferState &getBufferState(uint32_t bufferId) const { return _states[bufferId]; } - BufferState &getBufferState(uint32_t bufferId) { return _states[bufferId]; } - uint32_t getNumBuffers() const { return _numBuffers; } - bool hasElemHold1() const { return !_elemHold1List.empty(); } - - /** - * Transfer element holds from hold1 list to hold2 list. 
- */ - void transferElemHoldList(generation_t generation); - - /** - * Transfer holds from hold1 to hold2 lists, assigning generation. - */ - void transferHoldLists(generation_t generation); - - /** - * Hold of buffer has ended. - */ - void doneHoldBuffer(uint32_t bufferId); - - /** - * Trim hold lists, freeing buffers that no longer needs to be held. - * - * @param usedGen lowest generation that is still used. - */ - void trimHoldLists(generation_t usedGen); - - void clearHoldLists(); - - template - EntryType *getEntry(RefType ref) { - return static_cast(_buffers[ref.bufferId()].getBuffer()) + ref.offset(); - } - - template - const EntryType *getEntry(RefType ref) const { - return static_cast(_buffers[ref.bufferId()].getBuffer()) + ref.offset(); - } - - template - EntryType *getEntryArray(RefType ref, size_t arraySize) { - return static_cast(_buffers[ref.bufferId()].getBuffer()) + (ref.offset() * arraySize); - } - - template - const EntryType *getEntryArray(RefType ref, size_t arraySize) const { - return static_cast(_buffers[ref.bufferId()].getBuffer()) + (ref.offset() * arraySize); - } - - void dropBuffers(); - - - void incDead(uint32_t bufferId, size_t deadElems) { - BufferState &state = _states[bufferId]; - state.incDeadElems(deadElems); - } - - /** - * Enable free list management. This only works for fixed size elements. - */ - void enableFreeLists(); - - /** - * Disable free list management. - */ - void disableFreeLists(); - - /** - * Enable free list management. This only works for fixed size elements. - */ - void enableFreeList(uint32_t bufferId); - - /** - * Disable free list management. - */ - void disableFreeList(uint32_t bufferId); - void disableElemHoldList(); - - /** - * Returns the free list for the given type id. - */ - BufferState::FreeListList &getFreeList(uint32_t typeId) { - return _freeListLists[typeId]; - } - - MemStats getMemStats() const; - - /* - * Assume that no readers are present while data structure is being - * intialized. - */ - void setInitializing(bool initializing) { _initializing = initializing; } - -private: - /** - * Switch buffer state to active. - * - * @param bufferId Id of buffer to be active. - * @param typeId registered data type for buffer. - * @param elemsNeeded Number of elements needed to be free - */ - void onActive(uint32_t bufferId, uint32_t typeId, size_t elemsNeeded); - -public: - uint32_t getTypeId(uint32_t bufferId) const { - return _buffers[bufferId].getTypeId(); - } - - std::vector startCompact(uint32_t typeId); - - void finishCompact(const std::vector &toHold); - void fallbackResize(uint32_t bufferId, size_t elementsNeeded); - - vespalib::GenerationHolder &getGenerationHolder() { - return _genHolder; - } - - uint32_t startCompactWorstBuffer(uint32_t typeId); - std::vector startCompactWorstBuffers(bool compactMemory, bool compactAddressSpace); -}; - -} diff --git a/searchlib/src/vespa/searchlib/datastore/entryref.cpp b/searchlib/src/vespa/searchlib/datastore/entryref.cpp deleted file mode 100644 index 649bfa7e4e9..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/entryref.cpp +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2019 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
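
Aside (not part of this patch): the compaction entry points declared above (startCompact(), finishCompact(), getCompacting(), together with the hold-list calls) are meant to be used as one sequence. The sketch below assumes a build against this module and shows the usual relocate-then-hold flow for a store with a single value type; generation numbers are illustrative.

// Compaction sketch: mark old buffers as compacting, copy live entries into
// the new active buffer, then hold and eventually free the old buffers.
#include <vespa/searchlib/datastore/datastore.hpp>
#include <cstdint>
#include <vector>

using search::datastore::DataStore;
using search::datastore::EntryRef;

int main()
{
    DataStore<int> store;
    std::vector<EntryRef> refs;
    for (int i = 0; i < 1000; ++i) {
        refs.push_back(store.addEntry(i));
    }

    // Mark the buffers of type 0 as compacting; a fresh active buffer is
    // switched in for new writes.
    std::vector<uint32_t> toHold = store.startCompact(0);

    // Relocate live entries out of the compacting buffers.
    for (EntryRef &ref : refs) {
        if (store.getCompacting(ref)) {
            int value = store.getEntry(ref);
            ref = store.addEntry(value);
        }
    }

    store.finishCompact(toHold);   // put the old buffers on hold
    store.transferHoldLists(20);   // writer generation when compaction ended
    store.trimHoldLists(21);       // no readers left on the old buffers
    return 0;
}
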
- -#include "entryref.hpp" - -namespace search::datastore { - -template EntryRefT<24u, 8u>::EntryRefT(size_t, uint32_t); -template EntryRefT<31u, 1u>::EntryRefT(size_t, uint32_t); -template EntryRefT<22u,10u>::EntryRefT(size_t, uint32_t); -template EntryRefT<19u,13u>::EntryRefT(size_t, uint32_t); -template EntryRefT<18u, 6u>::EntryRefT(size_t, uint32_t); -template EntryRefT<15u,17u>::EntryRefT(size_t, uint32_t); -template EntryRefT<10u,22u>::EntryRefT(size_t, uint32_t); -template EntryRefT<10u,10u>::EntryRefT(size_t, uint32_t); -template EntryRefT< 3u, 2u>::EntryRefT(size_t, uint32_t); - -} diff --git a/searchlib/src/vespa/searchlib/datastore/entryref.h b/searchlib/src/vespa/searchlib/datastore/entryref.h deleted file mode 100644 index a582d2020f9..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/entryref.h +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include -#include - -namespace search::datastore { - -class EntryRef { -protected: - uint32_t _ref; -public: - EntryRef() : _ref(0u) { } - explicit EntryRef(uint32_t ref_) : _ref(ref_) { } - uint32_t ref() const { return _ref; } - bool valid() const { return _ref != 0u; } - bool operator==(const EntryRef &rhs) const { return _ref == rhs._ref; } - bool operator!=(const EntryRef &rhs) const { return _ref != rhs._ref; } - bool operator <(const EntryRef &rhs) const { return _ref < rhs._ref; } -}; - -/** - * Class for entry reference where we use OffsetBits bits for offset into buffer, - * and (32 - OffsetBits) bits for buffer id. - **/ -template -class EntryRefT : public EntryRef { -public: - EntryRefT() : EntryRef() {} - EntryRefT(size_t offset_, uint32_t bufferId_); - EntryRefT(const EntryRef & ref_) : EntryRef(ref_.ref()) {} - uint32_t hash() const { return offset() + (bufferId() << OffsetBits); } - size_t offset() const { return _ref >> BufferBits; } - uint32_t bufferId() const { return _ref & (numBuffers() - 1); } - static size_t offsetSize() { return 1ul << OffsetBits; } - static uint32_t numBuffers() { return 1 << BufferBits; } - static size_t align(size_t val) { return val; } - static size_t pad(size_t val) { (void) val; return 0ul; } - static constexpr bool isAlignedType = false; -}; - -/** - * Class for entry reference that is similar to EntryRefT, - * except that we use (2^OffsetAlign) byte alignment on the offset. - **/ -template -class AlignedEntryRefT : public EntryRefT { -private: - typedef EntryRefT ParentType; - static const uint32_t PadConstant = ((1 << OffsetAlign) - 1); -public: - AlignedEntryRefT() : ParentType() {} - AlignedEntryRefT(size_t offset_, uint32_t bufferId_) : - ParentType(align(offset_) >> OffsetAlign, bufferId_) {} - AlignedEntryRefT(const EntryRef & ref_) : ParentType(ref_) {} - size_t offset() const { return ParentType::offset() << OffsetAlign; } - static size_t offsetSize() { return ParentType::offsetSize() << OffsetAlign; } - static size_t align(size_t val) { return val + pad(val); } - static size_t pad(size_t val) { return (-val & PadConstant); } - static constexpr bool isAlignedType = true; -}; - -} diff --git a/searchlib/src/vespa/searchlib/datastore/entryref.hpp b/searchlib/src/vespa/searchlib/datastore/entryref.hpp deleted file mode 100644 index 6e8b94f8989..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/entryref.hpp +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2019 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. 
See LICENSE in the project root. - -#pragma once - -#include "entryref.h" -#include - -namespace search::datastore { - -template -EntryRefT::EntryRefT(size_t offset_, uint32_t bufferId_) : - EntryRef((offset_ << BufferBits) + bufferId_) -{ - ASSERT_ONCE_OR_LOG(offset_ < offsetSize(), "EntryRefT.offset_overflow", 10000); - ASSERT_ONCE_OR_LOG(bufferId_ < numBuffers(), "EntryRefT.bufferId_overflow", 10000); -} - -} diff --git a/searchlib/src/vespa/searchlib/datastore/free_list_allocator.h b/searchlib/src/vespa/searchlib/datastore/free_list_allocator.h deleted file mode 100644 index a23cb71b90c..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/free_list_allocator.h +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "allocator.h" - -namespace search::datastore { - -/** - * Allocator used to allocate entries of a specific type in an underlying data store - * and uses free lists if available. - */ -template -class FreeListAllocator : public Allocator -{ -public: - using ParentType = Allocator; - using HandleType = typename ParentType::HandleType; - using ConstArrayRef = typename ParentType::ConstArrayRef; - -private: - using ParentType::_store; - using ParentType::_typeId; - -public: - FreeListAllocator(DataStoreBase &store, uint32_t typeId); - - template - HandleType alloc(Args && ... args); - - HandleType allocArray(ConstArrayRef array); - HandleType allocArray(size_t size); -}; - -} diff --git a/searchlib/src/vespa/searchlib/datastore/free_list_allocator.hpp b/searchlib/src/vespa/searchlib/datastore/free_list_allocator.hpp deleted file mode 100644 index 402fbe26725..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/free_list_allocator.hpp +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "free_list_allocator.h" -#include "bufferstate.h" - -namespace search::datastore { - -template -FreeListAllocator::FreeListAllocator(DataStoreBase &store, uint32_t typeId) - : ParentType(store, typeId) -{ -} - -namespace allocator { - -template -struct Assigner { - static void assign(EntryT &entry, Args && ... args) { - entry = EntryT(std::forward(args)...); - } -}; - -template -struct Assigner { - static void assign(EntryT &entry) { - (void) entry; - } -}; - -// Assignment operator -template -struct Assigner { - static void assign(EntryT &entry, const EntryT &rhs) { - entry = rhs; - } -}; - -// Move assignment -template -struct Assigner { - static void assign(EntryT &entry, EntryT &&rhs) { - entry = std::move(rhs); - } -}; - -} - -template -template -typename Allocator::HandleType -FreeListAllocator::alloc(Args && ... 
args) -{ - BufferState::FreeListList &freeListList = _store.getFreeList(_typeId); - if (freeListList._head == NULL) { - return ParentType::alloc(std::forward(args)...); - } - BufferState &state = *freeListList._head; - assert(state.isActive()); - RefT ref = state.popFreeList(); - EntryT *entry = _store.template getEntry(ref); - ReclaimerT::reclaim(entry); - allocator::Assigner::assign(*entry, std::forward(args)...); - return HandleType(ref, entry); -} - -template -typename Allocator::HandleType -FreeListAllocator::allocArray(ConstArrayRef array) -{ - BufferState::FreeListList &freeListList = _store.getFreeList(_typeId); - if (freeListList._head == NULL) { - return ParentType::allocArray(array); - } - BufferState &state = *freeListList._head; - assert(state.isActive()); - assert(state.getArraySize() == array.size()); - RefT ref(state.popFreeList()); - EntryT *buf = _store.template getEntryArray(ref, array.size()); - for (size_t i = 0; i < array.size(); ++i) { - *(buf + i) = array[i]; - } - return HandleType(ref, buf); -} - -template -typename Allocator::HandleType -FreeListAllocator::allocArray(size_t size) -{ - BufferState::FreeListList &freeListList = _store.getFreeList(_typeId); - if (freeListList._head == NULL) { - return ParentType::allocArray(size); - } - BufferState &state = *freeListList._head; - assert(state.isActive()); - assert(state.getArraySize() == size); - RefT ref(state.popFreeList()); - EntryT *buf = _store.template getEntryArray(ref, size); - return HandleType(ref, buf); -} - -} - diff --git a/searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.h b/searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.h deleted file mode 100644 index 514eecc25a8..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.h +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "raw_allocator.h" - -namespace search::datastore { - -/** - * Allocator used to allocate raw buffers (EntryT *) in an underlying data store - * with no construction or de-construction of elements in the buffer. Uses free lists if available. - * - * If free lists are enabled this allocator should only be used when - * allocating the same number of elements each time (equal to cluster size). - */ -template -class FreeListRawAllocator : public RawAllocator -{ -public: - using ParentType = RawAllocator; - using HandleType = typename ParentType::HandleType; - -private: - using ParentType::_store; - using ParentType::_typeId; - -public: - FreeListRawAllocator(DataStoreBase &store, uint32_t typeId); - - HandleType alloc(size_t numElems); -}; - -} diff --git a/searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.hpp b/searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.hpp deleted file mode 100644 index 0e97d6a3c33..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/free_list_raw_allocator.hpp +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
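
(Illustration, not part of the patch.) entryref.h and entryref.hpp above pack a buffer id and an offset into a single 32-bit reference: the low BufferBits bits hold the buffer id, the bits above them hold the offset, and the explicit instantiations in entryref.cpp list the splits in actual use (24+8, 22+10, 10+22, ...). Below is a standalone sketch of that packing with purely local names.

    // Standalone illustration of the EntryRefT packing shown above.
    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    template <uint32_t OffsetBits, uint32_t BufferBits = 32u - OffsetBits>
    struct PackedRef {
        uint32_t ref;

        PackedRef(size_t offset, uint32_t bufferId)
            : ref(static_cast<uint32_t>((offset << BufferBits) + bufferId))
        {
            assert(offset < (size_t(1) << OffsetBits));   // offset must fit
            assert(bufferId < (1u << BufferBits));        // buffer id must fit
        }
        size_t   offset()   const { return ref >> BufferBits; }
        uint32_t bufferId() const { return ref & ((1u << BufferBits) - 1); }
    };

    // E.g. the <22,10> split gives up to 1024 buffers with up to 4Mi addressable
    // units each. For non-aligned refs the "unit" is one array of getArraySize()
    // elements, which is why getEntryArray() in datastorebase.h multiplies
    // ref.offset() by arraySize.
    static_assert(sizeof(PackedRef<22, 10>) == sizeof(uint32_t), "fits in 32 bits");

AlignedEntryRefT in the same header stores the offset divided by 2^OffsetAlign and scales it back up on access, so aligned stores trade offset granularity for a larger addressable range.
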
- -#pragma once - -#include "free_list_raw_allocator.h" - -namespace search::datastore { - -template -FreeListRawAllocator::FreeListRawAllocator(DataStoreBase &store, uint32_t typeId) - : ParentType(store, typeId) -{ -} - -template -typename FreeListRawAllocator::HandleType -FreeListRawAllocator::alloc(size_t numElems) -{ - BufferState::FreeListList &freeListList = _store.getFreeList(_typeId); - if (freeListList._head == nullptr) { - return ParentType::alloc(numElems); - } - BufferState &state = *freeListList._head; - assert(state.isActive()); - assert(state.getArraySize() == numElems); - RefT ref = state.popFreeList(); - // If entry ref is not aligned we must scale the offset according to array size as it was divided when the entry ref was created. - EntryT *entry = !RefT::isAlignedType ? - _store.template getEntryArray(ref, state.getArraySize()) : - _store.template getEntry(ref); - return HandleType(ref, entry); -} - -} - diff --git a/searchlib/src/vespa/searchlib/datastore/handle.h b/searchlib/src/vespa/searchlib/datastore/handle.h deleted file mode 100644 index 49eb4843816..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/handle.h +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "entryref.h" - -namespace search::datastore { - -/** - * Handle to data allocated in a data store and a EntryRef used for read-only access to data later. - */ -template -struct Handle -{ - EntryRef ref; - EntryT *data; - Handle(EntryRef ref_, EntryT *data_) : ref(ref_), data(data_) {} - Handle() : ref(), data() {} - bool operator==(const Handle &rhs) const { - return ref == rhs.ref && - data == rhs.data; - } -}; - -} diff --git a/searchlib/src/vespa/searchlib/datastore/i_compaction_context.h b/searchlib/src/vespa/searchlib/datastore/i_compaction_context.h deleted file mode 100644 index aa537968f1c..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/i_compaction_context.h +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include - -namespace search::datastore { - -/** - * A compaction context is used when performing a compaction of data buffers in a data store. - * - * All entry refs pointing to allocated data in the store must be passed to the compaction context - * such that these can be updated according to the buffer compaction that happens internally. - */ -struct ICompactionContext { - using UP = std::unique_ptr; - virtual ~ICompactionContext() {} - virtual void compact(vespalib::ArrayRef refs) = 0; -}; - -} diff --git a/searchlib/src/vespa/searchlib/datastore/raw_allocator.h b/searchlib/src/vespa/searchlib/datastore/raw_allocator.h deleted file mode 100644 index b7c00f75580..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/raw_allocator.h +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "datastorebase.h" -#include "entryref.h" -#include "handle.h" - -namespace search::datastore { - -/** - * Allocator used to allocate raw buffers (EntryT *) in an underlying data store - * with no construction or de-construction of elements in the buffer. 
- */ -template -class RawAllocator -{ -public: - using HandleType = Handle; - -protected: - DataStoreBase &_store; - uint32_t _typeId; - -public: - RawAllocator(DataStoreBase &store, uint32_t typeId); - - HandleType alloc(size_t numElems) { - return alloc(numElems, 0); - } - HandleType alloc(size_t numElems, size_t extraElems); -}; - -} diff --git a/searchlib/src/vespa/searchlib/datastore/raw_allocator.hpp b/searchlib/src/vespa/searchlib/datastore/raw_allocator.hpp deleted file mode 100644 index 9b86305a634..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/raw_allocator.hpp +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "raw_allocator.h" -#include "bufferstate.h" - -namespace search::datastore { - -template -RawAllocator::RawAllocator(DataStoreBase &store, uint32_t typeId) - : _store(store), - _typeId(typeId) -{ -} - -template -typename RawAllocator::HandleType -RawAllocator::alloc(size_t numElems, size_t extraElems) -{ - _store.ensureBufferCapacity(_typeId, numElems + extraElems); - uint32_t activeBufferId = _store.getActiveBufferId(_typeId); - BufferState &state = _store.getBufferState(activeBufferId); - assert(state.isActive()); - size_t oldBufferSize = state.size(); - if (RefT::isAlignedType) { - // AlignedEntryRef constructor scales down offset by alignment - RefT ref(oldBufferSize, activeBufferId); - EntryT *buffer = _store.getEntry(ref); - state.pushed_back(numElems); - return HandleType(ref, buffer); - } else { - // Must perform scaling ourselves, according to array size - size_t arraySize = state.getArraySize(); - assert((numElems % arraySize) == 0u); - RefT ref((oldBufferSize / arraySize), activeBufferId); - EntryT *buffer = _store.getEntryArray(ref, arraySize); - state.pushed_back(numElems); - return HandleType(ref, buffer); - } -} - -} - diff --git a/searchlib/src/vespa/searchlib/datastore/unique_store.h b/searchlib/src/vespa/searchlib/datastore/unique_store.h deleted file mode 100644 index cc261acd53f..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/unique_store.h +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "buffer_type.h" -#include "bufferstate.h" -#include "datastore.h" -#include "entryref.h" -#include "i_compaction_context.h" -#include -#include - -namespace search::datastore { - -template -class UniqueStoreBuilder; - -template -class UniqueStoreSaver; - -/** - * Datastore for unique values of type EntryT that is accessed via a - * 32-bit EntryRef. - */ -template > -class UniqueStore -{ -public: - using DataStoreType = DataStoreT; - using EntryType = EntryT; - using RefType = RefT; - using Saver = UniqueStoreSaver; - using Builder = UniqueStoreBuilder; - /* - * Compare two values in data store based on reference. Invalid - * reference is mapped to local value reference to support - * comparing with new value candidate outside data store. 
- */ - class Compare { - const DataStoreType &_store; - const EntryType &_value; -public: - Compare(const DataStoreType &store, const EntryType &value) - : _store(store), - _value(value) - { - } - inline const EntryType &get(EntryRef ref) const { - if (ref.valid()) { - RefType iRef(ref); - return *_store.template getEntry(iRef); - } else { - return _value; - } - } - inline bool operator()(const EntryRef lhs, const EntryRef rhs) const - { - const EntryType &lhsValue = get(lhs); - const EntryType &rhsValue = get(rhs); - return lhsValue < rhsValue; - } - }; - using UniqueStoreBufferType = BufferType; - using DictionaryTraits = btree::BTreeTraits<32, 32, 7, true>; - using Dictionary = btree::BTree; - class AddResult { - EntryRef _ref; - bool _inserted; - public: - AddResult(EntryRef ref_, bool inserted_) - : _ref(ref_), - _inserted(inserted_) - { - } - EntryRef ref() const { return _ref; } - bool inserted() { return _inserted; } - }; -private: - DataStoreType _store; - UniqueStoreBufferType _typeHandler; - uint32_t _typeId; - Dictionary _dict; - using generation_t = vespalib::GenerationHandler::generation_t; - -public: - UniqueStore(); - ~UniqueStore(); - EntryRef move(EntryRef ref); - AddResult add(const EntryType &value); - EntryRef find(const EntryType &value); - const EntryType &get(EntryRef ref) const - { - RefType iRef(ref); - return *_store.template getEntry(iRef); - } - void remove(EntryRef ref); - ICompactionContext::UP compactWorst(); - vespalib::MemoryUsage getMemoryUsage() const; - - // Pass on hold list management to underlying store - void transferHoldLists(generation_t generation); - void trimHoldLists(generation_t firstUsed); - vespalib::GenerationHolder &getGenerationHolder() { return _store.getGenerationHolder(); } - void setInitializing(bool initializing) { _store.setInitializing(initializing); } - void freeze(); - uint32_t getNumUniques() const; - - Builder getBuilder(uint32_t uniqueValuesHint); - Saver getSaver() const; - - // Should only be used for unit testing - const BufferState &bufferState(EntryRef ref) const; -}; - -} diff --git a/searchlib/src/vespa/searchlib/datastore/unique_store.hpp b/searchlib/src/vespa/searchlib/datastore/unique_store.hpp deleted file mode 100644 index e86edb9d3d4..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/unique_store.hpp +++ /dev/null @@ -1,254 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
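
(Illustration, not part of the patch.) unique_store.h above declares the moving parts: a DataStoreT holding one copy of each value, a B-tree dictionary keyed on EntryRef whose Compare orders refs by dereferencing them through the store (with the invalid ref standing in for a not-yet-stored candidate), and a per-value reference count in the dictionary data. The toy below models only the observable behaviour of add()/remove()/get(); it substitutes std::map for the B-tree and plain indices for entry refs, so it is a simplified stand-in, not the real algorithm.

    // Toy model of the unique-store contract declared above (hypothetical, simplified).
    #include <cstdint>
    #include <map>
    #include <string>
    #include <vector>

    class ToyUniqueStore {
        struct Slot { std::string value; uint32_t refCount = 0; };
        std::vector<Slot> _slots = std::vector<Slot>(1);  // slot 0 reserved: ref 0 == invalid
        std::map<std::string, uint32_t> _dict;            // value -> ref; the real dictionary
                                                          // keys on EntryRef and orders it by
                                                          // comparing through the data store
    public:
        struct AddResult { uint32_t ref; bool inserted; };

        AddResult add(const std::string &value) {
            auto [it, inserted] = _dict.try_emplace(value, 0u);
            if (inserted) {
                it->second = static_cast<uint32_t>(_slots.size());
                _slots.push_back(Slot{value, 1u});        // first occurrence: store it
            } else {
                ++_slots[it->second].refCount;            // existing value: bump refcount
            }
            return {it->second, inserted};
        }

        void remove(uint32_t ref) {
            Slot &slot = _slots[ref];
            if (--slot.refCount == 0) {
                _dict.erase(slot.value);                  // last user gone: drop dictionary entry
            }
        }

        const std::string &get(uint32_t ref) const { return _slots[ref].value; }
    };

The implementation that follows additionally keeps a removed element on the hold list (holdElem) instead of freeing it immediately, so readers of older generations stay valid.
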
- -#pragma once - -#include "unique_store.h" -#include "datastore.hpp" -#include -#include -#include -#include -#include -#include -#include -#include "unique_store_builder.hpp" -#include "unique_store_saver.hpp" -#include - -namespace search::datastore { - -constexpr size_t NUM_ARRAYS_FOR_NEW_UNIQUESTORE_BUFFER = 1024u; -constexpr float ALLOC_GROW_FACTOR = 0.2; - -template -UniqueStore::UniqueStore() - : _store(), - _typeHandler(1, 2u, RefT::offsetSize(), NUM_ARRAYS_FOR_NEW_UNIQUESTORE_BUFFER, ALLOC_GROW_FACTOR), - _typeId(0), - _dict() -{ - _typeId = _store.addType(&_typeHandler); - assert(_typeId == 0u); - _store.initActiveBuffers(); -} - -template -UniqueStore::~UniqueStore() -{ - _store.clearHoldLists(); - _store.dropBuffers(); -} - -template -typename UniqueStore::AddResult -UniqueStore::add(const EntryType &value) -{ - Compare comp(_store, value); - auto itr = _dict.lowerBound(RefType(), comp); - if (itr.valid() && !comp(EntryRef(), itr.getKey())) { - uint32_t refCount = itr.getData(); - assert(refCount != std::numeric_limits::max()); - itr.writeData(refCount + 1); - RefType iRef(itr.getKey()); - return AddResult(itr.getKey(), false); - - } else { - EntryRef newRef = _store.template allocator(_typeId).alloc(value).ref; - _dict.insert(itr, newRef, 1u); - return AddResult(newRef, true); - } -} - -template -EntryRef -UniqueStore::find(const EntryType &value) -{ - Compare comp(_store, value); - auto itr = _dict.lowerBound(RefType(), comp); - if (itr.valid() && !comp(EntryRef(), itr.getKey())) { - return itr.getKey(); - } else { - return EntryRef(); - } -} - -template -EntryRef -UniqueStore::move(EntryRef ref) -{ - return _store.template allocator(_typeId).alloc(get(ref)).ref; -} - -template -void -UniqueStore::remove(EntryRef ref) -{ - assert(ref.valid()); - EntryType unused{}; - Compare comp(_store, unused); - auto itr = _dict.lowerBound(ref, comp); - if (itr.valid() && itr.getKey() == ref) { - uint32_t refCount = itr.getData(); - if (refCount > 1) { - itr.writeData(refCount - 1); - } else { - _dict.remove(itr); - _store.holdElem(ref, 1); - } - } -} - -namespace uniquestore { - -template -class CompactionContext : public ICompactionContext { -private: - using UniqueStoreType = UniqueStore; - using Dictionary = typename UniqueStoreType::Dictionary; - DataStoreBase &_dataStore; - Dictionary &_dict; - UniqueStoreType &_store; - std::vector _bufferIdsToCompact; - std::vector> _mapping; - - bool compactingBuffer(uint32_t bufferId) { - return std::find(_bufferIdsToCompact.begin(), _bufferIdsToCompact.end(), - bufferId) != _bufferIdsToCompact.end(); - } - - void allocMapping() { - _mapping.resize(RefT::numBuffers()); - for (const auto bufferId : _bufferIdsToCompact) { - BufferState &state = _dataStore.getBufferState(bufferId); - _mapping[bufferId].resize(state.size()); - } - } - - void fillMapping() { - auto itr = _dict.begin(); - while (itr.valid()) { - RefT iRef(itr.getKey()); - assert(iRef.valid()); - if (compactingBuffer(iRef.bufferId())) { - assert(iRef.offset() < _mapping[iRef.bufferId()].size()); - EntryRef &mappedRef = _mapping[iRef.bufferId()][iRef.offset()]; - assert(!mappedRef.valid()); - EntryRef newRef = _store.move(itr.getKey()); - mappedRef = newRef; - _dict.thaw(itr); - itr.writeKey(newRef); - } - ++itr; - } - } - -public: - CompactionContext(DataStoreBase &dataStore, - Dictionary &dict, - UniqueStoreType &store, - std::vector bufferIdsToCompact) - : _dataStore(dataStore), - _dict(dict), - _store(store), - _bufferIdsToCompact(std::move(bufferIdsToCompact)), - _mapping() - { - 
} - virtual ~CompactionContext() { - _dataStore.finishCompact(_bufferIdsToCompact); - } - virtual void compact(vespalib::ArrayRef refs) override { - if (!_bufferIdsToCompact.empty()) { - if (_mapping.empty()) { - allocMapping(); - fillMapping(); - } - for (auto &ref : refs) { - if (ref.valid()) { - RefT internalRef(ref); - if (compactingBuffer(internalRef.bufferId())) { - assert(internalRef.offset() < _mapping[internalRef.bufferId()].size()); - EntryRef newRef = _mapping[internalRef.bufferId()][internalRef.offset()]; - assert(newRef.valid()); - ref = newRef; - } - } - } - } - } -}; - -} - -template -ICompactionContext::UP -UniqueStore::compactWorst() -{ - std::vector bufferIdsToCompact = _store.startCompactWorstBuffers(true, true); - return std::make_unique> - (_store, _dict, *this, std::move(bufferIdsToCompact)); -} - -template -vespalib::MemoryUsage -UniqueStore::getMemoryUsage() const -{ - vespalib::MemoryUsage usage = _store.getMemoryUsage(); - usage.merge(_dict.getMemoryUsage()); - return usage; -} - -template -const BufferState & -UniqueStore::bufferState(EntryRef ref) const -{ - RefT internalRef(ref); - return _store.getBufferState(internalRef.bufferId()); -} - - -template -void -UniqueStore::transferHoldLists(generation_t generation) -{ - _dict.getAllocator().transferHoldLists(generation); - _store.transferHoldLists(generation); -} - -template -void -UniqueStore::trimHoldLists(generation_t firstUsed) -{ - _dict.getAllocator().trimHoldLists(firstUsed); - _store.trimHoldLists(firstUsed); -} - -template -void -UniqueStore::freeze() -{ - _dict.getAllocator().freeze(); -} - -template -typename UniqueStore::Builder -UniqueStore::getBuilder(uint32_t uniqueValuesHint) -{ - return Builder(_store, _typeId, _dict, uniqueValuesHint); -} - -template -typename UniqueStore::Saver -UniqueStore::getSaver() const -{ - return Saver(_dict, _store); -} - -template -uint32_t -UniqueStore::getNumUniques() const -{ - return _dict.getFrozenView().size(); -} - -} diff --git a/searchlib/src/vespa/searchlib/datastore/unique_store_builder.h b/searchlib/src/vespa/searchlib/datastore/unique_store_builder.h deleted file mode 100644 index 0a3ec447e67..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/unique_store_builder.h +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "unique_store.h" - -namespace search::datastore { - -/** - * Builder for related UniqueStore class. - * - * Contains utility method for adding new unique values and mapping - * from enum value to EntryRef value. New unique values must be added - * in sorted order. 
- */ -template -class UniqueStoreBuilder { - using UniqueStoreType = UniqueStore; - using DataStoreType = typename UniqueStoreType::DataStoreType; - using Dictionary = typename UniqueStoreType::Dictionary; - using EntryType = EntryT; - using RefType = RefT; - - DataStoreType &_store; - uint32_t _typeId; - Dictionary &_dict; - std::vector _refs; - std::vector _refCounts; -public: - UniqueStoreBuilder(DataStoreType &store, uint32_t typeId, - Dictionary &dict, uint32_t uniqueValuesHint); - ~UniqueStoreBuilder(); - void setupRefCounts(); - void makeDictionary(); - void add(const EntryType &value) { - EntryRef newRef = _store.template allocator(_typeId).alloc(value).ref; - _refs.push_back(newRef); - } - EntryRef mapEnumValueToEntryRef(uint32_t enumValue) { - assert(enumValue < _refs.size()); - ++_refCounts[enumValue]; - return _refs[enumValue]; - } -}; - -} diff --git a/searchlib/src/vespa/searchlib/datastore/unique_store_builder.hpp b/searchlib/src/vespa/searchlib/datastore/unique_store_builder.hpp deleted file mode 100644 index 9445eb3a9f2..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/unique_store_builder.hpp +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "unique_store_builder.h" -#include "datastore.hpp" -#include -#include -#include -#include -#include -#include - -namespace search::datastore { - -template -UniqueStoreBuilder::UniqueStoreBuilder(DataStoreType &store, uint32_t typeId, Dictionary &dict, uint32_t uniqueValuesHint) - : _store(store), - _typeId(typeId), - _dict(dict), - _refs(), - _refCounts() -{ - _refs.reserve(uniqueValuesHint); - _refs.push_back(EntryRef()); -} - -template -UniqueStoreBuilder::~UniqueStoreBuilder() -{ -} - -template -void -UniqueStoreBuilder::setupRefCounts() -{ - _refCounts.resize(_refs.size()); -} - - -template -void -UniqueStoreBuilder::makeDictionary() -{ - assert(_refs.size() == _refCounts.size()); - assert(!_refs.empty()); - typename Dictionary::Builder builder(_dict.getAllocator()); - for (size_t i = 1; i < _refs.size(); ++i) { - if (_refCounts[i] != 0u) { - builder.insert(_refs[i], _refCounts[i]); - } else { - _store.holdElem(_refs[i], 1); - } - } - _dict.assign(builder); -} - -} - diff --git a/searchlib/src/vespa/searchlib/datastore/unique_store_saver.h b/searchlib/src/vespa/searchlib/datastore/unique_store_saver.h deleted file mode 100644 index 6fdcf2da83a..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/unique_store_saver.h +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "unique_store.h" - -namespace search::datastore { - -/** - * Saver for related UniqueStore class. - * - * Contains utility methods for traversing all unique values (as - * EntryRef value) and mapping from EntryRef value to enum value. 
- */ -template -class UniqueStoreSaver { - using UniqueStoreType = UniqueStore; - using Dictionary = typename UniqueStoreType::Dictionary; - using ConstIterator = typename Dictionary::ConstIterator; - using EntryType = EntryT; - using RefType = RefT; - - ConstIterator _itr; - const DataStoreBase &_store; - std::vector> _enumValues; -public: - UniqueStoreSaver(const Dictionary &dict, const DataStoreBase &store); - ~UniqueStoreSaver(); - void enumerateValues(); - - template - void - foreach_key(Function &&func) const - { - _itr.foreach_key(func); - } - - uint32_t mapEntryRefToEnumValue(EntryRef ref) const { - if (ref.valid()) { - RefType iRef(ref); - assert(iRef.offset() < _enumValues[iRef.bufferId()].size()); - uint32_t enumValue = _enumValues[iRef.bufferId()][iRef.offset()]; - assert(enumValue != 0); - return enumValue; - } else { - return 0u; - } - } -}; - -} diff --git a/searchlib/src/vespa/searchlib/datastore/unique_store_saver.hpp b/searchlib/src/vespa/searchlib/datastore/unique_store_saver.hpp deleted file mode 100644 index 3377b674930..00000000000 --- a/searchlib/src/vespa/searchlib/datastore/unique_store_saver.hpp +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "unique_store_saver.h" - -namespace search::datastore { - -template -UniqueStoreSaver::UniqueStoreSaver(const Dictionary &dict, const DataStoreBase &store) - : _itr(), - _store(store) -{ - _itr = dict.getFrozenView().begin(); -} - -template -UniqueStoreSaver::~UniqueStoreSaver() -{ -} - -template -void -UniqueStoreSaver::enumerateValues() -{ - _enumValues.resize(RefType::numBuffers()); - for (uint32_t bufferId = 0; bufferId < RefType::numBuffers(); ++bufferId) { - const BufferState &state = _store.getBufferState(bufferId); - if (state.isActive()) { - _enumValues[bufferId].resize(state.size()); - } - } - ConstIterator it = _itr; - uint32_t nextEnumVal = 1; - while (it.valid()) { - RefType ref(it.getKey()); - assert(ref.valid()); - assert(ref.offset() < _enumValues[ref.bufferId()].size()); - uint32_t &enumVal = _enumValues[ref.bufferId()][ref.offset()]; - assert(enumVal == 0u); - enumVal = nextEnumVal; - ++it; - ++nextEnumVal; - } -} - -} diff --git a/searchlib/src/vespa/searchlib/memoryindex/compact_words_store.cpp b/searchlib/src/vespa/searchlib/memoryindex/compact_words_store.cpp index 7d90ff4bc60..b3135efc2bb 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/compact_words_store.cpp +++ b/searchlib/src/vespa/searchlib/memoryindex/compact_words_store.cpp @@ -1,7 +1,7 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. #include "compact_words_store.h" -#include +#include #include #include diff --git a/searchlib/src/vespa/searchlib/memoryindex/compact_words_store.h b/searchlib/src/vespa/searchlib/memoryindex/compact_words_store.h index 2ec125826b5..07d7c2e1cce 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/compact_words_store.h +++ b/searchlib/src/vespa/searchlib/memoryindex/compact_words_store.h @@ -1,8 +1,8 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
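
(Illustration, not part of the patch.) UniqueStoreSaver::enumerateValues() above walks the frozen dictionary in sorted order and assigns each stored value a dense enum value starting at 1, keeping 0 for the invalid reference; mapEntryRefToEnumValue() then translates refs to those numbers during save. A compact sketch of that numbering step, with hypothetical names and a hash map in place of the per-buffer offset tables used by the real saver:

    // Sketch of the enumeration step performed by enumerateValues() above.
    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    // refsInDictionaryOrder: the keys of the frozen dictionary, already ordered
    // by the values they refer to (what the B-tree iterator yields).
    std::unordered_map<uint32_t, uint32_t>
    enumerate(const std::vector<uint32_t> &refsInDictionaryOrder)
    {
        std::unordered_map<uint32_t, uint32_t> enumValues;
        uint32_t next = 1;                         // 0 is reserved for the invalid ref
        for (uint32_t ref : refsInDictionaryOrder) {
            enumValues[ref] = next++;
        }
        return enumValues;
    }

The real saver sizes one vector per active buffer and indexes it by ref.offset(), avoiding hashing, but it computes the same ref-to-enum mapping.
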
#pragma once -#include -#include +#include +#include #include #include #include diff --git a/searchlib/src/vespa/searchlib/memoryindex/feature_store.cpp b/searchlib/src/vespa/searchlib/memoryindex/feature_store.cpp index 1d55ed76a09..1e9cd84a541 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/feature_store.cpp +++ b/searchlib/src/vespa/searchlib/memoryindex/feature_store.cpp @@ -2,7 +2,7 @@ #include "feature_store.h" #include -#include +#include namespace search::memoryindex { diff --git a/searchlib/src/vespa/searchlib/memoryindex/feature_store.h b/searchlib/src/vespa/searchlib/memoryindex/feature_store.h index 3cacacfb03e..6afc3c02301 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/feature_store.h +++ b/searchlib/src/vespa/searchlib/memoryindex/feature_store.h @@ -2,10 +2,10 @@ #pragma once -#include #include #include #include +#include namespace search::memoryindex { diff --git a/searchlib/src/vespa/searchlib/memoryindex/field_index.cpp b/searchlib/src/vespa/searchlib/memoryindex/field_index.cpp index cf22554a4ed..66644d1fd2b 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/field_index.cpp +++ b/searchlib/src/vespa/searchlib/memoryindex/field_index.cpp @@ -5,13 +5,13 @@ #include #include #include -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include +#include #include using search::index::DocIdAndFeatures; diff --git a/searchlib/src/vespa/searchlib/memoryindex/field_index.h b/searchlib/src/vespa/searchlib/memoryindex/field_index.h index 9d869a7c548..3f601fd5b47 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/field_index.h +++ b/searchlib/src/vespa/searchlib/memoryindex/field_index.h @@ -5,12 +5,12 @@ #include "feature_store.h" #include "field_index_remover.h" #include "word_store.h" -#include -#include -#include -#include #include #include +#include +#include +#include +#include #include #include diff --git a/searchlib/src/vespa/searchlib/memoryindex/field_index_collection.cpp b/searchlib/src/vespa/searchlib/memoryindex/field_index_collection.cpp index 1a396d62c33..a8d564971c3 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/field_index_collection.cpp +++ b/searchlib/src/vespa/searchlib/memoryindex/field_index_collection.cpp @@ -4,13 +4,13 @@ #include "field_inverter.h" #include -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include +#include #include #include diff --git a/searchlib/src/vespa/searchlib/memoryindex/i_field_index_insert_listener.h b/searchlib/src/vespa/searchlib/memoryindex/i_field_index_insert_listener.h index 0aacfa53c34..3d349b26d70 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/i_field_index_insert_listener.h +++ b/searchlib/src/vespa/searchlib/memoryindex/i_field_index_insert_listener.h @@ -1,6 +1,6 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#pragma once -#include +#include namespace search::memoryindex { diff --git a/searchlib/src/vespa/searchlib/memoryindex/memory_index.cpp b/searchlib/src/vespa/searchlib/memoryindex/memory_index.cpp index df302533192..a73d483ec2f 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/memory_index.cpp +++ b/searchlib/src/vespa/searchlib/memoryindex/memory_index.cpp @@ -6,7 +6,7 @@ #include "posting_iterator.h" #include #include -#include +#include #include #include #include diff --git a/searchlib/src/vespa/searchlib/memoryindex/ordered_field_index_inserter.cpp b/searchlib/src/vespa/searchlib/memoryindex/ordered_field_index_inserter.cpp index 9b127a8b096..0308c6d276b 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/ordered_field_index_inserter.cpp +++ b/searchlib/src/vespa/searchlib/memoryindex/ordered_field_index_inserter.cpp @@ -8,13 +8,13 @@ #include #include -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include +#include #include LOG_SETUP(".searchlib.memoryindex.ordered_document_inserter"); diff --git a/searchlib/src/vespa/searchlib/memoryindex/posting_iterator.cpp b/searchlib/src/vespa/searchlib/memoryindex/posting_iterator.cpp index 4c29ec321e3..63040aab66f 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/posting_iterator.cpp +++ b/searchlib/src/vespa/searchlib/memoryindex/posting_iterator.cpp @@ -1,11 +1,11 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. #include "posting_iterator.h" -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include #include LOG_SETUP(".searchlib.memoryindex.posting_iterator"); diff --git a/searchlib/src/vespa/searchlib/memoryindex/word_store.cpp b/searchlib/src/vespa/searchlib/memoryindex/word_store.cpp index ffdc26f5eb0..0d699395512 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/word_store.cpp +++ b/searchlib/src/vespa/searchlib/memoryindex/word_store.cpp @@ -1,7 +1,7 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include "word_store.h" -#include +#include namespace search::memoryindex { diff --git a/searchlib/src/vespa/searchlib/memoryindex/word_store.h b/searchlib/src/vespa/searchlib/memoryindex/word_store.h index 6739b6152c8..9310910d69c 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/word_store.h +++ b/searchlib/src/vespa/searchlib/memoryindex/word_store.h @@ -2,7 +2,7 @@ #pragma once -#include +#include #include namespace search::memoryindex { diff --git a/searchlib/src/vespa/searchlib/predicate/common.h b/searchlib/src/vespa/searchlib/predicate/common.h index 6fd9f4562d5..18b5b92a9d5 100644 --- a/searchlib/src/vespa/searchlib/predicate/common.h +++ b/searchlib/src/vespa/searchlib/predicate/common.h @@ -2,7 +2,7 @@ #pragma once -#include +#include namespace search::predicate { diff --git a/searchlib/src/vespa/searchlib/predicate/document_features_store.cpp b/searchlib/src/vespa/searchlib/predicate/document_features_store.cpp index 12728c4c7c3..8e370a13919 100644 --- a/searchlib/src/vespa/searchlib/predicate/document_features_store.cpp +++ b/searchlib/src/vespa/searchlib/predicate/document_features_store.cpp @@ -3,9 +3,9 @@ #include "document_features_store.h" #include "predicate_range_expander.h" #include -#include -#include -#include +#include +#include +#include //#include "predicate_index.h" diff --git a/searchlib/src/vespa/searchlib/predicate/document_features_store.h b/searchlib/src/vespa/searchlib/predicate/document_features_store.h index ad721e7c43a..cca3fa2ef54 100644 --- a/searchlib/src/vespa/searchlib/predicate/document_features_store.h +++ b/searchlib/src/vespa/searchlib/predicate/document_features_store.h @@ -3,8 +3,8 @@ #pragma once #include "predicate_tree_annotator.h" -#include #include +#include #include #include #include diff --git a/searchlib/src/vespa/searchlib/predicate/predicate_index.cpp b/searchlib/src/vespa/searchlib/predicate/predicate_index.cpp index 0b8f5c9a921..99861db31c9 100644 --- a/searchlib/src/vespa/searchlib/predicate/predicate_index.cpp +++ b/searchlib/src/vespa/searchlib/predicate/predicate_index.cpp @@ -2,11 +2,11 @@ #include "predicate_index.h" #include "predicate_hash.h" -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include using search::datastore::EntryRef; diff --git a/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.cpp b/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.cpp index d9852dabe25..40c1acf6a7d 100644 --- a/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.cpp +++ b/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.cpp @@ -2,7 +2,7 @@ #include "predicate_interval_store.h" #include "predicate_interval.h" -#include +#include using search::datastore::BufferState; using search::datastore::EntryRef; diff --git a/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.h b/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.h index 1f1a9f3fa07..8b4eebee9b2 100644 --- a/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.h +++ b/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.h @@ -3,8 +3,8 @@ #pragma once #include "predicate_ref_cache.h" -#include -#include +#include +#include #include namespace search::predicate { diff --git a/searchlib/src/vespa/searchlib/predicate/simple_index.cpp b/searchlib/src/vespa/searchlib/predicate/simple_index.cpp index 151a66740e5..96957008255 100644 --- a/searchlib/src/vespa/searchlib/predicate/simple_index.cpp +++ 
b/searchlib/src/vespa/searchlib/predicate/simple_index.cpp @@ -1,12 +1,12 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. #include "simple_index.hpp" +#include +#include +#include +#include +#include #include -#include -#include -#include -#include -#include #include LOG_SETUP(".searchlib.predicate.simple_index"); diff --git a/searchlib/src/vespa/searchlib/predicate/simple_index.h b/searchlib/src/vespa/searchlib/predicate/simple_index.h index abf7aa9af42..4edc0ff2d14 100644 --- a/searchlib/src/vespa/searchlib/predicate/simple_index.h +++ b/searchlib/src/vespa/searchlib/predicate/simple_index.h @@ -3,7 +3,7 @@ #pragma once #include "common.h" -#include +#include #include #include #include diff --git a/searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.cpp b/searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.cpp index b32047ed8c2..3fd08047922 100644 --- a/searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.cpp +++ b/searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.cpp @@ -8,11 +8,11 @@ #include #include #include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include #include #include LOG_SETUP(".searchlib.predicate.predicate_blueprint"); diff --git a/searchlib/src/vespa/searchlib/tensor/dense_tensor_attribute_saver.cpp b/searchlib/src/vespa/searchlib/tensor/dense_tensor_attribute_saver.cpp index 0daabfd9a01..fb0554112ef 100644 --- a/searchlib/src/vespa/searchlib/tensor/dense_tensor_attribute_saver.cpp +++ b/searchlib/src/vespa/searchlib/tensor/dense_tensor_attribute_saver.cpp @@ -1,7 +1,7 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include "dense_tensor_attribute_saver.h" -#include +#include #include "dense_tensor_store.h" #include diff --git a/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp b/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp index c20c3d85d28..205c686df81 100644 --- a/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp +++ b/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp @@ -6,7 +6,7 @@ #include #include #include -#include +#include using search::datastore::Handle; using vespalib::tensor::Tensor; diff --git a/searchlib/src/vespa/searchlib/tensor/generic_tensor_attribute_saver.cpp b/searchlib/src/vespa/searchlib/tensor/generic_tensor_attribute_saver.cpp index 81ec3a5218e..2f0bb731aa4 100644 --- a/searchlib/src/vespa/searchlib/tensor/generic_tensor_attribute_saver.cpp +++ b/searchlib/src/vespa/searchlib/tensor/generic_tensor_attribute_saver.cpp @@ -2,7 +2,7 @@ #include "generic_tensor_attribute_saver.h" #include "generic_tensor_store.h" -#include +#include #include using vespalib::GenerationHandler; diff --git a/searchlib/src/vespa/searchlib/tensor/generic_tensor_store.cpp b/searchlib/src/vespa/searchlib/tensor/generic_tensor_store.cpp index 49a78f8d9d9..4e522f27ce2 100644 --- a/searchlib/src/vespa/searchlib/tensor/generic_tensor_store.cpp +++ b/searchlib/src/vespa/searchlib/tensor/generic_tensor_store.cpp @@ -3,12 +3,12 @@ #include "generic_tensor_store.h" #include #include +#include +#include +#include #include #include #include -#include -#include -#include using document::DeserializeException; using search::datastore::Handle; diff --git a/searchlib/src/vespa/searchlib/tensor/tensor_store.cpp b/searchlib/src/vespa/searchlib/tensor/tensor_store.cpp index b7edf0dd5d4..d0ac5017e4d 100644 --- a/searchlib/src/vespa/searchlib/tensor/tensor_store.cpp +++ b/searchlib/src/vespa/searchlib/tensor/tensor_store.cpp @@ -1,7 +1,7 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. #include "tensor_store.h" -#include +#include namespace search { diff --git a/searchlib/src/vespa/searchlib/tensor/tensor_store.h b/searchlib/src/vespa/searchlib/tensor/tensor_store.h index 4805c5829c9..4be5701a327 100644 --- a/searchlib/src/vespa/searchlib/tensor/tensor_store.h +++ b/searchlib/src/vespa/searchlib/tensor/tensor_store.h @@ -2,8 +2,8 @@ #pragma once -#include -#include +#include +#include #include namespace vespalib { namespace tensor { class Tensor; } } diff --git a/searchlib/src/vespa/searchlib/test/btree/aggregated_printer.h b/searchlib/src/vespa/searchlib/test/btree/aggregated_printer.h deleted file mode 100644 index b84ab8285d0..00000000000 --- a/searchlib/src/vespa/searchlib/test/btree/aggregated_printer.h +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
- -#pragma once - -#include -#include - -namespace search::btree::test { - -template -void printAggregated(ostream &os, const Aggregated &aggr); - -template -void printAggregated(ostream &os, const NoAggregated &aggr) -{ - (void) os; - (void) aggr; -} - -template -void printAggregated(ostream &os, const MinMaxAggregated &aggr) -{ - os << "[min=" << aggr.getMin() << ",max=" << aggr.getMax() << "]"; -} - -} // namespace search::btree::test diff --git a/searchlib/src/vespa/searchlib/test/btree/btree_printer.h b/searchlib/src/vespa/searchlib/test/btree/btree_printer.h deleted file mode 100644 index 3d3f8c35c16..00000000000 --- a/searchlib/src/vespa/searchlib/test/btree/btree_printer.h +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "data_printer.h" -#include "aggregated_printer.h" -#include - -namespace search::btree::test { - -template -class BTreePrinter -{ - using LeafNode = typename NodeAllocator::LeafNodeType; - using InternalNode = typename NodeAllocator::InternalNodeType; - ostream &_os; - const NodeAllocator &_allocator; - bool _levelFirst; - uint8_t _printLevel; - - void printLeafNode(const LeafNode &n) { - if (!_levelFirst) { - _os << ","; - } - _levelFirst = false; - _os << "{"; - for (uint32_t i = 0; i < n.validSlots(); ++i) { - if (i > 0) _os << ","; - _os << n.getKey(i) << ":" << n.getData(i); - } - printAggregated(_os, n.getAggregated()); - _os << "}"; - } - - void printInternalNode(const InternalNode &n) { - if (!_levelFirst) { - _os << ","; - } - _levelFirst = false; - _os << "{"; - for (uint32_t i = 0; i < n.validSlots(); ++i) { - if (i > 0) _os << ","; - _os << n.getKey(i); - } - printAggregated(_os, n.getAggregated()); - _os << "}"; - } - - void printNode(BTreeNode::Ref ref) { - if (!ref.valid()) { - _os << "[]"; - } - if (_allocator.isLeafRef(ref)) { - printLeafNode(*_allocator.mapLeafRef(ref)); - return; - } - const InternalNode &n(*_allocator.mapInternalRef(ref)); - if (n.getLevel() == _printLevel) { - printInternalNode(n); - return; - } - for (uint32_t i = 0; i < n.validSlots(); ++i) { - printNode(n.getData(i)); - } - } - -public: - - BTreePrinter(ostream &os, const NodeAllocator &allocator) - : _os(os), - _allocator(allocator), - _levelFirst(true), - _printLevel(0) - { - } - - ~BTreePrinter() { } - - void print(BTreeNode::Ref ref) { - if (!ref.valid()) { - _os << "{}"; - return; - } - _printLevel = 0; - if (!_allocator.isLeafRef(ref)) { - const InternalNode &n(*_allocator.mapInternalRef(ref)); - _printLevel = n.getLevel(); - } - while (_printLevel > 0) { - _os << "{"; - _levelFirst = true; - printNode(ref); - _os << "} -> "; - --_printLevel; - } - _os << "{"; - _levelFirst = true; - printNode(ref); - _os << "}"; - } -}; - -} // namespace search::btree::test diff --git a/searchlib/src/vespa/searchlib/test/btree/data_printer.h b/searchlib/src/vespa/searchlib/test/btree/data_printer.h deleted file mode 100644 index 26b77da0db7..00000000000 --- a/searchlib/src/vespa/searchlib/test/btree/data_printer.h +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
- -#pragma once - -namespace search::btree { - -class BtreeNoLeafData; - -namespace test { - -template -void printData(ostream &os, const DataT &data); - -template -void printData(ostream &os, const DataT &data) -{ - os << ":" << data; -} - -template -void printData(ostream &os, const BTreeNoLeafData &data) -{ - (void) os; - (void) data; -} - -} // namespace search::btree::test -} // namespace search::btree diff --git a/searchlib/src/vespa/searchlib/test/datastore/memstats.h b/searchlib/src/vespa/searchlib/test/datastore/memstats.h deleted file mode 100644 index 0a5373d0c78..00000000000 --- a/searchlib/src/vespa/searchlib/test/datastore/memstats.h +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include - -namespace search::datastore::test { - -/* - * Class representing expected memory stats in unit tests. - */ -struct MemStats -{ - size_t _used; - size_t _hold; - size_t _dead; - MemStats() : _used(0), _hold(0), _dead(0) {} - MemStats(const vespalib::MemoryUsage &usage) - : _used(usage.usedBytes()), - _hold(usage.allocatedBytesOnHold()), - _dead(usage.deadBytes()) {} - MemStats &used(size_t val) { _used += val; return *this; } - MemStats &hold(size_t val) { _hold += val; return *this; } - MemStats &dead(size_t val) { _dead += val; return *this; } - MemStats &holdToDead(size_t val) { - decHold(val); - _dead += val; - return *this; - } - MemStats &decHold(size_t val) { - assert(_hold >= val); - _hold -= val; - return *this; - } -}; - -} diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp b/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp index d59417a1e78..54c0aa866b4 100644 --- a/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp +++ b/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp @@ -2,14 +2,14 @@ #include "fakememtreeocc.h" #include "fpfactory.h" -#include -#include -#include -#include -#include #include #include #include +#include +#include +#include +#include +#include #include LOG_SETUP(".fakememtreeocc"); diff --git a/searchlib/src/vespa/searchlib/util/CMakeLists.txt b/searchlib/src/vespa/searchlib/util/CMakeLists.txt index 70242c3aacd..873f1824d04 100644 --- a/searchlib/src/vespa/searchlib/util/CMakeLists.txt +++ b/searchlib/src/vespa/searchlib/util/CMakeLists.txt @@ -1,7 +1,6 @@ # Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. vespa_add_library(searchlib_util OBJECT SOURCES - bufferwriter.cpp comprbuffer.cpp comprfile.cpp dirtraverse.cpp diff --git a/searchlib/src/vespa/searchlib/util/bufferwriter.cpp b/searchlib/src/vespa/searchlib/util/bufferwriter.cpp deleted file mode 100644 index 6e57d6f58d4..00000000000 --- a/searchlib/src/vespa/searchlib/util/bufferwriter.cpp +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
- -#include "bufferwriter.h" - -namespace search { - -BufferWriter::BufferWriter() - : _cur(nullptr), - _end(nullptr), - _start(nullptr) -{ -} - -BufferWriter::~BufferWriter() = default; - -void -BufferWriter::writeSlow(const void *src, size_t len) -{ - size_t residue = len; - const char *csrc = static_cast(src); - for (;;) { - size_t maxLen = freeLen(); - if (residue <= maxLen) { - writeFast(csrc, residue); - break; - } - if (maxLen != 0) { - writeFast(csrc, maxLen); - csrc += maxLen; - residue -= maxLen; - } - flush(); - } -} - -} // namespace search diff --git a/searchlib/src/vespa/searchlib/util/bufferwriter.h b/searchlib/src/vespa/searchlib/util/bufferwriter.h deleted file mode 100644 index 3da6e3f8030..00000000000 --- a/searchlib/src/vespa/searchlib/util/bufferwriter.h +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include - -namespace search { - -/** - * Abstract class to write to a buffer with an abstract backing store - * and abstract backing buffer. Each time backing buffer is full, - * flush() is called to resize it or drain it to the backing store. - */ -class BufferWriter -{ - char *_cur; - char *_end; - char *_start; -protected: - void rewind() { _cur = _start; } - - void setup(void *start, size_t len) { - _start = static_cast(start); - _end = _start + len; - rewind(); - } - - size_t freeLen() const { return _end - _cur; } - size_t usedLen() const { return _cur - _start; } - - void writeFast(const void *src, size_t len) - { - __builtin_memcpy(_cur, src, len); - _cur += len; - } - - void writeSlow(const void *src, size_t len); - -public: - BufferWriter(); - - virtual ~BufferWriter(); - - virtual void flush() = 0; - - void write(const void *src, size_t len) - { - if (__builtin_expect(len <= freeLen(), true)) { - writeFast(src, len); - return; - } - writeSlow(src, len); - } -}; - -} // namespace search diff --git a/searchlib/src/vespa/searchlib/util/drainingbufferwriter.h b/searchlib/src/vespa/searchlib/util/drainingbufferwriter.h index e4efca0796d..38d8337fab5 100644 --- a/searchlib/src/vespa/searchlib/util/drainingbufferwriter.h +++ b/searchlib/src/vespa/searchlib/util/drainingbufferwriter.h @@ -2,7 +2,7 @@ #pragma once -#include "bufferwriter.h" +#include #include #include -- cgit v1.2.3