aboutsummaryrefslogtreecommitdiffstats
path: root/searchcore/src/tests
diff options
context:
space:
mode:
authorJon Bratseth <bratseth@yahoo-inc.com>2016-06-15 23:09:44 +0200
committerJon Bratseth <bratseth@yahoo-inc.com>2016-06-15 23:09:44 +0200
commit72231250ed81e10d66bfe70701e64fa5fe50f712 (patch)
tree2728bba1131a6f6e5bdf95afec7d7ff9358dac50 /searchcore/src/tests
Publish
Diffstat (limited to 'searchcore/src/tests')
-rw-r--r--searchcore/src/tests/.gitignore3
-rw-r--r--searchcore/src/tests/applyattrupdates/.gitignore4
-rw-r--r--searchcore/src/tests/applyattrupdates/CMakeLists.txt9
-rw-r--r--searchcore/src/tests/applyattrupdates/applyattrupdates.cpp338
-rw-r--r--searchcore/src/tests/applyattrupdates/doctypes.cfg174
-rw-r--r--searchcore/src/tests/fdispatch/randomrow/.gitignore1
-rw-r--r--searchcore/src/tests/fdispatch/randomrow/CMakeLists.txt10
-rw-r--r--searchcore/src/tests/fdispatch/randomrow/DESC1
-rw-r--r--searchcore/src/tests/fdispatch/randomrow/FILES1
-rw-r--r--searchcore/src/tests/fdispatch/randomrow/randomrow_test.cpp89
-rw-r--r--searchcore/src/tests/fdispatch/search_path/.gitignore1
-rw-r--r--searchcore/src/tests/fdispatch/search_path/CMakeLists.txt8
-rw-r--r--searchcore/src/tests/fdispatch/search_path/DESC1
-rw-r--r--searchcore/src/tests/fdispatch/search_path/FILES1
-rw-r--r--searchcore/src/tests/fdispatch/search_path/search_path_test.cpp124
-rw-r--r--searchcore/src/tests/grouping/.gitignore4
-rw-r--r--searchcore/src/tests/grouping/CMakeLists.txt9
-rw-r--r--searchcore/src/tests/grouping/DESC1
-rw-r--r--searchcore/src/tests/grouping/FILES1
-rw-r--r--searchcore/src/tests/grouping/grouping.cpp604
-rw-r--r--searchcore/src/tests/proton/attribute/.gitignore9
-rw-r--r--searchcore/src/tests/proton/attribute/CMakeLists.txt21
-rw-r--r--searchcore/src/tests/proton/attribute/DESC1
-rw-r--r--searchcore/src/tests/proton/attribute/FILES1
-rw-r--r--searchcore/src/tests/proton/attribute/attribute_manager/.gitignore1
-rw-r--r--searchcore/src/tests/proton/attribute/attribute_manager/CMakeLists.txt14
-rw-r--r--searchcore/src/tests/proton/attribute/attribute_manager/DESC1
-rw-r--r--searchcore/src/tests/proton/attribute/attribute_manager/FILES1
-rw-r--r--searchcore/src/tests/proton/attribute/attribute_manager/attribute_manager_test.cpp686
-rw-r--r--searchcore/src/tests/proton/attribute/attribute_populator/.gitignore1
-rw-r--r--searchcore/src/tests/proton/attribute/attribute_populator/CMakeLists.txt9
-rw-r--r--searchcore/src/tests/proton/attribute/attribute_populator/DESC1
-rw-r--r--searchcore/src/tests/proton/attribute/attribute_populator/FILES1
-rw-r--r--searchcore/src/tests/proton/attribute/attribute_populator/attribute_populator_test.cpp98
-rw-r--r--searchcore/src/tests/proton/attribute/attribute_test.cpp607
-rwxr-xr-xsearchcore/src/tests/proton/attribute/attribute_test.sh3
-rw-r--r--searchcore/src/tests/proton/attribute/attribute_usage_filter/.gitignore1
-rw-r--r--searchcore/src/tests/proton/attribute/attribute_usage_filter/CMakeLists.txt8
-rw-r--r--searchcore/src/tests/proton/attribute/attribute_usage_filter/DESC1
-rw-r--r--searchcore/src/tests/proton/attribute/attribute_usage_filter/FILES1
-rw-r--r--searchcore/src/tests/proton/attribute/attribute_usage_filter/attribute_usage_filter_test.cpp143
-rw-r--r--searchcore/src/tests/proton/attribute/attributeflush_test.cpp564
-rwxr-xr-xsearchcore/src/tests/proton/attribute/attributeflush_test.sh3
-rw-r--r--searchcore/src/tests/proton/attribute/attributes_state_explorer/.gitignore1
-rw-r--r--searchcore/src/tests/proton/attribute/attributes_state_explorer/CMakeLists.txt9
-rw-r--r--searchcore/src/tests/proton/attribute/attributes_state_explorer/DESC1
-rw-r--r--searchcore/src/tests/proton/attribute/attributes_state_explorer/FILES1
-rw-r--r--searchcore/src/tests/proton/attribute/attributes_state_explorer/attributes_state_explorer_test.cpp70
-rw-r--r--searchcore/src/tests/proton/attribute/document_field_populator/.gitignore1
-rw-r--r--searchcore/src/tests/proton/attribute/document_field_populator/CMakeLists.txt9
-rw-r--r--searchcore/src/tests/proton/attribute/document_field_populator/DESC1
-rw-r--r--searchcore/src/tests/proton/attribute/document_field_populator/FILES1
-rw-r--r--searchcore/src/tests/proton/attribute/document_field_populator/document_field_populator_test.cpp84
-rw-r--r--searchcore/src/tests/proton/attribute/exclusive_attribute_read_accessor/.gitignore1
-rw-r--r--searchcore/src/tests/proton/attribute/exclusive_attribute_read_accessor/CMakeLists.txt9
-rw-r--r--searchcore/src/tests/proton/attribute/exclusive_attribute_read_accessor/DESC1
-rw-r--r--searchcore/src/tests/proton/attribute/exclusive_attribute_read_accessor/FILES1
-rw-r--r--searchcore/src/tests/proton/attribute/exclusive_attribute_read_accessor/exclusive_attribute_read_accessor_test.cpp54
-rw-r--r--searchcore/src/tests/proton/attribute/gidmapattribute/.gitignore0
-rw-r--r--searchcore/src/tests/proton/bucketdb/bucketdb/.gitignore1
-rw-r--r--searchcore/src/tests/proton/bucketdb/bucketdb/CMakeLists.txt8
-rw-r--r--searchcore/src/tests/proton/bucketdb/bucketdb/DESC1
-rw-r--r--searchcore/src/tests/proton/bucketdb/bucketdb/FILES1
-rw-r--r--searchcore/src/tests/proton/bucketdb/bucketdb/bucketdb_test.cpp169
-rwxr-xr-xsearchcore/src/tests/proton/clean_tests.sh8
-rw-r--r--searchcore/src/tests/proton/common/.gitignore3
-rw-r--r--searchcore/src/tests/proton/common/CMakeLists.txt22
-rw-r--r--searchcore/src/tests/proton/common/cachedselect_test.cpp710
-rw-r--r--searchcore/src/tests/proton/common/document_type_inspector/.gitignore1
-rw-r--r--searchcore/src/tests/proton/common/document_type_inspector/CMakeLists.txt8
-rw-r--r--searchcore/src/tests/proton/common/document_type_inspector/DESC2
-rw-r--r--searchcore/src/tests/proton/common/document_type_inspector/FILES1
-rw-r--r--searchcore/src/tests/proton/common/document_type_inspector/document_type_inspector_test.cpp50
-rw-r--r--searchcore/src/tests/proton/common/dummydbowner.h23
-rw-r--r--searchcore/src/tests/proton/common/schemautil_test.cpp132
-rw-r--r--searchcore/src/tests/proton/common/selectpruner_test.cpp778
-rw-r--r--searchcore/src/tests/proton/common/state_reporter_utils/.gitignore1
-rw-r--r--searchcore/src/tests/proton/common/state_reporter_utils/CMakeLists.txt8
-rw-r--r--searchcore/src/tests/proton/common/state_reporter_utils/DESC1
-rw-r--r--searchcore/src/tests/proton/common/state_reporter_utils/FILES1
-rw-r--r--searchcore/src/tests/proton/common/state_reporter_utils/state_reporter_utils_test.cpp48
-rw-r--r--searchcore/src/tests/proton/config/.cvsignore3
-rw-r--r--searchcore/src/tests/proton/config/.gitignore1
-rw-r--r--searchcore/src/tests/proton/config/CMakeLists.txt9
-rw-r--r--searchcore/src/tests/proton/config/config.cpp268
-rwxr-xr-xsearchcore/src/tests/proton/create-test.sh73
-rw-r--r--searchcore/src/tests/proton/docsummary/.gitignore6
-rw-r--r--searchcore/src/tests/proton/docsummary/CMakeLists.txt32
-rw-r--r--searchcore/src/tests/proton/docsummary/DESC1
-rw-r--r--searchcore/src/tests/proton/docsummary/FILES1
-rw-r--r--searchcore/src/tests/proton/docsummary/attributes.cfg45
-rw-r--r--searchcore/src/tests/proton/docsummary/docsummary.cpp1296
-rwxr-xr-xsearchcore/src/tests/proton/docsummary/docsummary_test.sh15
-rw-r--r--searchcore/src/tests/proton/docsummary/documentmanager.cfg81
-rw-r--r--searchcore/src/tests/proton/docsummary/indexingdocument.cfg0
-rw-r--r--searchcore/src/tests/proton/docsummary/indexschema.cfg0
-rw-r--r--searchcore/src/tests/proton/docsummary/juniperrc.cfg0
-rw-r--r--searchcore/src/tests/proton/docsummary/rank-profiles.cfg2
-rw-r--r--searchcore/src/tests/proton/docsummary/summary.cfg108
-rw-r--r--searchcore/src/tests/proton/docsummary/summaryfieldconverter_test.cpp713
-rw-r--r--searchcore/src/tests/proton/docsummary/summarymap.cfg48
-rw-r--r--searchcore/src/tests/proton/document_iterator/.gitignore1
-rw-r--r--searchcore/src/tests/proton/document_iterator/CMakeLists.txt9
-rw-r--r--searchcore/src/tests/proton/document_iterator/FILES1
-rw-r--r--searchcore/src/tests/proton/document_iterator/document_iterator_test.cpp888
-rw-r--r--searchcore/src/tests/proton/documentdb/.gitignore6
-rw-r--r--searchcore/src/tests/proton/documentdb/CMakeLists.txt24
-rw-r--r--searchcore/src/tests/proton/documentdb/DESC1
-rw-r--r--searchcore/src/tests/proton/documentdb/FILES1
-rw-r--r--searchcore/src/tests/proton/documentdb/buckethandler/.gitignore1
-rw-r--r--searchcore/src/tests/proton/documentdb/buckethandler/CMakeLists.txt18
-rw-r--r--searchcore/src/tests/proton/documentdb/buckethandler/DESC1
-rw-r--r--searchcore/src/tests/proton/documentdb/buckethandler/FILES1
-rw-r--r--searchcore/src/tests/proton/documentdb/buckethandler/buckethandler_test.cpp265
-rw-r--r--searchcore/src/tests/proton/documentdb/cfg/attributes.cfg3
-rw-r--r--searchcore/src/tests/proton/documentdb/cfg/indexschema.cfg0
-rw-r--r--searchcore/src/tests/proton/documentdb/cfg/juniperrc.cfg0
-rw-r--r--searchcore/src/tests/proton/documentdb/cfg/rank-profiles.cfg0
-rw-r--r--searchcore/src/tests/proton/documentdb/cfg/summary.cfg0
-rw-r--r--searchcore/src/tests/proton/documentdb/cfg/summarymap.cfg0
-rw-r--r--searchcore/src/tests/proton/documentdb/clusterstatehandler/.gitignore1
-rw-r--r--searchcore/src/tests/proton/documentdb/clusterstatehandler/CMakeLists.txt16
-rw-r--r--searchcore/src/tests/proton/documentdb/clusterstatehandler/DESC1
-rw-r--r--searchcore/src/tests/proton/documentdb/clusterstatehandler/FILES1
-rw-r--r--searchcore/src/tests/proton/documentdb/clusterstatehandler/clusterstatehandler_test.cpp94
-rw-r--r--searchcore/src/tests/proton/documentdb/combiningfeedview/.gitignore1
-rw-r--r--searchcore/src/tests/proton/documentdb/combiningfeedview/CMakeLists.txt19
-rw-r--r--searchcore/src/tests/proton/documentdb/combiningfeedview/DESC1
-rw-r--r--searchcore/src/tests/proton/documentdb/combiningfeedview/FILES1
-rw-r--r--searchcore/src/tests/proton/documentdb/combiningfeedview/combiningfeedview_test.cpp438
-rw-r--r--searchcore/src/tests/proton/documentdb/configurer/.gitignore1
-rw-r--r--searchcore/src/tests/proton/documentdb/configurer/CMakeLists.txt22
-rw-r--r--searchcore/src/tests/proton/documentdb/configurer/DESC1
-rw-r--r--searchcore/src/tests/proton/documentdb/configurer/FILES1
-rw-r--r--searchcore/src/tests/proton/documentdb/configurer/configurer_test.cpp611
-rw-r--r--searchcore/src/tests/proton/documentdb/configvalidator/.gitignore1
-rw-r--r--searchcore/src/tests/proton/documentdb/configvalidator/CMakeLists.txt8
-rw-r--r--searchcore/src/tests/proton/documentdb/configvalidator/DESC1
-rw-r--r--searchcore/src/tests/proton/documentdb/configvalidator/FILES1
-rw-r--r--searchcore/src/tests/proton/documentdb/configvalidator/configvalidator_test.cpp351
-rw-r--r--searchcore/src/tests/proton/documentdb/document_scan_iterator/.gitignore1
-rw-r--r--searchcore/src/tests/proton/documentdb/document_scan_iterator/CMakeLists.txt12
-rw-r--r--searchcore/src/tests/proton/documentdb/document_scan_iterator/DESC2
-rw-r--r--searchcore/src/tests/proton/documentdb/document_scan_iterator/FILES1
-rw-r--r--searchcore/src/tests/proton/documentdb/document_scan_iterator/document_scan_iterator_test.cpp102
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/.gitignore1
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/CMakeLists.txt24
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/DESC1
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/FILES1
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/cfg1/attributes.cfg3
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/cfg1/indexschema.cfg0
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/cfg1/juniperrc.cfg0
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/cfg1/rank-profiles.cfg0
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/cfg1/summary.cfg0
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/cfg1/summarymap.cfg0
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/cfg2/attributes.cfg5
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/cfg2/indexschema.cfg0
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/cfg2/juniperrc.cfg0
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/cfg2/rank-profiles.cfg0
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/cfg2/summary.cfg0
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/cfg2/summarymap.cfg0
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/cfg3/attributes.cfg6
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/cfg3/indexschema.cfg0
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/cfg3/juniperrc.cfg0
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/cfg3/rank-profiles.cfg0
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/cfg3/summary.cfg0
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/cfg3/summarymap.cfg0
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/cfg4/attributes.cfg7
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/cfg4/indexschema.cfg0
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/cfg4/juniperrc.cfg0
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/cfg4/rank-profiles.cfg0
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/cfg4/summary.cfg0
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/cfg4/summarymap.cfg0
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp978
-rw-r--r--searchcore/src/tests/proton/documentdb/documentbucketmover/.gitignore1
-rw-r--r--searchcore/src/tests/proton/documentdb/documentbucketmover/CMakeLists.txt19
-rw-r--r--searchcore/src/tests/proton/documentdb/documentbucketmover/DESC1
-rw-r--r--searchcore/src/tests/proton/documentdb/documentbucketmover/FILES1
-rw-r--r--searchcore/src/tests/proton/documentdb/documentbucketmover/documentbucketmover_test.cpp1182
-rw-r--r--searchcore/src/tests/proton/documentdb/documentdb_test.cpp218
-rw-r--r--searchcore/src/tests/proton/documentdb/documentdb_test.sh3
-rw-r--r--searchcore/src/tests/proton/documentdb/documentdbconfig/.gitignore1
-rw-r--r--searchcore/src/tests/proton/documentdb/documentdbconfig/CMakeLists.txt8
-rw-r--r--searchcore/src/tests/proton/documentdb/documentdbconfig/DESC1
-rw-r--r--searchcore/src/tests/proton/documentdb/documentdbconfig/FILES1
-rw-r--r--searchcore/src/tests/proton/documentdb/documentdbconfig/documentdbconfig_test.cpp70
-rw-r--r--searchcore/src/tests/proton/documentdb/documentdbconfigscout/.gitignore1
-rw-r--r--searchcore/src/tests/proton/documentdb/documentdbconfigscout/CMakeLists.txt9
-rw-r--r--searchcore/src/tests/proton/documentdb/documentdbconfigscout/DESC1
-rw-r--r--searchcore/src/tests/proton/documentdb/documentdbconfigscout/FILES1
-rw-r--r--searchcore/src/tests/proton/documentdb/documentdbconfigscout/documentdbconfigscout_test.cpp264
-rw-r--r--searchcore/src/tests/proton/documentdb/feedhandler/.gitignore1
-rw-r--r--searchcore/src/tests/proton/documentdb/feedhandler/CMakeLists.txt18
-rw-r--r--searchcore/src/tests/proton/documentdb/feedhandler/DESC1
-rw-r--r--searchcore/src/tests/proton/documentdb/feedhandler/FILES1
-rw-r--r--searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp748
-rw-r--r--searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.sh4
-rw-r--r--searchcore/src/tests/proton/documentdb/feedview/.gitignore1
-rw-r--r--searchcore/src/tests/proton/documentdb/feedview/CMakeLists.txt19
-rw-r--r--searchcore/src/tests/proton/documentdb/feedview/DESC1
-rw-r--r--searchcore/src/tests/proton/documentdb/feedview/FILES1
-rw-r--r--searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp1211
-rw-r--r--searchcore/src/tests/proton/documentdb/fileconfigmanager/.gitignore5
-rw-r--r--searchcore/src/tests/proton/documentdb/fileconfigmanager/CMakeLists.txt11
-rw-r--r--searchcore/src/tests/proton/documentdb/fileconfigmanager/DESC1
-rw-r--r--searchcore/src/tests/proton/documentdb/fileconfigmanager/FILES1
-rw-r--r--searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/attributes.cfg2
-rw-r--r--searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/documenttypes.cfg15
-rw-r--r--searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/indexschema.cfg3
-rw-r--r--searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/juniperrc.cfg2
-rw-r--r--searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/mycfg.cfg1
-rw-r--r--searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/rank-profiles.cfg2
-rw-r--r--searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/summary.cfg7
-rw-r--r--searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/summarymap.cfg3
-rw-r--r--searchcore/src/tests/proton/documentdb/fileconfigmanager/fileconfigmanager_test.cpp322
-rw-r--r--searchcore/src/tests/proton/documentdb/fileconfigmanager/fileconfigmanager_test.sh4
-rw-r--r--searchcore/src/tests/proton/documentdb/fileconfigmanager/mycfg.def4
-rw-r--r--searchcore/src/tests/proton/documentdb/job_tracked_maintenance_job/.gitignore1
-rw-r--r--searchcore/src/tests/proton/documentdb/job_tracked_maintenance_job/CMakeLists.txt9
-rw-r--r--searchcore/src/tests/proton/documentdb/job_tracked_maintenance_job/DESC2
-rw-r--r--searchcore/src/tests/proton/documentdb/job_tracked_maintenance_job/FILES1
-rw-r--r--searchcore/src/tests/proton/documentdb/job_tracked_maintenance_job/job_tracked_maintenance_job_test.cpp134
-rw-r--r--searchcore/src/tests/proton/documentdb/lid_space_compaction/.gitignore1
-rw-r--r--searchcore/src/tests/proton/documentdb/lid_space_compaction/CMakeLists.txt13
-rw-r--r--searchcore/src/tests/proton/documentdb/lid_space_compaction/DESC2
-rw-r--r--searchcore/src/tests/proton/documentdb/lid_space_compaction/FILES1
-rw-r--r--searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_compaction_test.cpp450
-rw-r--r--searchcore/src/tests/proton/documentdb/maintenancecontroller/.gitignore2
-rw-r--r--searchcore/src/tests/proton/documentdb/maintenancecontroller/CMakeLists.txt38
-rw-r--r--searchcore/src/tests/proton/documentdb/maintenancecontroller/DESC2
-rw-r--r--searchcore/src/tests/proton/documentdb/maintenancecontroller/FILES2
-rw-r--r--searchcore/src/tests/proton/documentdb/maintenancecontroller/frozenbucketsmap_test.cpp86
-rw-r--r--searchcore/src/tests/proton/documentdb/maintenancecontroller/maintenancecontroller_test.cpp1472
-rw-r--r--searchcore/src/tests/proton/documentdb/storeonlyfeedview/.gitignore4
-rw-r--r--searchcore/src/tests/proton/documentdb/storeonlyfeedview/CMakeLists.txt13
-rw-r--r--searchcore/src/tests/proton/documentdb/storeonlyfeedview/storeonlyfeedview_test.cpp289
-rw-r--r--searchcore/src/tests/proton/documentmetastore/.gitignore6
-rw-r--r--searchcore/src/tests/proton/documentmetastore/CMakeLists.txt13
-rw-r--r--searchcore/src/tests/proton/documentmetastore/DESC1
-rw-r--r--searchcore/src/tests/proton/documentmetastore/FILES1
-rw-r--r--searchcore/src/tests/proton/documentmetastore/documentmetastore_test.cpp1878
-rw-r--r--searchcore/src/tests/proton/documentmetastore/documentmetastore_test.sh4
-rw-r--r--searchcore/src/tests/proton/documentmetastore/lidreusedelayer/.gitignore1
-rw-r--r--searchcore/src/tests/proton/documentmetastore/lidreusedelayer/CMakeLists.txt9
-rw-r--r--searchcore/src/tests/proton/documentmetastore/lidreusedelayer/DESC1
-rw-r--r--searchcore/src/tests/proton/documentmetastore/lidreusedelayer/FILES1
-rw-r--r--searchcore/src/tests/proton/documentmetastore/lidreusedelayer/lidreusedelayer_test.cpp325
-rw-r--r--searchcore/src/tests/proton/feed_and_search/.gitignore8
-rw-r--r--searchcore/src/tests/proton/feed_and_search/CMakeLists.txt8
-rw-r--r--searchcore/src/tests/proton/feed_and_search/DESC1
-rw-r--r--searchcore/src/tests/proton/feed_and_search/FILES1
-rw-r--r--searchcore/src/tests/proton/feed_and_search/feed_and_search.cpp241
-rw-r--r--searchcore/src/tests/proton/feedoperation/.gitignore5
-rw-r--r--searchcore/src/tests/proton/feedoperation/CMakeLists.txt9
-rw-r--r--searchcore/src/tests/proton/feedoperation/feedoperation_test.cpp172
-rw-r--r--searchcore/src/tests/proton/feedtoken/.gitignore4
-rw-r--r--searchcore/src/tests/proton/feedtoken/CMakeLists.txt9
-rw-r--r--searchcore/src/tests/proton/feedtoken/DESC1
-rw-r--r--searchcore/src/tests/proton/feedtoken/FILES1
-rw-r--r--searchcore/src/tests/proton/feedtoken/feedtoken.cpp158
-rw-r--r--searchcore/src/tests/proton/flushengine/.gitignore2
-rw-r--r--searchcore/src/tests/proton/flushengine/CMakeLists.txt13
-rw-r--r--searchcore/src/tests/proton/flushengine/DESC1
-rw-r--r--searchcore/src/tests/proton/flushengine/FILES1
-rw-r--r--searchcore/src/tests/proton/flushengine/flushengine.cpp605
-rw-r--r--searchcore/src/tests/proton/flushengine/prepare_restart_flush_strategy/CMakeLists.txt12
-rw-r--r--searchcore/src/tests/proton/flushengine/prepare_restart_flush_strategy/DESC1
-rw-r--r--searchcore/src/tests/proton/flushengine/prepare_restart_flush_strategy/FILES1
-rw-r--r--searchcore/src/tests/proton/flushengine/prepare_restart_flush_strategy/prepare_restart_flush_strategy_test.cpp297
-rw-r--r--searchcore/src/tests/proton/index/.gitignore4
-rw-r--r--searchcore/src/tests/proton/index/CMakeLists.txt33
-rw-r--r--searchcore/src/tests/proton/index/diskindexcleaner_test.cpp159
-rw-r--r--searchcore/src/tests/proton/index/fusionrunner_test.cpp328
-rw-r--r--searchcore/src/tests/proton/index/index_test.sh7
-rw-r--r--searchcore/src/tests/proton/index/index_writer/.gitignore1
-rw-r--r--searchcore/src/tests/proton/index/index_writer/CMakeLists.txt9
-rw-r--r--searchcore/src/tests/proton/index/index_writer/DESC1
-rw-r--r--searchcore/src/tests/proton/index/index_writer/FILES1
-rw-r--r--searchcore/src/tests/proton/index/index_writer/index_writer_test.cpp117
-rw-r--r--searchcore/src/tests/proton/index/indexcollection_test.cpp129
-rw-r--r--searchcore/src/tests/proton/index/indexmanager_test.cpp690
-rw-r--r--searchcore/src/tests/proton/initializer/.gitignore1
-rw-r--r--searchcore/src/tests/proton/initializer/CMakeLists.txt8
-rw-r--r--searchcore/src/tests/proton/initializer/DESC1
-rw-r--r--searchcore/src/tests/proton/initializer/FILES1
-rw-r--r--searchcore/src/tests/proton/initializer/task_runner_test.cpp141
-rw-r--r--searchcore/src/tests/proton/matchengine/.gitignore6
-rw-r--r--searchcore/src/tests/proton/matchengine/CMakeLists.txt10
-rw-r--r--searchcore/src/tests/proton/matchengine/DESC1
-rw-r--r--searchcore/src/tests/proton/matchengine/FILES1
-rw-r--r--searchcore/src/tests/proton/matchengine/matchengine.cpp214
-rw-r--r--searchcore/src/tests/proton/matching/.cvsignore3
-rw-r--r--searchcore/src/tests/proton/matching/.gitignore14
-rw-r--r--searchcore/src/tests/proton/matching/CMakeLists.txt60
-rw-r--r--searchcore/src/tests/proton/matching/DESC1
-rw-r--r--searchcore/src/tests/proton/matching/FILES1
-rw-r--r--searchcore/src/tests/proton/matching/docid_range_scheduler/.gitignore3
-rw-r--r--searchcore/src/tests/proton/matching/docid_range_scheduler/CMakeLists.txt15
-rw-r--r--searchcore/src/tests/proton/matching/docid_range_scheduler/docid_range_scheduler_bench.cpp226
-rw-r--r--searchcore/src/tests/proton/matching/docid_range_scheduler/docid_range_scheduler_test.cpp286
-rw-r--r--searchcore/src/tests/proton/matching/match_loop_communicator/.gitignore1
-rw-r--r--searchcore/src/tests/proton/matching/match_loop_communicator/CMakeLists.txt8
-rw-r--r--searchcore/src/tests/proton/matching/match_loop_communicator/FILES1
-rw-r--r--searchcore/src/tests/proton/matching/match_loop_communicator/match_loop_communicator_test.cpp118
-rw-r--r--searchcore/src/tests/proton/matching/match_phase_limiter/.gitignore1
-rw-r--r--searchcore/src/tests/proton/matching/match_phase_limiter/CMakeLists.txt8
-rw-r--r--searchcore/src/tests/proton/matching/match_phase_limiter/FILES1
-rw-r--r--searchcore/src/tests/proton/matching/match_phase_limiter/match_phase_limiter_test.cpp361
-rw-r--r--searchcore/src/tests/proton/matching/matching_stats_test.cpp151
-rw-r--r--searchcore/src/tests/proton/matching/matching_test.cpp775
-rw-r--r--searchcore/src/tests/proton/matching/partial_result/.gitignore1
-rw-r--r--searchcore/src/tests/proton/matching/partial_result/CMakeLists.txt8
-rw-r--r--searchcore/src/tests/proton/matching/partial_result/FILES1
-rw-r--r--searchcore/src/tests/proton/matching/partial_result/partial_result_test.cpp159
-rw-r--r--searchcore/src/tests/proton/matching/query_test.cpp900
-rw-r--r--searchcore/src/tests/proton/matching/querynodes_test.cpp486
-rw-r--r--searchcore/src/tests/proton/matching/resolveviewvisitor_test.cpp142
-rw-r--r--searchcore/src/tests/proton/matching/sessionmanager_test.cpp87
-rw-r--r--searchcore/src/tests/proton/matching/termdataextractor_test.cpp167
-rw-r--r--searchcore/src/tests/proton/metrics/documentdb_job_trackers/.gitignore1
-rw-r--r--searchcore/src/tests/proton/metrics/documentdb_job_trackers/CMakeLists.txt8
-rw-r--r--searchcore/src/tests/proton/metrics/documentdb_job_trackers/DESC1
-rw-r--r--searchcore/src/tests/proton/metrics/documentdb_job_trackers/FILES1
-rw-r--r--searchcore/src/tests/proton/metrics/documentdb_job_trackers/documentdb_job_trackers_test.cpp116
-rw-r--r--searchcore/src/tests/proton/metrics/job_load_sampler/.gitignore1
-rw-r--r--searchcore/src/tests/proton/metrics/job_load_sampler/CMakeLists.txt8
-rw-r--r--searchcore/src/tests/proton/metrics/job_load_sampler/DESC1
-rw-r--r--searchcore/src/tests/proton/metrics/job_load_sampler/FILES1
-rw-r--r--searchcore/src/tests/proton/metrics/job_load_sampler/job_load_sampler_test.cpp95
-rw-r--r--searchcore/src/tests/proton/metrics/job_tracked_flush/.gitignore1
-rw-r--r--searchcore/src/tests/proton/metrics/job_tracked_flush/CMakeLists.txt8
-rw-r--r--searchcore/src/tests/proton/metrics/job_tracked_flush/DESC2
-rw-r--r--searchcore/src/tests/proton/metrics/job_tracked_flush/FILES1
-rw-r--r--searchcore/src/tests/proton/metrics/job_tracked_flush/job_tracked_flush_test.cpp139
-rw-r--r--searchcore/src/tests/proton/metrics/metrics_engine/.gitignore1
-rw-r--r--searchcore/src/tests/proton/metrics/metrics_engine/CMakeLists.txt9
-rw-r--r--searchcore/src/tests/proton/metrics/metrics_engine/DESC1
-rw-r--r--searchcore/src/tests/proton/metrics/metrics_engine/FILES1
-rw-r--r--searchcore/src/tests/proton/metrics/metrics_engine/metrics_engine_test.cpp32
-rw-r--r--searchcore/src/tests/proton/persistenceconformance/.gitignore1
-rw-r--r--searchcore/src/tests/proton/persistenceconformance/CMakeLists.txt6
-rw-r--r--searchcore/src/tests/proton/persistenceconformance/DESC1
-rw-r--r--searchcore/src/tests/proton/persistenceconformance/FILES1
-rw-r--r--searchcore/src/tests/proton/persistenceengine/.gitignore1
-rw-r--r--searchcore/src/tests/proton/persistenceengine/CMakeLists.txt10
-rw-r--r--searchcore/src/tests/proton/persistenceengine/DESC1
-rw-r--r--searchcore/src/tests/proton/persistenceengine/FILES1
-rw-r--r--searchcore/src/tests/proton/persistenceengine/persistenceengine_test.cpp828
-rw-r--r--searchcore/src/tests/proton/proton/CMakeLists.txt1
-rw-r--r--searchcore/src/tests/proton/reprocessing/attribute_reprocessing_initializer/.gitignore1
-rw-r--r--searchcore/src/tests/proton/reprocessing/attribute_reprocessing_initializer/CMakeLists.txt10
-rw-r--r--searchcore/src/tests/proton/reprocessing/attribute_reprocessing_initializer/DESC2
-rw-r--r--searchcore/src/tests/proton/reprocessing/attribute_reprocessing_initializer/FILES1
-rw-r--r--searchcore/src/tests/proton/reprocessing/attribute_reprocessing_initializer/attribute_reprocessing_initializer_test.cpp247
-rw-r--r--searchcore/src/tests/proton/reprocessing/document_reprocessing_handler/.gitignore1
-rw-r--r--searchcore/src/tests/proton/reprocessing/document_reprocessing_handler/CMakeLists.txt8
-rw-r--r--searchcore/src/tests/proton/reprocessing/document_reprocessing_handler/DESC2
-rw-r--r--searchcore/src/tests/proton/reprocessing/document_reprocessing_handler/FILES1
-rw-r--r--searchcore/src/tests/proton/reprocessing/document_reprocessing_handler/document_reprocessing_handler_test.cpp124
-rw-r--r--searchcore/src/tests/proton/reprocessing/reprocessing_runner/.gitignore1
-rw-r--r--searchcore/src/tests/proton/reprocessing/reprocessing_runner/CMakeLists.txt8
-rw-r--r--searchcore/src/tests/proton/reprocessing/reprocessing_runner/DESC1
-rw-r--r--searchcore/src/tests/proton/reprocessing/reprocessing_runner/FILES1
-rw-r--r--searchcore/src/tests/proton/reprocessing/reprocessing_runner/reprocessing_runner_test.cpp141
-rw-r--r--searchcore/src/tests/proton/server/.gitignore9
-rw-r--r--searchcore/src/tests/proton/server/CMakeLists.txt52
-rw-r--r--searchcore/src/tests/proton/server/attribute_metrics_test.cpp56
-rw-r--r--searchcore/src/tests/proton/server/data_directory_upgrader/.gitignore1
-rw-r--r--searchcore/src/tests/proton/server/data_directory_upgrader/CMakeLists.txt9
-rw-r--r--searchcore/src/tests/proton/server/data_directory_upgrader/DESC1
-rw-r--r--searchcore/src/tests/proton/server/data_directory_upgrader/FILES1
-rw-r--r--searchcore/src/tests/proton/server/data_directory_upgrader/data_directory_upgrader_test.cpp200
-rw-r--r--searchcore/src/tests/proton/server/disk_mem_usage_filter/.gitignore1
-rw-r--r--searchcore/src/tests/proton/server/disk_mem_usage_filter/CMakeLists.txt10
-rw-r--r--searchcore/src/tests/proton/server/disk_mem_usage_filter/DESC1
-rw-r--r--searchcore/src/tests/proton/server/disk_mem_usage_filter/FILES1
-rw-r--r--searchcore/src/tests/proton/server/disk_mem_usage_filter/disk_mem_usage_filter_test.cpp113
-rw-r--r--searchcore/src/tests/proton/server/documentretriever_test.cpp455
-rw-r--r--searchcore/src/tests/proton/server/feeddebugger_test.cpp85
-rw-r--r--searchcore/src/tests/proton/server/feedstates_test.cpp136
-rw-r--r--searchcore/src/tests/proton/server/health_adapter/.gitignore1
-rw-r--r--searchcore/src/tests/proton/server/health_adapter/CMakeLists.txt9
-rw-r--r--searchcore/src/tests/proton/server/health_adapter/FILES1
-rw-r--r--searchcore/src/tests/proton/server/health_adapter/health_adapter_test.cpp59
-rw-r--r--searchcore/src/tests/proton/server/memoryconfigstore_test.cpp211
-rw-r--r--searchcore/src/tests/proton/server/memoryflush/.gitignore1
-rw-r--r--searchcore/src/tests/proton/server/memoryflush/CMakeLists.txt9
-rw-r--r--searchcore/src/tests/proton/server/memoryflush/DESC1
-rw-r--r--searchcore/src/tests/proton/server/memoryflush/FILES1
-rw-r--r--searchcore/src/tests/proton/server/memoryflush/memoryflush_test.cpp361
-rw-r--r--searchcore/src/tests/proton/server/visibility_handler/.gitignore1
-rw-r--r--searchcore/src/tests/proton/server/visibility_handler/CMakeLists.txt10
-rw-r--r--searchcore/src/tests/proton/server/visibility_handler/DESC1
-rw-r--r--searchcore/src/tests/proton/server/visibility_handler/FILES1
-rw-r--r--searchcore/src/tests/proton/server/visibility_handler/visibility_handler_test.cpp188
-rw-r--r--searchcore/src/tests/proton/statusreport/.gitignore1
-rw-r--r--searchcore/src/tests/proton/statusreport/CMakeLists.txt7
-rw-r--r--searchcore/src/tests/proton/statusreport/DESC1
-rw-r--r--searchcore/src/tests/proton/statusreport/FILES1
-rw-r--r--searchcore/src/tests/proton/statusreport/statusreport.cpp44
-rw-r--r--searchcore/src/tests/proton/summaryengine/.gitignore4
-rw-r--r--searchcore/src/tests/proton/summaryengine/CMakeLists.txt9
-rw-r--r--searchcore/src/tests/proton/summaryengine/DESC1
-rw-r--r--searchcore/src/tests/proton/summaryengine/FILES1
-rw-r--r--searchcore/src/tests/proton/summaryengine/summaryengine.cpp434
-rw-r--r--searchcore/src/tests/proton/verify_ranksetup/.cvsignore3
-rw-r--r--searchcore/src/tests/proton/verify_ranksetup/.gitignore5
-rw-r--r--searchcore/src/tests/proton/verify_ranksetup/CMakeLists.txt7
-rw-r--r--searchcore/src/tests/proton/verify_ranksetup/DESC1
-rw-r--r--searchcore/src/tests/proton/verify_ranksetup/FILES1
-rw-r--r--searchcore/src/tests/proton/verify_ranksetup/invalid_attr_name/.gitignore0
-rw-r--r--searchcore/src/tests/proton/verify_ranksetup/invalid_feature_name/.gitignore0
-rw-r--r--searchcore/src/tests/proton/verify_ranksetup/unsupported_collection_type/.gitignore0
-rw-r--r--searchcore/src/tests/proton/verify_ranksetup/valid/.gitignore0
-rw-r--r--searchcore/src/tests/proton/verify_ranksetup/verify_ranksetup_test.cpp250
-rwxr-xr-xsearchcore/src/tests/proton/verify_ranksetup/verify_ranksetup_test.sh4
-rw-r--r--searchcore/src/tests/slime/convert_document_to_slime/.gitignore0
417 files changed, 33338 insertions, 0 deletions
diff --git a/searchcore/src/tests/.gitignore b/searchcore/src/tests/.gitignore
new file mode 100644
index 00000000000..a3e9c375723
--- /dev/null
+++ b/searchcore/src/tests/.gitignore
@@ -0,0 +1,3 @@
+.depend
+Makefile
+*_test
diff --git a/searchcore/src/tests/applyattrupdates/.gitignore b/searchcore/src/tests/applyattrupdates/.gitignore
new file mode 100644
index 00000000000..b7789427c09
--- /dev/null
+++ b/searchcore/src/tests/applyattrupdates/.gitignore
@@ -0,0 +1,4 @@
+.depend
+Makefile
+applyattrupdates_test
+searchcore_applyattrupdates_test_app
diff --git a/searchcore/src/tests/applyattrupdates/CMakeLists.txt b/searchcore/src/tests/applyattrupdates/CMakeLists.txt
new file mode 100644
index 00000000000..2778d0f62dc
--- /dev/null
+++ b/searchcore/src/tests/applyattrupdates/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_applyattrupdates_test_app
+ SOURCES
+ applyattrupdates.cpp
+ DEPENDS
+ searchcore_pcommon
+ searchcore_util
+)
+vespa_add_test(NAME searchcore_applyattrupdates_test_app COMMAND searchcore_applyattrupdates_test_app)
diff --git a/searchcore/src/tests/applyattrupdates/applyattrupdates.cpp b/searchcore/src/tests/applyattrupdates/applyattrupdates.cpp
new file mode 100644
index 00000000000..bc1f44740da
--- /dev/null
+++ b/searchcore/src/tests/applyattrupdates/applyattrupdates.cpp
@@ -0,0 +1,338 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/document/config/config-documenttypes.h>
+#include <vespa/document/fieldvalue/arrayfieldvalue.h>
+#include <vespa/document/fieldvalue/bytefieldvalue.h>
+#include <vespa/document/fieldvalue/doublefieldvalue.h>
+#include <vespa/document/fieldvalue/floatfieldvalue.h>
+#include <vespa/document/fieldvalue/intfieldvalue.h>
+#include <vespa/document/fieldvalue/longfieldvalue.h>
+#include <vespa/document/fieldvalue/stringfieldvalue.h>
+#include <vespa/document/fieldvalue/weightedsetfieldvalue.h>
+#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/document/update/addvalueupdate.h>
+#include <vespa/document/update/assignvalueupdate.h>
+#include <vespa/document/update/clearvalueupdate.h>
+#include <vespa/document/update/documentupdate.h>
+#include <vespa/document/update/removevalueupdate.h>
+#include <vespa/log/log.h>
+#include <vespa/searchcore/proton/common/attrupdate.h>
+#include <vespa/searchlib/attribute/attributefactory.h>
+#include <vespa/searchlib/attribute/attributevector.hpp>
+#include <vespa/vespalib/testkit/testapp.h>
+
+LOG_SETUP("applyattrupdates_test");
+
+using namespace document;
+using search::attribute::BasicType;
+using search::attribute::Config;
+using search::attribute::CollectionType;
+
+namespace search {
+
+//-----------------------------------------------------------------------------
+
+template <typename T>
+class Vector
+{
+private:
+ std::vector<T> _vec;
+public:
+ Vector() : _vec() {}
+ size_t size() const {
+ return _vec.size();
+ }
+ Vector & pb(const T & val) {
+ _vec.push_back(val);
+ return *this;
+ }
+ const T & operator [] (size_t idx) const {
+ return _vec[idx];
+ }
+};
+
+//-----------------------------------------------------------------------------
+
+typedef AttributeVector::SP AttributePtr;
+typedef AttributeVector::WeightedInt WeightedInt;
+typedef AttributeVector::WeightedFloat WeightedFloat;
+typedef AttributeVector::WeightedString WeightedString;
+
+class Test : public vespalib::TestApp
+{
+private:
+ template <typename T, typename VectorType>
+ AttributePtr
+ create(uint32_t numDocs, T val, int32_t weight,
+ const std::string & baseName,
+ const Config &info)
+ {
+ LOG(info, "create attribute vector: %s", baseName.c_str());
+ AttributePtr vec = AttributeFactory::createAttribute(baseName, info);
+ VectorType * api = static_cast<VectorType *>(vec.get());
+ for (uint32_t i = 0; i < numDocs; ++i) {
+ if (!api->addDoc(i)) {
+ LOG(info, "failed adding doc: %u", i);
+ return AttributePtr();
+ }
+ if (api->hasMultiValue()) {
+ if (!api->append(i, val, weight)) {
+ LOG(info, "failed append to doc: %u", i);
+ }
+ } else {
+ if (!api->update(i, val)) {
+ LOG(info, "failed update doc: %u", i);
+ return AttributePtr();
+ }
+ }
+ }
+ api->commit();
+ return vec;
+ }
+
+ template <typename T>
+ bool check(const AttributePtr & vec, uint32_t docId, const Vector<T> & values) {
+ uint32_t sz = vec->getValueCount(docId);
+ if (!EXPECT_EQUAL(sz, values.size())) return false;
+ std::vector<T> buf(sz);
+ uint32_t asz = vec->get(docId, &buf[0], sz);
+ if (!EXPECT_EQUAL(sz, asz)) return false;
+ for (uint32_t i = 0; i < values.size(); ++i) {
+ if (!EXPECT_EQUAL(buf[i].getValue(), values[i].getValue())) return false;
+ if (!EXPECT_EQUAL(buf[i].getWeight(), values[i].getWeight())) return false;
+ }
+ return true;
+ }
+
+ void applyValueUpdate(AttributeVector & vec, uint32_t docId, const ValueUpdate & upd) {
+ FieldUpdate fupd(_docType->getField(vec.getName()));
+ fupd.addUpdate(upd);
+ search::AttrUpdate::handleUpdate(vec, docId, fupd);
+ vec.commit();
+ }
+
+ void applyArrayUpdates(AttributeVector & vec, const FieldValue & assign,
+ const FieldValue & first, const FieldValue & second) {
+ applyValueUpdate(vec, 0, AssignValueUpdate(assign));
+ applyValueUpdate(vec, 1, AddValueUpdate(second));
+ applyValueUpdate(vec, 2, RemoveValueUpdate(first));
+ applyValueUpdate(vec, 3, ClearValueUpdate());
+ }
+
+ void applyWeightedSetUpdates(AttributeVector & vec, const FieldValue & assign,
+ const FieldValue & first, const FieldValue & second) {
+ applyValueUpdate(vec, 0, AssignValueUpdate(assign));
+ applyValueUpdate(vec, 1, AddValueUpdate(second, 20));
+ applyValueUpdate(vec, 2, RemoveValueUpdate(first));
+ applyValueUpdate(vec, 3, ClearValueUpdate());
+ ArithmeticValueUpdate arithmetic(ArithmeticValueUpdate::Add, 10);
+ applyValueUpdate(vec, 4, MapValueUpdate(first, arithmetic));
+ }
+
+ void requireThatSingleAttributesAreUpdated();
+ void requireThatArrayAttributesAreUpdated();
+ void requireThatWeightedSetAttributesAreUpdated();
+
+ DocumentTypeRepo _repo;
+ const DocumentType* _docType;
+
+public:
+ Test();
+ int Main();
+};
+
+void
+Test::requireThatSingleAttributesAreUpdated()
+{
+ using search::attribute::getUndefined;
+ CollectionType ct(CollectionType::SINGLE);
+ {
+ BasicType bt(BasicType::INT32);
+ AttributePtr vec = create<int32_t, IntegerAttribute>(3, 32, 0,
+ "in1/int",
+ Config(bt, ct));
+ applyValueUpdate(*vec, 0, AssignValueUpdate(IntFieldValue(64)));
+ applyValueUpdate(*vec, 1, ArithmeticValueUpdate(ArithmeticValueUpdate::Add, 10));
+ applyValueUpdate(*vec, 2, ClearValueUpdate());
+ EXPECT_EQUAL(3u, vec->getNumDocs());
+ EXPECT_TRUE(check(vec, 0, Vector<WeightedInt>().pb(WeightedInt(64))));
+ EXPECT_TRUE(check(vec, 1, Vector<WeightedInt>().pb(WeightedInt(42))));
+ EXPECT_TRUE(check(vec, 2, Vector<WeightedInt>().pb(WeightedInt(getUndefined<int32_t>()))));
+ }
+ {
+ BasicType bt(BasicType::FLOAT);
+ AttributePtr vec = create<float, FloatingPointAttribute>(3, 55.5f, 0,
+ "in1/float",
+ Config(bt,
+ ct));
+ applyValueUpdate(*vec, 0, AssignValueUpdate(FloatFieldValue(77.7f)));
+ applyValueUpdate(*vec, 1, ArithmeticValueUpdate(ArithmeticValueUpdate::Add, 10));
+ applyValueUpdate(*vec, 2, ClearValueUpdate());
+ EXPECT_EQUAL(3u, vec->getNumDocs());
+ EXPECT_TRUE(check(vec, 0, Vector<WeightedFloat>().pb(WeightedFloat(77.7f))));
+ EXPECT_TRUE(check(vec, 1, Vector<WeightedFloat>().pb(WeightedFloat(65.5f))));
+ EXPECT_TRUE(std::isnan(vec->getFloat(2)));
+ }
+ {
+ BasicType bt(BasicType::STRING);
+ AttributePtr vec = create<std::string, StringAttribute>(3, "first", 0,
+ "in1/string",
+ Config(bt,
+ ct));
+ applyValueUpdate(*vec, 0, AssignValueUpdate(StringFieldValue("second")));
+ applyValueUpdate(*vec, 2, ClearValueUpdate());
+ EXPECT_EQUAL(3u, vec->getNumDocs());
+ EXPECT_TRUE(check(vec, 0, Vector<WeightedString>().pb(WeightedString("second"))));
+ EXPECT_TRUE(check(vec, 1, Vector<WeightedString>().pb(WeightedString("first"))));
+ EXPECT_TRUE(check(vec, 2, Vector<WeightedString>().pb(WeightedString(""))));
+ }
+}
+
+void
+Test::requireThatArrayAttributesAreUpdated()
+{
+ CollectionType ct(CollectionType::ARRAY);
+ {
+ BasicType bt(BasicType::INT32);
+ AttributePtr vec = create<int32_t, IntegerAttribute>(5, 32, 1,
+ "in1/aint",
+ Config(bt, ct));
+ IntFieldValue first(32);
+ IntFieldValue second(64);
+ ArrayFieldValue assign(_docType->getField("aint").getDataType());
+ assign.add(second);
+ applyArrayUpdates(*vec, assign, first, second);
+
+ EXPECT_EQUAL(5u, vec->getNumDocs());
+ EXPECT_TRUE(check(vec, 0, Vector<WeightedInt>().pb(WeightedInt(64))));
+ EXPECT_TRUE(check(vec, 1, Vector<WeightedInt>().pb(WeightedInt(32)).pb(WeightedInt(64))));
+ EXPECT_TRUE(check(vec, 2, Vector<WeightedInt>()));
+ EXPECT_TRUE(check(vec, 3, Vector<WeightedInt>()));
+ EXPECT_TRUE(check(vec, 4, Vector<WeightedInt>().pb(WeightedInt(32))));
+ }
+ {
+ BasicType bt(BasicType::FLOAT);
+ AttributePtr vec = create<float, FloatingPointAttribute>(5, 55.5f, 1,
+ "in1/afloat",
+ Config(bt,
+ ct));
+ FloatFieldValue first(55.5f);
+ FloatFieldValue second(77.7f);
+ ArrayFieldValue assign(_docType->getField("afloat").getDataType());
+ assign.add(second);
+ applyArrayUpdates(*vec, assign, first, second);
+
+ EXPECT_EQUAL(5u, vec->getNumDocs());
+ EXPECT_TRUE(check(vec, 0, Vector<WeightedFloat>().pb(WeightedFloat(77.7f))));
+ EXPECT_TRUE(check(vec, 1, Vector<WeightedFloat>().pb(WeightedFloat(55.5f)).pb(WeightedFloat(77.7f))));
+ EXPECT_TRUE(check(vec, 2, Vector<WeightedFloat>()));
+ EXPECT_TRUE(check(vec, 3, Vector<WeightedFloat>()));
+ EXPECT_TRUE(check(vec, 4, Vector<WeightedFloat>().pb(WeightedFloat(55.5f))));
+ }
+ {
+ BasicType bt(BasicType::STRING);
+ AttributePtr vec = create<std::string, StringAttribute>(5, "first", 1,
+ "in1/astring",
+ Config(bt, ct));
+ StringFieldValue first("first");
+ StringFieldValue second("second");
+ ArrayFieldValue assign(_docType->getField("astring").getDataType());
+ assign.add(second);
+ applyArrayUpdates(*vec, assign, first, second);
+
+ EXPECT_EQUAL(5u, vec->getNumDocs());
+ EXPECT_TRUE(check(vec, 0, Vector<WeightedString>().pb(WeightedString("second"))));
+ EXPECT_TRUE(check(vec, 1, Vector<WeightedString>().pb(WeightedString("first")).pb(WeightedString("second"))));
+ EXPECT_TRUE(check(vec, 2, Vector<WeightedString>()));
+ EXPECT_TRUE(check(vec, 3, Vector<WeightedString>()));
+ EXPECT_TRUE(check(vec, 4, Vector<WeightedString>().pb(WeightedString("first"))));
+ }
+}
+
+void
+Test::requireThatWeightedSetAttributesAreUpdated()
+{
+ CollectionType ct(CollectionType::WSET);
+ {
+ BasicType bt(BasicType::INT32);
+ AttributePtr vec = create<int32_t, IntegerAttribute>(5, 32, 100,
+ "in1/wsint",
+ Config(bt, ct));
+ IntFieldValue first(32);
+ IntFieldValue second(64);
+ WeightedSetFieldValue
+ assign(_docType->getField("wsint").getDataType());
+ assign.add(second, 20);
+ applyWeightedSetUpdates(*vec, assign, first, second);
+
+ EXPECT_EQUAL(5u, vec->getNumDocs());
+ EXPECT_TRUE(check(vec, 0, Vector<WeightedInt>().pb(WeightedInt(64, 20))));
+ EXPECT_TRUE(check(vec, 1, Vector<WeightedInt>().pb(WeightedInt(32, 100)).pb(WeightedInt(64, 20))));
+ EXPECT_TRUE(check(vec, 2, Vector<WeightedInt>()));
+ EXPECT_TRUE(check(vec, 3, Vector<WeightedInt>()));
+ EXPECT_TRUE(check(vec, 4, Vector<WeightedInt>().pb(WeightedInt(32, 110))));
+ }
+ {
+ BasicType bt(BasicType::FLOAT);
+ AttributePtr vec = create<float, FloatingPointAttribute>(5, 55.5f, 100,
+ "in1/wsfloat",
+ Config(bt,
+ ct));
+ FloatFieldValue first(55.5f);
+ FloatFieldValue second(77.7f);
+ WeightedSetFieldValue
+ assign(_docType->getField("wsfloat").getDataType());
+ assign.add(second, 20);
+ applyWeightedSetUpdates(*vec, assign, first, second);
+
+ EXPECT_EQUAL(5u, vec->getNumDocs());
+ EXPECT_TRUE(check(vec, 0, Vector<WeightedFloat>().pb(WeightedFloat(77.7f, 20))));
+ EXPECT_TRUE(check(vec, 1, Vector<WeightedFloat>().pb(WeightedFloat(55.5f, 100)).pb(WeightedFloat(77.7f, 20))));
+ EXPECT_TRUE(check(vec, 2, Vector<WeightedFloat>()));
+ EXPECT_TRUE(check(vec, 3, Vector<WeightedFloat>()));
+ EXPECT_TRUE(check(vec, 4, Vector<WeightedFloat>().pb(WeightedFloat(55.5f, 110))));
+ }
+ {
+ BasicType bt(BasicType::STRING);
+ AttributePtr vec = create<std::string, StringAttribute>(5, "first",
+ 100,
+ "in1/wsstring",
+ Config(bt,
+ ct));
+ StringFieldValue first("first");
+ StringFieldValue second("second");
+ WeightedSetFieldValue
+ assign(_docType->getField("wsstring").getDataType());
+ assign.add(second, 20);
+ applyWeightedSetUpdates(*vec, assign, first, second);
+
+ EXPECT_EQUAL(5u, vec->getNumDocs());
+ EXPECT_TRUE(check(vec, 0, Vector<WeightedString>().pb(WeightedString("second", 20))));
+ EXPECT_TRUE(check(vec, 1, Vector<WeightedString>().pb(WeightedString("first", 100)).pb(WeightedString("second", 20))));
+ EXPECT_TRUE(check(vec, 2, Vector<WeightedString>()));
+ EXPECT_TRUE(check(vec, 3, Vector<WeightedString>()));
+ EXPECT_TRUE(check(vec, 4, Vector<WeightedString>().pb(WeightedString("first", 110))));
+ }
+}
+
+Test::Test()
+ : _repo(readDocumenttypesConfig("doctypes.cfg")),
+ _docType(_repo.getDocumentType("testdoc"))
+{
+}
+
+int
+Test::Main()
+{
+ TEST_INIT("applyattrupdates_test");
+
+ TEST_DO(requireThatSingleAttributesAreUpdated());
+ TEST_DO(requireThatArrayAttributesAreUpdated());
+ TEST_DO(requireThatWeightedSetAttributesAreUpdated());
+
+ TEST_DONE();
+}
+
+} // namespace search
+
+TEST_APPHOOK(search::Test);
diff --git a/searchcore/src/tests/applyattrupdates/doctypes.cfg b/searchcore/src/tests/applyattrupdates/doctypes.cfg
new file mode 100644
index 00000000000..23cbf06629e
--- /dev/null
+++ b/searchcore/src/tests/applyattrupdates/doctypes.cfg
@@ -0,0 +1,174 @@
+enablecompression false
+documenttype[1]
+documenttype[0].id -1175657560
+documenttype[0].name "testdoc"
+documenttype[0].version 0
+documenttype[0].headerstruct -1636745577
+documenttype[0].bodystruct 1878320748
+documenttype[0].inherits[0]
+documenttype[0].datatype[8]
+documenttype[0].datatype[0].id 100
+documenttype[0].datatype[0].type ARRAY
+documenttype[0].datatype[0].array.element.id 0
+documenttype[0].datatype[0].map.key.id 0
+documenttype[0].datatype[0].map.value.id 0
+documenttype[0].datatype[0].wset.key.id 0
+documenttype[0].datatype[0].wset.createifnonexistent false
+documenttype[0].datatype[0].wset.removeifzero false
+documenttype[0].datatype[0].annotationref.annotation.id 0
+documenttype[0].datatype[0].sstruct.name ""
+documenttype[0].datatype[0].sstruct.version 0
+documenttype[0].datatype[0].sstruct.compression.type NONE
+documenttype[0].datatype[0].sstruct.compression.level 0
+documenttype[0].datatype[0].sstruct.compression.threshold 95
+documenttype[0].datatype[0].sstruct.compression.minsize 200
+documenttype[0].datatype[0].sstruct.field[0]
+documenttype[0].datatype[1].id 101
+documenttype[0].datatype[1].type ARRAY
+documenttype[0].datatype[1].array.element.id 1
+documenttype[0].datatype[1].map.key.id 0
+documenttype[0].datatype[1].map.value.id 0
+documenttype[0].datatype[1].wset.key.id 0
+documenttype[0].datatype[1].wset.createifnonexistent false
+documenttype[0].datatype[1].wset.removeifzero false
+documenttype[0].datatype[1].annotationref.annotation.id 0
+documenttype[0].datatype[1].sstruct.name ""
+documenttype[0].datatype[1].sstruct.version 0
+documenttype[0].datatype[1].sstruct.compression.type NONE
+documenttype[0].datatype[1].sstruct.compression.level 0
+documenttype[0].datatype[1].sstruct.compression.threshold 95
+documenttype[0].datatype[1].sstruct.compression.minsize 200
+documenttype[0].datatype[1].sstruct.field[0]
+documenttype[0].datatype[2].id 102
+documenttype[0].datatype[2].type ARRAY
+documenttype[0].datatype[2].array.element.id 2
+documenttype[0].datatype[2].map.key.id 0
+documenttype[0].datatype[2].map.value.id 0
+documenttype[0].datatype[2].wset.key.id 0
+documenttype[0].datatype[2].wset.createifnonexistent false
+documenttype[0].datatype[2].wset.removeifzero false
+documenttype[0].datatype[2].annotationref.annotation.id 0
+documenttype[0].datatype[2].sstruct.name ""
+documenttype[0].datatype[2].sstruct.version 0
+documenttype[0].datatype[2].sstruct.compression.type NONE
+documenttype[0].datatype[2].sstruct.compression.level 0
+documenttype[0].datatype[2].sstruct.compression.threshold 95
+documenttype[0].datatype[2].sstruct.compression.minsize 200
+documenttype[0].datatype[2].sstruct.field[0]
+documenttype[0].datatype[3].id 200
+documenttype[0].datatype[3].type WSET
+documenttype[0].datatype[3].array.element.id 0
+documenttype[0].datatype[3].map.key.id 0
+documenttype[0].datatype[3].map.value.id 0
+documenttype[0].datatype[3].wset.key.id 0
+documenttype[0].datatype[3].wset.createifnonexistent false
+documenttype[0].datatype[3].wset.removeifzero false
+documenttype[0].datatype[3].annotationref.annotation.id 0
+documenttype[0].datatype[3].sstruct.name ""
+documenttype[0].datatype[3].sstruct.version 0
+documenttype[0].datatype[3].sstruct.compression.type NONE
+documenttype[0].datatype[3].sstruct.compression.level 0
+documenttype[0].datatype[3].sstruct.compression.threshold 95
+documenttype[0].datatype[3].sstruct.compression.minsize 200
+documenttype[0].datatype[3].sstruct.field[0]
+documenttype[0].datatype[4].id 201
+documenttype[0].datatype[4].type WSET
+documenttype[0].datatype[4].array.element.id 0
+documenttype[0].datatype[4].map.key.id 0
+documenttype[0].datatype[4].map.value.id 0
+documenttype[0].datatype[4].wset.key.id 1
+documenttype[0].datatype[4].wset.createifnonexistent false
+documenttype[0].datatype[4].wset.removeifzero false
+documenttype[0].datatype[4].annotationref.annotation.id 0
+documenttype[0].datatype[4].sstruct.name ""
+documenttype[0].datatype[4].sstruct.version 0
+documenttype[0].datatype[4].sstruct.compression.type NONE
+documenttype[0].datatype[4].sstruct.compression.level 0
+documenttype[0].datatype[4].sstruct.compression.threshold 95
+documenttype[0].datatype[4].sstruct.compression.minsize 200
+documenttype[0].datatype[4].sstruct.field[0]
+documenttype[0].datatype[5].id 202
+documenttype[0].datatype[5].type WSET
+documenttype[0].datatype[5].array.element.id 0
+documenttype[0].datatype[5].map.key.id 0
+documenttype[0].datatype[5].map.value.id 0
+documenttype[0].datatype[5].wset.key.id 2
+documenttype[0].datatype[5].wset.createifnonexistent false
+documenttype[0].datatype[5].wset.removeifzero false
+documenttype[0].datatype[5].annotationref.annotation.id 0
+documenttype[0].datatype[5].sstruct.name ""
+documenttype[0].datatype[5].sstruct.version 0
+documenttype[0].datatype[5].sstruct.compression.type NONE
+documenttype[0].datatype[5].sstruct.compression.level 0
+documenttype[0].datatype[5].sstruct.compression.threshold 95
+documenttype[0].datatype[5].sstruct.compression.minsize 200
+documenttype[0].datatype[5].sstruct.field[0]
+documenttype[0].datatype[6].id -1636745577
+documenttype[0].datatype[6].type STRUCT
+documenttype[0].datatype[6].array.element.id 0
+documenttype[0].datatype[6].map.key.id 0
+documenttype[0].datatype[6].map.value.id 0
+documenttype[0].datatype[6].wset.key.id 0
+documenttype[0].datatype[6].wset.createifnonexistent false
+documenttype[0].datatype[6].wset.removeifzero false
+documenttype[0].datatype[6].annotationref.annotation.id 0
+documenttype[0].datatype[6].sstruct.name "testdoc.header"
+documenttype[0].datatype[6].sstruct.version 0
+documenttype[0].datatype[6].sstruct.compression.type NONE
+documenttype[0].datatype[6].sstruct.compression.level 0
+documenttype[0].datatype[6].sstruct.compression.threshold 90
+documenttype[0].datatype[6].sstruct.compression.minsize 0
+documenttype[0].datatype[6].sstruct.field[9]
+documenttype[0].datatype[6].sstruct.field[0].name "afloat"
+documenttype[0].datatype[6].sstruct.field[0].id 401182245
+documenttype[0].datatype[6].sstruct.field[0].id_v6 303812879
+documenttype[0].datatype[6].sstruct.field[0].datatype 101
+documenttype[0].datatype[6].sstruct.field[1].name "aint"
+documenttype[0].datatype[6].sstruct.field[1].id 19542829
+documenttype[0].datatype[6].sstruct.field[1].id_v6 764769238
+documenttype[0].datatype[6].sstruct.field[1].datatype 100
+documenttype[0].datatype[6].sstruct.field[2].name "astring"
+documenttype[0].datatype[6].sstruct.field[2].id 1494118564
+documenttype[0].datatype[6].sstruct.field[2].id_v6 1745177607
+documenttype[0].datatype[6].sstruct.field[2].datatype 102
+documenttype[0].datatype[6].sstruct.field[3].name "float"
+documenttype[0].datatype[6].sstruct.field[3].id 151686688
+documenttype[0].datatype[6].sstruct.field[3].id_v6 827904364
+documenttype[0].datatype[6].sstruct.field[3].datatype 1
+documenttype[0].datatype[6].sstruct.field[4].name "int"
+documenttype[0].datatype[6].sstruct.field[4].id 123383020
+documenttype[0].datatype[6].sstruct.field[4].id_v6 2014709351
+documenttype[0].datatype[6].sstruct.field[4].datatype 0
+documenttype[0].datatype[6].sstruct.field[5].name "string"
+documenttype[0].datatype[6].sstruct.field[5].id 1572342091
+documenttype[0].datatype[6].sstruct.field[5].id_v6 1847335717
+documenttype[0].datatype[6].sstruct.field[5].datatype 2
+documenttype[0].datatype[6].sstruct.field[6].name "wsfloat"
+documenttype[0].datatype[6].sstruct.field[6].id 821634779
+documenttype[0].datatype[6].sstruct.field[6].id_v6 1168403784
+documenttype[0].datatype[6].sstruct.field[6].datatype 201
+documenttype[0].datatype[6].sstruct.field[7].name "wsint"
+documenttype[0].datatype[6].sstruct.field[7].id 1160390473
+documenttype[0].datatype[6].sstruct.field[7].id_v6 1177062897
+documenttype[0].datatype[6].sstruct.field[7].datatype 200
+documenttype[0].datatype[6].sstruct.field[8].name "wsstring"
+documenttype[0].datatype[6].sstruct.field[8].id 981031285
+documenttype[0].datatype[6].sstruct.field[8].id_v6 682978193
+documenttype[0].datatype[6].sstruct.field[8].datatype 202
+documenttype[0].datatype[7].id 1878320748
+documenttype[0].datatype[7].type STRUCT
+documenttype[0].datatype[7].array.element.id 0
+documenttype[0].datatype[7].map.key.id 0
+documenttype[0].datatype[7].map.value.id 0
+documenttype[0].datatype[7].wset.key.id 0
+documenttype[0].datatype[7].wset.createifnonexistent false
+documenttype[0].datatype[7].wset.removeifzero false
+documenttype[0].datatype[7].annotationref.annotation.id 0
+documenttype[0].datatype[7].sstruct.name "testdoc.body"
+documenttype[0].datatype[7].sstruct.version 0
+documenttype[0].datatype[7].sstruct.compression.type NONE
+documenttype[0].datatype[7].sstruct.compression.level 0
+documenttype[0].datatype[7].sstruct.compression.threshold 90
+documenttype[0].datatype[7].sstruct.compression.minsize 0
+documenttype[0].datatype[7].sstruct.field[0]
+documenttype[0].annotationtype[0]
diff --git a/searchcore/src/tests/fdispatch/randomrow/.gitignore b/searchcore/src/tests/fdispatch/randomrow/.gitignore
new file mode 100644
index 00000000000..bfe075b287a
--- /dev/null
+++ b/searchcore/src/tests/fdispatch/randomrow/.gitignore
@@ -0,0 +1 @@
+searchcore_randomrow_test_app
diff --git a/searchcore/src/tests/fdispatch/randomrow/CMakeLists.txt b/searchcore/src/tests/fdispatch/randomrow/CMakeLists.txt
new file mode 100644
index 00000000000..f3ad936ded2
--- /dev/null
+++ b/searchcore/src/tests/fdispatch/randomrow/CMakeLists.txt
@@ -0,0 +1,10 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_randomrow_test_app
+ SOURCES
+ randomrow_test.cpp
+ DEPENDS
+ searchcore_fdispatch_search
+ searchcore_util
+ searchcore_fdcommon
+)
+vespa_add_test(NAME searchcore_randomrow_test_app COMMAND searchcore_randomrow_test_app)
diff --git a/searchcore/src/tests/fdispatch/randomrow/DESC b/searchcore/src/tests/fdispatch/randomrow/DESC
new file mode 100644
index 00000000000..86d8eecd44c
--- /dev/null
+++ b/searchcore/src/tests/fdispatch/randomrow/DESC
@@ -0,0 +1 @@
+randomrow test. Take a look at randomrow_test.cpp for details.
diff --git a/searchcore/src/tests/fdispatch/randomrow/FILES b/searchcore/src/tests/fdispatch/randomrow/FILES
new file mode 100644
index 00000000000..2f15498219f
--- /dev/null
+++ b/searchcore/src/tests/fdispatch/randomrow/FILES
@@ -0,0 +1 @@
+randomrow_test.cpp
diff --git a/searchcore/src/tests/fdispatch/randomrow/randomrow_test.cpp b/searchcore/src/tests/fdispatch/randomrow/randomrow_test.cpp
new file mode 100644
index 00000000000..375afee1777
--- /dev/null
+++ b/searchcore/src/tests/fdispatch/randomrow/randomrow_test.cpp
@@ -0,0 +1,89 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("randomrow_test");
+#include <vespa/vespalib/testkit/testapp.h>
+
+#include <vespa/searchcore/fdispatch/search/plain_dataset.h>
+
+using fdispatch::StateOfRows;
+
+TEST("requireThatEmpyStateReturnsRowZero")
+{
+ StateOfRows s(1, 1.0, 1000);
+ EXPECT_EQUAL(0u, s.getRandomWeightedRow());
+ EXPECT_EQUAL(1.0, s.getRowState(0).getAverageSearchTime());
+}
+
+TEST("requireThatDecayWorks")
+{
+ StateOfRows s(1, 1.0, 1000);
+ s.updateSearchTime(1.0, 0);
+ EXPECT_EQUAL(1.0, s.getRowState(0).getAverageSearchTime());
+ s.updateSearchTime(2.0, 0);
+ EXPECT_EQUAL(1.001, s.getRowState(0).getAverageSearchTime());
+ s.updateSearchTime(2.0, 0);
+ EXPECT_APPROX(1.002, s.getRowState(0).getAverageSearchTime(), 0.0001);
+ s.updateSearchTime(0.1, 0);
+ s.updateSearchTime(0.1, 0);
+ s.updateSearchTime(0.1, 0);
+ s.updateSearchTime(0.1, 0);
+ EXPECT_APPROX(0.998396, s.getRowState(0).getAverageSearchTime(), 0.000001);
+}
+
+TEST("requireWeightedSelectionWorks")
+{
+ StateOfRows s(5, 1.0, 1000);
+ EXPECT_EQUAL(0u, s.getWeightedNode(-0.1));
+ EXPECT_EQUAL(0u, s.getWeightedNode(0.0));
+ EXPECT_EQUAL(0u, s.getWeightedNode(0.1));
+ EXPECT_EQUAL(1u, s.getWeightedNode(0.2));
+ EXPECT_EQUAL(1u, s.getWeightedNode(0.39));
+ EXPECT_EQUAL(2u, s.getWeightedNode(0.4));
+ EXPECT_EQUAL(3u, s.getWeightedNode(0.6));
+ EXPECT_EQUAL(4u, s.getWeightedNode(0.8));
+ EXPECT_EQUAL(4u, s.getWeightedNode(2.0));
+}
+
+TEST("requireWeightedSelectionWorksFineWithDifferentWeights")
+{
+ StateOfRows s(5, 1.0, 1000);
+ s.getRowState(0).setAverageSearchTime(0.1);
+ s.getRowState(1).setAverageSearchTime(0.2);
+ s.getRowState(2).setAverageSearchTime(0.3);
+ s.getRowState(3).setAverageSearchTime(0.4);
+ s.getRowState(4).setAverageSearchTime(0.5);
+ EXPECT_EQUAL(0.1, s.getRowState(0).getAverageSearchTime());
+ EXPECT_EQUAL(0.2, s.getRowState(1).getAverageSearchTime());
+ EXPECT_EQUAL(0.3, s.getRowState(2).getAverageSearchTime());
+ EXPECT_EQUAL(0.4, s.getRowState(3).getAverageSearchTime());
+ EXPECT_EQUAL(0.5, s.getRowState(4).getAverageSearchTime());
+ EXPECT_EQUAL(0u, s.getWeightedNode(-0.1));
+ EXPECT_EQUAL(0u, s.getWeightedNode(0.0));
+ EXPECT_EQUAL(0u, s.getWeightedNode(0.4379));
+ EXPECT_EQUAL(1u, s.getWeightedNode(0.4380));
+ EXPECT_EQUAL(1u, s.getWeightedNode(0.6569));
+ EXPECT_EQUAL(2u, s.getWeightedNode(0.6570));
+ EXPECT_EQUAL(2u, s.getWeightedNode(0.8029));
+ EXPECT_EQUAL(3u, s.getWeightedNode(0.8030));
+ EXPECT_EQUAL(3u, s.getWeightedNode(0.9124));
+ EXPECT_EQUAL(4u, s.getWeightedNode(0.9125));
+ EXPECT_EQUAL(4u, s.getWeightedNode(2.0));
+}
+
+TEST("require randomness")
+{
+ StateOfRows s(3, 1.0, 1000);
+ s.getRowState(0).setAverageSearchTime(1.0);
+ s.getRowState(1).setAverageSearchTime(1.0);
+ s.getRowState(2).setAverageSearchTime(1.0);
+ size_t counts[3] = {0,0,0};
+ for (size_t i(0); i < 1000; i++) {
+ counts[s.getRandomWeightedRow()]++;
+ }
+ EXPECT_EQUAL(322ul, counts[0]);
+ EXPECT_EQUAL(345ul, counts[1]);
+ EXPECT_EQUAL(333ul, counts[2]);
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/fdispatch/search_path/.gitignore b/searchcore/src/tests/fdispatch/search_path/.gitignore
new file mode 100644
index 00000000000..7452ecf3ecc
--- /dev/null
+++ b/searchcore/src/tests/fdispatch/search_path/.gitignore
@@ -0,0 +1 @@
+searchcore_search_path_test_app
diff --git a/searchcore/src/tests/fdispatch/search_path/CMakeLists.txt b/searchcore/src/tests/fdispatch/search_path/CMakeLists.txt
new file mode 100644
index 00000000000..86067faa4cc
--- /dev/null
+++ b/searchcore/src/tests/fdispatch/search_path/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_search_path_test_app
+ SOURCES
+ search_path_test.cpp
+ DEPENDS
+ searchcore_fdispatch_search
+)
+vespa_add_test(NAME searchcore_search_path_test_app COMMAND searchcore_search_path_test_app)
diff --git a/searchcore/src/tests/fdispatch/search_path/DESC b/searchcore/src/tests/fdispatch/search_path/DESC
new file mode 100644
index 00000000000..4bc24883896
--- /dev/null
+++ b/searchcore/src/tests/fdispatch/search_path/DESC
@@ -0,0 +1 @@
+search_path test. Take a look at search_path_test.cpp for details.
diff --git a/searchcore/src/tests/fdispatch/search_path/FILES b/searchcore/src/tests/fdispatch/search_path/FILES
new file mode 100644
index 00000000000..a38e13c26fd
--- /dev/null
+++ b/searchcore/src/tests/fdispatch/search_path/FILES
@@ -0,0 +1 @@
+search_path_test.cpp
diff --git a/searchcore/src/tests/fdispatch/search_path/search_path_test.cpp b/searchcore/src/tests/fdispatch/search_path/search_path_test.cpp
new file mode 100644
index 00000000000..8dd3ada4270
--- /dev/null
+++ b/searchcore/src/tests/fdispatch/search_path/search_path_test.cpp
@@ -0,0 +1,124 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("search_path_test");
+#include <vespa/vespalib/testkit/testapp.h>
+
+#include <vespa/searchcore/fdispatch/search/search_path.h>
+#include <vespa/searchcore/fdispatch/search/fnet_search.h>
+#include <iostream>
+
+using namespace fdispatch;
+
+template <typename T>
+vespalib::string
+toString(const T &val)
+{
+ std::ostringstream oss;
+ oss << "[";
+ bool first = true;
+ for (auto v : val) {
+ if (!first) oss << ",";
+ oss << v;
+ first = false;
+ }
+ oss << "]";
+ return oss.str();
+}
+
+void
+assertParts(const std::vector<size_t> &exp, const SearchPath::NodeList &act)
+{
+ std::string expStr = toString(exp);
+ std::string actStr = toString(act);
+ std::cout << "assertParts(" << expStr << "," << actStr << ")" << std::endl;
+ EXPECT_EQUAL(expStr, actStr);
+}
+
+void
+assertElement(const std::vector<size_t> &parts, size_t row, const SearchPath::Element &elem)
+{
+ assertParts(parts, elem.nodes());
+ EXPECT_TRUE(elem.hasRow());
+ EXPECT_EQUAL(row, elem.row());
+}
+
+void
+assertElement(const std::vector<size_t> &parts, const SearchPath::Element &elem)
+{
+ assertParts(parts, elem.nodes());
+ EXPECT_FALSE(elem.hasRow());
+}
+
+void
+assertSinglePath(const std::vector<size_t> &parts, const vespalib::string &spec, size_t numNodes=0)
+{
+ SearchPath p(spec, numNodes);
+ EXPECT_EQUAL(1u, p.elements().size());
+ assertElement(parts, p.elements().front());
+}
+
+void
+assertSinglePath(const std::vector<size_t> &parts, size_t row, const vespalib::string &spec, size_t numNodes=0)
+{
+ SearchPath p(spec, numNodes);
+ EXPECT_EQUAL(1u, p.elements().size());
+ assertElement(parts, row, p.elements().front());
+}
+
+TEST("requireThatSinglePartCanBeSpecified")
+{
+ assertSinglePath({0}, "0/");
+}
+
+TEST("requireThatMultiplePartsCanBeSpecified")
+{
+ assertSinglePath({1,3,5}, "1,3,5/");
+}
+
+TEST("requireThatRangePartsCanBeSpecified")
+{
+ assertSinglePath({1,2,3}, "[1,4>/", 6);
+}
+
+TEST("requireThatAllPartsCanBeSpecified")
+{
+ assertSinglePath({0,1,2,3}, "*/", 4);
+}
+
+TEST("requireThatRowCanBeSpecified")
+{
+ assertSinglePath({1}, 2, "1/2");
+}
+
+TEST("requireThatMultipleSimpleElementsCanBeSpecified")
+{
+ SearchPath p("0/1;2/3", 3);
+ EXPECT_EQUAL(2u, p.elements().size());
+ assertElement({0}, 1, p.elements()[0]);
+ assertElement({2}, 3, p.elements()[1]);
+}
+
+TEST("requireThatMultipleComplexElementsCanBeSpecified")
+{
+ SearchPath p("0,2,4/1;1,3,5/3", 6);
+ EXPECT_EQUAL(2u, p.elements().size());
+ assertElement({0,2,4}, 1, p.elements()[0]);
+ assertElement({1,3,5}, 3, p.elements()[1]);
+}
+
+TEST("requireThatMultipleElementsWithoutRowsCanBeSpecified")
+{
+ SearchPath p("0/;1/", 2);
+ EXPECT_EQUAL(2u, p.elements().size());
+ assertElement({0}, p.elements()[0]);
+ assertElement({1}, p.elements()[1]);
+}
+
+TEST("require that sizeof FastS_FNET_SearchNode is reasonable")
+{
+ EXPECT_EQUAL(240u, sizeof(FastS_FNET_SearchNode));
+ EXPECT_EQUAL(40u, sizeof(search::common::SortDataIterator));
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/grouping/.gitignore b/searchcore/src/tests/grouping/.gitignore
new file mode 100644
index 00000000000..940fd49d20b
--- /dev/null
+++ b/searchcore/src/tests/grouping/.gitignore
@@ -0,0 +1,4 @@
+.depend
+Makefile
+grouping_test
+searchcore_grouping_test_app
diff --git a/searchcore/src/tests/grouping/CMakeLists.txt b/searchcore/src/tests/grouping/CMakeLists.txt
new file mode 100644
index 00000000000..245406187f6
--- /dev/null
+++ b/searchcore/src/tests/grouping/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_grouping_test_app
+ SOURCES
+ grouping.cpp
+ DEPENDS
+ searchcore_grouping
+ searchcore_matching
+)
+vespa_add_test(NAME searchcore_grouping_test_app COMMAND searchcore_grouping_test_app)
diff --git a/searchcore/src/tests/grouping/DESC b/searchcore/src/tests/grouping/DESC
new file mode 100644
index 00000000000..1aa6cb37e89
--- /dev/null
+++ b/searchcore/src/tests/grouping/DESC
@@ -0,0 +1 @@
+grouping test. Take a look at grouping.cpp for details.
diff --git a/searchcore/src/tests/grouping/FILES b/searchcore/src/tests/grouping/FILES
new file mode 100644
index 00000000000..a3a45cfb198
--- /dev/null
+++ b/searchcore/src/tests/grouping/FILES
@@ -0,0 +1 @@
+grouping.cpp
diff --git a/searchcore/src/tests/grouping/grouping.cpp b/searchcore/src/tests/grouping/grouping.cpp
new file mode 100644
index 00000000000..740e1aeb285
--- /dev/null
+++ b/searchcore/src/tests/grouping/grouping.cpp
@@ -0,0 +1,604 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("grouping_test");
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/searchlib/aggregation/grouping.h>
+#include <vespa/searchlib/aggregation/sumaggregationresult.h>
+#include <vespa/searchcommon/attribute/iattributevector.h>
+#include <vespa/searchlib/expression/attributenode.h>
+#include <vespa/searchlib/attribute/extendableattributes.h>
+#include <vespa/searchcore/grouping/groupingcontext.h>
+#include <vespa/searchcore/grouping/groupingmanager.h>
+#include <vespa/searchcore/grouping/groupingsession.h>
+#include <vespa/searchcore/grouping/sessionid.h>
+#include <vespa/searchcore/proton/matching/sessionmanager.h>
+
+using namespace search::attribute;
+using namespace search::aggregation;
+using namespace search::expression;
+using namespace search::grouping;
+using namespace search;
+
+using proton::matching::SessionManager;
+
+
+//-----------------------------------------------------------------------------
+
+const uint32_t NUM_DOCS = 1000;
+
+//-----------------------------------------------------------------------------
+
+class MyAttributeContext : public IAttributeContext
+{
+private:
+ typedef std::map<string, IAttributeVector *> Map;
+ Map _vectors;
+
+public:
+ const IAttributeVector *get(const string &name) const {
+ if (_vectors.find(name) == _vectors.end()) {
+ return 0;
+ }
+ return _vectors.find(name)->second;
+ }
+ virtual const IAttributeVector *
+ getAttribute(const string &name) const {
+ return get(name);
+ }
+ virtual const IAttributeVector *
+ getAttributeStableEnum(const string &name) const {
+ return get(name);
+ }
+ virtual void
+ getAttributeList(std::vector<const IAttributeVector *> & list) const {
+ Map::const_iterator pos = _vectors.begin();
+ Map::const_iterator end = _vectors.end();
+ for (; pos != end; ++pos) {
+ list.push_back(pos->second);
+ }
+ }
+ ~MyAttributeContext() {
+ Map::iterator pos = _vectors.begin();
+ Map::iterator end = _vectors.end();
+ for (; pos != end; ++pos) {
+ delete pos->second;
+ }
+ }
+
+ //-------------------------------------------------------------------------
+
+ void add(IAttributeVector *attr) {
+ _vectors[attr->getName()] = attr;
+ }
+};
+
+
+//-----------------------------------------------------------------------------
+
+struct MyWorld {
+ MyAttributeContext attributeContext;
+
+ void basicSetup() {
+ // attribute context
+ {
+ SingleInt32ExtAttribute *attr = new SingleInt32ExtAttribute("attr0");
+ AttributeVector::DocId docid;
+ for (uint32_t i = 0; i < NUM_DOCS; ++i) {
+ attr->addDoc(docid);
+ attr->add(i, docid); // value = docid
+ }
+ assert(docid + 1 == NUM_DOCS);
+ attributeContext.add(attr);
+ }
+ {
+ SingleInt32ExtAttribute *attr = new SingleInt32ExtAttribute("attr1");
+ AttributeVector::DocId docid;
+ for (uint32_t i = 0; i < NUM_DOCS; ++i) {
+ attr->addDoc(docid);
+ attr->add(i * 2, docid); // value = docid * 2
+ }
+ assert(docid + 1 == NUM_DOCS);
+ attributeContext.add(attr);
+ }
+ {
+ SingleInt32ExtAttribute *attr = new SingleInt32ExtAttribute("attr2");
+ AttributeVector::DocId docid;
+ for (uint32_t i = 0; i < NUM_DOCS; ++i) {
+ attr->addDoc(docid);
+ attr->add(i * 3, docid); // value = docid * 3
+ }
+ assert(docid + 1 == NUM_DOCS);
+ attributeContext.add(attr);
+ }
+ {
+ SingleInt32ExtAttribute *attr = new SingleInt32ExtAttribute("attr3");
+ AttributeVector::DocId docid;
+ for (uint32_t i = 0; i < NUM_DOCS; ++i) {
+ attr->addDoc(docid);
+ attr->add(i * 4, docid); // value = docid * 4
+ }
+ assert(docid + 1 == NUM_DOCS);
+ attributeContext.add(attr);
+ }
+
+ }
+};
+
+//-----------------------------------------------------------------------------
+
+typedef GroupingContext::GroupingList GroupingList;
+
+SessionId createSessionId(const std::string & s) {
+ std::vector<char> vec;
+ for (size_t i = 0; i < s.size(); i++) {
+ vec.push_back(s[i]);
+ }
+ return SessionId(&vec[0], vec.size());
+}
+
+class CheckAttributeReferences : public vespalib::ObjectOperation, public vespalib::ObjectPredicate
+{
+public:
+ CheckAttributeReferences() : _numrefs(0) { }
+ int _numrefs;
+private:
+ virtual void execute(vespalib::Identifiable &obj) {
+ if (static_cast<AttributeNode &>(obj).getAttribute() != NULL) {
+ _numrefs++;
+ }
+ }
+ virtual bool check(const vespalib::Identifiable &obj) const { return obj.inherits(AttributeNode::classId); }
+};
+
+struct DoomFixture {
+ vespalib::Clock clock;
+ fastos::TimeStamp timeOfDoom;
+ DoomFixture() : clock(), timeOfDoom(fastos::TimeStamp::FUTURE) {}
+};
+
+//-----------------------------------------------------------------------------
+
+TEST("testSessionId") {
+ SessionId id1;
+ ASSERT_TRUE(id1.empty());
+
+ SessionId id2(createSessionId("foo"));
+ SessionId id3(createSessionId("bar"));
+
+ ASSERT_TRUE(!id2.empty());
+ ASSERT_TRUE(!id3.empty());
+ ASSERT_TRUE(id3 < id2);
+ EXPECT_EQUAL(id2, id2);
+}
+
+TEST_F("testGroupingContextInitialization", DoomFixture()) {
+ vespalib::nbostream os;
+ Grouping baseRequest = Grouping()
+ .setRoot(Group()
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr0"))))
+ .addLevel(GroupingLevel()
+ .setExpression(AttributeNode("attr1"))
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr2"))))
+ .addLevel(GroupingLevel()
+ .setExpression(AttributeNode("attr2"))
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr3"))))
+ .addLevel(GroupingLevel()
+ .setExpression(AttributeNode("attr3"))
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr1"))));
+
+ vespalib::NBOSerializer nos(os);
+ nos << (uint32_t)1;
+ baseRequest.serialize(nos);
+
+ GroupingContext context(f1.clock, f1.timeOfDoom, os.c_str(), os.size());
+ ASSERT_TRUE(!context.empty());
+ GroupingContext::GroupingList list = context.getGroupingList();
+ ASSERT_TRUE(list.size() == 1);
+ EXPECT_EQUAL(list[0]->asString(), baseRequest.asString());
+ context.reset();
+ ASSERT_TRUE(context.empty());
+}
+
+TEST_F("testGroupingContextUsage", DoomFixture()) {
+ vespalib::nbostream os;
+ Grouping request1 = Grouping()
+ .setFirstLevel(0)
+ .setLastLevel(0)
+ .setRoot(Group()
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr0"))))
+ .addLevel(GroupingLevel()
+ .setExpression(AttributeNode("attr1"))
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr2"))))
+ .addLevel(GroupingLevel()
+ .setExpression(AttributeNode("attr2"))
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr3"))))
+ .addLevel(GroupingLevel()
+ .setExpression(AttributeNode("attr3"))
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr1"))));
+
+ Grouping request2 = Grouping()
+ .setFirstLevel(0)
+ .setLastLevel(3)
+ .setRoot(Group()
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr0"))))
+ .addLevel(GroupingLevel()
+ .setExpression(AttributeNode("attr1"))
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr2"))))
+ .addLevel(GroupingLevel()
+ .setExpression(AttributeNode("attr2"))
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr3"))))
+ .addLevel(GroupingLevel()
+ .setExpression(AttributeNode("attr3"))
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr1"))));
+
+
+ GroupingContext::GroupingPtr r1(new Grouping(request1));
+ GroupingContext::GroupingPtr r2(new Grouping(request2));
+ GroupingContext context(f1.clock, f1.timeOfDoom);
+ ASSERT_TRUE(context.empty());
+ context.addGrouping(r1);
+ ASSERT_TRUE(context.getGroupingList().size() == 1);
+ context.addGrouping(r2);
+ ASSERT_TRUE(context.getGroupingList().size() == 2);
+ context.reset();
+ ASSERT_TRUE(context.empty());
+}
+
+TEST_F("testGroupingContextSerializing", DoomFixture()) {
+ Grouping baseRequest = Grouping()
+ .setRoot(Group()
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr0"))))
+ .addLevel(GroupingLevel()
+ .setExpression(AttributeNode("attr1"))
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr2"))))
+ .addLevel(GroupingLevel()
+ .setExpression(AttributeNode("attr2"))
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr3"))))
+ .addLevel(GroupingLevel()
+ .setExpression(AttributeNode("attr3"))
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr1"))));
+
+ vespalib::nbostream os;
+ vespalib::NBOSerializer nos(os);
+ nos << (uint32_t)1;
+ baseRequest.serialize(nos);
+
+ GroupingContext context(f1.clock, f1.timeOfDoom);
+ GroupingContext::GroupingPtr bp(new Grouping(baseRequest));
+ context.addGrouping(bp);
+ context.serialize();
+ vespalib::nbostream & res(context.getResult());
+ EXPECT_EQUAL(res.size(), os.size());
+ ASSERT_TRUE(memcmp(res.c_str(), os.c_str(), res.size()) == 0);
+}
+
+TEST_F("testGroupingManager", DoomFixture()) {
+ vespalib::nbostream os;
+ Grouping request1 = Grouping()
+ .setFirstLevel(0)
+ .setLastLevel(0)
+ .setRoot(Group()
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr0"))))
+ .addLevel(GroupingLevel()
+ .setExpression(AttributeNode("attr1"))
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr2"))))
+ .addLevel(GroupingLevel()
+ .setExpression(AttributeNode("attr2"))
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr3"))));
+
+ GroupingContext context(f1.clock, f1.timeOfDoom);
+ GroupingContext::GroupingPtr bp(new Grouping(request1));
+ context.addGrouping(bp);
+ GroupingManager manager(context);
+ ASSERT_TRUE(!manager.empty());
+}
+
+TEST_F("testGroupingSession", DoomFixture()) {
+ MyWorld world;
+ world.basicSetup();
+ vespalib::nbostream os;
+ Grouping request1 = Grouping()
+ .setId(0)
+ .setFirstLevel(0)
+ .setLastLevel(0)
+ .addLevel(GroupingLevel()
+ .setExpression(AttributeNode("attr1"))
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr2"))))
+ .addLevel(GroupingLevel()
+ .setExpression(AttributeNode("attr2"))
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr3"))));
+
+ Grouping request2 = Grouping()
+ .setId(1)
+ .setFirstLevel(0)
+ .setLastLevel(3)
+ .addLevel(GroupingLevel()
+ .setExpression(AttributeNode("attr1"))
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr2"))))
+ .addLevel(GroupingLevel()
+ .setExpression(AttributeNode("attr2"))
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr3"))))
+ .addLevel(GroupingLevel()
+ .setExpression(AttributeNode("attr3"))
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr1"))));
+
+
+ CheckAttributeReferences attrCheck;
+ request1.select(attrCheck, attrCheck);
+ EXPECT_EQUAL(attrCheck._numrefs, 0);
+ request2.select(attrCheck, attrCheck);
+ EXPECT_EQUAL(attrCheck._numrefs, 0);
+
+ GroupingContext::GroupingPtr r1(new Grouping(request1));
+ GroupingContext::GroupingPtr r2(new Grouping(request2));
+ GroupingContext initContext(f1.clock, f1.timeOfDoom);
+ initContext.addGrouping(r1);
+ initContext.addGrouping(r2);
+ SessionId id("foo");
+
+ // Test initialization phase
+ GroupingSession session(id, initContext, world.attributeContext);
+ CheckAttributeReferences attrCheck2;
+ GroupingList &gl2(initContext.getGroupingList());
+ for (unsigned int i = 0; i < gl2.size(); i++) {
+ gl2[i]->select(attrCheck2, attrCheck2);
+ }
+ EXPECT_EQUAL(attrCheck2._numrefs, 10);
+ RankedHit hit;
+ hit._docId = 0;
+ GroupingManager &manager(session.getGroupingManager());
+ manager.groupInRelevanceOrder(&hit, 1);
+ CheckAttributeReferences attrCheck_after;
+ GroupingList &gl3(initContext.getGroupingList());
+ for (unsigned int i = 0; i < gl3.size(); i++) {
+ gl3[i]->select(attrCheck_after, attrCheck_after);
+ }
+ EXPECT_EQUAL(attrCheck_after._numrefs, 0);
+ {
+ EXPECT_EQUAL(id, session.getSessionId());
+ ASSERT_TRUE(!session.getGroupingManager().empty());
+ ASSERT_TRUE(!session.finished());
+ session.continueExecution(initContext);
+ ASSERT_TRUE(!session.finished());
+ }
+ // Test second pass
+ {
+ GroupingContext context(f1.clock, f1.timeOfDoom);
+ GroupingContext::GroupingPtr r(new Grouping(request1));
+ r->setFirstLevel(1);
+ r->setLastLevel(1);
+ context.addGrouping(r);
+
+ session.continueExecution(context);
+ ASSERT_TRUE(!session.finished());
+ }
+ // Test last pass. Session should be marked as finished
+ {
+ GroupingContext context(f1.clock, f1.timeOfDoom);
+ GroupingContext::GroupingPtr r(new Grouping(request1));
+ r->setFirstLevel(2);
+ r->setLastLevel(2);
+ context.addGrouping(r);
+
+ session.continueExecution(context);
+ ASSERT_TRUE(session.finished());
+ }
+
+}
+
+TEST_F("testEmptySessionId", DoomFixture()) {
+ MyWorld world;
+ world.basicSetup();
+ vespalib::nbostream os;
+ Grouping request1 = Grouping()
+ .setId(0)
+ .setFirstLevel(0)
+ .setLastLevel(0)
+ .addLevel(GroupingLevel()
+ .setExpression(AttributeNode("attr1"))
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr2"))))
+ .addLevel(GroupingLevel()
+ .setExpression(AttributeNode("attr2"))
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr3"))));
+
+ GroupingContext::GroupingPtr r1(new Grouping(request1));
+ GroupingContext initContext(f1.clock, f1.timeOfDoom);
+ initContext.addGrouping(r1);
+ SessionId id;
+
+ // Test initialization phase
+ GroupingSession session(id, initContext, world.attributeContext);
+ RankedHit hit;
+ hit._docId = 0;
+ GroupingManager &manager(session.getGroupingManager());
+ manager.groupInRelevanceOrder(&hit, 1);
+ EXPECT_EQUAL(id, session.getSessionId());
+ ASSERT_TRUE(!session.getGroupingManager().empty());
+ ASSERT_TRUE(session.finished() && session.getSessionId().empty());
+ session.continueExecution(initContext);
+ ASSERT_TRUE(session.finished());
+ ASSERT_TRUE(r1->getRoot().getChildrenSize() > 0);
+}
+
+TEST_F("testSessionManager", DoomFixture()) {
+ MyWorld world;
+ world.basicSetup();
+ vespalib::nbostream os;
+ Grouping request1 = Grouping()
+ .setId(0)
+ .setFirstLevel(0)
+ .setLastLevel(0)
+ .addLevel(GroupingLevel()
+ .setExpression(AttributeNode("attr1"))
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr2"))
+ .setResult(Int64ResultNode(0))))
+ .addLevel(GroupingLevel()
+ .setExpression(AttributeNode("attr2"))
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr3"))
+ .setResult(Int64ResultNode(0))))
+ .setRoot(Group()
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr0"))
+ .setResult(Int64ResultNode(0))));
+
+ GroupingContext::GroupingPtr r1(new Grouping(request1));
+ GroupingContext initContext(f1.clock, f1.timeOfDoom);
+ initContext.addGrouping(r1);
+
+ SessionManager mgr(2);
+ SessionId id1("foo");
+ SessionId id2("bar");
+ SessionId id3("baz");
+ GroupingSession::UP s1(new GroupingSession(id1, initContext, world.attributeContext));
+ GroupingSession::UP s2(new GroupingSession(id2, initContext, world.attributeContext));
+ GroupingSession::UP s3(new GroupingSession(id3, initContext, world.attributeContext));
+ ASSERT_EQUAL(f1.timeOfDoom, s1->getTimeOfDoom());
+ mgr.insert(std::move(s1));
+ s1 = mgr.pickGrouping(id1);
+ ASSERT_TRUE(s1.get());
+ EXPECT_EQUAL(id1, s1->getSessionId());
+
+ mgr.insert(std::move(s1));
+ mgr.insert(std::move(s2));
+ mgr.insert(std::move(s3));
+ s1 = mgr.pickGrouping(id1);
+ s2 = mgr.pickGrouping(id2);
+ s3 = mgr.pickGrouping(id3);
+ ASSERT_TRUE(s1.get() == NULL);
+ ASSERT_TRUE(s2.get() != NULL);
+ ASSERT_TRUE(s3.get() != NULL);
+ EXPECT_EQUAL(id2, s2->getSessionId());
+ EXPECT_EQUAL(id3, s3->getSessionId());
+ SessionManager::Stats stats = mgr.getGroupingStats();
+ EXPECT_EQUAL(4u, stats.numInsert);
+ EXPECT_EQUAL(3u, stats.numPick);
+ EXPECT_EQUAL(1u, stats.numDropped);
+}
+
+void doGrouping(GroupingContext &ctx,
+ uint32_t doc1, double rank1,
+ uint32_t doc2, double rank2,
+ uint32_t doc3, double rank3)
+{
+ GroupingManager man(ctx);
+ std::vector<RankedHit> hits;
+ hits.push_back(RankedHit(doc1, rank1));
+ hits.push_back(RankedHit(doc2, rank2));
+ hits.push_back(RankedHit(doc3, rank3));
+ man.groupInRelevanceOrder(&hits[0], 3);
+}
+
+TEST_F("test grouping fork/join", DoomFixture()) {
+ MyWorld world;
+ world.basicSetup();
+
+ Grouping request = Grouping()
+ .setRoot(Group()
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr0"))))
+ .addLevel(GroupingLevel()
+ .setMaxGroups(3)
+ .setExpression(AttributeNode("attr0")))
+ .setFirstLevel(0)
+ .setLastLevel(1);
+
+ GroupingContext::GroupingPtr g1(new Grouping(request));
+ GroupingContext context(f1.clock, f1.timeOfDoom);
+ context.addGrouping(g1);
+ GroupingSession session(SessionId(), context, world.attributeContext);
+ session.prepareThreadContextCreation(4);
+
+ GroupingContext::UP ctx0 = session.createThreadContext(0, world.attributeContext);
+ GroupingContext::UP ctx1 = session.createThreadContext(1, world.attributeContext);
+ GroupingContext::UP ctx2 = session.createThreadContext(2, world.attributeContext);
+ GroupingContext::UP ctx3 = session.createThreadContext(3, world.attributeContext);
+ doGrouping(*ctx0, 12, 30.0, 11, 20.0, 10, 10.0);
+ doGrouping(*ctx1, 22, 150.0, 21, 40.0, 20, 25.0);
+ doGrouping(*ctx2, 32, 100.0, 31, 15.0, 30, 5.0);
+ doGrouping(*ctx3, 42, 4.0, 41, 3.0, 40, 2.0); // not merged (verify independent contexts)
+ {
+ GroupingManager man(*ctx0);
+ man.merge(*ctx1);
+ man.merge(*ctx2);
+ man.prune();
+ }
+
+ Grouping expect = Grouping()
+ .setRoot(Group()
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("attr0"))
+ .setResult(Int64ResultNode(189)))
+ .addChild(Group().setId(Int64ResultNode(21)).setRank(40.0))
+ .addChild(Group().setId(Int64ResultNode(22)).setRank(150.0))
+ .addChild(Group().setId(Int64ResultNode(32)).setRank(100.0)))
+ .addLevel(GroupingLevel()
+ .setMaxGroups(3)
+ .setExpression(AttributeNode("attr0")))
+ .setFirstLevel(0)
+ .setLastLevel(1);
+
+ session.continueExecution(context);
+ GroupingContext::GroupingList list = context.getGroupingList();
+ ASSERT_TRUE(list.size() == 1);
+ EXPECT_EQUAL(expect.asString(), list[0]->asString());
+}
+
+TEST_F("test session timeout", DoomFixture()) {
+ MyWorld world;
+ world.basicSetup();
+ SessionManager mgr(2);
+ SessionId id1("foo");
+ SessionId id2("bar");
+
+ GroupingContext initContext1(f1.clock, 10);
+ GroupingContext initContext2(f1.clock, 20);
+ GroupingSession::UP s1(new GroupingSession(id1, initContext1, world.attributeContext));
+ GroupingSession::UP s2(new GroupingSession(id2, initContext2, world.attributeContext));
+ mgr.insert(std::move(s1));
+ mgr.insert(std::move(s2));
+ mgr.pruneTimedOutSessions(5);
+ SessionManager::Stats stats(mgr.getGroupingStats());
+ ASSERT_EQUAL(2u, stats.numCached);
+ mgr.pruneTimedOutSessions(10);
+ stats = mgr.getGroupingStats();
+ ASSERT_EQUAL(2u, stats.numCached);
+
+ mgr.pruneTimedOutSessions(11);
+ stats = mgr.getGroupingStats();
+ ASSERT_EQUAL(1u, stats.numCached);
+
+ mgr.pruneTimedOutSessions(21);
+ stats = mgr.getGroupingStats();
+ ASSERT_EQUAL(0u, stats.numCached);
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/attribute/.gitignore b/searchcore/src/tests/proton/attribute/.gitignore
new file mode 100644
index 00000000000..794f5f454f8
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/.gitignore
@@ -0,0 +1,9 @@
+.depend
+Makefile
+*_test
+test
+test_output
+flush
+
+searchcore_attribute_test_app
+searchcore_attributeflush_test_app
diff --git a/searchcore/src/tests/proton/attribute/CMakeLists.txt b/searchcore/src/tests/proton/attribute/CMakeLists.txt
new file mode 100644
index 00000000000..1439c2b2646
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/CMakeLists.txt
@@ -0,0 +1,21 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_attribute_test_app
+ SOURCES
+ attribute_test.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_attribute
+ searchcore_flushengine
+ searchcore_pcommon
+)
+vespa_add_test(NAME searchcore_attribute_test_app COMMAND sh attribute_test.sh)
+vespa_add_executable(searchcore_attributeflush_test_app
+ SOURCES
+ attributeflush_test.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_attribute
+ searchcore_flushengine
+ searchcore_pcommon
+)
+vespa_add_test(NAME searchcore_attributeflush_test_app COMMAND sh attributeflush_test.sh)
diff --git a/searchcore/src/tests/proton/attribute/DESC b/searchcore/src/tests/proton/attribute/DESC
new file mode 100644
index 00000000000..bd71a808c51
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/DESC
@@ -0,0 +1 @@
+attribute test. Take a look at attribute_test.cpp for details.
diff --git a/searchcore/src/tests/proton/attribute/FILES b/searchcore/src/tests/proton/attribute/FILES
new file mode 100644
index 00000000000..84bc710d58b
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/FILES
@@ -0,0 +1 @@
+attribute_test.cpp
diff --git a/searchcore/src/tests/proton/attribute/attribute_manager/.gitignore b/searchcore/src/tests/proton/attribute/attribute_manager/.gitignore
new file mode 100644
index 00000000000..3e77da66466
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/attribute_manager/.gitignore
@@ -0,0 +1 @@
+searchcore_attribute_manager_test_app
diff --git a/searchcore/src/tests/proton/attribute/attribute_manager/CMakeLists.txt b/searchcore/src/tests/proton/attribute/attribute_manager/CMakeLists.txt
new file mode 100644
index 00000000000..7e8ab14a13b
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/attribute_manager/CMakeLists.txt
@@ -0,0 +1,14 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_attribute_manager_test_app
+ SOURCES
+ attribute_manager_test.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_attribute
+ searchcore_documentmetastore
+ searchcore_bucketdb
+ searchcore_initializer
+ searchcore_flushengine
+ searchcore_pcommon
+)
+vespa_add_test(NAME searchcore_attribute_manager_test_app COMMAND searchcore_attribute_manager_test_app)
diff --git a/searchcore/src/tests/proton/attribute/attribute_manager/DESC b/searchcore/src/tests/proton/attribute/attribute_manager/DESC
new file mode 100644
index 00000000000..f1cdc01fd47
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/attribute_manager/DESC
@@ -0,0 +1 @@
+attribute manager test. Take a look at attribute_manager_test.cpp for details.
diff --git a/searchcore/src/tests/proton/attribute/attribute_manager/FILES b/searchcore/src/tests/proton/attribute/attribute_manager/FILES
new file mode 100644
index 00000000000..8e4fbdcb888
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/attribute_manager/FILES
@@ -0,0 +1 @@
+attribute_manager_test.cpp
diff --git a/searchcore/src/tests/proton/attribute/attribute_manager/attribute_manager_test.cpp b/searchcore/src/tests/proton/attribute/attribute_manager/attribute_manager_test.cpp
new file mode 100644
index 00000000000..34c67da4ac8
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/attribute_manager/attribute_manager_test.cpp
@@ -0,0 +1,686 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("attribute_manager_test");
+
+#include <vespa/fastos/file.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/searchcommon/attribute/attributecontent.h>
+#include <vespa/searchcore/proton/attribute/attribute_collection_spec_factory.h>
+#include <vespa/searchcore/proton/attribute/attributemanager.h>
+#include <vespa/searchcore/proton/attribute/attribute_manager_initializer.h>
+#include <vespa/searchcore/proton/attribute/attribute_writer.h>
+#include <vespa/searchcore/proton/attribute/exclusive_attribute_read_accessor.h>
+#include <vespa/searchcore/proton/attribute/sequential_attributes_initializer.h>
+#include <vespa/searchcore/proton/attribute/i_attribute_functor.h>
+#include <vespa/searchcore/proton/initializer/initializer_task.h>
+#include <vespa/searchcore/proton/initializer/task_runner.h>
+#include <vespa/searchcore/proton/test/attribute_utils.h>
+#include <vespa/searchcore/proton/test/attribute_vectors.h>
+#include <vespa/searchcore/proton/test/directory_handler.h>
+#include <vespa/searchlib/attribute/attributefactory.h>
+#include <vespa/searchlib/attribute/integerbase.h>
+#include <vespa/searchlib/index/dummyfileheadercontext.h>
+#include <vespa/searchlib/util/filekit.h>
+
+#include <vespa/searchlib/attribute/attributevector.hpp>
+#include <vespa/searchlib/attribute/predicate_attribute.h>
+#include <vespa/searchlib/predicate/predicate_index.h>
+#include <vespa/searchlib/predicate/predicate_tree_annotator.h>
+#include <vespa/searchlib/attribute/singlenumericattribute.hpp>
+#include <vespa/searchlib/common/foregroundtaskexecutor.h>
+#include <vespa/vespalib/util/threadstackexecutor.h>
+#include <vespa/config-attributes.h>
+
+namespace vespa { namespace config { namespace search {}}}
+
+using std::string;
+using namespace vespa::config::search;
+using namespace config;
+using namespace document;
+using namespace proton;
+using namespace search;
+using namespace search::index;
+using proton::initializer::InitializerTask;
+using proton::test::AttributeUtils;
+using proton::test::Int32Attribute;
+using search::TuneFileAttributes;
+using search::index::DummyFileHeaderContext;
+using search::ForegroundTaskExecutor;
+using search::predicate::PredicateIndex;
+using search::predicate::PredicateTreeAnnotations;
+using vespa::config::search::AttributesConfig;
+using vespa::config::search::AttributesConfigBuilder;
+
+typedef search::attribute::Config AVConfig;
+typedef proton::AttributeCollectionSpec::Attribute AttrSpec;
+typedef proton::AttributeCollectionSpec::AttributeList AttrSpecList;
+typedef proton::AttributeCollectionSpec AttrMgrSpec;
+
+namespace {
+
+const uint64_t createSerialNum = 42u;
+
+class MyAttributeFunctor : public proton::IAttributeFunctor
+{
+ std::vector<vespalib::string> _names;
+
+public:
+ virtual void
+ operator()(const search::AttributeVector &attributeVector) override {
+ _names.push_back(attributeVector.getName());
+ }
+
+ std::string getSortedNames() {
+ std::ostringstream os;
+ std::sort(_names.begin(), _names.end());
+ for (const vespalib::string &name : _names) {
+ if (!os.str().empty())
+ os << ",";
+ os << name;
+ }
+ return os.str();
+ }
+};
+
+}
+
+const string test_dir = "test_output";
+const AVConfig INT32_SINGLE = AttributeUtils::getInt32Config();
+const AVConfig INT32_ARRAY = AttributeUtils::getInt32ArrayConfig();
+
+void
+fillAttribute(const AttributeVector::SP &attr, uint32_t numDocs, int64_t value, uint64_t lastSyncToken)
+{
+ test::AttributeUtils::fillAttribute(attr, numDocs, value, lastSyncToken);
+}
+
+void
+fillAttribute(const AttributeVector::SP &attr, uint32_t from, uint32_t to, int64_t value, uint64_t lastSyncToken)
+{
+ test::AttributeUtils::fillAttribute(attr, from, to, value, lastSyncToken);
+}
+
+struct BaseFixture
+{
+ test::DirectoryHandler _dirHandler;
+ DummyFileHeaderContext _fileHeaderContext;
+ ForegroundTaskExecutor _attributeFieldWriter;
+ BaseFixture()
+ : _dirHandler(test_dir),
+ _fileHeaderContext(),
+ _attributeFieldWriter()
+ {
+ }
+};
+
+
+struct AttributeManagerFixture
+{
+ proton::AttributeManager::SP _msp;
+ proton::AttributeManager &_m;
+ AttributeWriter _aw;
+ AttributeManagerFixture(BaseFixture &bf)
+ : _msp(std::make_shared<proton::AttributeManager>
+ (test_dir, "test.subdb", TuneFileAttributes(), bf._fileHeaderContext,
+ bf._attributeFieldWriter)),
+ _m(*_msp),
+ _aw(_msp)
+ {
+ }
+ AttributeVector::SP addAttribute(const vespalib::string &name) {
+ return _m.addAttribute(name, INT32_SINGLE, createSerialNum);
+ }
+};
+
+struct Fixture : public BaseFixture, public AttributeManagerFixture
+{
+ Fixture()
+ : BaseFixture(),
+ AttributeManagerFixture(*static_cast<BaseFixture *>(this))
+ {
+ }
+};
+
+struct SequentialAttributeManager
+{
+ SequentialAttributesInitializer initializer;
+ proton::AttributeManager mgr;
+ SequentialAttributeManager(const AttributeManager &currMgr,
+ const AttrMgrSpec &newSpec)
+ : initializer(newSpec.getDocIdLimit()),
+ mgr(currMgr, newSpec, initializer)
+ {
+ mgr.addInitializedAttributes(initializer.getInitializedAttributes());
+ }
+};
+
+struct DummyInitializerTask : public InitializerTask
+{
+ virtual void run() override {}
+};
+
+struct ParallelAttributeManager
+{
+ InitializerTask::SP documentMetaStoreInitTask;
+ BucketDBOwner::SP bucketDbOwner;
+ DocumentMetaStore::SP documentMetaStore;
+ search::GrowStrategy attributeGrow;
+ size_t attributeGrowNumDocs;
+ bool fastAccessAttributesOnly;
+ std::shared_ptr<AttributeManager::SP> mgr;
+ AttributeManagerInitializer::SP initializer;
+
+ ParallelAttributeManager(search::SerialNum configSerialNum,
+ AttributeManager::SP baseAttrMgr,
+ const AttributesConfig &attrCfg,
+ uint32_t docIdLimit)
+ : documentMetaStoreInitTask(std::make_shared<DummyInitializerTask>()),
+ bucketDbOwner(std::make_shared<BucketDBOwner>()),
+ documentMetaStore(std::make_shared<DocumentMetaStore>(bucketDbOwner)),
+ attributeGrow(),
+ attributeGrowNumDocs(1),
+ fastAccessAttributesOnly(false),
+ mgr(std::make_shared<AttributeManager::SP>()),
+ initializer(std::make_shared<AttributeManagerInitializer>
+ (configSerialNum, documentMetaStoreInitTask, documentMetaStore, baseAttrMgr, attrCfg,
+ attributeGrow, attributeGrowNumDocs, fastAccessAttributesOnly, mgr))
+ {
+ documentMetaStore->setCommittedDocIdLimit(docIdLimit);
+ vespalib::ThreadStackExecutor executor(3, 128 * 1024);
+ initializer::TaskRunner taskRunner(executor);
+ taskRunner.runTask(initializer);
+ }
+};
+
+
+TEST_F("require that attributes are added", Fixture)
+{
+ EXPECT_TRUE(f.addAttribute("a1").get() != NULL);
+ EXPECT_TRUE(f.addAttribute("a2").get() != NULL);
+ EXPECT_EQUAL("a1", (*f._m.getAttribute("a1"))->getName());
+ EXPECT_EQUAL("a1", (*f._m.getAttributeStableEnum("a1"))->getName());
+ EXPECT_EQUAL("a2", (*f._m.getAttribute("a2"))->getName());
+ EXPECT_EQUAL("a2", (*f._m.getAttributeStableEnum("a2"))->getName());
+ EXPECT_TRUE(!f._m.getAttribute("not")->valid());
+}
+
+TEST_F("require that predicate attributes are added", Fixture)
+{
+ EXPECT_TRUE(f._m.addAttribute("p1", AttributeUtils::getPredicateConfig(),
+ createSerialNum).get() != NULL);
+ EXPECT_EQUAL("p1", (*f._m.getAttribute("p1"))->getName());
+ EXPECT_EQUAL("p1", (*f._m.getAttributeStableEnum("p1"))->getName());
+}
+
+// End-to-end flush/load cycle across three manager generations:
+//  1) create a1/a2, fill them, flush (snapshots appear at sync token 10);
+//  2) re-open, verify loaded state, grow via onReplayDone, add fresh a3,
+//     flush again (all snapshots advance to token 20);
+//  3) re-open once more and verify all three load with consistent doc counts.
+// IndexMetaInfo is used to inspect the on-disk snapshot state directly.
+TEST_F("require that attributes are flushed and loaded", BaseFixture)
+{
+    IndexMetaInfo ia1(test_dir + "/a1");
+    IndexMetaInfo ia2(test_dir + "/a2");
+    IndexMetaInfo ia3(test_dir + "/a3");
+    {
+        // Generation 1: populate and flush.
+        AttributeManagerFixture amf(f);
+        proton::AttributeManager &am = amf._m;
+        AttributeVector::SP a1 = amf.addAttribute("a1");
+        EXPECT_EQUAL(1u, a1->getNumDocs()); // Resized to size of attributemanager
+        fillAttribute(a1, 1, 3, 2, 10);
+        EXPECT_EQUAL(3u, a1->getNumDocs()); // Resized to size of attributemanager
+        AttributeVector::SP a2 = amf.addAttribute("a2");
+        EXPECT_EQUAL(1u, a2->getNumDocs()); // Not resized to size of attributemanager
+        fillAttribute(a2, 1, 5, 4, 10);
+        EXPECT_EQUAL(5u, a2->getNumDocs()); // Increased
+        // No valid snapshots on disk before the first flush.
+        EXPECT_TRUE(ia1.load());
+        EXPECT_TRUE(!ia1.getBestSnapshot().valid);
+        EXPECT_TRUE(ia2.load());
+        EXPECT_TRUE(!ia2.getBestSnapshot().valid);
+        EXPECT_TRUE(!ia3.load());
+        am.flushAll(0);
+        // Flush writes snapshots tagged with the attributes' sync token (10).
+        EXPECT_TRUE(ia1.load());
+        EXPECT_EQUAL(10u, ia1.getBestSnapshot().syncToken);
+        EXPECT_TRUE(ia2.load());
+        EXPECT_EQUAL(10u, ia2.getBestSnapshot().syncToken);
+    }
+    {
+        // Generation 2: re-open manager; attributes are loaded from disk.
+        AttributeManagerFixture amf(f);
+        proton::AttributeManager &am = amf._m;
+        AttributeVector::SP a1 = amf.addAttribute("a1"); // loaded
+
+        EXPECT_EQUAL(3u, a1->getNumDocs());
+        fillAttribute(a1, 1, 2, 20);
+        EXPECT_EQUAL(4u, a1->getNumDocs());
+        AttributeVector::SP a2 = amf.addAttribute("a2"); // loaded
+        EXPECT_EQUAL(5u, a2->getNumDocs());
+        EXPECT_EQUAL(4u, a1->getNumDocs());
+        // onReplayDone(n) brings every attribute up to n docs.
+        amf._aw.onReplayDone(5u);
+        EXPECT_EQUAL(5u, a2->getNumDocs());
+        EXPECT_EQUAL(5u, a1->getNumDocs());
+        fillAttribute(a2, 1, 4, 20);
+        EXPECT_EQUAL(6u, a2->getNumDocs());
+        AttributeVector::SP a3 = amf.addAttribute("a3"); // not-loaded
+        EXPECT_EQUAL(1u, a3->getNumDocs());
+        amf._aw.onReplayDone(6);
+        EXPECT_EQUAL(6u, a3->getNumDocs());
+        fillAttribute(a3, 1, 7, 6, 20);
+        EXPECT_EQUAL(7u, a3->getNumDocs());
+        // Disk still shows the generation-1 snapshots until the next flush.
+        EXPECT_TRUE(ia1.load());
+        EXPECT_EQUAL(10u, ia1.getBestSnapshot().syncToken);
+        EXPECT_TRUE(ia2.load());
+        EXPECT_EQUAL(10u, ia2.getBestSnapshot().syncToken);
+        EXPECT_TRUE(ia3.load());
+        EXPECT_TRUE(!ia3.getBestSnapshot().valid);
+        am.flushAll(0);
+        EXPECT_TRUE(ia1.load());
+        EXPECT_EQUAL(20u, ia1.getBestSnapshot().syncToken);
+        EXPECT_TRUE(ia2.load());
+        EXPECT_EQUAL(20u, ia2.getBestSnapshot().syncToken);
+        EXPECT_TRUE(ia3.load());
+        EXPECT_EQUAL(20u, ia3.getBestSnapshot().syncToken);
+    }
+    {
+        // Generation 3: all three attributes load; replay equalizes doc counts.
+        AttributeManagerFixture amf(f);
+        AttributeVector::SP a1 = amf.addAttribute("a1"); // loaded
+        EXPECT_EQUAL(6u, a1->getNumDocs());
+        AttributeVector::SP a2 = amf.addAttribute("a2"); // loaded
+        EXPECT_EQUAL(6u, a1->getNumDocs());
+        EXPECT_EQUAL(6u, a2->getNumDocs());
+        AttributeVector::SP a3 = amf.addAttribute("a3"); // loaded
+        EXPECT_EQUAL(6u, a1->getNumDocs());
+        EXPECT_EQUAL(6u, a2->getNumDocs());
+        EXPECT_EQUAL(7u, a3->getNumDocs());
+        amf._aw.onReplayDone(7);
+        EXPECT_EQUAL(7u, a1->getNumDocs());
+        EXPECT_EQUAL(7u, a2->getNumDocs());
+        EXPECT_EQUAL(7u, a3->getNumDocs());
+    }
+}
+
+// Flush/load cycle for a predicate attribute: the first generation indexes an
+// empty document and flushes at sync token 10; the second generation reloads
+// it, indexes a real interval annotation, and flushes at token 20.
+TEST_F("require that predicate attributes are flushed and loaded", BaseFixture)
+{
+    IndexMetaInfo ia1(test_dir + "/a1");
+    {
+        AttributeManagerFixture amf(f);
+        proton::AttributeManager &am = amf._m;
+        AttributeVector::SP a1 =
+            am.addAttribute("a1",
+                            AttributeUtils::getPredicateConfig(),
+                            createSerialNum);
+        EXPECT_EQUAL(1u, a1->getNumDocs());
+
+        PredicateAttribute &pa = static_cast<PredicateAttribute &>(*a1);
+        PredicateIndex &index = pa.getIndex();
+        uint32_t doc_id;
+        a1->addDoc(doc_id);
+        index.indexEmptyDocument(doc_id);
+        pa.commit(10, 10);
+
+        EXPECT_EQUAL(2u, a1->getNumDocs());
+
+        // No valid snapshot before flush; token 10 afterwards.
+        EXPECT_TRUE(ia1.load());
+        EXPECT_TRUE(!ia1.getBestSnapshot().valid);
+        am.flushAll(0);
+        EXPECT_TRUE(ia1.load());
+        EXPECT_EQUAL(10u, ia1.getBestSnapshot().syncToken);
+    }
+    {
+        AttributeManagerFixture amf(f);
+        proton::AttributeManager &am = amf._m;
+        AttributeVector::SP a1 =
+            am.addAttribute("a1", AttributeUtils::getPredicateConfig(),
+                            createSerialNum); // loaded
+        EXPECT_EQUAL(2u, a1->getNumDocs());
+
+        PredicateAttribute &pa = static_cast<PredicateAttribute &>(*a1);
+        PredicateIndex &index = pa.getIndex();
+        uint32_t doc_id;
+        a1->addDoc(doc_id);
+        // Annotate with a single interval for feature hash 123.
+        PredicateTreeAnnotations annotations(3);
+        annotations.interval_map[123] = {{ 0x0001ffff }};
+        index.indexDocument(1, annotations);
+        pa.commit(20, 20);
+
+        EXPECT_EQUAL(3u, a1->getNumDocs());
+        EXPECT_TRUE(ia1.load());
+        EXPECT_EQUAL(10u, ia1.getBestSnapshot().syncToken);
+        am.flushAll(0);
+        EXPECT_TRUE(ia1.load());
+        EXPECT_EQUAL(20u, ia1.getBestSnapshot().syncToken);
+    }
+}
+
+// An "extra" attribute (registered directly, outside the configured set) is
+// retrievable by name and keeps its concrete type.
+TEST_F("require that extra attribute is added", Fixture)
+{
+    AttributeVector::SP extra(new Int32Attribute("extra"));
+    f._m.addExtraAttribute(extra);
+    AttributeGuard::UP exguard(f._m.getAttribute("extra"));
+    EXPECT_TRUE(dynamic_cast<Int32Attribute *>(exguard->operator->()) !=
+                NULL);
+}
+
+// Reconfiguration to a larger spec keeps the already-existing attribute
+// instances (a1 and the extra "ex" are reused, pointer-identical) and adds
+// the newly-specified ones (a2, a3).
+TEST_F("require that reconfig can add attributes", Fixture)
+{
+    AttributeVector::SP a1 = f.addAttribute("a1");
+    AttributeVector::SP ex(new Int32Attribute("ex"));
+    f._m.addExtraAttribute(ex);
+
+    AttrSpecList newSpec;
+    newSpec.push_back(AttrSpec("a1", INT32_SINGLE));
+    newSpec.push_back(AttrSpec("a2", INT32_SINGLE));
+    newSpec.push_back(AttrSpec("a3", INT32_SINGLE));
+
+    SequentialAttributeManager sam(f._m, AttrMgrSpec(newSpec, f._m.getNumDocs(), 0));
+    std::vector<AttributeGuard> list;
+    sam.mgr.getAttributeList(list);
+    // getAttributeList() gives no ordering guarantee; sort by name first.
+    std::sort(list.begin(), list.end(), [](const AttributeGuard & a, const AttributeGuard & b) {
+        return a->getName() < b->getName();
+    });
+    EXPECT_EQUAL(3u, list.size());
+    EXPECT_EQUAL("a1", list[0]->getName());
+    EXPECT_TRUE(list[0].operator->() == a1.get()); // reuse
+    EXPECT_EQUAL("a2", list[1]->getName());
+    EXPECT_EQUAL("a3", list[2]->getName());
+    EXPECT_TRUE(sam.mgr.getAttribute("ex")->operator->() == ex.get()); // reuse
+}
+
+// Reconfiguration to a smaller spec drops the attributes no longer listed
+// (a1, a3) while reusing the surviving instance (a2).
+TEST_F("require that reconfig can remove attributes", Fixture)
+{
+    AttributeVector::SP a1 = f.addAttribute("a1");
+    AttributeVector::SP a2 = f.addAttribute("a2");
+    AttributeVector::SP a3 = f.addAttribute("a3");
+
+    AttrSpecList newSpec;
+    newSpec.push_back(AttrSpec("a2", INT32_SINGLE));
+
+    SequentialAttributeManager sam(f._m, AttrMgrSpec(newSpec, 1, 0));
+    std::vector<AttributeGuard> list;
+    sam.mgr.getAttributeList(list);
+    EXPECT_EQUAL(1u, list.size());
+    EXPECT_EQUAL("a2", list[0]->getName());
+    EXPECT_TRUE(list[0].operator->() == a2.get()); // reuse
+}
+
+// Attributes introduced by a reconfig are grown to the current doc-id space
+// (3 docs here) and pre-filled with the type's undefined/empty value, with a
+// fresh last-sync-token of 0.
+TEST_F("require that new attributes after reconfig are initialized", Fixture)
+{
+    AttributeVector::SP a1 = f.addAttribute("a1");
+    uint32_t docId(0);
+    a1->addDoc(docId);
+    EXPECT_EQUAL(1u, docId);
+    a1->addDoc(docId);
+    EXPECT_EQUAL(2u, docId);
+    EXPECT_EQUAL(3u, a1->getNumDocs());
+
+    AttrSpecList newSpec;
+    newSpec.push_back(AttrSpec("a1", INT32_SINGLE));
+    newSpec.push_back(AttrSpec("a2", INT32_SINGLE));
+    newSpec.push_back(AttrSpec("a3", INT32_ARRAY));
+
+    SequentialAttributeManager sam(f._m, AttrMgrSpec(newSpec, 3, 4));
+    // New single-value attribute: sized to 3 docs, undefined values.
+    AttributeGuard::UP a2ap = sam.mgr.getAttribute("a2");
+    AttributeGuard &a2(*a2ap);
+    EXPECT_EQUAL(3u, a2->getNumDocs());
+    EXPECT_TRUE(search::attribute::isUndefined<int32_t>(a2->getInt(1)));
+    EXPECT_TRUE(search::attribute::isUndefined<int32_t>(a2->getInt(2)));
+    EXPECT_EQUAL(0u, a2->getStatus().getLastSyncToken());
+    // New array attribute: sized to 3 docs, empty value lists.
+    AttributeGuard::UP a3ap = sam.mgr.getAttribute("a3");
+    AttributeGuard &a3(*a3ap);
+    AttributeVector::largeint_t buf[1];
+    EXPECT_EQUAL(3u, a3->getNumDocs());
+    EXPECT_EQUAL(0u, a3->get(1, buf, 1));
+    EXPECT_EQUAL(0u, a3->get(2, buf, 1));
+    EXPECT_EQUAL(0u, a3->getStatus().getLastSyncToken());
+}
+
+// An attribute removed by one reconfig and re-added by a later one comes back
+// with its flushed data intact: old docs keep their values, docs added while
+// it was gone read as undefined, and the sync token reflects the removal-time
+// serial (16), not the re-add-time serial.
+TEST_F("require that removed attributes can resurrect", BaseFixture)
+{
+    proton::AttributeManager::SP am1(
+            new proton::AttributeManager(test_dir, "test.subdb",
+                                         TuneFileAttributes(),
+                                         f._fileHeaderContext,
+                                         f._attributeFieldWriter));
+    {
+        AttributeVector::SP a1 =
+            am1->addAttribute("a1", INT32_SINGLE,
+                              0);
+        fillAttribute(a1, 2, 10, 15);
+        EXPECT_EQUAL(3u, a1->getNumDocs());
+    }
+
+    // Reconfig with an empty spec: a1 is removed.
+    AttrSpecList ns1;
+    SequentialAttributeManager am2(*am1, AttrMgrSpec(ns1, 3, 16));
+    am1.reset();
+
+    AttrSpecList ns2;
+    ns2.push_back(AttrSpec("a1", INT32_SINGLE));
+    // 2 new documents added since a1 was removed
+    SequentialAttributeManager am3(am2.mgr, AttrMgrSpec(ns2, 5, 20));
+
+    AttributeGuard::UP ag1ap = am3.mgr.getAttribute("a1");
+    AttributeGuard &ag1(*ag1ap);
+    ASSERT_TRUE(ag1.valid());
+    EXPECT_EQUAL(5u, ag1->getNumDocs());
+    EXPECT_EQUAL(10, ag1->getInt(1));
+    EXPECT_EQUAL(10, ag1->getInt(2));
+    // Docs 3-4 arrived while a1 was absent -> undefined.
+    EXPECT_TRUE(search::attribute::isUndefined<int32_t>(ag1->getInt(3)));
+    EXPECT_TRUE(search::attribute::isUndefined<int32_t>(ag1->getInt(4)));
+    EXPECT_EQUAL(16u, ag1->getStatus().getLastSyncToken());
+}
+
+// Extra attributes live outside the configured spec, so a reconfig with an
+// empty spec must keep them (same instance reused) rather than drop them.
+TEST_F("require that extra attribute is not treated as removed", Fixture)
+{
+    AttributeVector::SP ex(new Int32Attribute("ex"));
+    f._m.addExtraAttribute(ex);
+    ex->commit(1,1);
+
+    AttrSpecList ns;
+    SequentialAttributeManager am2(f._m, AttrMgrSpec(ns, 2, 1));
+    EXPECT_TRUE(am2.mgr.getAttribute("ex")->operator->() == ex.get()); // reuse
+}
+
+// wipeHistory(schema) deletes the on-disk directories of attributes listed in
+// the given (history) schema: a1 and a3 are wiped, a2's directory survives.
+TEST_F("require that history can be wiped", Fixture)
+{
+    f.addAttribute("a1");
+    f.addAttribute("a2");
+    f.addAttribute("a3");
+    f._m.flushAll(10);
+    Schema hs;
+    hs.addAttributeField(Schema::AttributeField("a1", Schema::INT32));
+    hs.addAttributeField(Schema::AttributeField("a3", Schema::INT32));
+    f._m.wipeHistory(hs);
+    FastOS_StatInfo si;
+    // Stat() returns true when the path exists.
+    EXPECT_TRUE(!FastOS_File::Stat(vespalib::string(test_dir + "/a1").c_str(), &si));
+    EXPECT_TRUE(FastOS_File::Stat(vespalib::string(test_dir + "/a2").c_str(), &si));
+    EXPECT_TRUE(!FastOS_File::Stat(vespalib::string(test_dir + "/a3").c_str(), &si));
+}
+
+// compactLidSpace(lidLimit, serial) lowers the committed doc-id limit of the
+// regular (writable) attributes without shrinking getNumDocs(); extra
+// attributes are left untouched.
+TEST_F("require that lid space can be compacted", Fixture)
+{
+    AttributeVector::SP a1 = f.addAttribute("a1");
+    AttributeVector::SP a2 = f.addAttribute("a2");
+    AttributeVector::SP ex(new Int32Attribute("ex"));
+    f._m.addExtraAttribute(ex);
+    const int64_t attrValue = 33;
+    fillAttribute(a1, 20, attrValue, 100);
+    fillAttribute(a2, 20, attrValue, 100);
+    fillAttribute(ex, 20, attrValue, 100);
+
+    EXPECT_EQUAL(21u, a1->getNumDocs());
+    EXPECT_EQUAL(21u, a2->getNumDocs());
+    EXPECT_EQUAL(20u, ex->getNumDocs());
+    EXPECT_EQUAL(21u, a1->getCommittedDocIdLimit());
+    EXPECT_EQUAL(21u, a2->getCommittedDocIdLimit());
+    EXPECT_EQUAL(20u, ex->getCommittedDocIdLimit());
+
+    f._aw.compactLidSpace(10, 101);
+
+    // Only the committed doc-id limit moves; capacity stays.
+    EXPECT_EQUAL(21u, a1->getNumDocs());
+    EXPECT_EQUAL(21u, a2->getNumDocs());
+    EXPECT_EQUAL(20u, ex->getNumDocs());
+    EXPECT_EQUAL(10u, a1->getCommittedDocIdLimit());
+    EXPECT_EQUAL(10u, a2->getCommittedDocIdLimit());
+    EXPECT_EQUAL(20u, ex->getCommittedDocIdLimit());
+}
+
+// Like the test above, but a1 is filled with sync token 200, ahead of the
+// compaction serial (101) — presumably replay of an already-applied op — so
+// only a2 (token 100) gets its committed doc-id limit lowered; a1 keeps 21.
+// NOTE(review): "ignored per attribute based on sync token" is inferred from
+// the asserted values here — confirm against AttributeWriter::compactLidSpace.
+TEST_F("require that lid space compaction op can be ignored", Fixture)
+{
+    AttributeVector::SP a1 = f.addAttribute("a1");
+    AttributeVector::SP a2 = f.addAttribute("a2");
+    AttributeVector::SP ex(new Int32Attribute("ex"));
+    f._m.addExtraAttribute(ex);
+    const int64_t attrValue = 33;
+    fillAttribute(a1, 20, attrValue, 200);
+    fillAttribute(a2, 20, attrValue, 100);
+    fillAttribute(ex, 20, attrValue, 100);
+
+    EXPECT_EQUAL(21u, a1->getNumDocs());
+    EXPECT_EQUAL(21u, a2->getNumDocs());
+    EXPECT_EQUAL(20u, ex->getNumDocs());
+    EXPECT_EQUAL(21u, a1->getCommittedDocIdLimit());
+    EXPECT_EQUAL(21u, a2->getCommittedDocIdLimit());
+    EXPECT_EQUAL(20u, ex->getCommittedDocIdLimit());
+
+    f._aw.compactLidSpace(10, 101);
+
+    EXPECT_EQUAL(21u, a1->getNumDocs());
+    EXPECT_EQUAL(21u, a2->getNumDocs());
+    EXPECT_EQUAL(20u, ex->getNumDocs());
+    EXPECT_EQUAL(21u, a1->getCommittedDocIdLimit());
+    EXPECT_EQUAL(10u, a2->getCommittedDocIdLimit());
+    EXPECT_EQUAL(20u, ex->getCommittedDocIdLimit());
+}
+
+// getFlushedSerialNum(name) is 0 before any flush, reflects the flush serial
+// afterwards, and stays 0 for names that were never added.
+TEST_F("require that flushed serial number can be retrieved", Fixture)
+{
+    f.addAttribute("a1");
+    EXPECT_EQUAL(0u, f._m.getFlushedSerialNum("a1"));
+    f._m.flushAll(100);
+    EXPECT_EQUAL(100u, f._m.getFlushedSerialNum("a1"));
+    EXPECT_EQUAL(0u, f._m.getFlushedSerialNum("a2"));
+}
+
+
+// getWritableAttributes()/getWritableAttribute() expose only regular
+// attributes; extra attributes and unknown names yield nullptr.
+TEST_F("require that writable attributes can be retrieved", Fixture)
+{
+    auto a1 = f.addAttribute("a1");
+    auto a2 = f.addAttribute("a2");
+    AttributeVector::SP ex(new Int32Attribute("ex"));
+    f._m.addExtraAttribute(ex);
+    auto &vec = f._m.getWritableAttributes();
+    EXPECT_EQUAL(2u, vec.size());
+    EXPECT_EQUAL(a1.get(), vec[0]);
+    EXPECT_EQUAL(a2.get(), vec[1]);
+    EXPECT_EQUAL(a1.get(), f._m.getWritableAttribute("a1"));
+    EXPECT_EQUAL(a2.get(), f._m.getWritableAttribute("a2"));
+    AttributeVector *noAttr = nullptr;
+    EXPECT_EQUAL(noAttr, f._m.getWritableAttribute("a3"));
+    EXPECT_EQUAL(noAttr, f._m.getWritableAttribute("ex"));
+}
+
+
+// Test helper: adds attributes a1/a2/a3, fills docs 1..9 of each with the
+// value 7 at createSerialNum, then flushes everything at createSerialNum + 3.
+// Used as the "producer" half of the init/load round-trip tests below.
+void
+populateAndFlushAttributes(AttributeManagerFixture &f)
+{
+    const int64_t attrValue = 7;
+    AttributeVector::SP a1 = f.addAttribute("a1");
+    fillAttribute(a1, 1, 10, attrValue, createSerialNum);
+    AttributeVector::SP a2 = f.addAttribute("a2");
+    fillAttribute(a2, 1, 10, attrValue, createSerialNum);
+    AttributeVector::SP a3 = f.addAttribute("a3");
+    fillAttribute(a3, 1, 10, attrValue, createSerialNum);
+    f._m.flushAll(createSerialNum + 3);
+}
+
+// Test helper: asserts the state written by populateAndFlushAttributes() —
+// 10 docs, sync token createSerialNum + 3, and value 7 for docs 1..9.
+void
+validateAttribute(const AttributeVector &attr)
+{
+    ASSERT_EQUAL(10u, attr.getNumDocs());
+    EXPECT_EQUAL(createSerialNum + 3, attr.getStatus().getLastSyncToken());
+    for (uint32_t docId = 1; docId < 10; ++docId) {
+        EXPECT_EQUAL(7, attr.getInt(docId));
+    }
+}
+
+// Round-trip via the sequential (single-threaded) initialization path:
+// flushed attributes are reloaded by SequentialAttributeManager and carry
+// the state checked by validateAttribute().
+TEST_F("require that attributes can be initialized and loaded in sequence", BaseFixture)
+{
+    {
+        AttributeManagerFixture amf(f);
+        populateAndFlushAttributes(amf);
+    }
+    {
+        AttributeManagerFixture amf(f);
+
+        AttrSpecList newSpec;
+        newSpec.push_back(AttrSpec("a1", INT32_SINGLE));
+        newSpec.push_back(AttrSpec("a2", INT32_SINGLE));
+        newSpec.push_back(AttrSpec("a3", INT32_SINGLE));
+
+        SequentialAttributeManager newMgr(amf._m, AttrMgrSpec(newSpec, 10, createSerialNum + 5));
+
+        AttributeGuard::UP a1 = newMgr.mgr.getAttribute("a1");
+        TEST_DO(validateAttribute(a1->get()));
+        AttributeGuard::UP a2 = newMgr.mgr.getAttribute("a2");
+        TEST_DO(validateAttribute(a2->get()));
+        AttributeGuard::UP a3 = newMgr.mgr.getAttribute("a3");
+        TEST_DO(validateAttribute(a3->get()));
+    }
+}
+
+// Builds a minimal attributes-config entry (single-value int32) with the
+// given name, for driving the config-based parallel initialization path.
+AttributesConfigBuilder::Attribute
+createAttributeConfig(const vespalib::string &name)
+{
+    AttributesConfigBuilder::Attribute result;
+    result.name = name;
+    result.datatype = AttributesConfigBuilder::Attribute::Datatype::INT32;
+    result.collectiontype = AttributesConfigBuilder::Attribute::Collectiontype::SINGLE;
+    return result;
+}
+
+// Same round-trip as the sequential test above, but loading goes through
+// ParallelAttributeManager (AttributeManagerInitializer run on a task runner
+// with a multi-threaded executor) driven by an AttributesConfig.
+TEST_F("require that attributes can be initialized and loaded in parallel", BaseFixture)
+{
+    {
+        AttributeManagerFixture amf(f);
+        populateAndFlushAttributes(amf);
+    }
+    {
+        AttributeManagerFixture amf(f);
+
+        AttributesConfigBuilder attrCfg;
+        attrCfg.attribute.push_back(createAttributeConfig("a1"));
+        attrCfg.attribute.push_back(createAttributeConfig("a2"));
+        attrCfg.attribute.push_back(createAttributeConfig("a3"));
+
+        ParallelAttributeManager newMgr(createSerialNum + 5, amf._msp, attrCfg, 10);
+
+        // mgr is a shared_ptr to the manager SP filled in by the initializer.
+        AttributeGuard::UP a1 = newMgr.mgr->get()->getAttribute("a1");
+        TEST_DO(validateAttribute(a1->get()));
+        AttributeGuard::UP a2 = newMgr.mgr->get()->getAttribute("a2");
+        TEST_DO(validateAttribute(a2->get()));
+        AttributeGuard::UP a3 = newMgr.mgr->get()->getAttribute("a3");
+        TEST_DO(validateAttribute(a3->get()));
+    }
+}
+
+// asyncForEachAttribute() visits every registered attribute; the functor
+// collects their names, proving all three were reached.
+TEST_F("require that we can call functions on all attributes via functor",
+       Fixture)
+{
+    f.addAttribute("a1");
+    f.addAttribute("a2");
+    f.addAttribute("a3");
+    std::shared_ptr<MyAttributeFunctor> functor =
+        std::make_shared<MyAttributeFunctor>();
+    f._m.asyncForEachAttribute(functor);
+    EXPECT_EQUAL("a1,a2,a3", functor->getSortedNames());
+}
+
+// getExclusiveReadAccessor() returns an accessor for an existing attribute
+// and nullptr for an unknown name.
+TEST_F("require that we can acquire exclusive read access to attribute", Fixture)
+{
+    f.addAttribute("attr");
+    ExclusiveAttributeReadAccessor::UP attrAccessor = f._m.getExclusiveReadAccessor("attr");
+    ExclusiveAttributeReadAccessor::UP noneAccessor = f._m.getExclusiveReadAccessor("none");
+    EXPECT_TRUE(attrAccessor.get() != nullptr);
+    EXPECT_TRUE(noneAccessor.get() == nullptr);
+}
+
+TEST_MAIN()
+{
+    // Start from a clean slate: tests create/inspect files under test_dir.
+    vespalib::rmdir(test_dir, true);
+    TEST_RUN_ALL();
+}
diff --git a/searchcore/src/tests/proton/attribute/attribute_populator/.gitignore b/searchcore/src/tests/proton/attribute/attribute_populator/.gitignore
new file mode 100644
index 00000000000..2400fd559e6
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/attribute_populator/.gitignore
@@ -0,0 +1 @@
+searchcore_attribute_populator_test_app
diff --git a/searchcore/src/tests/proton/attribute/attribute_populator/CMakeLists.txt b/searchcore/src/tests/proton/attribute/attribute_populator/CMakeLists.txt
new file mode 100644
index 00000000000..064759b88d1
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/attribute_populator/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_attribute_populator_test_app
+ SOURCES
+ attribute_populator_test.cpp
+ DEPENDS
+ searchcore_attribute
+ searchcore_pcommon
+)
+vespa_add_test(NAME searchcore_attribute_populator_test_app COMMAND searchcore_attribute_populator_test_app)
diff --git a/searchcore/src/tests/proton/attribute/attribute_populator/DESC b/searchcore/src/tests/proton/attribute/attribute_populator/DESC
new file mode 100644
index 00000000000..5ef9dcb2709
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/attribute_populator/DESC
@@ -0,0 +1 @@
+attribute_populator test. Take a look at attribute_populator_test.cpp for details.
diff --git a/searchcore/src/tests/proton/attribute/attribute_populator/FILES b/searchcore/src/tests/proton/attribute/attribute_populator/FILES
new file mode 100644
index 00000000000..b6bf0bf8458
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/attribute_populator/FILES
@@ -0,0 +1 @@
+attribute_populator_test.cpp
diff --git a/searchcore/src/tests/proton/attribute/attribute_populator/attribute_populator_test.cpp b/searchcore/src/tests/proton/attribute/attribute_populator/attribute_populator_test.cpp
new file mode 100644
index 00000000000..36e50249b89
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/attribute_populator/attribute_populator_test.cpp
@@ -0,0 +1,98 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("attribute_populator_test");
+#include <vespa/vespalib/testkit/testapp.h>
+
+#include <vespa/searchcommon/common/schema.h>
+#include <vespa/searchcore/proton/attribute/attributemanager.h>
+#include <vespa/searchcore/proton/attribute/attribute_populator.h>
+#include <vespa/searchcore/proton/test/test.h>
+#include <vespa/searchlib/index/docbuilder.h>
+#include <vespa/searchlib/index/dummyfileheadercontext.h>
+#include <vespa/vespalib/util/stringfmt.h>
+#include <vespa/searchlib/common/foregroundtaskexecutor.h>
+
+using namespace document;
+using namespace proton;
+using namespace search;
+using namespace search::index;
+
+typedef search::attribute::Config AVConfig;
+typedef search::attribute::BasicType AVBasicType;
+
+const vespalib::string TEST_DIR = "testdir";
+const uint64_t CREATE_SERIAL_NUM = 8u;
+
+// Builds the one-field schema (int32 attribute "a1") used by DocBuilder in
+// this test.
+Schema
+createSchema()
+{
+    Schema schema;
+    schema.addAttributeField(Schema::AttributeField("a1", Schema::DataType::INT32));
+    return schema;
+}
+
+// Small factory for test documents matching createSchema(): each document
+// carries a single int32 value in attribute field "a1".
+struct DocContext
+{
+    Schema _schema;
+    DocBuilder _builder;
+    DocContext()
+        : _schema(createSchema()),
+          _builder(_schema)
+    {
+    }
+    // Creates "id:searchdocument:searchdocument::<id>" with a1 = fieldValue.
+    Document::UP create(uint32_t id, int64_t fieldValue) {
+        vespalib::string docId =
+                vespalib::make_string("id:searchdocument:searchdocument::%u", id);
+        return _builder.startDocument(docId).
+                startAttributeField("a1").addInt(fieldValue).endField().
+                endDocument();
+    }
+};
+
+// Test fixture: an AttributeManager rooted in a scratch directory with one
+// int32 attribute "a1" pre-added, wrapped by the AttributePopulator under
+// test (constructed with init serial 1 and subdb name "test").
+struct Fixture
+{
+    test::DirectoryHandler _testDir;          // creates/removes TEST_DIR
+    DummyFileHeaderContext _fileHeader;
+    ForegroundTaskExecutor _attributeFieldWriter; // runs writer tasks inline
+    AttributeManager::SP _mgr;
+    AttributePopulator _pop;
+    DocContext _ctx;
+    Fixture()
+        : _testDir(TEST_DIR),
+          _fileHeader(),
+          _attributeFieldWriter(),
+          _mgr(new AttributeManager(TEST_DIR, "test.subdb",
+                                    TuneFileAttributes(),
+                                    _fileHeader, _attributeFieldWriter)),
+          _pop(_mgr, 1, "test"),
+          _ctx()
+    {
+        _mgr->addAttribute("a1", AVConfig(AVBasicType::INT32),
+                           CREATE_SERIAL_NUM);
+    }
+    AttributeGuard::UP getAttr() {
+        return _mgr->getAttribute("a1");
+    }
+};
+
+// handleExisting(lid, doc) grows the attribute to cover lid, writes the
+// document's field value at that lid, and bumps the sync token by one per
+// handled document (1, then 2).
+TEST_F("require that reprocess with document populates attribute", Fixture)
+{
+    AttributeGuard::UP attr = f.getAttr();
+    EXPECT_EQUAL(1u, attr->get().getNumDocs());
+
+    f._pop.handleExisting(5, *f._ctx.create(0, 33));
+    EXPECT_EQUAL(6u, attr->get().getNumDocs());
+    EXPECT_EQUAL(33, attr->get().getInt(5));
+    EXPECT_EQUAL(1u, attr->get().getStatus().getLastSyncToken());
+
+    f._pop.handleExisting(6, *f._ctx.create(1, 44));
+    EXPECT_EQUAL(7u, attr->get().getNumDocs());
+    EXPECT_EQUAL(44, attr->get().getInt(6));
+    EXPECT_EQUAL(2u, attr->get().getStatus().getLastSyncToken());
+}
+
+// No pre-cleanup needed here: DirectoryHandler manages TEST_DIR per fixture.
+TEST_MAIN()
+{
+    TEST_RUN_ALL();
+}
diff --git a/searchcore/src/tests/proton/attribute/attribute_test.cpp b/searchcore/src/tests/proton/attribute/attribute_test.cpp
new file mode 100644
index 00000000000..d5084273c6c
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/attribute_test.cpp
@@ -0,0 +1,607 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("attribute_test");
+
+#include <vespa/fastos/file.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/document/fieldvalue/document.h>
+#include <vespa/document/update/arithmeticvalueupdate.h>
+#include <vespa/searchcommon/attribute/attributecontent.h>
+#include <vespa/searchcore/proton/attribute/attribute_collection_spec_factory.h>
+#include <vespa/searchcore/proton/attribute/attribute_writer.h>
+#include <vespa/searchcore/proton/attribute/attributemanager.h>
+#include <vespa/searchcore/proton/attribute/filter_attribute_manager.h>
+#include <vespa/searchcore/proton/test/attribute_utils.h>
+#include <vespa/searchlib/attribute/attributefactory.h>
+#include <vespa/searchlib/attribute/integerbase.h>
+#include <vespa/searchlib/common/idestructorcallback.h>
+#include <vespa/searchlib/index/docbuilder.h>
+#include <vespa/searchlib/index/dummyfileheadercontext.h>
+#include <vespa/searchlib/util/filekit.h>
+#include <vespa/vespalib/io/fileutil.h>
+
+#include <vespa/document/predicate/predicate_slime_builder.h>
+#include <vespa/document/update/assignvalueupdate.h>
+#include <vespa/searchlib/attribute/attributevector.hpp>
+#include <vespa/searchlib/attribute/predicate_attribute.h>
+#include <vespa/searchlib/predicate/predicate_index.h>
+#include <vespa/searchlib/attribute/singlenumericattribute.hpp>
+#include <vespa/searchlib/predicate/predicate_hash.h>
+#include <vespa/searchlib/common/foregroundtaskexecutor.h>
+#include <vespa/searchcore/proton/test/directory_handler.h>
+#include <vespa/vespalib/tensor/tensor.h>
+#include <vespa/vespalib/tensor/types.h>
+#include <vespa/vespalib/tensor/default_tensor.h>
+#include <vespa/vespalib/tensor/tensor_factory.h>
+#include <vespa/searchlib/attribute/tensorattribute.h>
+
+
+namespace vespa { namespace config { namespace search {}}}
+
+using std::string;
+using namespace vespa::config::search;
+using namespace config;
+using namespace document;
+using namespace proton;
+using namespace search;
+using namespace search::index;
+using search::attribute::TensorAttribute;
+using search::TuneFileAttributes;
+using search::index::DummyFileHeaderContext;
+using search::predicate::PredicateIndex;
+using search::predicate::PredicateHash;
+using vespalib::tensor::Tensor;
+using vespalib::tensor::TensorType;
+using vespalib::tensor::TensorCells;
+using vespalib::tensor::TensorDimensions;
+
+typedef search::attribute::Config AVConfig;
+typedef search::attribute::BasicType AVBasicType;
+typedef search::attribute::CollectionType AVCollectionType;
+typedef proton::AttributeCollectionSpec::Attribute AttrSpec;
+typedef proton::AttributeCollectionSpec::AttributeList AttrSpecList;
+typedef proton::AttributeCollectionSpec AttrMgrSpec;
+typedef SingleValueNumericAttribute<IntegerAttributeTemplate<int32_t> > Int32AttributeVector;
+
+namespace
+{
+
+const uint64_t createSerialNum = 42u;
+
+}
+
+// Returns a copy of the given attribute config.
+// NOTE(review): currently an identity copy — the name suggests it once
+// cleared some registration-related setting on the config; confirm whether
+// the extra indirection is still needed.
+AVConfig
+unregister(const AVConfig & cfg)
+{
+    AVConfig retval = cfg;
+    return retval;
+}
+
+const string test_dir = "test_output";
+const AVConfig INT32_SINGLE = unregister(AVConfig(AVBasicType::INT32));
+const AVConfig INT32_ARRAY = unregister(AVConfig(AVBasicType::INT32, AVCollectionType::ARRAY));
+
+// Convenience wrapper: fill numDocs docs with value and commit at
+// lastSyncToken (delegates to test::AttributeUtils).
+void
+fillAttribute(const AttributeVector::SP &attr, uint32_t numDocs, int64_t value, uint64_t lastSyncToken)
+{
+    test::AttributeUtils::fillAttribute(attr, numDocs, value, lastSyncToken);
+}
+
+// Range overload: fill docs [from, to) with value and commit at
+// lastSyncToken (delegates to test::AttributeUtils).
+void
+fillAttribute(const AttributeVector::SP &attr, uint32_t from, uint32_t to, int64_t value, uint64_t lastSyncToken)
+{
+    test::AttributeUtils::fillAttribute(attr, from, to, value, lastSyncToken);
+}
+
+const std::shared_ptr<IDestructorCallback> emptyCallback;
+
+
+// Test fixture: AttributeManager in a scratch directory plus the
+// AttributeWriter under test; the put/update/remove/commit helpers forward
+// to the writer with a shared empty destructor callback.
+struct Fixture
+{
+    test::DirectoryHandler _dirHandler;       // creates/removes test_dir
+    DummyFileHeaderContext _fileHeaderContext;
+    ForegroundTaskExecutor _attributeFieldWriter; // runs writer tasks inline
+    proton::AttributeManager::SP _m;
+    AttributeWriter aw;
+
+    Fixture()
+        : _dirHandler(test_dir),
+          _fileHeaderContext(),
+          _attributeFieldWriter(),
+          _m(std::make_shared<proton::AttributeManager>
+             (test_dir, "test.subdb", TuneFileAttributes(),
+              _fileHeaderContext, _attributeFieldWriter)),
+          aw(_m)
+    {
+    }
+    // Adds a single-value int32 attribute at createSerialNum.
+    AttributeVector::SP addAttribute(const vespalib::string &name) {
+        return _m->addAttribute(name, AVConfig(AVBasicType::INT32),
+                                createSerialNum);
+    }
+    void put(SerialNum serialNum, const Document &doc, DocumentIdT lid,
+             bool immediateCommit = true) {
+        aw.put(serialNum, doc, lid, immediateCommit, emptyCallback);
+    }
+    void update(SerialNum serialNum, const DocumentUpdate &upd,
+                DocumentIdT lid, bool immediateCommit) {
+        aw.update(serialNum, upd, lid, immediateCommit, emptyCallback);
+    }
+    void remove(SerialNum serialNum, DocumentIdT lid, bool immediateCommit = true) {
+        aw.remove(serialNum, lid, immediateCommit, emptyCallback);
+    }
+    void commit(SerialNum serialNum) {
+        aw.commit(serialNum, emptyCallback);
+    }
+};
+
+
+// Exercises AttributeWriter::put across four attribute types (int32 single,
+// int32 array, float, string) in three phases: an empty document yields
+// default/undefined values, a populated document writes its values, and a
+// re-put at the same lid replaces them. Every put advances all attributes'
+// sync tokens, including those the document does not touch.
+TEST_F("require that attribute adapter handles put", Fixture)
+{
+    Schema s;
+    s.addAttributeField(Schema::AttributeField("a1", Schema::INT32, Schema::SINGLE));
+    s.addAttributeField(Schema::AttributeField("a2", Schema::INT32, Schema::ARRAY));
+    s.addAttributeField(Schema::AttributeField("a3", Schema::FLOAT, Schema::SINGLE));
+    s.addAttributeField(Schema::AttributeField("a4", Schema::STRING, Schema::SINGLE));
+
+    DocBuilder idb(s);
+
+    proton::AttributeManager & am = *f._m;
+    AttributeVector::SP a1 = f.addAttribute("a1");
+    AttributeVector::SP a2 =
+        am.addAttribute("a2",
+                        AVConfig(AVBasicType::INT32,
+                                 AVCollectionType::ARRAY),
+                        createSerialNum);
+    AttributeVector::SP a3 =
+        am.addAttribute("a3", AVConfig(AVBasicType::FLOAT),
+                        createSerialNum);
+    AttributeVector::SP a4 = am.addAttribute("a4",
+                                             AVConfig(AVBasicType::STRING),
+                                             createSerialNum);
+
+    attribute::IntegerContent ibuf;
+    attribute::FloatContent fbuf;
+    attribute::ConstCharContent sbuf;
+    { // empty document should give default values
+        EXPECT_EQUAL(1u, a1->getNumDocs());
+        f.put(1, *idb.startDocument("doc::1").endDocument(), 1);
+        EXPECT_EQUAL(2u, a1->getNumDocs());
+        EXPECT_EQUAL(2u, a2->getNumDocs());
+        EXPECT_EQUAL(2u, a3->getNumDocs());
+        EXPECT_EQUAL(2u, a4->getNumDocs());
+        EXPECT_EQUAL(1u, a1->getStatus().getLastSyncToken());
+        EXPECT_EQUAL(1u, a2->getStatus().getLastSyncToken());
+        EXPECT_EQUAL(1u, a3->getStatus().getLastSyncToken());
+        EXPECT_EQUAL(1u, a4->getStatus().getLastSyncToken());
+        ibuf.fill(*a1, 1);
+        EXPECT_EQUAL(1u, ibuf.size());
+        EXPECT_TRUE(search::attribute::isUndefined<int32_t>(ibuf[0]));
+        ibuf.fill(*a2, 1);
+        EXPECT_EQUAL(0u, ibuf.size());
+        fbuf.fill(*a3, 1);
+        EXPECT_EQUAL(1u, fbuf.size());
+        EXPECT_TRUE(search::attribute::isUndefined<float>(fbuf[0]));
+        sbuf.fill(*a4, 1);
+        EXPECT_EQUAL(1u, sbuf.size());
+        EXPECT_EQUAL(strcmp("", sbuf[0]), 0);
+    }
+    { // document with single value & multi value attribute
+        Document::UP doc = idb.startDocument("doc::2").
+                           startAttributeField("a1").addInt(10).endField().
+                           startAttributeField("a2").startElement().addInt(20).endElement().
+                           startElement().addInt(30).endElement().endField().endDocument();
+        f.put(2, *doc, 2);
+        EXPECT_EQUAL(3u, a1->getNumDocs());
+        EXPECT_EQUAL(3u, a2->getNumDocs());
+        EXPECT_EQUAL(2u, a1->getStatus().getLastSyncToken());
+        EXPECT_EQUAL(2u, a2->getStatus().getLastSyncToken());
+        EXPECT_EQUAL(2u, a3->getStatus().getLastSyncToken());
+        EXPECT_EQUAL(2u, a4->getStatus().getLastSyncToken());
+        ibuf.fill(*a1, 2);
+        EXPECT_EQUAL(1u, ibuf.size());
+        EXPECT_EQUAL(10u, ibuf[0]);
+        ibuf.fill(*a2, 2);
+        EXPECT_EQUAL(2u, ibuf.size());
+        EXPECT_EQUAL(20u, ibuf[0]);
+        EXPECT_EQUAL(30u, ibuf[1]);
+    }
+    { // replace existing document
+        Document::UP doc = idb.startDocument("doc::2").
+                           startAttributeField("a1").addInt(100).endField().
+                           startAttributeField("a2").startElement().addInt(200).endElement().
+                           startElement().addInt(300).endElement().
+                           startElement().addInt(400).endElement().endField().endDocument();
+        f.put(3, *doc, 2);
+        EXPECT_EQUAL(3u, a1->getNumDocs());
+        EXPECT_EQUAL(3u, a2->getNumDocs());
+        EXPECT_EQUAL(3u, a1->getStatus().getLastSyncToken());
+        EXPECT_EQUAL(3u, a2->getStatus().getLastSyncToken());
+        EXPECT_EQUAL(3u, a3->getStatus().getLastSyncToken());
+        EXPECT_EQUAL(3u, a4->getStatus().getLastSyncToken());
+        ibuf.fill(*a1, 2);
+        EXPECT_EQUAL(1u, ibuf.size());
+        EXPECT_EQUAL(100u, ibuf[0]);
+        ibuf.fill(*a2, 2);
+        EXPECT_EQUAL(3u, ibuf.size());
+        EXPECT_EQUAL(200u, ibuf[0]);
+        EXPECT_EQUAL(300u, ibuf[1]);
+        EXPECT_EQUAL(400u, ibuf[2]);
+    }
+}
+
+// Predicate variant of the put test: an empty document leaves the predicate
+// index untouched, a true-predicate lands in the zero-constraint set, and a
+// re-put with a feature predicate makes "foo=bar" findable in the interval
+// index.
+TEST_F("require that attribute adapter handles predicate put", Fixture)
+{
+    Schema s;
+    s.addAttributeField(
+            Schema::AttributeField("a1", Schema::BOOLEANTREE, Schema::SINGLE));
+    DocBuilder idb(s);
+
+    proton::AttributeManager & am = *f._m;
+    AttributeVector::SP a1 = am.addAttribute("a1",
+                                             AVConfig(AVBasicType::PREDICATE),
+                                             createSerialNum);
+
+    PredicateIndex &index = static_cast<PredicateAttribute &>(*a1).getIndex();
+
+    // empty document should give default values
+    EXPECT_EQUAL(1u, a1->getNumDocs());
+    f.put(1, *idb.startDocument("doc::1").endDocument(), 1);
+    EXPECT_EQUAL(2u, a1->getNumDocs());
+    EXPECT_EQUAL(1u, a1->getStatus().getLastSyncToken());
+    EXPECT_EQUAL(0u, index.getZeroConstraintDocs().size());
+
+    // document with single value attribute
+    PredicateSlimeBuilder builder;
+    Document::UP doc =
+        idb.startDocument("doc::2").startAttributeField("a1")
+        .addPredicate(builder.true_predicate().build())
+        .endField().endDocument();
+    f.put(2, *doc, 2);
+    EXPECT_EQUAL(3u, a1->getNumDocs());
+    EXPECT_EQUAL(2u, a1->getStatus().getLastSyncToken());
+    EXPECT_EQUAL(1u, index.getZeroConstraintDocs().size());
+
+    // "foo=bar" not indexed yet.
+    auto it = index.getIntervalIndex().lookup(PredicateHash::hash64("foo=bar"));
+    EXPECT_FALSE(it.valid());
+
+    // replace existing document
+    doc = idb.startDocument("doc::2").startAttributeField("a1")
+          .addPredicate(builder.feature("foo").value("bar").build())
+          .endField().endDocument();
+    f.put(3, *doc, 2);
+    EXPECT_EQUAL(3u, a1->getNumDocs());
+    EXPECT_EQUAL(3u, a1->getStatus().getLastSyncToken());
+
+    it = index.getIntervalIndex().lookup(PredicateHash::hash64("foo=bar"));
+    EXPECT_TRUE(it.valid());
+}
+
+// remove() clears the lid's value in all attributes (back to undefined).
+// A repeated remove at the same serial, and one at a lower serial, must not
+// crash — the writer may either ignore them or throw IllegalStateException.
+// NOTE(review): both branches of the try/catch assert true, so the
+// lower-serial path verifies "no crash" only, not a specific behavior.
+TEST_F("require that attribute adapter handles remove", Fixture)
+{
+    AttributeVector::SP a1 = f.addAttribute("a1");
+    AttributeVector::SP a2 = f.addAttribute("a2");
+    Schema s;
+    s.addAttributeField(Schema::AttributeField("a1", Schema::INT32, Schema::SINGLE));
+    s.addAttributeField(Schema::AttributeField("a2", Schema::INT32, Schema::SINGLE));
+
+    DocBuilder idb(s);
+
+    fillAttribute(a1, 1, 10, 1);
+    fillAttribute(a2, 1, 20, 1);
+
+    f.remove(2, 0);
+
+    EXPECT_TRUE(search::attribute::isUndefined<int32_t>(a1->getInt(0)));
+    EXPECT_TRUE(search::attribute::isUndefined<int32_t>(a2->getInt(0)));
+
+    f.remove(2, 0); // same sync token as previous
+    try {
+        f.remove(1, 0); // lower sync token than previous
+        EXPECT_TRUE(true); // update is ignored
+    } catch (vespalib::IllegalStateException & e) {
+        LOG(info, "Got expected exception: '%s'", e.getMessage().c_str());
+        EXPECT_TRUE(true);
+    }
+}
+
+void verifyAttributeContent(const AttributeVector & v, uint32_t lid, vespalib::stringref expected)
+{
+ attribute::ConstCharContent sbuf;
+ sbuf.fill(v, lid);
+ EXPECT_EQUAL(1u, sbuf.size());
+ EXPECT_EQUAL(expected, sbuf[0]);
+}
+
+TEST_F("require that visibilitydelay is honoured", Fixture)
+{
+ proton::AttributeManager & am = *f._m;
+ AttributeVector::SP a1 = am.addAttribute("a1",
+ AVConfig(AVBasicType::STRING),
+ createSerialNum);
+ Schema s;
+ s.addAttributeField(Schema::AttributeField("a1", Schema::STRING, Schema::SINGLE));
+ DocBuilder idb(s);
+ EXPECT_EQUAL(1u, a1->getNumDocs());
+ EXPECT_EQUAL(0u, a1->getStatus().getLastSyncToken());
+ Document::UP doc = idb.startDocument("doc::1")
+ .startAttributeField("a1").addStr("10").endField()
+ .endDocument();
+ f.put(3, *doc, 1);
+ EXPECT_EQUAL(2u, a1->getNumDocs());
+ EXPECT_EQUAL(3u, a1->getStatus().getLastSyncToken());
+ AttributeWriter awDelayed(f._m);
+ awDelayed.put(4, *doc, 2, false, emptyCallback);
+ EXPECT_EQUAL(3u, a1->getNumDocs());
+ EXPECT_EQUAL(3u, a1->getStatus().getLastSyncToken());
+ awDelayed.put(5, *doc, 4, false, emptyCallback);
+ EXPECT_EQUAL(5u, a1->getNumDocs());
+ EXPECT_EQUAL(3u, a1->getStatus().getLastSyncToken());
+ awDelayed.commit(6, emptyCallback);
+ EXPECT_EQUAL(6u, a1->getStatus().getLastSyncToken());
+
+ AttributeWriter awDelayedShort(f._m);
+ awDelayedShort.put(7, *doc, 2, false, emptyCallback);
+ EXPECT_EQUAL(6u, a1->getStatus().getLastSyncToken());
+ awDelayedShort.put(8, *doc, 2, false, emptyCallback);
+ awDelayedShort.commit(8, emptyCallback);
+ EXPECT_EQUAL(8u, a1->getStatus().getLastSyncToken());
+
+ verifyAttributeContent(*a1, 2, "10");
+ awDelayed.put(9, *idb.startDocument("doc::1").startAttributeField("a1").addStr("11").endField().endDocument(),
+ 2, false, emptyCallback);
+ awDelayed.put(10, *idb.startDocument("doc::1").startAttributeField("a1").addStr("20").endField().endDocument(),
+ 2, false, emptyCallback);
+ awDelayed.put(11, *idb.startDocument("doc::1").startAttributeField("a1").addStr("30").endField().endDocument(),
+ 2, false, emptyCallback);
+ EXPECT_EQUAL(8u, a1->getStatus().getLastSyncToken());
+ verifyAttributeContent(*a1, 2, "10");
+ awDelayed.commit(12, emptyCallback);
+ EXPECT_EQUAL(12u, a1->getStatus().getLastSyncToken());
+ verifyAttributeContent(*a1, 2, "30");
+
+}
+
+TEST_F("require that attribute adapter handles predicate remove", Fixture)
+{
+ proton::AttributeManager & am = *f._m;
+ AttributeVector::SP a1 = am.addAttribute("a1",
+ AVConfig(AVBasicType::PREDICATE),
+ createSerialNum);
+ Schema s;
+ s.addAttributeField(
+ Schema::AttributeField("a1", Schema::BOOLEANTREE, Schema::SINGLE));
+
+ DocBuilder idb(s);
+ PredicateSlimeBuilder builder;
+ Document::UP doc =
+ idb.startDocument("doc::1").startAttributeField("a1")
+ .addPredicate(builder.true_predicate().build())
+ .endField().endDocument();
+ f.put(1, *doc, 1);
+ EXPECT_EQUAL(2u, a1->getNumDocs());
+
+ PredicateIndex &index = static_cast<PredicateAttribute &>(*a1).getIndex();
+ EXPECT_EQUAL(1u, index.getZeroConstraintDocs().size());
+ f.remove(2, 1);
+ EXPECT_EQUAL(0u, index.getZeroConstraintDocs().size());
+}
+
+TEST_F("require that attribute adapter handles update", Fixture)
+{
+ AttributeVector::SP a1 = f.addAttribute("a1");
+ AttributeVector::SP a2 = f.addAttribute("a2");
+
+ fillAttribute(a1, 1, 10, 1);
+ fillAttribute(a2, 1, 20, 1);
+
+ Schema schema;
+ schema.addAttributeField(Schema::AttributeField(
+ "a1", Schema::INT32,
+ Schema::SINGLE));
+ schema.addAttributeField(Schema::AttributeField(
+ "a2", Schema::INT32,
+ Schema::SINGLE));
+ DocBuilder idb(schema);
+ const document::DocumentType &dt(idb.getDocumentType());
+ DocumentUpdate upd(dt, DocumentId("doc::1"));
+ upd.addUpdate(FieldUpdate(upd.getType().getField("a1"))
+ .addUpdate(ArithmeticValueUpdate(ArithmeticValueUpdate::Add, 5)));
+ upd.addUpdate(FieldUpdate(upd.getType().getField("a2"))
+ .addUpdate(ArithmeticValueUpdate(ArithmeticValueUpdate::Add, 10)));
+
+ bool immediateCommit = true;
+ f.update(2, upd, 1, immediateCommit);
+
+ attribute::IntegerContent ibuf;
+ ibuf.fill(*a1, 1);
+ EXPECT_EQUAL(1u, ibuf.size());
+ EXPECT_EQUAL(15u, ibuf[0]);
+ ibuf.fill(*a2, 1);
+ EXPECT_EQUAL(1u, ibuf.size());
+ EXPECT_EQUAL(30u, ibuf[0]);
+
+ f.update(2, upd, 1, immediateCommit); // same sync token as previous
+ try {
+ f.update(1, upd, 1, immediateCommit); // lower sync token than previous
+ EXPECT_TRUE(true); // update is ignored
+ } catch (vespalib::IllegalStateException & e) {
+ LOG(info, "Got expected exception: '%s'", e.getMessage().c_str());
+ EXPECT_TRUE(true);
+ }
+}
+
+TEST_F("require that attribute adapter handles predicate update", Fixture)
+{
+ proton::AttributeManager & am = *f._m;
+ AttributeVector::SP a1 = am.addAttribute("a1",
+ AVConfig(AVBasicType::PREDICATE),
+ createSerialNum);
+ Schema schema;
+ schema.addAttributeField(Schema::AttributeField(
+ "a1", Schema::BOOLEANTREE,
+ Schema::SINGLE));
+
+ DocBuilder idb(schema);
+ PredicateSlimeBuilder builder;
+ Document::UP doc =
+ idb.startDocument("doc::1").startAttributeField("a1")
+ .addPredicate(builder.true_predicate().build())
+ .endField().endDocument();
+ f.put(1, *doc, 1);
+ EXPECT_EQUAL(2u, a1->getNumDocs());
+
+ const document::DocumentType &dt(idb.getDocumentType());
+ DocumentUpdate upd(dt, DocumentId("doc::1"));
+ PredicateFieldValue new_value(builder.feature("foo").value("bar").build());
+ upd.addUpdate(FieldUpdate(upd.getType().getField("a1"))
+ .addUpdate(AssignValueUpdate(new_value)));
+
+ PredicateIndex &index = static_cast<PredicateAttribute &>(*a1).getIndex();
+ EXPECT_EQUAL(1u, index.getZeroConstraintDocs().size());
+ EXPECT_FALSE(index.getIntervalIndex().lookup(PredicateHash::hash64("foo=bar")).valid());
+ bool immediateCommit = true;
+ f.update(2, upd, 1, immediateCommit);
+ EXPECT_EQUAL(0u, index.getZeroConstraintDocs().size());
+ EXPECT_TRUE(index.getIntervalIndex().lookup(PredicateHash::hash64("foo=bar")).valid());
+}
+
+struct AttributeCollectionSpecFixture
+{
+ AttributesConfigBuilder _builder;
+ AttributeCollectionSpecFactory _factory;
+ AttributeCollectionSpecFixture(bool fastAccessOnly)
+ : _builder(),
+ _factory(search::GrowStrategy(), 100, fastAccessOnly)
+ {
+ addAttribute("a1", false);
+ addAttribute("a2", true);
+ }
+ void addAttribute(const vespalib::string &name, bool fastAccess) {
+ AttributesConfigBuilder::Attribute attr;
+ attr.name = name;
+ attr.fastaccess = fastAccess;
+ _builder.attribute.push_back(attr);
+ }
+ AttributeCollectionSpec::UP create(uint32_t docIdLimit,
+ search::SerialNum serialNum) {
+ return _factory.create(_builder, docIdLimit, serialNum);
+ }
+};
+
+struct NormalAttributeCollectionSpecFixture : public AttributeCollectionSpecFixture
+{
+ NormalAttributeCollectionSpecFixture() : AttributeCollectionSpecFixture(false) {}
+};
+
+struct FastAccessAttributeCollectionSpecFixture : public AttributeCollectionSpecFixture
+{
+ FastAccessAttributeCollectionSpecFixture() : AttributeCollectionSpecFixture(true) {}
+};
+
+TEST_F("require that normal attribute collection spec can be created",
+ NormalAttributeCollectionSpecFixture)
+{
+ AttributeCollectionSpec::UP spec = f.create(10, 20);
+ EXPECT_EQUAL(2u, spec->getAttributes().size());
+ EXPECT_EQUAL("a1", spec->getAttributes()[0].getName());
+ EXPECT_EQUAL("a2", spec->getAttributes()[1].getName());
+ EXPECT_EQUAL(10u, spec->getDocIdLimit());
+ EXPECT_EQUAL(20u, spec->getCurrentSerialNum());
+}
+
+TEST_F("require that fast access attribute collection spec can be created",
+ FastAccessAttributeCollectionSpecFixture)
+{
+ AttributeCollectionSpec::UP spec = f.create(10, 20);
+ EXPECT_EQUAL(1u, spec->getAttributes().size());
+ EXPECT_EQUAL("a2", spec->getAttributes()[0].getName());
+ EXPECT_EQUAL(10u, spec->getDocIdLimit());
+ EXPECT_EQUAL(20u, spec->getCurrentSerialNum());
+}
+
+const FilterAttributeManager::AttributeSet ACCEPTED_ATTRIBUTES = {"a2"};
+
+struct FilterFixture
+{
+ test::DirectoryHandler _dirHandler;
+ DummyFileHeaderContext _fileHeaderContext;
+ ForegroundTaskExecutor _attributeFieldWriter;
+ proton::AttributeManager::SP _baseMgr;
+ FilterAttributeManager _filterMgr;
+ FilterFixture()
+ : _dirHandler(test_dir),
+ _fileHeaderContext(),
+ _attributeFieldWriter(),
+ _baseMgr(new proton::AttributeManager(test_dir, "test.subdb",
+ TuneFileAttributes(),
+ _fileHeaderContext,
+ _attributeFieldWriter)),
+ _filterMgr(ACCEPTED_ATTRIBUTES, _baseMgr)
+ {
+ _baseMgr->addAttribute("a1", INT32_SINGLE, createSerialNum);
+ _baseMgr->addAttribute("a2", INT32_SINGLE, createSerialNum);
+ }
+};
+
+TEST_F("require that filter attribute manager can filter attributes", FilterFixture)
+{
+ EXPECT_TRUE(f._filterMgr.getAttribute("a1").get() == NULL);
+ EXPECT_TRUE(f._filterMgr.getAttribute("a2").get() != NULL);
+ std::vector<AttributeGuard> attrs;
+ f._filterMgr.getAttributeList(attrs);
+ EXPECT_EQUAL(1u, attrs.size());
+ EXPECT_EQUAL("a2", attrs[0].get().getName());
+}
+
+TEST_F("require that filter attribute manager can return flushed serial number", FilterFixture)
+{
+ f._baseMgr->flushAll(100);
+ EXPECT_EQUAL(0u, f._filterMgr.getFlushedSerialNum("a1"));
+ EXPECT_EQUAL(100u, f._filterMgr.getFlushedSerialNum("a2"));
+}
+
+namespace {
+
+Tensor::UP
+createTensor(const TensorCells &cells, const TensorDimensions &dimensions) {
+ vespalib::tensor::DefaultTensor::builder builder;
+ return vespalib::tensor::TensorFactory::create(cells, dimensions, builder);
+}
+
+}
+
+
+TEST_F("Test that we can use attribute writer to write to tensor attribute",
+ Fixture)
+{
+ proton::AttributeManager & am = *f._m;
+ AVConfig cfg(AVBasicType::TENSOR);
+ cfg.setTensorType(TensorType::fromSpec("tensor(x{},y{})"));
+ AttributeVector::SP a1 = am.addAttribute("a1",
+ cfg,
+ createSerialNum);
+ Schema s;
+ s.addAttributeField(Schema::AttributeField("a1", Schema::TENSOR,
+ Schema::SINGLE));
+ DocBuilder builder(s);
+ auto tensor = createTensor({ {{{"x", "4"}, {"y", "5"}}, 7} },
+ {"x", "y"});
+ Document::UP doc = builder.startDocument("doc::1").
+ startAttributeField("a1").
+ addTensor(tensor->clone()).endField().endDocument();
+ f.put(1, *doc, 1);
+ EXPECT_EQUAL(2u, a1->getNumDocs());
+ TensorAttribute *tensorAttribute =
+ dynamic_cast<TensorAttribute *>(a1.get());
+ EXPECT_TRUE(tensorAttribute != nullptr);
+ auto tensor2 = tensorAttribute->getTensor(1);
+ EXPECT_TRUE(static_cast<bool>(tensor2));
+ EXPECT_TRUE(tensor->equals(*tensor2));
+}
+
+TEST_MAIN()
+{
+ vespalib::rmdir(test_dir, true);
+ TEST_RUN_ALL();
+}
diff --git a/searchcore/src/tests/proton/attribute/attribute_test.sh b/searchcore/src/tests/proton/attribute/attribute_test.sh
new file mode 100755
index 00000000000..950a9f92bb8
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/attribute_test.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+rm -rf test_output
+$VALGRIND ./searchcore_attribute_test_app
diff --git a/searchcore/src/tests/proton/attribute/attribute_usage_filter/.gitignore b/searchcore/src/tests/proton/attribute/attribute_usage_filter/.gitignore
new file mode 100644
index 00000000000..2642c637ea0
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/attribute_usage_filter/.gitignore
@@ -0,0 +1 @@
+searchcore_attribute_usage_filter_test_app
diff --git a/searchcore/src/tests/proton/attribute/attribute_usage_filter/CMakeLists.txt b/searchcore/src/tests/proton/attribute/attribute_usage_filter/CMakeLists.txt
new file mode 100644
index 00000000000..2dd66c2a3ec
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/attribute_usage_filter/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_attribute_usage_filter_test_app
+ SOURCES
+ attribute_usage_filter_test.cpp
+ DEPENDS
+ searchcore_attribute
+)
+vespa_add_test(NAME searchcore_attribute_usage_filter_test_app COMMAND searchcore_attribute_usage_filter_test_app)
diff --git a/searchcore/src/tests/proton/attribute/attribute_usage_filter/DESC b/searchcore/src/tests/proton/attribute/attribute_usage_filter/DESC
new file mode 100644
index 00000000000..31b3afbcdf7
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/attribute_usage_filter/DESC
@@ -0,0 +1 @@
+AttributeUsageFilter test. Take a look at attribute_usage_filter_test.cpp for details.
diff --git a/searchcore/src/tests/proton/attribute/attribute_usage_filter/FILES b/searchcore/src/tests/proton/attribute/attribute_usage_filter/FILES
new file mode 100644
index 00000000000..b63aeb79d02
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/attribute_usage_filter/FILES
@@ -0,0 +1 @@
+attribute_usage_filter_test.cpp
diff --git a/searchcore/src/tests/proton/attribute/attribute_usage_filter/attribute_usage_filter_test.cpp b/searchcore/src/tests/proton/attribute/attribute_usage_filter/attribute_usage_filter_test.cpp
new file mode 100644
index 00000000000..d8ede8030e2
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/attribute_usage_filter/attribute_usage_filter_test.cpp
@@ -0,0 +1,143 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("attribute_usage_filter_test");
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/searchcore/proton/attribute/attribute_usage_filter.h>
+
+using proton::AttributeUsageFilter;
+using proton::AttributeUsageStats;
+
+namespace
+{
+
+search::AddressSpace enumStoreOverLoad(30 * 1024 * 1024 * UINT64_C(1024),
+ 32 * 1024 * 1024 * UINT64_C(1024));
+
+search::AddressSpace multiValueOverLoad(127 * 1024 * 1024,
+ 128 * 1024 * 1024);
+
+
+
+class MyAttributeStats : public AttributeUsageStats
+{
+public:
+ void triggerEnumStoreLimit() {
+ merge({ enumStoreOverLoad,
+ search::AddressSpaceUsage::defaultMultiValueUsage() },
+ "enumeratedName",
+ "ready");
+ }
+
+ void triggerMultiValueLimit() {
+ merge({ search::AddressSpaceUsage::defaultEnumStoreUsage(),
+ multiValueOverLoad },
+ "multiValueName",
+ "ready");
+ }
+};
+
+struct Fixture
+{
+ AttributeUsageFilter _filter;
+ using State = AttributeUsageFilter::State;
+ using Config = AttributeUsageFilter::Config;
+
+ Fixture()
+ : _filter()
+ {
+ }
+
+ void testWrite(const vespalib::string &exp) {
+ if (exp.empty()) {
+ EXPECT_TRUE(_filter.acceptWriteOperation());
+ State state = _filter.getAcceptState();
+ EXPECT_TRUE(state.acceptWriteOperation());
+ EXPECT_EQUAL(exp, state.message());
+ } else {
+ EXPECT_FALSE(_filter.acceptWriteOperation());
+ State state = _filter.getAcceptState();
+ EXPECT_FALSE(state.acceptWriteOperation());
+ EXPECT_EQUAL(exp, state.message());
+ }
+ }
+
+ void setAttributeStats(const AttributeUsageStats &stats) {
+ _filter.setAttributeStats(stats);
+ }
+};
+
+}
+
+TEST_F("Check that default filter allows write", Fixture)
+{
+ f.testWrite("");
+}
+
+
+TEST_F("Check that enum store limit can be reached", Fixture)
+{
+ f._filter.setConfig(Fixture::Config(0.8, 1.0));
+ MyAttributeStats stats;
+ stats.triggerEnumStoreLimit();
+ f.setAttributeStats(stats);
+ f.testWrite("enumStoreLimitReached: { "
+ "action: \""
+ "add more content nodes"
+ "\", "
+ "reason: \""
+ "enum store address space used (0.9375) > limit (0.8)"
+ "\", "
+ "enumStore: { used: 32212254720, limit: 34359738368}, "
+ "attributeName: \"enumeratedName\", subdb: \"ready\"}");
+}
+
+TEST_F("Check that multivalue limit can be reached", Fixture)
+{
+ f._filter.setConfig(Fixture::Config(1.0, 0.8));
+ MyAttributeStats stats;
+ stats.triggerMultiValueLimit();
+ f.setAttributeStats(stats);
+ f.testWrite("multiValueLimitReached: { "
+ "action: \""
+ "use 'huge' setting on attribute field "
+ "or add more content nodes"
+ "\", "
+ "reason: \""
+ "multiValue address space used (0.992188) > limit (0.8)"
+ "\", "
+ "multiValue: { used: 133169152, limit: 134217728}, "
+ "attributeName: \"multiValueName\", subdb: \"ready\"}");
+}
+
+TEST_F("Check that both enumstore limit and multivalue limit can be reached",
+ Fixture)
+{
+ f._filter.setConfig(Fixture::Config(0.8, 0.8));
+ MyAttributeStats stats;
+ stats.triggerEnumStoreLimit();
+ stats.triggerMultiValueLimit();
+ f.setAttributeStats(stats);
+ f.testWrite("enumStoreLimitReached: { "
+ "action: \""
+ "add more content nodes"
+ "\", "
+ "reason: \""
+ "enum store address space used (0.9375) > limit (0.8)"
+ "\", "
+ "enumStore: { used: 32212254720, limit: 34359738368}, "
+ "attributeName: \"enumeratedName\", subdb: \"ready\"}"
+ ", "
+ "multiValueLimitReached: { "
+ "action: \""
+ "use 'huge' setting on attribute field "
+ "or add more content nodes"
+ "\", "
+ "reason: \""
+ "multiValue address space used (0.992188) > limit (0.8)"
+ "\", "
+ "multiValue: { used: 133169152, limit: 134217728}, "
+ "attributeName: \"multiValueName\", subdb: \"ready\"}");
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/attribute/attributeflush_test.cpp b/searchcore/src/tests/proton/attribute/attributeflush_test.cpp
new file mode 100644
index 00000000000..53904e14658
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/attributeflush_test.cpp
@@ -0,0 +1,564 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("attributeflush_test");
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/util/threadstackexecutor.h>
+#include <vespa/vespalib/util/sync.h>
+#include <vespa/searchcore/proton/attribute/attributemanager.h>
+#include <vespa/searchcore/proton/attribute/attribute_writer.h>
+#include <vespa/searchcore/proton/attribute/flushableattribute.h>
+#include <vespa/searchlib/attribute/attributefactory.h>
+#include <vespa/searchlib/attribute/integerbase.h>
+#include <vespa/searchlib/common/indexmetainfo.h>
+#include <vespa/searchlib/util/dirtraverse.h>
+#include <vespa/vespalib/io/fileutil.h>
+#include <vespa/searchlib/index/dummyfileheadercontext.h>
+#include <vespa/searchlib/common/foregroundtaskexecutor.h>
+#include <vespa/searchcore/proton/test/directory_handler.h>
+
+#include <vespa/searchlib/attribute/attributevector.hpp>
+
+using namespace document;
+using namespace search;
+using namespace vespalib;
+
+using search::index::DummyFileHeaderContext;
+
+typedef search::attribute::Config AVConfig;
+typedef search::attribute::BasicType AVBasicType;
+typedef search::attribute::CollectionType AVCollectionType;
+
+typedef std::shared_ptr<Gate> GateSP;
+
+namespace proton {
+
+namespace
+{
+
+const uint64_t createSerialNum = 42u;
+
+}
+
+class TaskWrapper : public Executor::Task
+{
+private:
+ Executor::Task::UP _task;
+ GateSP _gate;
+public:
+ TaskWrapper(Executor::Task::UP task, const GateSP &gate)
+ : _task(std::move(task)),
+ _gate(gate)
+ {
+ }
+
+ virtual void
+ run(void)
+ {
+ _task->run();
+ _gate->countDown();
+ LOG(info, "doneFlushing");
+ }
+};
+
+
+class FlushHandler
+{
+private:
+ ThreadStackExecutor _executor;
+public:
+ GateSP gate;
+
+ FlushHandler()
+ : _executor(1, 65536),
+ gate()
+ {
+ }
+
+ void
+ doFlushing(Executor::Task::UP task)
+ {
+ Executor::Task::UP wrapper(new TaskWrapper(std::move(task), gate));
+ Executor::Task::UP ok = _executor.execute(std::move(wrapper));
+ assert(ok.get() == NULL);
+ }
+};
+
+
+class UpdaterTask
+{
+private:
+ proton::AttributeManager & _am;
+public:
+ UpdaterTask(proton::AttributeManager & am)
+ :
+ _am(am)
+ {
+ }
+
+ void
+ startFlushing(uint64_t syncToken, FlushHandler & handler);
+
+ void
+ run(void);
+};
+
+
+void
+UpdaterTask::startFlushing(uint64_t syncToken, FlushHandler & handler)
+{
+ handler.gate.reset(new Gate());
+ IFlushTarget::SP flushable = _am.getFlushable("a1");
+ LOG(info, "startFlushing(%" PRIu64 ")", syncToken);
+ handler.doFlushing(flushable->initFlush(syncToken));
+}
+
+
+void
+UpdaterTask::run(void)
+{
+ LOG(info, "UpdaterTask::run(begin)");
+ uint32_t totalDocs = 2000000;
+ uint32_t totalDocsMax = 125000000; // XXX: Timing dependent.
+ uint32_t slowdownUpdateLim = 4000000;
+ bool slowedDown = false;
+ uint32_t incDocs = 1000;
+ uint64_t commits = 0;
+ uint32_t flushCount = 0;
+ uint64_t flushedToken = 0;
+ uint64_t needFlushToken = 0;
+ FlushHandler flushHandler;
+ for (uint32_t i = incDocs;
+ i <= totalDocs || (flushCount + (flushedToken <
+ needFlushToken) <= 2 &&
+ i <= totalDocsMax);
+ i += incDocs) {
+ uint32_t startDoc = 0;
+ uint32_t lastDoc = 0;
+ AttributeGuard::UP agap = _am.getAttribute("a1");
+ AttributeGuard &ag(*agap);
+ IntegerAttribute & ia = static_cast<IntegerAttribute &>(*ag);
+ for (uint32_t j = i - incDocs; j < i; ++j) {
+ if (j >= ag->getNumDocs()) {
+ ag->addDocs(startDoc, lastDoc, incDocs);
+ if (i % (totalDocs / 20) == 0) {
+ LOG(info,
+ "addDocs(%u, %u, %u)",
+ startDoc, lastDoc, ag->getNumDocs());
+ }
+ }
+ ia.update(j, i);
+ }
+ ia.commit(i-1, i); // save i as last sync token
+ needFlushToken = i;
+ assert(i + 1 == ag->getNumDocs());
+ if ((commits++ % 20 == 0) &&
+ (flushHandler.gate.get() == NULL ||
+ flushHandler.gate->getCount() == 0)) {
+ startFlushing(i, flushHandler);
+ ++flushCount;
+ flushedToken = i;
+ slowedDown = false;
+ }
+ if (needFlushToken > flushedToken + slowdownUpdateLim) {
+ FastOS_Thread::Sleep(100);
+ if (!slowedDown) {
+ LOG(warning,
+ "Slowing down updates due to slow flushing (slow disk ?)");
+ }
+ slowedDown = true;
+ }
+ }
+ if (flushHandler.gate.get() != NULL) {
+ flushHandler.gate->await();
+ }
+ if (flushedToken < needFlushToken) {
+ startFlushing(needFlushToken, flushHandler);
+ flushHandler.gate->await();
+ }
+ LOG(info, "UpdaterTask::run(end)");
+}
+
+
+AVConfig
+getInt32Config()
+{
+ return AVConfig(AVBasicType::INT32);
+}
+
+
+class Test : public vespalib::TestApp
+{
+private:
+ void
+ requireThatUpdaterAndFlusherCanRunConcurrently(void);
+
+ void
+ requireThatFlushableAttributeReportsMemoryUsage(void);
+
+ void
+ requireThatFlushableAttributeManagesSyncTokenInfo(void);
+
+ void
+ requireThatFlushTargetsCanBeRetrieved(void);
+
+ void
+ requireThatCleanUpIsPerformedAfterFlush(void);
+
+ void
+ requireThatFlushStatsAreUpdated(void);
+
+ void
+ requireThatOnlyOneFlusherCanRunAtTheSameTime(void);
+
+ void
+ requireThatLastFlushTimeIsReported(void);
+
+ void
+ requireThatShrinkWorks();
+public:
+ int
+ Main(void);
+};
+
+
+const string test_dir = "flush";
+
+struct BaseFixture
+{
+ test::DirectoryHandler _dirHandler;
+ DummyFileHeaderContext _fileHeaderContext;
+ ForegroundTaskExecutor _attributeFieldWriter;
+ BaseFixture()
+ : _dirHandler(test_dir),
+ _fileHeaderContext(),
+ _attributeFieldWriter()
+ {
+ }
+};
+
+
+struct AttributeManagerFixture
+{
+ AttributeManager::SP _msp;
+ AttributeManager &_m;
+ AttributeWriter _aw;
+ AttributeManagerFixture(BaseFixture &bf)
+ : _msp(std::make_shared<AttributeManager>
+ (test_dir, "test.subdb", TuneFileAttributes(), bf._fileHeaderContext,
+ bf._attributeFieldWriter)),
+ _m(*_msp),
+ _aw(_msp)
+ {
+ }
+ AttributeVector::SP addAttribute(const vespalib::string &name) {
+ return _m.addAttribute(name, getInt32Config(), createSerialNum);
+ }
+};
+
+struct Fixture : public BaseFixture, public AttributeManagerFixture
+{
+ Fixture()
+ : BaseFixture(),
+ AttributeManagerFixture(*static_cast<BaseFixture *>(this))
+ {
+ }
+};
+
+
+
+void
+Test::requireThatUpdaterAndFlusherCanRunConcurrently(void)
+{
+ Fixture f;
+ AttributeManager &am = f._m;
+ EXPECT_TRUE(f.addAttribute("a1").get() != NULL);
+ IFlushTarget::SP ft = am.getFlushable("a1");
+ (static_cast<FlushableAttribute *>(ft.get()))->setCleanUpAfterFlush(false);
+ UpdaterTask updaterTask(am);
+ updaterTask.run();
+
+ IndexMetaInfo info("flush/a1");
+ EXPECT_TRUE(info.load());
+ EXPECT_TRUE(info.snapshots().size() > 2);
+ for (size_t i = 0; i < info.snapshots().size(); ++i) {
+ const IndexMetaInfo::Snapshot & snap = info.snapshots()[i];
+ LOG(info,
+ "Snapshot(%" PRIu64 ", %s)",
+ snap.syncToken, snap.dirName.c_str());
+ if (snap.syncToken > 0) {
+ EXPECT_TRUE(snap.valid);
+ std::string baseFileName = "flush/a1/" + snap.dirName + "/a1";
+ AttributeVector::SP attr =
+ AttributeFactory::createAttribute(baseFileName,
+ getInt32Config());
+ EXPECT_TRUE(attr->load());
+ EXPECT_EQUAL((uint32_t)snap.syncToken + 1, attr->getNumDocs());
+ }
+ }
+}
+
+
+void
+Test::requireThatFlushableAttributeReportsMemoryUsage(void)
+{
+ Fixture f;
+ AttributeManager &am = f._m;
+ AttributeVector::SP av = f.addAttribute("a2");
+ av->addDocs(100);
+ av->commit();
+ IFlushTarget::SP fa = am.getFlushable("a2");
+ EXPECT_TRUE(av->getStatus().getAllocated() >= 100u * sizeof(int32_t));
+ EXPECT_EQUAL(av->getStatus().getUsed(),
+ fa->getApproxMemoryGain().getBefore()+0lu);
+ // attributes stay in memory
+ EXPECT_EQUAL(fa->getApproxMemoryGain().getBefore(),
+ fa->getApproxMemoryGain().getAfter());
+}
+
+
+void
+Test::requireThatFlushableAttributeManagesSyncTokenInfo(void)
+{
+ Fixture f;
+ AttributeManager &am = f._m;
+ AttributeVector::SP av = f.addAttribute("a3");
+ av->addDocs(1);
+ IFlushTarget::SP fa = am.getFlushable("a3");
+
+ IndexMetaInfo info("flush/a3");
+ EXPECT_EQUAL(0u, fa->getFlushedSerialNum());
+ EXPECT_TRUE(fa->initFlush(0).get() == NULL);
+ EXPECT_TRUE(info.load());
+ EXPECT_EQUAL(0u, info.snapshots().size());
+
+ av->commit(10, 10); // last sync token = 10
+ EXPECT_EQUAL(0u, fa->getFlushedSerialNum());
+ EXPECT_TRUE(fa->initFlush(10).get() != NULL);
+ fa->initFlush(10)->run();
+ EXPECT_EQUAL(10u, fa->getFlushedSerialNum());
+ EXPECT_TRUE(info.load());
+ EXPECT_EQUAL(1u, info.snapshots().size());
+ EXPECT_TRUE(info.snapshots()[0].valid);
+ EXPECT_EQUAL(10u, info.snapshots()[0].syncToken);
+
+ av->commit(20, 20); // last sync token = 20
+ EXPECT_EQUAL(10u, fa->getFlushedSerialNum());
+ fa->initFlush(20)->run();
+ EXPECT_EQUAL(20u, fa->getFlushedSerialNum());
+ EXPECT_TRUE(info.load());
+ EXPECT_EQUAL(1u, info.snapshots().size()); // snapshot 10 removed
+ EXPECT_TRUE(info.snapshots()[0].valid);
+ EXPECT_EQUAL(20u, info.snapshots()[0].syncToken);
+}
+
+
+void
+Test::requireThatFlushTargetsCanBeRetrieved(void)
+{
+ Fixture f;
+ AttributeManager &am = f._m;
+ f.addAttribute("a4");
+ f.addAttribute("a5");
+ std::vector<IFlushTarget::SP> ftl = am.getFlushTargets();
+ EXPECT_EQUAL(2u, ftl.size());
+ EXPECT_EQUAL(am.getFlushable("a4").get(), ftl[0].get());
+ EXPECT_EQUAL(am.getFlushable("a5").get(), ftl[1].get());
+}
+
+
+void
+Test::requireThatCleanUpIsPerformedAfterFlush(void)
+{
+ Fixture f;
+ AttributeVector::SP av = f.addAttribute("a6");
+ av->addDocs(1);
+ av->commit(30, 30);
+
+ // fake up some snapshots
+ std::string snap10 = "flush/a6/snapshot-10";
+ std::string snap20 = "flush/a6/snapshot-20";
+ vespalib::mkdir(snap10, false);
+ vespalib::mkdir(snap20, false);
+ IndexMetaInfo info("flush/a6");
+ info.addSnapshot(IndexMetaInfo::Snapshot(true, 10, "snapshot-10"));
+ info.addSnapshot(IndexMetaInfo::Snapshot(false, 20, "snapshot-20"));
+ EXPECT_TRUE(info.save());
+
+ FlushableAttribute fa(av, "flush", TuneFileAttributes(),
+ f._fileHeaderContext, f._attributeFieldWriter);
+ fa.initFlush(30)->run();
+
+ EXPECT_TRUE(info.load());
+ EXPECT_EQUAL(1u, info.snapshots().size()); // snapshots 10 & 20 removed
+ EXPECT_TRUE(info.snapshots()[0].valid);
+ EXPECT_EQUAL(30u, info.snapshots()[0].syncToken);
+ FastOS_StatInfo statInfo;
+ EXPECT_TRUE(!FastOS_File::Stat(snap10.c_str(), &statInfo));
+ EXPECT_TRUE(!FastOS_File::Stat(snap20.c_str(), &statInfo));
+}
+
+
+void
+Test::requireThatFlushStatsAreUpdated(void)
+{
+ Fixture f;
+ AttributeManager &am = f._m;
+ AttributeVector::SP av = f.addAttribute("a7");
+ av->addDocs(1);
+ av->commit(100,100);
+ IFlushTarget::SP ft = am.getFlushable("a7");
+ ft->initFlush(101)->run();
+ FlushStats stats = ft->getLastFlushStats();
+ EXPECT_EQUAL("flush/a7/snapshot-101", stats.getPath());
+ EXPECT_EQUAL(8u, stats.getPathElementsToLog());
+}
+
+
+void
+Test::requireThatOnlyOneFlusherCanRunAtTheSameTime(void)
+{
+ Fixture f;
+ AttributeManager &am = f._m;
+ AttributeVector::SP av = f.addAttribute("a8");
+ av->addDocs(10000);
+ av->commit(9,9);
+ IFlushTarget::SP ft = am.getFlushable("a8");
+ (static_cast<FlushableAttribute *>(ft.get()))->setCleanUpAfterFlush(false);
+ vespalib::ThreadStackExecutor exec(16, 64000);
+
+ for (size_t i = 10; i < 100; ++i) {
+ av->commit(i, i);
+ vespalib::Executor::Task::UP task = ft->initFlush(i);
+ exec.execute(std::move(task));
+ }
+ exec.sync();
+ exec.shutdown();
+
+ IndexMetaInfo info("flush/a8");
+ ASSERT_TRUE(info.load());
+ LOG(info, "Found %zu snapshots", info.snapshots().size());
+ for (size_t i = 0; i < info.snapshots().size(); ++i) {
+ EXPECT_EQUAL(true, info.snapshots()[i].valid);
+ }
+ IndexMetaInfo::Snapshot best = info.getBestSnapshot();
+ EXPECT_EQUAL(true, best.valid);
+ EXPECT_EQUAL(99u, best.syncToken);
+ FlushStats stats = ft->getLastFlushStats();
+ EXPECT_EQUAL("flush/a8/snapshot-99", stats.getPath());
+}
+
+
+void
+Test::requireThatLastFlushTimeIsReported(void)
+{
+ BaseFixture f;
+ FastOS_StatInfo stat;
+ { // no meta info file yet
+ AttributeManagerFixture amf(f);
+ AttributeManager &am = amf._m;
+ AttributeVector::SP av = amf.addAttribute("a9");
+ EXPECT_EQUAL(0, am.getFlushable("a9")->getLastFlushTime().time());
+ }
+ { // no snapshot flushed yet
+ AttributeManagerFixture amf(f);
+ AttributeManager &am = amf._m;
+ AttributeVector::SP av = amf.addAttribute("a9");
+ IFlushTarget::SP ft = am.getFlushable("a9");
+ EXPECT_EQUAL(0, ft->getLastFlushTime().time());
+ ft->initFlush(5)->run();
+ EXPECT_TRUE(FastOS_File::Stat("flush/a9/snapshot-5", &stat));
+ EXPECT_EQUAL(stat._modifiedTime, ft->getLastFlushTime().time());
+ }
+ { // snapshot flushed
+ AttributeManagerFixture amf(f);
+ AttributeManager &am = amf._m;
+ amf.addAttribute("a9");
+ IFlushTarget::SP ft = am.getFlushable("a9");
+ EXPECT_EQUAL(stat._modifiedTime, ft->getLastFlushTime().time());
+ { // updated flush time after nothing to flush
+ FastOS_Thread::Sleep(8000);
+ fastos::TimeStamp now = fastos::ClockSystem::now();
+ Executor::Task::UP task = ft->initFlush(5);
+ EXPECT_TRUE(task.get() == NULL);
+ EXPECT_LESS(stat._modifiedTime, ft->getLastFlushTime().time());
+ EXPECT_APPROX(now.time(), ft->getLastFlushTime().time(), 8);
+ }
+ }
+}
+
+
+void
+Test::requireThatShrinkWorks()
+{
+ Fixture f;
+ AttributeManager &am = f._m;
+ AttributeVector::SP av = f.addAttribute("a10");
+
+ av->addDocs(1000 - av->getNumDocs());
+ av->commit(10, 10);
+ IFlushTarget::SP ft = am.getFlushable("a10");
+ EXPECT_EQUAL(ft->getApproxMemoryGain().getBefore(),
+ ft->getApproxMemoryGain().getAfter());
+ AttributeGuard::UP g = am.getAttribute("a10");
+ EXPECT_FALSE(av->wantShrinkLidSpace());
+ EXPECT_FALSE(av->canShrinkLidSpace());
+ EXPECT_EQUAL(1000u, av->getNumDocs());
+ EXPECT_EQUAL(1000u, av->getCommittedDocIdLimit());
+ av->compactLidSpace(100);
+ EXPECT_TRUE(av->wantShrinkLidSpace());
+ EXPECT_FALSE(av->canShrinkLidSpace());
+ EXPECT_EQUAL(1000u, av->getNumDocs());
+ EXPECT_EQUAL(100u, av->getCommittedDocIdLimit());
+ f._aw.heartBeat(11);
+ EXPECT_TRUE(av->wantShrinkLidSpace());
+ EXPECT_FALSE(av->canShrinkLidSpace());
+ EXPECT_EQUAL(ft->getApproxMemoryGain().getBefore(),
+ ft->getApproxMemoryGain().getAfter());
+ g.reset();
+ f._aw.heartBeat(11);
+ EXPECT_TRUE(av->wantShrinkLidSpace());
+ EXPECT_TRUE(av->canShrinkLidSpace());
+ EXPECT_TRUE(ft->getApproxMemoryGain().getBefore() >
+ ft->getApproxMemoryGain().getAfter());
+ EXPECT_EQUAL(1000u, av->getNumDocs());
+ EXPECT_EQUAL(100u, av->getCommittedDocIdLimit());
+ vespalib::ThreadStackExecutor exec(1, 128 * 1024);
+ vespalib::Executor::Task::UP task = ft->initFlush(11);
+ exec.execute(std::move(task));
+ exec.sync();
+ exec.shutdown();
+ EXPECT_FALSE(av->wantShrinkLidSpace());
+ EXPECT_FALSE(av->canShrinkLidSpace());
+ EXPECT_EQUAL(ft->getApproxMemoryGain().getBefore(),
+ ft->getApproxMemoryGain().getAfter());
+ EXPECT_EQUAL(100u, av->getNumDocs());
+ EXPECT_EQUAL(100u, av->getCommittedDocIdLimit());
+}
+
+
+int
+Test::Main(void)
+{
+ TEST_INIT("attributeflush_test");
+
+ if (_argc > 0) {
+ DummyFileHeaderContext::setCreator(_argv[0]);
+ }
+ vespalib::rmdir(test_dir, true);
+ TEST_DO(requireThatUpdaterAndFlusherCanRunConcurrently());
+ TEST_DO(requireThatFlushableAttributeReportsMemoryUsage());
+ TEST_DO(requireThatFlushableAttributeManagesSyncTokenInfo());
+ TEST_DO(requireThatFlushTargetsCanBeRetrieved());
+ TEST_DO(requireThatCleanUpIsPerformedAfterFlush());
+ TEST_DO(requireThatFlushStatsAreUpdated());
+ TEST_DO(requireThatOnlyOneFlusherCanRunAtTheSameTime());
+ TEST_DO(requireThatLastFlushTimeIsReported());
+ TEST_DO(requireThatShrinkWorks());
+
+ TEST_DONE();
+}
+
+}
+
+TEST_APPHOOK(proton::Test);
diff --git a/searchcore/src/tests/proton/attribute/attributeflush_test.sh b/searchcore/src/tests/proton/attribute/attributeflush_test.sh
new file mode 100755
index 00000000000..8ec2f5d8dd8
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/attributeflush_test.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+rm -rf flush
+$VALGRIND ./searchcore_attributeflush_test_app
diff --git a/searchcore/src/tests/proton/attribute/attributes_state_explorer/.gitignore b/searchcore/src/tests/proton/attribute/attributes_state_explorer/.gitignore
new file mode 100644
index 00000000000..3b612102a10
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/attributes_state_explorer/.gitignore
@@ -0,0 +1 @@
+searchcore_attributes_state_explorer_test_app
diff --git a/searchcore/src/tests/proton/attribute/attributes_state_explorer/CMakeLists.txt b/searchcore/src/tests/proton/attribute/attributes_state_explorer/CMakeLists.txt
new file mode 100644
index 00000000000..322d22c8f0d
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/attributes_state_explorer/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_attributes_state_explorer_test_app
+ SOURCES
+ attributes_state_explorer_test.cpp
+ DEPENDS
+ searchcore_attribute
+ searchcore_pcommon
+)
+vespa_add_test(NAME searchcore_attributes_state_explorer_test_app COMMAND searchcore_attributes_state_explorer_test_app)
diff --git a/searchcore/src/tests/proton/attribute/attributes_state_explorer/DESC b/searchcore/src/tests/proton/attribute/attributes_state_explorer/DESC
new file mode 100644
index 00000000000..1459d32ddae
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/attributes_state_explorer/DESC
@@ -0,0 +1 @@
+attributes_state_explorer test. Take a look at attributes_state_explorer_test.cpp for details.
diff --git a/searchcore/src/tests/proton/attribute/attributes_state_explorer/FILES b/searchcore/src/tests/proton/attribute/attributes_state_explorer/FILES
new file mode 100644
index 00000000000..f49eb2b8e86
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/attributes_state_explorer/FILES
@@ -0,0 +1 @@
+attributes_state_explorer_test.cpp
diff --git a/searchcore/src/tests/proton/attribute/attributes_state_explorer/attributes_state_explorer_test.cpp b/searchcore/src/tests/proton/attribute/attributes_state_explorer/attributes_state_explorer_test.cpp
new file mode 100644
index 00000000000..43eeec6086a
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/attributes_state_explorer/attributes_state_explorer_test.cpp
@@ -0,0 +1,70 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("attributes_state_explorer_test");
+#include <vespa/vespalib/testkit/testapp.h>
+
+#include <vespa/searchcore/proton/attribute/attribute_manager_explorer.h>
+#include <vespa/searchcore/proton/attribute/attributemanager.h>
+#include <vespa/searchcore/proton/test/attribute_vectors.h>
+#include <vespa/searchcore/proton/test/directory_handler.h>
+#include <vespa/searchlib/index/dummyfileheadercontext.h>
+#include <vespa/searchlib/common/foregroundtaskexecutor.h>
+#include <vespa/vespalib/test/insertion_operators.h>
+
+using namespace proton;
+using namespace proton::test;
+using search::index::DummyFileHeaderContext;
+using search::AttributeVector;
+using search::TuneFileAttributes;
+using search::ForegroundTaskExecutor;
+
+const vespalib::string TEST_DIR = "test_output";
+
+struct Fixture
+{
+ DirectoryHandler _dirHandler;
+ DummyFileHeaderContext _fileHeaderContext;
+ ForegroundTaskExecutor _attributeFieldWriter;
+ AttributeManager::SP _mgr;
+ AttributeManagerExplorer _explorer;
+ Fixture()
+ : _dirHandler(TEST_DIR),
+ _fileHeaderContext(),
+ _attributeFieldWriter(),
+ _mgr(new AttributeManager(TEST_DIR, "test.subdb", TuneFileAttributes(),
+ _fileHeaderContext,
+ _attributeFieldWriter)),
+ _explorer(_mgr)
+ {
+ addAttribute("regular");
+ addExtraAttribute("extra");
+ }
+ void addAttribute(const vespalib::string &name) {
+ _mgr->addAttribute(name, AttributeUtils::getInt32Config(), 1);
+ }
+ void addExtraAttribute(const vespalib::string &name) {
+ _mgr->addExtraAttribute(AttributeVector::SP(new Int32Attribute(name)));
+ }
+};
+
+typedef std::vector<vespalib::string> StringVector;
+
+TEST_F("require that attributes are exposed as children names", Fixture)
+{
+ StringVector children = f._explorer.get_children_names();
+ std::sort(children.begin(), children.end());
+ EXPECT_EQUAL(StringVector({"extra", "regular"}), children);
+}
+
+TEST_F("require that attributes are explorable", Fixture)
+{
+ EXPECT_TRUE(f._explorer.get_child("regular").get() != nullptr);
+ EXPECT_TRUE(f._explorer.get_child("extra").get() != nullptr);
+ EXPECT_TRUE(f._explorer.get_child("not").get() == nullptr);
+}
+
+TEST_MAIN()
+{
+ TEST_RUN_ALL();
+}
diff --git a/searchcore/src/tests/proton/attribute/document_field_populator/.gitignore b/searchcore/src/tests/proton/attribute/document_field_populator/.gitignore
new file mode 100644
index 00000000000..45cd0a54f56
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/document_field_populator/.gitignore
@@ -0,0 +1 @@
+searchcore_document_field_populator_test_app
diff --git a/searchcore/src/tests/proton/attribute/document_field_populator/CMakeLists.txt b/searchcore/src/tests/proton/attribute/document_field_populator/CMakeLists.txt
new file mode 100644
index 00000000000..4c6da0a3397
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/document_field_populator/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_document_field_populator_test_app
+ SOURCES
+ document_field_populator_test.cpp
+ DEPENDS
+ searchcore_attribute
+ searchcore_pcommon
+)
+vespa_add_test(NAME searchcore_document_field_populator_test_app COMMAND searchcore_document_field_populator_test_app)
diff --git a/searchcore/src/tests/proton/attribute/document_field_populator/DESC b/searchcore/src/tests/proton/attribute/document_field_populator/DESC
new file mode 100644
index 00000000000..cdc71250210
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/document_field_populator/DESC
@@ -0,0 +1 @@
+document_field_populator test. Take a look at document_field_populator_test.cpp for details.
diff --git a/searchcore/src/tests/proton/attribute/document_field_populator/FILES b/searchcore/src/tests/proton/attribute/document_field_populator/FILES
new file mode 100644
index 00000000000..21f62452acf
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/document_field_populator/FILES
@@ -0,0 +1 @@
+document_field_populator_test.cpp
diff --git a/searchcore/src/tests/proton/attribute/document_field_populator/document_field_populator_test.cpp b/searchcore/src/tests/proton/attribute/document_field_populator/document_field_populator_test.cpp
new file mode 100644
index 00000000000..d0be50bfd2f
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/document_field_populator/document_field_populator_test.cpp
@@ -0,0 +1,84 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("document_field_populator_test");
+#include <vespa/vespalib/testkit/testapp.h>
+
+#include <vespa/searchcommon/common/schema.h>
+#include <vespa/searchcore/proton/attribute/document_field_populator.h>
+#include <vespa/searchlib/attribute/attributefactory.h>
+#include <vespa/searchlib/attribute/integerbase.h>
+#include <vespa/searchlib/index/docbuilder.h>
+#include <vespa/vespalib/util/stringfmt.h>
+
+using namespace document;
+using namespace proton;
+using namespace search;
+using namespace search::index;
+
+typedef search::attribute::Config AVConfig;
+typedef search::attribute::BasicType AVBasicType;
+
+Schema::AttributeField
+createAttributeField()
+{
+ return Schema::AttributeField("a1", Schema::DataType::INT32);
+}
+
+Schema
+createSchema()
+{
+ Schema schema;
+ schema.addAttributeField(createAttributeField());
+ return schema;
+}
+
+struct DocContext
+{
+ Schema _schema;
+ DocBuilder _builder;
+ DocContext()
+ : _schema(createSchema()),
+ _builder(_schema)
+ {
+ }
+ Document::UP create(uint32_t id) {
+ vespalib::string docId =
+ vespalib::make_string("id:searchdocument:searchdocument::%u", id);
+ return _builder.startDocument(docId).endDocument();
+ }
+};
+
+struct Fixture
+{
+ AttributeVector::SP _attr;
+ IntegerAttribute &_intAttr;
+ DocumentFieldPopulator _pop;
+ DocContext _ctx;
+ Fixture()
+ : _attr(search::AttributeFactory::createAttribute("a1", AVConfig(AVBasicType::INT32))),
+ _intAttr(dynamic_cast<IntegerAttribute &>(*_attr)),
+ _pop(createAttributeField(), _attr, "test"),
+ _ctx()
+ {
+ _intAttr.addDocs(2);
+ _intAttr.update(1, 100);
+ _intAttr.commit();
+ }
+};
+
+TEST_F("require that document field is populated based on attribute content", Fixture)
+{
+ // NOTE: DocumentFieldRetriever (used by DocumentFieldPopulator) is fully tested
+ // with all data types in searchcore/src/tests/proton/server/documentretriever_test.cpp.
+ {
+ Document::UP doc = f._ctx.create(1);
+ f._pop.handleExisting(1, *doc);
+ EXPECT_EQUAL(100, doc->getValue("a1")->getAsInt());
+ }
+}
+
+TEST_MAIN()
+{
+ TEST_RUN_ALL();
+}
diff --git a/searchcore/src/tests/proton/attribute/exclusive_attribute_read_accessor/.gitignore b/searchcore/src/tests/proton/attribute/exclusive_attribute_read_accessor/.gitignore
new file mode 100644
index 00000000000..f3666eecb6e
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/exclusive_attribute_read_accessor/.gitignore
@@ -0,0 +1 @@
+searchcore_exclusive_attribute_read_accessor_test_app
diff --git a/searchcore/src/tests/proton/attribute/exclusive_attribute_read_accessor/CMakeLists.txt b/searchcore/src/tests/proton/attribute/exclusive_attribute_read_accessor/CMakeLists.txt
new file mode 100644
index 00000000000..c39025ae39f
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/exclusive_attribute_read_accessor/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_exclusive_attribute_read_accessor_test_app
+ SOURCES
+ exclusive_attribute_read_accessor_test.cpp
+ DEPENDS
+ searchcore_attribute
+ searchcore_pcommon
+)
+vespa_add_test(NAME searchcore_exclusive_attribute_read_accessor_test_app COMMAND searchcore_exclusive_attribute_read_accessor_test_app)
diff --git a/searchcore/src/tests/proton/attribute/exclusive_attribute_read_accessor/DESC b/searchcore/src/tests/proton/attribute/exclusive_attribute_read_accessor/DESC
new file mode 100644
index 00000000000..ec5a407ddbd
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/exclusive_attribute_read_accessor/DESC
@@ -0,0 +1 @@
+exclusive_attribute_read_accessor test. Take a look at exclusive_attribute_read_accessor_test.cpp for details.
diff --git a/searchcore/src/tests/proton/attribute/exclusive_attribute_read_accessor/FILES b/searchcore/src/tests/proton/attribute/exclusive_attribute_read_accessor/FILES
new file mode 100644
index 00000000000..74a9ab77547
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/exclusive_attribute_read_accessor/FILES
@@ -0,0 +1 @@
+exclusive_attribute_read_accessor_test.cpp
diff --git a/searchcore/src/tests/proton/attribute/exclusive_attribute_read_accessor/exclusive_attribute_read_accessor_test.cpp b/searchcore/src/tests/proton/attribute/exclusive_attribute_read_accessor/exclusive_attribute_read_accessor_test.cpp
new file mode 100644
index 00000000000..7cb6a503ae8
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/exclusive_attribute_read_accessor/exclusive_attribute_read_accessor_test.cpp
@@ -0,0 +1,54 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+#include <vespa/searchcore/proton/attribute/exclusive_attribute_read_accessor.h>
+#include <vespa/searchcommon/attribute/config.h>
+#include <vespa/searchlib/attribute/attributefactory.h>
+#include <vespa/searchlib/common/sequencedtaskexecutor.h>
+#include <vespa/vespalib/util/sync.h>
+
+using namespace proton;
+using namespace search;
+using namespace vespalib;
+
+using ReadGuard = ExclusiveAttributeReadAccessor::Guard;
+
+AttributeVector::SP
+createAttribute()
+{
+ attribute::Config cfg(attribute::BasicType::INT32, attribute::CollectionType::SINGLE);
+ return search::AttributeFactory::createAttribute("myattr", cfg);
+}
+
+struct Fixture
+{
+ AttributeVector::SP attribute;
+ SequencedTaskExecutor writer;
+ ExclusiveAttributeReadAccessor accessor;
+
+ Fixture()
+ : attribute(createAttribute()),
+ writer(1),
+ accessor(attribute, writer)
+ {}
+};
+
+TEST_F("require that attribute write thread is blocked while guard is held", Fixture)
+{
+ ReadGuard::UP guard = f.accessor.takeGuard();
+ Gate gate;
+ f.writer.execute("myattr", [&gate]() { gate.countDown(); });
+ bool reachedZero = gate.await(100);
+ EXPECT_FALSE(reachedZero);
+ EXPECT_EQUAL(1u, gate.getCount());
+
+ guard.reset();
+ gate.await();
+ EXPECT_EQUAL(0u, gate.getCount());
+}
+
+TEST_MAIN()
+{
+ TEST_RUN_ALL();
+}
diff --git a/searchcore/src/tests/proton/attribute/gidmapattribute/.gitignore b/searchcore/src/tests/proton/attribute/gidmapattribute/.gitignore
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/attribute/gidmapattribute/.gitignore
diff --git a/searchcore/src/tests/proton/bucketdb/bucketdb/.gitignore b/searchcore/src/tests/proton/bucketdb/bucketdb/.gitignore
new file mode 100644
index 00000000000..3512e4268a1
--- /dev/null
+++ b/searchcore/src/tests/proton/bucketdb/bucketdb/.gitignore
@@ -0,0 +1 @@
+searchcore_bucketdb_test_app
diff --git a/searchcore/src/tests/proton/bucketdb/bucketdb/CMakeLists.txt b/searchcore/src/tests/proton/bucketdb/bucketdb/CMakeLists.txt
new file mode 100644
index 00000000000..f07ded6d89b
--- /dev/null
+++ b/searchcore/src/tests/proton/bucketdb/bucketdb/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_bucketdb_test_app
+ SOURCES
+ bucketdb_test.cpp
+ DEPENDS
+ searchcore_bucketdb
+)
+vespa_add_test(NAME searchcore_bucketdb_test_app COMMAND searchcore_bucketdb_test_app)
diff --git a/searchcore/src/tests/proton/bucketdb/bucketdb/DESC b/searchcore/src/tests/proton/bucketdb/bucketdb/DESC
new file mode 100644
index 00000000000..59057628f89
--- /dev/null
+++ b/searchcore/src/tests/proton/bucketdb/bucketdb/DESC
@@ -0,0 +1 @@
+bucketdb test. Take a look at bucketdb_test.cpp for details.
diff --git a/searchcore/src/tests/proton/bucketdb/bucketdb/FILES b/searchcore/src/tests/proton/bucketdb/bucketdb/FILES
new file mode 100644
index 00000000000..c5cd1105c23
--- /dev/null
+++ b/searchcore/src/tests/proton/bucketdb/bucketdb/FILES
@@ -0,0 +1 @@
+bucketdb_test.cpp
diff --git a/searchcore/src/tests/proton/bucketdb/bucketdb/bucketdb_test.cpp b/searchcore/src/tests/proton/bucketdb/bucketdb/bucketdb_test.cpp
new file mode 100644
index 00000000000..6895e469319
--- /dev/null
+++ b/searchcore/src/tests/proton/bucketdb/bucketdb/bucketdb_test.cpp
@@ -0,0 +1,169 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("bucketdb_test");
+
+#include <vespa/searchcore/proton/bucketdb/bucket_db_explorer.h>
+#include <vespa/searchcore/proton/bucketdb/bucketdb.h>
+#include <vespa/vespalib/data/slime/slime.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+using namespace document;
+using namespace proton;
+using namespace proton::bucketdb;
+using namespace vespalib::slime;
+using storage::spi::BucketChecksum;
+using storage::spi::BucketInfo;
+using storage::spi::Timestamp;
+using vespalib::Slime;
+
+constexpr uint32_t MIN_NUM_BITS = 8u;
+const GlobalId GID_1("111111111111");
+const BucketId BUCKET_1(MIN_NUM_BITS, GID_1.convertToBucketId().getRawId());
+const Timestamp TIME_1(1u);
+const Timestamp TIME_2(2u);
+const Timestamp TIME_3(3u);
+
+typedef BucketInfo::ReadyState RS;
+typedef SubDbType SDT;
+
+void
+assertDocCount(uint32_t ready,
+ uint32_t notReady,
+ uint32_t removed,
+ const BucketState &state)
+{
+ EXPECT_EQUAL(ready, state.getReadyCount());
+ EXPECT_EQUAL(notReady, state.getNotReadyCount());
+ EXPECT_EQUAL(removed, state.getRemovedCount());
+ BucketInfo info = state;
+ EXPECT_EQUAL(ready + notReady, info.getDocumentCount());
+ EXPECT_EQUAL(ready + notReady + removed, info.getEntryCount());
+}
+
+void
+assertReady(bool expReady,
+ const BucketInfo &info)
+{
+ EXPECT_EQUAL(expReady, info.isReady());
+}
+
+struct Fixture
+{
+ BucketDB _db;
+ Fixture()
+ : _db()
+ {}
+ const BucketState &add(const Timestamp &timestamp,
+ SubDbType subDbType) {
+ return _db.add(GID_1, BUCKET_1, timestamp, subDbType);
+ }
+ BucketState remove(const Timestamp &timestamp,
+ SubDbType subDbType) {
+ _db.remove(GID_1, BUCKET_1, timestamp, subDbType);
+ return get();
+ }
+ BucketState get() const {
+ return _db.get(BUCKET_1);
+ }
+ BucketChecksum getChecksum(const Timestamp &timestamp,
+ SubDbType subDbType) {
+ BucketDB db;
+ BucketChecksum retval = db.add(GID_1, BUCKET_1, timestamp, subDbType).getChecksum();
+ // Must ensure empty bucket db before destruction.
+ db.remove(GID_1, BUCKET_1, timestamp, subDbType);
+ return retval;
+ }
+};
+
+TEST_F("require that bucket db tracks doc counts per sub db type", Fixture)
+{
+ assertDocCount(0, 0, 0, f.get());
+ assertDocCount(1, 0, 0, f.add(TIME_1, SDT::READY));
+ assertDocCount(1, 1, 0, f.add(TIME_2, SDT::NOTREADY));
+ assertDocCount(1, 1, 1, f.add(TIME_3, SDT::REMOVED));
+ assertDocCount(0, 1, 1, f.remove(TIME_1, SDT::READY));
+ assertDocCount(0, 0, 1, f.remove(TIME_2, SDT::NOTREADY));
+ assertDocCount(0, 0, 0, f.remove(TIME_3, SDT::REMOVED));
+}
+
+TEST_F("require that bucket checksum is a combination of sub db types", Fixture)
+{
+ BucketChecksum zero(0u);
+ BucketChecksum ready = f.getChecksum(TIME_1, SDT::READY);
+ BucketChecksum notReady = f.getChecksum(TIME_2, SDT::NOTREADY);
+
+ EXPECT_EQUAL(zero, f.get().getChecksum());
+ EXPECT_EQUAL(ready, f.add(TIME_1, SDT::READY).getChecksum());
+ EXPECT_EQUAL(ready + notReady, f.add(TIME_2, SDT::NOTREADY).getChecksum());
+ EXPECT_EQUAL(ready + notReady, f.add(TIME_3, SDT::REMOVED).getChecksum());
+ EXPECT_EQUAL(notReady, f.remove(TIME_1, SDT::READY).getChecksum());
+ EXPECT_EQUAL(zero, f.remove(TIME_2, SDT::NOTREADY).getChecksum());
+ EXPECT_EQUAL(zero, f.remove(TIME_3, SDT::REMOVED).getChecksum());
+}
+
+TEST_F("require that bucket is ready when not having docs in notready sub db", Fixture)
+{
+ assertReady(true, f.get());
+ assertReady(true, f.add(TIME_1, SDT::READY));
+ assertReady(false, f.add(TIME_2, SDT::NOTREADY));
+ assertReady(false, f.add(TIME_3, SDT::REMOVED));
+ assertReady(true, f.remove(TIME_2, SDT::NOTREADY));
+ assertReady(true, f.remove(TIME_1, SDT::READY));
+ assertReady(true, f.remove(TIME_3, SDT::REMOVED));
+}
+
+TEST_F("require that bucket can be cached", Fixture)
+{
+ f.add(TIME_1, SDT::READY);
+ EXPECT_FALSE(f._db.isCachedBucket(BUCKET_1));
+ f._db.cacheBucket(BUCKET_1);
+ EXPECT_TRUE(f._db.isCachedBucket(BUCKET_1));
+
+ assertDocCount(1, 0, 0, f._db.cachedGet(BUCKET_1));
+ f.add(TIME_2, SDT::NOTREADY);
+ assertDocCount(1, 0, 0, f._db.cachedGet(BUCKET_1));
+
+ f._db.uncacheBucket();
+ EXPECT_FALSE(f._db.isCachedBucket(BUCKET_1));
+ assertDocCount(1, 1, 0, f._db.cachedGet(BUCKET_1));
+
+ // Must ensure empty bucket db before destruction.
+ f.remove(TIME_1, SDT::READY);
+ f.remove(TIME_2, SDT::NOTREADY);
+}
+
+TEST("require that bucket db can be explored")
+{
+ BucketDBOwner db;
+ db.takeGuard()->add(GID_1, BUCKET_1, TIME_1, SDT::READY);
+ {
+ BucketDBExplorer explorer(db.takeGuard());
+ Slime expectSlime;
+ vespalib::string expectJson =
+ "{"
+ " numBuckets: 1,"
+ " buckets: ["
+ " {"
+ " id: '0x2000000000000031',"
+ " checksum: '0x93939394',"
+ " readyCount: 1,"
+ " notReadyCount: 0,"
+ " removedCount: 0,"
+ " active: false"
+ " }"
+ " ]"
+ "}";
+ EXPECT_TRUE(JsonFormat::decode(expectJson, expectSlime) > 0);
+ Slime actualSlime;
+ SlimeInserter inserter(actualSlime);
+ explorer.get_state(inserter, true);
+
+ EXPECT_EQUAL(expectSlime, actualSlime);
+ }
+
+ // Must ensure empty bucket db before destruction.
+ db.takeGuard()->remove(GID_1, BUCKET_1, TIME_1, SDT::READY);
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/clean_tests.sh b/searchcore/src/tests/proton/clean_tests.sh
new file mode 100755
index 00000000000..c99f0a92ee8
--- /dev/null
+++ b/searchcore/src/tests/proton/clean_tests.sh
@@ -0,0 +1,8 @@
+#!/bin/sh
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+for file in *
+do
+ if [ -d "$file" ]; then
+ (cd "$file" && make clean && echo "$file cleaned")
+ fi
+done
diff --git a/searchcore/src/tests/proton/common/.gitignore b/searchcore/src/tests/proton/common/.gitignore
new file mode 100644
index 00000000000..c03144e885d
--- /dev/null
+++ b/searchcore/src/tests/proton/common/.gitignore
@@ -0,0 +1,3 @@
+searchcore_cachedselect_test_app
+searchcore_schemautil_test_app
+searchcore_selectpruner_test_app
diff --git a/searchcore/src/tests/proton/common/CMakeLists.txt b/searchcore/src/tests/proton/common/CMakeLists.txt
new file mode 100644
index 00000000000..833bac4c065
--- /dev/null
+++ b/searchcore/src/tests/proton/common/CMakeLists.txt
@@ -0,0 +1,22 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_schemautil_test_app
+ SOURCES
+ schemautil_test.cpp
+ DEPENDS
+ searchcore_pcommon
+)
+vespa_add_test(NAME searchcore_schemautil_test_app COMMAND searchcore_schemautil_test_app)
+vespa_add_executable(searchcore_selectpruner_test_app
+ SOURCES
+ selectpruner_test.cpp
+ DEPENDS
+ searchcore_pcommon
+)
+vespa_add_test(NAME searchcore_selectpruner_test_app COMMAND searchcore_selectpruner_test_app)
+vespa_add_executable(searchcore_cachedselect_test_app
+ SOURCES
+ cachedselect_test.cpp
+ DEPENDS
+ searchcore_pcommon
+)
+vespa_add_test(NAME searchcore_cachedselect_test_app COMMAND searchcore_cachedselect_test_app)
diff --git a/searchcore/src/tests/proton/common/cachedselect_test.cpp b/searchcore/src/tests/proton/common/cachedselect_test.cpp
new file mode 100644
index 00000000000..e2c759f72aa
--- /dev/null
+++ b/searchcore/src/tests/proton/common/cachedselect_test.cpp
@@ -0,0 +1,710 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("cachedselect_test");
+
+#include <vespa/vespalib/stllike/string.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/document/repo/configbuilder.h>
+#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/searchcore/proton/common/cachedselect.h>
+#include <vespa/searchcore/proton/common/selectcontext.h>
+#include <vespa/searchlib/attribute/attributecontext.h>
+#include <vespa/searchlib/attribute/integerbase.h>
+#include <vespa/searchlib/attribute/postinglistattribute.h>
+#include <vespa/searchlib/attribute/enumcomparator.h>
+#include <vespa/searchlib/attribute/singlenumericpostattribute.h>
+#include <vespa/searchlib/attribute/singleenumattribute.hpp>
+#include <vespa/searchlib/attribute/singlenumericenumattribute.hpp>
+#include <vespa/searchlib/attribute/singlenumericpostattribute.hpp>
+#include <vespa/searchlib/attribute/attributevector.hpp>
+#include <vespa/document/select/parser.h>
+#include <vespa/document/select/cloningvisitor.h>
+#include <vespa/document/base/documentid.h>
+#include <vespa/document/fieldvalue/stringfieldvalue.h>
+#include <vespa/document/fieldvalue/intfieldvalue.h>
+#include <vespa/document/fieldvalue/document.h>
+
+
+using search::index::Schema;
+using document::DocumentTypeRepo;
+using document::DocumentType;
+using document::select::Node;
+using document::select::Result;
+using document::select::ResultSet;
+using document::select::CloningVisitor;
+using document::select::Context;
+using vespalib::string;
+
+using document::config_builder::DocumenttypesConfigBuilderHelper;
+using document::config_builder::Struct;
+using document::config_builder::Array;
+using document::config_builder::Wset;
+using document::config_builder::Map;
+using document::DataType;
+using document::Document;
+using document::DocumentId;
+using document::StringFieldValue;
+using document::IntFieldValue;
+using proton::CachedSelect;
+using proton::SelectContext;
+using search::AttributeVector;
+using search::AttributeGuard;
+using search::AttributeEnumGuard;
+using search::AttributeContext;
+using search::EnumAttribute;
+using search::AttributePosting;
+using search::SingleValueNumericPostingAttribute;
+using search::IntegerAttribute;
+using search::IntegerAttributeTemplate;
+using search::attribute::IAttributeContext;
+
+typedef Node::UP NodeUP;
+typedef IntegerAttributeTemplate<int32_t> IATint32;
+typedef EnumAttribute<IATint32> IntEnumAttribute;
+
+#if 0
+extern template class SingleValueNumericPostingAttribute<IntPostingAttribute>;
+#endif
+
+typedef SingleValueNumericPostingAttribute<IntEnumAttribute> SvIntAttr;
+
+namespace
+{
+
+void
+makeSchema(Schema &s)
+{
+ s.addIndexField(Schema::IndexField("ia", Schema::STRING));
+ s.addAttributeField(Schema::AttributeField("aa", Schema::INT32));
+ s.addAttributeField(Schema::AttributeField("aaa", Schema::INT32,
+ Schema::ARRAY));
+ s.addAttributeField(Schema::AttributeField("aaw", Schema::INT32,
+ Schema::WEIGHTEDSET));
+}
+
+const int32_t doc_type_id = 787121340;
+const string type_name = "test";
+const string header_name = type_name + ".header";
+const string body_name = type_name + ".body";
+const string type_name_2 = "test_2";
+const string header_name_2 = type_name_2 + ".header";
+const string body_name_2 = type_name_2 + ".body";
+
+const int32_t noIntVal = std::numeric_limits<int32_t>::min();
+
+
+DocumentTypeRepo::UP
+makeDocTypeRepo(void)
+{
+ DocumenttypesConfigBuilderHelper builder;
+ builder.document(doc_type_id, type_name,
+ Struct(header_name), Struct(body_name).
+ addField("ia", DataType::T_STRING).
+ addField("ib", DataType::T_STRING).
+ addField("ibs", Struct("pair").
+ addField("x", DataType::T_STRING).
+ addField("y", DataType::T_STRING)).
+ addField("iba", Array(DataType::T_STRING)).
+ addField("ibw", Wset(DataType::T_STRING)).
+ addField("ibm", Map(DataType::T_STRING,
+ DataType::T_STRING)).
+ addField("aa", DataType::T_INT).
+ addField("aaa", Array(DataType::T_INT)).
+ addField("aaw", Wset(DataType::T_INT)).
+ addField("ab", DataType::T_INT));
+ builder.document(doc_type_id + 1, type_name_2,
+ Struct(header_name_2), Struct(body_name_2).
+ addField("ic", DataType::T_STRING).
+ addField("id", DataType::T_STRING).
+ addField("ac", DataType::T_INT).
+ addField("ad", DataType::T_INT));
+ return DocumentTypeRepo::UP(new DocumentTypeRepo(builder.config()));
+}
+
+
+Document::UP
+makeDoc(const DocumentTypeRepo &repo,
+ const string &docId,
+ const string &ia,
+ const string &ib,
+ int32_t aa,
+ int32_t ab)
+{
+ const DocumentType *docType = repo.getDocumentType("test");
+ Document::UP doc(new Document(*docType, DocumentId(docId)));
+ if (ia != "null")
+ doc->setValue("ia", StringFieldValue(ia));
+ if (ib != "null")
+ doc->setValue("ib", StringFieldValue(ib));
+ if (aa != noIntVal)
+ doc->setValue("aa", IntFieldValue(aa));
+ if (ab != noIntVal)
+ doc->setValue("ab", IntFieldValue(ab));
+ return doc;
+}
+
+
+bool
+checkSelect(const NodeUP &sel,
+ const Context &ctx,
+ const Result &exp)
+{
+ if (EXPECT_TRUE(sel->contains(ctx) == exp))
+ return true;
+ std::ostringstream os;
+ EXPECT_TRUE(sel->trace(ctx, os) == exp);
+ LOG(info,
+ "trace output: '%s'",
+ os.str().c_str());
+ return false;
+}
+
+bool
+checkSelect(const CachedSelect::SP &cs,
+ const Context &ctx,
+ const Result &exp)
+{
+ return checkSelect(cs->_select, ctx, exp);
+}
+
+
+bool
+checkSelect(const CachedSelect::SP &cs,
+ uint32_t docId,
+ const Result &exp)
+{
+ SelectContext ctx(*cs);
+ ctx._docId = docId;
+ ctx.getAttributeGuards();
+ return checkSelect(cs->_attrSelect, ctx, exp);
+}
+
+
+class MyIntAv : public SvIntAttr
+{
+ mutable uint32_t _gets;
+public:
+ MyIntAv(const string &name)
+ : SvIntAttr(name, Config(BasicType::INT32,
+ CollectionType::SINGLE,
+ true, false)),
+ _gets(0)
+ {
+ }
+
+ virtual uint32_t
+ get(AttributeVector::DocId doc, largeint_t *v, uint32_t sz) const
+ {
+ ++_gets;
+ return SvIntAttr::get(doc, v, sz);
+ }
+
+ uint32_t
+ getGets(void) const
+ {
+ return _gets;
+ }
+};
+
+
+
+class MyAttributeManager : public search::IAttributeManager
+{
+public:
+ typedef std::map<string, AttributeVector::SP> AttributeMap;
+
+ AttributeMap _attributes;
+
+ AttributeVector::SP
+ findAttribute(const vespalib::string &name) const
+ {
+ AttributeMap::const_iterator itr = _attributes.find(name);
+ if (itr != _attributes.end()) {
+ return itr->second;
+ }
+ return AttributeVector::SP();
+ }
+
+ virtual
+ AttributeGuard::UP
+ getAttribute(const string &name) const
+ {
+ AttributeVector::SP attr = findAttribute(name);
+ return AttributeGuard::UP(new AttributeGuard(attr));
+ }
+
+ virtual AttributeGuard::UP
+ getAttributeStableEnum(const string & name) const
+ {
+ AttributeVector::SP attr = findAttribute(name);
+ return AttributeGuard::UP(new AttributeEnumGuard(attr));
+ }
+
+ virtual void
+ getAttributeList(std::vector<AttributeGuard> & list) const
+ {
+ list.reserve(_attributes.size());
+ for (AttributeMap::const_iterator itr = _attributes.begin();
+ itr != _attributes.end();
+ ++itr) {
+ list.push_back(AttributeGuard(itr->second));
+ }
+ }
+
+ virtual IAttributeContext::UP
+ createContext() const
+ {
+ return IAttributeContext::UP(new AttributeContext(*this));
+ }
+
+ MyAttributeManager()
+ : _attributes()
+ {
+ }
+
+ void
+ addAttribute(const string &name)
+ {
+ if (findAttribute(name).get() != NULL)
+ return;
+ AttributeVector::SP av(new MyIntAv(name));
+ av->addReservedDoc();
+ _attributes[name] = av;
+ }
+};
+
+
+class MyDB
+{
+public:
+ typedef std::unique_ptr<MyDB> UP;
+
+ const Schema &_schema;
+ const DocumentTypeRepo &_repo;
+ MyAttributeManager &_amgr;
+ typedef std::map<string, uint32_t> DocIdToLid;
+ typedef std::map<uint32_t, Document::SP> LidToDocSP;
+ DocIdToLid _docIdToLid;
+ LidToDocSP _lidToDocSP;
+
+ MyDB(const Schema &schema,
+ const DocumentTypeRepo &repo,
+ MyAttributeManager &amgr)
+ : _schema(schema),
+ _repo(repo),
+ _amgr(amgr)
+ {
+ }
+
+ void
+ addDoc(uint32_t lid,
+ const string &docId,
+ const string &ia,
+ const string &ib,
+ int32_t aa,
+ int32_t ab);
+
+ const Document &
+ getDoc(uint32_t lid) const;
+};
+
+
+void
+MyDB::addDoc(uint32_t lid,
+ const string &docId,
+ const string &ia,
+ const string &ib,
+ int32_t aa,
+ int32_t ab)
+{
+ Document::UP doc(makeDoc(_repo, docId, ia, ib, aa, ab));
+
+ _docIdToLid[docId] = lid;
+ _lidToDocSP[lid] = Document::SP(doc.release());
+ AttributeVector &av(*_amgr.findAttribute("aa"));
+ if (lid >= av.getNumDocs()) {
+ AttributeVector::DocId checkDocId(0u);
+ ASSERT_TRUE(av.addDoc(checkDocId));
+ ASSERT_EQUAL(lid, checkDocId);
+ }
+ IntegerAttribute &iav(static_cast<IntegerAttribute &>(av));
+ AttributeVector::largeint_t laa(aa);
+ EXPECT_TRUE(iav.update(lid, laa));
+ av.commit();
+}
+
+
+const Document &
+MyDB::getDoc(uint32_t lid) const
+{
+ LidToDocSP::const_iterator it(_lidToDocSP.find(lid));
+ ASSERT_TRUE(it != _lidToDocSP.end());
+ return *it->second;
+}
+
+
+class TestFixture
+{
+public:
+ Schema _s;
+ DocumentTypeRepo::UP _repoUP;
+ bool _hasFields;
+ MyAttributeManager _amgr;
+ MyDB::UP _db;
+
+ TestFixture(void);
+
+ ~TestFixture(void);
+
+ CachedSelect::SP
+ testParse(const string &selection,
+ const string &docTypeName);
+
+};
+
+
+TestFixture::TestFixture(void)
+ : _s(),
+ _repoUP(),
+ _hasFields(true),
+ _amgr(),
+ _db()
+{
+ makeSchema(_s);
+ _repoUP = makeDocTypeRepo();
+
+ _amgr.addAttribute("aa");
+
+ _db.reset(new MyDB(_s, *_repoUP, _amgr));
+}
+
+
+TestFixture::~TestFixture(void)
+{
+}
+
+
+CachedSelect::SP
+TestFixture::testParse(const string &selection,
+ const string &docTypeName)
+{
+ const DocumentTypeRepo &repo(*_repoUP);
+ const Schema &schema(_s);
+
+ CachedSelect::SP res(new CachedSelect);
+
+ const DocumentType *docType = repo.getDocumentType(docTypeName);
+ ASSERT_TRUE(docType != NULL);
+ Document::UP emptyDoc(new Document(*docType, DocumentId()));
+
+ res->set(selection,
+ docTypeName,
+ *emptyDoc,
+ repo,
+ schema,
+ &_amgr,
+ _hasFields);
+
+ ASSERT_TRUE(res->_select.get() != NULL);
+ return res;
+}
+
+
+TEST_F("Test that test setup is OK", TestFixture)
+{
+ DocumentTypeRepo &repo = *f._repoUP;
+ const DocumentType *docType = repo.getDocumentType("test");
+ ASSERT_TRUE(docType);
+ EXPECT_EQUAL(10u, docType->getFieldCount());
+ EXPECT_EQUAL("String", docType->getField("ia").getDataType().getName());
+ EXPECT_EQUAL("String", docType->getField("ib").getDataType().getName());
+ EXPECT_EQUAL("Int", docType->getField("aa").getDataType().getName());
+ EXPECT_EQUAL("Int", docType->getField("ab").getDataType().getName());
+}
+
+
+TEST_F("Test that simple parsing works", TestFixture)
+{
+ f.testParse("not ((test))", "test");
+ f.testParse("not ((test and (test.aa > 3999)))", "test");
+ f.testParse("not ((test and (test.ab > 3999)))", "test");
+ f.testParse("not ((test and (test.af > 3999)))", "test");
+ f.testParse("not ((test_2 and (test_2.af > 3999)))", "test");
+}
+
+
+TEST_F("Test that const is flagged", TestFixture)
+{
+ CachedSelect::SP cs;
+
+ cs = f.testParse("false", "test");
+ EXPECT_TRUE(cs->_allFalse);
+ EXPECT_FALSE(cs->_allTrue);
+ EXPECT_FALSE(cs->_allInvalid);
+ EXPECT_EQUAL(0u, cs->_fieldNodes);
+ cs = f.testParse("true", "test");
+ EXPECT_FALSE(cs->_allFalse);
+ EXPECT_TRUE(cs->_allTrue);
+ EXPECT_FALSE(cs->_allInvalid);
+ EXPECT_EQUAL(0u, cs->_fieldNodes);
+ cs = f.testParse("test_2.ac > 4999", "test");
+ EXPECT_FALSE(cs->_allFalse);
+ EXPECT_FALSE(cs->_allTrue);
+ EXPECT_TRUE(cs->_allInvalid);
+ EXPECT_EQUAL(0u, cs->_fieldNodes);
+ cs = f.testParse("test.aa > 4999", "test");
+ EXPECT_FALSE(cs->_allFalse);
+ EXPECT_FALSE(cs->_allTrue);
+ EXPECT_FALSE(cs->_allInvalid);
+ EXPECT_EQUAL(1u, cs->_fieldNodes);
+ EXPECT_EQUAL(1u, cs->_attrFieldNodes);
+ EXPECT_EQUAL(1u, cs->_svAttrFieldNodes);
+}
+
+
+TEST_F("Test that basic select works", TestFixture)
+{
+ MyDB &db(*f._db);
+
+ db.addDoc(1u, "doc:test:1", "hello", "null", 45, 37);
+ db.addDoc(2u, "doc:test:2", "gotcha", "foo", 3, 25);
+ db.addDoc(3u, "doc:test:3", "gotcha", "foo", noIntVal, noIntVal);
+ db.addDoc(4u, "doc:test:4", "null", "foo", noIntVal, noIntVal);
+
+ CachedSelect::SP cs;
+
+ cs = f.testParse("test.ia == \"hello\"", "test");
+ EXPECT_FALSE(cs->_attrSelect.get() != NULL);
+ EXPECT_FALSE(cs->_allFalse);
+ EXPECT_FALSE(cs->_allTrue);
+ EXPECT_FALSE(cs->_allInvalid);
+ EXPECT_EQUAL(1u, cs->_fieldNodes);
+ EXPECT_EQUAL(0u, cs->_attrFieldNodes);
+ EXPECT_EQUAL(0u, cs->_svAttrFieldNodes);
+ TEST_DO(checkSelect(cs, db.getDoc(1u), Result::True));
+ TEST_DO(checkSelect(cs, db.getDoc(2u), Result::False));
+ TEST_DO(checkSelect(cs, db.getDoc(3u), Result::False));
+ TEST_DO(checkSelect(cs, db.getDoc(4u), Result::False));
+
+ cs = f.testParse("test.ia.foo == \"hello\"", "test");
+ EXPECT_FALSE(cs->_attrSelect.get() != NULL);
+ EXPECT_FALSE(cs->_allFalse);
+ EXPECT_FALSE(cs->_allTrue);
+ EXPECT_TRUE(cs->_allInvalid);
+ EXPECT_EQUAL(0u, cs->_fieldNodes);
+ EXPECT_EQUAL(0u, cs->_attrFieldNodes);
+ EXPECT_EQUAL(0u, cs->_svAttrFieldNodes);
+ TEST_DO(checkSelect(cs, db.getDoc(1u), Result::Invalid));
+ TEST_DO(checkSelect(cs, db.getDoc(2u), Result::Invalid));
+ TEST_DO(checkSelect(cs, db.getDoc(3u), Result::Invalid));
+ TEST_DO(checkSelect(cs, db.getDoc(4u), Result::Invalid));
+
+ cs = f.testParse("test.ia[2] == \"hello\"", "test");
+ EXPECT_FALSE(cs->_attrSelect.get() != NULL);
+ EXPECT_FALSE(cs->_allFalse);
+ EXPECT_FALSE(cs->_allTrue);
+ EXPECT_TRUE(cs->_allInvalid);
+ EXPECT_EQUAL(0u, cs->_fieldNodes);
+ EXPECT_EQUAL(0u, cs->_attrFieldNodes);
+ EXPECT_EQUAL(0u, cs->_svAttrFieldNodes);
+ TEST_DO(checkSelect(cs, db.getDoc(1u), Result::Invalid));
+ TEST_DO(checkSelect(cs, db.getDoc(2u), Result::Invalid));
+ TEST_DO(checkSelect(cs, db.getDoc(3u), Result::Invalid));
+ TEST_DO(checkSelect(cs, db.getDoc(4u), Result::Invalid));
+
+ cs = f.testParse("test.ia{foo} == \"hello\"", "test");
+ EXPECT_FALSE(cs->_attrSelect.get() != NULL);
+ EXPECT_FALSE(cs->_allFalse);
+ EXPECT_FALSE(cs->_allTrue);
+ EXPECT_TRUE(cs->_allInvalid);
+ EXPECT_EQUAL(0u, cs->_fieldNodes);
+ EXPECT_EQUAL(0u, cs->_attrFieldNodes);
+ EXPECT_EQUAL(0u, cs->_svAttrFieldNodes);
+ TEST_DO(checkSelect(cs, db.getDoc(1u), Result::Invalid));
+ TEST_DO(checkSelect(cs, db.getDoc(2u), Result::Invalid));
+ TEST_DO(checkSelect(cs, db.getDoc(3u), Result::Invalid));
+ TEST_DO(checkSelect(cs, db.getDoc(4u), Result::Invalid));
+
+ cs = f.testParse("test.ia < \"hello\"", "test");
+ EXPECT_FALSE(cs->_attrSelect.get() != NULL);
+ EXPECT_FALSE(cs->_allFalse);
+ EXPECT_FALSE(cs->_allTrue);
+ EXPECT_FALSE(cs->_allInvalid);
+ EXPECT_EQUAL(1u, cs->_fieldNodes);
+ EXPECT_EQUAL(0u, cs->_attrFieldNodes);
+ EXPECT_EQUAL(0u, cs->_svAttrFieldNodes);
+ TEST_DO(checkSelect(cs, db.getDoc(1u), Result::False));
+ TEST_DO(checkSelect(cs, db.getDoc(2u), Result::True));
+ TEST_DO(checkSelect(cs, db.getDoc(3u), Result::True));
+ TEST_DO(checkSelect(cs, db.getDoc(4u), Result::Invalid));
+
+ cs = f.testParse("test.aa == 3", "test");
+ EXPECT_TRUE(cs->_attrSelect.get() != NULL);
+ EXPECT_FALSE(cs->_allFalse);
+ EXPECT_FALSE(cs->_allTrue);
+ EXPECT_FALSE(cs->_allInvalid);
+ EXPECT_EQUAL(1u, cs->_fieldNodes);
+ EXPECT_EQUAL(1u, cs->_attrFieldNodes);
+ EXPECT_EQUAL(1u, cs->_svAttrFieldNodes);
+ TEST_DO(checkSelect(cs, db.getDoc(1u), Result::False));
+ TEST_DO(checkSelect(cs, db.getDoc(2u), Result::True));
+ TEST_DO(checkSelect(cs, db.getDoc(3u), Result::False));
+ TEST_DO(checkSelect(cs, db.getDoc(4u), Result::False));
+ TEST_DO(checkSelect(cs, 1u, Result::False));
+ TEST_DO(checkSelect(cs, 2u, Result::True));
+ TEST_DO(checkSelect(cs, 3u, Result::False));
+ TEST_DO(checkSelect(cs, 4u, Result::False));
+
+ cs = f.testParse("test.aa == 3", "test");
+ EXPECT_TRUE(cs->_attrSelect.get() != NULL);
+ EXPECT_FALSE(cs->_allFalse);
+ EXPECT_FALSE(cs->_allTrue);
+ EXPECT_FALSE(cs->_allInvalid);
+ EXPECT_EQUAL(1u, cs->_fieldNodes);
+ EXPECT_EQUAL(1u, cs->_attrFieldNodes);
+ EXPECT_EQUAL(1u, cs->_svAttrFieldNodes);
+ TEST_DO(checkSelect(cs, db.getDoc(1u), Result::False));
+ TEST_DO(checkSelect(cs, db.getDoc(2u), Result::True));
+ TEST_DO(checkSelect(cs, db.getDoc(3u), Result::False));
+ TEST_DO(checkSelect(cs, db.getDoc(4u), Result::False));
+ TEST_DO(checkSelect(cs, 1u, Result::False));
+ TEST_DO(checkSelect(cs, 2u, Result::True));
+ TEST_DO(checkSelect(cs, 3u, Result::False));
+ TEST_DO(checkSelect(cs, 4u, Result::False));
+
+ cs = f.testParse("test.aa.foo == 3", "test");
+ EXPECT_TRUE(cs->_attrSelect.get() == NULL);
+ EXPECT_FALSE(cs->_allFalse);
+ EXPECT_FALSE(cs->_allTrue);
+ EXPECT_TRUE(cs->_allInvalid);
+ EXPECT_EQUAL(0u, cs->_fieldNodes);
+ EXPECT_EQUAL(0u, cs->_attrFieldNodes);
+ EXPECT_EQUAL(0u, cs->_svAttrFieldNodes);
+ TEST_DO(checkSelect(cs, db.getDoc(1u), Result::Invalid));
+ TEST_DO(checkSelect(cs, db.getDoc(2u), Result::Invalid));
+ TEST_DO(checkSelect(cs, db.getDoc(3u), Result::Invalid));
+ TEST_DO(checkSelect(cs, db.getDoc(4u), Result::Invalid));
+
+ cs = f.testParse("test.aa[2] == 3", "test");
+ EXPECT_TRUE(cs->_attrSelect.get() == NULL);
+ EXPECT_FALSE(cs->_allFalse);
+ EXPECT_FALSE(cs->_allTrue);
+ EXPECT_TRUE(cs->_allInvalid);
+ EXPECT_EQUAL(0u, cs->_fieldNodes);
+ EXPECT_EQUAL(0u, cs->_attrFieldNodes);
+ EXPECT_EQUAL(0u, cs->_svAttrFieldNodes);
+ TEST_DO(checkSelect(cs, db.getDoc(1u), Result::Invalid));
+ TEST_DO(checkSelect(cs, db.getDoc(2u), Result::Invalid));
+ TEST_DO(checkSelect(cs, db.getDoc(3u), Result::Invalid));
+ TEST_DO(checkSelect(cs, db.getDoc(4u), Result::Invalid));
+
+ cs = f.testParse("test.aa{4} > 3", "test");
+ EXPECT_TRUE(cs->_attrSelect.get() == NULL);
+ EXPECT_FALSE(cs->_allFalse);
+ EXPECT_FALSE(cs->_allTrue);
+ EXPECT_TRUE(cs->_allInvalid);
+ EXPECT_EQUAL(0u, cs->_fieldNodes);
+ EXPECT_EQUAL(0u, cs->_attrFieldNodes);
+ EXPECT_EQUAL(0u, cs->_svAttrFieldNodes);
+ TEST_DO(checkSelect(cs, db.getDoc(1u), Result::Invalid));
+ TEST_DO(checkSelect(cs, db.getDoc(2u), Result::Invalid));
+ TEST_DO(checkSelect(cs, db.getDoc(3u), Result::Invalid));
+ TEST_DO(checkSelect(cs, db.getDoc(4u), Result::Invalid));
+
+ cs = f.testParse("test.aaa[2] == 3", "test");
+ EXPECT_TRUE(cs->_attrSelect.get() == NULL);
+ EXPECT_FALSE(cs->_allFalse);
+ EXPECT_FALSE(cs->_allTrue);
+ EXPECT_FALSE(cs->_allInvalid);
+ EXPECT_EQUAL(1u, cs->_fieldNodes);
+ EXPECT_EQUAL(1u, cs->_attrFieldNodes);
+ EXPECT_EQUAL(0u, cs->_svAttrFieldNodes);
+
+ cs = f.testParse("test.aaw{4} > 3", "test");
+ EXPECT_TRUE(cs->_attrSelect.get() == NULL);
+ EXPECT_FALSE(cs->_allFalse);
+ EXPECT_FALSE(cs->_allTrue);
+ EXPECT_FALSE(cs->_allInvalid);
+ EXPECT_EQUAL(1u, cs->_fieldNodes);
+ EXPECT_EQUAL(1u, cs->_attrFieldNodes);
+ EXPECT_EQUAL(0u, cs->_svAttrFieldNodes);
+
+ cs = f.testParse("test.aa < 45", "test");
+ EXPECT_TRUE(cs->_attrSelect.get() != NULL);
+ EXPECT_FALSE(cs->_allFalse);
+ EXPECT_FALSE(cs->_allTrue);
+ EXPECT_FALSE(cs->_allInvalid);
+ EXPECT_EQUAL(1u, cs->_fieldNodes);
+ EXPECT_EQUAL(1u, cs->_attrFieldNodes);
+ EXPECT_EQUAL(1u, cs->_svAttrFieldNodes);
+ TEST_DO(checkSelect(cs, db.getDoc(1u), Result::False));
+ TEST_DO(checkSelect(cs, db.getDoc(2u), Result::True));
+ TEST_DO(checkSelect(cs, db.getDoc(3u), Result::Invalid));
+ TEST_DO(checkSelect(cs, db.getDoc(4u), Result::Invalid));
+ TEST_DO(checkSelect(cs, 1u, Result::False));
+ TEST_DO(checkSelect(cs, 2u, Result::True));
+ TEST_DO(checkSelect(cs, 3u, Result::Invalid));
+ TEST_DO(checkSelect(cs, 4u, Result::Invalid));
+
+ MyIntAv *v(dynamic_cast<MyIntAv *>(f._amgr.findAttribute("aa").get()));
+ EXPECT_TRUE(v != NULL);
+ EXPECT_EQUAL(6u, v->getGets());
+}
+
+
+TEST_F("Test performance when using attributes", TestFixture)
+{
+ MyDB &db(*f._db);
+
+ db.addDoc(1u, "doc:test:1", "hello", "null", 45, 37);
+ db.addDoc(2u, "doc:test:2", "gotcha", "foo", 3, 25);
+ db.addDoc(3u, "doc:test:3", "gotcha", "foo", noIntVal, noIntVal);
+ db.addDoc(4u, "doc:test:4", "null", "foo", noIntVal, noIntVal);
+
+ CachedSelect::SP cs;
+ cs = f.testParse("test.aa < 45", "test");
+ EXPECT_TRUE(cs->_attrSelect.get() != NULL);
+ EXPECT_FALSE(cs->_allFalse);
+ EXPECT_FALSE(cs->_allTrue);
+ EXPECT_FALSE(cs->_allInvalid);
+ EXPECT_EQUAL(1u, cs->_fieldNodes);
+ EXPECT_EQUAL(1u, cs->_attrFieldNodes);
+ EXPECT_EQUAL(1u, cs->_svAttrFieldNodes);
+ SelectContext ctx(*cs);
+ ctx.getAttributeGuards();
+ const NodeUP &sel(cs->_attrSelect);
+ uint32_t i;
+ const uint32_t loopcnt = 30000;
+    LOG(info, "Starting minibm loop, %u iterations of 4 docs each", loopcnt);
+ fastos::StopWatchT<fastos::ClockSystem> sw;
+ sw.start();
+ for (i = 0; i < loopcnt; ++i) {
+ ctx._docId = 1u;
+ if (sel->contains(ctx) != Result::False)
+ break;
+ ctx._docId = 2u;
+ if (sel->contains(ctx) != Result::True)
+ break;
+ ctx._docId = 3u;
+ if (sel->contains(ctx) != Result::Invalid)
+ break;
+ ctx._docId = 4u;
+ if (sel->contains(ctx) != Result::Invalid)
+ break;
+ }
+ sw.stop();
+ EXPECT_EQUAL(loopcnt, i);
+ LOG(info,
+ "Elapsed time for %u iterations of 4 docs each: %" PRId64 " ns, "
+ "%8.4f ns/doc",
+ i,
+ sw.elapsed().ns(),
+ static_cast<double>(sw.elapsed().ns()) / ( 4 * i));
+
+}
+
+
+} // namespace
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/common/document_type_inspector/.gitignore b/searchcore/src/tests/proton/common/document_type_inspector/.gitignore
new file mode 100644
index 00000000000..49db4ae7746
--- /dev/null
+++ b/searchcore/src/tests/proton/common/document_type_inspector/.gitignore
@@ -0,0 +1 @@
+searchcore_document_type_inspector_test_app
diff --git a/searchcore/src/tests/proton/common/document_type_inspector/CMakeLists.txt b/searchcore/src/tests/proton/common/document_type_inspector/CMakeLists.txt
new file mode 100644
index 00000000000..f5c4610fc1b
--- /dev/null
+++ b/searchcore/src/tests/proton/common/document_type_inspector/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_document_type_inspector_test_app
+ SOURCES
+ document_type_inspector_test.cpp
+ DEPENDS
+ searchcore_pcommon
+)
+vespa_add_test(NAME searchcore_document_type_inspector_test_app COMMAND searchcore_document_type_inspector_test_app)
diff --git a/searchcore/src/tests/proton/common/document_type_inspector/DESC b/searchcore/src/tests/proton/common/document_type_inspector/DESC
new file mode 100644
index 00000000000..b40b3219939
--- /dev/null
+++ b/searchcore/src/tests/proton/common/document_type_inspector/DESC
@@ -0,0 +1,2 @@
+Test for document type inspector. Take a look at document_type_inspector_test.cpp for details.
+
diff --git a/searchcore/src/tests/proton/common/document_type_inspector/FILES b/searchcore/src/tests/proton/common/document_type_inspector/FILES
new file mode 100644
index 00000000000..1a0b0b31a76
--- /dev/null
+++ b/searchcore/src/tests/proton/common/document_type_inspector/FILES
@@ -0,0 +1 @@
+document_type_inspector_test.cpp
diff --git a/searchcore/src/tests/proton/common/document_type_inspector/document_type_inspector_test.cpp b/searchcore/src/tests/proton/common/document_type_inspector/document_type_inspector_test.cpp
new file mode 100644
index 00000000000..c7fcd9ec72d
--- /dev/null
+++ b/searchcore/src/tests/proton/common/document_type_inspector/document_type_inspector_test.cpp
@@ -0,0 +1,50 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("document_type_inspector_test");
+
+#include <vespa/searchcore/proton/common/document_type_inspector.h>
+#include <vespa/searchlib/index/docbuilder.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+using namespace document;
+using namespace proton;
+using namespace search::index;
+
+Schema
+getSchema()
+{
+ Schema schema;
+ schema.addSummaryField(Schema::SummaryField("f1", Schema::STRING));
+ schema.addSummaryField(Schema::SummaryField("f2", Schema::STRING));
+ return schema;
+}
+
+struct Fixture
+{
+ Schema _schema;
+ DocBuilder _builder;
+ DocumentTypeInspector _inspector;
+ Fixture()
+ : _schema(getSchema()),
+ _builder(_schema),
+ _inspector(_builder.getDocumentType())
+ {
+ }
+};
+
+TEST_F("require that existing fields are known", Fixture)
+{
+ EXPECT_TRUE(f._inspector.hasField("f1"));
+ EXPECT_TRUE(f._inspector.hasField("f2"));
+}
+
+TEST_F("require that non-existing fields are NOT known", Fixture)
+{
+ EXPECT_FALSE(f._inspector.hasField("not"));
+}
+
+TEST_MAIN()
+{
+ TEST_RUN_ALL();
+}
diff --git a/searchcore/src/tests/proton/common/dummydbowner.h b/searchcore/src/tests/proton/common/dummydbowner.h
new file mode 100644
index 00000000000..8e3748b5072
--- /dev/null
+++ b/searchcore/src/tests/proton/common/dummydbowner.h
@@ -0,0 +1,23 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/searchcore/proton/server/idocumentdbowner.h>
+#include <vespa/searchcorespi/plugin/iindexmanagerfactory.h>
+#include <vespa/vespalib/stllike/string.h>
+
+namespace proton
+{
+
+struct DummyDBOwner : IDocumentDBOwner {
+ bool isInitializing() const override { return false; }
+
+ searchcorespi::IIndexManagerFactory::SP
+ getIndexManagerFactory(const vespalib::stringref & ) const override {
+ return searchcorespi::IIndexManagerFactory::SP();
+ }
+ uint32_t getDistributionKey() const override { return -1; }
+};
+
+} // namespace proton
+
diff --git a/searchcore/src/tests/proton/common/schemautil_test.cpp b/searchcore/src/tests/proton/common/schemautil_test.cpp
new file mode 100644
index 00000000000..c6519ecae06
--- /dev/null
+++ b/searchcore/src/tests/proton/common/schemautil_test.cpp
@@ -0,0 +1,132 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Unit tests for schemautil.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("schemautil_test");
+
+#include <vespa/searchcore/proton/common/schemautil.h>
+#include <vespa/vespalib/stllike/string.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+using search::index::Schema;
+using vespalib::string;
+
+using namespace proton;
+
+namespace {
+
+void addAllFieldTypes(const string &name, Schema &schema,
+ fastos::TimeStamp timestamp)
+{
+ Schema::IndexField index_field(name, Schema::STRING);
+ index_field.setTimestamp(timestamp);
+ schema.addIndexField(index_field);
+
+ Schema::AttributeField attribute_field(name, Schema::STRING);
+ attribute_field.setTimestamp(timestamp);
+ schema.addAttributeField(attribute_field);
+
+ Schema::SummaryField summary_field(name, Schema::STRING);
+ summary_field.setTimestamp(timestamp);
+ schema.addSummaryField(summary_field);
+}
+
+TEST("require that makeHistorySchema sets timestamp")
+{
+ Schema old_schema;
+ Schema new_schema;
+ Schema old_history;
+
+ const fastos::TimeStamp now(84);
+ const string name = "foo";
+ addAllFieldTypes(name, old_schema, fastos::TimeStamp(0));
+
+ Schema::SP schema = SchemaUtil::makeHistorySchema(new_schema, old_schema,
+ old_history, now);
+
+ ASSERT_EQUAL(1u, schema->getNumIndexFields());
+ EXPECT_EQUAL(name, schema->getIndexField(0).getName());
+ EXPECT_EQUAL(now, schema->getIndexField(0).getTimestamp());
+
+ ASSERT_EQUAL(1u, schema->getNumAttributeFields());
+ EXPECT_EQUAL(name, schema->getAttributeField(0).getName());
+ EXPECT_EQUAL(now, schema->getAttributeField(0).getTimestamp());
+
+ ASSERT_EQUAL(1u, schema->getNumSummaryFields());
+ EXPECT_EQUAL(name, schema->getSummaryField(0).getName());
+ EXPECT_EQUAL(now, schema->getSummaryField(0).getTimestamp());
+}
+
+TEST("require that makeHistorySchema preserves timestamp")
+{
+ Schema old_schema;
+ Schema new_schema;
+ Schema old_history;
+
+ const fastos::TimeStamp timestamp(42);
+ const string name = "foo";
+ addAllFieldTypes("bar", old_schema, fastos::TimeStamp(0));
+ addAllFieldTypes(name, old_history, timestamp);
+
+ Schema::SP schema =
+ SchemaUtil::makeHistorySchema(new_schema, old_schema, old_history);
+
+ ASSERT_EQUAL(2u, schema->getNumIndexFields());
+ uint32_t id = schema->getIndexFieldId(name);
+ ASSERT_NOT_EQUAL(id, Schema::UNKNOWN_FIELD_ID);
+ EXPECT_EQUAL(timestamp, schema->getIndexField(id).getTimestamp());
+
+ ASSERT_EQUAL(2u, schema->getNumAttributeFields());
+ id = schema->getAttributeFieldId(name);
+ ASSERT_NOT_EQUAL(id, Schema::UNKNOWN_FIELD_ID);
+ EXPECT_EQUAL(timestamp, schema->getAttributeField(id).getTimestamp());
+
+ ASSERT_EQUAL(2u, schema->getNumSummaryFields());
+ id = schema->getSummaryFieldId(name);
+ ASSERT_NOT_EQUAL(id, Schema::UNKNOWN_FIELD_ID);
+ EXPECT_EQUAL(timestamp, schema->getSummaryField(id).getTimestamp());
+}
+
+struct ListSchemaResult {
+ std::vector<vespalib::string> fieldNames;
+ std::vector<vespalib::string> fieldDataTypes;
+ std::vector<vespalib::string> fieldCollectionTypes;
+ std::vector<vespalib::string> fieldLocations;
+};
+
+void
+assertSchemaResult(const vespalib::string &name,
+ const vespalib::string &dataType,
+ const vespalib::string &collectionType,
+ const vespalib::string &location,
+ const ListSchemaResult &r,
+ size_t i)
+{
+ EXPECT_EQUAL(name, r.fieldNames[i]);
+ EXPECT_EQUAL(dataType, r.fieldDataTypes[i]);
+ EXPECT_EQUAL(collectionType, r.fieldCollectionTypes[i]);
+ EXPECT_EQUAL(location, r.fieldLocations[i]);
+}
+
+TEST("require that listSchema can list all fields")
+{
+ Schema schema;
+ schema.addIndexField(Schema::IndexField("if", Schema::STRING));
+ schema.addAttributeField(Schema::AttributeField("af", Schema::INT32));
+ schema.addSummaryField(Schema::SummaryField("sf", Schema::FLOAT, Schema::ARRAY));
+
+ ListSchemaResult r;
+ SchemaUtil::listSchema(schema, r.fieldNames, r.fieldDataTypes, r.fieldCollectionTypes, r.fieldLocations);
+ EXPECT_EQUAL(3u, r.fieldNames.size());
+ EXPECT_EQUAL(3u, r.fieldDataTypes.size());
+ EXPECT_EQUAL(3u, r.fieldCollectionTypes.size());
+ EXPECT_EQUAL(3u, r.fieldLocations.size());
+ assertSchemaResult("af", "INT32", "SINGLE", "a", r, 0);
+ assertSchemaResult("if", "STRING", "SINGLE", "i", r, 1);
+ assertSchemaResult("sf", "FLOAT", "ARRAY", "s", r, 2);
+}
+
+} // namespace
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/common/selectpruner_test.cpp b/searchcore/src/tests/proton/common/selectpruner_test.cpp
new file mode 100644
index 00000000000..e8ec0ae7cb5
--- /dev/null
+++ b/searchcore/src/tests/proton/common/selectpruner_test.cpp
@@ -0,0 +1,778 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("selectpruner_test");
+
+#include <vespa/vespalib/stllike/string.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/document/repo/configbuilder.h>
+#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/searchcore/proton/common/selectpruner.h>
+#include <vespa/document/select/parser.h>
+#include <vespa/document/select/cloningvisitor.h>
+
+
+using search::index::Schema;
+using document::DocumentTypeRepo;
+using document::DocumentType;
+using document::select::Node;
+using document::select::Result;
+using document::select::ResultSet;
+using document::select::CloningVisitor;
+using vespalib::string;
+
+using document::config_builder::DocumenttypesConfigBuilderHelper;
+using document::config_builder::Struct;
+using document::config_builder::Array;
+using document::config_builder::Wset;
+using document::config_builder::Map;
+using document::DataType;
+using document::Document;
+using proton::SelectPruner;
+
+typedef Node::UP NodeUP;
+
+namespace
+{
+
+void
+makeSchema(Schema &s)
+{
+ s.addIndexField(Schema::IndexField("ia", Schema::STRING));
+ s.addAttributeField(Schema::AttributeField("aa", Schema::INT32));
+ s.addAttributeField(Schema::AttributeField("aaa", Schema::INT32,
+ Schema::ARRAY));
+ s.addAttributeField(Schema::AttributeField("aaw", Schema::INT32,
+ Schema::WEIGHTEDSET));
+}
+
+const int32_t doc_type_id = 787121340;
+const string type_name = "test";
+const string header_name = type_name + ".header";
+const string body_name = type_name + ".body";
+const string type_name_2 = "test_2";
+const string header_name_2 = type_name_2 + ".header";
+const string body_name_2 = type_name_2 + ".body";
+const string false_name("false");
+const string true_name("true");
+const string not_name("not");
+const string valid_name("test.aa > 3999");
+const string valid2_name("test.ab > 4999");
+const string rvalid_name("test.aa <= 3999");
+const string rvalid2_name("test.ab <= 4999");
+const string invalid_name("test_2.ac > 3999");
+const string invalid2_name("test_2.ac > 4999");
+const string empty("");
+
+const document::DocumentId docId("doc:test:1");
+
+
+DocumentTypeRepo::UP
+makeDocTypeRepo(void)
+{
+ DocumenttypesConfigBuilderHelper builder;
+ builder.document(doc_type_id, type_name,
+ Struct(header_name), Struct(body_name).
+ addField("ia", DataType::T_STRING).
+ addField("ib", DataType::T_STRING).
+ addField("ibs", Struct("pair").
+ addField("x", DataType::T_STRING).
+ addField("y", DataType::T_STRING)).
+ addField("iba", Array(DataType::T_STRING)).
+ addField("ibw", Wset(DataType::T_STRING)).
+ addField("ibm", Map(DataType::T_STRING,
+ DataType::T_STRING)).
+ addField("aa", DataType::T_INT).
+ addField("aaa", Array(DataType::T_INT)).
+ addField("aaw", Wset(DataType::T_INT)).
+ addField("ab", DataType::T_INT));
+ builder.document(doc_type_id + 1, type_name_2,
+ Struct(header_name_2), Struct(body_name_2).
+ addField("ic", DataType::T_STRING).
+ addField("id", DataType::T_STRING).
+ addField("ac", DataType::T_INT).
+ addField("ad", DataType::T_INT));
+ return DocumentTypeRepo::UP(new DocumentTypeRepo(builder.config()));
+}
+
+
+std::string
+rsString(const ResultSet &s)
+{
+ std::ostringstream os;
+ bool first = true;
+ uint32_t erange = Result::enumRange();
+ for (uint32_t e = 0; e < erange; ++e) {
+ if (s.hasEnum(e)) {
+ if (!first)
+ os << ",";
+ first = false;
+ Result::fromEnum(e).print(os, false, "");
+ }
+ }
+ if (first) {
+ os << "empty";
+ }
+ return os.str();
+}
+
+
+const char *
+csString(const SelectPruner &pruner)
+{
+ if (!pruner.isConst())
+ return "not const";
+ if (pruner.isFalse())
+ return "const false";
+ if (pruner.isTrue())
+ return "const true";
+ if (pruner.isInvalid())
+ return "const invalid";
+ return "const something";
+}
+
+
+class TestFixture
+{
+public:
+ Schema _s;
+ DocumentTypeRepo::UP _repoUP;
+ bool _hasFields;
+
+ TestFixture(void);
+
+ ~TestFixture(void);
+
+ void
+ testParse(const string &selection);
+
+ void
+ testParseFail(const string &selection);
+
+ void
+ testPrune(const string &selection,
+ const string &exp);
+
+ void
+ testPrune(const string &selection,
+ const string &exp,
+ const string &docTypeName);
+};
+
+
+TestFixture::TestFixture(void)
+ : _s(),
+ _repoUP(),
+ _hasFields(true)
+{
+ makeSchema(_s);
+ _repoUP = makeDocTypeRepo();
+}
+
+
+TestFixture::~TestFixture(void)
+{
+}
+
+
+void
+TestFixture::testParse(const string &selection)
+{
+ const DocumentTypeRepo &repo(*_repoUP);
+ document::select::Parser parser(repo,
+ document::BucketIdFactory());
+
+ NodeUP select;
+
+ try {
+ LOG(info,
+ "Trying to parse '%s'",
+ selection.c_str());
+ select = parser.parse(selection);
+ } catch (document::select::ParsingFailedException &e) {
+ LOG(info,
+ "Parse failed: %s", e.what());
+ select.reset(0);
+ }
+ ASSERT_TRUE(select.get() != NULL);
+}
+
+
+void
+TestFixture::testParseFail(const string &selection)
+{
+ const DocumentTypeRepo &repo(*_repoUP);
+ document::select::Parser parser(repo,
+ document::BucketIdFactory());
+
+ NodeUP select;
+
+ try {
+ LOG(info,
+ "Trying to parse '%s'",
+ selection.c_str());
+ select = parser.parse(selection);
+ } catch (document::select::ParsingFailedException &e) {
+ LOG(info,
+ "Parse failed: %s",
+ e.getMessage().c_str());
+ select.reset(0);
+ }
+ ASSERT_TRUE(select.get() == NULL);
+}
+
+
+void
+TestFixture::testPrune(const string &selection,
+ const string &exp,
+ const string &docTypeName)
+{
+ const DocumentTypeRepo &repo(*_repoUP);
+ const Schema &schema(_s);
+ document::select::Parser parser(repo,
+ document::BucketIdFactory());
+
+ NodeUP select;
+
+ try {
+ LOG(info,
+ "Trying to parse '%s' with docType=%s",
+ selection.c_str(),
+ docTypeName.c_str());
+ select = parser.parse(selection);
+ } catch (document::select::ParsingFailedException &e) {
+ LOG(info,
+ "Parse failed: %s", e.what());
+ select.reset(0);
+ }
+ ASSERT_TRUE(select.get() != NULL);
+ std::ostringstream os;
+ select->print(os, true, "");
+ LOG(info, "ParseTree: '%s'", os.str().c_str());
+ const DocumentType *docType = repo.getDocumentType(docTypeName);
+ ASSERT_TRUE(docType != NULL);
+ Document::UP emptyDoc(new Document(*docType, docId));
+ emptyDoc->setRepo(repo);
+ SelectPruner pruner(docTypeName, schema, *emptyDoc, repo, _hasFields);
+ pruner.process(*select);
+ std::ostringstream pos;
+ pruner.getNode()->print(pos, true, "");
+ EXPECT_EQUAL(exp, pos.str());
+ LOG(info,
+ "Pruned ParseTree: '%s', fieldNodes=%u,%u, %s, rs=%s",
+ pos.str().c_str(),
+ pruner.getFieldNodes(),
+ pruner.getAttrFieldNodes(),
+ csString(pruner),
+ rsString(pruner.getResultSet()).c_str());
+ if (pruner.isConst()) {
+ ResultSet t;
+ if (pruner.isFalse())
+ t.add(Result::False);
+ if (pruner.isTrue())
+ t.add(Result::True);
+ if (pruner.isInvalid())
+ t.add(Result::Invalid);
+ ASSERT_TRUE(t == pruner.getResultSet());
+ }
+ CloningVisitor cv;
+ pruner.getNode()->visit(cv);
+ std::ostringstream cvpos;
+ cv.getNode()->print(cvpos, true, "");
+ EXPECT_EQUAL(exp, cvpos.str());
+#if 0
+ std::ostringstream os2;
+ pruner.trace(os2);
+ LOG(info, "trace pruned: %s", os2.str().c_str());
+#endif
+}
+
+
+void
+TestFixture::testPrune(const string &selection,
+ const string &exp)
+{
+ testPrune(selection, exp, "test");
+}
+
+
+TEST_F("Test that test setup is OK", TestFixture)
+{
+ DocumentTypeRepo &repo = *f._repoUP;
+ const DocumentType *docType = repo.getDocumentType("test");
+ ASSERT_TRUE(docType);
+ EXPECT_EQUAL(10u, docType->getFieldCount());
+ EXPECT_EQUAL("String", docType->getField("ia").getDataType().getName());
+ EXPECT_EQUAL("String", docType->getField("ib").getDataType().getName());
+ EXPECT_EQUAL("Int", docType->getField("aa").getDataType().getName());
+ EXPECT_EQUAL("Int", docType->getField("ab").getDataType().getName());
+}
+
+
+TEST_F("Test that simple parsing works", TestFixture)
+{
+ f.testParse("not ((test))");
+ f.testParse("not ((test and (test.aa > 3999)))");
+ f.testParse("not ((test and (test.ab > 3999)))");
+ f.testParse("not ((test and (test.af > 3999)))");
+ f.testParse("not ((test_2 and (test_2.af > 3999)))");
+}
+
+
+TEST_F("Test that wrong doctype causes parse error", TestFixture)
+{
+ f.testParseFail("not ((test_3 and (test_3.af > 3999)))");
+}
+
+
+TEST_F("Test that boolean const shortcuts are OK", TestFixture)
+{
+ f.testPrune("false and false",
+ "false");
+ f.testPrune(false_name + " and " + invalid2_name,
+ "false");
+ f.testPrune(false_name + " and " + valid2_name,
+ "false");
+ f.testPrune("false and true",
+ "false");
+
+ f.testPrune(invalid_name + " and false",
+ "false");
+ f.testPrune(invalid_name + " and " + invalid2_name,
+ "invalid");
+ f.testPrune(invalid_name + " and " + valid2_name,
+ empty + "invalid and " + valid2_name);
+ f.testPrune(invalid_name + " and true",
+ "invalid");
+
+ f.testPrune(valid_name + " and false",
+ "false");
+ f.testPrune(valid_name + " and " + invalid2_name,
+ empty + valid_name + " and invalid");
+ f.testPrune(valid_name + " and " + valid2_name,
+ valid_name + " and " + valid2_name);
+ f.testPrune(valid_name + " and true",
+ valid_name);
+
+ f.testPrune("true and false",
+ "false");
+ f.testPrune(true_name + " and " + invalid2_name,
+ "invalid");
+ f.testPrune(true_name + " and " + valid2_name,
+ valid2_name);
+ f.testPrune("true and true",
+ "true");
+
+ f.testPrune("false or false",
+ "false");
+ f.testPrune(false_name + " or " + invalid2_name,
+ "invalid");
+ f.testPrune(false_name + " or " + valid2_name,
+ valid2_name);
+ f.testPrune("false or true",
+ "true");
+
+ f.testPrune(invalid_name + " or false",
+ "invalid");
+ f.testPrune(invalid_name + " or " + invalid2_name,
+ "invalid");
+ f.testPrune(invalid_name + " or " + valid2_name,
+ empty + "invalid or " + valid2_name);
+ f.testPrune(invalid_name + " or true",
+ "true");
+
+ f.testPrune(valid_name + " or false",
+ valid_name);
+ f.testPrune(valid_name + " or " + invalid2_name,
+ valid_name + " or invalid");
+ f.testPrune(valid_name + " or " + valid2_name,
+ valid_name + " or " + valid2_name);
+ f.testPrune(valid_name + " or true",
+ "true");
+
+ f.testPrune("true or false",
+ "true");
+ f.testPrune(true_name + " or " + invalid2_name,
+ "true");
+ f.testPrune(true_name + " or " + valid2_name,
+ "true");
+ f.testPrune("true or true",
+ "true");
+}
+
+
+TEST_F("Test that selection expressions are pruned", TestFixture)
+{
+ f.testPrune("not ((test))",
+ "false");
+ f.testPrune("not ((test and (test.aa > 3999)))",
+ "test.aa <= 3999");
+ f.testPrune("not ((test and (test.ab > 3999)))",
+ "test.ab <= 3999");
+ f.testPrune("not ((test and (test.af > 3999)))",
+ "invalid");
+ f.testPrune("not ((test and (test_2.ac > 3999)))",
+ "invalid");
+ f.testPrune("not ((test and (test.af > 3999)))",
+ "true",
+ "test_2");
+ const char *combined =
+ "not ((test and (test.aa > 3999)) or (test_2 and (test_2.ac > 4999)))";
+ f.testPrune(combined,
+ "test.aa <= 3999");
+ f.testPrune(combined,
+ "test_2.ac <= 4999",
+ "test_2");
+}
+
+
+TEST_F("Test that De Morgan's laws are applied", TestFixture)
+{
+ f.testPrune("not (test.aa < 3901 and test.ab < 3902)",
+ "test.aa >= 3901 or test.ab >= 3902");
+ f.testPrune("not (test.aa < 3903 or test.ab < 3904)",
+ "test.aa >= 3903 and test.ab >= 3904");
+ f.testPrune("not (not (test.aa < 3903 or test.ab < 3904))",
+ "test.aa < 3903 or test.ab < 3904");
+
+ f.testPrune("not (false and false)",
+ "true");
+ f.testPrune(empty + "not (false and " + invalid2_name + ")",
+ "true");
+ f.testPrune(empty + "not (false and " + valid2_name + ")",
+ "true");
+ f.testPrune("not (false and true)",
+ "true");
+
+ f.testPrune(empty + "not (" + invalid_name + " and false)",
+ "true");
+ f.testPrune(empty + "not (" + invalid_name + " and " + invalid2_name + ")",
+ "invalid");
+ f.testPrune(empty + "not (" + invalid_name + " and " + valid2_name + ")",
+ empty + "invalid or " + rvalid2_name);
+ f.testPrune(empty + "not (" + invalid_name + " and true)",
+ "invalid");
+
+ f.testPrune(empty + "not (" + valid_name + " and false)",
+ "true");
+ f.testPrune(empty + "not (" + valid_name + " and " + invalid2_name + ")",
+ empty + rvalid_name + " or invalid");
+ f.testPrune(empty + "not (" + valid_name + " and " + valid2_name + ")",
+ rvalid_name + " or " + rvalid2_name);
+ f.testPrune(empty + "not (" + valid_name + " and true)",
+ rvalid_name);
+
+ f.testPrune("not (true and false)",
+ "true");
+ f.testPrune(empty + "not (true and " + invalid2_name + ")",
+ "invalid");
+ f.testPrune(empty + "not (true and " + valid2_name + ")",
+ rvalid2_name);
+ f.testPrune("not (true and true)",
+ "false");
+
+ f.testPrune("not (false or false)",
+ "true");
+ f.testPrune(empty + "not (false or " + invalid2_name + ")",
+ "invalid");
+ f.testPrune(empty + "not (false or " + valid2_name + ")",
+ rvalid2_name);
+ f.testPrune("not (false or true)",
+ "false");
+
+ f.testPrune(empty + "not (" + invalid_name + " or false)",
+ "invalid");
+ f.testPrune(empty + "not (" + invalid_name + " or " + invalid2_name + ")",
+ "invalid");
+ f.testPrune(empty + "not (" + invalid_name + " or " + valid2_name + ")",
+ empty + "invalid and " + rvalid2_name);
+ f.testPrune(empty + "not (" + invalid_name + " or true)",
+ "false");
+
+ f.testPrune(empty + "not (" + valid_name + " or false)",
+ rvalid_name);
+ f.testPrune(empty + "not (" + valid_name + " or " + invalid2_name + ")",
+ rvalid_name + " and invalid");
+ f.testPrune(empty + "not (" + valid_name + " or " + valid2_name + ")",
+ rvalid_name + " and " + rvalid2_name);
+ f.testPrune(empty + "not (" + valid_name + " or true)",
+ "false");
+
+ f.testPrune("not (true or false)",
+ "false");
+ f.testPrune(empty + "not (true or " + invalid2_name + ")",
+ "false");
+ f.testPrune(empty + "not (true or " + valid2_name + ")",
+ "false");
+ f.testPrune("not (true or true)",
+ "false");
+
+}
+
+
+TEST_F("Test that attribute fields and constants are evaluated"
+ " before other fields",
+ TestFixture)
+{
+ f.testPrune("test.ia == \"hello\" and test.aa > 5",
+ "test.aa > 5 and test.ia == \"hello\"");
+}
+
+
+TEST_F("Test that functions are visited", TestFixture)
+{
+ f.testPrune("test.ia.lowercase() == \"hello\"",
+ "test.ia.lowercase() == \"hello\"");
+ f.testPrune("test_2.ac.lowercase() == \"hello\"",
+ "invalid");
+ f.testPrune("test.ia.hash() == 45",
+ "test.ia.hash() == 45");
+ f.testPrune("test_2.ic.hash() == 45",
+ "invalid");
+ f.testPrune("test.aa.abs() == 45",
+ "test.aa.abs() == 45");
+ f.testPrune("test_2.ac.abs() == 45",
+ "invalid");
+}
+
+
+TEST_F("Test that arithmetic values are visited", TestFixture)
+{
+ f.testPrune("test.aa + 4 < 3999",
+ "test.aa + 4 < 3999");
+ f.testPrune("test_2.ac + 4 < 3999",
+ "invalid");
+ f.testPrune("test.aa + 4.2 < 3999",
+ "test.aa + 4.2 < 3999");
+ f.testPrune("test_2.ac + 5.2 < 3999",
+ "invalid");
+}
+
+
+TEST_F("Test that addition is associative", TestFixture)
+{
+ f.testPrune("test.aa + 4 + 5 < 3999",
+ "test.aa + 4 + 5 < 3999");
+ f.testPrune("(test.aa + 6) + 7 < 3999",
+ "test.aa + 6 + 7 < 3999");
+ f.testPrune("test.aa + (8 + 9) < 3999",
+ "test.aa + 8 + 9 < 3999");
+}
+
+
+TEST_F("Test that subtraction is left associative", TestFixture)
+{
+ f.testPrune("test.aa - 4 - 5 < 3999",
+ "test.aa - 4 - 5 < 3999");
+ f.testPrune("(test.aa - 6) - 7 < 3999",
+ "test.aa - 6 - 7 < 3999");
+ f.testPrune("test.aa - (8 - 9) < 3999",
+ "test.aa - (8 - 9) < 3999");
+}
+
+
+TEST_F("Test that multiplication is associative", TestFixture)
+{
+ f.testPrune("test.aa * 4 * 5 < 3999",
+ "test.aa * 4 * 5 < 3999");
+ f.testPrune("(test.aa * 6) * 7 < 3999",
+ "test.aa * 6 * 7 < 3999");
+ f.testPrune("test.aa * (8 * 9) < 3999",
+ "test.aa * 8 * 9 < 3999");
+}
+
+
+TEST_F("Test that division is left associative", TestFixture)
+{
+ f.testPrune("test.aa / 4 / 5 < 3999",
+ "test.aa / 4 / 5 < 3999");
+ f.testPrune("(test.aa / 6) / 7 < 3999",
+ "test.aa / 6 / 7 < 3999");
+ f.testPrune("test.aa / (8 / 9) < 3999",
+ "test.aa / (8 / 9) < 3999");
+}
+
+
+TEST_F("Test that mod is left associative", TestFixture)
+{
+ f.testPrune("test.aa % 4 % 5 < 3999",
+ "test.aa % 4 % 5 < 3999");
+ f.testPrune("(test.aa % 6) % 7 < 3999",
+ "test.aa % 6 % 7 < 3999");
+ f.testPrune("test.aa % (8 % 9) < 3999",
+ "test.aa % (8 % 9) < 3999");
+}
+
+
+TEST_F("Test that multiplication has higher priority than addition",
+ TestFixture)
+{
+ f.testPrune("test.aa + 4 * 5 < 3999",
+ "test.aa + 4 * 5 < 3999");
+ f.testPrune("(test.aa + 6) * 7 < 3999",
+ "(test.aa + 6) * 7 < 3999");
+ f.testPrune("test.aa + (8 * 9) < 3999",
+ "test.aa + 8 * 9 < 3999");
+ f.testPrune("test.aa * 4 + 5 < 3999",
+ "test.aa * 4 + 5 < 3999");
+ f.testPrune("(test.aa * 6) + 7 < 3999",
+ "test.aa * 6 + 7 < 3999");
+ f.testPrune("test.aa * (8 + 9) < 3999",
+ "test.aa * (8 + 9) < 3999");
+}
+
+
+TEST_F("Test that toplevel functions are visited", TestFixture)
+{
+ f.testPrune("searchcolumn.15 == 4",
+ "searchcolumn.15 == 4");
+ f.testPrune("id.scheme == \"doc\"",
+ "id.scheme == \"doc\"");
+ f.testPrune("test.aa < now() - 7200",
+ "test.aa < now() - 7200");
+}
+
+
+TEST_F("Test that variables are visited", TestFixture)
+{
+ f.testPrune("$foovar == 4.3",
+ "$foovar == 4.3");
+}
+
+
+TEST_F("Test that null is visited", TestFixture)
+{
+ f.testPrune("test.aa",
+ "test.aa != null");
+ f.testPrune("test.aa == null",
+ "test.aa == null");
+ f.testPrune("not test.aa",
+ "test.aa == null");
+}
+
+
+TEST_F("Test that operator inversion works", TestFixture)
+{
+ f.testPrune("not test.aa < 3999",
+ "test.aa >= 3999");
+ f.testPrune("not test.aa <= 3999",
+ "test.aa > 3999");
+ f.testPrune("not test.aa > 3999",
+ "test.aa <= 3999");
+ f.testPrune("not test.aa >= 3999",
+ "test.aa < 3999");
+ f.testPrune("not test.aa == 3999",
+ "test.aa != 3999");
+ f.testPrune("not test.aa != 3999",
+ "test.aa == 3999");
+}
+
+
+TEST_F("Test that fields are not present in removed sub db", TestFixture)
+{
+ f._hasFields = true;
+ f.testPrune("test.aa > 5",
+ "test.aa > 5");
+ f.testPrune("test.aa == test.ab",
+ "test.aa == test.ab");
+ f.testPrune("test.aa != test.ab",
+ "test.aa != test.ab");
+ f.testPrune("not test.aa == test.ab",
+ "test.aa != test.ab");
+ f.testPrune("not test.aa != test.ab",
+ "test.aa == test.ab");
+ f.testPrune("test.ia == \"hello\"",
+ "test.ia == \"hello\"");
+ f._hasFields = false;
+ f.testPrune("test.aa > 5",
+ "invalid");
+ f.testPrune("test.aa == test.ab",
+ "true");
+ f.testPrune("test.aa != test.ab",
+ "false");
+ f.testPrune("test.aa < test.ab",
+ "invalid");
+ f.testPrune("test.aa > test.ab",
+ "invalid");
+ f.testPrune("test.aa <= test.ab",
+ "invalid");
+ f.testPrune("test.aa >= test.ab",
+ "invalid");
+ f.testPrune("not test.aa == test.ab",
+ "false");
+ f.testPrune("not test.aa != test.ab",
+ "true");
+ f.testPrune("test.ia == \"hello\"",
+ "invalid");
+ f.testPrune("not test.aa < test.ab",
+ "invalid");
+ f.testPrune("not test.aa > test.ab",
+ "invalid");
+ f.testPrune("not test.aa <= test.ab",
+ "invalid");
+ f.testPrune("not test.aa >= test.ab",
+ "invalid");
+}
+
+
+TEST_F("Test that some operators cannot be inverted", TestFixture)
+{
+ f.testPrune("test.ia == \"hello\"",
+ "test.ia == \"hello\"");
+ f.testPrune("not test.ia == \"hello\"",
+ "test.ia != \"hello\"");
+ f.testPrune("test.ia = \"hello\"",
+ "test.ia = \"hello\"");
+ f.testPrune("not test.ia = \"hello\"",
+ "not test.ia = \"hello\"");
+ f.testPrune("not (test.ia == \"hello\" or test.ia == \"hi\")",
+ "test.ia != \"hello\" and test.ia != \"hi\"");
+ f.testPrune("not (test.ia == \"hello\" or test.ia = \"hi\")",
+ "not (not test.ia != \"hello\" or test.ia = \"hi\")");
+ f.testPrune("not (test.ia = \"hello\" or test.ia == \"hi\")",
+ "not (test.ia = \"hello\" or not test.ia != \"hi\")");
+ f.testPrune("not (test.ia = \"hello\" or test.ia = \"hi\")",
+ "not (test.ia = \"hello\" or test.ia = \"hi\")");
+}
+
+
+TEST_F("Test that complex field refs are handled", TestFixture)
+{
+ f.testPrune("test.ia",
+ "test.ia != null");
+ f.testPrune("test.ia == \"hello\"",
+ "test.ia == \"hello\"");
+ f.testPrune("test.ia.foo == \"hello\"",
+ "invalid");
+ f.testPrune("test.ibs.foo == \"hello\"",
+ "invalid");
+ f.testPrune("test.ibs.x == \"hello\"",
+ "test.ibs.x == \"hello\"");
+ f.testPrune("test.ia[2] == \"hello\"",
+ "invalid");
+ f.testPrune("test.iba[2] == \"hello\"",
+ "test.iba[2] == \"hello\"");
+ f.testPrune("test.ia{foo} == \"hello\"",
+ "invalid");
+ f.testPrune("test.ibw{foo} == 4",
+ "test.ibw{foo} == 4");
+ f.testPrune("test.ibw{foo} == \"hello\"",
+ "test.ibw{foo} == \"hello\"");
+ f.testPrune("test.ibm{foo} == \"hello\"",
+ "test.ibm{foo} == \"hello\"");
+ f.testPrune("test.aa == 4",
+ "test.aa == 4");
+ f.testPrune("test.aa[4] == 4",
+ "invalid");
+ f.testPrune("test.aaa[4] == 4",
+ "test.aaa[4] == 4");
+ f.testPrune("test.aa{4} == 4",
+ "invalid");
+ f.testPrune("test.aaw{4} == 4",
+ "test.aaw{4} == 4");
+}
+
+
+} // namespace
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/common/state_reporter_utils/.gitignore b/searchcore/src/tests/proton/common/state_reporter_utils/.gitignore
new file mode 100644
index 00000000000..bb0963e5ec3
--- /dev/null
+++ b/searchcore/src/tests/proton/common/state_reporter_utils/.gitignore
@@ -0,0 +1 @@
+searchcore_state_reporter_utils_test_app
diff --git a/searchcore/src/tests/proton/common/state_reporter_utils/CMakeLists.txt b/searchcore/src/tests/proton/common/state_reporter_utils/CMakeLists.txt
new file mode 100644
index 00000000000..9b1ffd8aef2
--- /dev/null
+++ b/searchcore/src/tests/proton/common/state_reporter_utils/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_state_reporter_utils_test_app
+ SOURCES
+ state_reporter_utils_test.cpp
+ DEPENDS
+ searchcore_pcommon
+)
+vespa_add_test(NAME searchcore_state_reporter_utils_test_app COMMAND searchcore_state_reporter_utils_test_app)
diff --git a/searchcore/src/tests/proton/common/state_reporter_utils/DESC b/searchcore/src/tests/proton/common/state_reporter_utils/DESC
new file mode 100644
index 00000000000..7e4bc287738
--- /dev/null
+++ b/searchcore/src/tests/proton/common/state_reporter_utils/DESC
@@ -0,0 +1 @@
+state reporter utils test. Take a look at state_reporter_utils_test.cpp for details.
diff --git a/searchcore/src/tests/proton/common/state_reporter_utils/FILES b/searchcore/src/tests/proton/common/state_reporter_utils/FILES
new file mode 100644
index 00000000000..cbea1ab5ad2
--- /dev/null
+++ b/searchcore/src/tests/proton/common/state_reporter_utils/FILES
@@ -0,0 +1 @@
+state_reporter_utils_test.cpp
diff --git a/searchcore/src/tests/proton/common/state_reporter_utils/state_reporter_utils_test.cpp b/searchcore/src/tests/proton/common/state_reporter_utils/state_reporter_utils_test.cpp
new file mode 100644
index 00000000000..9b04a126a48
--- /dev/null
+++ b/searchcore/src/tests/proton/common/state_reporter_utils/state_reporter_utils_test.cpp
@@ -0,0 +1,48 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("state_reporter_utils_test");
+
+#include <vespa/searchcore/proton/common/state_reporter_utils.h>
+#include <vespa/vespalib/data/slime/slime.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+using namespace proton;
+using namespace vespalib::slime;
+using vespalib::Slime;
+
+vespalib::string
+toString(const StatusReport &statusReport)
+{
+ Slime slime;
+ StateReporterUtils::convertToSlime(statusReport, SlimeInserter(slime));
+ return slime.toString();
+}
+
+TEST("require that simple status report is correctly converted to slime")
+{
+ EXPECT_EQUAL(
+ "{\n"
+ " \"state\": \"ONLINE\"\n"
+ "}\n",
+ toString(StatusReport(StatusReport::Params("").
+ internalState("ONLINE"))));
+}
+
+TEST("require that advanced status report is correctly converted to slime")
+{
+ EXPECT_EQUAL(
+ "{\n"
+ " \"state\": \"REPLAY\",\n"
+ " \"progress\": 65.5,\n"
+ " \"configState\": \"OK\",\n"
+ " \"message\": \"foo\"\n"
+ "}\n",
+ toString(StatusReport(StatusReport::Params("").
+ internalState("REPLAY").
+ progress(65.5).
+ internalConfigState("OK").
+ message("foo"))));
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/config/.cvsignore b/searchcore/src/tests/proton/config/.cvsignore
new file mode 100644
index 00000000000..13fb04d2a35
--- /dev/null
+++ b/searchcore/src/tests/proton/config/.cvsignore
@@ -0,0 +1,3 @@
+.depend
+Makefile
+config_test
diff --git a/searchcore/src/tests/proton/config/.gitignore b/searchcore/src/tests/proton/config/.gitignore
new file mode 100644
index 00000000000..72c49479fc1
--- /dev/null
+++ b/searchcore/src/tests/proton/config/.gitignore
@@ -0,0 +1 @@
+searchcore_config_test_app
diff --git a/searchcore/src/tests/proton/config/CMakeLists.txt b/searchcore/src/tests/proton/config/CMakeLists.txt
new file mode 100644
index 00000000000..803556c2ae1
--- /dev/null
+++ b/searchcore/src/tests/proton/config/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_config_test_app
+ SOURCES
+ config.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_fconfig
+)
+vespa_add_test(NAME searchcore_config_test_app COMMAND searchcore_config_test_app)
diff --git a/searchcore/src/tests/proton/config/config.cpp b/searchcore/src/tests/proton/config/config.cpp
new file mode 100644
index 00000000000..24cf4cec4cc
--- /dev/null
+++ b/searchcore/src/tests/proton/config/config.cpp
@@ -0,0 +1,268 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("config_test");
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/util/linkedptr.h>
+#include <map>
+#include <vespa/searchcore/proton/server/bootstrapconfigmanager.h>
+#include <vespa/searchcore/proton/server/bootstrapconfig.h>
+#include <vespa/searchcore/proton/server/documentdbconfigmanager.h>
+#include <vespa/searchcore/proton/server/protonconfigurer.h>
+#include <vespa/vespalib/util/varholder.h>
+
+using namespace config;
+using namespace proton;
+using namespace vespa::config::search::core;
+using namespace vespa::config::search::summary;
+using namespace vespa::config::search;
+
+using config::ConfigUri;
+using document::DocumentTypeRepo;
+using document::DocumenttypesConfig;
+using document::DocumenttypesConfigBuilder;
+using search::TuneFileDocumentDB;
+using std::map;
+using vespalib::LinkedPtr;
+using vespalib::VarHolder;
+
+struct DoctypeFixture {
+ typedef vespalib::LinkedPtr<DoctypeFixture> LP;
+ AttributesConfigBuilder attributesBuilder;
+ RankProfilesConfigBuilder rankProfilesBuilder;
+ IndexschemaConfigBuilder indexschemaBuilder;
+ SummaryConfigBuilder summaryBuilder;
+ SummarymapConfigBuilder summarymapBuilder;
+ JuniperrcConfigBuilder juniperrcBuilder;
+};
+
+struct ConfigTestFixture {
+ const std::string configId;
+ ProtonConfigBuilder protonBuilder;
+ DocumenttypesConfigBuilder documenttypesBuilder;
+ map<std::string, DoctypeFixture::LP> dbConfig;
+ ConfigSet set;
+ IConfigContext::SP context;
+ int idcounter;
+
+ ConfigTestFixture(const std::string & id)
+ : configId(id),
+ protonBuilder(),
+ documenttypesBuilder(),
+ dbConfig(),
+ set(),
+ context(new ConfigContext(set)),
+ idcounter(-1)
+ {
+ set.addBuilder(configId, &protonBuilder);
+ set.addBuilder(configId, &documenttypesBuilder);
+ addDocType("_alwaysthere_");
+ }
+
+ void addDocType(const std::string & name)
+ {
+ DocumenttypesConfigBuilder::Documenttype dt;
+ dt.bodystruct = -1270491200;
+ dt.headerstruct = 306916075;
+ dt.id = idcounter--;
+ dt.name = name;
+ dt.version = 0;
+ documenttypesBuilder.documenttype.push_back(dt);
+
+ ProtonConfigBuilder::Documentdb db;
+ db.inputdoctypename = name;
+ db.configid = configId + "/" + name;
+ protonBuilder.documentdb.push_back(db);
+
+ DoctypeFixture::LP fixture(new DoctypeFixture());
+ set.addBuilder(db.configid, &fixture->attributesBuilder);
+ set.addBuilder(db.configid, &fixture->rankProfilesBuilder);
+ set.addBuilder(db.configid, &fixture->indexschemaBuilder);
+ set.addBuilder(db.configid, &fixture->summaryBuilder);
+ set.addBuilder(db.configid, &fixture->summarymapBuilder);
+ set.addBuilder(db.configid, &fixture->juniperrcBuilder);
+ dbConfig[name] = fixture;
+ }
+
+ void removeDocType(const std::string & name)
+ {
+ for (DocumenttypesConfigBuilder::DocumenttypeVector::iterator it(documenttypesBuilder.documenttype.begin()),
+ mt(documenttypesBuilder.documenttype.end());
+ it != mt;
+ it++) {
+ if ((*it).name.compare(name) == 0) {
+ documenttypesBuilder.documenttype.erase(it);
+ break;
+ }
+ }
+
+ for (ProtonConfigBuilder::DocumentdbVector::iterator it(protonBuilder.documentdb.begin()),
+ mt(protonBuilder.documentdb.end());
+ it != mt;
+ it++) {
+ if ((*it).inputdoctypename.compare(name) == 0) {
+ protonBuilder.documentdb.erase(it);
+ break;
+ }
+ }
+ }
+
+ bool configEqual(const std::string & name, DocumentDBConfig::SP dbc) {
+ DoctypeFixture::LP fixture(dbConfig[name]);
+ return (fixture->attributesBuilder == dbc->getAttributesConfig() &&
+ fixture->rankProfilesBuilder == dbc->getRankProfilesConfig() &&
+ fixture->indexschemaBuilder == dbc->getIndexschemaConfig() &&
+ fixture->summaryBuilder == dbc->getSummaryConfig() &&
+ fixture->summarymapBuilder == dbc->getSummarymapConfig() &&
+ fixture->juniperrcBuilder == dbc->getJuniperrcConfig());
+ }
+
+ bool configEqual(BootstrapConfig::SP bootstrapConfig) {
+ return (protonBuilder == bootstrapConfig->getProtonConfig() &&
+ documenttypesBuilder == bootstrapConfig->getDocumenttypesConfig());
+ }
+
+ BootstrapConfig::SP getBootstrapConfig(int64_t generation) const {
+ return BootstrapConfig::SP(new BootstrapConfig(generation,
+ BootstrapConfig::DocumenttypesConfigSP(new DocumenttypesConfig(documenttypesBuilder)),
+ DocumentTypeRepo::SP(new DocumentTypeRepo(documenttypesBuilder)),
+ BootstrapConfig::ProtonConfigSP(new ProtonConfig(protonBuilder)),
+ TuneFileDocumentDB::SP(new TuneFileDocumentDB())));
+ }
+
+ void reload() { context->reload(); }
+};
+
+template <typename ConfigType, typename ConfigOwner>
+struct OwnerFixture : public ConfigOwner
+{
+ volatile bool configured;
+ VarHolder<ConfigType> config;
+
+ OwnerFixture() : configured(false), config() { }
+ bool waitUntilConfigured(int timeout) {
+ FastOS_Time timer;
+ timer.SetNow();
+ while (timer.MilliSecsToNow() < timeout) {
+ if (configured)
+ break;
+ FastOS_Thread::Sleep(100);
+ }
+ return configured;
+ }
+ void reconfigure(const ConfigType & cfg) {
+ assert(cfg->valid());
+ config.set(cfg);
+ configured = true;
+ }
+ bool addExtraConfigs(DocumentDBConfigManager & dbCfgMan) {
+ (void) dbCfgMan;
+ return false;
+ }
+};
+
+typedef OwnerFixture<BootstrapConfig::SP, IBootstrapOwner> BootstrapOwner;
+typedef OwnerFixture<DocumentDBConfig::SP, IDocumentDBConfigOwner> DBOwner;
+
+TEST_F("require that bootstrap config manager creates correct key set", BootstrapConfigManager("foo")) {
+ const ConfigKeySet set(f1.createConfigKeySet());
+ ASSERT_EQUAL(2u, set.size());
+ ConfigKey protonKey(ConfigKey::create<ProtonConfig>("foo"));
+ ConfigKey dtKey(ConfigKey::create<DocumenttypesConfig>("foo"));
+ ASSERT_TRUE(set.find(protonKey) != set.end());
+ ASSERT_TRUE(set.find(dtKey) != set.end());
+}
+
+TEST_FFF("require_that_bootstrap_config_manager_updates_config", ConfigTestFixture("search"),
+ BootstrapConfigManager(f1.configId),
+ ConfigRetriever(f2.createConfigKeySet(), f1.context)) {
+ f2.update(f3.getBootstrapConfigs());
+ ASSERT_TRUE(f1.configEqual(f2.getConfig()));
+ f1.protonBuilder.rpcport = 9010;
+ ASSERT_FALSE(f1.configEqual(f2.getConfig()));
+ f1.reload();
+ f2.update(f3.getBootstrapConfigs());
+ ASSERT_TRUE(f1.configEqual(f2.getConfig()));
+
+ f1.addDocType("foobar");
+ ASSERT_FALSE(f1.configEqual(f2.getConfig()));
+ f1.reload();
+ f2.update(f3.getBootstrapConfigs());
+ ASSERT_TRUE(f1.configEqual(f2.getConfig()));
+}
+
+TEST_FF("require_that_documentdb_config_manager_subscribes_for_config",
+ ConfigTestFixture("search"),
+ DocumentDBConfigManager(f1.configId + "/typea", "typea")) {
+ f1.addDocType("typea");
+ const ConfigKeySet keySet(f2.createConfigKeySet());
+ ASSERT_EQUAL(6u, keySet.size());
+ ConfigRetriever retriever(keySet, f1.context);
+ f2.forwardConfig(f1.getBootstrapConfig(1));
+ f2.update(retriever.getBootstrapConfigs()); // Cheating, but we only need the configs
+ ASSERT_TRUE(f1.configEqual("typea", f2.getConfig()));
+}
+
+TEST_FFF("require_that_protonconfigurer_follows_changes_to_bootstrap",
+ ConfigTestFixture("search"),
+ BootstrapOwner(),
+ ProtonConfigurer(ConfigUri(f1.configId, f1.context), &f2, 60000)) {
+ f3.start();
+ ASSERT_TRUE(f2.configured);
+ ASSERT_TRUE(f1.configEqual(f2.config.get()));
+ f2.configured = false;
+ f1.protonBuilder.rpcport = 9010;
+ f1.reload();
+ ASSERT_TRUE(f2.waitUntilConfigured(120000));
+ ASSERT_TRUE(f1.configEqual(f2.config.get()));
+ f3.close();
+}
+
+TEST_FFF("require_that_protonconfigurer_follows_changes_to_doctypes",
+ ConfigTestFixture("search"),
+ BootstrapOwner(),
+ ProtonConfigurer(ConfigUri(f1.configId, f1.context), &f2, 60000)) {
+ f3.start();
+
+ f2.configured = false;
+ f1.addDocType("typea");
+ f1.reload();
+ ASSERT_TRUE(f2.waitUntilConfigured(60000));
+ ASSERT_TRUE(f1.configEqual(f2.config.get()));
+
+ f2.configured = false;
+ f1.removeDocType("typea");
+ f1.reload();
+ ASSERT_TRUE(f2.waitUntilConfigured(60000));
+ ASSERT_TRUE(f1.configEqual(f2.config.get()));
+ f3.close();
+}
+
+TEST_FFF("require_that_protonconfigurer_reconfigures_dbowners",
+ ConfigTestFixture("search"),
+ BootstrapOwner(),
+ ProtonConfigurer(ConfigUri(f1.configId, f1.context), &f2, 60000)) {
+ f3.start();
+
+ DBOwner dbA;
+ f3.registerDocumentDB(DocTypeName("typea"), &dbA);
+
+ // Add db and verify that we get an initial callback
+ f2.configured = false;
+ f1.addDocType("typea");
+ f1.reload();
+ ASSERT_TRUE(f2.waitUntilConfigured(60000));
+ ASSERT_TRUE(f1.configEqual(f2.config.get()));
+ ASSERT_TRUE(dbA.waitUntilConfigured(60000));
+ ASSERT_TRUE(f1.configEqual("typea", dbA.config.get()));
+
+ // Remove and verify that we don't get any callback
+ dbA.configured = false;
+ f1.removeDocType("typea");
+ f1.reload();
+ ASSERT_TRUE(f2.waitUntilConfigured(60000));
+ ASSERT_FALSE(dbA.waitUntilConfigured(60000));
+ f3.close();
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/create-test.sh b/searchcore/src/tests/proton/create-test.sh
new file mode 100755
index 00000000000..2e3b7fc4af9
--- /dev/null
+++ b/searchcore/src/tests/proton/create-test.sh
@@ -0,0 +1,73 @@
+#!/bin/sh
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+gen_ignore_file() {
+ echo "generating '$1' ..."
+ echo ".depend" > $1
+ echo "Makefile" >> $1
+ echo "${test}_test" >> $1
+}
+
+gen_project_file() {
+ echo "generating '$1' ..."
+ echo "APPLICATION ${test}_test" > $1
+ echo "OBJS $test" >> $1
+ echo "EXTERNALLIBS searchlib document fnet" >> $1
+ echo "EXTERNALLIBS vespalib config vespalog" >> $1
+ echo "" >> $1
+ echo "CUSTOMMAKE" >> $1
+ echo "test: ${test}_test" >> $1
+ echo -e "\t\$(VALGRIND) ./${test}_test" >> $1
+}
+
+gen_source() {
+ echo "generating '$1' ..."
+ echo "#include <vespa/log/log.h>" > $1
+ echo "LOG_SETUP(\"${test}_test\");" >> $1
+ echo "#include <vespa/fastos/fastos.h>" >> $1
+ echo "#include <vespa/vespalib/testkit/testapp.h>" >> $1
+ echo "" >> $1
+ echo "// using namespace ;" >> $1
+ echo "" >> $1
+ echo "TEST_SETUP(Test);" >> $1
+ echo "" >> $1
+ echo "int" >> $1
+ echo "Test::Main()" >> $1
+ echo "{" >> $1
+ echo " TEST_INIT(\"${test}_test\");" >> $1
+ echo " TEST_DONE();" >> $1
+ echo "}" >> $1
+}
+
+gen_desc() {
+ echo "generating '$1' ..."
+ echo "$test test. Take a look at $test.cpp for details." > $1
+}
+
+gen_file_list() {
+ echo "generating '$1' ..."
+ echo "$test.cpp" > $1
+}
+
+if [ $# -ne 1 ]; then
+ echo "usage: $0 <name>"
+ echo " name: name of the test to create"
+ exit 1
+fi
+
+test=$1
+if [ -e $test ]; then
+ echo "$test already present, don't want to mess it up..."
+ exit 1
+fi
+
+echo "creating directory '$test' ..."
+mkdir -p $test || exit 1
+cd $test || exit 1
+test=`basename $test`
+
+gen_ignore_file .cvsignore
+gen_project_file fastos.project
+gen_source $test.cpp
+gen_desc DESC
+gen_file_list FILES
diff --git a/searchcore/src/tests/proton/docsummary/.gitignore b/searchcore/src/tests/proton/docsummary/.gitignore
new file mode 100644
index 00000000000..f5e934f84da
--- /dev/null
+++ b/searchcore/src/tests/proton/docsummary/.gitignore
@@ -0,0 +1,6 @@
+.depend
+Makefile
+docsummary_test
+
+searchcore_docsummary_test_app
+searchcore_summaryfieldconverter_test_app
diff --git a/searchcore/src/tests/proton/docsummary/CMakeLists.txt b/searchcore/src/tests/proton/docsummary/CMakeLists.txt
new file mode 100644
index 00000000000..dca65528840
--- /dev/null
+++ b/searchcore/src/tests/proton/docsummary/CMakeLists.txt
@@ -0,0 +1,32 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_docsummary_test_app
+ SOURCES
+ docsummary.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_initializer
+ searchcore_reprocessing
+ searchcore_index
+ searchcore_persistenceengine
+ searchcore_feedoperation
+ searchcore_docsummary
+ searchcore_matchengine
+ searchcore_summaryengine
+ searchcore_matching
+ searchcore_attribute
+ searchcore_documentmetastore
+ searchcore_bucketdb
+ searchcore_flushengine
+ searchcore_pcommon
+ searchcore_grouping
+ searchcore_proton_metrics
+ searchcore_fconfig
+ searchcore_util
+)
+vespa_add_executable(searchcore_summaryfieldconverter_test_app
+ SOURCES
+ summaryfieldconverter_test.cpp
+ DEPENDS
+ searchcore_docsummary
+)
+vespa_add_test(NAME searchcore_docsummary_test_app COMMAND sh docsummary_test.sh)
diff --git a/searchcore/src/tests/proton/docsummary/DESC b/searchcore/src/tests/proton/docsummary/DESC
new file mode 100644
index 00000000000..ba16d5453b6
--- /dev/null
+++ b/searchcore/src/tests/proton/docsummary/DESC
@@ -0,0 +1 @@
+docsummary test. Take a look at docsummary.cpp for details.
diff --git a/searchcore/src/tests/proton/docsummary/FILES b/searchcore/src/tests/proton/docsummary/FILES
new file mode 100644
index 00000000000..e63fca83f2e
--- /dev/null
+++ b/searchcore/src/tests/proton/docsummary/FILES
@@ -0,0 +1 @@
+docsummary.cpp
diff --git a/searchcore/src/tests/proton/docsummary/attributes.cfg b/searchcore/src/tests/proton/docsummary/attributes.cfg
new file mode 100644
index 00000000000..3866731b410
--- /dev/null
+++ b/searchcore/src/tests/proton/docsummary/attributes.cfg
@@ -0,0 +1,45 @@
+attribute[16]
+attribute[0].name "ba"
+attribute[0].datatype INT32
+attribute[1].name "bb"
+attribute[1].datatype FLOAT
+attribute[2].name "bc"
+attribute[2].datatype STRING
+attribute[3].name "bd"
+attribute[3].datatype INT32
+attribute[3].collectiontype ARRAY
+attribute[4].name "be"
+attribute[4].datatype FLOAT
+attribute[4].collectiontype ARRAY
+attribute[5].name "bf"
+attribute[5].datatype STRING
+attribute[5].collectiontype ARRAY
+attribute[6].name "bg"
+attribute[6].datatype INT32
+attribute[6].collectiontype WEIGHTEDSET
+attribute[7].name "bh"
+attribute[7].datatype FLOAT
+attribute[7].collectiontype WEIGHTEDSET
+attribute[8].name "bi"
+attribute[8].datatype STRING
+attribute[8].collectiontype WEIGHTEDSET
+attribute[9].name "sp1"
+attribute[9].datatype INT32
+attribute[10].name "sp2"
+attribute[10].datatype INT64
+attribute[11].name "ap1"
+attribute[11].datatype INT32
+attribute[11].collectiontype ARRAY
+attribute[12].name "ap2"
+attribute[12].datatype INT64
+attribute[12].collectiontype ARRAY
+attribute[13].name "wp1"
+attribute[13].datatype INT32
+attribute[13].collectiontype WEIGHTEDSET
+attribute[14].name "wp2"
+attribute[14].datatype INT64
+attribute[14].collectiontype WEIGHTEDSET
+attribute[15].name "bj"
+attribute[15].datatype TENSOR
+attribute[15].tensortype "tensor(x{},y{})"
+attribute[15].collectiontype SINGLE
diff --git a/searchcore/src/tests/proton/docsummary/docsummary.cpp b/searchcore/src/tests/proton/docsummary/docsummary.cpp
new file mode 100644
index 00000000000..80eaf56bcba
--- /dev/null
+++ b/searchcore/src/tests/proton/docsummary/docsummary.cpp
@@ -0,0 +1,1296 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("docsummary_test");
+#include <vespa/document/fieldvalue/document.h>
+#include <vespa/searchcore/proton/attribute/attribute_writer.h>
+#include <vespa/searchcore/proton/common/bucketfactory.h>
+#include <vespa/searchcore/proton/docsummary/docsumcontext.h>
+#include <vespa/searchcore/proton/docsummary/documentstoreadapter.h>
+#include <vespa/searchcore/proton/docsummary/summarymanager.h>
+#include <vespa/searchcore/proton/server/documentdb.h>
+#include <vespa/searchcore/proton/server/memoryconfigstore.h>
+#include <vespa/searchcore/proton/metrics/metricswireservice.h>
+#include <vespa/searchcore/proton/server/summaryadapter.h>
+#include <vespa/searchlib/common/idestructorcallback.h>
+#include <vespa/searchlib/docstore/logdocumentstore.h>
+#include <vespa/searchlib/engine/docsumapi.h>
+#include <vespa/searchlib/index/docbuilder.h>
+#include <vespa/searchlib/index/dummyfileheadercontext.h>
+#include <vespa/searchlib/transactionlog/translogserver.h>
+#include <tests/proton/common/dummydbowner.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/searchlib/transactionlog/nosyncproxy.h>
+#include <vespa/vespalib/tensor/tensor_factory.h>
+#include <vespa/vespalib/tensor/default_tensor.h>
+#include <vespa/searchlib/attribute/tensorattribute.h>
+
+using namespace document;
+using namespace search;
+using namespace search::docsummary;
+using namespace search::engine;
+using namespace search::index;
+using namespace search::transactionlog;
+using search::TuneFileDocumentDB;
+using document::DocumenttypesConfig;
+using storage::spi::Timestamp;
+using search::index::DummyFileHeaderContext;
+using vespa::config::search::core::ProtonConfig;
+using vespalib::tensor::Tensor;
+using vespalib::tensor::TensorCells;
+using vespalib::tensor::TensorDimensions;
+using vespalib::tensor::TensorFactory;
+
+typedef std::unique_ptr<GeneralResult> GeneralResultPtr;
+
+namespace proton {
+
+class DirMaker
+{
+public:
+ DirMaker(const vespalib::string & dir) :
+ _dir(dir)
+ {
+ FastOS_File::MakeDirectory(dir.c_str());
+ }
+ ~DirMaker()
+ {
+ FastOS_File::EmptyAndRemoveDirectory(_dir.c_str());
+ }
+private:
+ vespalib::string _dir;
+};
+
+class BuildContext
+{
+public:
+ DirMaker _dmk;
+ DocBuilder _bld;
+ DocumentTypeRepo::SP _repo;
+ DummyFileHeaderContext _fileHeaderContext;
+ vespalib::ThreadStackExecutor _summaryExecutor;
+ search::transactionlog::NoSyncProxy _noTlSyncer;
+ search::LogDocumentStore _str;
+ uint64_t _serialNum;
+
+ BuildContext(const Schema &schema)
+ : _dmk("summary"),
+ _bld(schema),
+ _repo(new DocumentTypeRepo(_bld.getDocumentType())),
+ _summaryExecutor(4, 128 * 1024),
+ _noTlSyncer(),
+ _str(_summaryExecutor, "summary",
+ LogDocumentStore::Config(
+ DocumentStore::Config(),
+ LogDataStore::Config()),
+ GrowStrategy(),
+ TuneFileSummary(),
+ _fileHeaderContext,
+ _noTlSyncer,
+ NULL),
+ _serialNum(1)
+ {
+ }
+
+ ~BuildContext(void)
+ {
+ }
+
+ void
+ endDocument(uint32_t docId)
+ {
+ Document::UP doc = _bld.endDocument();
+ _str.write(_serialNum++, *doc, docId);
+ }
+
+ FieldCacheRepo::UP createFieldCacheRepo(const ResultConfig &resConfig) const {
+ return FieldCacheRepo::UP(new FieldCacheRepo(resConfig, _bld.getDocumentType()));
+ }
+};
+
+
+namespace {
+
+const char *
+getDocTypeName(void)
+{
+ return "searchdocument";
+}
+
+Tensor::UP createTensor(const TensorCells &cells,
+ const TensorDimensions &dimensions) {
+ vespalib::tensor::DefaultTensor::builder builder;
+ return TensorFactory::create(cells, dimensions, builder);
+}
+
+} // namespace
+
+
+class DBContext : public DummyDBOwner
+{
+public:
+ DirMaker _dmk;
+ DummyFileHeaderContext _fileHeaderContext;
+ TransLogServer _tls;
+ vespalib::ThreadStackExecutor _summaryExecutor;
+ bool _mkdirOk;
+ matching::QueryLimiter _queryLimiter;
+ vespalib::Clock _clock;
+ DummyWireService _dummy;
+ config::DirSpec _spec;
+ DocumentDBConfigHelper _configMgr;
+ DocumentDBConfig::DocumenttypesConfigSP _documenttypesConfig;
+ const DocumentTypeRepo::SP _repo;
+ TuneFileDocumentDB::SP _tuneFileDocumentDB;
+ std::unique_ptr<DocumentDB> _ddb;
+ AttributeWriter::UP _aw;
+ ISummaryAdapter::SP _sa;
+
+ DBContext(const DocumentTypeRepo::SP &repo, const char *docTypeName)
+ : _dmk(docTypeName),
+ _fileHeaderContext(),
+ _tls("tmp", 9013, ".", _fileHeaderContext),
+ _summaryExecutor(8, 128*1024),
+ _mkdirOk(FastOS_File::MakeDirectory("tmpdb")),
+ _queryLimiter(),
+ _clock(),
+ _dummy(),
+ _spec("."),
+ _configMgr(_spec, getDocTypeName()),
+ _documenttypesConfig(new DocumenttypesConfig()),
+ _repo(repo),
+ _tuneFileDocumentDB(new TuneFileDocumentDB()),
+ _ddb(),
+ _aw(),
+ _sa()
+ {
+ assert(_mkdirOk);
+ BootstrapConfig::SP b(new BootstrapConfig(1,
+ _documenttypesConfig,
+ _repo,
+ BootstrapConfig::ProtonConfigSP(new ProtonConfig()),
+ _tuneFileDocumentDB));
+ _configMgr.forwardConfig(b);
+ _configMgr.nextGeneration(0);
+ if (! FastOS_File::MakeDirectory((std::string("tmpdb/") + docTypeName).c_str())) { abort(); }
+ _ddb.reset(new DocumentDB("tmpdb",
+ _configMgr.getConfig(),
+ "tcp/localhost:9013",
+ _queryLimiter,
+ _clock,
+ DocTypeName(docTypeName),
+ ProtonConfig(),
+ *this,
+ _summaryExecutor,
+ _summaryExecutor,
+ NULL,
+ _dummy,
+ _fileHeaderContext,
+ ConfigStore::UP(new MemoryConfigStore),
+ std::make_shared<vespalib::
+ ThreadStackExecutor>
+ (16, 128 * 1024))),
+ _ddb->start();
+ _ddb->waitForOnlineState();
+ _aw = AttributeWriter::UP(new AttributeWriter(_ddb->
+ getReadySubDB()->
+ getAttributeManager()));
+ _sa = _ddb->getReadySubDB()->getSummaryAdapter();
+ }
+ ~DBContext()
+ {
+ _sa.reset();
+ _aw.reset();
+ _ddb.reset();
+ FastOS_File::EmptyAndRemoveDirectory("tmp");
+ FastOS_File::EmptyAndRemoveDirectory("tmpdb");
+ }
+
+ void
+ put(const document::Document &doc, const search::DocumentIdT lid)
+ {
+ const document::DocumentId &docId = doc.getId();
+ typedef DocumentMetaStore::Result PutRes;
+ IDocumentMetaStore &dms = _ddb->getReadySubDB()->getDocumentMetaStoreContext().get();
+ PutRes putRes(dms.put(docId.getGlobalId(),
+ BucketFactory::getBucketId(docId),
+ Timestamp(0u),
+ lid));
+ LOG_ASSERT(putRes.ok());
+ uint64_t serialNum = _ddb->getFeedHandler().incSerialNum();
+ _aw->put(serialNum, doc, lid, true, std::shared_ptr<IDestructorCallback>());
+ _ddb->getReadySubDB()->
+ getAttributeManager()->getAttributeFieldWriter().sync();
+ _sa->put(serialNum, doc, lid);
+ const GlobalId &gid = docId.getGlobalId();
+ BucketId bucketId(gid.convertToBucketId());
+ bucketId.setUsedBits(8);
+ storage::spi::Timestamp ts(0);
+ DbDocumentId dbdId(lid);
+ DbDocumentId prevDbdId(0);
+ document::Document::SP xdoc(new document::Document(doc));
+ PutOperation op(bucketId,
+ ts,
+ xdoc,
+ serialNum,
+ dbdId,
+ prevDbdId);
+ _ddb->getFeedHandler().storeOperation(op);
+ SearchView *sv(dynamic_cast<SearchView *>
+ (_ddb->getReadySubDB()->getSearchView().get()));
+ if (sv != NULL) {
+ // cf. FeedView::putAttributes()
+ DocIdLimit &docIdLimit = sv->getDocIdLimit();
+ if (docIdLimit.get() <= lid)
+ docIdLimit.set(lid + 1);
+ }
+ }
+};
+
+class Test : public vespalib::TestApp
+{
+private:
+ std::unique_ptr<vespa::config::search::SummaryConfig> _summaryCfg;
+ ResultConfig _resultCfg;
+ std::set<vespalib::string> _markupFields;
+
+ const vespa::config::search::SummaryConfig &
+ getSummaryConfig() const
+ {
+ return *_summaryCfg;
+ }
+
+ const ResultConfig &getResultConfig() const
+ {
+ return _resultCfg;
+ }
+
+ const std::set<vespalib::string> &
+ getMarkupFields(void) const
+ {
+ return _markupFields;
+ }
+
+ GeneralResultPtr
+ getResult(DocumentStoreAdapter & dsa, uint32_t docId);
+
+ GeneralResultPtr
+ getResult(const DocsumReply & reply, uint32_t id, uint32_t resultClassID);
+
+ bool
+ assertString(const std::string & exp,
+ const std::string & fieldName,
+ DocumentStoreAdapter &dsa,
+ uint32_t id);
+
+ bool
+ assertString(const std::string &exp,
+ const std::string &fieldName,
+ const DocsumReply &reply,
+ uint32_t id,
+ uint32_t resultClassID);
+
+ bool
+ assertSlime(const std::string &exp,
+ const DocsumReply &reply,
+ uint32_t id);
+
+ void
+ requireThatAdapterHandlesAllFieldTypes();
+
+ void
+ requireThatAdapterHandlesMultipleDocuments();
+
+ void
+ requireThatAdapterHandlesDocumentIdField();
+
+ void
+ requireThatDocsumRequestIsProcessed();
+
+ void
+ requireThatRewritersAreUsed();
+
+ void
+ requireThatAttributesAreUsed();
+
+ void
+ requireThatSummaryAdapterHandlesPutAndRemove();
+
+ void
+ requireThatAnnotationsAreUsed();
+
+ void
+ requireThatUrisAreUsed();
+
+ void
+ requireThatPositionsAreUsed();
+
+ void
+ requireThatRawFieldsWorks();
+
+ void
+ requireThatFieldCacheRepoCanReturnDefaultFieldCache();
+
+public:
+ Test();
+ int Main();
+};
+
+
+GeneralResultPtr
+Test::getResult(DocumentStoreAdapter & dsa, uint32_t docId)
+{
+ DocsumStoreValue docsum = dsa.getMappedDocsum(docId, false);
+ ASSERT_TRUE(docsum.pt() != NULL);
+ GeneralResultPtr retval(new GeneralResult(dsa.getResultClass(),
+ 0, 0, 0));
+ // skip the 4 byte class id
+ ASSERT_TRUE(retval->unpack(docsum.pt() + 4,
+ docsum.len() - 4) == 0);
+ return retval;
+}
+
+
+GeneralResultPtr
+Test::getResult(const DocsumReply & reply, uint32_t id, uint32_t resultClassID)
+{
+ GeneralResultPtr retval(new GeneralResult(getResultConfig().
+ LookupResultClass(resultClassID),
+ 0, 0, 0));
+ const DocsumReply::Docsum & docsum = reply.docsums[id];
+ // skip the 4 byte class id
+ ASSERT_EQUAL(0, retval->unpack(docsum.data.c_str() + 4, docsum.data.size() - 4));
+ return retval;
+}
+
+
+bool
+Test::assertString(const std::string & exp, const std::string & fieldName,
+ DocumentStoreAdapter &dsa,
+ uint32_t id)
+{
+ GeneralResultPtr res = getResult(dsa, id);
+ return EXPECT_EQUAL(exp, std::string(res->GetEntry(fieldName.c_str())->
+ _stringval,
+ res->GetEntry(fieldName.c_str())->
+ _stringlen));
+}
+
+
+bool
+Test::assertString(const std::string & exp, const std::string & fieldName,
+ const DocsumReply & reply,
+ uint32_t id, uint32_t resultClassID)
+{
+ GeneralResultPtr res = getResult(reply, id, resultClassID);
+ return EXPECT_EQUAL(exp, std::string(res->GetEntry(fieldName.c_str())->
+ _stringval,
+ res->GetEntry(fieldName.c_str())->
+ _stringlen));
+}
+
+
+bool
+Test::assertSlime(const std::string &exp, const DocsumReply &reply, uint32_t id)
+{
+ const DocsumReply::Docsum & docsum = reply.docsums[id];
+ uint32_t classId;
+ ASSERT_LESS_EQUAL(sizeof(classId), docsum.data.size());
+ memcpy(&classId, docsum.data.c_str(), sizeof(classId));
+ ASSERT_EQUAL(::search::fs4transport::SLIME_MAGIC_ID, classId);
+ vespalib::Slime slime;
+ vespalib::slime::Memory serialized(docsum.data.c_str() + sizeof(classId),
+ docsum.data.size() - sizeof(classId));
+ size_t decodeRes = vespalib::slime::BinaryFormat::decode(serialized,
+ slime);
+ ASSERT_EQUAL(decodeRes, serialized.size);
+ vespalib::Slime expSlime;
+ size_t used = vespalib::slime::JsonFormat::decode(exp, expSlime);
+ EXPECT_EQUAL(exp.size(), used);
+ return EXPECT_EQUAL(expSlime, slime);
+}
+
+void
+Test::requireThatAdapterHandlesAllFieldTypes()
+{
+ Schema s;
+ s.addSummaryField(Schema::SummaryField("a", Schema::INT8));
+ s.addSummaryField(Schema::SummaryField("b", Schema::INT16));
+ s.addSummaryField(Schema::SummaryField("c", Schema::INT32));
+ s.addSummaryField(Schema::SummaryField("d", Schema::INT64));
+ s.addSummaryField(Schema::SummaryField("e", Schema::FLOAT));
+ s.addSummaryField(Schema::SummaryField("f", Schema::DOUBLE));
+ s.addSummaryField(Schema::SummaryField("g", Schema::STRING));
+ s.addSummaryField(Schema::SummaryField("h", Schema::STRING));
+ s.addSummaryField(Schema::SummaryField("i", Schema::RAW));
+ s.addSummaryField(Schema::SummaryField("j", Schema::RAW));
+ s.addSummaryField(Schema::SummaryField("k", Schema::STRING));
+ s.addSummaryField(Schema::SummaryField("l", Schema::STRING));
+
+ BuildContext bc(s);
+ bc._bld.startDocument("doc::0");
+ bc._bld.startSummaryField("a").addInt(255).endField();
+ bc._bld.startSummaryField("b").addInt(32767).endField();
+ bc._bld.startSummaryField("c").addInt(2147483647).endField();
+ bc._bld.startSummaryField("d").addInt(2147483648).endField();
+ bc._bld.startSummaryField("e").addFloat(1234.56).endField();
+ bc._bld.startSummaryField("f").addFloat(9876.54).endField();
+ bc._bld.startSummaryField("g").addStr("foo").endField();
+ bc._bld.startSummaryField("h").addStr("bar").endField();
+ bc._bld.startSummaryField("i").addStr("baz").endField();
+ bc._bld.startSummaryField("j").addStr("qux").endField();
+ bc._bld.startSummaryField("k").addStr("<foo>").endField();
+ bc._bld.startSummaryField("l").addStr("{foo:10}").endField();
+ bc.endDocument(0);
+
+ DocumentStoreAdapter dsa(bc._str,
+ *bc._repo,
+ getResultConfig(), "class0",
+ bc.createFieldCacheRepo(getResultConfig())->getFieldCache("class0"),
+ getMarkupFields());
+ GeneralResultPtr res = getResult(dsa, 0);
+ EXPECT_EQUAL(255u, res->GetEntry("a")->_intval);
+ EXPECT_EQUAL(32767u, res->GetEntry("b")->_intval);
+ EXPECT_EQUAL(2147483647u, res->GetEntry("c")->_intval);
+ EXPECT_EQUAL(2147483648u, res->GetEntry("d")->_int64val);
+ EXPECT_APPROX(1234.56, res->GetEntry("e")->_doubleval, 10e-5);
+ EXPECT_APPROX(9876.54, res->GetEntry("f")->_doubleval, 10e-5);
+ EXPECT_EQUAL("foo", std::string(res->GetEntry("g")->_stringval,
+ res->GetEntry("g")->_stringlen));
+ EXPECT_EQUAL("bar", std::string(res->GetEntry("h")->_stringval,
+ res->GetEntry("h")->_stringlen));
+ EXPECT_EQUAL("baz", std::string(res->GetEntry("i")->_dataval,
+ res->GetEntry("i")->_datalen));
+ EXPECT_EQUAL("qux", std::string(res->GetEntry("j")->_dataval,
+ res->GetEntry("j")->_datalen));
+ EXPECT_EQUAL("<foo>", std::string(res->GetEntry("k")->_stringval,
+ res->GetEntry("k")->_stringlen));
+ EXPECT_EQUAL("{foo:10}", std::string(res->GetEntry("l")->_stringval,
+ res->GetEntry("l")->_stringlen));
+}
+
+
+void
+Test::requireThatAdapterHandlesMultipleDocuments()
+{
+ Schema s;
+ s.addSummaryField(Schema::SummaryField("a", Schema::INT32));
+
+ BuildContext bc(s);
+ bc._bld.startDocument("doc::0").
+ startSummaryField("a").
+ addInt(1000).
+ endField();
+ bc.endDocument(0);
+ bc._bld.startDocument("doc::1").
+ startSummaryField("a").
+ addInt(2000).endField();
+ bc.endDocument(1);
+
+ DocumentStoreAdapter dsa(bc._str, *bc._repo, getResultConfig(), "class1",
+ bc.createFieldCacheRepo(getResultConfig())->getFieldCache("class1"),
+ getMarkupFields());
+ { // doc 0
+ GeneralResultPtr res = getResult(dsa, 0);
+ EXPECT_EQUAL(1000u, res->GetEntry("a")->_intval);
+ }
+ { // doc 1
+ GeneralResultPtr res = getResult(dsa, 1);
+ EXPECT_EQUAL(2000u, res->GetEntry("a")->_intval);
+ }
+ { // doc 2
+ DocsumStoreValue docsum = dsa.getMappedDocsum(2, false);
+ EXPECT_TRUE(docsum.pt() == NULL);
+ }
+ { // doc 0 (again)
+ GeneralResultPtr res = getResult(dsa, 0);
+ EXPECT_EQUAL(1000u, res->GetEntry("a")->_intval);
+ }
+ EXPECT_EQUAL(0u, bc._str.lastSyncToken());
+ uint64_t flushToken = bc._str.initFlush(bc._serialNum - 1);
+ bc._str.flush(flushToken);
+}
+
+
+void
+Test::requireThatAdapterHandlesDocumentIdField()
+{
+ Schema s;
+ s.addSummaryField(Schema::SummaryField("documentid",
+ Schema::STRING));
+ BuildContext bc(s);
+ bc._bld.startDocument("doc::0").
+ startSummaryField("documentid").
+ addStr("foo").
+ endField();
+ bc.endDocument(0);
+ DocumentStoreAdapter dsa(bc._str, *bc._repo, getResultConfig(), "class4",
+ bc.createFieldCacheRepo(getResultConfig())->getFieldCache("class4"),
+ getMarkupFields());
+ GeneralResultPtr res = getResult(dsa, 0);
+ EXPECT_EQUAL("doc::0", std::string(res->GetEntry("documentid")->_stringval,
+ res->GetEntry("documentid")->_stringlen));
+}
+
+
+GlobalId gid1 = DocumentId("doc::1").getGlobalId(); // lid 1
+GlobalId gid2 = DocumentId("doc::2").getGlobalId(); // lid 2
+GlobalId gid3 = DocumentId("doc::3").getGlobalId(); // lid 3
+GlobalId gid4 = DocumentId("doc::4").getGlobalId(); // lid 4
+GlobalId gid9 = DocumentId("doc::9").getGlobalId(); // not existing
+
+
+void
+Test::requireThatDocsumRequestIsProcessed()
+{
+ Schema s;
+ s.addSummaryField(Schema::SummaryField("a", Schema::INT32));
+
+ BuildContext bc(s);
+ DBContext dc(bc._repo, getDocTypeName());
+ dc.put(*bc._bld.startDocument("doc::1").
+ startSummaryField("a").
+ addInt(10).
+ endField().
+ endDocument(),
+ 1);
+ dc.put(*bc._bld.startDocument("doc::2").
+ startSummaryField("a").
+ addInt(20).
+ endField().
+ endDocument(),
+ 2);
+ dc.put(*bc._bld.startDocument("doc::3").
+ startSummaryField("a").
+ addInt(30).
+ endField().
+ endDocument(),
+ 3);
+ dc.put(*bc._bld.startDocument("doc::4").
+ startSummaryField("a").
+ addInt(40).
+ endField().
+ endDocument(),
+ 4);
+ dc.put(*bc._bld.startDocument("doc::5").
+ startSummaryField("a").
+ addInt(50).
+ endField().
+ endDocument(),
+ 5);
+
+ DocsumRequest req;
+ req.resultClassName = "class1";
+ req.hits.push_back(DocsumRequest::Hit(gid2));
+ req.hits.push_back(DocsumRequest::Hit(gid4));
+ req.hits.push_back(DocsumRequest::Hit(gid9));
+ DocsumReply::UP rep = dc._ddb->getDocsums(req);
+ EXPECT_EQUAL(3u, rep->docsums.size());
+ EXPECT_EQUAL(2u, rep->docsums[0].docid);
+ EXPECT_EQUAL(gid2, rep->docsums[0].gid);
+ EXPECT_EQUAL(20u, getResult(*rep, 0, 1)->GetEntry("a")->_intval);
+ EXPECT_EQUAL(4u, rep->docsums[1].docid);
+ EXPECT_EQUAL(gid4, rep->docsums[1].gid);
+ EXPECT_EQUAL(40u, getResult(*rep, 1, 1)->GetEntry("a")->_intval);
+ EXPECT_EQUAL(search::endDocId, rep->docsums[2].docid);
+ EXPECT_EQUAL(gid9, rep->docsums[2].gid);
+ EXPECT_TRUE(rep->docsums[2].data.get() == NULL);
+}
+
+
+void
+Test::requireThatRewritersAreUsed()
+{
+ Schema s;
+ s.addSummaryField(Schema::SummaryField("aa", Schema::INT32));
+ s.addSummaryField(Schema::SummaryField("ab", Schema::INT32));
+
+ BuildContext bc(s);
+ DBContext dc(bc._repo, getDocTypeName());
+ dc.put(*bc._bld.startDocument("doc::1").
+ startSummaryField("aa").
+ addInt(10).
+ endField().
+ startSummaryField("ab").
+ addInt(20).
+ endField().
+ endDocument(),
+ 1);
+
+ DocsumRequest req;
+ req.resultClassName = "class2";
+ req.hits.push_back(DocsumRequest::Hit(gid1));
+ DocsumReply::UP rep = dc._ddb->getDocsums(req);
+ EXPECT_EQUAL(1u, rep->docsums.size());
+ EXPECT_EQUAL(20u, getResult(*rep, 0, 2)->GetEntry("aa")->_intval);
+ EXPECT_EQUAL(0u, getResult(*rep, 0, 2)->GetEntry("ab")->_intval);
+}
+
+
+void
+addField(Schema & s,
+ const std::string &name,
+ Schema::DataType dtype,
+ Schema::CollectionType ctype)
+{
+ s.addSummaryField(Schema::SummaryField(name, dtype, ctype));
+ s.addAttributeField(Schema::AttributeField(name, dtype, ctype));
+}
+
+
+void
+Test::requireThatAttributesAreUsed()
+{
+ Schema s;
+ addField(s, "ba",
+ Schema::INT32, Schema::SINGLE);
+ addField(s, "bb",
+ Schema::FLOAT, Schema::SINGLE);
+ addField(s, "bc",
+ Schema::STRING, Schema::SINGLE);
+ addField(s, "bd",
+ Schema::INT32, Schema::ARRAY);
+ addField(s, "be",
+ Schema::FLOAT, Schema::ARRAY);
+ addField(s, "bf",
+ Schema::STRING, Schema::ARRAY);
+ addField(s, "bg",
+ Schema::INT32, Schema::WEIGHTEDSET);
+ addField(s, "bh",
+ Schema::FLOAT, Schema::WEIGHTEDSET);
+ addField(s, "bi",
+ Schema::STRING, Schema::WEIGHTEDSET);
+ addField(s, "bj", Schema::TENSOR, Schema::SINGLE);
+
+ BuildContext bc(s);
+ DBContext dc(bc._repo, getDocTypeName());
+ dc.put(*bc._bld.startDocument("doc::1").
+ endDocument(),
+ 1); // empty doc
+ dc.put(*bc._bld.startDocument("doc::2").
+ startAttributeField("ba").
+ addInt(10).
+ endField().
+ startAttributeField("bb").
+ addFloat(10.1).
+ endField().
+ startAttributeField("bc").
+ addStr("foo").
+ endField().
+ startAttributeField("bd").
+ startElement().
+ addInt(20).
+ endElement().
+ startElement().
+ addInt(30).
+ endElement().
+ endField().
+ startAttributeField("be").
+ startElement().
+ addFloat(20.2).
+ endElement().
+ startElement().
+ addFloat(30.3).
+ endElement().
+ endField().
+ startAttributeField("bf").
+ startElement().
+ addStr("bar").
+ endElement().
+ startElement().
+ addStr("baz").
+ endElement().
+ endField().
+ startAttributeField("bg").
+ startElement(2).
+ addInt(40).
+ endElement().
+ startElement(3).
+ addInt(50).
+ endElement().
+ endField().
+ startAttributeField("bh").
+ startElement(4).
+ addFloat(40.4).
+ endElement().
+ startElement(5).
+ addFloat(50.5).
+ endElement().
+ endField().
+ startAttributeField("bi").
+ startElement(7).
+ addStr("quux").
+ endElement().
+ startElement(6).
+ addStr("qux").
+ endElement().
+ endField().
+ startAttributeField("bj").
+ addTensor(createTensor({ {{}, 3} }, { "x", "y"})).
+ endField().
+ endDocument(),
+ 2);
+ dc.put(*bc._bld.startDocument("doc::3").
+ endDocument(),
+ 3); // empty doc
+
+ DocsumRequest req;
+ req.resultClassName = "class3";
+ req.hits.push_back(DocsumRequest::Hit(gid2));
+ req.hits.push_back(DocsumRequest::Hit(gid3));
+ DocsumReply::UP rep = dc._ddb->getDocsums(req);
+ uint32_t rclass = 3;
+
+ EXPECT_EQUAL(2u, rep->docsums.size());
+ EXPECT_EQUAL(10u, getResult(*rep, 0, rclass)->GetEntry("ba")->_intval);
+ EXPECT_APPROX(10.1, getResult(*rep, 0, rclass)->GetEntry("bb")->_doubleval,
+ 10e-5);
+ EXPECT_TRUE(assertString("foo", "bc", *rep, 0, rclass));
+ EXPECT_TRUE(assertString("[\"20\",\"30\"]", "bd", *rep, 0, rclass));
+ EXPECT_TRUE(assertString("[\"20.2\",\"30.3\"]", "be", *rep, 0, rclass));
+ EXPECT_TRUE(assertString("[\"bar\",\"baz\"]", "bf", *rep, 0, rclass));
+ EXPECT_TRUE(assertString("[[\"40\",2],[\"50\",3]]", "bg",
+ *rep, 0, rclass));
+ EXPECT_TRUE(assertString("[[\"40.4\",4],[\"50.5\",5]]", "bh",
+ *rep, 0, rclass));
+ EXPECT_TRUE(assertString("[[\"quux\",7],[\"qux\",6]]", "bi",
+ *rep, 0, rclass));
+ EXPECT_TRUE(assertString("{\"dimensions\":[\"x\",\"y\"],"
+ "\"cells\":[{\"address\":{},\"value\":3}]}",
+ "bj", *rep, 0, rclass));
+
+ // empty doc
+ EXPECT_TRUE(search::attribute::isUndefined<int32_t>
+ (getResult(*rep, 1, rclass)->GetEntry("ba")->_intval));
+ EXPECT_TRUE(search::attribute::isUndefined<float>
+ (getResult(*rep, 1, rclass)->GetEntry("bb")->_doubleval));
+ EXPECT_TRUE(assertString("", "bc", *rep, 1, rclass));
+ EXPECT_TRUE(assertString("[]", "bd", *rep, 1, rclass));
+ EXPECT_TRUE(assertString("[]", "be", *rep, 1, rclass));
+ EXPECT_TRUE(assertString("[]", "bf", *rep, 1, rclass));
+ EXPECT_TRUE(assertString("[]", "bg", *rep, 1, rclass));
+ EXPECT_TRUE(assertString("[]", "bh", *rep, 1, rclass));
+ EXPECT_TRUE(assertString("[]", "bi", *rep, 1, rclass));
+ EXPECT_TRUE(assertString("", "bj", *rep, 1, rclass));
+
+ proton::IAttributeManager::SP attributeManager =
+ dc._ddb->getReadySubDB()->getAttributeManager();
+ search::ISequencedTaskExecutor &attributeFieldWriter =
+ attributeManager->getAttributeFieldWriter();
+ search::AttributeVector *bjAttr =
+ attributeManager->getWritableAttribute("bj");
+ search::attribute::TensorAttribute *bjTensorAttr =
+ dynamic_cast<search::attribute::TensorAttribute *>(bjAttr);
+
+ attributeFieldWriter.
+ execute("bj",
+ [&]() { bjTensorAttr->setTensor(3,
+ *createTensor({ {{}, 4} }, { "x"}));
+ bjTensorAttr->commit(); });
+ attributeFieldWriter.sync();
+
+ DocsumReply::UP rep2 = dc._ddb->getDocsums(req);
+ EXPECT_TRUE(assertString("{\"dimensions\":[\"x\",\"y\"],"
+ "\"cells\":[{\"address\":{},\"value\":4}]}",
+ "bj", *rep2, 1, rclass));
+
+ DocsumRequest req3;
+ req3.resultClassName = "class3";
+ req3._flags = ::search::fs4transport::GDFLAG_ALLOW_SLIME;
+ req3.hits.push_back(DocsumRequest::Hit(gid3));
+ DocsumReply::UP rep3 = dc._ddb->getDocsums(req3);
+
+ EXPECT_TRUE(assertSlime("{bd:[],be:[],bf:[],bg:[],"
+ "bh:[],bi:[],"
+ "bj:{dimensions:['x','y'],"
+ "cells:[{address:{},value:4.0}]}}",
+ *rep3, 0));
+}
+
+
+void
+Test::requireThatSummaryAdapterHandlesPutAndRemove()
+{
+ Schema s;
+ s.addSummaryField(Schema::SummaryField("f1",
+ Schema::STRING,
+ Schema::SINGLE));
+ BuildContext bc(s);
+ DBContext dc(bc._repo, getDocTypeName());
+ Document::UP exp = bc._bld.startDocument("doc::1").
+ startSummaryField("f1").
+ addStr("foo").
+ endField().
+ endDocument();
+ dc._sa->put(1, *exp, 1);
+ IDocumentStore & store =
+ dc._ddb->getReadySubDB()->getSummaryManager()->getBackingStore();
+ Document::UP act = store.read(1, *bc._repo);
+ EXPECT_TRUE(act.get() != NULL);
+ EXPECT_EQUAL(exp->getType(), act->getType());
+ EXPECT_EQUAL("foo", act->getValue("f1")->toString());
+ dc._sa->remove(2, 1);
+ EXPECT_TRUE(store.read(1, *bc._repo).get() == NULL);
+}
+
+
+const std::string TERM_ORIG = "\357\277\271";
+const std::string TERM_INDEX = "\357\277\272";
+const std::string TERM_END = "\357\277\273";
+const std::string TERM_SEP = "\037";
+const std::string TERM_EMPTY = "";
+namespace
+{
+ const std::string empty;
+}
+
+void
+Test::requireThatAnnotationsAreUsed()
+{
+ Schema s;
+ s.addIndexField(Schema::IndexField("g",
+ Schema::STRING,
+ Schema::SINGLE));
+ s.addSummaryField(Schema::SummaryField("g",
+ Schema::STRING,
+ Schema::SINGLE));
+ s.addIndexField(Schema::IndexField("dynamicstring",
+ Schema::STRING,
+ Schema::SINGLE));
+ s.addSummaryField(Schema::SummaryField("dynamicstring",
+ Schema::STRING,
+ Schema::SINGLE));
+ BuildContext bc(s);
+ DBContext dc(bc._repo, getDocTypeName());
+ Document::UP exp = bc._bld.startDocument("doc::0").
+ startIndexField("g").
+ addStr("foo").
+ addStr("bar").
+ addTermAnnotation("baz").
+ endField().
+ startIndexField("dynamicstring").
+ setAutoAnnotate(false).
+ addStr("foo").
+ addSpan().
+ addAlphabeticTokenAnnotation().
+ addTermAnnotation().
+ addNoWordStr(" ").
+ addSpan().
+ addSpaceTokenAnnotation().
+ addStr("bar").
+ addSpan().
+ addAlphabeticTokenAnnotation().
+ addTermAnnotation("baz").
+ setAutoAnnotate(true).
+ endField().
+ endDocument();
+ dc._sa->put(1, *exp, 1);
+
+ IDocumentStore & store =
+ dc._ddb->getReadySubDB()->getSummaryManager()->getBackingStore();
+ Document::UP act = store.read(1, *bc._repo);
+ EXPECT_TRUE(act.get() != NULL);
+ EXPECT_EQUAL(exp->getType(), act->getType());
+ EXPECT_EQUAL("foo bar", act->getValue("g")->getAsString());
+ EXPECT_EQUAL("foo bar", act->getValue("dynamicstring")->getAsString());
+
+ DocumentStoreAdapter dsa(store, *bc._repo, getResultConfig(), "class0",
+ bc.createFieldCacheRepo(getResultConfig())->getFieldCache("class0"),
+ getMarkupFields());
+ EXPECT_TRUE(assertString("foo bar", "g", dsa, 1));
+ EXPECT_TRUE(assertString(TERM_EMPTY + "foo" + TERM_SEP +
+ " " + TERM_SEP +
+ TERM_ORIG + "bar" + TERM_INDEX + "baz" + TERM_END +
+ TERM_SEP,
+ "dynamicstring", dsa, 1));
+}
+
+void
+Test::requireThatUrisAreUsed()
+{
+ Schema s;
+ s.addUriIndexFields(Schema::IndexField("urisingle",
+ Schema::STRING,
+ Schema::SINGLE));
+ s.addSummaryField(Schema::SummaryField("urisingle",
+ Schema::STRING,
+ Schema::SINGLE));
+ s.addUriIndexFields(Schema::IndexField("uriarray",
+ Schema::STRING,
+ Schema::ARRAY));
+ s.addSummaryField(Schema::SummaryField("uriarray",
+ Schema::STRING,
+ Schema::ARRAY));
+ s.addUriIndexFields(Schema::IndexField("uriwset",
+ Schema::STRING,
+ Schema::WEIGHTEDSET));
+ s.addSummaryField(Schema::SummaryField("uriwset",
+ Schema::STRING,
+ Schema::WEIGHTEDSET));
+ BuildContext bc(s);
+ DBContext dc(bc._repo, getDocTypeName());
+ Document::UP exp = bc._bld.startDocument("doc::0").
+ startIndexField("urisingle").
+ startSubField("all").
+ addUrlTokenizedString(
+ "http://www.yahoo.com:81/fluke?ab=2#4").
+ endSubField().
+ startSubField("scheme").
+ addUrlTokenizedString("http").
+ endSubField().
+ startSubField("host").
+ addUrlTokenizedString("www.yahoo.com").
+ endSubField().
+ startSubField("port").
+ addUrlTokenizedString("81").
+ endSubField().
+ startSubField("path").
+ addUrlTokenizedString("/fluke").
+ endSubField().
+ startSubField("query").
+ addUrlTokenizedString("ab=2").
+ endSubField().
+ startSubField("fragment").
+ addUrlTokenizedString("4").
+ endSubField().
+ endField().
+ startIndexField("uriarray").
+ startElement(1).
+ startSubField("all").
+ addUrlTokenizedString(
+ "http://www.yahoo.com:82/fluke?ab=2#8").
+ endSubField().
+ startSubField("scheme").
+ addUrlTokenizedString("http").
+ endSubField().
+ startSubField("host").
+ addUrlTokenizedString("www.yahoo.com").
+ endSubField().
+ startSubField("port").
+ addUrlTokenizedString("82").
+ endSubField().
+ startSubField("path").
+ addUrlTokenizedString("/fluke").
+ endSubField().
+ startSubField("query").
+ addUrlTokenizedString("ab=2").
+ endSubField().
+ startSubField("fragment").
+ addUrlTokenizedString("8").
+ endSubField().
+ endElement().
+ startElement(1).
+ startSubField("all").
+ addUrlTokenizedString(
+ "http://www.flickr.com:82/fluke?ab=2#9").
+ endSubField().
+ startSubField("scheme").
+ addUrlTokenizedString("http").
+ endSubField().
+ startSubField("host").
+ addUrlTokenizedString("www.flickr.com").
+ endSubField().
+ startSubField("port").
+ addUrlTokenizedString("82").
+ endSubField().
+ startSubField("path").
+ addUrlTokenizedString("/fluke").
+ endSubField().
+ startSubField("query").
+ addUrlTokenizedString("ab=2").
+ endSubField().
+ startSubField("fragment").
+ addUrlTokenizedString("9").
+ endSubField().
+ endElement().
+ endField().
+ startIndexField("uriwset").
+ startElement(4).
+ startSubField("all").
+ addUrlTokenizedString(
+ "http://www.yahoo.com:83/fluke?ab=2#12").
+ endSubField().
+ startSubField("scheme").
+ addUrlTokenizedString("http").
+ endSubField().
+ startSubField("host").
+ addUrlTokenizedString("www.yahoo.com").
+ endSubField().
+ startSubField("port").
+ addUrlTokenizedString("83").
+ endSubField().
+ startSubField("path").
+ addUrlTokenizedString("/fluke").
+ endSubField().
+ startSubField("query").
+ addUrlTokenizedString("ab=2").
+ endSubField().
+ startSubField("fragment").
+ addUrlTokenizedString("12").
+ endSubField().
+ endElement().
+ startElement(7).
+ startSubField("all").
+ addUrlTokenizedString(
+ "http://www.flickr.com:85/fluke?ab=2#13").
+ endSubField().
+ startSubField("scheme").
+ addUrlTokenizedString("http").
+ endSubField().
+ startSubField("host").
+ addUrlTokenizedString("www.flickr.com").
+ endSubField().
+ startSubField("port").
+ addUrlTokenizedString("85").
+ endSubField().
+ startSubField("path").
+ addUrlTokenizedString("/fluke").
+ endSubField().
+ startSubField("query").
+ addUrlTokenizedString("ab=2").
+ endSubField().
+ startSubField("fragment").
+ addUrlTokenizedString("13").
+ endSubField().
+ endElement().
+ endField().
+ endDocument();
+ dc._sa->put(1, *exp, 1);
+
+ IDocumentStore & store =
+ dc._ddb->getReadySubDB()->getSummaryManager()->getBackingStore();
+ Document::UP act = store.read(1, *bc._repo);
+ EXPECT_TRUE(act.get() != NULL);
+ EXPECT_EQUAL(exp->getType(), act->getType());
+
+ DocumentStoreAdapter dsa(store, *bc._repo, getResultConfig(), "class0",
+ bc.createFieldCacheRepo(getResultConfig())->getFieldCache("class0"),
+ getMarkupFields());
+ EXPECT_TRUE(assertString("http://www.yahoo.com:81/fluke?ab=2#4",
+ "urisingle", dsa, 1));
+ EXPECT_TRUE(assertString("[\"http://www.yahoo.com:82/fluke?ab=2#8\","
+ "\"http://www.flickr.com:82/fluke?ab=2#9\"]",
+ "uriarray", dsa, 1));
+ EXPECT_TRUE(assertString("["
+ "{\"item\":\"http://www.yahoo.com:83/fluke?ab=2#12\",\"weight\":4}"
+ ","
+ "{\"item\":\"http://www.flickr.com:85/fluke?ab=2#13\",\"weight\":7}"
+ "]",
+ "uriwset", dsa, 1));
+}
+
+
+// Verifies that position attributes (zcurve-encoded int64) are returned in
+// docsums: the raw encoded values for "sp2"/"ap2"/"wp2" and decoded
+// "<position .../>" XML (with latlong) for the derived "*x" fields of
+// summary class5.
+void
+Test::requireThatPositionsAreUsed()
+{
+ // Single, array and weighted-set int64 position attributes.
+ Schema s;
+ s.addAttributeField(Schema::AttributeField("sp2",
+ Schema::INT64));
+ s.addAttributeField(Schema::AttributeField("ap2",
+ Schema::INT64,
+ Schema::ARRAY));
+ s.addAttributeField(Schema::AttributeField("wp2",
+ Schema::INT64,
+ Schema::WEIGHTEDSET));
+
+ BuildContext bc(s);
+ DBContext dc(bc._repo, getDocTypeName());
+ // addPosition(x, y) stores the zcurve encoding of the coordinate pair
+ // in the underlying int64 attribute value.
+ Document::UP exp = bc._bld.startDocument("doc::1").
+ startAttributeField("sp2").
+ addPosition(1002, 1003).
+ endField().
+ startAttributeField("ap2").
+ startElement().addPosition(1006, 1007).endElement().
+ startElement().addPosition(1008, 1009).endElement().
+ endField().
+ startAttributeField("wp2").
+ startElement(43).addPosition(1012, 1013).endElement().
+ startElement(44).addPosition(1014, 1015).endElement().
+ endField().
+ endDocument().
+ dc.put(*exp, 1);
+
+ // Read the document back from the store to check it round-tripped.
+ IDocumentStore & store =
+ dc._ddb->getReadySubDB()->getSummaryManager()->getBackingStore();
+ Document::UP act = store.read(1, *bc._repo);
+ EXPECT_TRUE(act.get() != NULL);
+ EXPECT_EQUAL(exp->getType(), act->getType());
+
+ // Request summary class5 for the document fed above.
+ DocsumRequest req;
+ req.resultClassName = "class5";
+ req.hits.push_back(DocsumRequest::Hit(gid1));
+ DocsumReply::UP rep = dc._ddb->getDocsums(req);
+ uint32_t rclass = 5;
+
+ EXPECT_EQUAL(1u, rep->docsums.size());
+ EXPECT_EQUAL(1u, rep->docsums[0].docid);
+ EXPECT_EQUAL(gid1, rep->docsums[0].gid);
+ // The plain fields expose the raw zcurve encodings of the (x, y) pairs
+ // above; the "*x" fields expose the decoded XML rendering.
+ EXPECT_TRUE(assertString("1047758",
+ "sp2", *rep, 0, rclass));
+ EXPECT_TRUE(assertString("<position x=\"1002\" y=\"1003\" latlong=\"N0.001003;E0.001002\" />",
+ "sp2x", *rep, 0, rclass));
+ EXPECT_TRUE(assertString("[1047806,1048322]",
+ "ap2", *rep, 0, rclass));
+ EXPECT_TRUE(assertString("<position x=\"1006\" y=\"1007\" latlong=\"N0.001007;E0.001006\" />"
+ "<position x=\"1008\" y=\"1009\" latlong=\"N0.001009;E0.001008\" />",
+ "ap2x", *rep, 0, rclass));
+ EXPECT_TRUE(assertString("[{\"item\":1048370,\"weight\":43},{\"item\":1048382,\"weight\":44}]",
+ "wp2", *rep, 0, rclass));
+ EXPECT_TRUE(assertString("<position x=\"1012\" y=\"1013\" latlong=\"N0.001013;E0.001012\" />"
+ "<position x=\"1014\" y=\"1015\" latlong=\"N0.001015;E0.001014\" />",
+ "wp2x", *rep, 0, rclass));
+}
+
+
+// Verifies raw (binary) summary fields: a single raw value is returned
+// verbatim, while raw values inside arrays and weighted sets are
+// base64-encoded when rendered into the JSON summary representation.
+void
+Test::requireThatRawFieldsWorks()
+{
+ Schema s;
+ // NOTE(review): Schema::AttributeField is used with addSummaryField here
+ // (elsewhere Schema::SummaryField is used) — presumably the field types
+ // are interchangeable aliases; confirm against the Schema declaration.
+ s.addSummaryField(Schema::AttributeField("i",
+ Schema::RAW));
+ s.addSummaryField(Schema::AttributeField("araw",
+ Schema::RAW,
+ Schema::ARRAY));
+ s.addSummaryField(Schema::AttributeField("wraw",
+ Schema::RAW,
+ Schema::WEIGHTEDSET));
+
+ // Append a small binary blob (including a NUL byte) to each value to
+ // ensure non-printable data survives storage and rendering.
+ std::vector<char> binaryBlob;
+ binaryBlob.push_back('\0');
+ binaryBlob.push_back('\2');
+ binaryBlob.push_back('\1');
+ std::string raw1s("Single Raw Element");
+ std::string raw1a0("Array Raw Element 0");
+ std::string raw1a1("Array Raw Element 1");
+ std::string raw1w0("Weighted Set Raw Element 0");
+ std::string raw1w1("Weighted Set Raw Element 1");
+ raw1s += std::string(&binaryBlob[0],
+ &binaryBlob[0] + binaryBlob.size());
+ raw1a0 += std::string(&binaryBlob[0],
+ &binaryBlob[0] + binaryBlob.size());
+ raw1a1 += std::string(&binaryBlob[0],
+ &binaryBlob[0] + binaryBlob.size());
+ raw1w0 += std::string(&binaryBlob[0],
+ &binaryBlob[0] + binaryBlob.size());
+ raw1w1 += std::string(&binaryBlob[0],
+ &binaryBlob[0] + binaryBlob.size());
+
+ BuildContext bc(s);
+ DBContext dc(bc._repo, getDocTypeName());
+ // Weighted-set elements are inserted with weight 46 before 45; the
+ // expected JSON below preserves this insertion order.
+ Document::UP exp = bc._bld.startDocument("doc::0").
+ startSummaryField("i").
+ addRaw(raw1s.c_str(), raw1s.size()).
+ endField().
+ startSummaryField("araw").
+ startElement().
+ addRaw(raw1a0.c_str(), raw1a0.size()).
+ endElement().
+ startElement().
+ addRaw(raw1a1.c_str(), raw1a1.size()).
+ endElement().
+ endField().
+ startSummaryField("wraw").
+ startElement(46).
+ addRaw(raw1w1.c_str(), raw1w1.size()).
+ endElement().
+ startElement(45).
+ addRaw(raw1w0.c_str(), raw1w0.size()).
+ endElement().
+ endField().
+ endDocument();
+ dc._sa->put(1, *exp, 1);
+
+ // Round-trip check through the backing document store.
+ IDocumentStore & store =
+ dc._ddb->getReadySubDB()->getSummaryManager()->getBackingStore();
+ Document::UP act = store.read(1, *bc._repo);
+ EXPECT_TRUE(act.get() != NULL);
+ EXPECT_EQUAL(exp->getType(), act->getType());
+
+ DocumentStoreAdapter dsa(store, *bc._repo, getResultConfig(), "class0",
+ bc.createFieldCacheRepo(getResultConfig())->getFieldCache("class0"),
+ getMarkupFields());
+
+ // Single raw field comes back verbatim (binary blob included); the
+ // multivalue fields are base64-encoded inside the JSON rendering.
+ ASSERT_TRUE(assertString(raw1s,
+ "i", dsa, 1));
+ ASSERT_TRUE(assertString(empty + "[\"" +
+ vespalib::Base64::encode(raw1a0) +
+ "\",\"" +
+ vespalib::Base64::encode(raw1a1) +
+ "\"]",
+ "araw", dsa, 1));
+ ASSERT_TRUE(assertString(empty + "[{\"item\":\"" +
+ vespalib::Base64::encode(raw1w1) +
+ "\",\"weight\":46},{\"item\":\"" +
+ vespalib::Base64::encode(raw1w0) +
+ "\",\"weight\":45}]",
+ "wraw", dsa, 1));
+}
+
+
+// Verifies that asking the FieldCacheRepo for an unknown / empty class name
+// yields the default field cache, and that "class1" (not present in the
+// schema-derived config) maps to that same shared cache instance.
+void
+Test::requireThatFieldCacheRepoCanReturnDefaultFieldCache()
+{
+ Schema s;
+ s.addSummaryField(Schema::SummaryField("a", Schema::INT32));
+ BuildContext bc(s);
+ FieldCacheRepo::UP repo = bc.createFieldCacheRepo(getResultConfig());
+ FieldCache::CSP cache = repo->getFieldCache("");
+ // Same underlying object, not merely an equal copy.
+ EXPECT_TRUE(cache.get() == repo->getFieldCache("class1").get());
+ EXPECT_EQUAL(1u, cache->size());
+ EXPECT_EQUAL("a", cache->getField(0)->getName());
+}
+
+
+// Fixture constructor: loads summary.cfg into the result config and scans
+// summarymap.cfg for "dynamicteaser" overrides, recording each override's
+// source field as a markup field (used by the tests via getMarkupFields()).
+Test::Test()
+ : _summaryCfg(),
+ _resultCfg(),
+ _markupFields()
+{
+ std::string cfgId("summary");
+ _summaryCfg = config::ConfigGetter<vespa::config::search::SummaryConfig>::getConfig(cfgId, config::FileSpec("summary.cfg"));
+ _resultCfg.ReadConfig(*_summaryCfg, cfgId.c_str());
+ std::string mapCfgId("summarymap");
+ std::unique_ptr<vespa::config::search::SummarymapConfig> mapCfg = config::ConfigGetter<vespa::config::search::SummarymapConfig>::getConfig(mapCfgId, config::FileSpec("summarymap.cfg"));
+ for (size_t i = 0; i < mapCfg->override.size(); ++i) {
+ const vespa::config::search::SummarymapConfig::Override & o = mapCfg->override[i];
+ if (o.command == "dynamicteaser") {
+ vespalib::string markupField = o.arguments;
+ if (markupField.empty())
+ continue;
+ // Assume just one argument: source field that must contain markup
+ _markupFields.insert(markupField);
+ LOG(info,
+ "Field %s has markup",
+ markupField.c_str());
+ }
+ }
+}
+
+
+// Test entry point: registers the binary name as file-header creator (for
+// files written by the document store during the tests), then runs every
+// test case in sequence.
+int
+Test::Main()
+{
+ TEST_INIT("docsummary_test");
+
+ if (_argc > 0) {
+ DummyFileHeaderContext::setCreator(_argv[0]);
+ }
+ TEST_DO(requireThatSummaryAdapterHandlesPutAndRemove());
+ TEST_DO(requireThatAdapterHandlesAllFieldTypes());
+ TEST_DO(requireThatAdapterHandlesMultipleDocuments());
+ TEST_DO(requireThatAdapterHandlesDocumentIdField());
+ TEST_DO(requireThatDocsumRequestIsProcessed());
+ TEST_DO(requireThatRewritersAreUsed());
+ TEST_DO(requireThatAttributesAreUsed());
+ TEST_DO(requireThatAnnotationsAreUsed());
+ TEST_DO(requireThatUrisAreUsed());
+ TEST_DO(requireThatPositionsAreUsed());
+ TEST_DO(requireThatRawFieldsWorks());
+ TEST_DO(requireThatFieldCacheRepoCanReturnDefaultFieldCache());
+
+ TEST_DONE();
+}
+
+}
+
+TEST_APPHOOK(proton::Test);
diff --git a/searchcore/src/tests/proton/docsummary/docsummary_test.sh b/searchcore/src/tests/proton/docsummary/docsummary_test.sh
new file mode 100755
index 00000000000..4871911e1cd
--- /dev/null
+++ b/searchcore/src/tests/proton/docsummary/docsummary_test.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+rm -rf tmp
+rm -rf tmpdb
+rm -rf summary
+rm -rf indexingdocument
+rm -rf searchdocument
+rm -rf *.dat
+$VALGRIND ./searchcore_docsummary_test_app
+rm -rf tmp
+rm -rf tmpdb
+rm -rf summary
+rm -rf indexingdocument
+rm -rf searchdocument
+rm -rf *.dat
+$VALGRIND ./searchcore_summaryfieldconverter_test_app
diff --git a/searchcore/src/tests/proton/docsummary/documentmanager.cfg b/searchcore/src/tests/proton/docsummary/documentmanager.cfg
new file mode 100644
index 00000000000..91c69cc0c70
--- /dev/null
+++ b/searchcore/src/tests/proton/docsummary/documentmanager.cfg
@@ -0,0 +1,81 @@
+enablecompression false
+datatype[6]
+datatype[0].id -1636745577
+datatype[0].arraytype[0]
+datatype[0].weightedsettype[0]
+datatype[0].structtype[1]
+datatype[0].structtype[0].name typea.header
+datatype[0].structtype[0].version 0
+datatype[0].structtype[0].field[4]
+datatype[0].structtype[0].field[0].name floatfield
+datatype[0].structtype[0].field[0].id[0]
+datatype[0].structtype[0].field[0].datatype 1
+datatype[0].structtype[0].field[1].name stringfield
+datatype[0].structtype[0].field[1].id[0]
+datatype[0].structtype[0].field[1].datatype 2
+datatype[0].structtype[0].field[2].name longfield
+datatype[0].structtype[0].field[2].id[0]
+datatype[0].structtype[0].field[2].datatype 4
+datatype[0].structtype[0].field[3].name urifield
+datatype[0].structtype[0].field[3].id[0]
+datatype[0].structtype[0].field[3].datatype 10
+datatype[0].documenttype[0]
+datatype[1].id 1878320748
+datatype[1].arraytype[0]
+datatype[1].weightedsettype[0]
+datatype[1].structtype[1]
+datatype[1].structtype[0].name typea.body
+datatype[1].structtype[0].version 0
+datatype[1].structtype[0].field[4]
+datatype[1].structtype[0].field[0].name intfield
+datatype[1].structtype[0].field[0].id[0]
+datatype[1].structtype[0].field[0].datatype 0
+datatype[1].structtype[0].field[1].name rawfield
+datatype[1].structtype[0].field[1].id[0]
+datatype[1].structtype[0].field[1].datatype 3
+datatype[1].structtype[0].field[2].name doublefield
+datatype[1].structtype[0].field[2].id[0]
+datatype[1].structtype[0].field[2].datatype 5
+datatype[1].structtype[0].field[3].name bytefield
+datatype[1].structtype[0].field[3].id[0]
+datatype[1].structtype[0].field[3].datatype 16
+datatype[1].documenttype[0]
+datatype[2].id -1175657560
+datatype[2].arraytype[0]
+datatype[2].weightedsettype[0]
+datatype[2].structtype[0]
+datatype[2].documenttype[1]
+datatype[2].documenttype[0].name typea
+datatype[2].documenttype[0].version 0
+datatype[2].documenttype[0].inherits[0]
+datatype[2].documenttype[0].headerstruct -1636745577
+datatype[2].documenttype[0].bodystruct 1878320748
+datatype[3].id 192273965
+datatype[3].arraytype[0]
+datatype[3].weightedsettype[0]
+datatype[3].structtype[1]
+datatype[3].structtype[0].name typeb.header
+datatype[3].structtype[0].version 0
+datatype[3].structtype[0].field[0]
+datatype[3].documenttype[0]
+datatype[4].id -72846462
+datatype[4].arraytype[0]
+datatype[4].weightedsettype[0]
+datatype[4].structtype[1]
+datatype[4].structtype[0].name typeb.body
+datatype[4].structtype[0].version 0
+datatype[4].structtype[0].field[1]
+datatype[4].structtype[0].field[0].name intfield
+datatype[4].structtype[0].field[0].id[0]
+datatype[4].structtype[0].field[0].datatype 0
+datatype[4].documenttype[0]
+datatype[5].id -1146158894
+datatype[5].arraytype[0]
+datatype[5].weightedsettype[0]
+datatype[5].structtype[0]
+datatype[5].documenttype[1]
+datatype[5].documenttype[0].name typeb
+datatype[5].documenttype[0].version 0
+datatype[5].documenttype[0].inherits[0]
+datatype[5].documenttype[0].headerstruct 192273965
+datatype[5].documenttype[0].bodystruct -72846462
diff --git a/searchcore/src/tests/proton/docsummary/indexingdocument.cfg b/searchcore/src/tests/proton/docsummary/indexingdocument.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/docsummary/indexingdocument.cfg
diff --git a/searchcore/src/tests/proton/docsummary/indexschema.cfg b/searchcore/src/tests/proton/docsummary/indexschema.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/docsummary/indexschema.cfg
diff --git a/searchcore/src/tests/proton/docsummary/juniperrc.cfg b/searchcore/src/tests/proton/docsummary/juniperrc.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/docsummary/juniperrc.cfg
diff --git a/searchcore/src/tests/proton/docsummary/rank-profiles.cfg b/searchcore/src/tests/proton/docsummary/rank-profiles.cfg
new file mode 100644
index 00000000000..34d8f0245df
--- /dev/null
+++ b/searchcore/src/tests/proton/docsummary/rank-profiles.cfg
@@ -0,0 +1,2 @@
+rankprofile[1]
+rankprofile[0].name default
diff --git a/searchcore/src/tests/proton/docsummary/summary.cfg b/searchcore/src/tests/proton/docsummary/summary.cfg
new file mode 100644
index 00000000000..52f300ae3e0
--- /dev/null
+++ b/searchcore/src/tests/proton/docsummary/summary.cfg
@@ -0,0 +1,108 @@
+defaultsummaryid 1
+classes[6]
+classes[0].name "class0"
+classes[0].id 0
+classes[0].fields[24]
+classes[0].fields[0].name "a"
+classes[0].fields[0].type "byte"
+classes[0].fields[1].name "b"
+classes[0].fields[1].type "short"
+classes[0].fields[2].name "c"
+classes[0].fields[2].type "integer"
+classes[0].fields[3].name "d"
+classes[0].fields[3].type "int64"
+classes[0].fields[4].name "e"
+classes[0].fields[4].type "float"
+classes[0].fields[5].name "f"
+classes[0].fields[5].type "double"
+classes[0].fields[6].name "g"
+classes[0].fields[6].type "string"
+classes[0].fields[7].name "h"
+classes[0].fields[7].type "longstring"
+classes[0].fields[8].name "i"
+classes[0].fields[8].type "data"
+classes[0].fields[9].name "j"
+classes[0].fields[9].type "longdata"
+classes[0].fields[10].name "k"
+classes[0].fields[10].type "xmlstring"
+classes[0].fields[11].name "l"
+classes[0].fields[11].type "jsonstring"
+classes[0].fields[12].name "dynamicstring"
+classes[0].fields[12].type "string"
+classes[0].fields[13].name "urisingle"
+classes[0].fields[13].type "string"
+classes[0].fields[14].name "uriarray"
+classes[0].fields[14].type "jsonstring"
+classes[0].fields[15].name "uriwset"
+classes[0].fields[15].type "jsonstring"
+classes[0].fields[16].name "sp1"
+classes[0].fields[16].type "string"
+classes[0].fields[17].name "sp2"
+classes[0].fields[17].type "string"
+classes[0].fields[18].name "ap1"
+classes[0].fields[18].type "jsonstring"
+classes[0].fields[19].name "ap2"
+classes[0].fields[19].type "jsonstring"
+classes[0].fields[20].name "wp1"
+classes[0].fields[20].type "jsonstring"
+classes[0].fields[21].name "wp2"
+classes[0].fields[21].type "jsonstring"
+classes[0].fields[22].name "araw"
+classes[0].fields[22].type "jsonstring"
+classes[0].fields[23].name "wraw"
+classes[0].fields[23].type "jsonstring"
+classes[1].name "class1"
+classes[1].id 1
+classes[1].fields[1]
+classes[1].fields[0].name "a"
+classes[1].fields[0].type "integer"
+classes[2].name "class2"
+classes[2].id 2
+classes[2].fields[2]
+classes[2].fields[0].name "aa"
+classes[2].fields[0].type "integer"
+classes[2].fields[1].name "ab"
+classes[2].fields[1].type "integer"
+classes[3].name "class3"
+classes[3].id 3
+classes[3].fields[10]
+classes[3].fields[0].name "ba"
+classes[3].fields[0].type "integer"
+classes[3].fields[1].name "bb"
+classes[3].fields[1].type "float"
+classes[3].fields[2].name "bc"
+classes[3].fields[2].type "longstring"
+classes[3].fields[3].name "bd"
+classes[3].fields[3].type "jsonstring"
+classes[3].fields[4].name "be"
+classes[3].fields[4].type "jsonstring"
+classes[3].fields[5].name "bf"
+classes[3].fields[5].type "jsonstring"
+classes[3].fields[6].name "bg"
+classes[3].fields[6].type "jsonstring"
+classes[3].fields[7].name "bh"
+classes[3].fields[7].type "jsonstring"
+classes[3].fields[8].name "bi"
+classes[3].fields[8].type "jsonstring"
+classes[3].fields[9].name "bj"
+classes[3].fields[9].type "jsonstring"
+classes[4].name "class4"
+classes[4].id 4
+classes[4].fields[1]
+classes[4].fields[0].name "documentid"
+classes[4].fields[0].type "longstring"
+classes[5].id 5
+classes[5].name "class5"
+classes[5].fields[6]
+classes[5].fields[0].name "sp2"
+classes[5].fields[0].type "string"
+classes[5].fields[1].name "sp2x"
+classes[5].fields[1].type "xmlstring"
+classes[5].fields[2].name "ap2"
+classes[5].fields[2].type "jsonstring"
+classes[5].fields[3].name "ap2x"
+classes[5].fields[3].type "xmlstring"
+classes[5].fields[4].name "wp2"
+classes[5].fields[4].type "jsonstring"
+classes[5].fields[5].name "wp2x"
+classes[5].fields[5].type "xmlstring"
diff --git a/searchcore/src/tests/proton/docsummary/summaryfieldconverter_test.cpp b/searchcore/src/tests/proton/docsummary/summaryfieldconverter_test.cpp
new file mode 100644
index 00000000000..f2e5f1a508b
--- /dev/null
+++ b/searchcore/src/tests/proton/docsummary/summaryfieldconverter_test.cpp
@@ -0,0 +1,713 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Unit tests for summaryfieldconverter.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("summaryfieldconverter_test");
+
+#include <vespa/document/annotation/annotation.h>
+#include <vespa/document/annotation/span.h>
+#include <vespa/document/annotation/spanlist.h>
+#include <vespa/document/annotation/spantree.h>
+#include <vespa/document/base/documentid.h>
+#include <vespa/document/base/exceptions.h>
+#include <vespa/document/base/field.h>
+#include <vespa/document/datatype/annotationtype.h>
+#include <vespa/document/datatype/arraydatatype.h>
+#include <vespa/document/datatype/datatype.h>
+#include <vespa/document/datatype/documenttype.h>
+#include <vespa/document/datatype/structdatatype.h>
+#include <vespa/document/datatype/urldatatype.h>
+#include <vespa/document/datatype/weightedsetdatatype.h>
+#include <vespa/document/fieldvalue/arrayfieldvalue.h>
+#include <vespa/document/fieldvalue/bytefieldvalue.h>
+#include <vespa/document/fieldvalue/document.h>
+#include <vespa/document/fieldvalue/doublefieldvalue.h>
+#include <vespa/document/fieldvalue/floatfieldvalue.h>
+#include <vespa/document/fieldvalue/intfieldvalue.h>
+#include <vespa/document/fieldvalue/longfieldvalue.h>
+#include <vespa/document/fieldvalue/predicatefieldvalue.h>
+#include <vespa/document/fieldvalue/rawfieldvalue.h>
+#include <vespa/document/fieldvalue/shortfieldvalue.h>
+#include <vespa/document/fieldvalue/stringfieldvalue.h>
+#include <vespa/document/fieldvalue/structfieldvalue.h>
+#include <vespa/document/fieldvalue/weightedsetfieldvalue.h>
+#include <vespa/document/fieldvalue/tensorfieldvalue.h>
+#include <vespa/document/predicate/predicate.h>
+#include <vespa/document/repo/configbuilder.h>
+#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/searchcore/proton/docsummary/summaryfieldconverter.h>
+#include <vespa/searchcore/proton/docsummary/linguisticsannotation.h>
+#include <vespa/searchcore/proton/docsummary/searchdatatype.h>
+#include <vespa/searchcommon/common/schema.h>
+#include <vespa/config-summarymap.h>
+#include <vespa/vespalib/geo/zcurve.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/data/slime/json_format.h>
+#include <vespa/vespalib/data/slime/binary_format.h>
+#include <vespa/searchlib/util/slime_output_raw_buf_adapter.h>
+#include <vespa/vespalib/tensor/tensor.h>
+#include <vespa/vespalib/tensor/types.h>
+#include <vespa/vespalib/tensor/default_tensor.h>
+#include <vespa/vespalib/tensor/tensor_factory.h>
+
+using vespa::config::search::SummarymapConfig;
+using vespa::config::search::SummarymapConfigBuilder;
+using document::Annotation;
+using document::AnnotationType;
+using document::ArrayDataType;
+using document::ArrayFieldValue;
+using document::ByteFieldValue;
+using document::DataType;
+using document::Document;
+using document::DocumenttypesConfig;
+using document::DocumenttypesConfigBuilder;
+using document::DocumentId;
+using document::DocumentType;
+using document::DocumentTypeRepo;
+using document::DoubleFieldValue;
+using document::FeatureSet;
+using document::Field;
+using document::FieldNotFoundException;
+using document::FieldValue;
+using document::FloatFieldValue;
+using document::IntFieldValue;
+using document::LongFieldValue;
+using document::Predicate;
+using document::PredicateFieldValue;
+using document::RawFieldValue;
+using document::ShortFieldValue;
+using document::Span;
+using document::SpanList;
+using document::SpanTree;
+using document::StringFieldValue;
+using document::StructDataType;
+using document::StructFieldValue;
+using document::UrlDataType;
+using document::WeightedSetDataType;
+using document::WeightedSetFieldValue;
+using document::TensorFieldValue;
+using search::index::Schema;
+using vespalib::Slime;
+using vespalib::slime::Cursor;
+using vespalib::string;
+using namespace proton;
+using namespace proton::linguistics;
+using vespalib::geo::ZCurve;
+using vespalib::tensor::Tensor;
+using vespalib::tensor::TensorCells;
+using vespalib::tensor::TensorDimensions;
+
+typedef SummaryFieldConverter SFC;
+
+namespace {
+
+// Helper holding one expected field value in three forms: the raw JSON
+// input, its normalized (re-encoded) JSON string, and its binary slime
+// encoding.  Tests compare converter output against `json` or `binary`.
+struct FieldBlock {
+ vespalib::string input; // original JSON as written in the test
+ Slime slime; // parsed representation of `input`
+ search::RawBuf binary; // binary slime encoding (after ctor)
+ vespalib::string json; // normalized JSON produced by re-encoding
+
+ explicit FieldBlock(const vespalib::string &jsonInput)
+ : input(jsonInput), slime(), binary(1024), json()
+ {
+ // The whole input must parse as JSON.
+ size_t used = vespalib::slime::JsonFormat::decode(jsonInput, slime);
+ EXPECT_EQUAL(jsonInput.size(), used);
+ {
+ // First pass: re-encode as compact JSON into `binary`, copy it
+ // out to `json`, then reset the buffer for reuse below.
+ search::SlimeOutputRawBufAdapter adapter(binary);
+ vespalib::slime::JsonFormat::encode(slime, adapter, true);
+ json.assign(binary.GetDrainPos(), binary.GetUsedLen());
+ binary.reset();
+ }
+ // Second pass: leave the binary slime encoding in `binary`.
+ search::SlimeOutputRawBufAdapter adapter(binary);
+ vespalib::slime::BinaryFormat::encode(slime, adapter);
+ }
+};
+
+// Test fixture for SummaryFieldConverter: builds documents from a fixed
+// document type repo and checks how field values are converted for summary
+// and attribute use (flattening, position transforms, annotations, CJK).
+class Test : public vespalib::TestApp {
+ std::unique_ptr<Schema> _schema; // rebuilt per test in setUp()
+ std::unique_ptr<SummarymapConfigBuilder> _summarymap; // rebuilt per test in setUp()
+ DocumentTypeRepo::SP _documentRepo; // repo built from getDocumenttypesConfig()
+ const DocumentType *_documentType; // the "indexingdocument" type
+ document::FixedTypeRepo _fixedRepo; // repo bound to _documentType (for span trees)
+
+ void setUp();
+ void tearDown();
+
+ const DataType &getDataType(const string &name) const;
+
+ // Fetch field `field_name` from `doc` and cast it to T (asserts on type).
+ template <typename T>
+ T getValueAs(const string &field_name, const Document &doc);
+
+ // Cast a converted field value to T (asserts on presence and type).
+ template <typename T>
+ T
+ cvtValueAs(const FieldValue::UP &fv);
+
+ template <typename T>
+ T
+ cvtAttributeAs(const FieldValue::UP &fv);
+
+ // Run the summary conversion on `fv` and cast the result to T.
+ template <typename T>
+ T
+ cvtSummaryAs(bool markup, const FieldValue::UP &fv);
+
+ void checkString(const string &str, const FieldValue *value);
+ void checkData(const search::RawBuf &data, const FieldValue *value);
+ void checkArray(const string &str, const FieldValue *value);
+ template <unsigned int N>
+ void checkArray(const char *(&str)[N], const FieldValue *value);
+ Document getDoc(const string &name, const Document *doc);
+ void setIndexField(const string &name);
+ void setSummaryField(const string &name);
+ void setAttributeField(const string &name);
+
+ void requireThatSummaryIsAnUnmodifiedString();
+ void requireThatAttributeIsAnUnmodifiedString();
+ void requireThatArrayIsFlattenedInSummaryField();
+ void requireThatWeightedSetIsFlattenedInSummaryField();
+ void requireThatPositionsAreTransformedInSummary();
+ void requireThatArrayIsPreservedInAttributeField();
+ void requireThatPositionsAreTransformedInAttributeField();
+ void requireThatPositionArrayIsTransformedInAttributeField();
+ void requireThatPositionWeightedSetIsTransformedInAttributeField();
+ void requireThatAttributeCanBePrimitiveTypes();
+ void requireThatSummaryCanBePrimitiveTypes();
+ void requireThatSummaryHandlesCjk();
+ void requireThatSearchDataTypeUsesDefaultDataTypes();
+ void requireThatLinguisticsAnnotationUsesDefaultDataTypes();
+ void requireThatPredicateIsPrinted();
+ void requireThatTensorIsPrinted();
+ const DocumentType &getDocType() const { return *_documentType; }
+ Document makeDocument();
+ StringFieldValue annotateTerm(const string &term);
+ StringFieldValue makeAnnotatedChineseString();
+ StringFieldValue makeAnnotatedString();
+ void setSpanTree(StringFieldValue & value, SpanTree::UP tree);
+public:
+ Test();
+ int Main();
+};
+
+// Builds the config for the single "indexingdocument" document type used by
+// all tests: string/annotation fields, position fields (int and zcurve
+// int64, single/array/wset), uri structs, all primitive numeric types, and
+// predicate/tensor fields.
+DocumenttypesConfig getDocumenttypesConfig() {
+ using namespace document::config_builder;
+ DocumenttypesConfigBuilderHelper builder;
+ builder.document(42, "indexingdocument",
+ Struct("indexingdocument.header")
+ .addField("empty", DataType::T_STRING)
+ .addField("string", DataType::T_STRING)
+ .addField("plain_string", DataType::T_STRING)
+ .addField("string_array", Array(DataType::T_STRING))
+ .addField("string_wset", Wset(DataType::T_STRING))
+ .addField("position1", DataType::T_INT)
+ .addField("position2", DataType::T_LONG)
+ .addField("position2_array", Array(DataType::T_LONG))
+ .addField("position2_wset", Wset(DataType::T_LONG))
+ .addField("uri", UrlDataType::getInstance().getId())
+ .addField("uri_array",
+ Array(UrlDataType::getInstance().getId()))
+ .addField("int", DataType::T_INT)
+ .addField("long", DataType::T_LONG)
+ .addField("short", DataType::T_SHORT)
+ .addField("byte", DataType::T_BYTE)
+ .addField("double", DataType::T_DOUBLE)
+ .addField("float", DataType::T_FLOAT)
+ .addField("chinese", DataType::T_STRING)
+ .addField("predicate", DataType::T_PREDICATE)
+ .addField("tensor", DataType::T_TENSOR),
+ Struct("indexingdocument.body"));
+ return builder.config();
+}
+
+// Fixture constructor: builds the document type repo once and resolves the
+// "indexingdocument" type.  _schema/_summarymap are (re)created per test
+// case in setUp(), not here.
+Test::Test() :
+ _documentRepo(new DocumentTypeRepo(getDocumenttypesConfig())),
+ _documentType(_documentRepo->getDocumentType("indexingdocument")),
+ _fixedRepo(*_documentRepo, *_documentType)
+{
+ ASSERT_TRUE(_documentType);
+}
+
+// Wraps each test case with fresh setUp()/tearDown() so every case starts
+// from a clean schema and summarymap builder.
+#define TEST_CALL(func) \
+ TEST_DO(setUp()); \
+ TEST_DO(func); \
+ TEST_DO(tearDown())
+
+// Test entry point: runs every converter test case in sequence.
+int
+Test::Main()
+{
+ TEST_INIT("summaryfieldconverter_test");
+
+ TEST_CALL(requireThatSummaryIsAnUnmodifiedString());
+ TEST_CALL(requireThatAttributeIsAnUnmodifiedString());
+ TEST_CALL(requireThatArrayIsFlattenedInSummaryField());
+ TEST_CALL(requireThatWeightedSetIsFlattenedInSummaryField());
+ TEST_CALL(requireThatPositionsAreTransformedInSummary());
+ TEST_CALL(requireThatArrayIsPreservedInAttributeField());
+ TEST_CALL(requireThatPositionsAreTransformedInAttributeField());
+ TEST_CALL(requireThatPositionArrayIsTransformedInAttributeField());
+ TEST_CALL(requireThatPositionWeightedSetIsTransformedInAttributeField());
+ TEST_CALL(requireThatAttributeCanBePrimitiveTypes());
+ TEST_CALL(requireThatSummaryCanBePrimitiveTypes());
+ TEST_CALL(requireThatSummaryHandlesCjk());
+ TEST_CALL(requireThatSearchDataTypeUsesDefaultDataTypes());
+ TEST_CALL(requireThatLinguisticsAnnotationUsesDefaultDataTypes());
+ TEST_CALL(requireThatPredicateIsPrinted());
+ TEST_CALL(requireThatTensorIsPrinted());
+
+ TEST_DONE();
+}
+
+// Per-test setup: fresh schema and summarymap builder for each case.
+void Test::setUp() {
+ _schema.reset(new Schema);
+ _summarymap.reset(new SummarymapConfigBuilder);
+}
+
+// Per-test teardown: nothing to release; state is replaced in setUp().
+void Test::tearDown() {
+}
+
+// Looks up a data type by name in the document type's scope; asserts that
+// the type exists before dereferencing.
+const DataType &Test::getDataType(const string &name) const {
+ const DataType *type = _documentRepo->getDataType(*_documentType, name);
+ ASSERT_TRUE(type);
+ return *type;
+}
+
+// Small helper to wrap a raw pointer in a unique_ptr at the call site.
+template <typename T>
+std::unique_ptr<T> makeUP(T *p) { return std::unique_ptr<T>(p); }
+
+// Builds "Foo Bar Baz" with TERM annotations covering several cases:
+// out-of-order spans, a term with replacement text, multiple annotations on
+// the same span, and overlapping spans.
+StringFieldValue Test::makeAnnotatedString() {
+ SpanList *span_list = new SpanList;
+ SpanTree::UP tree(new SpanTree(SPANTREE_NAME, makeUP(span_list)));
+ // Annotations don't have to be added sequentially.
+ tree->annotate(span_list->add(makeUP(new Span(8, 3))),
+ makeUP(new Annotation(*TERM,
+ makeUP(new StringFieldValue(
+ "Annotation")))));
+ tree->annotate(span_list->add(makeUP(new Span(0, 3))), *TERM);
+ tree->annotate(span_list->add(makeUP(new Span(4, 3))), *TERM);
+ tree->annotate(span_list->add(makeUP(new Span(4, 3))),
+ makeUP(new Annotation(*TERM,
+ makeUP(new StringFieldValue(
+ "Multiple")))));
+ tree->annotate(span_list->add(makeUP(new Span(1, 2))),
+ makeUP(new Annotation(*TERM,
+ makeUP(new StringFieldValue(
+ "Overlap")))));
+ StringFieldValue value("Foo Bar Baz");
+ setSpanTree(value, std::move(tree));
+ return value;
+}
+
+// Wraps `term` in a StringFieldValue whose whole content carries a single
+// TERM annotation.
+StringFieldValue Test::annotateTerm(const string &term) {
+ SpanTree::UP tree(new SpanTree(SPANTREE_NAME, makeUP(new Span(0, term.size()))));
+ tree->annotate(tree->getRoot(), *TERM);
+ StringFieldValue value(term);
+ setSpanTree(value, std::move(tree));
+ return value;
+}
+
+// Attaches a span tree to `value`, using the fixed repo so annotation types
+// resolve against the test's document type.
+void Test::setSpanTree(StringFieldValue & value, SpanTree::UP tree) {
+ StringFieldValue::SpanTrees trees;
+ trees.push_back(std::move(tree));
+ value.setSpanTrees(trees, _fixedRepo);
+}
+
+// Builds an annotated Chinese string; spans are byte offsets into the UTF-8
+// encoding (3 bytes per character here), splitting the text 5 + 3 chars.
+StringFieldValue Test::makeAnnotatedChineseString() {
+ SpanList *span_list = new SpanList;
+ SpanTree::UP tree(new SpanTree(SPANTREE_NAME, makeUP(span_list)));
+ // These chinese characters each use 3 bytes in their UTF8 encoding.
+ tree->annotate(span_list->add(makeUP(new Span(0, 15))), *TERM);
+ tree->annotate(span_list->add(makeUP(new Span(15, 9))), *TERM);
+ StringFieldValue value("我就是那个大灰狼");
+ setSpanTree(value, std::move(tree));
+ return value;
+}
+
+// Builds the document used by every test case: annotated strings, string
+// array/wset, positions (plain int and zcurve-encoded longs), uri structs,
+// and one value of each primitive type.
+// NOTE(review): the "empty", "predicate" and "tensor" fields from the type
+// config are left unset here — presumably set by the individual tests that
+// need them; confirm against those test bodies.
+Document Test::makeDocument() {
+ Document doc(getDocType(), DocumentId("doc:scheme:"));
+ doc.setRepo(*_documentRepo);
+ doc.setValue("string", makeAnnotatedString());
+
+ doc.setValue("plain_string", StringFieldValue("Plain"));
+
+ ArrayFieldValue array(getDataType("Array<String>"));
+ array.add(annotateTerm("\"foO\""));
+ array.add(annotateTerm("ba\\R"));
+ doc.setValue("string_array", array);
+
+ WeightedSetFieldValue wset(getDataType("WeightedSet<String>"));
+ wset.add(annotateTerm("\"foo\""), 2);
+ wset.add(annotateTerm("ba\\r"), 4);
+ doc.setValue("string_wset", wset);
+
+ doc.setValue("position1", IntFieldValue(5));
+
+ doc.setValue("position2", LongFieldValue(ZCurve::encode(4, 2)));
+
+ StructFieldValue uri(getDataType("url"));
+ uri.setValue("all", annotateTerm("http://www.yahoo.com:42/foobar?q#frag"));
+ uri.setValue("scheme", annotateTerm("http"));
+ uri.setValue("host", annotateTerm("www.yahoo.com"));
+ uri.setValue("port", annotateTerm("42"));
+ uri.setValue("path", annotateTerm("foobar"));
+ uri.setValue("query", annotateTerm("q"));
+ uri.setValue("fragment", annotateTerm("frag"));
+ doc.setValue("uri", uri);
+
+ // The same `uri` struct is mutated in place between the two adds; only
+ // the overwritten sub-fields differ between the array elements.
+ ArrayFieldValue uri_array(getDataType("Array<url>"));
+ uri.setValue("all", annotateTerm("http://www.yahoo.com:80/foobar?q#frag"));
+ uri.setValue("port", annotateTerm("80"));
+ uri_array.add(uri);
+ uri.setValue("all", annotateTerm("https://www.yahoo.com:443/foo?q#frag"));
+ uri.setValue("scheme", annotateTerm("https"));
+ uri.setValue("path", annotateTerm("foo"));
+ uri.setValue("port", annotateTerm("443"));
+ uri_array.add(uri);
+ doc.setValue("uri_array", uri_array);
+
+ ArrayFieldValue position2_array(getDataType("Array<Long>"));
+ position2_array.add(LongFieldValue(ZCurve::encode(4, 2)));
+ position2_array.add(LongFieldValue(ZCurve::encode(4, 4)));
+ doc.setValue("position2_array", position2_array);
+
+ WeightedSetFieldValue position2_wset(getDataType("WeightedSet<Long>"));
+ position2_wset.add(LongFieldValue(ZCurve::encode(4, 2)), 4);
+ position2_wset.add(LongFieldValue(ZCurve::encode(4, 4)), 2);
+ doc.setValue("position2_wset", position2_wset);
+
+ doc.setValue("int", IntFieldValue(42));
+ doc.setValue("long", LongFieldValue(84));
+ doc.setValue("short", ShortFieldValue(21));
+ doc.setValue("byte", ByteFieldValue(11));
+ doc.setValue("double", DoubleFieldValue(0.4));
+ doc.setValue("float", FloatFieldValue(0.2f));
+
+ doc.setValue("chinese", makeAnnotatedChineseString());
+ return doc;
+}
+
+// Fetches field `field_name` from `doc` and returns it as T; asserts that
+// the value exists and has the expected dynamic type.
+template <typename T>
+T Test::getValueAs(const string &field_name, const Document &doc) {
+ FieldValue::UP fv(doc.getValue(field_name));
+ const T *value = dynamic_cast<const T *>(fv.get());
+ ASSERT_TRUE(value);
+ return *value;
+}
+
+// Casts an already-converted field value to T (asserts presence and type).
+template <typename T>
+T
+Test::cvtValueAs(const FieldValue::UP &fv)
+{
+ ASSERT_TRUE(fv.get() != NULL);
+ const T *value = dynamic_cast<const T *>(fv.get());
+ ASSERT_TRUE(value);
+ return *value;
+}
+
+// Attribute-side variant; currently just forwards to cvtValueAs after the
+// non-null check.
+template <typename T>
+T
+Test::cvtAttributeAs(const FieldValue::UP &fv)
+{
+ ASSERT_TRUE(fv.get() != NULL);
+ return cvtValueAs<T>(fv);
+}
+
+// Runs the summary conversion (optionally with markup handling) on `fv`
+// and returns the result as T.
+template <typename T>
+T
+Test::cvtSummaryAs(bool markup, const FieldValue::UP &fv)
+{
+ ASSERT_TRUE(fv.get() != NULL);
+ FieldValue::UP r = SFC::convertSummaryField(markup, *fv, false);
+ return cvtValueAs<T>(r);
+}
+
+// Asserts that `value` is a StringFieldValue equal to `str`.
+void Test::checkString(const string &str, const FieldValue *value) {
+ ASSERT_TRUE(value);
+ const StringFieldValue *s = dynamic_cast<const StringFieldValue *>(value);
+ ASSERT_TRUE(s);
+ // fprintf(stderr, ">>>%s<<< >>>%s<<<\n", str.c_str(), s->getValue().c_str());
+ EXPECT_EQUAL(str, s->getValue());
+}
+
+// Asserts that `value` is a RawFieldValue whose bytes equal the contents
+// of `buf`.
+void Test::checkData(const search::RawBuf &buf, const FieldValue *value) {
+ ASSERT_TRUE(value);
+ const RawFieldValue *s = dynamic_cast<const RawFieldValue *>(value);
+ ASSERT_TRUE(s);
+ auto got = s->getAsRaw();
+ EXPECT_EQUAL(buf.GetUsedLen(), got.second);
+ EXPECT_TRUE(memcmp(buf.GetDrainPos(), got.first, got.second) == 0);
+}
+
+// Asserts that `value` is a one-element string array containing `str`.
+void Test::checkArray(const string &str, const FieldValue *value) {
+ ASSERT_TRUE(value);
+ const ArrayFieldValue *a = dynamic_cast<const ArrayFieldValue *>(value);
+ ASSERT_TRUE(a);
+ EXPECT_EQUAL(1u, a->size());
+ checkString(str, &(*a)[0]);
+}
+
+// Asserts that `value` is an N-element string array matching `str` in order.
+template <unsigned int N>
+void Test::checkArray(const char *(&str)[N], const FieldValue *value) {
+ ASSERT_TRUE(value);
+ const ArrayFieldValue *a = dynamic_cast<const ArrayFieldValue *>(value);
+ ASSERT_TRUE(a);
+ EXPECT_EQUAL(N, a->size());
+ for (size_t i = 0; i < a->size() && i < N; ++i) {
+ checkString(str[i], &(*a)[i]);
+ }
+}
+
+// Fetches field `name` from `doc` as an embedded Document value.
+Document Test::getDoc(const string &name, const Document *doc) {
+ ASSERT_TRUE(doc);
+ return getValueAs<Document>(name, *doc);
+}
+
+// Schema helpers: register `field` as string index/summary/attribute field
+// in the per-test schema.
+void Test::setIndexField(const string &field) {
+ _schema->addIndexField(
+ Schema::IndexField(field, Schema::STRING));
+}
+
+void Test::setSummaryField(const string &field) {
+ _schema->addSummaryField(Schema::Field(field, Schema::STRING));
+}
+
+void Test::setAttributeField(const string &field) {
+ _schema->addAttributeField(Schema::Field(field, Schema::STRING));
+}
+
+// A plain string summary field must pass through the converter unchanged.
+void Test::requireThatSummaryIsAnUnmodifiedString() {
+ setSummaryField("string");
+ Document summary = makeDocument();
+ checkString("Foo Bar Baz", SFC::convertSummaryField(false,
+ *summary.getValue("string"),
+ false).get());
+}
+
+// A plain string attribute field is read back directly, with no conversion.
+void Test::requireThatAttributeIsAnUnmodifiedString() {
+ setAttributeField("string");
+ Document attribute = makeDocument();
+ checkString("Foo Bar Baz",
+ attribute.getValue("string").get());
+}
+
+// Array summary fields are flattened to a JSON array; both the string (json)
++// and binary (slime) renderings produced by the converter are verified.
+void Test::requireThatArrayIsFlattenedInSummaryField() {
+ setSummaryField("string_array");
+ Document summary = makeDocument();
+ FieldBlock expect("[\"\\\"foO\\\"\",\"ba\\\\R\"]");
+ checkString(expect.json,
+ SFC::convertSummaryField(false,
+ *summary.getValue("string_array"),
+ false).get());
+ checkData(expect.binary,
+ SFC::convertSummaryField(false,
+ *summary.getValue("string_array"),
+ true).get());
+}
+
+// Weighted-set summary fields are flattened to a JSON array of
+// {item, weight} objects; both renderings are verified.
+void Test::requireThatWeightedSetIsFlattenedInSummaryField() {
+ setSummaryField("string_wset");
+ Document summary = makeDocument();
+ FieldBlock expect("[{\"item\":\"\\\"foo\\\"\",\"weight\":2},{\"item\":\"ba\\\\r\",\"weight\":4}]");
+ checkString(expect.json,
+ SFC::convertSummaryField(false,
+ *summary.getValue("string_wset"),
+ false).get());
+ checkData(expect.binary,
+ SFC::convertSummaryField(false,
+ *summary.getValue("string_wset"),
+ true).get());
+}
+
+// Position fields are transformed to their numeric encodings in summaries:
+// position1 becomes int 5, position2 becomes long 24 (values set up by
+// makeDocument()).
+void Test::requireThatPositionsAreTransformedInSummary() {
+ setSummaryField("position1");
+ setSummaryField("position2");
+ Document summary = makeDocument();
+ FieldValue::UP fv = summary.getValue("position1");
+ EXPECT_EQUAL(5, cvtSummaryAs<IntFieldValue>(false, fv).getValue());
+ FieldValue::UP fv2 = summary.getValue("position2");
+ EXPECT_EQUAL(24, cvtSummaryAs<LongFieldValue>(false, fv2).getValue());
+}
+
+// Unlike summaries, attribute arrays are kept as arrays (not flattened).
+void Test::requireThatArrayIsPreservedInAttributeField() {
+ setAttributeField("string_array");
+ Document attribute = makeDocument();
+ const char *array[] = { "\"foO\"", "ba\\R" };
+ checkArray(array,
+ attribute.getValue("string_array").get());
+}
+
+// Position attribute fields are transformed to their numeric encodings.
+void Test::requireThatPositionsAreTransformedInAttributeField() {
+ setAttributeField("position1");
+ setAttributeField("position2");
+ Document attr = makeDocument();
+ FieldValue::UP fv = attr.getValue("position1");
+ EXPECT_EQUAL(5, cvtAttributeAs<IntFieldValue>(fv).getValue());
+ fv = attr.getValue("position2");
+ EXPECT_EQUAL(24, cvtAttributeAs<LongFieldValue>(fv).getValue());
+}
+
+// An array of positions becomes an array of long encodings (24, 48).
+void Test::requireThatPositionArrayIsTransformedInAttributeField() {
+ setAttributeField("position2_array");
+ Document attr = makeDocument();
+ FieldValue::UP fv = attr.getValue("position2_array");
+ ArrayFieldValue a = cvtAttributeAs<ArrayFieldValue>(fv);
+ EXPECT_EQUAL(2u, a.size());
+ EXPECT_EQUAL(24, dynamic_cast<LongFieldValue &>(a[0]).getValue());
+ EXPECT_EQUAL(48, dynamic_cast<LongFieldValue &>(a[1]).getValue());
+}
+
+// A weighted set of positions keeps weights but transforms each key to its
+// long encoding: {24 -> 4, 48 -> 2}. Iteration order is relied upon here;
+// presumably the underlying map iterates keys in a deterministic order.
+void Test::requireThatPositionWeightedSetIsTransformedInAttributeField() {
+ setAttributeField("position2_wset");
+ Document attr = makeDocument();
+ FieldValue::UP fv = attr.getValue("position2_wset");
+ WeightedSetFieldValue w = cvtAttributeAs<WeightedSetFieldValue>(fv);
+ EXPECT_EQUAL(2u, w.size());
+ WeightedSetFieldValue::iterator it = w.begin();
+ EXPECT_EQUAL(24, dynamic_cast<const LongFieldValue&>(*it->first).getValue());
+ EXPECT_EQUAL(4, dynamic_cast<IntFieldValue &>(*it->second).getValue());
+ ++it;
+ EXPECT_EQUAL(48, dynamic_cast<const LongFieldValue&>(*it->first).getValue());
+ EXPECT_EQUAL(2, dynamic_cast<IntFieldValue &>(*it->second).getValue());
+}
+
+// Each primitive attribute type round-trips as its matching FieldValue type.
+// Expected values (42, 84, 21, 11, 0.4, 0.2f) are those set by makeDocument().
+void Test::requireThatAttributeCanBePrimitiveTypes() {
+ setAttributeField("int");
+ setAttributeField("long");
+ setAttributeField("short");
+ setAttributeField("byte");
+ setAttributeField("double");
+ setAttributeField("float");
+ Document attribute = makeDocument();
+ FieldValue::UP fv = attribute.getValue("int");
+ EXPECT_EQUAL(42, cvtAttributeAs<IntFieldValue>(fv).getValue());
+ fv = attribute.getValue("long");
+ EXPECT_EQUAL(84, cvtAttributeAs<LongFieldValue>(fv).getValue());
+ fv = attribute.getValue("short");
+ EXPECT_EQUAL(21, cvtAttributeAs<ShortFieldValue>(fv).getValue());
+ fv = attribute.getValue("byte");
+ EXPECT_EQUAL(11, cvtAttributeAs<ByteFieldValue>(fv).getValue());
+ fv = attribute.getValue("double");
+ // Exact float comparison is OK here: values round-trip unmodified.
+ EXPECT_EQUAL(0.4, cvtAttributeAs<DoubleFieldValue>(fv).getValue());
+ fv = attribute.getValue("float");
+ EXPECT_EQUAL(0.2f, cvtAttributeAs<FloatFieldValue>(fv).getValue());
+}
+
+// Same values via the summary conversion path.
+void Test::requireThatSummaryCanBePrimitiveTypes() {
+ setSummaryField("int");
+ setSummaryField("long");
+ setSummaryField("short");
+ setSummaryField("byte");
+ setSummaryField("double");
+ setSummaryField("float");
+ Document summary = makeDocument();
+ FieldValue::UP fv = summary.getValue("int");
+ EXPECT_EQUAL(42, cvtSummaryAs<IntFieldValue>(false, fv).getValue());
+ fv = summary.getValue("long");
+ EXPECT_EQUAL(84, cvtSummaryAs<LongFieldValue>(false, fv).getValue());
+ fv = summary.getValue("short");
+ EXPECT_EQUAL(21, cvtSummaryAs<ShortFieldValue>(false, fv).getValue());
+ fv = summary.getValue("byte");
+ // NOTE(review): the byte field is checked as ShortFieldValue here, while the
+ // attribute test above uses ByteFieldValue — presumably summary conversion
+ // widens byte to short; confirm this asymmetry is intended.
+ EXPECT_EQUAL(11, cvtSummaryAs<ShortFieldValue>(false, fv).getValue());
+ fv = summary.getValue("double");
+ EXPECT_EQUAL(0.4, cvtSummaryAs<DoubleFieldValue>(false, fv).getValue());
+ fv = summary.getValue("float");
+ EXPECT_EQUAL(0.2f, cvtSummaryAs<FloatFieldValue>(false, fv).getValue());
+}
+
+// CJK text with markup enabled keeps the 0x1F separator bytes intact.
+void Test::requireThatSummaryHandlesCjk() {
+ Document summary = makeDocument();
+ FieldValue::UP fv = summary.getValue("chinese");
+ EXPECT_EQUAL("我就是那个\037大灰狼\037",
+ cvtSummaryAs<StringFieldValue>(true, fv).getValue());
+}
+
+// The built-in URI struct type must expose all seven URI sub-fields, each
+// typed as a plain string.
+void Test::requireThatSearchDataTypeUsesDefaultDataTypes() {
+ const StructDataType *uri =
+ dynamic_cast<const StructDataType *>(SearchDataType::URI);
+ ASSERT_TRUE(uri);
+ ASSERT_TRUE(uri->hasField("all"));
+ ASSERT_TRUE(uri->hasField("scheme"));
+ ASSERT_TRUE(uri->hasField("host"));
+ ASSERT_TRUE(uri->hasField("port"));
+ ASSERT_TRUE(uri->hasField("path"));
+ ASSERT_TRUE(uri->hasField("query"));
+ ASSERT_TRUE(uri->hasField("fragment"));
+ EXPECT_EQUAL(*DataType::STRING, uri->getField("all").getDataType());
+ EXPECT_EQUAL(*DataType::STRING, uri->getField("scheme").getDataType());
+ EXPECT_EQUAL(*DataType::STRING, uri->getField("host").getDataType());
+ EXPECT_EQUAL(*DataType::STRING, uri->getField("port").getDataType());
+ EXPECT_EQUAL(*DataType::STRING, uri->getField("path").getDataType());
+ EXPECT_EQUAL(*DataType::STRING, uri->getField("query").getDataType());
+ EXPECT_EQUAL(*DataType::STRING, uri->getField("fragment").getDataType());
+}
+
+// The linguistics TERM annotation type must agree with the document model's
+// default TERM annotation, both in identity and in data type.
+void Test::requireThatLinguisticsAnnotationUsesDefaultDataTypes() {
+ EXPECT_EQUAL(*AnnotationType::TERM, *linguistics::TERM);
+ ASSERT_TRUE(AnnotationType::TERM->getDataType());
+ ASSERT_TRUE(linguistics::TERM->getDataType());
+ EXPECT_EQUAL(*AnnotationType::TERM->getDataType(),
+ *linguistics::TERM->getDataType());
+}
+
+// A predicate field built from a slime FEATURE_SET node ("foo" in ["bar"])
+// must render through the summary converter as its textual predicate form.
+void
+Test::requireThatPredicateIsPrinted()
+{
+ std::unique_ptr<Slime> input(new Slime());
+ Cursor &obj = input->setObject();
+ obj.setLong(Predicate::NODE_TYPE, Predicate::TYPE_FEATURE_SET);
+ obj.setString(Predicate::KEY, "foo");
+ Cursor &arr = obj.setArray(Predicate::SET);
+ arr.addString("bar");
+
+ Document doc(getDocType(), DocumentId("doc:scheme:"));
+ doc.setRepo(*_documentRepo);
+ doc.setValue("predicate", PredicateFieldValue(std::move(input)));
+
+ checkString("'foo' in ['bar']\n",
+ SFC::convertSummaryField(false, *doc.getValue("predicate"), false).get());
+}
+
+
+// Build a tensor from (cells, dimensions) using the default tensor builder.
+Tensor::UP
+createTensor(const TensorCells &cells, const TensorDimensions &dimensions) {
+ vespalib::tensor::DefaultTensor::builder builder;
+ return vespalib::tensor::TensorFactory::create(cells, dimensions, builder);
+}
+
+// A single-cell tensor must render as its json/binary verbose form, and an
+// empty (default-constructed) tensor field must render as "{ }".
+void
+Test::requireThatTensorIsPrinted()
+{
+ TensorFieldValue tensorFieldValue;
+ tensorFieldValue = createTensor({ {{{"x", "4"}, {"y", "5"}}, 7} },
+ {"x", "y"});
+ Document doc(getDocType(), DocumentId("doc:scheme:"));
+ doc.setRepo(*_documentRepo);
+ doc.setValue("tensor", tensorFieldValue);
+
+ FieldBlock expect1("{ dimensions: [ 'x', 'y' ], cells: ["
+ "{ address: { x:'4', y:'5' }, value: 7.0 }"
+ "] }");
+
+ TEST_CALL(checkString(expect1.json,
+ SFC::convertSummaryField(false,
+ *doc.getValue("tensor"),
+ false).get()));
+ TEST_CALL(checkData(expect1.binary,
+ SFC::convertSummaryField(false,
+ *doc.getValue("tensor"),
+ true).get()));
+ // Overwrite with an empty tensor and re-check both renderings.
+ doc.setValue("tensor", TensorFieldValue());
+
+ FieldBlock expect2("{ }");
+
+ TEST_CALL(checkString(expect2.json,
+ SFC::convertSummaryField(false,
+ *doc.getValue("tensor"),
+ false).get()));
+ TEST_CALL(checkData(expect2.binary,
+ SFC::convertSummaryField(false,
+ *doc.getValue("tensor"),
+ true).get()));
+}
+
+} // namespace
+
+TEST_APPHOOK(Test);
diff --git a/searchcore/src/tests/proton/docsummary/summarymap.cfg b/searchcore/src/tests/proton/docsummary/summarymap.cfg
new file mode 100644
index 00000000000..f2d429b1412
--- /dev/null
+++ b/searchcore/src/tests/proton/docsummary/summarymap.cfg
@@ -0,0 +1,48 @@
+override[16]
+override[0].field "aa"
+override[0].command "copy"
+override[0].arguments "ab"
+override[1].field "ab"
+override[1].command "empty"
+override[2].field "ba"
+override[2].command "attribute"
+override[2].arguments "ba"
+override[3].field "bb"
+override[3].command "attribute"
+override[3].arguments "bb"
+override[4].field "bc"
+override[4].command "attribute"
+override[4].arguments "bc"
+override[5].field "bd"
+override[5].command "attribute"
+override[5].arguments "bd"
+override[6].field "be"
+override[6].command "attribute"
+override[6].arguments "be"
+override[7].field "bf"
+override[7].command "attribute"
+override[7].arguments "bf"
+override[8].field "bg"
+override[8].command "attribute"
+override[8].arguments "bg"
+override[9].field "bh"
+override[9].command "attribute"
+override[9].arguments "bh"
+override[10].field "bi"
+override[10].command "attribute"
+override[10].arguments "bi"
+override[11].field "dynamicstring"
+override[11].command "dynamicteaser"
+override[11].arguments "dynamicstring"
+override[12].field "sp2x"
+override[12].command "positions"
+override[12].arguments "sp2"
+override[13].field "ap2x"
+override[13].command "positions"
+override[13].arguments "ap2"
+override[14].field "wp2x"
+override[14].command "positions"
+override[14].arguments "wp2"
+override[15].field "bj"
+override[15].command "attribute"
+override[15].arguments "bj"
diff --git a/searchcore/src/tests/proton/document_iterator/.gitignore b/searchcore/src/tests/proton/document_iterator/.gitignore
new file mode 100644
index 00000000000..323a5d517ba
--- /dev/null
+++ b/searchcore/src/tests/proton/document_iterator/.gitignore
@@ -0,0 +1 @@
+searchcore_document_iterator_test_app
diff --git a/searchcore/src/tests/proton/document_iterator/CMakeLists.txt b/searchcore/src/tests/proton/document_iterator/CMakeLists.txt
new file mode 100644
index 00000000000..03d910ef02e
--- /dev/null
+++ b/searchcore/src/tests/proton/document_iterator/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_document_iterator_test_app
+ SOURCES
+ document_iterator_test.cpp
+ DEPENDS
+ searchcore_persistenceengine
+ searchcore_pcommon
+)
+vespa_add_test(NAME searchcore_document_iterator_test_app COMMAND searchcore_document_iterator_test_app)
diff --git a/searchcore/src/tests/proton/document_iterator/FILES b/searchcore/src/tests/proton/document_iterator/FILES
new file mode 100644
index 00000000000..351464d9f46
--- /dev/null
+++ b/searchcore/src/tests/proton/document_iterator/FILES
@@ -0,0 +1 @@
+document_iterator_test.cpp
diff --git a/searchcore/src/tests/proton/document_iterator/document_iterator_test.cpp b/searchcore/src/tests/proton/document_iterator/document_iterator_test.cpp
new file mode 100644
index 00000000000..e0d92cd2a1a
--- /dev/null
+++ b/searchcore/src/tests/proton/document_iterator/document_iterator_test.cpp
@@ -0,0 +1,888 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/document/base/field.h>
+#include <vespa/document/datatype/documenttype.h>
+#include <vespa/document/fieldset/fieldsets.h>
+#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/persistence/spi/bucket.h>
+#include <vespa/persistence/spi/docentry.h>
+#include <vespa/persistence/spi/result.h>
+#include <persistence/spi/types.h>
+#include <vespa/searchcore/proton/persistenceengine/document_iterator.h>
+#include <vespa/searchcore/proton/persistenceengine/i_document_retriever.h>
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/searchlib/attribute/attributecontext.h>
+#include <vespa/searchcore/proton/common/attrupdate.h>
+#include <vespa/searchlib/attribute/attributefactory.h>
+#include <vespa/searchcore/proton/server/commit_and_wait_document_retriever.h>
+
+using document::DocumentType;
+using document::Field;
+using namespace proton;
+
+using document::DataType;
+using document::Document;
+using document::DocumentId;
+using document::BucketId;
+using document::IntFieldValue;
+using document::DoubleFieldValue;
+using document::StringFieldValue;
+using search::DocumentIdT;
+using search::DocumentMetaData;
+using search::AttributeContext;
+using search::AttributeEnumGuard;
+using search::AttributeGuard;
+using search::AttributeVector;
+using search::attribute::BasicType;
+using search::attribute::CollectionType;
+using search::attribute::Config;
+using search::attribute::IAttributeContext;
+using search::index::Schema;
+using storage::spi::Timestamp;
+using storage::spi::Bucket;
+using storage::spi::PartitionId;
+using storage::spi::IterateResult;
+using storage::spi::DocEntry;
+using storage::spi::Selection;
+using storage::spi::DocumentSelection;
+using storage::spi::IncludedVersions;
+
+const uint64_t largeNum = 10000000;
+
+// Make a Bucket for bucket id 'x' on partition 0.
+Bucket bucket(size_t x) {
+ return Bucket(BucketId(x), PartitionId(0));
+}
+
+// Selection matching every document (empty document-selection string).
+Selection selectAll() {
+ return Selection(DocumentSelection(""));
+}
+
+// Select-all restricted to timestamps in [min, max].
+Selection selectTimestampRange(uint64_t min, uint64_t max) {
+ Selection sel(DocumentSelection(""));
+ sel.setFromTimestamp(Timestamp(min));
+ sel.setToTimestamp(Timestamp(max));
+ return sel;
+}
+
+// Select-all restricted to the explicit timestamp subset {a, b, c}.
+Selection selectTimestampSet(uint64_t a, uint64_t b, uint64_t c) {
+ Selection sel(DocumentSelection(""));
+ Selection::TimestampSubset subset;
+ subset.push_back(Timestamp(a));
+ subset.push_back(Timestamp(b));
+ subset.push_back(Timestamp(c));
+ sel.setTimestampSubset(subset);
+ return sel;
+}
+
+// Selection from a document-selection expression string.
+Selection selectDocs(const std::string &docSel) {
+ return Selection(DocumentSelection(docSel));
+}
+
+// Document-selection expression combined with a timestamp range [min, max].
+Selection selectDocsWithinRange(const std::string &docSel, uint64_t min, uint64_t max) {
+ Selection sel((DocumentSelection(docSel)));
+ sel.setFromTimestamp(Timestamp(min));
+ sel.setToTimestamp(Timestamp(max));
+ return sel;
+}
+
+// Shorthands for the three IncludedVersions modes used by the tests.
+IncludedVersions docV() {
+ return storage::spi::NEWEST_DOCUMENT_ONLY;
+}
+
+IncludedVersions newestV() {
+ return storage::spi::NEWEST_DOCUMENT_OR_REMOVE;
+}
+
+IncludedVersions allV() {
+ return storage::spi::ALL_VERSIONS;
+}
+
+// Minimal single-document retriever stub: holds exactly one document with a
+// timestamp, bucket, remove-flag and an auto-assigned lid (from _docidCnt).
+struct UnitDR : DocumentRetrieverBaseForTest {
+ static DocumentIdT _docidCnt;
+
+ document::DocumentTypeRepo repo;
+ document::Document::UP document;
+ Timestamp timestamp;
+ Bucket bucket;
+ bool removed;
+ DocumentIdT docid;
+
+ UnitDR()
+ : repo(), document(new Document(*DataType::DOCUMENT, DocumentId())),
+ timestamp(0), bucket(), removed(false), docid(0) {}
+ UnitDR(document::Document::UP d, Timestamp t, Bucket b, bool r)
+ : repo(), document(std::move(d)), timestamp(t), bucket(b), removed(r), docid(++_docidCnt) {}
+ UnitDR(const document::DocumentType &dt, document::Document::UP d, Timestamp t, Bucket b, bool r)
+ : repo(dt), document(std::move(d)), timestamp(t), bucket(b), removed(r), docid(++_docidCnt) {}
+
+ const document::DocumentTypeRepo &getDocumentTypeRepo() const override {
+ return repo;
+ }
+ // Appends this retriever's meta data iff the queried bucket matches.
+ void getBucketMetaData(const Bucket &b, DocumentMetaData::Vector &result) const override
+ {
+ if (b == bucket) {
+ result.push_back(DocumentMetaData(docid, timestamp, bucket, document->getId().getGlobalId(), removed));
+ }
+ }
+ DocumentMetaData getDocumentMetaData(const document::DocumentId &id) const override {
+ if (document->getId() == id) {
+ return DocumentMetaData(docid, timestamp, bucket, document->getId().getGlobalId(), removed);
+ }
+ return DocumentMetaData();
+ }
+ // Returns a clone for a matching lid, otherwise a null pointer.
+ document::Document::UP getDocument(DocumentIdT lid) const override {
+ return Document::UP((lid == docid) ? document->clone() : 0);
+ }
+
+ CachedSelect::SP parseSelect(const vespalib::string &selection) const override {
+ CachedSelect::SP res(new CachedSelect);
+ res->set(selection, repo);
+ return res;
+ }
+
+ // Reset the shared lid counter between tests (lids restart at 3).
+ static void reset() { _docidCnt = 2; }
+};
+
+// UnitDR that records every lid actually fetched through getDocument(),
+// letting tests verify which documents an iterator touched.
+struct VisitRecordingUnitDR : UnitDR {
+ using VisitedLIDs = std::unordered_set<DocumentIdT>;
+ VisitedLIDs& visited_lids;
+
+ VisitRecordingUnitDR(VisitedLIDs& visited, document::Document::UP d,
+ Timestamp t, Bucket b, bool r)
+ : UnitDR(std::move(d), t, b, r),
+ visited_lids(visited)
+ {
+ }
+
+ document::Document::UP getDocument(DocumentIdT lid) const override {
+ if (lid == docid) {
+ visited_lids.insert(lid);
+ }
+ return UnitDR::getDocument(lid);
+ }
+};
+
+// Simple in-memory attribute manager backed by a name -> vector map; used to
+// feed attribute values into document selection in the tests below.
+class MyAttributeManager : public search::IAttributeManager
+{
+public:
+ typedef std::map<string, AttributeVector::SP> AttributeMap;
+
+ AttributeMap _attributes;
+
+ // Returns the named attribute, or an empty SP if not registered.
+ AttributeVector::SP
+ findAttribute(const vespalib::string &name) const {
+ AttributeMap::const_iterator itr = _attributes.find(name);
+ if (itr != _attributes.end()) {
+ return itr->second;
+ }
+ return AttributeVector::SP();
+ }
+
+ AttributeGuard::UP getAttribute(const string &name) const override {
+ AttributeVector::SP attr = findAttribute(name);
+ return AttributeGuard::UP(new AttributeGuard(attr));
+ }
+
+ AttributeGuard::UP getAttributeStableEnum(const string & name) const override {
+ AttributeVector::SP attr = findAttribute(name);
+ return AttributeGuard::UP(new AttributeEnumGuard(attr));
+ }
+
+ void getAttributeList(std::vector<AttributeGuard> & list) const override {
+ list.reserve(_attributes.size());
+ for (AttributeMap::const_iterator itr = _attributes.begin();
+ itr != _attributes.end();
+ ++itr) {
+ list.push_back(AttributeGuard(itr->second));
+ }
+ }
+
+ IAttributeContext::UP createContext() const override {
+ return IAttributeContext::UP(new AttributeContext(*this));
+ }
+
+ MyAttributeManager() : _attributes() { }
+
+ // Registers 'av' under 'name'; adds the reserved doc (lid 0) first so that
+ // real lids start at 1, matching the attribute vector convention.
+ void addAttribute(const string &name, const AttributeVector::SP &av) {
+ av->addReservedDoc();
+ _attributes[name] = av;
+ }
+};
+
+// UnitDR with three fast-search attributes (aa:int32, dd:double, ss:string),
+// so selections against attribute values can be parsed and evaluated.
+struct AttrUnitDR : public UnitDR
+{
+ MyAttributeManager _amgr;
+ Schema _schema;
+ AttributeVector::SP _aa;
+ AttributeVector::SP _dd;
+ AttributeVector::SP _ss;
+
+ // Attributes exist but hold no value for this doc (cleared/undefined).
+ AttrUnitDR(document::Document::UP d, Timestamp t, Bucket b, bool r)
+ : UnitDR(d->getType(), document::Document::UP(d->clone()), t, b, r),
+ _amgr(), _schema(), _aa(), _dd(), _ss()
+ {
+ createAttribute(_aa, BasicType::INT32, Schema::INT32, "aa");
+ createAttribute(_dd, BasicType::DOUBLE, Schema::DOUBLE, "dd");
+ createAttribute(_ss, BasicType::STRING, Schema::STRING, "ss");
+ }
+
+ // Attributes populated with the given values for this doc's lid.
+ AttrUnitDR(document::Document::UP d, Timestamp t, Bucket b, bool r,
+ int32_t aa, double dd, const vespalib::string &ss)
+ : UnitDR(d->getType(), document::Document::UP(d->clone()), t, b, r),
+ _amgr(), _schema(), _aa(), _dd(), _ss()
+ {
+ createAttribute(_aa, BasicType::INT32, Schema::INT32, "aa");
+ addAttribute<IntFieldValue, int32_t>(*_aa, aa);
+ createAttribute(_dd, BasicType::DOUBLE, Schema::DOUBLE, "dd");
+ addAttribute<DoubleFieldValue, double>(*_dd, dd);
+ createAttribute(_ss, BasicType::STRING, Schema::STRING, "ss");
+ addAttribute<StringFieldValue, vespalib::string>(*_ss, ss);
+ }
+
+ // Creates a single-value fast-search attribute, registers it in the
+ // manager and schema, and grows it until this doc's lid is valid.
+ void createAttribute(AttributeVector::SP &av, BasicType basicType,
+ Schema::DataType dataType, const vespalib::string &fieldName)
+ {
+ Config cfg(basicType, CollectionType::SINGLE);
+ cfg.setFastSearch(true);
+ av = search::AttributeFactory::createAttribute(fieldName, cfg);
+ _amgr.addAttribute(fieldName, av);
+ _schema.addAttributeField(Schema::AttributeField(fieldName, dataType));
+ while (docid >= av->getNumDocs()) {
+ AttributeVector::DocId checkDocId(0u);
+ ASSERT_TRUE(av->addDoc(checkDocId));
+ av->clearDoc(docid);
+ }
+ av->commit();
+ }
+
+ // Writes 'val' (wrapped as FieldValType) into 'av' at this doc's lid.
+ template <class FieldValType, typename FieldValArg>
+ void addAttribute(AttributeVector &av, const FieldValArg &val) {
+ search::AttrUpdate::handleValue(av, docid, FieldValType(val));
+ av.commit();
+ }
+
+ // Parses selections with schema + attribute manager so attribute terms
+ // ("foo.aa > 10" etc.) resolve against the vectors above.
+ CachedSelect::SP parseSelect(const vespalib::string &selection) const override {
+ CachedSelect::SP res(new CachedSelect);
+ res->set(selection, "foo", Document(document->getType(), DocumentId()), repo, _schema, &_amgr, true);
+ return res;
+ }
+};
+
+// Lid counter starts at 2 so the first assigned lid is 3 (see UnitDR::reset).
+DocumentIdT UnitDR::_docidCnt(2);
+
+// Retriever composing two children: meta data is concatenated, lookups try
+// 'first' and fall back to 'second'.
+struct PairDR : DocumentRetrieverBaseForTest {
+ IDocumentRetriever::SP first;
+ IDocumentRetriever::SP second;
+ PairDR(IDocumentRetriever::SP f, IDocumentRetriever::SP s)
+ : first(f), second(s) {}
+ const document::DocumentTypeRepo &getDocumentTypeRepo() const override {
+ return first->getDocumentTypeRepo();
+ }
+ void getBucketMetaData(const Bucket &b, DocumentMetaData::Vector &result) const override {
+ first->getBucketMetaData(b, result);
+ second->getBucketMetaData(b, result);
+ }
+ DocumentMetaData getDocumentMetaData(const document::DocumentId &id) const override {
+ DocumentMetaData ret = first->getDocumentMetaData(id);
+ return (ret.valid()) ? ret : second->getDocumentMetaData(id);
+ }
+ document::Document::UP getDocument(DocumentIdT lid) const override {
+ Document::UP ret = first->getDocument(lid);
+ return (ret.get() != 0) ? std::move(ret) : second->getDocument(lid);
+ }
+
+ CachedSelect::SP parseSelect(const vespalib::string &selection) const override {
+ CachedSelect::SP res(new CachedSelect);
+ res->set(selection, getDocumentTypeRepo());
+ return res;
+ }
+};
+
+// ICommitable spy counting commit() and commitAndWait() invocations, used to
+// verify the read-consistency behavior of DocumentIterator.
+struct Committer : public ICommitable {
+ size_t _commitCount;
+ size_t _commitAndWaitCount;
+ Committer() : _commitCount(0), _commitAndWaitCount(0) { }
+ void commit() override { _commitCount++; }
+ void commitAndWait() override { _commitAndWaitCount++; }
+};
+
+// Expected DocEntry sizes: base entry, entry + serialized document,
+// entry + serialized document id.
+size_t getSize() {
+ return sizeof(DocEntry);
+}
+
+size_t getSize(const document::Document &doc) {
+ vespalib::nbostream tmp;
+ doc.serialize(tmp);
+ return tmp.size() + getSize();
+}
+
+size_t getSize(const document::DocumentId &id) {
+ return id.getSerializedSize() + getSize();
+}
+
+// Retriever holding no real document (default UnitDR, lid 0).
+IDocumentRetriever::SP nil() { return IDocumentRetriever::SP(new UnitDR()); }
+
+// Retriever holding a live (put) document with the given id/timestamp/bucket.
+IDocumentRetriever::SP doc(const std::string &id, Timestamp t, Bucket b) {
+ Document::UP d(new Document(*DataType::DOCUMENT, DocumentId(id)));
+ return IDocumentRetriever::SP(new UnitDR(std::move(d), t, b, false));
+}
+
+// Retriever holding a remove entry (tombstone) for the given id.
+IDocumentRetriever::SP rem(const std::string &id, Timestamp t, Bucket b) {
+ Document::UP d(new Document(*DataType::DOCUMENT, DocumentId(id)));
+ return IDocumentRetriever::SP(new UnitDR(std::move(d), t, b, true));
+}
+
+// Compose two retrievers into one (see PairDR).
+IDocumentRetriever::SP cat(IDocumentRetriever::SP first, IDocumentRetriever::SP second) {
+ return IDocumentRetriever::SP(new PairDR(first, second));
+}
+
+// Lazily built "foo" document type with a header and a body string field.
+// Note: function-local static, so the type lives for the whole test run.
+const DocumentType &getDocType() {
+ static DocumentType::UP doc_type;
+ if (!doc_type.get()) {
+ doc_type.reset(new DocumentType("foo", 42));
+ doc_type->addField(Field("header", 43, *DataType::STRING, true));
+ doc_type->addField(Field("body", 44, *DataType::STRING, false));
+ }
+ return *doc_type;
+}
+
+// "foo" document type extended with the fields mirrored as attributes
+// (aa, ab, dd, ss) for the attribute-selection tests.
+const DocumentType &getAttrDocType() {
+ static DocumentType::UP doc_type;
+ if (!doc_type.get()) {
+ doc_type.reset(new DocumentType("foo", 42));
+ doc_type->addField(Field("header", 43, *DataType::STRING, true));
+ doc_type->addField(Field("body", 44, *DataType::STRING, false));
+ doc_type->addField(Field("aa", 45, *DataType::INT, false));
+ doc_type->addField(Field("ab", 46, *DataType::INT, false));
+ doc_type->addField(Field("dd", 47, *DataType::DOUBLE, false));
+ doc_type->addField(Field("ss", 48, *DataType::STRING, false));
+ }
+ return *doc_type;
+}
+
+// Live document with populated header/body fields.
+IDocumentRetriever::SP doc_with_fields(const std::string &id, Timestamp t, Bucket b) {
+ Document::UP d(new Document(getDocType(), DocumentId(id)));
+ d->set("header", "foo");
+ d->set("body", "bar");
+ return IDocumentRetriever::SP(new UnitDR(getDocType(), std::move(d), t, b, false));
+}
+
+// Live document with attributes created but left without values.
+IDocumentRetriever::SP doc_with_null_fields(const std::string &id, Timestamp t, Bucket b) {
+ Document::UP d(new Document(getAttrDocType(), DocumentId(id)));
+ return IDocumentRetriever::SP(new AttrUnitDR(std::move(d), t, b, false));
+}
+
+// Live document where document fields (aa/ab/dd/ss) and the corresponding
+// attribute values (attr_aa/attr_dd/attr_ss) can be set independently, to
+// test which source a selection reads from.
+IDocumentRetriever::SP doc_with_attr_fields(const vespalib::string &id,
+ Timestamp t, Bucket b,
+ int32_t aa, int32_t ab, int32_t attr_aa,
+ double dd, double attr_dd,
+ const vespalib::string &ss,
+ const vespalib::string &attr_ss)
+{
+ Document::UP d(new Document(getAttrDocType(), DocumentId(id)));
+ d->set("header", "foo");
+ d->set("body", "bar");
+ d->set("aa", aa);
+ d->set("ab", ab);
+ d->set("dd", dd);
+ d->set("ss", ss);
+ return IDocumentRetriever::SP(new AttrUnitDR(std::move(d), t, b, false,
+ attr_aa, attr_dd, attr_ss));
+}
+
+// Live document whose retriever records fetched lids in 'visited_lids'.
+auto doc_rec(VisitRecordingUnitDR::VisitedLIDs& visited_lids,
+ const std::string &id, Timestamp t, Bucket b)
+{
+ Document::UP d(new Document(getDocType(), DocumentId(id)));
+ return std::make_shared<VisitRecordingUnitDR>(
+ visited_lids, std::move(d), t, b, false);
+}
+
+// Verify a retriever's meta data and document for 'id' against the expected
+// timestamp, bucket id, and remove flag; also fetch and check the document.
+void checkDoc(const IDocumentRetriever &dr, const std::string &id,
+ size_t timestamp, size_t bucket, bool removed)
+{
+ DocumentMetaData dmd = dr.getDocumentMetaData(DocumentId(id));
+ EXPECT_TRUE(dmd.valid());
+ EXPECT_EQUAL(timestamp, dmd.timestamp);
+ EXPECT_EQUAL(bucket, dmd.bucketId.getId());
+ EXPECT_EQUAL(DocumentId(id).getGlobalId(), dmd.gid);
+ EXPECT_EQUAL(removed, dmd.removed);
+ Document::UP doc = dr.getDocument(dmd.lid);
+ ASSERT_TRUE(doc.get() != 0);
+ EXPECT_TRUE(DocumentId(id) == doc->getId());
+}
+
+// Expect entry idx to be a meta-data-only entry (timestamp + flags).
+void checkEntry(const IterateResult &res, size_t idx, const Timestamp &timestamp, int flags)
+{
+ ASSERT_LESS(idx, res.getEntries().size());
+ DocEntry expect(timestamp, flags);
+ EXPECT_EQUAL(expect, *res.getEntries()[idx]);
+ EXPECT_EQUAL(getSize(), res.getEntries()[idx]->getSize());
+}
+
+// Expect entry idx to be a remove entry for 'id' at 'timestamp'.
+void checkEntry(const IterateResult &res, size_t idx, const DocumentId &id, const Timestamp &timestamp)
+{
+ ASSERT_LESS(idx, res.getEntries().size());
+ DocEntry expect(timestamp, storage::spi::REMOVE_ENTRY, id);
+ EXPECT_EQUAL(expect, *res.getEntries()[idx]);
+ EXPECT_EQUAL(getSize(id), res.getEntries()[idx]->getSize());
+ EXPECT_GREATER(getSize(id), 0u);
+}
+
+// Expect entry idx to be a put entry carrying a copy of 'doc' at 'timestamp'.
+void checkEntry(const IterateResult &res, size_t idx, const Document &doc, const Timestamp &timestamp)
+{
+ ASSERT_LESS(idx, res.getEntries().size());
+ DocEntry expect(timestamp, storage::spi::NONE, Document::UP(doc.clone()));
+ EXPECT_EQUAL(expect, *res.getEntries()[idx]);
+ EXPECT_EQUAL(getSize(doc), res.getEntries()[idx]->getSize());
+ EXPECT_GREATER(getSize(doc), 0u);
+}
+
+// Sanity-check the test fixtures themselves: composed retrievers resolve
+// ids/lids correctly and report per-bucket meta data for both children.
+TEST("require that custom retrievers work as expected") {
+ IDocumentRetriever::SP dr =
+ cat(cat(doc("doc:foo:1", Timestamp(2), bucket(5)),
+ rem("doc:foo:2", Timestamp(3), bucket(5))),
+ cat(doc("doc:foo:3", Timestamp(7), bucket(6)),
+ nil()));
+ EXPECT_FALSE(dr->getDocumentMetaData(DocumentId("doc:foo:bogus")).valid());
+ // Lids 1 and 2 predate this retriever chain (counter starts at 2), so
+ // only lid 3 resolves to a document here.
+ EXPECT_TRUE(dr->getDocument(1).get() == 0);
+ EXPECT_TRUE(dr->getDocument(2).get() == 0);
+ EXPECT_TRUE(dr->getDocument(3).get() != 0);
+ TEST_DO(checkDoc(*dr, "doc:foo:1", 2, 5, false));
+ TEST_DO(checkDoc(*dr, "doc:foo:2", 3, 5, true));
+ TEST_DO(checkDoc(*dr, "doc:foo:3", 7, 6, false));
+ DocumentMetaData::Vector b5;
+ DocumentMetaData::Vector b6;
+ dr->getBucketMetaData(bucket(5), b5);
+ dr->getBucketMetaData(bucket(6), b6);
+ ASSERT_EQUAL(2u, b5.size());
+ ASSERT_EQUAL(1u, b6.size());
+ // Order-independent check: timestamps 2 and 3 sum to 5.
+ EXPECT_EQUAL(5u, b5[0].timestamp + b5[1].timestamp);
+ EXPECT_EQUAL(7u, b6[0].timestamp);
+}
+
+TEST("require that an empty list of retrievers can be iterated") {
+ DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
+ IterateResult res = itr.iterate(largeNum);
+ EXPECT_EQUAL(0u, res.getEntries().size());
+ EXPECT_TRUE(res.isCompleted());
+}
+
+TEST("require that a list of empty retrievers can be iterated") {
+ DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
+ itr.add(nil());
+ itr.add(nil());
+ itr.add(nil());
+ IterateResult res = itr.iterate(largeNum);
+ EXPECT_EQUAL(0u, res.getEntries().size());
+ EXPECT_TRUE(res.isCompleted());
+}
+
+// Iterating puts from multiple (nested) retrievers yields one entry per doc.
+TEST("require that normal documents can be iterated") {
+ DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
+ itr.add(doc("doc:foo:1", Timestamp(2), bucket(5)));
+ itr.add(cat(doc("doc:foo:2", Timestamp(3), bucket(5)),
+ doc("doc:foo:3", Timestamp(4), bucket(5))));
+ IterateResult res = itr.iterate(largeNum);
+ EXPECT_TRUE(res.isCompleted());
+ EXPECT_EQUAL(3u, res.getEntries().size());
+ TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:1")), Timestamp(2)));
+ TEST_DO(checkEntry(res, 1, Document(*DataType::DOCUMENT, DocumentId("doc:foo:2")), Timestamp(3)));
+ TEST_DO(checkEntry(res, 2, Document(*DataType::DOCUMENT, DocumentId("doc:foo:3")), Timestamp(4)));
+}
+
+// Drive one iteration through a CommitAndWaitDocumentRetriever and verify the
+// document comes back; plain commit() must never be called on this path.
+void verifyReadConsistency(DocumentIterator & itr, Committer & committer) {
+ IDocumentRetriever::SP retriever = doc("doc:foo:1", Timestamp(2), bucket(5));
+ IDocumentRetriever::SP commitAndWaitRetriever(new CommitAndWaitDocumentRetriever(retriever, committer));
+ itr.add(commitAndWaitRetriever);
+
+ IterateResult res = itr.iterate(largeNum);
+ EXPECT_TRUE(res.isCompleted());
+ EXPECT_EQUAL(1u, res.getEntries().size());
+ TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:1")), Timestamp(2)));
+ EXPECT_EQUAL(0u, committer._commitCount);
+}
+
+// STRONG consistency: exactly one commitAndWait() expected.
+void verifyStrongReadConsistency(DocumentIterator & itr) {
+ Committer committer;
+ TEST_DO(verifyReadConsistency(itr, committer));
+ EXPECT_EQUAL(1u, committer._commitAndWaitCount);
+}
+
+// WEAK consistency: no commitAndWait() expected.
+void verifyWeakReadConsistency(DocumentIterator & itr) {
+ Committer committer;
+ TEST_DO(verifyReadConsistency(itr, committer));
+ EXPECT_EQUAL(0u, committer._commitAndWaitCount);
+}
+
+// Default read consistency behaves like STRONG (commitAndWait before read).
+TEST("require that default readconsistency does commit") {
+ DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
+ TEST_DO(verifyStrongReadConsistency(itr));
+}
+
+TEST("require that readconsistency::strong does commit") {
+ DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false, storage::spi::ReadConsistency::STRONG);
+ TEST_DO(verifyStrongReadConsistency(itr));
+}
+
+TEST("require that readconsistency::weak does not commit") {
+ DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false, storage::spi::ReadConsistency::WEAK);
+ TEST_DO(verifyWeakReadConsistency(itr));
+}
+
+// With NEWEST_DOCUMENT_OR_REMOVE, remove entries are returned as tombstones.
+TEST("require that remove entries can be iterated") {
+ DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
+ itr.add(rem("doc:foo:1", Timestamp(2), bucket(5)));
+ itr.add(cat(rem("doc:foo:2", Timestamp(3), bucket(5)),
+ rem("doc:foo:3", Timestamp(4), bucket(5))));
+ IterateResult res = itr.iterate(largeNum);
+ EXPECT_TRUE(res.isCompleted());
+ EXPECT_EQUAL(3u, res.getEntries().size());
+ TEST_DO(checkEntry(res, 0, DocumentId("doc:foo:1"), Timestamp(2)));
+ TEST_DO(checkEntry(res, 1, DocumentId("doc:foo:2"), Timestamp(3)));
+ TEST_DO(checkEntry(res, 2, DocumentId("doc:foo:3"), Timestamp(4)));
+}
+
+// With NEWEST_DOCUMENT_ONLY, remove entries are filtered out entirely.
+TEST("require that remove entries can be ignored") {
+ DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), docV(), -1, false);
+ itr.add(rem("doc:foo:1", Timestamp(2), bucket(5)));
+ itr.add(cat(doc("doc:foo:2", Timestamp(3), bucket(5)),
+ rem("doc:foo:3", Timestamp(4), bucket(5))));
+ IterateResult res = itr.iterate(largeNum);
+ EXPECT_TRUE(res.isCompleted());
+ EXPECT_EQUAL(1u, res.getEntries().size());
+ TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:2")), Timestamp(3)));
+}
+
+// ALL_VERSIONS yields both puts and removes.
+TEST("require that iterating all versions returns both documents and removes") {
+ DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), allV(), -1, false);
+ itr.add(rem("doc:foo:1", Timestamp(2), bucket(5)));
+ itr.add(cat(doc("doc:foo:2", Timestamp(3), bucket(5)),
+ rem("doc:foo:3", Timestamp(4), bucket(5))));
+ IterateResult res = itr.iterate(largeNum);
+ EXPECT_TRUE(res.isCompleted());
+ EXPECT_EQUAL(3u, res.getEntries().size());
+ TEST_DO(checkEntry(res, 0, DocumentId("doc:foo:1"), Timestamp(2)));
+ TEST_DO(checkEntry(res, 1, Document(*DataType::DOCUMENT, DocumentId("doc:foo:2")), Timestamp(3)));
+ TEST_DO(checkEntry(res, 2, DocumentId("doc:foo:3"), Timestamp(4)));
+}
+
+// NoFields field set yields meta-data-only entries (no document payload).
+TEST("require that using an empty field set returns meta-data only") {
+ DocumentIterator itr(bucket(5), document::NoFields(), selectAll(), newestV(), -1, false);
+ itr.add(doc("doc:foo:1", Timestamp(2), bucket(5)));
+ itr.add(cat(doc("doc:foo:2", Timestamp(3), bucket(5)),
+ rem("doc:foo:3", Timestamp(4), bucket(5))));
+ IterateResult res = itr.iterate(largeNum);
+ EXPECT_TRUE(res.isCompleted());
+ EXPECT_EQUAL(3u, res.getEntries().size());
+ TEST_DO(checkEntry(res, 0, Timestamp(2), storage::spi::NONE));
+ TEST_DO(checkEntry(res, 1, Timestamp(3), storage::spi::NONE));
+ TEST_DO(checkEntry(res, 2, Timestamp(4), storage::spi::REMOVE_ENTRY));
+}
+
+TEST("require that entries in other buckets are skipped") {
+ DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
+ itr.add(rem("doc:foo:1", Timestamp(2), bucket(6)));
+ itr.add(cat(doc("doc:foo:2", Timestamp(3), bucket(5)),
+ doc("doc:foo:3", Timestamp(4), bucket(6))));
+ IterateResult res = itr.iterate(largeNum);
+ EXPECT_TRUE(res.isCompleted());
+ EXPECT_EQUAL(1u, res.getEntries().size());
+ TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:2")), Timestamp(3)));
+}
+
+TEST("require that maxBytes splits iteration results") {
+ DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
+ itr.add(doc("doc:foo:1", Timestamp(2), bucket(5)));
+ itr.add(cat(rem("doc:foo:2", Timestamp(3), bucket(5)),
+ doc("doc:foo:3", Timestamp(4), bucket(5))));
+ IterateResult res1 = itr.iterate(getSize(Document(*DataType::DOCUMENT, DocumentId("doc:foo:1"))) +
+ getSize(DocumentId("doc:foo:2")));
+ EXPECT_TRUE(!res1.isCompleted());
+ EXPECT_EQUAL(2u, res1.getEntries().size());
+ TEST_DO(checkEntry(res1, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:1")), Timestamp(2)));
+ TEST_DO(checkEntry(res1, 1, DocumentId("doc:foo:2"), Timestamp(3)));
+
+ IterateResult res2 = itr.iterate(largeNum);
+ EXPECT_TRUE(res2.isCompleted());
+ TEST_DO(checkEntry(res2, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:3")), Timestamp(4)));
+
+ IterateResult res3 = itr.iterate(largeNum);
+ EXPECT_TRUE(res3.isCompleted());
+ EXPECT_EQUAL(0u, res3.getEntries().size());
+}
+
+TEST("require that maxBytes splits iteration results for meta-data only iteration") {
+ DocumentIterator itr(bucket(5), document::NoFields(), selectAll(), newestV(), -1, false);
+ itr.add(doc("doc:foo:1", Timestamp(2), bucket(5)));
+ itr.add(cat(rem("doc:foo:2", Timestamp(3), bucket(5)),
+ doc("doc:foo:3", Timestamp(4), bucket(5))));
+ IterateResult res1 = itr.iterate(getSize() + getSize());
+ EXPECT_TRUE(!res1.isCompleted());
+ EXPECT_EQUAL(2u, res1.getEntries().size());
+ TEST_DO(checkEntry(res1, 0, Timestamp(2), storage::spi::NONE));
+ TEST_DO(checkEntry(res1, 1, Timestamp(3), storage::spi::REMOVE_ENTRY));
+
+ IterateResult res2 = itr.iterate(largeNum);
+ EXPECT_TRUE(res2.isCompleted());
+ TEST_DO(checkEntry(res2, 0, Timestamp(4), storage::spi::NONE));
+
+ IterateResult res3 = itr.iterate(largeNum);
+ EXPECT_TRUE(res3.isCompleted());
+ EXPECT_EQUAL(0u, res3.getEntries().size());
+}
+
+TEST("require that at least one document is returned by visit") {
+ DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
+ itr.add(doc("doc:foo:1", Timestamp(2), bucket(5)));
+ itr.add(cat(rem("doc:foo:2", Timestamp(3), bucket(5)),
+ doc("doc:foo:3", Timestamp(4), bucket(5))));
+ IterateResult res1 = itr.iterate(0);
+ EXPECT_TRUE(1u <= res1.getEntries().size());
+ TEST_DO(checkEntry(res1, 0, Document(*DataType::DOCUMENT,DocumentId("doc:foo:1")), Timestamp(2)));
+}
+
+TEST("require that documents outside the timestamp limits are ignored") {
+ DocumentIterator itr(bucket(5), document::AllFields(), selectTimestampRange(100, 200), newestV(), -1, false);
+ itr.add(doc("doc:foo:1", Timestamp(99), bucket(5)));
+ itr.add(doc("doc:foo:2", Timestamp(100), bucket(5)));
+ itr.add(doc("doc:foo:3", Timestamp(200), bucket(5)));
+ itr.add(doc("doc:foo:4", Timestamp(201), bucket(5)));
+ itr.add(rem("doc:foo:5", Timestamp(99), bucket(5)));
+ itr.add(rem("doc:foo:6", Timestamp(100), bucket(5)));
+ itr.add(rem("doc:foo:7", Timestamp(200), bucket(5)));
+ itr.add(rem("doc:foo:8", Timestamp(201), bucket(5)));
+ IterateResult res = itr.iterate(largeNum);
+ EXPECT_TRUE(res.isCompleted());
+ EXPECT_EQUAL(4u, res.getEntries().size());
+ TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:2")), Timestamp(100)));
+ TEST_DO(checkEntry(res, 1, Document(*DataType::DOCUMENT, DocumentId("doc:foo:3")), Timestamp(200)));
+ TEST_DO(checkEntry(res, 2, DocumentId("doc:foo:6"), Timestamp(100)));
+ TEST_DO(checkEntry(res, 3, DocumentId("doc:foo:7"), Timestamp(200)));
+}
+
+TEST("require that timestamp subset returns the appropriate documents") {
+ DocumentIterator itr(bucket(5), document::AllFields(), selectTimestampSet(200, 350, 400), newestV(), -1, false);
+ itr.add(doc("doc:foo:1", Timestamp(500), bucket(5)));
+ itr.add(doc("doc:foo:2", Timestamp(400), bucket(5)));
+ itr.add(doc("doc:foo:3", Timestamp(300), bucket(5)));
+ itr.add(doc("doc:foo:4", Timestamp(200), bucket(5)));
+ itr.add(rem("doc:foo:5", Timestamp(250), bucket(5)));
+ itr.add(rem("doc:foo:6", Timestamp(350), bucket(5)));
+ itr.add(rem("doc:foo:7", Timestamp(450), bucket(5)));
+ itr.add(rem("doc:foo:8", Timestamp(550), bucket(5)));
+ IterateResult res = itr.iterate(largeNum);
+ EXPECT_TRUE(res.isCompleted());
+ EXPECT_EQUAL(3u, res.getEntries().size());
+ TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:2")), Timestamp(400)));
+ TEST_DO(checkEntry(res, 1, Document(*DataType::DOCUMENT, DocumentId("doc:foo:4")), Timestamp(200)));
+ TEST_DO(checkEntry(res, 2, DocumentId("doc:foo:6"), Timestamp(350)));
+}
+
+TEST("require that document selection will filter results") {
+ DocumentIterator itr(bucket(5), document::AllFields(), selectDocs("id=\"doc:foo:xxx*\""), newestV(), -1, false);
+ itr.add(doc("doc:foo:xxx1", Timestamp(99), bucket(5)));
+ itr.add(doc("doc:foo:yyy1", Timestamp(100), bucket(5)));
+ itr.add(doc("doc:foo:xxx2", Timestamp(200), bucket(5)));
+ itr.add(doc("doc:foo:yyy2", Timestamp(201), bucket(5)));
+ itr.add(rem("doc:foo:xxx3", Timestamp(99), bucket(5)));
+ itr.add(rem("doc:foo:yyy3", Timestamp(100), bucket(5)));
+ itr.add(rem("doc:foo:xxx4", Timestamp(200), bucket(5)));
+ itr.add(rem("doc:foo:yyy4", Timestamp(201), bucket(5)));
+ IterateResult res = itr.iterate(largeNum);
+ EXPECT_TRUE(res.isCompleted());
+ EXPECT_EQUAL(4u, res.getEntries().size());
+ TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:xxx1")), Timestamp(99)));
+ TEST_DO(checkEntry(res, 1, Document(*DataType::DOCUMENT, DocumentId("doc:foo:xxx2")), Timestamp(200)));
+ TEST_DO(checkEntry(res, 2, DocumentId("doc:foo:xxx3"), Timestamp(99)));
+ TEST_DO(checkEntry(res, 3, DocumentId("doc:foo:xxx4"), Timestamp(200)));
+}
+
+TEST("require that document selection handles 'field == null'") {
+ DocumentIterator itr(bucket(5), document::AllFields(), selectDocs("foo.aa == null"), newestV(), -1, false);
+ itr.add(doc_with_null_fields("doc:foo:xxx1", Timestamp(99), bucket(5)));
+ itr.add(doc_with_null_fields("doc:foo:xxx2", Timestamp(100), bucket(5)));
+ IterateResult res = itr.iterate(largeNum);
+ EXPECT_TRUE(res.isCompleted());
+ ASSERT_EQUAL(2u, res.getEntries().size());
+ Document expected1(getAttrDocType(), DocumentId("doc:foo:xxx1"));
+ TEST_DO(checkEntry(res, 0, expected1, Timestamp(99)));
+ Document expected2(getAttrDocType(), DocumentId("doc:foo:xxx2"));
+ TEST_DO(checkEntry(res, 1, expected2, Timestamp(100)));
+}
+
+TEST("require that invalid document selection returns no documents") {
+ DocumentIterator itr(bucket(5), document::AllFields(), selectDocs("=="), newestV(), -1, false);
+ itr.add(doc("doc:foo:xxx1", Timestamp(99), bucket(5)));
+ itr.add(doc("doc:foo:yyy1", Timestamp(100), bucket(5)));
+ itr.add(doc("doc:foo:xxx2", Timestamp(200), bucket(5)));
+ itr.add(doc("doc:foo:yyy2", Timestamp(201), bucket(5)));
+ itr.add(rem("doc:foo:xxx3", Timestamp(99), bucket(5)));
+ itr.add(rem("doc:foo:yyy3", Timestamp(100), bucket(5)));
+ itr.add(rem("doc:foo:xxx4", Timestamp(200), bucket(5)));
+ itr.add(rem("doc:foo:yyy4", Timestamp(201), bucket(5)));
+ IterateResult res = itr.iterate(largeNum);
+ EXPECT_TRUE(res.isCompleted());
+ EXPECT_EQUAL(0u, res.getEntries().size());
+}
+
+TEST("require that document selection and timestamp range works together") {
+ DocumentIterator itr(bucket(5), document::AllFields(), selectDocsWithinRange("id=\"doc:foo:xxx*\"", 100, 200), newestV(), -1, false);
+ itr.add(doc("doc:foo:xxx1", Timestamp(99), bucket(5)));
+ itr.add(doc("doc:foo:yyy1", Timestamp(100), bucket(5)));
+ itr.add(doc("doc:foo:xxx2", Timestamp(200), bucket(5)));
+ itr.add(doc("doc:foo:yyy2", Timestamp(201), bucket(5)));
+ itr.add(rem("doc:foo:xxx3", Timestamp(99), bucket(5)));
+ itr.add(rem("doc:foo:yyy3", Timestamp(100), bucket(5)));
+ itr.add(rem("doc:foo:xxx4", Timestamp(200), bucket(5)));
+ itr.add(rem("doc:foo:yyy4", Timestamp(201), bucket(5)));
+ IterateResult res = itr.iterate(largeNum);
+ EXPECT_TRUE(res.isCompleted());
+ EXPECT_EQUAL(2u, res.getEntries().size());
+ TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:xxx2")), Timestamp(200)));
+ TEST_DO(checkEntry(res, 1, DocumentId("doc:foo:xxx4"), Timestamp(200)));
+}
+
+TEST("require that fieldset limits fields returned") {
+ DocumentIterator itr(bucket(5), document::HeaderFields(), selectAll(), newestV(), -1, false);
+ itr.add(doc_with_fields("doc:foo:xxx1", Timestamp(1), bucket(5)));
+ IterateResult res = itr.iterate(largeNum);
+ EXPECT_TRUE(res.isCompleted());
+ EXPECT_EQUAL(1u, res.getEntries().size());
+ Document expected(getDocType(), DocumentId("doc:foo:xxx1"));
+ expected.set("header", "foo");
+ TEST_DO(checkEntry(res, 0, expected, Timestamp(1)));
+}
+
+namespace {
+template <typename Container, typename T>
+bool contains(const Container& c, const T& value) {
+ return c.find(value) != c.end();
+}
+}
+
+TEST("require that userdoc-constrained selections pre-filter on GIDs") {
+ DocumentIterator itr(bucket(5), document::AllFields(),
+ selectDocs("id.user=1234"), newestV(), -1, false);
+ VisitRecordingUnitDR::VisitedLIDs visited_lids;
+ // Even though GID filtering is probabilistic when it comes to filtering
+ // user IDs that cover the 64-bit range, it's fully deterministic when the
+ // user IDs are all 32 bits or less, which is the case for the below IDs.
+ auto wanted_dr_1 = doc_rec(visited_lids, "id::foo:n=1234:a",
+ Timestamp(99), bucket(5));
+ auto filtered_dr_1 = doc_rec(visited_lids, "id::foo:n=4321:b",
+ Timestamp(200), bucket(5));
+ auto filtered_dr_2 = doc_rec(visited_lids, "id::foo:n=5678:c",
+ Timestamp(201), bucket(5));
+ auto wanted_dr_2 = doc_rec(visited_lids, "id::foo:n=1234:d",
+ Timestamp(300), bucket(5));
+ auto wanted_dr_3 = doc_rec(visited_lids, "id::foo:n=1234:e",
+ Timestamp(301), bucket(5));
+ itr.add(wanted_dr_1);
+ itr.add(filtered_dr_1);
+ itr.add(cat(filtered_dr_2, wanted_dr_2));
+ itr.add(wanted_dr_3);
+ IterateResult res = itr.iterate(largeNum);
+ EXPECT_TRUE(res.isCompleted());
+ EXPECT_EQUAL(3u, visited_lids.size());
+ EXPECT_TRUE(contains(visited_lids, wanted_dr_1->docid));
+ EXPECT_TRUE(contains(visited_lids, wanted_dr_2->docid));
+ EXPECT_TRUE(contains(visited_lids, wanted_dr_3->docid));
+}
+
+TEST("require that attributes are used")
+{
+ UnitDR::reset();
+ DocumentIterator itr(bucket(5), document::AllFields(), selectDocs("foo.aa == 45"), docV(), -1, false);
+ itr.add(doc_with_attr_fields("doc:foo:xx1", Timestamp(1), bucket(5),
+ 27, 28, 27, 2.7, 2.8, "x27", "x28"));
+ itr.add(doc_with_attr_fields("doc:foo:xx2", Timestamp(2), bucket(5),
+ 27, 28, 45, 2.7, 4.5, "x27", "x45"));
+ itr.add(doc_with_attr_fields("doc:foo:xx3", Timestamp(3), bucket(5),
+ 45, 46, 27, 4.5, 2.7, "x45", "x27"));
+ itr.add(doc_with_attr_fields("doc:foo:xx4", Timestamp(4), bucket(5),
+ 45, 46, 45, 4.5, 4.5, "x45", "x45"));
+
+ IterateResult res = itr.iterate(largeNum);
+ EXPECT_TRUE(res.isCompleted());
+ EXPECT_EQUAL(2u, res.getEntries().size());
+ Document expected1(getAttrDocType(), DocumentId("doc:foo:xx2"));
+ expected1.set("header", "foo");
+ expected1.set("body", "bar");
+ expected1.set("aa", 27);
+ expected1.set("ab", 28);
+ expected1.set("dd", 2.7);
+ expected1.set("ss", "x27");
+ Document expected2(getAttrDocType(), DocumentId("doc:foo:xx4"));
+ expected2.set("header", "foo");
+ expected2.set("body", "bar");
+ expected2.set("aa", 45);
+ expected2.set("ab", 46);
+ expected2.set("dd", 4.5);
+ expected2.set("ss", "x45");
+ TEST_DO(checkEntry(res, 0, expected1, Timestamp(2)));
+ TEST_DO(checkEntry(res, 1, expected2, Timestamp(4)));
+
+ DocumentIterator itr2(bucket(5), document::AllFields(), selectDocs("foo.dd == 4.5"), docV(), -1, false);
+ itr2.add(doc_with_attr_fields("doc:foo:xx5", Timestamp(5), bucket(5),
+ 27, 28, 27, 2.7, 2.8, "x27", "x28"));
+ itr2.add(doc_with_attr_fields("doc:foo:xx6", Timestamp(6), bucket(5),
+ 27, 28, 45, 2.7, 4.5, "x27", "x45"));
+ itr2.add(doc_with_attr_fields("doc:foo:xx7", Timestamp(7), bucket(5),
+ 45, 46, 27, 4.5, 2.7, "x45", "x27"));
+ itr2.add(doc_with_attr_fields("doc:foo:xx8", Timestamp(8), bucket(5),
+ 45, 46, 45, 4.5, 4.5, "x45", "x45"));
+
+ IterateResult res2 = itr2.iterate(largeNum);
+ EXPECT_TRUE(res2.isCompleted());
+ EXPECT_EQUAL(2u, res2.getEntries().size());
+ Document expected3(getAttrDocType(), DocumentId("doc:foo:xx6"));
+ expected3.set("header", "foo");
+ expected3.set("body", "bar");
+ expected3.set("aa", 27);
+ expected3.set("ab", 28);
+ expected3.set("dd", 2.7);
+ expected3.set("ss", "x27");
+ Document expected4(getAttrDocType(), DocumentId("doc:foo:xx8"));
+ expected4.set("header", "foo");
+ expected4.set("body", "bar");
+ expected4.set("aa", 45);
+ expected4.set("ab", 46);
+ expected4.set("dd", 4.5);
+ expected4.set("ss", "x45");
+ TEST_DO(checkEntry(res2, 0, expected3, Timestamp(6)));
+ TEST_DO(checkEntry(res2, 1, expected4, Timestamp(8)));
+
+ DocumentIterator itr3(bucket(5), document::AllFields(), selectDocs("foo.ss == \"x45\""), docV(), -1, false);
+ itr3.add(doc_with_attr_fields("doc:foo:xx9", Timestamp(9), bucket(5),
+ 27, 28, 27, 2.7, 2.8, "x27", "x28"));
+ itr3.add(doc_with_attr_fields("doc:foo:xx10", Timestamp(10), bucket(5),
+ 27, 28, 45, 2.7, 4.5, "x27", "x45"));
+ itr3.add(doc_with_attr_fields("doc:foo:xx11", Timestamp(11), bucket(5),
+ 45, 46, 27, 4.5, 2.7, "x45", "x27"));
+ itr3.add(doc_with_attr_fields("doc:foo:xx12", Timestamp(12), bucket(5),
+ 45, 46, 45, 4.5, 4.5, "x45", "x45"));
+
+ IterateResult res3 = itr3.iterate(largeNum);
+ EXPECT_TRUE(res3.isCompleted());
+ EXPECT_EQUAL(2u, res3.getEntries().size());
+ Document expected5(getAttrDocType(), DocumentId("doc:foo:xx10"));
+ expected5.set("header", "foo");
+ expected5.set("body", "bar");
+ expected5.set("aa", 27);
+ expected5.set("ab", 28);
+ expected5.set("dd", 2.7);
+ expected5.set("ss", "x27");
+ Document expected6(getAttrDocType(), DocumentId("doc:foo:xx12"));
+ expected6.set("header", "foo");
+ expected6.set("body", "bar");
+ expected6.set("aa", 45);
+ expected6.set("ab", 46);
+ expected6.set("dd", 4.5);
+ expected6.set("ss", "x45");
+ TEST_DO(checkEntry(res3, 0, expected5, Timestamp(10)));
+ TEST_DO(checkEntry(res3, 1, expected6, Timestamp(12)));
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
+
diff --git a/searchcore/src/tests/proton/documentdb/.gitignore b/searchcore/src/tests/proton/documentdb/.gitignore
new file mode 100644
index 00000000000..abcba544a6d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/.gitignore
@@ -0,0 +1,6 @@
+Makefile
+.depend
+documentdb_test
+tmp
+
+searchcore_documentdb_test_app
diff --git a/searchcore/src/tests/proton/documentdb/CMakeLists.txt b/searchcore/src/tests/proton/documentdb/CMakeLists.txt
new file mode 100644
index 00000000000..9270a4b0b7c
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/CMakeLists.txt
@@ -0,0 +1,24 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_documentdb_test_app
+ SOURCES
+ documentdb_test.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_initializer
+ searchcore_reprocessing
+ searchcore_index
+ searchcore_docsummary
+ searchcore_persistenceengine
+ searchcore_feedoperation
+ searchcore_matching
+ searchcore_attribute
+ searchcore_documentmetastore
+ searchcore_bucketdb
+ searchcore_flushengine
+ searchcore_pcommon
+ searchcore_grouping
+ searchcore_proton_metrics
+ searchcore_fconfig
+ searchcore_util
+)
+vespa_add_test(NAME searchcore_documentdb_test_app COMMAND sh documentdb_test.sh)
diff --git a/searchcore/src/tests/proton/documentdb/DESC b/searchcore/src/tests/proton/documentdb/DESC
new file mode 100644
index 00000000000..0f8cbcb2eb0
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/DESC
@@ -0,0 +1 @@
+documentdb test. Take a look at documentdb_test.cpp for details.
diff --git a/searchcore/src/tests/proton/documentdb/FILES b/searchcore/src/tests/proton/documentdb/FILES
new file mode 100644
index 00000000000..50fef46855d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/FILES
@@ -0,0 +1 @@
+documentdb_test.cpp
diff --git a/searchcore/src/tests/proton/documentdb/buckethandler/.gitignore b/searchcore/src/tests/proton/documentdb/buckethandler/.gitignore
new file mode 100644
index 00000000000..c159971ebc7
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/buckethandler/.gitignore
@@ -0,0 +1 @@
+searchcore_buckethandler_test_app
diff --git a/searchcore/src/tests/proton/documentdb/buckethandler/CMakeLists.txt b/searchcore/src/tests/proton/documentdb/buckethandler/CMakeLists.txt
new file mode 100644
index 00000000000..3c1f5c79a57
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/buckethandler/CMakeLists.txt
@@ -0,0 +1,18 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_buckethandler_test_app
+ SOURCES
+ buckethandler_test.cpp
+ DEPENDS
+ searchcore_test
+ searchcore_server
+ searchcore_feedoperation
+ searchcore_matching
+ searchcore_attribute
+ searchcore_documentmetastore
+ searchcore_bucketdb
+ searchcore_pcommon
+ searchcore_grouping
+ searchcore_util
+ searchcore_fconfig
+)
+vespa_add_test(NAME searchcore_buckethandler_test_app COMMAND searchcore_buckethandler_test_app)
diff --git a/searchcore/src/tests/proton/documentdb/buckethandler/DESC b/searchcore/src/tests/proton/documentdb/buckethandler/DESC
new file mode 100644
index 00000000000..f844b837422
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/buckethandler/DESC
@@ -0,0 +1 @@
+buckethandler test. Take a look at buckethandler_test.cpp for details.
diff --git a/searchcore/src/tests/proton/documentdb/buckethandler/FILES b/searchcore/src/tests/proton/documentdb/buckethandler/FILES
new file mode 100644
index 00000000000..df0589a342b
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/buckethandler/FILES
@@ -0,0 +1 @@
+buckethandler_test.cpp
diff --git a/searchcore/src/tests/proton/documentdb/buckethandler/buckethandler_test.cpp b/searchcore/src/tests/proton/documentdb/buckethandler/buckethandler_test.cpp
new file mode 100644
index 00000000000..f139bf92e44
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/buckethandler/buckethandler_test.cpp
@@ -0,0 +1,265 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("buckethandler_test");
+#include <vespa/searchcore/proton/server/buckethandler.h>
+#include <vespa/searchcore/proton/server/ibucketstatechangedhandler.h>
+#include <vespa/searchcore/proton/server/ibucketmodifiedhandler.h>
+#include <vespa/searchcore/proton/test/test.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+using namespace proton;
+using document::BucketId;
+using document::GlobalId;
+using storage::spi::Bucket;
+using storage::spi::BucketInfo;
+using storage::spi::PartitionId;
+using storage::spi::Timestamp;
+using vespalib::ThreadStackExecutor;
+using proton::test::BucketStateCalculator;
+
+const PartitionId PART_ID(0);
+const GlobalId GID_1("111111111111");
+const BucketId BUCKET_1(8, GID_1.convertToBucketId().getRawId());
+const Timestamp TIME_1(1u);
+
+struct MySubDb
+{
+ DocumentMetaStore _metaStore;
+ test::UserDocuments _docs;
+ MySubDb(std::shared_ptr<BucketDBOwner> bucketDB, SubDbType subDbType)
+ : _metaStore(bucketDB,
+ DocumentMetaStore::getFixedName(),
+ search::GrowStrategy(),
+ documentmetastore::IGidCompare::SP(
+ new documentmetastore::DefaultGidCompare),
+ subDbType),
+ _docs()
+ {
+ }
+ void insertDocs(const test::UserDocuments &docs_) {
+ _docs = docs_;
+ for (test::UserDocuments::Iterator itr = _docs.begin(); itr != _docs.end(); ++itr) {
+ const test::BucketDocuments &bucketDocs = itr->second;
+ for (size_t i = 0; i < bucketDocs.getDocs().size(); ++i) {
+ const test::Document &testDoc = bucketDocs.getDocs()[i];
+ _metaStore.put(testDoc.getGid(), testDoc.getBucket(),
+ testDoc.getTimestamp(), testDoc.getLid());
+ }
+ }
+ }
+ BucketId bucket(uint32_t userId) {
+ return _docs.getBucket(userId);
+ }
+ test::DocumentVector docs(uint32_t userId) {
+ return _docs.getGidOrderDocs(userId);
+ }
+};
+
+
+struct MyChangedHandler : public IBucketStateChangedHandler
+{
+ BucketId _bucket;
+ BucketInfo::ActiveState _state;
+ MyChangedHandler() : _bucket(), _state(BucketInfo::NOT_ACTIVE) {}
+
+ virtual void notifyBucketStateChanged(const document::BucketId &bucketId,
+ storage::spi::BucketInfo::ActiveState newState) {
+ _bucket = bucketId;
+ _state = newState;
+ }
+};
+
+
+struct MyModifiedHandler : public IBucketModifiedHandler
+{
+ virtual void
+ notifyBucketModified(const BucketId &bucket)
+ {
+ (void) bucket;
+ }
+};
+
+
+bool
+expectEqual(uint32_t docCount, uint32_t metaCount, const BucketInfo &info)
+{
+ if (!EXPECT_EQUAL(docCount, info.getDocumentCount())) return false;
+ if (!EXPECT_EQUAL(metaCount, info.getEntryCount())) return false;
+ if (!EXPECT_EQUAL(docCount, info.getDocumentSize())) return false;
+ if (!EXPECT_EQUAL(metaCount, info.getUsedSize())) return false;
+ return true;
+}
+
+
+struct Fixture
+{
+ test::UserDocumentsBuilder _builder;
+ std::shared_ptr<BucketDBOwner> _bucketDB;
+ MySubDb _ready;
+ MySubDb _removed;
+ MySubDb _notReady;
+ ThreadStackExecutor _exec;
+ BucketHandler _handler;
+ MyChangedHandler _changedHandler;
+ MyModifiedHandler _modifiedHandler;
+ BucketStateCalculator::SP _calc;
+ test::BucketIdListResultHandler _bucketList;
+ test::BucketInfoResultHandler _bucketInfo;
+ test::GenericResultHandler _genResult;
+ Fixture()
+ : _builder(),
+ _bucketDB(std::make_shared<BucketDBOwner>()),
+ _ready(_bucketDB, SubDbType::READY),
+ _removed(_bucketDB, SubDbType::REMOVED),
+ _notReady(_bucketDB, SubDbType::NOTREADY),
+ _exec(1, 64000),
+ _handler(_exec),
+ _changedHandler(),
+ _modifiedHandler(),
+ _calc(new BucketStateCalculator()),
+ _bucketList(), _bucketInfo(), _genResult()
+ {
+ // bucket 2 & 3 & 4 & 7 in ready
+ _ready.insertDocs(_builder.createDocs(2, 1, 3). // 2 docs
+ createDocs(3, 3, 6). // 3 docs
+ createDocs(4, 6, 10). // 4 docs
+ createDocs(7, 10, 11). // 1 doc
+ getDocs());
+ // bucket 2 in removed
+ _removed.insertDocs(_builder.clearDocs().
+ createDocs(2, 16, 20). // 4 docs
+ getDocs());
+ // bucket 4 in not ready
+ _notReady.insertDocs(_builder.clearDocs().
+ createDocs(4, 22, 24). // 2 docs
+ getDocs());
+ _handler.setReadyBucketHandler(_ready._metaStore);
+ _handler.addBucketStateChangedHandler(&_changedHandler);
+ _handler.notifyClusterStateChanged(_calc);
+ }
+ ~Fixture()
+ {
+ _handler.removeBucketStateChangedHandler(&_changedHandler);
+ }
+ void sync() { _exec.sync(); }
+ void handleGetBucketInfo(const BucketId &bucket) {
+ _handler.handleGetBucketInfo(Bucket(bucket, PART_ID), _bucketInfo);
+ }
+ void
+ setNodeUp(bool value)
+ {
+ _calc->setNodeUp(value);
+ _handler.notifyClusterStateChanged(_calc);
+ }
+};
+
+
+TEST_F("require that handleListBuckets() returns buckets from all sub dbs", Fixture)
+{
+ f._handler.handleListBuckets(f._bucketList);
+ EXPECT_EQUAL(4u, f._bucketList.getList().size());
+ EXPECT_EQUAL(f._ready.bucket(2), f._bucketList.getList()[0]);
+ EXPECT_EQUAL(f._ready.bucket(3), f._bucketList.getList()[1]);
+ EXPECT_EQUAL(f._ready.bucket(4), f._bucketList.getList()[2]);
+ EXPECT_EQUAL(f._ready.bucket(7), f._bucketList.getList()[3]);
+ EXPECT_EQUAL(f._removed.bucket(2), f._bucketList.getList()[0]);
+ EXPECT_EQUAL(f._notReady.bucket(4), f._bucketList.getList()[2]);
+}
+
+
+TEST_F("require that bucket is reported in handleGetBucketInfo() and size faked", Fixture)
+{
+ f.handleGetBucketInfo(f._ready.bucket(3));
+ EXPECT_TRUE(expectEqual(3, 3, f._bucketInfo.getInfo()));
+
+ f.handleGetBucketInfo(f._ready.bucket(2)); // bucket 2 also in removed sub db
+ EXPECT_TRUE(expectEqual(2, 6, f._bucketInfo.getInfo()));
+}
+
+
+TEST_F("require that handleGetBucketInfo() can get cached bucket", Fixture)
+{
+ {
+ BucketDBOwner::Guard db = f._bucketDB->takeGuard();
+ db->add(GID_1, BUCKET_1, TIME_1, SubDbType::READY);
+ db->cacheBucket(BUCKET_1);
+ db->add(GID_1, BUCKET_1, TIME_1, SubDbType::NOTREADY);
+ }
+ f.handleGetBucketInfo(BUCKET_1);
+ EXPECT_TRUE(expectEqual(1, 1, f._bucketInfo.getInfo()));
+
+ f._bucketDB->takeGuard()->uncacheBucket();
+
+ f.handleGetBucketInfo(BUCKET_1);
+ EXPECT_TRUE(expectEqual(2, 2, f._bucketInfo.getInfo()));
+ {
+ // Must ensure empty bucket db before destruction.
+ BucketDBOwner::Guard db = f._bucketDB->takeGuard();
+ db->remove(GID_1, BUCKET_1, TIME_1, SubDbType::READY);
+ db->remove(GID_1, BUCKET_1, TIME_1, SubDbType::NOTREADY);
+ }
+}
+
+
+TEST_F("require that changed handlers are notified when bucket state changes", Fixture)
+{
+ f._handler.handleSetCurrentState(f._ready.bucket(2), BucketInfo::ACTIVE, f._genResult);
+ f.sync();
+ EXPECT_EQUAL(f._ready.bucket(2), f._changedHandler._bucket);
+ EXPECT_EQUAL(BucketInfo::ACTIVE, f._changedHandler._state);
+ f._handler.handleSetCurrentState(f._ready.bucket(3), BucketInfo::NOT_ACTIVE, f._genResult);
+ f.sync();
+ EXPECT_EQUAL(f._ready.bucket(3), f._changedHandler._bucket);
+ EXPECT_EQUAL(BucketInfo::NOT_ACTIVE, f._changedHandler._state);
+}
+
+
+TEST_F("require that unready bucket can be reported as active", Fixture)
+{
+ f._handler.handleSetCurrentState(f._ready.bucket(4),
+ BucketInfo::ACTIVE, f._genResult);
+ f.sync();
+ EXPECT_EQUAL(f._ready.bucket(4), f._changedHandler._bucket);
+ EXPECT_EQUAL(BucketInfo::ACTIVE, f._changedHandler._state);
+ f.handleGetBucketInfo(f._ready.bucket(4));
+ EXPECT_EQUAL(true, f._bucketInfo.getInfo().isActive());
+ EXPECT_EQUAL(false, f._bucketInfo.getInfo().isReady());
+}
+
+
+TEST_F("require that node being down deactivates buckets", Fixture)
+{
+ f._handler.handleSetCurrentState(f._ready.bucket(2),
+ BucketInfo::ACTIVE, f._genResult);
+ f.sync();
+ EXPECT_EQUAL(f._ready.bucket(2), f._changedHandler._bucket);
+ EXPECT_EQUAL(BucketInfo::ACTIVE, f._changedHandler._state);
+ f.handleGetBucketInfo(f._ready.bucket(2));
+ EXPECT_EQUAL(true, f._bucketInfo.getInfo().isActive());
+ f.setNodeUp(false);
+ f.sync();
+ f.handleGetBucketInfo(f._ready.bucket(2));
+ EXPECT_EQUAL(false, f._bucketInfo.getInfo().isActive());
+ f._handler.handleSetCurrentState(f._ready.bucket(2),
+ BucketInfo::ACTIVE, f._genResult);
+ f.sync();
+ f.handleGetBucketInfo(f._ready.bucket(2));
+ EXPECT_EQUAL(false, f._bucketInfo.getInfo().isActive());
+ f.setNodeUp(true);
+ f.sync();
+ f.handleGetBucketInfo(f._ready.bucket(2));
+ EXPECT_EQUAL(false, f._bucketInfo.getInfo().isActive());
+ f._handler.handleSetCurrentState(f._ready.bucket(2),
+ BucketInfo::ACTIVE, f._genResult);
+ f.sync();
+ f.handleGetBucketInfo(f._ready.bucket(2));
+ EXPECT_EQUAL(true, f._bucketInfo.getInfo().isActive());
+}
+
+
+TEST_MAIN()
+{
+ TEST_RUN_ALL();
+}
+
diff --git a/searchcore/src/tests/proton/documentdb/cfg/attributes.cfg b/searchcore/src/tests/proton/documentdb/cfg/attributes.cfg
new file mode 100644
index 00000000000..9d990996dd1
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/cfg/attributes.cfg
@@ -0,0 +1,3 @@
+attribute[1]
+attribute[0].name "attr1"
+attribute[0].datatype INT32
diff --git a/searchcore/src/tests/proton/documentdb/cfg/indexschema.cfg b/searchcore/src/tests/proton/documentdb/cfg/indexschema.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/cfg/indexschema.cfg
diff --git a/searchcore/src/tests/proton/documentdb/cfg/juniperrc.cfg b/searchcore/src/tests/proton/documentdb/cfg/juniperrc.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/cfg/juniperrc.cfg
diff --git a/searchcore/src/tests/proton/documentdb/cfg/rank-profiles.cfg b/searchcore/src/tests/proton/documentdb/cfg/rank-profiles.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/cfg/rank-profiles.cfg
diff --git a/searchcore/src/tests/proton/documentdb/cfg/summary.cfg b/searchcore/src/tests/proton/documentdb/cfg/summary.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/cfg/summary.cfg
diff --git a/searchcore/src/tests/proton/documentdb/cfg/summarymap.cfg b/searchcore/src/tests/proton/documentdb/cfg/summarymap.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/cfg/summarymap.cfg
diff --git a/searchcore/src/tests/proton/documentdb/clusterstatehandler/.gitignore b/searchcore/src/tests/proton/documentdb/clusterstatehandler/.gitignore
new file mode 100644
index 00000000000..bc38893db32
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/clusterstatehandler/.gitignore
@@ -0,0 +1 @@
+searchcore_clusterstatehandler_test_app
diff --git a/searchcore/src/tests/proton/documentdb/clusterstatehandler/CMakeLists.txt b/searchcore/src/tests/proton/documentdb/clusterstatehandler/CMakeLists.txt
new file mode 100644
index 00000000000..f107cddd103
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/clusterstatehandler/CMakeLists.txt
@@ -0,0 +1,16 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_clusterstatehandler_test_app
+ SOURCES
+ clusterstatehandler_test.cpp
+ DEPENDS
+ searchcore_test
+ searchcore_server
+ searchcore_feedoperation
+ searchcore_matching
+ searchcore_attribute
+ searchcore_pcommon
+ searchcore_grouping
+ searchcore_util
+ searchcore_fconfig
+)
+vespa_add_test(NAME searchcore_clusterstatehandler_test_app COMMAND searchcore_clusterstatehandler_test_app)
diff --git a/searchcore/src/tests/proton/documentdb/clusterstatehandler/DESC b/searchcore/src/tests/proton/documentdb/clusterstatehandler/DESC
new file mode 100644
index 00000000000..5d5921dea9a
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/clusterstatehandler/DESC
@@ -0,0 +1 @@
+clusterstatehandler test. Take a look at clusterstatehandler_test.cpp for details.
diff --git a/searchcore/src/tests/proton/documentdb/clusterstatehandler/FILES b/searchcore/src/tests/proton/documentdb/clusterstatehandler/FILES
new file mode 100644
index 00000000000..92fc297c2a4
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/clusterstatehandler/FILES
@@ -0,0 +1 @@
+clusterstatehandler_test.cpp
diff --git a/searchcore/src/tests/proton/documentdb/clusterstatehandler/clusterstatehandler_test.cpp b/searchcore/src/tests/proton/documentdb/clusterstatehandler/clusterstatehandler_test.cpp
new file mode 100644
index 00000000000..1b8bb37ac3a
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/clusterstatehandler/clusterstatehandler_test.cpp
@@ -0,0 +1,94 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("clusterstatehandler_test");
+#include <vespa/searchcore/proton/server/clusterstatehandler.h>
+#include <vespa/searchcore/proton/server/iclusterstatechangedhandler.h>
+#include <vespa/searchcore/proton/test/test.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+using namespace proton;
+using document::BucketId;
+using storage::lib::Distribution;
+using storage::spi::BucketIdListResult;
+using storage::spi::ClusterState;
+using storage::spi::Result;
+
+struct MyClusterStateChangedHandler : public IClusterStateChangedHandler
+{
+ IBucketStateCalculator::SP _calc;
+ virtual void
+ notifyClusterStateChanged(const IBucketStateCalculator::SP &newCalc) {
+ _calc = newCalc;
+ }
+};
+
+
+BucketId bucket1(1);
+BucketId bucket2(2);
+BucketId bucket3(3);
+Distribution distribution(Distribution::getDefaultDistributionConfig(3, 3));
+storage::lib::ClusterState rawClusterState("version:1 storage:3 distributor:3");
+ClusterState clusterState(rawClusterState, 0, distribution);
+
+
+struct Fixture
+{
+ vespalib::ThreadStackExecutor _exec;
+ ClusterStateHandler _stateHandler;
+ MyClusterStateChangedHandler _changedHandler;
+ test::GenericResultHandler _genericHandler;
+ test::BucketIdListResultHandler _bucketListHandler;
+ Fixture()
+ : _exec(1, 64000),
+ _stateHandler(_exec),
+ _changedHandler(),
+ _genericHandler(),
+ _bucketListHandler()
+ {
+ _stateHandler.addClusterStateChangedHandler(&_changedHandler);
+ }
+ ~Fixture()
+ {
+ _stateHandler.removeClusterStateChangedHandler(&_changedHandler);
+ }
+};
+
+
+TEST_F("require that cluster state change is notified", Fixture)
+{
+ f._stateHandler.handleSetClusterState(clusterState, f._genericHandler);
+ f._exec.sync();
+ EXPECT_TRUE(f._changedHandler._calc.get() != NULL);
+}
+
+
+TEST_F("require that modified buckets are returned", Fixture)
+{
+ f._stateHandler.handleSetClusterState(clusterState, f._genericHandler);
+ f._exec.sync();
+
+ // notify 2 buckets
+ IBucketModifiedHandler &bmh = f._stateHandler;
+ bmh.notifyBucketModified(bucket1);
+ bmh.notifyBucketModified(bucket2);
+ f._stateHandler.handleGetModifiedBuckets(f._bucketListHandler);
+ f._exec.sync();
+ EXPECT_EQUAL(2u, f._bucketListHandler.getList().size());
+ EXPECT_EQUAL(bucket1, f._bucketListHandler.getList()[0]);
+ EXPECT_EQUAL(bucket2, f._bucketListHandler.getList()[1]);
+
+ // notify 1 bucket, already reported buckets should be gone
+ bmh.notifyBucketModified(bucket3);
+ f._stateHandler.handleGetModifiedBuckets(f._bucketListHandler);
+ f._exec.sync();
+ EXPECT_EQUAL(1u, f._bucketListHandler.getList().size());
+ EXPECT_EQUAL(bucket3, f._bucketListHandler.getList()[0]);
+}
+
+
+TEST_MAIN()
+{
+ TEST_RUN_ALL();
+}
+
diff --git a/searchcore/src/tests/proton/documentdb/combiningfeedview/.gitignore b/searchcore/src/tests/proton/documentdb/combiningfeedview/.gitignore
new file mode 100644
index 00000000000..3302e827c3e
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/combiningfeedview/.gitignore
@@ -0,0 +1 @@
+searchcore_combiningfeedview_test_app
diff --git a/searchcore/src/tests/proton/documentdb/combiningfeedview/CMakeLists.txt b/searchcore/src/tests/proton/documentdb/combiningfeedview/CMakeLists.txt
new file mode 100644
index 00000000000..74f605d36d0
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/combiningfeedview/CMakeLists.txt
@@ -0,0 +1,19 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_combiningfeedview_test_app
+ SOURCES
+ combiningfeedview_test.cpp
+ DEPENDS
+ searchcore_test
+ searchcore_server
+ searchcore_feedoperation
+ searchcore_matching
+ searchcore_attribute
+ searchcore_documentmetastore
+ searchcore_bucketdb
+ searchcore_pcommon
+ searchcore_grouping
+ searchcore_proton_metrics
+ searchcore_util
+ searchcore_fconfig
+)
+vespa_add_test(NAME searchcore_combiningfeedview_test_app COMMAND searchcore_combiningfeedview_test_app)
diff --git a/searchcore/src/tests/proton/documentdb/combiningfeedview/DESC b/searchcore/src/tests/proton/documentdb/combiningfeedview/DESC
new file mode 100644
index 00000000000..9882151634a
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/combiningfeedview/DESC
@@ -0,0 +1 @@
+combiningfeedview test. Take a look at combiningfeedview_test.cpp for details.
diff --git a/searchcore/src/tests/proton/documentdb/combiningfeedview/FILES b/searchcore/src/tests/proton/documentdb/combiningfeedview/FILES
new file mode 100644
index 00000000000..791dc90442c
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/combiningfeedview/FILES
@@ -0,0 +1 @@
+combiningfeedview_test.cpp
diff --git a/searchcore/src/tests/proton/documentdb/combiningfeedview/combiningfeedview_test.cpp b/searchcore/src/tests/proton/documentdb/combiningfeedview/combiningfeedview_test.cpp
new file mode 100644
index 00000000000..d3d3aa4ac0d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/combiningfeedview/combiningfeedview_test.cpp
@@ -0,0 +1,438 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("combiningfeedview_test");
+#include <vespa/searchcore/proton/feedoperation/moveoperation.h>
+#include <vespa/searchcore/proton/server/combiningfeedview.h>
+#include <vespa/searchcore/proton/test/test.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+using document::DocumentTypeRepo;
+using document::DocumentUpdate;
+using search::SerialNum;
+using storage::spi::Timestamp;
+using namespace proton;
+
+typedef std::vector<IFeedView::SP> FeedViewVector;
+
+struct MyStreamHandler : public NewConfigOperation::IStreamHandler
+{
+ virtual void serializeConfig(SerialNum, vespalib::nbostream &) {}
+ virtual void deserializeConfig(SerialNum, vespalib::nbostream &) {}
+};
+
+
+struct MyFeedView : public test::DummyFeedView
+{
+ typedef std::shared_ptr<MyFeedView> SP;
+ DocumentMetaStore _metaStore;
+ MyStreamHandler _streamHandler;
+ uint32_t _preparePut;
+ uint32_t _handlePut;
+ uint32_t _prepareRemove;
+ uint32_t _handleRemove;
+ uint32_t _prepareUpdate;
+ uint32_t _handleUpdate;
+ uint32_t _prepareMove;
+ uint32_t _handleMove;
+ uint32_t _prepareDeleteBucket;
+ uint32_t _handleDeleteBucket;
+ uint32_t _heartBeat;
+ uint32_t _handlePrune;
+ uint32_t _wantedLidLimit;
+ MyFeedView(const DocumentTypeRepo::SP &repo,
+ std::shared_ptr<BucketDBOwner> bucketDB,
+ SubDbType subDbType) :
+ test::DummyFeedView(repo),
+ _metaStore(bucketDB,
+ DocumentMetaStore::getFixedName(),
+ search::GrowStrategy(),
+ documentmetastore::IGidCompare::SP(
+ new documentmetastore::DefaultGidCompare),
+ subDbType),
+ _streamHandler(),
+ _preparePut(0),
+ _handlePut(0),
+ _prepareRemove(0),
+ _handleRemove(0),
+ _prepareUpdate(0),
+ _handleUpdate(0),
+ _prepareMove(0),
+ _handleMove(0),
+ _prepareDeleteBucket(0),
+ _handleDeleteBucket(0),
+ _heartBeat(0),
+ _handlePrune(0),
+ _wantedLidLimit(0)
+ {
+ _metaStore.constructFreeList();
+ }
+
+ // Implements IFeedView
+ virtual const DocumentMetaStore *getDocumentMetaStorePtr() const { return &_metaStore; }
+ virtual void preparePut(PutOperation &) { ++_preparePut; }
+ virtual void handlePut(FeedToken *, const PutOperation &) { ++_handlePut; }
+ virtual void prepareUpdate(UpdateOperation &) { ++_prepareUpdate; }
+ virtual void handleUpdate(FeedToken *, const UpdateOperation &) { ++_handleUpdate; }
+ virtual void prepareRemove(RemoveOperation &) { ++_prepareRemove; }
+ virtual void handleRemove(FeedToken *, const RemoveOperation &) { ++_handleRemove; }
+ virtual void prepareDeleteBucket(DeleteBucketOperation &) { ++_prepareDeleteBucket; }
+ virtual void handleDeleteBucket(const DeleteBucketOperation &)
+ { ++_handleDeleteBucket; }
+ virtual void prepareMove(MoveOperation &) { ++_prepareMove; }
+ virtual void handleMove(const MoveOperation &) { ++_handleMove; }
+ virtual void heartBeat(SerialNum) { ++_heartBeat; }
+ virtual void handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &) { ++_handlePrune; }
+ virtual void handleCompactLidSpace(const CompactLidSpaceOperation &op) {
+ _wantedLidLimit = op.getLidLimit();
+ }
+};
+
+
+struct MySubDb
+{
+ MyFeedView::SP _view;
+ MySubDb(const DocumentTypeRepo::SP &repo,
+ std::shared_ptr<BucketDBOwner> bucketDB,
+ SubDbType subDbType)
+ : _view(new MyFeedView(repo, bucketDB, subDbType))
+ {
+ }
+ void insertDocs(const test::BucketDocuments &docs) {
+ for (size_t i = 0; i < docs.getDocs().size(); ++i) {
+ const test::Document &testDoc = docs.getDocs()[i];
+ _view->_metaStore.put(testDoc.getGid(), testDoc.getBucket(),
+ testDoc.getTimestamp(), testDoc.getLid());
+ }
+ }
+};
+
+
+FeedViewVector
+getVector(const MySubDb &ready,
+ const MySubDb &removed,
+ const MySubDb &notReady)
+{
+ FeedViewVector retval;
+ retval.push_back(ready._view);
+ retval.push_back(removed._view);
+ retval.push_back(notReady._view);
+ return retval;
+}
+
+const uint32_t READY = 0;
+const uint32_t REMOVED = 1;
+const uint32_t NOT_READY = 2;
+
+struct Fixture
+{
+ test::UserDocumentsBuilder _builder;
+ std::shared_ptr<BucketDBOwner> _bucketDB;
+ MySubDb _ready;
+ MySubDb _removed;
+ MySubDb _notReady;
+ test::BucketStateCalculator::SP _calc;
+ CombiningFeedView _view;
+ Fixture() :
+ _builder(),
+ _bucketDB(std::make_shared<BucketDBOwner>()),
+ _ready(_builder.getRepo(), _bucketDB, SubDbType::READY),
+ _removed(_builder.getRepo(), _bucketDB, SubDbType::REMOVED),
+ _notReady(_builder.getRepo(), _bucketDB, SubDbType::NOTREADY),
+ _calc(new test::BucketStateCalculator()),
+ _view(getVector(_ready, _removed, _notReady), _calc)
+ {
+ _builder.createDoc(1, 1);
+ _builder.createDoc(2, 2);
+ }
+ const test::UserDocuments &userDocs() const { return _builder.getDocs(); }
+ const test::BucketDocuments &userDocs(uint32_t userId) const { return userDocs().getUserDocs(userId); }
+ PutOperation put(uint32_t userId) {
+ const test::Document &doc = userDocs().getDocs(userId)[0];
+ return PutOperation(doc.getBucket(), doc.getTimestamp(), doc.getDoc());
+ }
+ RemoveOperation remove(uint32_t userId) {
+ const test::Document &doc = userDocs().getDocs(userId)[0];
+ return RemoveOperation(doc.getBucket(), doc.getTimestamp(), doc.getDoc()->getId());
+ }
+ UpdateOperation update(uint32_t userId) {
+ const test::Document &doc = userDocs().getDocs(userId)[0];
+ return UpdateOperation(doc.getBucket(), doc.getTimestamp(), DocumentUpdate::SP());
+ }
+ MoveOperation move(uint32_t userId, DbDocumentId sourceDbdId, DbDocumentId targetDbdId) {
+ const test::Document &doc = userDocs().getDocs(userId)[0];
+ MoveOperation retval(doc.getBucket(), doc.getTimestamp(), doc.getDoc(),
+ sourceDbdId, targetDbdId.getSubDbId());
+ retval.setTargetLid(targetDbdId.getLid());
+ return retval;
+ }
+};
+
+
+TEST_F("require that preparePut() sends to ready view", Fixture)
+{
+ PutOperation op = f.put(1);
+ f._calc->addReady(f.userDocs().getBucket(1));
+ f._view.preparePut(op);
+ EXPECT_EQUAL(1u, f._ready._view->_preparePut);
+ EXPECT_EQUAL(0u, f._removed._view->_preparePut);
+ EXPECT_EQUAL(0u, f._notReady._view->_preparePut);
+ EXPECT_FALSE(op.getValidPrevDbdId());
+}
+
+
+TEST_F("require that preparePut() sends to not ready view", Fixture)
+{
+ PutOperation op = f.put(1);
+ f._view.preparePut(op);
+ EXPECT_EQUAL(0u, f._ready._view->_preparePut);
+ EXPECT_EQUAL(0u, f._removed._view->_preparePut);
+ EXPECT_EQUAL(1u, f._notReady._view->_preparePut);
+ EXPECT_FALSE(op.getValidPrevDbdId());
+}
+
+
+TEST_F("require that preparePut() can fill previous dbdId", Fixture)
+{
+ // insert bucket 1 in removed view
+ f._removed.insertDocs(f.userDocs(1));
+ PutOperation op = f.put(1);
+ f._view.preparePut(op);
+ EXPECT_EQUAL(1u, op.getPrevLid());
+ EXPECT_EQUAL(REMOVED, op.getPrevSubDbId());
+ EXPECT_EQUAL(Timestamp(1), op.getPrevTimestamp());
+ EXPECT_TRUE(op.getPrevMarkedAsRemoved());
+}
+
+
+TEST_F("require that handlePut() sends to 1 feed view", Fixture)
+{
+ PutOperation op = f.put(2);
+ op.setDbDocumentId(DbDocumentId(READY, 2));
+ f._view.handlePut(NULL, op);
+ EXPECT_EQUAL(1u, f._ready._view->_handlePut);
+ EXPECT_EQUAL(0u, f._removed._view->_handlePut);
+ EXPECT_EQUAL(0u, f._notReady._view->_handlePut);
+}
+
+
+TEST_F("require that handlePut() sends to 2 feed views", Fixture)
+{
+ PutOperation op = f.put(2);
+ op.setDbDocumentId(DbDocumentId(NOT_READY, 2));
+ op.setPrevDbDocumentId(DbDocumentId(REMOVED, 2));
+ f._view.handlePut(NULL, op);
+ EXPECT_EQUAL(0u, f._ready._view->_handlePut);
+ EXPECT_EQUAL(1u, f._removed._view->_handlePut);
+ EXPECT_EQUAL(1u, f._notReady._view->_handlePut);
+}
+
+
+TEST_F("require that prepareRemove() sends to removed view", Fixture)
+{
+ RemoveOperation op = f.remove(1);
+ f._view.prepareRemove(op);
+ EXPECT_EQUAL(0u, f._ready._view->_prepareRemove);
+ EXPECT_EQUAL(1u, f._removed._view->_prepareRemove);
+ EXPECT_EQUAL(0u, f._notReady._view->_prepareRemove);
+ EXPECT_FALSE(op.getValidPrevDbdId());
+}
+
+
+TEST_F("require that prepareRemove() can fill previous dbdId", Fixture)
+{
+ f._ready.insertDocs(f.userDocs(1));
+ RemoveOperation op = f.remove(1);
+ f._view.prepareRemove(op);
+ EXPECT_EQUAL(1u, op.getPrevLid());
+ EXPECT_EQUAL(READY, op.getPrevSubDbId());
+ EXPECT_EQUAL(Timestamp(1), op.getPrevTimestamp());
+ EXPECT_FALSE(op.getPrevMarkedAsRemoved());
+}
+
+
+TEST_F("require that handleRemove() sends op with valid dbdId to 1 feed view", Fixture)
+{
+ RemoveOperation op = f.remove(1);
+ op.setDbDocumentId(DbDocumentId(REMOVED, 1));
+ f._view.handleRemove(NULL, op);
+ EXPECT_EQUAL(0u, f._ready._view->_handleRemove);
+ EXPECT_EQUAL(1u, f._removed._view->_handleRemove);
+ EXPECT_EQUAL(0u, f._notReady._view->_handleRemove);
+}
+
+
+TEST_F("require that handleRemove() sends op with valid dbdId to 2 feed views", Fixture)
+{
+ RemoveOperation op = f.remove(1);
+ op.setDbDocumentId(DbDocumentId(REMOVED, 1));
+ op.setPrevDbDocumentId(DbDocumentId(READY, 1));
+ f._view.handleRemove(NULL, op);
+ EXPECT_EQUAL(1u, f._ready._view->_handleRemove);
+ EXPECT_EQUAL(1u, f._removed._view->_handleRemove);
+ EXPECT_EQUAL(0u, f._notReady._view->_handleRemove);
+}
+
+
+TEST_F("require that handleRemove() sends op with invalid dbdId to prev view", Fixture)
+{
+ RemoveOperation op = f.remove(1);
+ // can be used in the case where removed feed view does not remember removes.
+ op.setPrevDbDocumentId(DbDocumentId(READY, 1));
+ f._view.handleRemove(NULL, op);
+ EXPECT_EQUAL(1u, f._ready._view->_handleRemove);
+ EXPECT_EQUAL(0u, f._removed._view->_handleRemove);
+ EXPECT_EQUAL(0u, f._notReady._view->_handleRemove);
+}
+
+
+TEST_F("require that prepareUpdate() sends to ready view first", Fixture)
+{
+ UpdateOperation op = f.update(1);
+ // indicate that doc is in ready view
+ op.setPrevDbDocumentId(DbDocumentId(READY, 1));
+ f._view.prepareUpdate(op);
+ EXPECT_EQUAL(1u, f._ready._view->_prepareUpdate);
+ EXPECT_EQUAL(0u, f._removed._view->_prepareUpdate);
+ EXPECT_EQUAL(0u, f._notReady._view->_prepareUpdate);
+}
+
+
+TEST_F("require that prepareUpdate() sends to not ready view if not found in ready view", Fixture)
+{
+ UpdateOperation op = f.update(1);
+ f._view.prepareUpdate(op);
+ EXPECT_EQUAL(1u, f._ready._view->_prepareUpdate);
+ EXPECT_EQUAL(0u, f._removed._view->_prepareUpdate);
+ EXPECT_EQUAL(1u, f._notReady._view->_prepareUpdate);
+}
+
+
+TEST_F("require that handleUpdate() sends op to correct view", Fixture)
+{
+ UpdateOperation op = f.update(1);
+ op.setDbDocumentId(DbDocumentId(READY, 1));
+ op.setPrevDbDocumentId(DbDocumentId(READY, 1));
+ f._view.handleUpdate(NULL, op);
+ EXPECT_EQUAL(1u, f._ready._view->_handleUpdate);
+ EXPECT_EQUAL(0u, f._removed._view->_handleUpdate);
+ EXPECT_EQUAL(0u, f._notReady._view->_handleUpdate);
+}
+
+
+TEST_F("require that prepareMove() sends op to correct feed view", Fixture)
+{
+ MoveOperation op = f.move(1, DbDocumentId(READY, 1), DbDocumentId(NOT_READY, 1));
+ f._view.prepareMove(op);
+ EXPECT_EQUAL(0u, f._ready._view->_prepareMove);
+ EXPECT_EQUAL(0u, f._removed._view->_prepareMove);
+ EXPECT_EQUAL(1u, f._notReady._view->_prepareMove);
+}
+
+
+TEST_F("require that handleMove() sends op to 2 feed views", Fixture)
+{
+ MoveOperation op = f.move(1, DbDocumentId(READY, 1), DbDocumentId(NOT_READY, 1));
+ f._view.handleMove(op);
+ EXPECT_EQUAL(1u, f._ready._view->_handleMove);
+ EXPECT_EQUAL(0u, f._removed._view->_handleMove);
+ EXPECT_EQUAL(1u, f._notReady._view->_handleMove);
+}
+
+
+TEST_F("require that handleMove() sends op to 1 feed view", Fixture)
+{
+ // same source and target
+ MoveOperation op = f.move(1, DbDocumentId(READY, 1), DbDocumentId(READY, 1));
+ f._view.handleMove(op);
+ EXPECT_EQUAL(1u, f._ready._view->_handleMove);
+ EXPECT_EQUAL(0u, f._removed._view->_handleMove);
+ EXPECT_EQUAL(0u, f._notReady._view->_handleMove);
+}
+
+
+TEST_F("require that delete bucket is sent to all feed views", Fixture)
+{
+ DeleteBucketOperation op;
+ f._view.prepareDeleteBucket(op);
+ EXPECT_EQUAL(1u, f._ready._view->_prepareDeleteBucket);
+ EXPECT_EQUAL(1u, f._removed._view->_prepareDeleteBucket);
+ EXPECT_EQUAL(1u, f._notReady._view->_prepareDeleteBucket);
+ f._view.handleDeleteBucket(op);
+ EXPECT_EQUAL(1u, f._ready._view->_handleDeleteBucket);
+ EXPECT_EQUAL(1u, f._removed._view->_handleDeleteBucket);
+ EXPECT_EQUAL(1u, f._notReady._view->_handleDeleteBucket);
+}
+
+
+TEST_F("require that heart beat is sent to all feed views", Fixture)
+{
+ f._view.heartBeat(5);
+ EXPECT_EQUAL(1u, f._ready._view->_heartBeat);
+ EXPECT_EQUAL(1u, f._removed._view->_heartBeat);
+ EXPECT_EQUAL(1u, f._notReady._view->_heartBeat);
+}
+
+
+TEST_F("require that prune removed documents is sent to removed view", Fixture)
+{
+ PruneRemovedDocumentsOperation op;
+ f._view.handlePruneRemovedDocuments(op);
+ EXPECT_EQUAL(0u, f._ready._view->_handlePrune);
+ EXPECT_EQUAL(1u, f._removed._view->_handlePrune);
+ EXPECT_EQUAL(0u, f._notReady._view->_handlePrune);
+}
+
+
+TEST_F("require that calculator can be updated", Fixture)
+{
+ f._calc->addReady(f.userDocs().getBucket(1));
+ PutOperation op1 = f.put(1);
+ PutOperation op2 = f.put(2);
+ {
+ test::BucketStateCalculator::SP calc;
+ f._view.setCalculator(calc);
+ f._view.preparePut(op1);
+ EXPECT_EQUAL(1u, f._ready._view->_preparePut);
+ EXPECT_EQUAL(0u, f._notReady._view->_preparePut);
+ f._view.preparePut(op2);
+ EXPECT_EQUAL(2u, f._ready._view->_preparePut);
+ EXPECT_EQUAL(0u, f._notReady._view->_preparePut);
+ }
+ {
+ test::BucketStateCalculator::SP calc(new test::BucketStateCalculator());
+ calc->addReady(f.userDocs().getBucket(2));
+ f._view.setCalculator(calc);
+ f._view.preparePut(op1);
+ EXPECT_EQUAL(2u, f._ready._view->_preparePut);
+ EXPECT_EQUAL(1u, f._notReady._view->_preparePut);
+ f._view.preparePut(op2);
+ EXPECT_EQUAL(3u, f._ready._view->_preparePut);
+ EXPECT_EQUAL(1u, f._notReady._view->_preparePut);
+ }
+ {
+ test::BucketStateCalculator::SP calc(new test::BucketStateCalculator());
+ calc->setClusterUp(false);
+ f._view.setCalculator(calc);
+ f._view.preparePut(op1);
+ EXPECT_EQUAL(4u, f._ready._view->_preparePut);
+ EXPECT_EQUAL(1u, f._notReady._view->_preparePut);
+ f._view.preparePut(op2);
+ EXPECT_EQUAL(5u, f._ready._view->_preparePut);
+ EXPECT_EQUAL(1u, f._notReady._view->_preparePut);
+ }
+}
+
+TEST_F("require that compactLidSpace() is sent to correct feed view", Fixture)
+{
+ f._view.handleCompactLidSpace(CompactLidSpaceOperation(1, 99));
+ EXPECT_EQUAL(0u, f._ready._view->_wantedLidLimit);
+ EXPECT_EQUAL(99u, f._removed._view->_wantedLidLimit);
+ EXPECT_EQUAL(0u, f._notReady._view->_wantedLidLimit);
+}
+
+TEST_MAIN()
+{
+ TEST_RUN_ALL();
+}
+
diff --git a/searchcore/src/tests/proton/documentdb/configurer/.gitignore b/searchcore/src/tests/proton/documentdb/configurer/.gitignore
new file mode 100644
index 00000000000..3714f1b204d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/configurer/.gitignore
@@ -0,0 +1 @@
+searchcore_configurer_test_app
diff --git a/searchcore/src/tests/proton/documentdb/configurer/CMakeLists.txt b/searchcore/src/tests/proton/documentdb/configurer/CMakeLists.txt
new file mode 100644
index 00000000000..ee18f0f6938
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/configurer/CMakeLists.txt
@@ -0,0 +1,22 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_configurer_test_app
+ SOURCES
+ configurer_test.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_reprocessing
+ searchcore_index
+ searchcore_docsummary
+ searchcore_feedoperation
+ searchcore_matching
+ searchcore_attribute
+ searchcore_documentmetastore
+ searchcore_bucketdb
+ searchcore_flushengine
+ searchcore_pcommon
+ searchcore_grouping
+ searchcore_proton_metrics
+ searchcore_fconfig
+ searchcore_util
+)
+vespa_add_test(NAME searchcore_configurer_test_app COMMAND searchcore_configurer_test_app)
diff --git a/searchcore/src/tests/proton/documentdb/configurer/DESC b/searchcore/src/tests/proton/documentdb/configurer/DESC
new file mode 100644
index 00000000000..5d7765db8d2
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/configurer/DESC
@@ -0,0 +1 @@
+configurer test. Take a look at configurer_test.cpp for details.
diff --git a/searchcore/src/tests/proton/documentdb/configurer/FILES b/searchcore/src/tests/proton/documentdb/configurer/FILES
new file mode 100644
index 00000000000..a7ff508edc0
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/configurer/FILES
@@ -0,0 +1 @@
+configurer_test.cpp
diff --git a/searchcore/src/tests/proton/documentdb/configurer/configurer_test.cpp b/searchcore/src/tests/proton/documentdb/configurer/configurer_test.cpp
new file mode 100644
index 00000000000..1764d6f2996
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/configurer/configurer_test.cpp
@@ -0,0 +1,611 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("configurer_test");
+#include <vespa/vespalib/testkit/testapp.h>
+
+#include <vespa/searchcore/proton/attribute/attribute_writer.h>
+#include <vespa/searchcore/proton/attribute/attributemanager.h>
+#include <vespa/searchcore/proton/docsummary/summarymanager.h>
+#include <vespa/searchcore/proton/documentmetastore/documentmetastore.h>
+#include <vespa/searchcore/proton/documentmetastore/lidreusedelayer.h>
+#include <vespa/searchcore/proton/metrics/feed_metrics.h>
+#include <vespa/searchcore/proton/index/index_writer.h>
+#include <vespa/searchcore/proton/index/indexmanager.h>
+#include <vespa/searchcore/proton/reprocessing/attribute_reprocessing_initializer.h>
+#include <vespa/searchcore/proton/server/attributeadapterfactory.h>
+#include <vespa/searchcore/proton/server/documentdbconfigmanager.h>
+#include <vespa/searchcore/proton/server/searchable_doc_subdb_configurer.h>
+#include <vespa/searchcore/proton/server/executorthreadingservice.h>
+#include <vespa/searchcore/proton/server/fast_access_doc_subdb_configurer.h>
+#include <vespa/searchcore/proton/server/searchable_feed_view.h>
+#include <vespa/searchcore/proton/server/matchers.h>
+#include <vespa/searchcore/proton/server/summaryadapter.h>
+#include <vespa/searchcore/proton/common/commit_time_tracker.h>
+#include <vespa/searchlib/attribute/attributevector.h>
+#include <vespa/searchlib/common/tunefileinfo.h>
+#include <vespa/searchlib/index/dummyfileheadercontext.h>
+#include <vespa/searchlib/transactionlog/nosyncproxy.h>
+#include <vespa/vespalib/io/fileutil.h>
+
+using namespace config;
+using namespace document;
+using namespace proton;
+using namespace search::grouping;
+using namespace search::index;
+using namespace search::queryeval;
+using namespace search;
+using namespace vespa::config::search::core;
+using namespace vespa::config::search::summary;
+using namespace vespa::config::search;
+using namespace vespalib;
+
+using document::DocumenttypesConfig;
+using fastos::TimeStamp;
+using proton::matching::SessionManager;
+using searchcorespi::IndexSearchable;
+using searchcorespi::index::IThreadingService;
+
+
+typedef DocumentDBConfig::ComparisonResult ConfigComparisonResult;
+typedef SearchableDocSubDBConfigurer Configurer;
+typedef std::unique_ptr<SearchableDocSubDBConfigurer> ConfigurerUP;
+typedef SummaryManager::SummarySetup SummarySetup;
+typedef proton::DocumentDBConfig::DocumenttypesConfigSP DocumenttypesConfigSP;
+
+const vespalib::string BASE_DIR("baseDir");
+const vespalib::string DOC_TYPE("invalid");
+
+class IndexManagerDummyReconfigurer : public searchcorespi::IIndexManager::Reconfigurer
+{
+ virtual bool reconfigure(vespalib::Closure0<bool>::UP closure) {
+ bool ret = true;
+ if (closure.get() != NULL)
+ ret = closure->call(); // Perform index manager reconfiguration now
+ return ret;
+ }
+};
+
+DocumentTypeRepo::SP
+createRepo()
+{
+ DocumentType docType(DOC_TYPE, 0);
+ return DocumentTypeRepo::SP(new DocumentTypeRepo(docType));
+}
+
+struct ViewPtrs
+{
+ SearchView::SP sv;
+ SearchableFeedView::SP fv;
+};
+
+struct ViewSet
+{
+ IndexManagerDummyReconfigurer _reconfigurer;
+ DummyFileHeaderContext _fileHeaderContext;
+ ExecutorThreadingService _writeService;
+ SearchableFeedView::SerialNum serialNum;
+ DocumentTypeRepo::SP repo;
+ DocTypeName _docTypeName;
+ DocIdLimit _docIdLimit;
+ search::transactionlog::NoSyncProxy _noTlSyncer;
+ ISummaryManager::SP _summaryMgr;
+ IDocumentMetaStoreContext::SP _dmsc;
+ std::unique_ptr<documentmetastore::ILidReuseDelayer> _lidReuseDelayer;
+ CommitTimeTracker _commitTimeTracker;
+ VarHolder<SearchView::SP> searchView;
+ VarHolder<SearchableFeedView::SP> feedView;
+ ViewSet()
+ : _reconfigurer(),
+ _fileHeaderContext(),
+ _writeService(),
+ serialNum(1),
+ repo(createRepo()),
+ _docTypeName(DOC_TYPE),
+ _docIdLimit(0u),
+ _noTlSyncer(),
+ _summaryMgr(),
+ _dmsc(),
+ _lidReuseDelayer(),
+ _commitTimeTracker(TimeStamp()),
+ searchView(),
+ feedView()
+ {
+ }
+
+ ViewPtrs getViewPtrs() {
+ ViewPtrs ptrs;
+ ptrs.sv = searchView.get();
+ ptrs.fv = feedView.get();
+ return ptrs;
+ }
+};
+
+struct Fixture
+{
+ vespalib::Clock _clock;
+ matching::QueryLimiter _queryLimiter;
+ vespalib::ThreadStackExecutor _summaryExecutor;
+ ViewSet _views;
+ ConfigurerUP _configurer;
+ Fixture()
+ : _clock(),
+ _queryLimiter(),
+ _summaryExecutor(8, 128*1024),
+ _views(),
+ _configurer()
+ {
+ vespalib::mkdir(BASE_DIR);
+ initViewSet(_views);
+ _configurer.reset(new Configurer(_views._summaryMgr,
+ _views.searchView,
+ _views.feedView,
+ _queryLimiter,
+ _clock,
+ "test",
+ 0));
+ }
+ ~Fixture() {
+ vespalib::rmdir(BASE_DIR, true);
+ }
+ void initViewSet(ViewSet &views);
+};
+
+void
+Fixture::initViewSet(ViewSet &views)
+{
+ Matchers::SP matchers(new Matchers(_clock, _queryLimiter));
+ IndexManager::SP indexMgr(new IndexManager(BASE_DIR,
+ 0.0, 2, 0, Schema(), Schema(), views._reconfigurer,
+ views._writeService, _summaryExecutor, TuneFileIndexManager(),
+ TuneFileAttributes(), views._fileHeaderContext));
+ AttributeManager::SP attrMgr(new AttributeManager(BASE_DIR,
+ "test.subdb",
+ TuneFileAttributes(),
+ views._fileHeaderContext,
+ views._writeService.
+ attributeFieldWriter()));
+ ProtonConfig protonCfg;
+ SummaryManager::SP summaryMgr(
+ new SummaryManager(_summaryExecutor, ProtonConfig::Summary(),
+ GrowStrategy(), BASE_DIR, views._docTypeName,
+ TuneFileSummary(), views._fileHeaderContext,
+ views._noTlSyncer, search::IBucketizer::SP()));
+ SessionManager::SP sesMgr(
+ new SessionManager(protonCfg.grouping.sessionmanager.maxentries));
+ DocumentMetaStoreContext::SP metaStore(
+ new DocumentMetaStoreContext(std::make_shared<BucketDBOwner>()));
+ IIndexWriter::SP indexWriter(new IndexWriter(indexMgr));
+ AttributeWriter::SP attrWriter(new AttributeWriter(attrMgr));
+ ISummaryAdapter::SP summaryAdapter(new SummaryAdapter(summaryMgr));
+ Schema::SP schema(new Schema());
+ views._summaryMgr = summaryMgr;
+ views._dmsc = metaStore;
+ views._lidReuseDelayer.reset(
+ new documentmetastore::LidReuseDelayer(views._writeService,
+ metaStore->get()));
+ IndexSearchable::SP indexSearchable;
+ MatchView::SP matchView(new MatchView(matchers, indexSearchable, attrMgr,
+ sesMgr, metaStore, views._docIdLimit));
+ views.searchView.set(
+ SearchView::SP(
+ new SearchView(
+ summaryMgr->createSummarySetup(SummaryConfig(),
+ SummarymapConfig(),
+ JuniperrcConfig(),
+ views.repo,
+ attrMgr),
+ matchView)));
+ PerDocTypeFeedMetrics metrics(0);
+ views.feedView.set(
+ SearchableFeedView::SP(
+ new SearchableFeedView(StoreOnlyFeedView::Context(summaryAdapter,
+ schema,
+ views.searchView.get()->getDocumentMetaStore(),
+ views.repo,
+ views._writeService,
+ *views._lidReuseDelayer,
+ views._commitTimeTracker),
+ SearchableFeedView::PersistentParams(
+ views.serialNum,
+ views.serialNum,
+ views._docTypeName,
+ metrics,
+ 0u /* subDbId */,
+ SubDbType::READY),
+ FastAccessFeedView::Context(attrWriter, views._docIdLimit),
+ SearchableFeedView::Context(indexWriter))));
+}
+
+
+struct MySummaryAdapter : public ISummaryAdapter
+{
+ virtual void put(search::SerialNum, const document::Document &, const search::DocumentIdT) {}
+ virtual void remove(search::SerialNum, const search::DocumentIdT) {}
+ virtual void update(search::SerialNum, const document::DocumentUpdate &,
+ const search::DocumentIdT, const document::DocumentTypeRepo &) {}
+ virtual void heartBeat(search::SerialNum) {}
+ virtual const search::IDocumentStore &getDocumentStore() const {
+ const search::IDocumentStore *store = NULL;
+ return *store;
+ }
+ virtual std::unique_ptr<document::Document> get(const search::DocumentIdT,
+ const document::DocumentTypeRepo &) {
+ return std::unique_ptr<document::Document>();
+ }
+};
+
+// Test double that wires a real FastAccessFeedView from mostly dummy
+// collaborators (dummy summary adapter, empty schema, fresh meta store),
+// so FastAccessDocSubDBConfigurer can be exercised without a full DocumentDB.
+struct MyFastAccessFeedView
+{
+ PerDocTypeFeedMetrics _metrics;
+ DummyFileHeaderContext _fileHeaderContext;
+ DocIdLimit _docIdLimit;
+ IThreadingService &_writeService;
+ IDocumentMetaStoreContext::SP _dmsc;
+ std::unique_ptr<documentmetastore::ILidReuseDelayer> _lidReuseDelayer;
+ CommitTimeTracker _commitTimeTracker;
+ VarHolder<FastAccessFeedView::SP> _feedView;
+
+ MyFastAccessFeedView(IThreadingService &writeService)
+ : _metrics(0),
+ _fileHeaderContext(),
+ _docIdLimit(0),
+ _writeService(writeService),
+ _dmsc(),
+ _lidReuseDelayer(),
+ _commitTimeTracker(TimeStamp()),
+ _feedView()
+ {
+ init();
+ }
+ // Builds the feed view: store-only context + attribute manager rooted at
+ // BASE_DIR, then publishes the assembled view through _feedView.
+ void init() {
+ ISummaryAdapter::SP summaryAdapter(new MySummaryAdapter());
+ Schema::SP schema(new Schema());
+ DocumentMetaStoreContext::SP docMetaCtx(
+ new DocumentMetaStoreContext(std::make_shared<BucketDBOwner>()));
+ _dmsc = docMetaCtx;
+ _lidReuseDelayer.reset(
+ new documentmetastore::LidReuseDelayer(_writeService,
+ docMetaCtx->get()));
+ DocumentTypeRepo::SP repo = createRepo();
+ StoreOnlyFeedView::Context storeOnlyCtx(summaryAdapter, schema, docMetaCtx, repo, _writeService, *_lidReuseDelayer, _commitTimeTracker);
+ StoreOnlyFeedView::PersistentParams params(1, 1, DocTypeName(DOC_TYPE), _metrics, 0, SubDbType::NOTREADY);
+ AttributeManager::SP mgr(new AttributeManager(BASE_DIR, "test.subdb",
+ TuneFileAttributes(),
+ _fileHeaderContext,
+ _writeService.
+ attributeFieldWriter()));
+ IAttributeWriter::SP writer(new AttributeWriter(mgr));
+ FastAccessFeedView::Context fastUpdateCtx(writer, _docIdLimit);
+ // NOTE(review): stray second ';' below is harmless but should be dropped.
+ _feedView.set(FastAccessFeedView::SP(new FastAccessFeedView(storeOnlyCtx,
+ params, fastUpdateCtx)));;
+ }
+};
+
+// Fixture pairing MyFastAccessFeedView with the configurer under test.
+// Creates BASE_DIR on construction; the destructor syncs the write threads
+// before removing the directory so no writer touches deleted files.
+struct FastAccessFixture
+{
+ ExecutorThreadingService _writeService;
+ MyFastAccessFeedView _view;
+ FastAccessDocSubDBConfigurer _configurer;
+ FastAccessFixture()
+ : _writeService(),
+ _view(_writeService),
+ _configurer(_view._feedView,
+ IAttributeAdapterFactory::UP(new AttributeAdapterFactory), "test")
+ {
+ vespalib::mkdir(BASE_DIR);
+ }
+ ~FastAccessFixture() {
+ _writeService.sync();
+ vespalib::rmdir(BASE_DIR, true);
+ }
+};
+
+
+// Returns a minimal DocumentDBConfig (generation 0, all sub-configs default,
+// repo from createRepo(), empty Schema) used as both "old" and "new" snapshot
+// in the reconfigure tests.
+DocumentDBConfig::SP
+createConfig()
+{
+ DocumentDBConfig::SP config
+ (new DocumentDBConfig(
+ 0,
+ DocumentDBConfig::RankProfilesConfigSP(
+ new RankProfilesConfig()),
+ DocumentDBConfig::IndexschemaConfigSP(new IndexschemaConfig()),
+ DocumentDBConfig::AttributesConfigSP(new AttributesConfig()),
+ DocumentDBConfig::SummaryConfigSP(new SummaryConfig()),
+ DocumentDBConfig::SummarymapConfigSP(new SummarymapConfig()),
+ DocumentDBConfig::JuniperrcConfigSP(new JuniperrcConfig()),
+ DocumenttypesConfigSP(new DocumenttypesConfig()),
+ DocumentTypeRepo::SP(createRepo()),
+ TuneFileDocumentDB::SP(new TuneFileDocumentDB),
+ Schema::SP(new Schema),
+ DocumentDBMaintenanceConfig::SP(
+ new DocumentDBMaintenanceConfig),
+ "client", DOC_TYPE));
+ return config;
+}
+
+// Overload of createConfig() that carries a caller-supplied Schema; everything
+// else matches the no-argument overload above.
+DocumentDBConfig::SP
+createConfig(const Schema::SP &schema)
+{
+ DocumentDBConfig::SP config
+ (new DocumentDBConfig(
+ 0,
+ DocumentDBConfig::RankProfilesConfigSP(new RankProfilesConfig()),
+ DocumentDBConfig::IndexschemaConfigSP(new IndexschemaConfig()),
+ DocumentDBConfig::AttributesConfigSP(new AttributesConfig()),
+ DocumentDBConfig::SummaryConfigSP(new SummaryConfig()),
+ DocumentDBConfig::SummarymapConfigSP(new SummarymapConfig()),
+ DocumentDBConfig::JuniperrcConfigSP(new JuniperrcConfig()),
+ DocumenttypesConfigSP(new DocumenttypesConfig()),
+ DocumentTypeRepo::SP(createRepo()),
+ TuneFileDocumentDB::SP(new TuneFileDocumentDB),
+ schema,
+ DocumentDBMaintenanceConfig::SP(
+ new DocumentDBMaintenanceConfig),
+ "client", DOC_TYPE));
+ return config;
+}
+
+// Asserts pointer (identity) equality/inequality between the components of an
+// old and a new SearchView, letting tests state exactly which parts a
+// reconfigure is expected to replace and which it must reuse.
+struct SearchViewComparer
+{
+ SearchView::SP _old;
+ SearchView::SP _new;
+ SearchViewComparer(SearchView::SP old, SearchView::SP new_) : _old(old), _new(new_) {}
+ void expect_equal() {
+ EXPECT_EQUAL(_old.get(), _new.get());
+ }
+ void expect_not_equal() {
+ EXPECT_NOT_EQUAL(_old.get(), _new.get());
+ }
+ void expect_equal_summary_setup() {
+ EXPECT_EQUAL(_old->getSummarySetup().get(), _new->getSummarySetup().get());
+ }
+ void expect_not_equal_summary_setup() {
+ EXPECT_NOT_EQUAL(_old->getSummarySetup().get(), _new->getSummarySetup().get());
+ }
+ void expect_equal_match_view() {
+ EXPECT_EQUAL(_old->getMatchView().get(), _new->getMatchView().get());
+ }
+ void expect_not_equal_match_view() {
+ EXPECT_NOT_EQUAL(_old->getMatchView().get(), _new->getMatchView().get());
+ }
+ void expect_equal_matchers() {
+ EXPECT_EQUAL(_old->getMatchers().get(), _new->getMatchers().get());
+ }
+ void expect_not_equal_matchers() {
+ EXPECT_NOT_EQUAL(_old->getMatchers().get(), _new->getMatchers().get());
+ }
+ void expect_equal_index_searchable() {
+ EXPECT_EQUAL(_old->getIndexSearchable().get(), _new->getIndexSearchable().get());
+ }
+ void expect_not_equal_index_searchable() {
+ EXPECT_NOT_EQUAL(_old->getIndexSearchable().get(), _new->getIndexSearchable().get());
+ }
+ void expect_equal_attribute_manager() {
+ EXPECT_EQUAL(_old->getAttributeManager().get(), _new->getAttributeManager().get());
+ }
+ void expect_not_equal_attribute_manager() {
+ EXPECT_NOT_EQUAL(_old->getAttributeManager().get(), _new->getAttributeManager().get());
+ }
+ void expect_equal_session_manager() {
+ EXPECT_EQUAL(_old->getSessionManager().get(), _new->getSessionManager().get());
+ }
+ void expect_equal_document_meta_store() {
+ EXPECT_EQUAL(_old->getDocumentMetaStore().get(), _new->getDocumentMetaStore().get());
+ }
+};
+
+// Identity comparer for SearchableFeedView components (same idea as
+// SearchViewComparer above, for the feed side of a reconfigure).
+struct FeedViewComparer
+{
+ SearchableFeedView::SP _old;
+ SearchableFeedView::SP _new;
+ FeedViewComparer(SearchableFeedView::SP old, SearchableFeedView::SP new_) : _old(old), _new(new_) {}
+ void expect_equal() {
+ EXPECT_EQUAL(_old.get(), _new.get());
+ }
+ void expect_not_equal() {
+ EXPECT_NOT_EQUAL(_old.get(), _new.get());
+ }
+ void expect_equal_index_adapter() {
+ EXPECT_EQUAL(_old->getIndexWriter().get(), _new->getIndexWriter().get());
+ }
+ void expect_equal_attribute_adapter() {
+ EXPECT_EQUAL(_old->getAttributeWriter().get(), _new->getAttributeWriter().get());
+ }
+ void expect_not_equal_attribute_adapter() {
+ EXPECT_NOT_EQUAL(_old->getAttributeWriter().get(), _new->getAttributeWriter().get());
+ }
+ void expect_equal_summary_adapter() {
+ EXPECT_EQUAL(_old->getSummaryAdapter().get(), _new->getSummaryAdapter().get());
+ }
+ void expect_equal_schema() {
+ EXPECT_EQUAL(_old->getSchema().get(), _new->getSchema().get());
+ }
+ void expect_not_equal_schema() {
+ EXPECT_NOT_EQUAL(_old->getSchema().get(), _new->getSchema().get());
+ }
+};
+
+// Identity comparer for FastAccessFeedView components; only the expectations
+// actually used by the fast-access tests are provided.
+struct FastAccessFeedViewComparer
+{
+ FastAccessFeedView::SP _old;
+ FastAccessFeedView::SP _new;
+ FastAccessFeedViewComparer(FastAccessFeedView::SP old, FastAccessFeedView::SP new_)
+ : _old(old), _new(new_)
+ {}
+ void expect_not_equal() {
+ EXPECT_NOT_EQUAL(_old.get(), _new.get());
+ }
+ void expect_not_equal_attribute_adapter() {
+ EXPECT_NOT_EQUAL(_old->getAttributeWriter().get(), _new->getAttributeWriter().get());
+ }
+ void expect_equal_summary_adapter() {
+ EXPECT_EQUAL(_old->getSummaryAdapter().get(), _new->getSummaryAdapter().get());
+ }
+ void expect_not_equal_schema() {
+ EXPECT_NOT_EQUAL(_old->getSchema().get(), _new->getSchema().get());
+ }
+};
+
+// Reconfiguring only the index searchable must replace the match view and
+// index searchable while reusing matchers, attribute manager, session manager,
+// meta store and every feed-side adapter.
+TEST_F("require that we can reconfigure index searchable", Fixture)
+{
+ ViewPtrs o = f._views.getViewPtrs();
+ f._configurer->reconfigureIndexSearchable();
+
+ ViewPtrs n = f._views.getViewPtrs();
+ { // verify search view
+ SearchViewComparer cmp(o.sv, n.sv);
+ cmp.expect_not_equal();
+ cmp.expect_equal_summary_setup();
+ cmp.expect_not_equal_match_view();
+ cmp.expect_equal_matchers();
+ cmp.expect_not_equal_index_searchable();
+ cmp.expect_equal_attribute_manager();
+ cmp.expect_equal_session_manager();
+ cmp.expect_equal_document_meta_store();
+ }
+ { // verify feed view
+ FeedViewComparer cmp(o.fv, n.fv);
+ cmp.expect_not_equal();
+ cmp.expect_equal_index_adapter();
+ cmp.expect_equal_attribute_adapter();
+ cmp.expect_equal_summary_adapter();
+ cmp.expect_equal_schema();
+ }
+}
+
+// An attributes+schema change must rebuild attribute manager, matchers,
+// summary setup and the attribute writer, while the index searchable,
+// session manager and meta store are reused.
+TEST_F("require that we can reconfigure attribute manager", Fixture)
+{
+ ViewPtrs o = f._views.getViewPtrs();
+ ConfigComparisonResult cmpres;
+ cmpres.attributesChanged = true;
+ cmpres._schemaChanged = true;
+ AttributeCollectionSpec::AttributeList specList;
+ AttributeCollectionSpec spec(specList, 1, 0);
+ ReconfigParams params(cmpres);
+ // Use new config snapshot == old config snapshot (only relevant for reprocessing)
+ f._configurer->reconfigure(*createConfig(), *createConfig(), spec, params);
+
+ ViewPtrs n = f._views.getViewPtrs();
+ { // verify search view
+ SearchViewComparer cmp(o.sv, n.sv);
+ cmp.expect_not_equal();
+ cmp.expect_not_equal_summary_setup();
+ cmp.expect_not_equal_match_view();
+ cmp.expect_not_equal_matchers();
+ cmp.expect_equal_index_searchable();
+ cmp.expect_not_equal_attribute_manager();
+ cmp.expect_equal_session_manager();
+ cmp.expect_equal_document_meta_store();
+ }
+ { // verify feed view
+ FeedViewComparer cmp(o.fv, n.fv);
+ cmp.expect_not_equal();
+ cmp.expect_equal_index_adapter();
+ cmp.expect_not_equal_attribute_adapter();
+ cmp.expect_equal_summary_adapter();
+ cmp.expect_not_equal_schema();
+ }
+}
+
+// When attributes change, reconfigure must hand back an
+// AttributeReprocessingInitializer; with identical old/new snapshots it
+// carries no actual reprocessors.
+TEST_F("require that reconfigure returns reprocessing initializer when changing attributes", Fixture)
+{
+ ConfigComparisonResult cmpres;
+ cmpres.attributesChanged = true;
+ cmpres._schemaChanged = true;
+ AttributeCollectionSpec::AttributeList specList;
+ AttributeCollectionSpec spec(specList, 1, 0);
+ ReconfigParams params(cmpres);
+ IReprocessingInitializer::UP init =
+ f._configurer->reconfigure(*createConfig(), *createConfig(), spec, params);
+
+ EXPECT_TRUE(init.get() != nullptr);
+ EXPECT_TRUE((dynamic_cast<AttributeReprocessingInitializer *>(init.get())) != nullptr);
+ EXPECT_FALSE(init->hasReprocessors());
+}
+
+// Fast-access variant: reconfigure swaps the feed view, attribute writer and
+// schema but keeps the summary adapter.
+TEST_F("require that we can reconfigure attribute adapter", FastAccessFixture)
+{
+ AttributeCollectionSpec::AttributeList specList;
+ AttributeCollectionSpec spec(specList, 1, 0);
+ FastAccessFeedView::SP o = f._view._feedView.get();
+ f._configurer.reconfigure(*createConfig(), *createConfig(), spec);
+ FastAccessFeedView::SP n = f._view._feedView.get();
+
+ FastAccessFeedViewComparer cmp(o, n);
+ cmp.expect_not_equal();
+ cmp.expect_not_equal_attribute_adapter();
+ cmp.expect_equal_summary_adapter();
+ cmp.expect_not_equal_schema();
+}
+
+// Fast-access reconfigure also yields an AttributeReprocessingInitializer
+// (empty when old and new snapshots are identical).
+TEST_F("require that reconfigure returns reprocessing initializer", FastAccessFixture)
+{
+ AttributeCollectionSpec::AttributeList specList;
+ AttributeCollectionSpec spec(specList, 1, 0);
+ IReprocessingInitializer::UP init =
+ f._configurer.reconfigure(*createConfig(), *createConfig(), spec);
+
+ EXPECT_TRUE(init.get() != nullptr);
+ EXPECT_TRUE((dynamic_cast<AttributeReprocessingInitializer *>(init.get())) != nullptr);
+ EXPECT_FALSE(init->hasReprocessors());
+}
+
+// A summarymap-only change replaces the summary setup (new search view) but
+// leaves the match view and the entire feed view untouched.
+TEST_F("require that we can reconfigure summary manager", Fixture)
+{
+ ViewPtrs o = f._views.getViewPtrs();
+ ConfigComparisonResult cmpres;
+ cmpres.summarymapChanged = true;
+ ReconfigParams params(cmpres);
+ // Use new config snapshot == old config snapshot (only relevant for reprocessing)
+ f._configurer->reconfigure(*createConfig(), *createConfig(), params);
+
+ ViewPtrs n = f._views.getViewPtrs();
+ { // verify search view
+ SearchViewComparer cmp(o.sv, n.sv);
+ cmp.expect_not_equal();
+ cmp.expect_not_equal_summary_setup();
+ cmp.expect_equal_match_view();
+ }
+ { // verify feed view
+ FeedViewComparer cmp(o.fv, n.fv);
+ cmp.expect_equal();
+ }
+}
+
+// A rank-profiles change rebuilds matchers and match view only; index
+// searchable, attribute manager, session manager, meta store and all feed
+// adapters are reused. The old feed view's schema is fed back into the config
+// so the schema itself stays identical.
+TEST_F("require that we can reconfigure matchers", Fixture)
+{
+ ViewPtrs o = f._views.getViewPtrs();
+ ConfigComparisonResult cmpres;
+ cmpres.rankProfilesChanged = true;
+ // Use new config snapshot == old config snapshot (only relevant for reprocessing)
+ f._configurer->reconfigure(*createConfig(o.fv->getSchema()), *createConfig(o.fv->getSchema()),
+ ReconfigParams(cmpres));
+
+ ViewPtrs n = f._views.getViewPtrs();
+ { // verify search view
+ SearchViewComparer cmp(o.sv, n.sv);
+ cmp.expect_not_equal();
+ cmp.expect_equal_summary_setup();
+ cmp.expect_not_equal_match_view();
+ cmp.expect_not_equal_matchers();
+ cmp.expect_equal_index_searchable();
+ cmp.expect_equal_attribute_manager();
+ cmp.expect_equal_session_manager();
+ cmp.expect_equal_document_meta_store();
+ }
+ { // verify feed view
+ FeedViewComparer cmp(o.fv, n.fv);
+ cmp.expect_not_equal();
+ cmp.expect_equal_index_adapter();
+ cmp.expect_equal_attribute_adapter();
+ cmp.expect_equal_summary_adapter();
+ cmp.expect_equal_schema();
+ }
+}
+
+// Test entry point (vespalib testkit).
+TEST_MAIN()
+{
+ TEST_RUN_ALL();
+}
diff --git a/searchcore/src/tests/proton/documentdb/configvalidator/.gitignore b/searchcore/src/tests/proton/documentdb/configvalidator/.gitignore
new file mode 100644
index 00000000000..2a8675dad8d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/configvalidator/.gitignore
@@ -0,0 +1 @@
+searchcore_configvalidator_test_app
diff --git a/searchcore/src/tests/proton/documentdb/configvalidator/CMakeLists.txt b/searchcore/src/tests/proton/documentdb/configvalidator/CMakeLists.txt
new file mode 100644
index 00000000000..c7a3a6235cf
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/configvalidator/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_configvalidator_test_app
+ SOURCES
+ configvalidator_test.cpp
+ DEPENDS
+ searchcore_server
+)
+vespa_add_test(NAME searchcore_configvalidator_test_app COMMAND searchcore_configvalidator_test_app)
diff --git a/searchcore/src/tests/proton/documentdb/configvalidator/DESC b/searchcore/src/tests/proton/documentdb/configvalidator/DESC
new file mode 100644
index 00000000000..9263515a290
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/configvalidator/DESC
@@ -0,0 +1 @@
+configvalidator test. Take a look at configvalidator_test.cpp for details.
diff --git a/searchcore/src/tests/proton/documentdb/configvalidator/FILES b/searchcore/src/tests/proton/documentdb/configvalidator/FILES
new file mode 100644
index 00000000000..a7acf2f384c
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/configvalidator/FILES
@@ -0,0 +1 @@
+configvalidator_test.cpp
diff --git a/searchcore/src/tests/proton/documentdb/configvalidator/configvalidator_test.cpp b/searchcore/src/tests/proton/documentdb/configvalidator/configvalidator_test.cpp
new file mode 100644
index 00000000000..cbcc97bdf68
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/configvalidator/configvalidator_test.cpp
@@ -0,0 +1,351 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("configvalidator_test");
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/searchcore/proton/server/configvalidator.h>
+
+using namespace proton;
+using namespace search::index;
+using vespa::config::search::AttributesConfig;
+using vespa::config::search::AttributesConfigBuilder;
+
+// Short-hand aliases for the three schema field flavors.
+typedef Schema::AttributeField AField;
+typedef Schema::IndexField IField;
+typedef Schema::SummaryField SField;
+
+// Abbreviations for the validator result codes asserted throughout this test.
+const ConfigValidator::ResultType OK = ConfigValidator::OK;
+const ConfigValidator::ResultType DTC = ConfigValidator::DATA_TYPE_CHANGED;
+const ConfigValidator::ResultType CTC = ConfigValidator::COLLECTION_TYPE_CHANGED;
+const ConfigValidator::ResultType IAA = ConfigValidator::INDEX_ASPECT_ADDED;
+const ConfigValidator::ResultType IAR = ConfigValidator::INDEX_ASPECT_REMOVED;
+const ConfigValidator::ResultType AAA = ConfigValidator::ATTRIBUTE_ASPECT_ADDED;
+const ConfigValidator::ResultType AAR = ConfigValidator::ATTRIBUTE_ASPECT_REMOVED;
+const ConfigValidator::ResultType AFAA = ConfigValidator::ATTRIBUTE_FAST_ACCESS_ADDED;
+const ConfigValidator::ResultType AFAR = ConfigValidator::ATTRIBUTE_FAST_ACCESS_REMOVED;
+
+// Which aspect of the schema a field is added to.
+enum FType {
+ INDEX,
+ ATTRIBUTE,
+ SUMMARY
+};
+
+// Small fluent builder producing a Schema with fields added under the chosen
+// aspect (index / attribute / summary).
+struct SchemaBuilder
+{
+ Schema _schema;
+ SchemaBuilder() : _schema() {}
+ // Adds field `name` to the aspect selected by `ftype`; collection type
+ // defaults to SINGLE. Returns *this for chaining.
+ SchemaBuilder &add(const vespalib::string &name, FType ftype,
+ Schema::DataType dtype, Schema::CollectionType ctype = Schema::SINGLE) {
+ switch (ftype) {
+ case INDEX:
+ _schema.addIndexField(IField(name, dtype, ctype));
+ break;
+ case ATTRIBUTE:
+ _schema.addAttributeField(AField(name, dtype, ctype));
+ break;
+ case SUMMARY:
+ _schema.addSummaryField(SField(name, dtype, ctype));
+ break;
+ }
+ return *this;
+ }
+ const Schema &schema() const { return _schema; }
+};
+
+// Factory helpers producing a one-field ("f1") schema:
+//   create()  - full control over data and collection type
+//   created() - vary data type, SINGLE collection
+//   createc() - vary collection type, STRING data
+Schema
+create(FType ftype, Schema::DataType dtype, Schema::CollectionType ctype)
+{
+ SchemaBuilder bld;
+ return bld.add("f1", ftype, dtype, ctype).schema();
+}
+
+Schema
+created(FType ftype, Schema::DataType dtype)
+{
+ return create(ftype, dtype, Schema::SINGLE);
+}
+
+Schema
+createc(FType ftype, Schema::CollectionType ctype)
+{
+ return create(ftype, Schema::STRING, ctype);
+}
+
+// Runs the validator on (new, old, oldHistory) schemas with empty attribute
+// configs and returns just the result code.
+ConfigValidator::ResultType
+checkSchema(const Schema &newSchema,
+ const Schema &oldSchema,
+ const Schema &oldHistory)
+{
+ return ConfigValidator::validate(ConfigValidator::Config(newSchema, AttributesConfig()),
+ ConfigValidator::Config(oldSchema, AttributesConfig()), oldHistory).type();
+}
+
+// Runs the validator on (new, old) attribute configs with empty schemas and
+// returns just the result code.
+ConfigValidator::ResultType
+checkAttribute(const AttributesConfig &newCfg,
+ const AttributesConfig &oldCfg)
+{
+ return ConfigValidator::validate(ConfigValidator::Config(Schema(), newCfg),
+ ConfigValidator::Config(Schema(), oldCfg), Schema()).type();
+}
+
+// Changing a field's data type (STRING -> INT32) must be flagged whether the
+// old field comes from the current schema or only from history.
+void
+requireThatChangedDataTypeIsDiscovered(FType ftype)
+{
+ EXPECT_EQUAL(DTC,
+ checkSchema(created(ftype, Schema::INT32),
+ created(ftype, Schema::STRING),
+ Schema()));
+ EXPECT_EQUAL(DTC,
+ checkSchema(created(ftype, Schema::INT32),
+ Schema(),
+ created(ftype, Schema::STRING)));
+}
+
+TEST("require that changed data type is discovered")
+{
+ requireThatChangedDataTypeIsDiscovered(INDEX);
+ requireThatChangedDataTypeIsDiscovered(ATTRIBUTE);
+ requireThatChangedDataTypeIsDiscovered(SUMMARY);
+}
+
+// Changing a field's collection type (SINGLE -> ARRAY) must be flagged,
+// against both the current schema and the history schema.
+void
+requireThatChangedCollectionTypeIsDiscovered(FType ftype)
+{
+ EXPECT_EQUAL(CTC,
+ checkSchema(createc(ftype, Schema::ARRAY),
+ createc(ftype, Schema::SINGLE),
+ Schema()));
+ EXPECT_EQUAL(CTC,
+ checkSchema(createc(ftype, Schema::ARRAY),
+ Schema(),
+ createc(ftype, Schema::SINGLE)));
+}
+
+TEST("require that changed collection type is discovered")
+{
+ requireThatChangedCollectionTypeIsDiscovered(INDEX);
+ requireThatChangedCollectionTypeIsDiscovered(ATTRIBUTE);
+ requireThatChangedCollectionTypeIsDiscovered(SUMMARY);
+}
+
+// Adding or removing the index aspect of an existing field is flagged
+// (IAA/IAR); re-adding a field that only exists in history is accepted.
+TEST("require that changed index aspect is discovered")
+{
+ Schema s1 = created(SUMMARY, Schema::STRING);
+ s1.addIndexField(IField("f1", Schema::STRING));
+ Schema s2 = created(SUMMARY, Schema::STRING);
+ Schema s2h = created(INDEX, Schema::STRING);
+
+ Schema s3 = created(ATTRIBUTE, Schema::STRING);
+ s3.addIndexField(IField("f1", Schema::STRING));
+ Schema s4 = created(ATTRIBUTE, Schema::STRING);
+ Schema s4h = created(INDEX, Schema::STRING);
+ { // remove as index field
+ EXPECT_EQUAL(IAR, checkSchema(s2, s1, Schema()));
+ EXPECT_EQUAL(IAR, checkSchema(s2, Schema(), s1));
+ EXPECT_EQUAL(IAR, checkSchema(s4, s3, Schema()));
+ EXPECT_EQUAL(IAR, checkSchema(s4, Schema(), s3));
+ }
+ {
+ // undo field removal
+ EXPECT_EQUAL(OK, checkSchema(s1, Schema(), s1));
+ EXPECT_EQUAL(OK, checkSchema(s3, Schema(), s3));
+ }
+ { // add as index field
+ EXPECT_EQUAL(IAA, checkSchema(s1, s2, Schema()));
+ EXPECT_EQUAL(IAA, checkSchema(s1, s2, s2h));
+ EXPECT_EQUAL(IAA, checkSchema(s1, Schema(), s2));
+ EXPECT_EQUAL(IAA, checkSchema(s3, s4, Schema()));
+ EXPECT_EQUAL(IAA, checkSchema(s3, s4, s4h));
+ EXPECT_EQUAL(IAA, checkSchema(s3, Schema(), s4));
+ }
+}
+
+// Adding/removing the attribute aspect is flagged (AAA/AAR), except that
+// dropping the attribute aspect is allowed when the field still exists as an
+// index field.
+TEST("require that changed attribute aspect is discovered")
+{
+ Schema s1 = created(SUMMARY, Schema::STRING);
+ s1.addAttributeField(AField("f1", Schema::STRING));
+ Schema s2 = created(SUMMARY, Schema::STRING);
+ Schema s2h = created(ATTRIBUTE, Schema::STRING);
+
+ Schema s3 = created(INDEX, Schema::STRING);
+ s3.addAttributeField(AField("f1", Schema::STRING));
+ Schema s4 = created(INDEX, Schema::STRING);
+ Schema s4h = created(ATTRIBUTE, Schema::STRING);
+
+ Schema s5 = created(INDEX, Schema::STRING);
+ s5.addSummaryField(SField("f1", Schema::STRING));
+ s5.addAttributeField(AField("f1", Schema::STRING));
+ Schema s6 = created(INDEX, Schema::STRING);
+ s6.addSummaryField(SField("f1", Schema::STRING));
+ { // remove as attribute field
+ EXPECT_EQUAL(AAR, checkSchema(s2, s1, Schema()));
+ EXPECT_EQUAL(AAR, checkSchema(s2, Schema(), s1));
+ // remove as attribute is allowed when still existing as index.
+ EXPECT_EQUAL(OK, checkSchema(s4, s3, Schema()));
+ EXPECT_EQUAL(OK, checkSchema(s6, s5, Schema()));
+ EXPECT_EQUAL(IAA, checkSchema(s4, Schema(), s3));
+ }
+ {
+ // undo field removal
+ EXPECT_EQUAL(OK, checkSchema(s1, Schema(), s1));
+ EXPECT_EQUAL(OK, checkSchema(s3, Schema(), s3));
+ }
+ { // add as attribute field
+ EXPECT_EQUAL(AAA, checkSchema(s1, s2, Schema()));
+ EXPECT_EQUAL(AAA, checkSchema(s1, s2, s2h));
+ EXPECT_EQUAL(AAA, checkSchema(s1, Schema(), s2));
+ EXPECT_EQUAL(AAA, checkSchema(s3, s4, Schema()));
+ EXPECT_EQUAL(AAA, checkSchema(s3, s4, s4h));
+ EXPECT_EQUAL(AAA, checkSchema(s3, Schema(), s4));
+ }
+}
+
+// Summary-aspect changes alone are allowed; results other than OK here come
+// from index/attribute aspects resurfacing via the history schema.
+TEST("require that changed summary aspect is allowed")
+{
+ Schema s1 = created(INDEX, Schema::STRING);
+ s1.addSummaryField(SField("f1", Schema::STRING));
+ Schema s2 = created(INDEX, Schema::STRING);
+ Schema s2h = created(SUMMARY, Schema::STRING);
+
+ Schema s3 = created(ATTRIBUTE, Schema::STRING);
+ s3.addSummaryField(SField("f1", Schema::STRING));
+ Schema s4 = created(ATTRIBUTE, Schema::STRING);
+ Schema s4h = created(SUMMARY, Schema::STRING);
+ { // remove as summary field
+ EXPECT_EQUAL(OK, checkSchema(s2, s1, Schema()));
+ EXPECT_EQUAL(IAA, checkSchema(s2, Schema(), s1));
+ EXPECT_EQUAL(OK, checkSchema(s4, s3, Schema()));
+ EXPECT_EQUAL(AAA, checkSchema(s4, Schema(), s3));
+ }
+ { // add as summary field
+ EXPECT_EQUAL(OK, checkSchema(s1, s2, Schema()));
+ EXPECT_EQUAL(OK, checkSchema(s1, s2, s2h));
+ EXPECT_EQUAL(OK, checkSchema(s1, Schema(), s2));
+ EXPECT_EQUAL(OK, checkSchema(s3, s4, Schema()));
+ EXPECT_EQUAL(OK, checkSchema(s3, s4, s4h));
+ EXPECT_EQUAL(OK, checkSchema(s3, Schema(), s4));
+ }
+}
+
+// Whole-field addition and removal is always OK, regardless of which aspects
+// the field carries and whether the old field is current or historical.
+TEST("require that fields can be added and removed")
+{
+ Schema e;
+ Schema s1 = created(INDEX, Schema::STRING);
+ Schema s2 = created(ATTRIBUTE, Schema::STRING);
+ Schema s3 = created(SUMMARY, Schema::STRING);
+ Schema s4 = created(SUMMARY, Schema::STRING);
+ s4.addIndexField(IField("f1", Schema::STRING));
+ Schema s5 = created(SUMMARY, Schema::STRING);
+ s5.addAttributeField(AField("f1", Schema::STRING));
+ Schema s6 = created(SUMMARY, Schema::STRING);
+ s6.addIndexField(IField("f1", Schema::STRING));
+ s6.addAttributeField(AField("f1", Schema::STRING));
+ { // addition of field
+ EXPECT_EQUAL(OK, checkSchema(s1, e, e));
+ EXPECT_EQUAL(OK, checkSchema(s2, e, e));
+ EXPECT_EQUAL(OK, checkSchema(s3, e, e));
+ EXPECT_EQUAL(OK, checkSchema(s4, e, e));
+ EXPECT_EQUAL(OK, checkSchema(s5, e, e));
+ EXPECT_EQUAL(OK, checkSchema(s6, e, e));
+ }
+ { // removal of field
+ EXPECT_EQUAL(OK, checkSchema(e, s1, e));
+ EXPECT_EQUAL(OK, checkSchema(e, e, s1));
+ EXPECT_EQUAL(OK, checkSchema(e, s2, e));
+ EXPECT_EQUAL(OK, checkSchema(e, e, s2));
+ EXPECT_EQUAL(OK, checkSchema(e, s3, e));
+ EXPECT_EQUAL(OK, checkSchema(e, e, s3));
+ EXPECT_EQUAL(OK, checkSchema(e, s4, e));
+ EXPECT_EQUAL(OK, checkSchema(e, e, s4));
+ EXPECT_EQUAL(OK, checkSchema(e, s5, e));
+ EXPECT_EQUAL(OK, checkSchema(e, e, s5));
+ EXPECT_EQUAL(OK, checkSchema(e, s6, e));
+ EXPECT_EQUAL(OK, checkSchema(e, e, s6));
+ }
+}
+
+// The next five tests pin the precedence order of result codes when several
+// violations are present at once: DTC > CTC > IAA > IAR > AAR > AAA.
+TEST("require that data type changed precedes collection type changed")
+{
+ Schema olds = SchemaBuilder().add("f1", FType::SUMMARY, Schema::STRING).
+ add("f2", FType::INDEX, Schema::STRING).schema();
+ Schema news = SchemaBuilder().add("f1", FType::SUMMARY, Schema::INT32).
+ add("f2", FType::INDEX, Schema::STRING, Schema::ARRAY).schema();
+ EXPECT_EQUAL(DTC, checkSchema(news, olds, Schema()));
+}
+
+TEST("require that collection type change precedes index aspect added")
+{
+ Schema olds = SchemaBuilder().add("f1", FType::SUMMARY, Schema::STRING).
+ add("f2", FType::SUMMARY, Schema::STRING).schema();
+ Schema news = SchemaBuilder().add("f1", FType::SUMMARY, Schema::STRING, Schema::ARRAY).
+ add("f2", FType::SUMMARY, Schema::STRING).
+ add("f2", FType::INDEX, Schema::STRING).schema();
+ EXPECT_EQUAL(CTC, checkSchema(news, olds, Schema()));
+}
+
+TEST("require that index aspect added precedes index aspect removed")
+{
+ Schema olds = SchemaBuilder().add("f1", FType::SUMMARY, Schema::STRING).
+ add("f2", FType::SUMMARY, Schema::STRING).
+ add("f2", FType::INDEX, Schema::STRING).schema();
+ Schema news = SchemaBuilder().add("f1", FType::SUMMARY, Schema::STRING).
+ add("f1", FType::INDEX, Schema::STRING).
+ add("f2", FType::SUMMARY, Schema::STRING).schema();
+ EXPECT_EQUAL(IAA, checkSchema(news, olds, Schema()));
+}
+
+TEST("require that index aspect removed precedes attribute aspect removed")
+{
+ Schema olds = SchemaBuilder().add("f1", FType::SUMMARY, Schema::STRING).
+ add("f1", FType::INDEX, Schema::STRING).
+ add("f2", FType::SUMMARY, Schema::STRING).
+ add("f2", FType::ATTRIBUTE, Schema::STRING).schema();
+ Schema news = SchemaBuilder().add("f1", FType::SUMMARY, Schema::STRING).
+ add("f2", FType::SUMMARY, Schema::STRING).schema();
+ EXPECT_EQUAL(IAR, checkSchema(news, olds, Schema()));
+}
+
+TEST("require that attribute aspect removed precedes attribute aspect added")
+{
+ Schema olds = SchemaBuilder().add("f1", FType::SUMMARY, Schema::STRING).
+ add("f1", FType::ATTRIBUTE, Schema::STRING).
+ add("f2", FType::SUMMARY, Schema::STRING).schema();
+ Schema news = SchemaBuilder().add("f1", FType::SUMMARY, Schema::STRING).
+ add("f2", FType::SUMMARY, Schema::STRING).
+ add("f2", FType::ATTRIBUTE, Schema::STRING).schema();
+ EXPECT_EQUAL(AAR, checkSchema(news, olds, Schema()));
+}
+
+// Builds a single attribute config entry with the given fast-access flag.
+AttributesConfigBuilder::Attribute
+createAttribute(const vespalib::string &name, bool fastAccess)
+{
+ AttributesConfigBuilder::Attribute attr;
+ attr.name = name;
+ attr.fastaccess = fastAccess;
+ return attr;
+}
+
+// Toggling the fast-access flag on an existing attribute must be reported in
+// both directions (AFAA when turned on, AFAR when turned off).
+TEST("require that adding attribute fast-access is discovered")
+{
+ AttributesConfigBuilder oldCfg;
+ oldCfg.attribute.push_back(createAttribute("a1", false));
+ AttributesConfigBuilder newCfg;
+ newCfg.attribute.push_back(createAttribute("a1", true));
+
+ EXPECT_EQUAL(AFAA, checkAttribute(newCfg, oldCfg));
+}
+
+TEST("require that removing attribute fast-access is discovered")
+{
+ AttributesConfigBuilder oldCfg;
+ oldCfg.attribute.push_back(createAttribute("a1", true));
+ AttributesConfigBuilder newCfg;
+ newCfg.attribute.push_back(createAttribute("a1", false));
+
+ EXPECT_EQUAL(AFAR, checkAttribute(newCfg, oldCfg));
+}
+
+// Test entry point (vespalib testkit).
+TEST_MAIN()
+{
+ TEST_RUN_ALL();
+}
diff --git a/searchcore/src/tests/proton/documentdb/document_scan_iterator/.gitignore b/searchcore/src/tests/proton/documentdb/document_scan_iterator/.gitignore
new file mode 100644
index 00000000000..6c961d2f232
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_scan_iterator/.gitignore
@@ -0,0 +1 @@
+searchcore_document_scan_iterator_test_app
diff --git a/searchcore/src/tests/proton/documentdb/document_scan_iterator/CMakeLists.txt b/searchcore/src/tests/proton/documentdb/document_scan_iterator/CMakeLists.txt
new file mode 100644
index 00000000000..1a342660f7c
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_scan_iterator/CMakeLists.txt
@@ -0,0 +1,12 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_document_scan_iterator_test_app
+ SOURCES
+ document_scan_iterator_test.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_feedoperation
+ searchcore_documentmetastore
+ searchcore_bucketdb
+ searchcore_pcommon
+)
+vespa_add_test(NAME searchcore_document_scan_iterator_test_app COMMAND searchcore_document_scan_iterator_test_app)
diff --git a/searchcore/src/tests/proton/documentdb/document_scan_iterator/DESC b/searchcore/src/tests/proton/documentdb/document_scan_iterator/DESC
new file mode 100644
index 00000000000..b5965bc2f2d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_scan_iterator/DESC
@@ -0,0 +1,2 @@
+Test for document scan iterator. Take a look at document_scan_iterator_test.cpp for details.
+
diff --git a/searchcore/src/tests/proton/documentdb/document_scan_iterator/FILES b/searchcore/src/tests/proton/documentdb/document_scan_iterator/FILES
new file mode 100644
index 00000000000..f1b6d86a774
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_scan_iterator/FILES
@@ -0,0 +1 @@
+document_scan_iterator_test.cpp
diff --git a/searchcore/src/tests/proton/documentdb/document_scan_iterator/document_scan_iterator_test.cpp b/searchcore/src/tests/proton/documentdb/document_scan_iterator/document_scan_iterator_test.cpp
new file mode 100644
index 00000000000..8a05d46d22f
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_scan_iterator/document_scan_iterator_test.cpp
@@ -0,0 +1,102 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("document_scan_iterator_test");
+
+#include <vespa/searchcore/proton/documentmetastore/documentmetastore.h>
+#include <vespa/searchcore/proton/server/document_scan_iterator.h>
+#include <vespa/vespalib/test/insertion_operators.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+using namespace document;
+using namespace proton;
+using namespace search;
+
+using vespalib::make_string;
+
+typedef DocumentMetaStore::Result DMSResult;
+typedef DocumentMetaStore::Timestamp Timestamp;
+typedef std::set<uint32_t> LidSet;
+typedef std::vector<uint32_t> LidVector;
+
+// Fixture owning a DocumentMetaStore and the DocumentScanIterator under test.
+struct Fixture
+{
+ DocumentMetaStore _metaStore;
+ DocumentScanIterator _itr;
+ Fixture()
+ : _metaStore(std::make_shared<BucketDBOwner>()),
+ _itr(_metaStore)
+ {
+ _metaStore.constructFreeList();
+ }
+ Fixture &add(const LidVector &lids) {
+ for (auto lid : lids) {
+ add(lid);
+ }
+ return *this;
+ }
+ // Puts a document whose gid hashes to a fresh lid; asserts the store hands
+ // out exactly the requested lid so later expectations can use lid values.
+ Fixture &add(uint32_t lid) {
+ DocumentId docId(make_string("userdoc:test:%u:%u", 1, lid));
+ const GlobalId &gid = docId.getGlobalId();
+ DMSResult res = _metaStore.inspect(gid);
+ ASSERT_EQUAL(lid, res._lid);
+ _metaStore.put(gid, gid.convertToBucketId(), Timestamp(lid), lid);
+ return *this;
+ }
+ // Calls next() `count` times, collecting the returned lids, then verifies
+ // the iterator is exhausted (next() returns 0 and valid() is false).
+ LidSet scan(uint32_t count, uint32_t compactLidLimit, uint32_t maxDocsToScan = 10) {
+ LidSet retval;
+ for (uint32_t i = 0; i < count; ++i) {
+ retval.insert(next(compactLidLimit, maxDocsToScan, false));
+ EXPECT_TRUE(_itr.valid());
+ }
+ EXPECT_EQUAL(0u, next(compactLidLimit, maxDocsToScan, false));
+ EXPECT_FALSE(_itr.valid());
+ return retval;
+ }
+ uint32_t next(uint32_t compactLidLimit, uint32_t maxDocsToScan = 10, bool retry = false) {
+ return _itr.next(compactLidLimit, maxDocsToScan, retry).lid;
+ }
+};
+
+// Thin wrapper so lid-set comparisons read as a single named assertion.
+void
+assertLidSet(const LidSet &exp, const LidSet &act)
+{
+ EXPECT_EQUAL(exp, act);
+}
+
+// Scan-iterator behavior: only lids above the compact limit are returned;
+// a lid of 0 in the expected set marks a scan round that hit maxDocsToScan
+// before finding a movable document.
+TEST_F("require that an empty document meta store don't return any thing", Fixture)
+{
+ assertLidSet({}, f.scan(0, 4));
+}
+
+TEST_F("require that only lids > lid limit are returned", Fixture)
+{
+ f.add({1,2,3,4,5,6,7,8});
+ assertLidSet({5,6,7,8}, f.scan(4, 4));
+}
+
+TEST_F("require that max docs to scan (1) are taken into consideration", Fixture)
+{
+ f.add({1,2,3,4,5,6,7,8});
+ assertLidSet({0,5,6,7,8}, f.scan(8, 4, 1));
+}
+
+TEST_F("require that max docs to scan (2) are taken into consideration", Fixture)
+{
+ f.add({1,2,3,4,5,6,7,8});
+ // scan order is: 8, {2,4}, 7, {5,3}, {1,6} (5 scans total)
+ assertLidSet({0,7,8}, f.scan(5, 6, 2));
+}
+
+TEST_F("require that we start scan at previous doc if retry is set", Fixture)
+{
+ f.add({1,2,3,4,5,6,7,8});
+ uint32_t lid1 = f.next(4, 10, false);
+ uint32_t lid2 = f.next(4, 10, true);
+ EXPECT_EQUAL(lid1, lid2);
+}
+
+// Test entry point (vespalib testkit).
+TEST_MAIN()
+{
+ TEST_RUN_ALL();
+}
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/.gitignore b/searchcore/src/tests/proton/documentdb/document_subdbs/.gitignore
new file mode 100644
index 00000000000..e47d2bafa0e
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/.gitignore
@@ -0,0 +1 @@
+searchcore_document_subdbs_test_app
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/CMakeLists.txt b/searchcore/src/tests/proton/documentdb/document_subdbs/CMakeLists.txt
new file mode 100644
index 00000000000..d79b9ad92ae
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/CMakeLists.txt
@@ -0,0 +1,24 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_document_subdbs_test_app
+ SOURCES
+ document_subdbs_test.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_initializer
+ searchcore_reprocessing
+ searchcore_index
+ searchcore_docsummary
+ searchcore_persistenceengine
+ searchcore_feedoperation
+ searchcore_matching
+ searchcore_attribute
+ searchcore_flushengine
+ searchcore_documentmetastore
+ searchcore_bucketdb
+ searchcore_pcommon
+ searchcore_grouping
+ searchcore_proton_metrics
+ searchcore_util
+ searchcore_fconfig
+)
+vespa_add_test(NAME searchcore_document_subdbs_test_app COMMAND searchcore_document_subdbs_test_app)
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/DESC b/searchcore/src/tests/proton/documentdb/document_subdbs/DESC
new file mode 100644
index 00000000000..22718579d4c
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/DESC
@@ -0,0 +1 @@
+Test for document sub db implementations. Take a look at document_subdbs_test.cpp for details.
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/FILES b/searchcore/src/tests/proton/documentdb/document_subdbs/FILES
new file mode 100644
index 00000000000..3d5222f3212
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/FILES
@@ -0,0 +1 @@
+document_subdbs_test.cpp
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/cfg1/attributes.cfg b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg1/attributes.cfg
new file mode 100644
index 00000000000..9d990996dd1
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg1/attributes.cfg
@@ -0,0 +1,3 @@
+attribute[1]
+attribute[0].name "attr1"
+attribute[0].datatype INT32
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/cfg1/indexschema.cfg b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg1/indexschema.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg1/indexschema.cfg
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/cfg1/juniperrc.cfg b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg1/juniperrc.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg1/juniperrc.cfg
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/cfg1/rank-profiles.cfg b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg1/rank-profiles.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg1/rank-profiles.cfg
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/cfg1/summary.cfg b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg1/summary.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg1/summary.cfg
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/cfg1/summarymap.cfg b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg1/summarymap.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg1/summarymap.cfg
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/cfg2/attributes.cfg b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg2/attributes.cfg
new file mode 100644
index 00000000000..3e488bbd7d9
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg2/attributes.cfg
@@ -0,0 +1,5 @@
+attribute[2]
+attribute[0].name "attr1"
+attribute[0].datatype INT32
+attribute[1].name "attr2"
+attribute[1].datatype INT32
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/cfg2/indexschema.cfg b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg2/indexschema.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg2/indexschema.cfg
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/cfg2/juniperrc.cfg b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg2/juniperrc.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg2/juniperrc.cfg
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/cfg2/rank-profiles.cfg b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg2/rank-profiles.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg2/rank-profiles.cfg
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/cfg2/summary.cfg b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg2/summary.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg2/summary.cfg
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/cfg2/summarymap.cfg b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg2/summarymap.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg2/summarymap.cfg
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/cfg3/attributes.cfg b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg3/attributes.cfg
new file mode 100644
index 00000000000..deb4ddcf63c
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg3/attributes.cfg
@@ -0,0 +1,6 @@
+attribute[2]
+attribute[0].name "attr1"
+attribute[0].datatype INT32
+attribute[0].fastaccess true
+attribute[1].name "attr2"
+attribute[1].datatype INT32
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/cfg3/indexschema.cfg b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg3/indexschema.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg3/indexschema.cfg
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/cfg3/juniperrc.cfg b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg3/juniperrc.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg3/juniperrc.cfg
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/cfg3/rank-profiles.cfg b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg3/rank-profiles.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg3/rank-profiles.cfg
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/cfg3/summary.cfg b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg3/summary.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg3/summary.cfg
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/cfg3/summarymap.cfg b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg3/summarymap.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg3/summarymap.cfg
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/cfg4/attributes.cfg b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg4/attributes.cfg
new file mode 100644
index 00000000000..d4fc1468739
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg4/attributes.cfg
@@ -0,0 +1,7 @@
+attribute[2]
+attribute[0].name "attr1"
+attribute[0].datatype INT32
+attribute[0].fastaccess true
+attribute[1].name "attr2"
+attribute[1].datatype INT32
+attribute[1].fastaccess true
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/cfg4/indexschema.cfg b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg4/indexschema.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg4/indexschema.cfg
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/cfg4/juniperrc.cfg b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg4/juniperrc.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg4/juniperrc.cfg
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/cfg4/rank-profiles.cfg b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg4/rank-profiles.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg4/rank-profiles.cfg
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/cfg4/summary.cfg b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg4/summary.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg4/summary.cfg
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/cfg4/summarymap.cfg b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg4/summarymap.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/cfg4/summarymap.cfg
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp b/searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp
new file mode 100644
index 00000000000..afe1253ed93
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp
@@ -0,0 +1,978 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("document_subdbs_test");
+
+#include <vespa/searchcore/proton/bucketdb/bucketdbhandler.h>
+#include <vespa/searchcore/proton/initializer/task_runner.h>
+#include <vespa/searchcore/proton/metrics/legacy_documentdb_metrics.h>
+#include <vespa/searchcore/proton/metrics/metricswireservice.h>
+#include <vespa/searchcore/proton/reprocessing/i_reprocessing_task.h>
+#include <vespa/searchcore/proton/reprocessing/reprocessingrunner.h>
+#include <vespa/searchcore/proton/server/document_subdb_explorer.h>
+#include <vespa/searchcore/proton/server/emptysearchview.h>
+#include <vespa/searchcore/proton/server/fast_access_document_retriever.h>
+#include <vespa/searchcore/proton/server/idocumentsubdb.h>
+#include <vespa/searchcore/proton/server/minimal_document_retriever.h>
+#include <vespa/searchcore/proton/server/searchable_document_retriever.h>
+#include <vespa/searchcore/proton/server/searchabledocsubdb.h>
+#include <vespa/searchcore/proton/test/test.h>
+#include <vespa/searchcore/proton/test/thread_utils.h>
+#include <vespa/searchlib/index/docbuilder.h>
+#include <vespa/vespalib/io/fileutil.h>
+#include <vespa/vespalib/test/insertion_operators.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+#include <iostream>
+
+using namespace document;
+using namespace proton;
+using namespace proton::matching;
+using namespace search;
+using namespace search::common;
+using namespace search::index;
+using namespace search::transactionlog;
+using namespace searchcorespi;
+using namespace vespalib;
+using proton::bucketdb::BucketDBHandler;
+using proton::bucketdb::IBucketDBHandler;
+using proton::bucketdb::IBucketDBHandlerInitializer;
+
+using searchcorespi::IFlushTarget;
+using searchcorespi::index::IThreadingService;
+using storage::spi::Timestamp;
+using vespa::config::search::core::ProtonConfig;
+using vespalib::mkdir;
+
+typedef StoreOnlyDocSubDB::Config StoreOnlyConfig;
+typedef StoreOnlyDocSubDB::Context StoreOnlyContext;
+typedef FastAccessDocSubDB::Config FastAccessConfig;
+typedef FastAccessDocSubDB::Context FastAccessContext;
+typedef SearchableDocSubDB::Config SearchableConfig;
+typedef SearchableDocSubDB::Context SearchableContext;
+typedef std::vector<AttributeGuard> AttributeGuardList;
+
+const std::string DOCTYPE_NAME = "searchdocument";
+const std::string SUB_NAME = "subdb";
+const std::string BASE_DIR = "basedir";
+const SerialNum CFG_SERIAL = 5;
+
+struct ConfigDir1 { static vespalib::string dir() { return "cfg1"; } };
+struct ConfigDir2 { static vespalib::string dir() { return "cfg2"; } };
+struct ConfigDir3 { static vespalib::string dir() { return "cfg3"; } };
+struct ConfigDir4 { static vespalib::string dir() { return "cfg4"; } };
+
+struct MySubDBOwner : public IDocumentSubDB::IOwner
+{
+ uint32_t _syncCnt;
+ MySubDBOwner() : _syncCnt(0) {}
+ void syncFeedView() override { ++_syncCnt; }
+ IIndexManagerFactory::SP
+ getIndexManagerFactory(const vespalib::stringref &name) const override {
+ (void) name;
+ return IIndexManagerFactory::SP();
+ }
+ vespalib::string getName() const override { return "owner"; }
+ uint32_t getDistributionKey() const override { return -1; }
+};
+
+struct MySyncProxy : public SyncProxy
+{
+ virtual void sync(SerialNum) {}
+};
+
+
+struct MyGetSerialNum : public IGetSerialNum
+{
+ virtual SerialNum getSerialNum() const { return 0u; }
+};
+
+struct MyFileHeaderContext : public FileHeaderContext
+{
+ virtual void addTags(vespalib::GenericHeader &, const vespalib::string &) const {}
+};
+
+struct MyMetricsWireService : public MetricsWireService
+{
+ std::set<vespalib::string> _attributes;
+ MyMetricsWireService() : _attributes() {}
+ virtual void addAttribute(AttributeMetrics &, AttributeMetrics *, const std::string &name) {
+ _attributes.insert(name);
+ }
+ virtual void removeAttribute(AttributeMetrics &, AttributeMetrics *, const std::string &) {}
+ virtual void cleanAttributes(AttributeMetrics &, AttributeMetrics *) {}
+ virtual void addRankProfile(LegacyDocumentDBMetrics &, const std::string &, size_t) {}
+ virtual void cleanRankProfiles(LegacyDocumentDBMetrics &) {}
+};
+
+struct MyStoreOnlyConfig
+{
+ StoreOnlyConfig _cfg;
+ MyStoreOnlyConfig()
+ : _cfg(DocTypeName(DOCTYPE_NAME),
+ SUB_NAME,
+ BASE_DIR,
+ GrowStrategy(),
+ 0, 0, SubDbType::READY)
+ {
+ }
+};
+
+struct MyStoreOnlyContext
+{
+ MySubDBOwner _owner;
+ MySyncProxy _syncProxy;
+ MyGetSerialNum _getSerialNum;
+ MyFileHeaderContext _fileHeader;
+ LegacyDocumentDBMetrics _metrics;
+ vespalib::Lock _configLock;
+ StoreOnlyContext _ctx;
+ MyStoreOnlyContext(IThreadingService &writeService,
+ ThreadStackExecutorBase &summaryExecutor,
+ std::shared_ptr<BucketDBOwner> bucketDB,
+ IBucketDBHandlerInitializer &
+ bucketDBHandlerInitializer)
+ : _owner(),
+ _syncProxy(),
+ _getSerialNum(),
+ _fileHeader(),
+ _metrics(DOCTYPE_NAME, 1),
+ _configLock(),
+ _ctx(_owner,
+ _syncProxy,
+ _getSerialNum,
+ _fileHeader,
+ writeService,
+ summaryExecutor,
+ bucketDB,
+ bucketDBHandlerInitializer,
+ _metrics,
+ _configLock)
+ {
+ }
+ const MySubDBOwner &getOwner() const {
+ return _owner;
+ }
+};
+
+template <bool FastAccessAttributesOnly>
+struct MyFastAccessConfig
+{
+ FastAccessConfig _cfg;
+ MyFastAccessConfig()
+ : _cfg(MyStoreOnlyConfig()._cfg, true, true, FastAccessAttributesOnly)
+ {
+ }
+};
+
+struct MyFastAccessContext
+{
+ MyStoreOnlyContext _storeOnlyCtx;
+ AttributeMetrics _attributeMetrics;
+ MyMetricsWireService _wireService;
+ FastAccessContext _ctx;
+ MyFastAccessContext(IThreadingService &writeService,
+ ThreadStackExecutorBase &summaryExecutor,
+ std::shared_ptr<BucketDBOwner> bucketDB,
+ IBucketDBHandlerInitializer &
+ bucketDBHandlerInitializer)
+ : _storeOnlyCtx(writeService, summaryExecutor, bucketDB,
+ bucketDBHandlerInitializer),
+ _attributeMetrics(NULL),
+ _wireService(),
+ _ctx(_storeOnlyCtx._ctx, _attributeMetrics, NULL, _wireService)
+ {
+ }
+ const MyMetricsWireService &getWireService() const {
+ return _wireService;
+ }
+ const MySubDBOwner &getOwner() const {
+ return _storeOnlyCtx.getOwner();
+ }
+};
+
+struct MySearchableConfig
+{
+ SearchableConfig _cfg;
+ MySearchableConfig()
+ : _cfg(MyFastAccessConfig<false>()._cfg, 1)
+ {
+ }
+};
+
+struct MySearchableContext
+{
+ MyFastAccessContext _fastUpdCtx;
+ QueryLimiter _queryLimiter;
+ vespalib::Clock _clock;
+ SearchableContext _ctx;
+ MySearchableContext(IThreadingService &writeService,
+ ThreadStackExecutorBase &executor,
+ std::shared_ptr<BucketDBOwner> bucketDB,
+ IBucketDBHandlerInitializer &
+ bucketDBHandlerInitializer)
+ : _fastUpdCtx(writeService, executor, bucketDB,
+ bucketDBHandlerInitializer),
+ _queryLimiter(),
+ _clock(),
+ _ctx(_fastUpdCtx._ctx,
+ _queryLimiter,
+ _clock,
+ executor)
+ {
+ }
+ const MyMetricsWireService &getWireService() const {
+ return _fastUpdCtx.getWireService();
+ }
+ const MySubDBOwner &getOwner() const {
+ return _fastUpdCtx.getOwner();
+ }
+};
+
+struct OneAttrSchema : public Schema
+{
+ OneAttrSchema() {
+ addAttributeField(Schema::AttributeField("attr1", Schema::DataType::INT32));
+ }
+};
+
+struct TwoAttrSchema : public OneAttrSchema
+{
+ TwoAttrSchema() {
+ addAttributeField(Schema::AttributeField("attr2", Schema::DataType::INT32));
+ }
+};
+
+struct MyConfigSnapshot
+{
+ typedef std::unique_ptr<MyConfigSnapshot> UP;
+ Schema _schema;
+ DocBuilder _builder;
+ DocumentDBConfig::SP _cfg;
+ MyConfigSnapshot(const Schema &schema,
+ const vespalib::string &cfgDir)
+ : _schema(schema),
+ _builder(_schema),
+ _cfg()
+ {
+ DocumentDBConfig::DocumenttypesConfigSP documenttypesConfig
+ (new DocumenttypesConfig(_builder.getDocumenttypesConfig()));
+ TuneFileDocumentDB::SP tuneFileDocumentDB(new TuneFileDocumentDB());
+ BootstrapConfig::SP bootstrap
+ (new BootstrapConfig(1,
+ documenttypesConfig,
+ _builder.getDocumentTypeRepo(),
+ BootstrapConfig::ProtonConfigSP(new ProtonConfig()),
+ tuneFileDocumentDB));
+ config::DirSpec spec(cfgDir);
+ DocumentDBConfigHelper mgr(spec, "searchdocument");
+ mgr.forwardConfig(bootstrap);
+ mgr.nextGeneration(1);
+ _cfg = mgr.getConfig();
+ }
+};
+
+template <typename Traits>
+struct FixtureBase
+{
+ ExecutorThreadingService _writeService;
+ ThreadStackExecutor _summaryExecutor;
+ typename Traits::Config _cfg;
+ std::shared_ptr<BucketDBOwner> _bucketDB;
+ BucketDBHandler _bucketDBHandler;
+ typename Traits::Context _ctx;
+ typename Traits::Schema _baseSchema;
+ MyConfigSnapshot::UP _snapshot;
+ test::DirectoryHandler _baseDir;
+ typename Traits::SubDB _subDb;
+ IFeedView::SP _tmpFeedView;
+ FixtureBase()
+ : _writeService(),
+ _summaryExecutor(1, 64 * 1024),
+ _cfg(),
+ _bucketDB(std::make_shared<BucketDBOwner>()),
+ _bucketDBHandler(*_bucketDB),
+ _ctx(_writeService, _summaryExecutor, _bucketDB,
+ _bucketDBHandler),
+ _baseSchema(),
+ _snapshot(new MyConfigSnapshot(_baseSchema, Traits::ConfigDir::dir())),
+ _baseDir(BASE_DIR + "/" + SUB_NAME, BASE_DIR),
+ _subDb(_cfg._cfg, _ctx._ctx),
+ _tmpFeedView()
+ {
+ init();
+ }
+ ~FixtureBase() {
+ _writeService.sync();
+ }
+ template <typename FunctionType>
+ void runInMaster(FunctionType func) {
+ test::runInMaster(_writeService, func);
+ }
+ void init() {
+ Schema::SP unionSchema(new Schema());
+ DocumentSubDbInitializer::SP task =
+ _subDb.createInitializer(*_snapshot->_cfg,
+ Traits::configSerial(),
+ unionSchema,
+ ProtonConfig::Summary(),
+ ProtonConfig::Index());
+ vespalib::ThreadStackExecutor executor(1, 1024 * 1024);
+ initializer::TaskRunner taskRunner(executor);
+ taskRunner.runTask(task);
+ SessionManager::SP sessionMgr(new SessionManager(1));
+ runInMaster([&] () { _subDb.initViews(*_snapshot->_cfg, sessionMgr); });
+ }
+ void basicReconfig(SerialNum serialNum) {
+ runInMaster([&] () { performReconfig(serialNum, TwoAttrSchema(), ConfigDir2::dir()); });
+ }
+ void reconfig(SerialNum serialNum,
+ const Schema &reconfigSchema,
+ const vespalib::string &reconfigConfigDir) {
+ runInMaster([&] () { performReconfig(serialNum, reconfigSchema, reconfigConfigDir); });
+ }
+ void performReconfig(SerialNum serialNum,
+ const Schema &reconfigSchema,
+ const vespalib::string &reconfigConfigDir) {
+ MyConfigSnapshot::UP newCfg(new MyConfigSnapshot(reconfigSchema, reconfigConfigDir));
+ DocumentDBConfig::ComparisonResult cmpResult;
+ cmpResult.attributesChanged = true;
+ cmpResult._documenttypesChanged = true;
+ cmpResult._documentTypeRepoChanged = true;
+ IReprocessingTask::List tasks =
+ _subDb.applyConfig(*newCfg->_cfg,
+ *_snapshot->_cfg,
+ serialNum,
+ ReconfigParams(cmpResult));
+ _snapshot = std::move(newCfg);
+ if (!tasks.empty()) {
+ ReprocessingRunner runner;
+ runner.addTasks(tasks);
+ runner.run();
+ }
+ _subDb.onReprocessDone(serialNum);
+ }
+ void sync() {
+ _writeService.master().sync();
+ }
+ proton::IAttributeManager::SP getAttributeManager() {
+ return _subDb.getAttributeManager();
+ }
+ const typename Traits::FeedView *getFeedView() {
+ _tmpFeedView = _subDb.getFeedView();
+ const typename Traits::FeedView *retval =
+ dynamic_cast<typename Traits::FeedView *>(_tmpFeedView.get());
+ ASSERT_TRUE(retval != NULL);
+ return retval;
+ }
+ const MyMetricsWireService &getWireService() const {
+ return _ctx.getWireService();
+ }
+ const MySubDBOwner &getOwner() const {
+ return _ctx.getOwner();
+ }
+};
+
+template <typename SchemaT, typename ConfigDirT, uint32_t ConfigSerial = CFG_SERIAL>
+struct BaseTraitsT
+{
+ typedef SchemaT Schema;
+ typedef ConfigDirT ConfigDir;
+ static uint32_t configSerial() { return ConfigSerial; }
+};
+
+typedef BaseTraitsT<OneAttrSchema, ConfigDir1> BaseTraits;
+
+struct StoreOnlyTraits : public BaseTraits
+{
+ typedef MyStoreOnlyConfig Config;
+ typedef MyStoreOnlyContext Context;
+ typedef StoreOnlyDocSubDB SubDB;
+ typedef StoreOnlyFeedView FeedView;
+};
+
+typedef FixtureBase<StoreOnlyTraits> StoreOnlyFixture;
+
+struct FastAccessTraits : public BaseTraits
+{
+ typedef MyFastAccessConfig<false> Config;
+ typedef MyFastAccessContext Context;
+ typedef FastAccessDocSubDB SubDB;
+ typedef FastAccessFeedView FeedView;
+};
+
+typedef FixtureBase<FastAccessTraits> FastAccessFixture;
+
+template <typename ConfigDirT>
+struct FastAccessOnlyTraitsBase : public BaseTraitsT<TwoAttrSchema, ConfigDirT>
+{
+ typedef MyFastAccessConfig<true> Config;
+ typedef MyFastAccessContext Context;
+ typedef FastAccessDocSubDB SubDB;
+ typedef FastAccessFeedView FeedView;
+};
+
+// Setup with 1 fast-access attribute
+typedef FastAccessOnlyTraitsBase<ConfigDir3> FastAccessOnlyTraits;
+typedef FixtureBase<FastAccessOnlyTraits> FastAccessOnlyFixture;
+
+template <typename SchemaT, typename ConfigDirT>
+struct SearchableTraitsBase : public BaseTraitsT<SchemaT, ConfigDirT>
+{
+ typedef MySearchableConfig Config;
+ typedef MySearchableContext Context;
+ typedef SearchableDocSubDB SubDB;
+ typedef proton::SearchableFeedView FeedView;
+};
+
+typedef SearchableTraitsBase<OneAttrSchema, ConfigDir1> SearchableTraits;
+typedef FixtureBase<SearchableTraits> SearchableFixture;
+
+void
+assertAttributes1(const AttributeGuardList &attributes)
+{
+ EXPECT_EQUAL(1u, attributes.size());
+ EXPECT_EQUAL("attr1", attributes[0].get().getName());
+}
+
+void
+assertAttributes1(const std::vector<search::AttributeVector *> &attributes)
+{
+ EXPECT_EQUAL(1u, attributes.size());
+ EXPECT_EQUAL("attr1", attributes[0]->getName());
+}
+
+void
+assertAttributes2(const AttributeGuardList &attributes)
+{
+ EXPECT_EQUAL(2u, attributes.size());
+ EXPECT_EQUAL("attr1", attributes[0].get().getName());
+ EXPECT_EQUAL("attr2", attributes[1].get().getName());
+}
+
+void
+assertAttributes2(const std::vector<search::AttributeVector *> &attributes)
+{
+ EXPECT_EQUAL(2u, attributes.size());
+ EXPECT_EQUAL("attr1", attributes[0]->getName());
+ EXPECT_EQUAL("attr2", attributes[1]->getName());
+}
+
+TEST_F("require that managers and components are instantiated", StoreOnlyFixture)
+{
+ EXPECT_TRUE(f._subDb.getSummaryManager().get() != NULL);
+ EXPECT_TRUE(f._subDb.getSummaryAdapter().get() != NULL);
+ EXPECT_TRUE(f._subDb.getAttributeManager().get() == NULL);
+ EXPECT_TRUE(f._subDb.getIndexManager().get() == NULL);
+ EXPECT_TRUE(f._subDb.getIndexWriter().get() == NULL);
+ EXPECT_TRUE(f._subDb.getFeedView().get() != NULL);
+ EXPECT_TRUE(f._subDb.getSearchView().get() != NULL);
+ EXPECT_TRUE(dynamic_cast<StoreOnlyFeedView *>(f._subDb.getFeedView().get()) != NULL);
+ EXPECT_TRUE(dynamic_cast<EmptySearchView *>(f._subDb.getSearchView().get()) != NULL);
+ EXPECT_TRUE(dynamic_cast<MinimalDocumentRetriever *>(f._subDb.getDocumentRetriever().get()) != NULL);
+}
+
+TEST_F("require that managers and components are instantiated", FastAccessFixture)
+{
+ EXPECT_TRUE(f._subDb.getSummaryManager().get() != NULL);
+ EXPECT_TRUE(f._subDb.getSummaryAdapter().get() != NULL);
+ EXPECT_TRUE(f._subDb.getAttributeManager().get() != NULL);
+ EXPECT_TRUE(f._subDb.getIndexManager().get() == NULL);
+ EXPECT_TRUE(f._subDb.getIndexWriter().get() == NULL);
+ EXPECT_TRUE(f._subDb.getFeedView().get() != NULL);
+ EXPECT_TRUE(f._subDb.getSearchView().get() != NULL);
+ EXPECT_TRUE(dynamic_cast<FastAccessFeedView *>(f._subDb.getFeedView().get()) != NULL);
+ EXPECT_TRUE(dynamic_cast<EmptySearchView *>(f._subDb.getSearchView().get()) != NULL);
+ EXPECT_TRUE(dynamic_cast<FastAccessDocumentRetriever *>(f._subDb.getDocumentRetriever().get()) != NULL);
+}
+
+TEST_F("require that managers and components are instantiated", SearchableFixture)
+{
+ EXPECT_TRUE(f._subDb.getSummaryManager().get() != NULL);
+ EXPECT_TRUE(f._subDb.getSummaryAdapter().get() != NULL);
+ EXPECT_TRUE(f._subDb.getAttributeManager().get() != NULL);
+ EXPECT_TRUE(f._subDb.getIndexManager().get() != NULL);
+ EXPECT_TRUE(f._subDb.getIndexWriter().get() != NULL);
+ EXPECT_TRUE(f._subDb.getFeedView().get() != NULL);
+ EXPECT_TRUE(f._subDb.getSearchView().get() != NULL);
+ EXPECT_TRUE(dynamic_cast<SearchableFeedView *>(f._subDb.getFeedView().get()) != NULL);
+ EXPECT_TRUE(dynamic_cast<SearchView *>(f._subDb.getSearchView().get()) != NULL);
+ EXPECT_TRUE(dynamic_cast<SearchableDocumentRetriever *>(f._subDb.getDocumentRetriever().get()) != NULL);
+}
+
+template<typename Fixture>
+void
+requireThatAttributeManagerIsInstantiated(Fixture &f)
+{
+ std::vector<AttributeGuard> attributes;
+ f.getAttributeManager()->getAttributeList(attributes);
+ assertAttributes1(attributes);
+}
+
+TEST_F("require that attribute manager is instantiated", FastAccessFixture)
+{
+ requireThatAttributeManagerIsInstantiated(f);
+}
+
+TEST_F("require that attribute manager is instantiated", SearchableFixture)
+{
+ requireThatAttributeManagerIsInstantiated(f);
+}
+
+template <typename Fixture>
+void
+requireThatAttributesAreAccessibleViaFeedView(Fixture &f)
+{
+ assertAttributes1(f.getFeedView()->getAttributeWriter()->getWritableAttributes());
+}
+
+TEST_F("require that attributes are accessible via feed view", FastAccessFixture)
+{
+ requireThatAttributesAreAccessibleViaFeedView(f);
+}
+
+TEST_F("require that attributes are accessible via feed view", SearchableFixture)
+{
+ requireThatAttributesAreAccessibleViaFeedView(f);
+}
+
+template <typename Fixture>
+void
+requireThatAttributeManagerCanBeReconfigured(Fixture &f)
+{
+ f.basicReconfig(10);
+ std::vector<AttributeGuard> attributes;
+ f.getAttributeManager()->getAttributeList(attributes);
+ assertAttributes2(attributes);
+}
+
+TEST_F("require that attribute manager can be reconfigured", FastAccessFixture)
+{
+ requireThatAttributeManagerCanBeReconfigured(f);
+}
+
+TEST_F("require that attribute manager can be reconfigured", SearchableFixture)
+{
+ requireThatAttributeManagerCanBeReconfigured(f);
+}
+
+template <typename Fixture>
+void
+requireThatReconfiguredAttributesAreAccessibleViaFeedView(Fixture &f)
+{
+ f.basicReconfig(10);
+ assertAttributes2(f.getFeedView()->getAttributeWriter()->getWritableAttributes());
+}
+
+TEST_F("require that reconfigured attributes are accessible via feed view", FastAccessFixture)
+{
+ requireThatReconfiguredAttributesAreAccessibleViaFeedView(f);
+}
+
+TEST_F("require that reconfigured attributes are accessible via feed view", SearchableFixture)
+{
+ requireThatReconfiguredAttributesAreAccessibleViaFeedView(f);
+}
+
+template <typename Fixture>
+void
+requireThatOwnerIsNotifiedWhenFeedViewChanges(Fixture &f)
+{
+ EXPECT_EQUAL(0u, f.getOwner()._syncCnt);
+ f.basicReconfig(10);
+ EXPECT_EQUAL(1u, f.getOwner()._syncCnt);
+}
+
+TEST_F("require that owner is notified when feed view changes", StoreOnlyFixture)
+{
+ requireThatOwnerIsNotifiedWhenFeedViewChanges(f);
+}
+
+TEST_F("require that owner is notified when feed view changes", FastAccessFixture)
+{
+ requireThatOwnerIsNotifiedWhenFeedViewChanges(f);
+}
+
+TEST_F("require that owner is notified when feed view changes", SearchableFixture)
+{
+ EXPECT_EQUAL(1u, f.getOwner()._syncCnt); // NOTE: init also notifies owner
+ f.basicReconfig(10);
+ EXPECT_EQUAL(2u, f.getOwner()._syncCnt);
+}
+
+template <typename Fixture>
+void
+requireThatAttributeMetricsAreRegistered(Fixture &f)
+{
+ EXPECT_EQUAL(2u, f.getWireService()._attributes.size());
+ auto itr = f.getWireService()._attributes.begin();
+ EXPECT_EQUAL("[documentmetastore]", *itr++);
+ EXPECT_EQUAL("attr1", *itr);
+}
+
+TEST_F("require that attribute metrics are registered", FastAccessFixture)
+{
+ requireThatAttributeMetricsAreRegistered(f);
+}
+
+TEST_F("require that attribute metrics are registered", SearchableFixture)
+{
+ requireThatAttributeMetricsAreRegistered(f);
+}
+
+template <typename Fixture>
+void
+requireThatAttributeMetricsCanBeReconfigured(Fixture &f)
+{
+ f.basicReconfig(10);
+ EXPECT_EQUAL(3u, f.getWireService()._attributes.size());
+ auto itr = f.getWireService()._attributes.begin();
+ EXPECT_EQUAL("[documentmetastore]", *itr++);
+ EXPECT_EQUAL("attr1", *itr++);
+ EXPECT_EQUAL("attr2", *itr);
+}
+
+TEST_F("require that attribute metrics can be reconfigured", FastAccessFixture)
+{
+ requireThatAttributeMetricsCanBeReconfigured(f);
+}
+
+TEST_F("require that attribute metrics can be reconfigured", SearchableFixture)
+{
+ requireThatAttributeMetricsCanBeReconfigured(f);
+}
+
+template <typename Fixture>
+IFlushTarget::List
+getFlushTargets(Fixture &f)
+{
+ IFlushTarget::List targets = (static_cast<IDocumentSubDB &>(f._subDb)).getFlushTargets();
+ std::sort(targets.begin(), targets.end(),
+ [](const IFlushTarget::SP &lhs, const IFlushTarget::SP &rhs) {
+ return lhs->getName() < rhs->getName(); });
+ return targets;
+}
+
+typedef IFlushTarget::Type FType;
+typedef IFlushTarget::Component FComponent;
+
+bool
+assertTarget(const vespalib::string &name,
+ FType type,
+ FComponent component,
+ const IFlushTarget &target)
+{
+ if (!EXPECT_EQUAL(name, target.getName())) return false;
+ if (!EXPECT_TRUE(type == target.getType())) return false;
+ if (!EXPECT_TRUE(component == target.getComponent())) return false;
+ return true;
+}
+
+TEST_F("require that flush targets can be retrieved", FastAccessFixture)
+{
+ IFlushTarget::List targets = getFlushTargets(f);
+ EXPECT_EQUAL(4u, targets.size());
+ EXPECT_EQUAL("subdb.attribute.attr1", targets[0]->getName());
+ EXPECT_EQUAL("subdb.documentmetastore", targets[1]->getName());
+ EXPECT_EQUAL("subdb.summary.compact", targets[2]->getName());
+ EXPECT_EQUAL("subdb.summary.flush", targets[3]->getName());
+}
+
+TEST_F("require that flush targets can be retrieved", SearchableFixture)
+{
+ IFlushTarget::List targets = getFlushTargets(f);
+ EXPECT_EQUAL(6u, targets.size());
+ EXPECT_TRUE(assertTarget("subdb.attribute.attr1", FType::SYNC, FComponent::ATTRIBUTE, *targets[0]));
+ EXPECT_TRUE(assertTarget("subdb.documentmetastore", FType::SYNC, FComponent::ATTRIBUTE, *targets[1]));
+ EXPECT_TRUE(assertTarget("subdb.memoryindex.flush", FType::FLUSH, FComponent::INDEX, *targets[2]));
+ EXPECT_TRUE(assertTarget("subdb.memoryindex.fusion", FType::GC, FComponent::INDEX, *targets[3]));
+ EXPECT_TRUE(assertTarget("subdb.summary.compact", FType::GC, FComponent::DOCUMENT_STORE, *targets[4]));
+ EXPECT_TRUE(assertTarget("subdb.summary.flush", FType::SYNC, FComponent::DOCUMENT_STORE, *targets[5]));
+}
+
+TEST_F("require that only fast-access attributes are instantiated", FastAccessOnlyFixture)
+{
+ std::vector<AttributeGuard> attrs;
+ f.getAttributeManager()->getAttributeList(attrs);
+ EXPECT_EQUAL(1u, attrs.size());
+ EXPECT_EQUAL("attr1", attrs[0].get().getName());
+}
+
+/**
+ * Helper that builds documents and feed operations (put/move/remove) and
+ * routes them through the fixture's feed view on the master thread.
+ */
+template <typename FixtureType>
+struct DocumentHandler
+{
+    FixtureType &_f;
+    DocBuilder _builder;
+    DocumentHandler(FixtureType &f) : _f(f), _builder(f._baseSchema) {}
+    // Used bits applied to raw bucket ids derived from a gid (see createRemove).
+    static constexpr uint32_t BUCKET_USED_BITS = 8;
+    static DocumentId createDocId(uint32_t docId)
+    {
+        return DocumentId(vespalib::make_string("id:searchdocument:"
+                                                "searchdocument::%u", docId));
+    }
+    // Document with no fields set; useful for lid-allocation tests.
+    Document::UP createEmptyDoc(uint32_t docId) {
+        return _builder.startDocument
+            (vespalib::make_string("id:searchdocument:searchdocument::%u",
+                                   docId)).
+            endDocument();
+    }
+    // Document with both attribute fields populated.
+    Document::UP createDoc(uint32_t docId, int64_t attr1Value, int64_t attr2Value) {
+        return _builder.startDocument
+            (vespalib::make_string("id:searchdocument:searchdocument::%u", docId)).
+            startAttributeField("attr1").addInt(attr1Value).endField().
+            startAttributeField("attr2").addInt(attr2Value).endField().endDocument();
+    }
+    PutOperation createPut(Document::UP doc, Timestamp timestamp, SerialNum serialNum) {
+        test::Document testDoc(Document::SP(doc.release()), 0, timestamp);
+        PutOperation op(testDoc.getBucket(), testDoc.getTimestamp(), testDoc.getDoc());
+        op.setSerialNum(serialNum);
+        return op;
+    }
+    MoveOperation createMove(Document::UP doc, Timestamp timestamp,
+                             DbDocumentId sourceDbdId,
+                             uint32_t targetSubDbId,
+                             SerialNum serialNum)
+    {
+        test::Document testDoc(Document::SP(doc.release()), 0, timestamp);
+        MoveOperation op(testDoc.getBucket(), testDoc.getTimestamp(), testDoc.getDoc(), sourceDbdId, targetSubDbId);
+        op.setSerialNum(serialNum);
+        return op;
+    }
+    RemoveOperation createRemove(const DocumentId &docId, Timestamp timestamp,
+                                 SerialNum serialNum)
+    {
+        const document::GlobalId &gid = docId.getGlobalId();
+        BucketId bucket = gid.convertToBucketId();
+        bucket.setUsedBits(BUCKET_USED_BITS);
+        bucket = bucket.stripUnused();
+        RemoveOperation op(bucket, timestamp, docId);
+        op.setSerialNum(serialNum);
+        return op;
+    }
+    // prepare + handle must run in the master thread, mirroring production.
+    void putDoc(PutOperation &op) {
+        IFeedView::SP feedView = _f._subDb.getFeedView();
+        _f.runInMaster([&]() { feedView->preparePut(op);
+                               feedView->handlePut(NULL, op); } );
+    }
+    void moveDoc(MoveOperation &op) {
+        IFeedView::SP feedView = _f._subDb.getFeedView();
+        _f.runInMaster([&]() { feedView->handleMove(op); } );
+    }
+    void removeDoc(RemoveOperation &op)
+    {
+        IFeedView::SP feedView = _f._subDb.getFeedView();
+        _f.runInMaster([&]() { feedView->prepareRemove(op);
+                               feedView->handleRemove(NULL, op); } );
+    }
+    // Feeds two documents with known attribute values (used by the
+    // population/reprocessing tests). createDoc() returns a prvalue, so the
+    // previous std::move() wrappers were redundant and have been dropped.
+    void putDocs() {
+        PutOperation putOp = createPut(createDoc(1, 22, 33),
+                                       Timestamp(10), 10);
+        putDoc(putOp);
+        putOp = createPut(createDoc(2, 44, 55), Timestamp(20), 20);
+        putDoc(putOp);
+    }
+};
+
+// Asserts name, document count, the values stored for lids 1 and 2, and the
+// create/last-sync serial numbers of the given attribute vector.
+void
+assertAttribute(const AttributeGuard &attr,
+                const vespalib::string &name,
+                uint32_t numDocs,
+                int64_t doc1Value,
+                int64_t doc2Value,
+                SerialNum createSerialNum,
+                SerialNum lastSerialNum)
+{
+    EXPECT_EQUAL(name, attr.get().getName());
+    EXPECT_EQUAL(numDocs, attr.get().getNumDocs());
+    EXPECT_EQUAL(doc1Value, attr.get().getInt(1));
+    EXPECT_EQUAL(doc2Value, attr.get().getInt(2));
+    EXPECT_EQUAL(createSerialNum, attr.get().getCreateSerialNum());
+    EXPECT_EQUAL(lastSerialNum, attr.get().getStatus().getLastSyncToken());
+}
+
+// attr1 expectations after DocumentHandler::putDocs(): 3 docs (lid 0 is
+// reserved), values 22 and 44 for lids 1 and 2.
+void
+assertAttribute1(const AttributeGuard &attr,
+                 SerialNum createSerialNum,
+                 SerialNum lastSerialNum)
+{
+    assertAttribute(attr, "attr1", 3, 22, 44, createSerialNum, lastSerialNum);
+}
+
+// attr2 expectations after DocumentHandler::putDocs(): values 33 and 55 for
+// lids 1 and 2.
+void
+assertAttribute2(const AttributeGuard &attr,
+                 SerialNum createSerialNum,
+                 SerialNum lastSerialNum)
+{
+    assertAttribute(attr, "attr2", 3, 33, 55, createSerialNum, lastSerialNum);
+}
+
+// Feeding two documents must populate the fast-access attribute, leaving its
+// last sync token at the last feed serial number (20).
+TEST_F("require that fast-access attributes are populated during feed", FastAccessOnlyFixture)
+{
+    f._subDb.onReplayDone();
+    DocumentHandler<FastAccessOnlyFixture> handler(f);
+    handler.putDocs();
+
+    std::vector<AttributeGuard> attrs;
+    f.getAttributeManager()->getAttributeList(attrs);
+    EXPECT_EQUAL(1u, attrs.size());
+    assertAttribute1(attrs[0], CFG_SERIAL, 20);
+}
+
+// Common scenario: feed two docs with one attribute configured, then
+// reconfigure to a two-attribute schema and verify that the new attribute
+// (attr2) is populated via reprocessing of the existing documents.
+template <typename FixtureType, typename ConfigDirT>
+void
+requireThatAttributesArePopulatedDuringReprocessing(FixtureType &f)
+{
+    f._subDb.onReplayDone();
+    DocumentHandler<FixtureType> handler(f);
+    handler.putDocs();
+
+    {
+        std::vector<AttributeGuard> attrs;
+        f.getAttributeManager()->getAttributeList(attrs);
+        EXPECT_EQUAL(1u, attrs.size());
+    }
+
+    // Reconfig to 2 attribute fields
+    f.reconfig(40u, TwoAttrSchema(), ConfigDirT::dir());
+
+    {
+        std::vector<AttributeGuard> attrs;
+        f.getAttributeManager()->getAttributeList(attrs);
+        EXPECT_EQUAL(2u, attrs.size());
+        // attr1 keeps its original create serial; attr2 is created at the
+        // reconfig serial (40) and synced to it by reprocessing.
+        assertAttribute1(attrs[0], CFG_SERIAL, 40);
+        assertAttribute2(attrs[1], 40, 40);
+    }
+}
+
+// Reprocessing scenario for the fast-access sub db variant.
+TEST_F("require that fast-access attributes are populated during reprocessing",
+       FastAccessOnlyFixture)
+{
+    requireThatAttributesArePopulatedDuringReprocessing<FastAccessOnlyFixture, ConfigDir4>(f);
+}
+
+// Setup with 2 fields (1 attribute according to config in dir)
+// Searchable fixture variant used by the regular-attribute reprocessing test.
+typedef SearchableTraitsBase<TwoAttrSchema, ConfigDir1> SearchableTraitsTwoField;
+typedef FixtureBase<SearchableTraitsTwoField> SearchableFixtureTwoField;
+
+// Reprocessing scenario for the searchable (regular attribute) variant.
+TEST_F("require that regular attributes are populated during reprocessing",
+       SearchableFixtureTwoField)
+{
+    requireThatAttributesArePopulatedDuringReprocessing<SearchableFixtureTwoField, ConfigDir2>(f);
+}
+
+namespace
+{
+
+// Checks the previous and current sub-db/lid assigned to a document
+// operation after routing through the feed view. Returns false on the first
+// mismatch so callers can wrap the call in EXPECT_TRUE.
+bool
+assertOperation(DocumentOperation &op,
+                uint32_t expPrevSubDbId, uint32_t expPrevLid,
+                uint32_t expSubDbId, uint32_t expLid)
+{
+    if (!EXPECT_EQUAL(expPrevSubDbId, op.getPrevSubDbId())) {
+        return false;
+    }
+    if (!EXPECT_EQUAL(expPrevLid, op.getPrevLid())) {
+        return false;
+    }
+    if (!EXPECT_EQUAL(expSubDbId, op.getSubDbId())) {
+        return false;
+    }
+    if (!EXPECT_EQUAL(expLid, op.getLid())) {
+        return false;
+    }
+    return true;
+}
+
+}
+
+// Exercises the lid free list: removed lids are reused, and the lowest free
+// lid is always allocated first (asserted via the lids recorded on each op).
+TEST_F("require that lid allocation uses lowest free lid", StoreOnlyFixture)
+{
+    f._subDb.onReplayDone();
+    DocumentHandler<StoreOnlyFixture> handler(f);
+    Document::UP doc;
+    PutOperation putOp;
+    RemoveOperation rmOp;
+    MoveOperation moveOp;
+
+    // doc 1 -> lid 1, doc 2 -> lid 2
+    doc = handler.createEmptyDoc(1);
+    putOp = handler.createPut(std::move(doc), Timestamp(10), 10);
+    handler.putDoc(putOp);
+    EXPECT_TRUE(assertOperation(putOp, 0, 0, 0, 1));
+    doc = handler.createEmptyDoc(2);
+    putOp = handler.createPut(std::move(doc), Timestamp(20), 20);
+    handler.putDoc(putOp);
+    EXPECT_TRUE(assertOperation(putOp, 0, 0, 0, 2));
+    // removing doc 1 frees lid 1
+    rmOp = handler.createRemove(handler.createDocId(1), Timestamp(30), 30);
+    handler.removeDoc(rmOp);
+    EXPECT_TRUE(assertOperation(rmOp, 0, 1, 0, 0));
+    // doc 3 reuses the lowest free lid (1)
+    doc = handler.createEmptyDoc(3);
+    putOp = handler.createPut(std::move(doc), Timestamp(40), 40);
+    handler.putDoc(putOp);
+    EXPECT_TRUE(assertOperation(putOp, 0, 0, 0, 1));
+    rmOp = handler.createRemove(handler.createDocId(3), Timestamp(50), 50);
+    handler.removeDoc(rmOp);
+    EXPECT_TRUE(assertOperation(rmOp, 0, 1, 0, 0));
+    // move doc 2 from lid 2 to the freed lid 1
+    doc = handler.createEmptyDoc(2);
+    moveOp = handler.createMove(std::move(doc), Timestamp(20),
+                                DbDocumentId(0, 2), 0, 60);
+    moveOp.setTargetLid(1);
+    handler.moveDoc(moveOp);
+    EXPECT_TRUE(assertOperation(moveOp, 0, 2, 0, 1));
+    // lid 2 is now the lowest free lid again
+    doc = handler.createEmptyDoc(3);
+    putOp = handler.createPut(std::move(doc), Timestamp(70), 70);
+    handler.putDoc(putOp);
+    EXPECT_TRUE(assertOperation(putOp, 0, 0, 0, 2));
+}
+
+// Wraps any sub-db fixture with a DocumentSubDBExplorer over its sub db, for
+// the state-explorer tests below.
+template <typename FixtureType>
+struct ExplorerFixture : public FixtureType
+{
+    DocumentSubDBExplorer _explorer;
+    ExplorerFixture()
+        : FixtureType(),
+          _explorer(this->_subDb)
+    {
+    }
+};
+
+// Explorer variants for each sub-db flavour.
+typedef ExplorerFixture<StoreOnlyFixture> StoreOnlyExplorerFixture;
+typedef ExplorerFixture<FastAccessFixture> FastAccessExplorerFixture;
+typedef ExplorerFixture<SearchableFixture> SearchableExplorerFixture;
+typedef std::vector<vespalib::string> StringVector;
+
+// Asserts that the explorer exposes the two always-present children
+// (documentmetastore, documentstore) plus the given extra child names.
+void
+assertExplorer(const StringVector &extraNames, const vespalib::StateExplorer &explorer)
+{
+    StringVector allNames = {"documentmetastore", "documentstore"};
+    allNames.insert(allNames.end(), extraNames.begin(), extraNames.end());
+    EXPECT_EQUAL(allNames, explorer.get_children_names());
+    EXPECT_TRUE(explorer.get_child("documentmetastore").get() != nullptr);
+    EXPECT_TRUE(explorer.get_child("documentstore").get() != nullptr);
+}
+
+// Store-only sub db: no attribute or index children are exposed.
+TEST_F("require that underlying components are explorable", StoreOnlyExplorerFixture)
+{
+    assertExplorer({}, f._explorer);
+    EXPECT_TRUE(f._explorer.get_child("attribute").get() == nullptr);
+    EXPECT_TRUE(f._explorer.get_child("index").get() == nullptr);
+}
+
+// Fast-access sub db: attribute child is exposed, index is not.
+TEST_F("require that underlying components are explorable", FastAccessExplorerFixture)
+{
+    assertExplorer({"attribute"}, f._explorer);
+    EXPECT_TRUE(f._explorer.get_child("attribute").get() != nullptr);
+    EXPECT_TRUE(f._explorer.get_child("index").get() == nullptr);
+}
+
+// Searchable sub db: both attribute and index children are exposed.
+TEST_F("require that underlying components are explorable", SearchableExplorerFixture)
+{
+    assertExplorer({"attribute", "index"}, f._explorer);
+    EXPECT_TRUE(f._explorer.get_child("attribute").get() != nullptr);
+    EXPECT_TRUE(f._explorer.get_child("index").get() != nullptr);
+}
+
+// Test runner entry point.
+TEST_MAIN()
+{
+    TEST_RUN_ALL();
+}
+
diff --git a/searchcore/src/tests/proton/documentdb/documentbucketmover/.gitignore b/searchcore/src/tests/proton/documentdb/documentbucketmover/.gitignore
new file mode 100644
index 00000000000..4c7bc43b278
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/documentbucketmover/.gitignore
@@ -0,0 +1 @@
+searchcore_documentbucketmover_test_app
diff --git a/searchcore/src/tests/proton/documentdb/documentbucketmover/CMakeLists.txt b/searchcore/src/tests/proton/documentdb/documentbucketmover/CMakeLists.txt
new file mode 100644
index 00000000000..97a9ae1516b
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/documentbucketmover/CMakeLists.txt
@@ -0,0 +1,19 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_documentbucketmover_test_app
+ SOURCES
+ documentbucketmover_test.cpp
+ DEPENDS
+ searchcore_test
+ searchcore_server
+ searchcore_persistenceengine
+ searchcore_feedoperation
+ searchcore_matching
+ searchcore_attribute
+ searchcore_documentmetastore
+ searchcore_bucketdb
+ searchcore_pcommon
+ searchcore_grouping
+ searchcore_util
+ searchcore_fconfig
+)
+vespa_add_test(NAME searchcore_documentbucketmover_test_app COMMAND searchcore_documentbucketmover_test_app)
diff --git a/searchcore/src/tests/proton/documentdb/documentbucketmover/DESC b/searchcore/src/tests/proton/documentdb/documentbucketmover/DESC
new file mode 100644
index 00000000000..7746f5e2222
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/documentbucketmover/DESC
@@ -0,0 +1 @@
+documentbucketmover test. Take a look at documentbucketmover_test.cpp for details.
diff --git a/searchcore/src/tests/proton/documentdb/documentbucketmover/FILES b/searchcore/src/tests/proton/documentdb/documentbucketmover/FILES
new file mode 100644
index 00000000000..b035525acff
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/documentbucketmover/FILES
@@ -0,0 +1 @@
+documentbucketmover_test.cpp
diff --git a/searchcore/src/tests/proton/documentdb/documentbucketmover/documentbucketmover_test.cpp b/searchcore/src/tests/proton/documentdb/documentbucketmover/documentbucketmover_test.cpp
new file mode 100644
index 00000000000..9062067132c
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/documentbucketmover/documentbucketmover_test.cpp
@@ -0,0 +1,1182 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("documentbucketmover_test");
+#include <vespa/searchcore/proton/common/bucketfactory.h>
+#include <vespa/searchcore/proton/feedoperation/moveoperation.h>
+#include <vespa/searchcore/proton/server/bucketmovejob.h>
+#include <vespa/searchcore/proton/server/documentbucketmover.h>
+#include <vespa/searchcore/proton/test/test.h>
+#include <vespa/searchlib/index/docbuilder.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/searchcore/proton/feedoperation/moveoperation.h>
+#include <vespa/searchcore/proton/server/idocumentmovehandler.h>
+#include <vespa/searchcore/proton/test/clusterstatehandler.h>
+#include <vespa/searchcore/proton/test/buckethandler.h>
+#include <vespa/searchcore/proton/server/maintenancedocumentsubdb.h>
+#include <vespa/searchcore/proton/bucketdb/bucketdbhandler.h>
+
+using namespace proton;
+using document::BucketId;
+using document::Document;
+using document::DocumentId;
+using document::DocumentTypeRepo;
+using document::GlobalId;
+using search::DocumentIdT;
+using search::DocumentMetaData;
+using search::index::DocBuilder;
+using search::index::Schema;
+using storage::spi::Timestamp;
+using vespalib::make_string;
+using storage::spi::BucketInfo;
+
+typedef std::vector<MoveOperation> MoveOperationVector;
+typedef std::vector<Document::SP> DocumentVector;
+typedef BucketId::List BucketIdVector;
+typedef std::set<BucketId> BucketIdSet;
+typedef BucketMoveJob::ScanPosition ScanPos;
+typedef BucketMoveJob::ScanIterator ScanItr;
+
+namespace {
+
+const uint32_t FIRST_SCAN_PASS = 1;
+const uint32_t SECOND_SCAN_PASS = 2;
+
+}
+
+// Records every move operation it is handed, and counts how many arrived
+// while their bucket was cached in the bucket DB (the mover is expected to
+// cache the bucket for the duration of a batch).
+struct MyMoveHandler : public IDocumentMoveHandler
+{
+    BucketDBOwner &_bucketDb;
+    MoveOperationVector _moves;
+    size_t _numCachedBuckets;
+    MyMoveHandler(BucketDBOwner &bucketDb)
+        : _bucketDb(bucketDb),
+          _moves(),
+          _numCachedBuckets()
+    {}
+    virtual void handleMove(MoveOperation &op) {
+        _moves.push_back(op);
+        if (_bucketDb.takeGuard()->isCachedBucket(op.getBucketId())) {
+            ++_numCachedBuckets;
+        }
+    }
+    // Clears recorded state between test phases.
+    void reset() {
+        _moves.clear();
+        _numCachedBuckets = 0;
+    }
+};
+
+
+// Minimal retriever backed by an in-memory vector indexed by lid.
+// Slot 0 holds a null SP since lid 0 is never a valid document.
+struct MyDocumentRetriever : public DocumentRetrieverBaseForTest
+{
+    DocumentTypeRepo::SP _repo;
+    DocumentVector _docs;
+    MyDocumentRetriever(DocumentTypeRepo::SP repo) : _repo(repo), _docs() {
+        _docs.push_back(Document::SP()); // lid 0 invalid
+    }
+    virtual const document::DocumentTypeRepo &getDocumentTypeRepo() const { return *_repo; }
+    virtual void getBucketMetaData(const storage::spi::Bucket &,
+                                   DocumentMetaData::Vector &) const {}
+    virtual DocumentMetaData getDocumentMetaData(const DocumentId &) const { return DocumentMetaData(); }
+    // Returns a fresh clone so callers own their copy.
+    virtual Document::UP getDocument(DocumentIdT lid) const {
+        return Document::UP(_docs[lid]->clone());
+    }
+
+    virtual CachedSelect::SP
+    parseSelect(const vespalib::string &) const
+    {
+        return CachedSelect::SP();
+    }
+};
+
+
+// Records modified buckets; asserts that the same bucket is never reported
+// modified twice within one phase.
+struct MyBucketModifiedHandler : public IBucketModifiedHandler
+{
+    BucketIdVector _modified;
+    virtual void notifyBucketModified(const BucketId &bucket) {
+        BucketIdVector::const_iterator itr = std::find(_modified.begin(), _modified.end(), bucket);
+        ASSERT_TRUE(itr == _modified.end());
+        _modified.push_back(bucket);
+    }
+    void reset() { _modified.clear(); }
+};
+
+
+// Test sub db: a real DocumentMetaStore plus the in-memory retriever above,
+// wrapped as a MaintenanceDocumentSubDB. Provides helpers to insert user
+// documents, flip bucket active state, and remove whole buckets.
+struct MySubDb
+{
+    DocumentMetaStore::SP _metaStoreSP;
+    DocumentMetaStore & _metaStore;
+    std::shared_ptr<MyDocumentRetriever> _realRetriever;
+    std::shared_ptr<IDocumentRetriever> _retriever;
+    MaintenanceDocumentSubDB _subDb;
+    test::UserDocuments _docs;
+    bucketdb::BucketDBHandler _bucketDBHandler;
+    MySubDb(const DocumentTypeRepo::SP &repo,
+            std::shared_ptr<BucketDBOwner> bucketDB,
+            uint32_t subDbId,
+            SubDbType subDbType)
+        : _metaStoreSP(std::make_shared<DocumentMetaStore>(bucketDB,
+                                                           DocumentMetaStore::getFixedName(),
+                                                           search::GrowStrategy(),
+                                                           documentmetastore::IGidCompare::SP(new documentmetastore::DefaultGidCompare),
+                                                           subDbType)),
+          _metaStore(*_metaStoreSP),
+          _realRetriever(std::make_shared<MyDocumentRetriever>(repo)),
+          _retriever(_realRetriever),
+          _subDb(_metaStoreSP, _retriever, subDbId), _docs(),
+          _bucketDBHandler(*bucketDB)
+    {
+        _bucketDBHandler.addDocumentMetaStore(_metaStoreSP.get(), 0);
+    }
+    // Registers each document in the meta store and appends it to the
+    // retriever, relying on sequential lid assignment (asserted below).
+    void insertDocs(const test::UserDocuments &docs_) {
+        for (test::UserDocuments::Iterator itr = docs_.begin(); itr != docs_.end(); ++itr) {
+            const test::BucketDocuments &bucketDocs = itr->second;
+            for (size_t i = 0; i < bucketDocs.getDocs().size(); ++i) {
+                const test::Document &testDoc = bucketDocs.getDocs()[i];
+                _metaStore.put(testDoc.getGid(), testDoc.getBucket(),
+                               testDoc.getTimestamp(), testDoc.getLid());
+                _realRetriever->_docs.push_back(testDoc.getDoc());
+                ASSERT_EQUAL(testDoc.getLid() + 1,
+                             _realRetriever->_docs.size());
+            }
+        }
+        _docs.merge(docs_);
+    }
+
+    BucketId bucket(uint32_t userId) const {
+        return _docs.getBucket(userId);
+    }
+
+    // Documents for a user in gid order (the order the mover visits them).
+    test::DocumentVector docs(uint32_t userId) {
+        return _docs.getGidOrderDocs(userId);
+    }
+
+    void setBucketState(const BucketId &bucketId, bool active) {
+        _metaStore.setBucketState(bucketId, active);
+    }
+
+    // Removes all of a user's documents (completing removes when the free
+    // list is active), then deletes the now-empty bucket from the bucket DB.
+    void removeBucket(uint32_t userId) {
+        const test::DocumentVector &userDocs = _docs.getDocs(userId);
+        for (size_t i = 0; i < userDocs.size(); ++i) {
+            _metaStore.remove(userDocs[i].getLid());
+            if (_metaStore.getFreeListActive()) {
+                _metaStore.removeComplete(userDocs[i].getLid());
+            }
+        }
+        BucketId b(bucket(userId));
+        EXPECT_EQUAL(0u, _metaStore.getBucketDB().takeGuard()->get(b).getEntryCount());
+        _bucketDBHandler.handleDeleteBucket(b);
+    }
+
+};
+
+
+// MySubDb pre-filled with two distinct buckets: user 1 holds 5 docs,
+// user 2 holds 3 docs (9 retriever slots including the invalid lid 0).
+struct MySubDbTwoBuckets : public MySubDb
+{
+    MySubDbTwoBuckets(test::UserDocumentsBuilder &builder,
+                      std::shared_ptr<BucketDBOwner> bucketDB,
+                      uint32_t subDbId,
+                      SubDbType subDbType)
+        : MySubDb(builder.getRepo(), bucketDB, subDbId, subDbType)
+    {
+        builder.createDocs(1, 1, 6);
+        builder.createDocs(2, 6, 9);
+        insertDocs(builder.getDocs());
+        ASSERT_NOT_EQUAL(bucket(1), bucket(2));
+        ASSERT_EQUAL(5u, docs(1).size());
+        ASSERT_EQUAL(3u, docs(2).size());
+        ASSERT_EQUAL(9u, _realRetriever->_docs.size());
+    }
+};
+
+
+// Fixture for DocumentBucketMover unit tests: a two-bucket source sub db,
+// a recording move handler, and helpers to target a bucket and move docs.
+struct MoveFixture
+{
+    test::UserDocumentsBuilder _builder;
+    std::shared_ptr<BucketDBOwner> _bucketDB;
+    DocumentBucketMover _mover;
+    MySubDbTwoBuckets _source;
+    BucketDBOwner _bucketDb;
+    MyMoveHandler _handler;
+    MoveFixture()
+        : _builder(),
+          _bucketDB(std::make_shared<BucketDBOwner>()),
+          _mover(),
+          _source(_builder, _bucketDB, 0u, SubDbType::READY),
+          _bucketDb(),
+          _handler(_bucketDb)
+    {
+    }
+    // Points the mover at one bucket, overriding the source sub db id so the
+    // produced operations carry the expected source/target sub db ids.
+    void setupForBucket(const BucketId &bucket,
+                        uint32_t sourceSubDbId,
+                        uint32_t targetSubDbId) {
+        _source._subDb._subDbId = sourceSubDbId;
+        _mover.setupForBucket(bucket, &_source._subDb, targetSubDbId, _handler, _bucketDb);
+    }
+    void moveDocuments(size_t maxDocsToMove) {
+        _mover.moveDocuments(maxDocsToMove);
+    }
+};
+
+
+// A default-constructed mover has no bucket to work on and must report done,
+// even after moveDocuments() is invoked.
+TEST("require that initial bucket mover is done")
+{
+    DocumentBucketMover dbm;
+    EXPECT_TRUE(dbm.bucketDone());
+    dbm.moveDocuments(2);
+    EXPECT_TRUE(dbm.bucketDone());
+}
+
+
+// Checks that a produced MoveOperation matches the source document: bucket,
+// timestamp, doc id, source lid/sub-db, and an unassigned target lid with
+// the expected target sub db. Returns false on the first mismatch.
+bool
+assertEqual(const BucketId &bucket, const test::Document &doc,
+            uint32_t sourceSubDbId, uint32_t targetSubDbId, const MoveOperation &op)
+{
+    if (!EXPECT_EQUAL(bucket, op.getBucketId())) return false;
+    if (!EXPECT_EQUAL(doc.getTimestamp(), op.getTimestamp())) return false;
+    if (!EXPECT_EQUAL(doc.getDocId(), op.getDocument()->getId())) return false;
+    if (!EXPECT_EQUAL(doc.getLid(), op.getSourceDbdId().getLid())) return false;
+    if (!EXPECT_EQUAL(sourceSubDbId, op.getSourceDbdId().getSubDbId())) return false;
+    if (!EXPECT_EQUAL(0u, op.getTargetDbdId().getLid())) return false;
+    if (!EXPECT_EQUAL(targetSubDbId, op.getTargetDbdId().getSubDbId())) return false;
+    return true;
+}
+
+
+// Moving with a budget equal to the bucket's document count (5) must finish
+// the bucket in one pass and produce one move operation per document.
+TEST_F("require that we can move all documents", MoveFixture)
+{
+    f.setupForBucket(f._source.bucket(1), 6, 9);
+    f.moveDocuments(5);
+    EXPECT_TRUE(f._mover.bucketDone());
+    EXPECT_EQUAL(5u, f._handler._moves.size());
+    // Bug fix: the loop previously compared docs(1)[0] against _moves[0] on
+    // every iteration (and dropped assertEqual's result), so only the first
+    // document was actually verified. Index with i and check the result.
+    for (size_t i = 0; i < 5u; ++i) {
+        EXPECT_TRUE(assertEqual(f._source.bucket(1), f._source.docs(1)[i], 6, 9, f._handler._moves[i]));
+    }
+}
+
+// During a batch the mover keeps the bucket cached in the bucket DB (all 5
+// handleMove calls see it cached); after the batch the cache is released.
+TEST_F("require that bucket is cached when IDocumentMoveHandler handles move operation",
+       MoveFixture)
+{
+    f.setupForBucket(f._source.bucket(1), 6, 9);
+    f.moveDocuments(5);
+    EXPECT_TRUE(f._mover.bucketDone());
+    EXPECT_EQUAL(5u, f._handler._moves.size());
+    EXPECT_EQUAL(5u, f._handler._numCachedBuckets);
+    EXPECT_FALSE(f._bucketDb.takeGuard()->isCachedBucket(f._source.bucket(1)));
+}
+
+// Moving 2 docs at a time through a 5-doc bucket: done only after the third
+// batch, and a further call is a harmless no-op.
+TEST_F("require that we can move documents in several steps", MoveFixture)
+{
+    f.setupForBucket(f._source.bucket(1), 6, 9);
+    f.moveDocuments(2);
+    EXPECT_FALSE(f._mover.bucketDone());
+    EXPECT_EQUAL(2u, f._handler._moves.size());
+    assertEqual(f._source.bucket(1), f._source.docs(1)[0], 6, 9, f._handler._moves[0]);
+    assertEqual(f._source.bucket(1), f._source.docs(1)[1], 6, 9, f._handler._moves[1]);
+    f.moveDocuments(2);
+    EXPECT_FALSE(f._mover.bucketDone());
+    EXPECT_EQUAL(4u, f._handler._moves.size());
+    assertEqual(f._source.bucket(1), f._source.docs(1)[2], 6, 9, f._handler._moves[2]);
+    assertEqual(f._source.bucket(1), f._source.docs(1)[3], 6, 9, f._handler._moves[3]);
+    f.moveDocuments(2);
+    EXPECT_TRUE(f._mover.bucketDone());
+    EXPECT_EQUAL(5u, f._handler._moves.size());
+    assertEqual(f._source.bucket(1), f._source.docs(1)[4], 6, 9, f._handler._moves[4]);
+    // no more documents to move; state and move count are unchanged
+    f.moveDocuments(2);
+    EXPECT_TRUE(f._mover.bucketDone());
+    EXPECT_EQUAL(5u, f._handler._moves.size());
+}
+
+
+// Base fixture for scan-iterator tests: one READY and one NOTREADY sub db
+// sharing a bucket DB, plus factories for scan iterators.
+struct ScanFixtureBase
+{
+    test::UserDocumentsBuilder _builder;
+    std::shared_ptr<BucketDBOwner> _bucketDB;
+    MySubDb _ready;
+    MySubDb _notReady;
+    ScanFixtureBase()
+        : _builder(),
+          _bucketDB(std::make_shared<BucketDBOwner>()),
+          _ready(_builder.getRepo(), _bucketDB, 1, SubDbType::READY),
+          _notReady(_builder.getRepo(), _bucketDB, 2, SubDbType::NOTREADY)
+    {
+    }
+
+    // Iterator over the full bucket space from the beginning.
+    ScanItr
+    getItr(void)
+    {
+        return ScanItr(_bucketDB->takeGuard(), BucketId());
+    }
+
+    // Iterator starting after 'bucket', bounded by 'endBucket', for the
+    // given scan pass (first pass scans forward, second wraps around).
+    ScanItr
+    getItr(BucketId bucket,
+           BucketId endBucket = BucketId(),
+           uint32_t pass = FIRST_SCAN_PASS)
+    {
+        return ScanItr(_bucketDB->takeGuard(), pass,
+                       bucket, endBucket);
+    }
+};
+
+
+// Two ready buckets (users 6 and 8) and two not-ready buckets (users 2
+// and 4).
+struct ScanFixture : public ScanFixtureBase
+{
+    ScanFixture() : ScanFixtureBase()
+    {
+        _builder.createDocs(6, 1, 2);
+        _builder.createDocs(8, 2, 3);
+        _ready.insertDocs(_builder.getDocs());
+        _builder.clearDocs();
+        _builder.createDocs(2, 1, 2);
+        _builder.createDocs(4, 2, 3);
+        _notReady.insertDocs(_builder.getDocs());
+        _builder.clearDocs();
+    }
+};
+
+
+// Only not-ready buckets populated (users 2 and 4).
+struct OnlyNotReadyScanFixture : public ScanFixtureBase
+{
+    OnlyNotReadyScanFixture() : ScanFixtureBase()
+    {
+        _builder.createDocs(2, 1, 2);
+        _builder.createDocs(4, 2, 3);
+        _notReady.insertDocs(_builder.getDocs());
+    }
+};
+
+
+// Only ready buckets populated (users 6 and 8).
+struct OnlyReadyScanFixture : public ScanFixtureBase
+{
+    OnlyReadyScanFixture() : ScanFixtureBase()
+    {
+        _builder.createDocs(6, 1, 2);
+        _builder.createDocs(8, 2, 3);
+        _ready.insertDocs(_builder.getDocs());
+    }
+};
+
+
+// BucketId::List with a chainable add() for terse expected-value literals.
+struct BucketVector : public BucketId::List
+{
+    BucketVector() : BucketId::List() {}
+    BucketVector &add(const BucketId &bucket) {
+        push_back(bucket);
+        return *this;
+    }
+};
+
+
+// Advances the iterator until it points at a bucket that has documents in
+// the given sub-db type (ready vs not-ready), or until it becomes invalid.
+void
+advanceToFirstBucketWithDocs(ScanItr &itr, SubDbType subDbType)
+{
+    while (itr.valid()) {
+        if (subDbType == SubDbType::READY) {
+            if (itr.hasReadyBucketDocs())
+                return;
+        } else {
+            if (itr.hasNotReadyBucketDocs())
+                return;
+        }
+        ++itr;
+    }
+}
+
+
+// Asserts that scanning yields exactly the expected buckets (of the given
+// sub-db type) in order, and nothing more.
+void assertEquals(const BucketVector &exp, ScanItr &itr, SubDbType subDbType)
+{
+    for (size_t i = 0; i < exp.size(); ++i) {
+        advanceToFirstBucketWithDocs(itr, subDbType);
+        EXPECT_TRUE(itr.valid());
+        EXPECT_EQUAL(exp[i], itr.getBucket());
+        ++itr;
+    }
+    advanceToFirstBucketWithDocs(itr, subDbType);
+    EXPECT_FALSE(itr.valid());
+}
+
+
+// A full scan visits every not-ready and every ready bucket.
+TEST_F("require that we can iterate all buckets from start to end", ScanFixture)
+{
+    {
+        ScanItr itr = f.getItr();
+        assertEquals(BucketVector().
+                     add(f._notReady.bucket(2)).
+                     add(f._notReady.bucket(4)), itr, SubDbType::NOTREADY);
+    }
+    {
+        ScanItr itr = f.getItr();
+        assertEquals(BucketVector().
+                     add(f._ready.bucket(6)).
+                     add(f._ready.bucket(8)), itr, SubDbType::READY);
+    }
+}
+
+
+// Resuming a scan after a not-ready bucket: the first pass covers the
+// remainder, and the second pass wraps around to cover what was skipped.
+TEST_F("require that we can iterate from the middle of not ready buckets", ScanFixture)
+{
+    BucketId bucket = f._notReady.bucket(2);
+    {
+        ScanItr itr = f.getItr(bucket, bucket, FIRST_SCAN_PASS);
+        assertEquals(BucketVector().
+                     add(f._notReady.bucket(4)), itr, SubDbType::NOTREADY);
+    }
+    {
+        ScanItr itr = f.getItr(BucketId(), bucket, SECOND_SCAN_PASS);
+        assertEquals(BucketVector().
+                     add(f._notReady.bucket(2)), itr, SubDbType::NOTREADY);
+    }
+    {
+        ScanItr itr = f.getItr();
+        assertEquals(BucketVector().
+                     add(f._ready.bucket(6)).
+                     add(f._ready.bucket(8)), itr, SubDbType::READY);
+    }
+}
+
+
+// Same two-pass resume behaviour, starting from the middle of the ready
+// buckets.
+TEST_F("require that we can iterate from the middle of ready buckets", ScanFixture)
+{
+    BucketId bucket = f._ready.bucket(6);
+    {
+        ScanItr itr = f.getItr();
+        assertEquals(BucketVector().
+                     add(f._notReady.bucket(2)).
+                     add(f._notReady.bucket(4)), itr, SubDbType::NOTREADY);
+    }
+    {
+        ScanItr itr = f.getItr(bucket, bucket, FIRST_SCAN_PASS);
+        assertEquals(BucketVector().
+                     add(f._ready.bucket(8)), itr, SubDbType::READY);
+    }
+    {
+        ScanItr itr = f.getItr(BucketId(), bucket, SECOND_SCAN_PASS);
+        assertEquals(BucketVector().
+                     add(f._ready.bucket(6)), itr, SubDbType::READY);
+    }
+}
+
+
+// Scanning works when only the not-ready sub db has buckets.
+TEST_F("require that we can iterate only not ready buckets", OnlyNotReadyScanFixture)
+{
+    ScanItr itr = f.getItr();
+    assertEquals(BucketVector().
+                 add(f._notReady.bucket(2)).
+                 add(f._notReady.bucket(4)), itr, SubDbType::NOTREADY);
+}
+
+
+// Scanning works when only the ready sub db has buckets.
+TEST_F("require that we can iterate only ready buckets", OnlyReadyScanFixture)
+{
+    ScanItr itr = f.getItr();
+    assertEquals(BucketVector().
+                 add(f._ready.bucket(6)).
+                 add(f._ready.bucket(8)), itr, SubDbType::READY);
+}
+
+
+// An empty bucket DB yields an immediately invalid iterator.
+TEST_F("require that we can iterate zero buckets", ScanFixtureBase)
+{
+    ScanItr itr = f.getItr();
+    EXPECT_FALSE(itr.valid());
+}
+
+
+// Fake frozen-bucket handler: buckets in _frozen cannot be acquired
+// exclusively; thawing a bucket notifies all registered listeners.
+struct MyFrozenBucketHandler : public IFrozenBucketHandler
+{
+    BucketIdSet _frozen;
+    std::set<IBucketFreezeListener *> _listeners;
+
+    MyFrozenBucketHandler()
+        : IFrozenBucketHandler(),
+          _frozen(),
+          _listeners()
+    {
+    }
+
+    // All listeners must have deregistered before destruction.
+    virtual ~MyFrozenBucketHandler()
+    {
+        assert(_listeners.empty());
+    }
+
+    MyFrozenBucketHandler &addFrozen(const BucketId &bucket) {
+        _frozen.insert(bucket);
+        return *this;
+    }
+    MyFrozenBucketHandler &remFrozen(const BucketId &bucket) {
+        _frozen.erase(bucket);
+        for (auto &listener : _listeners) {
+            listener->notifyThawedBucket(bucket);
+        }
+        return *this;
+    }
+    virtual void addListener(IBucketFreezeListener *listener) override {
+        _listeners.insert(listener);
+    }
+    virtual void removeListener(IBucketFreezeListener *listener) override {
+        _listeners.erase(listener);
+    }
+
+    // Frozen buckets yield a null guard (acquisition failure).
+    virtual ExclusiveBucketGuard::UP acquireExclusiveBucket(BucketId bucket) override {
+        return (_frozen.count(bucket) != 0)
+            ? ExclusiveBucketGuard::UP()
+            : std::make_unique<ExclusiveBucketGuard>(bucket);
+    }
+};
+
+// Fixture driving a real BucketMoveJob against fake handlers: a bucket state
+// calculator, cluster-state/bucket handlers, a recording move handler, and
+// ready/not-ready sub dbs. Helpers mutate calculator state, frozen state,
+// and bucket activation, and expose the recorded results.
+struct ControllerFixtureBase
+{
+    test::UserDocumentsBuilder _builder;
+    test::BucketStateCalculator::SP _calc;
+    test::ClusterStateHandler _clusterStateHandler;
+    test::BucketHandler _bucketHandler;
+    MyBucketModifiedHandler _modifiedHandler;
+    std::shared_ptr<BucketDBOwner> _bucketDB;
+    MyMoveHandler _moveHandler;
+    MySubDb _ready;
+    MySubDb _notReady;
+    MyFrozenBucketHandler _fbh;
+    BucketMoveJob _bmj;
+    ControllerFixtureBase()
+        : _builder(),
+          _calc(new test::BucketStateCalculator),
+          _bucketHandler(),
+          _modifiedHandler(),
+          _bucketDB(std::make_shared<BucketDBOwner>()),
+          _moveHandler(*_bucketDB),
+          _ready(_builder.getRepo(), _bucketDB, 1, SubDbType::READY),
+          _notReady(_builder.getRepo(), _bucketDB, 2, SubDbType::NOTREADY),
+          _fbh(),
+          _bmj(_calc, _moveHandler, _modifiedHandler, _ready._subDb,
+               _notReady._subDb, _fbh, _clusterStateHandler, _bucketHandler,
+               "test")
+    {
+    }
+    ControllerFixtureBase &addReady(const BucketId &bucket) {
+        _calc->addReady(bucket);
+        return *this;
+    }
+    ControllerFixtureBase &remReady(const BucketId &bucket) {
+        _calc->remReady(bucket);
+        return *this;
+    }
+    // Resets recorded state and re-announces the calculator, simulating a
+    // cluster state change.
+    ControllerFixtureBase &changeCalc() {
+        _calc->resetAsked();
+        _moveHandler.reset();
+        _modifiedHandler.reset();
+        _clusterStateHandler.notifyClusterStateChanged(_calc);
+        return *this;
+    }
+    ControllerFixtureBase &addFrozen(const BucketId &bucket) {
+        _fbh.addFrozen(bucket);
+        return *this;
+    }
+    // NOTE(review): _fbh.remFrozen() already notifies registered listeners;
+    // _bmj is then notified directly as well — confirm the job is not also a
+    // registered listener, or thaw handling must tolerate double delivery.
+    ControllerFixtureBase &remFrozen(const BucketId &bucket) {
+        _fbh.remFrozen(bucket);
+        _bmj.notifyThawedBucket(bucket);
+        return *this;
+    }
+    ControllerFixtureBase &activateBucket(const BucketId &bucket) {
+        _ready.setBucketState(bucket, true);
+        _bucketHandler.notifyBucketStateChanged(bucket,
+                                                BucketInfo::ActiveState::
+                                                ACTIVE);
+        return *this;
+    }
+    ControllerFixtureBase &deactivateBucket(const BucketId &bucket) {
+        _ready.setBucketState(bucket, false);
+        _bucketHandler.notifyBucketStateChanged(bucket,
+                                                BucketInfo::ActiveState::
+                                                NOT_ACTIVE);
+        return *this;
+    }
+    const MoveOperationVector &docsMoved() const {
+        return _moveHandler._moves;
+    }
+    const BucketIdVector &bucketsModified() const {
+        return _modifiedHandler._modified;
+    }
+    // Buckets the job asked the calculator about, in scan order.
+    const BucketIdVector &calcAsked() const {
+        return _calc->asked();
+    }
+};
+
+
+// Ready buckets 1 (3 docs) and 2 (2 docs); not-ready buckets 3 (2 docs)
+// and 4 (3 docs).
+struct ControllerFixture : public ControllerFixtureBase
+{
+    ControllerFixture() : ControllerFixtureBase()
+    {
+        _builder.createDocs(1, 1, 4); // 3 docs
+        _builder.createDocs(2, 4, 6); // 2 docs
+        _ready.insertDocs(_builder.getDocs());
+        _builder.clearDocs();
+        _builder.createDocs(3, 1, 3); // 2 docs
+        _builder.createDocs(4, 3, 6); // 3 docs
+        _notReady.insertDocs(_builder.getDocs());
+    }
+};
+
+
+// Four ready buckets with 1, 2, 3 and 4 docs respectively; not-ready empty.
+struct OnlyReadyControllerFixture : public ControllerFixtureBase
+{
+    OnlyReadyControllerFixture() : ControllerFixtureBase()
+    {
+        _builder.createDocs(1, 1, 2); // 1 docs
+        _builder.createDocs(2, 2, 4); // 2 docs
+        _builder.createDocs(3, 4, 7); // 3 docs
+        _builder.createDocs(4, 7, 11); // 4 docs
+        _ready.insertDocs(_builder.getDocs());
+    }
+};
+
+
+// When every populated bucket is already in the correct sub db, a full scan
+// completes without producing moves or modifications.
+TEST_F("require that nothing is moved if bucket state says so", ControllerFixture)
+{
+    EXPECT_FALSE(f._bmj.done());
+    f.addReady(f._ready.bucket(1));
+    f.addReady(f._ready.bucket(2));
+    f._bmj.scanAndMove(4, 3);
+    EXPECT_TRUE(f._bmj.done());
+    EXPECT_TRUE(f.docsMoved().empty());
+    EXPECT_TRUE(f.bucketsModified().empty());
+}
+
+
+// Bucket 4 is marked ready by the calculator while it lives in the
+// not-ready sub db, so its 3 docs move not-ready (2) -> ready (1).
+TEST_F("require that not ready bucket is moved to ready if bucket state says so", ControllerFixture)
+{
+    // bucket 4 should be moved
+    f.addReady(f._ready.bucket(1));
+    f.addReady(f._ready.bucket(2));
+    f.addReady(f._notReady.bucket(4));
+    f._bmj.scanAndMove(4, 3);
+    EXPECT_TRUE(f._bmj.done());
+    EXPECT_EQUAL(3u, f.docsMoved().size());
+    assertEqual(f._notReady.bucket(4), f._notReady.docs(4)[0], 2, 1, f.docsMoved()[0]);
+    assertEqual(f._notReady.bucket(4), f._notReady.docs(4)[1], 2, 1, f.docsMoved()[1]);
+    assertEqual(f._notReady.bucket(4), f._notReady.docs(4)[2], 2, 1, f.docsMoved()[2]);
+    EXPECT_EQUAL(1u, f.bucketsModified().size());
+    EXPECT_EQUAL(f._notReady.bucket(4), f.bucketsModified()[0]);
+}
+
+
+// Bucket 2 loses its ready status, so its 2 docs move ready (1) ->
+// not-ready (2); the scan budget leaves the job unfinished.
+TEST_F("require that ready bucket is moved to not ready if bucket state says so", ControllerFixture)
+{
+    // bucket 2 should be moved
+    f.addReady(f._ready.bucket(1));
+    f._bmj.scanAndMove(4, 3);
+    EXPECT_FALSE(f._bmj.done());
+    EXPECT_EQUAL(2u, f.docsMoved().size());
+    assertEqual(f._ready.bucket(2), f._ready.docs(2)[0], 1, 2, f.docsMoved()[0]);
+    assertEqual(f._ready.bucket(2), f._ready.docs(2)[1], 1, 2, f.docsMoved()[1]);
+    EXPECT_EQUAL(1u, f.bucketsModified().size());
+    EXPECT_EQUAL(f._ready.bucket(2), f.bucketsModified()[0]);
+}
+
+
+// The per-call bucket budget limits how far the scan progresses: the first
+// call inspects 3 buckets without reaching bucket 4, and a second call with
+// budget 1 reaches it and moves its docs.
+TEST_F("require that maxBucketsToScan is taken into consideration between not ready and ready scanning",
+       ControllerFixture)
+{
+    // bucket 4 should moved (last bucket)
+    f.addReady(f._ready.bucket(1));
+    f.addReady(f._ready.bucket(2));
+    f.addReady(f._notReady.bucket(4));
+
+    // buckets 1, 2, and 3 considered
+    f._bmj.scanAndMove(3, 3);
+    EXPECT_FALSE(f._bmj.done());
+    EXPECT_EQUAL(0u, f.docsMoved().size());
+    EXPECT_EQUAL(0u, f.bucketsModified().size());
+
+    // move bucket 4
+    f._bmj.scanAndMove(1, 4);
+    EXPECT_TRUE(f._bmj.done());
+    EXPECT_EQUAL(3u, f.docsMoved().size());
+    assertEqual(f._notReady.bucket(4), f._notReady.docs(4)[0], 2, 1, f.docsMoved()[0]);
+    assertEqual(f._notReady.bucket(4), f._notReady.docs(4)[1], 2, 1, f.docsMoved()[1]);
+    assertEqual(f._notReady.bucket(4), f._notReady.docs(4)[2], 2, 1, f.docsMoved()[2]);
+    EXPECT_EQUAL(1u, f.bucketsModified().size());
+    EXPECT_EQUAL(f._notReady.bucket(4), f.bucketsModified()[0]);
+}
+
+
+// Repeated calls with small scan/move budgets progress through buckets
+// 2, 3 and 4, moving at most 2 docs per call and reporting each bucket
+// modified exactly once when its first docs move.
+TEST_F("require that we move buckets in several steps", ControllerFixture)
+{
+    // bucket 2, 3, and 4 should be moved
+    f.addReady(f._ready.bucket(1));
+    f.addReady(f._notReady.bucket(3));
+    f.addReady(f._notReady.bucket(4));
+
+    // consider move bucket 1
+    f._bmj.scanAndMove(1, 2);
+    EXPECT_FALSE(f._bmj.done());
+    EXPECT_EQUAL(0u, f.docsMoved().size());
+    EXPECT_EQUAL(0u, f.bucketsModified().size());
+
+    // move bucket 2, docs 1,2
+    f._bmj.scanAndMove(1, 2);
+    EXPECT_FALSE(f._bmj.done());
+    EXPECT_EQUAL(2u, f.docsMoved().size());
+    EXPECT_TRUE(assertEqual(f._ready.bucket(2), f._ready.docs(2)[0], 1, 2, f.docsMoved()[0]));
+    EXPECT_TRUE(assertEqual(f._ready.bucket(2), f._ready.docs(2)[1], 1, 2, f.docsMoved()[1]));
+    EXPECT_EQUAL(1u, f.bucketsModified().size());
+    EXPECT_EQUAL(f._ready.bucket(2), f.bucketsModified()[0]);
+
+    // move bucket 3, docs 1,2
+    f._bmj.scanAndMove(1, 2);
+    EXPECT_FALSE(f._bmj.done());
+    EXPECT_EQUAL(4u, f.docsMoved().size());
+    EXPECT_TRUE(assertEqual(f._notReady.bucket(3), f._notReady.docs(3)[0], 2, 1, f.docsMoved()[2]));
+    EXPECT_TRUE(assertEqual(f._notReady.bucket(3), f._notReady.docs(3)[1], 2, 1, f.docsMoved()[3]));
+    EXPECT_EQUAL(2u, f.bucketsModified().size());
+    EXPECT_EQUAL(f._notReady.bucket(3), f.bucketsModified()[1]);
+
+    // move bucket 4, docs 1,2
+    f._bmj.scanAndMove(1, 2);
+    EXPECT_FALSE(f._bmj.done());
+    EXPECT_EQUAL(6u, f.docsMoved().size());
+    EXPECT_TRUE(assertEqual(f._notReady.bucket(4), f._notReady.docs(4)[0], 2, 1, f.docsMoved()[4]));
+    EXPECT_TRUE(assertEqual(f._notReady.bucket(4), f._notReady.docs(4)[1], 2, 1, f.docsMoved()[5]));
+    EXPECT_EQUAL(2u, f.bucketsModified().size());
+
+    // move bucket 4, docs 3
+    f._bmj.scanAndMove(1, 2);
+    EXPECT_TRUE(f._bmj.done());
+    EXPECT_EQUAL(7u, f.docsMoved().size());
+    EXPECT_TRUE(assertEqual(f._notReady.bucket(4), f._notReady.docs(4)[2], 2, 1, f.docsMoved()[6]));
+    EXPECT_EQUAL(3u, f.bucketsModified().size());
+    EXPECT_EQUAL(f._notReady.bucket(4), f.bucketsModified()[2]);
+}
+
+
+TEST_F("require that we can change calculator and continue scanning where we left off", ControllerFixture)
+{
+ // no buckets should move
+ // original scan sequence is bucket1, bucket2, bucket3, bucket4
+ f.addReady(f._ready.bucket(1));
+ f.addReady(f._ready.bucket(2));
+
+ // start with bucket2
+ f._bmj.scanAndMove(1, 0);
+ f.changeCalc();
+ f._bmj.scanAndMove(5, 0);
+ EXPECT_TRUE(f._bmj.done());
+ EXPECT_EQUAL(4u, f.calcAsked().size());
+ EXPECT_EQUAL(f._ready.bucket(2), f.calcAsked()[0]);
+ EXPECT_EQUAL(f._notReady.bucket(3), f.calcAsked()[1]);
+ EXPECT_EQUAL(f._notReady.bucket(4), f.calcAsked()[2]);
+ EXPECT_EQUAL(f._ready.bucket(1), f.calcAsked()[3]);
+
+ // start with bucket3
+ f.changeCalc();
+ f._bmj.scanAndMove(2, 0);
+ f.changeCalc();
+ f._bmj.scanAndMove(5, 0);
+ EXPECT_TRUE(f._bmj.done());
+ EXPECT_EQUAL(4u, f.calcAsked().size());
+ EXPECT_EQUAL(f._notReady.bucket(3), f.calcAsked()[0]);
+ EXPECT_EQUAL(f._notReady.bucket(4), f.calcAsked()[1]);
+ EXPECT_EQUAL(f._ready.bucket(1), f.calcAsked()[2]);
+ EXPECT_EQUAL(f._ready.bucket(2), f.calcAsked()[3]);
+
+ // start with bucket4
+ f.changeCalc();
+ f._bmj.scanAndMove(3, 0);
+ f.changeCalc();
+ f._bmj.scanAndMove(5, 0);
+ EXPECT_TRUE(f._bmj.done());
+ EXPECT_EQUAL(4u, f.calcAsked().size());
+ EXPECT_EQUAL(f._notReady.bucket(4), f.calcAsked()[0]);
+ EXPECT_EQUAL(f._ready.bucket(1), f.calcAsked()[1]);
+ EXPECT_EQUAL(f._ready.bucket(2), f.calcAsked()[2]);
+ EXPECT_EQUAL(f._notReady.bucket(3), f.calcAsked()[3]);
+
+ // start with bucket1
+ f.changeCalc();
+ f._bmj.scanAndMove(5, 0);
+ EXPECT_TRUE(f._bmj.done());
+ EXPECT_EQUAL(4u, f.calcAsked().size());
+ EXPECT_EQUAL(f._ready.bucket(1), f.calcAsked()[0]);
+ EXPECT_EQUAL(f._ready.bucket(2), f.calcAsked()[1]);
+ EXPECT_EQUAL(f._notReady.bucket(3), f.calcAsked()[2]);
+ EXPECT_EQUAL(f._notReady.bucket(4), f.calcAsked()[3]);
+
+ // change calc in second pass
+ f.changeCalc();
+ f._bmj.scanAndMove(3, 0);
+ f.changeCalc();
+ f._bmj.scanAndMove(2, 0);
+ EXPECT_FALSE(f._bmj.done());
+ EXPECT_EQUAL(2u, f.calcAsked().size());
+ EXPECT_EQUAL(f._notReady.bucket(4), f.calcAsked()[0]);
+ EXPECT_EQUAL(f._ready.bucket(1), f.calcAsked()[1]);
+ f.changeCalc();
+ f._bmj.scanAndMove(5, 0);
+ EXPECT_TRUE(f._bmj.done());
+ EXPECT_EQUAL(4u, f.calcAsked().size());
+ EXPECT_EQUAL(f._ready.bucket(2), f.calcAsked()[0]);
+ EXPECT_EQUAL(f._notReady.bucket(3), f.calcAsked()[1]);
+ EXPECT_EQUAL(f._notReady.bucket(4), f.calcAsked()[2]);
+ EXPECT_EQUAL(f._ready.bucket(1), f.calcAsked()[3]);
+
+ // check 1 bucket at a time, start with bucket2
+ f.changeCalc();
+ f._bmj.scanAndMove(1, 0);
+ f.changeCalc();
+ f._bmj.scanAndMove(1, 0);
+ EXPECT_FALSE(f._bmj.done());
+ EXPECT_EQUAL(1u, f.calcAsked().size());
+ EXPECT_EQUAL(f._ready.bucket(2), f.calcAsked()[0]);
+ f._bmj.scanAndMove(1, 0);
+ EXPECT_FALSE(f._bmj.done());
+ EXPECT_EQUAL(2u, f.calcAsked().size());
+ EXPECT_EQUAL(f._notReady.bucket(3), f.calcAsked()[1]);
+ f._bmj.scanAndMove(1, 0);
+ EXPECT_FALSE(f._bmj.done());
+ EXPECT_EQUAL(3u, f.calcAsked().size());
+ EXPECT_EQUAL(f._notReady.bucket(4), f.calcAsked()[2]);
+ f._bmj.scanAndMove(1, 0);
+ EXPECT_TRUE(f._bmj.done());
+ EXPECT_EQUAL(4u, f.calcAsked().size());
+ EXPECT_EQUAL(f._ready.bucket(1), f.calcAsked()[3]);
+}
+
+
+TEST_F("require that current bucket moving is cancelled when we change calculator", ControllerFixture)
+{
+ // bucket 1 should be moved
+ f.addReady(f._ready.bucket(2));
+ f._bmj.scanAndMove(3, 1);
+ EXPECT_FALSE(f._bmj.done());
+ EXPECT_EQUAL(1u, f.docsMoved().size());
+ EXPECT_EQUAL(1u, f.calcAsked().size());
+ f.changeCalc(); // Not cancelled, bucket 1 still moving to notReady
+ EXPECT_EQUAL(1u, f.calcAsked().size());
+ EXPECT_EQUAL(f._ready.bucket(1), f.calcAsked()[0]);
+ f._calc->resetAsked();
+ f._bmj.scanAndMove(2, 1);
+ EXPECT_FALSE(f._bmj.done());
+ EXPECT_EQUAL(1u, f.docsMoved().size());
+ EXPECT_EQUAL(0u, f.calcAsked().size());
+ f.addReady(f._ready.bucket(1));
+ f.changeCalc(); // cancelled, bucket 1 no longer moving to notReady
+ EXPECT_EQUAL(1u, f.calcAsked().size());
+ EXPECT_EQUAL(f._ready.bucket(1), f.calcAsked()[0]);
+ f._calc->resetAsked();
+ f.remReady(f._ready.bucket(1));
+ f.changeCalc(); // not cancelled. No active bucket move
+ EXPECT_EQUAL(0u, f.calcAsked().size());
+ f._calc->resetAsked();
+ f._bmj.scanAndMove(2, 1);
+ EXPECT_EQUAL(0u, f.docsMoved().size());
+ EXPECT_EQUAL(2u, f.calcAsked().size());
+ EXPECT_EQUAL(f._ready.bucket(2), f.calcAsked()[0]);
+ EXPECT_EQUAL(f._notReady.bucket(3), f.calcAsked()[1]);
+ f._bmj.scanAndMove(2, 3);
+ EXPECT_TRUE(f._bmj.done());
+ EXPECT_EQUAL(3u, f.docsMoved().size());
+ EXPECT_EQUAL(4u, f.calcAsked().size());
+ EXPECT_EQUAL(f._notReady.bucket(4), f.calcAsked()[2]);
+ EXPECT_EQUAL(f._ready.bucket(1), f.calcAsked()[3]);
+}
+
+
+TEST_F("require that last bucket is moved before reporting done", ControllerFixture)
+{
+ // bucket 4 should be moved
+ f.addReady(f._ready.bucket(1));
+ f.addReady(f._ready.bucket(2));
+ f.addReady(f._notReady.bucket(4));
+ f._bmj.scanAndMove(4, 1);
+ EXPECT_FALSE(f._bmj.done());
+ EXPECT_EQUAL(1u, f.docsMoved().size());
+ EXPECT_EQUAL(4u, f.calcAsked().size());
+ f._bmj.scanAndMove(0, 2);
+ EXPECT_TRUE(f._bmj.done());
+ EXPECT_EQUAL(3u, f.docsMoved().size());
+ EXPECT_EQUAL(4u, f.calcAsked().size());
+}
+
+
+TEST_F("require that frozen bucket is not moved until thawed", ControllerFixture)
+{
+ // bucket 1 should be moved but is frozen
+ f.addReady(f._ready.bucket(2));
+ f.addFrozen(f._ready.bucket(1));
+ f._bmj.scanAndMove(4, 3); // scan all, delay frozen bucket 1
+ f.remFrozen(f._ready.bucket(1));
+ EXPECT_FALSE(f._bmj.done());
+ EXPECT_EQUAL(0u, f.docsMoved().size());
+ EXPECT_EQUAL(0u, f.bucketsModified().size());
+ f._bmj.scanAndMove(0, 3); // move delayed and thawed bucket 1
+ EXPECT_TRUE(f._bmj.done());
+ EXPECT_EQUAL(3u, f.docsMoved().size());
+ EXPECT_EQUAL(1u, f.bucketsModified().size());
+ EXPECT_EQUAL(f._ready.bucket(1), f.bucketsModified()[0]);
+}
+
+
+TEST_F("require that thawed bucket is moved before other buckets", ControllerFixture)
+{
+ // bucket 2 should be moved but is frozen.
+ // bucket 3 & 4 should also be moved
+ f.addReady(f._ready.bucket(1));
+ f.addReady(f._notReady.bucket(3));
+ f.addReady(f._notReady.bucket(4));
+ f.addFrozen(f._ready.bucket(2));
+ f._bmj.scanAndMove(3, 2); // delay bucket 2, move bucket 3
+ f.remFrozen(f._ready.bucket(2));
+ EXPECT_FALSE(f._bmj.done());
+ EXPECT_EQUAL(2u, f.docsMoved().size());
+ EXPECT_EQUAL(1u, f.bucketsModified().size());
+ EXPECT_EQUAL(f._notReady.bucket(3), f.bucketsModified()[0]);
+ f._bmj.scanAndMove(2, 2); // move thawed bucket 2
+ EXPECT_FALSE(f._bmj.done());
+ EXPECT_EQUAL(4u, f.docsMoved().size());
+ EXPECT_EQUAL(2u, f.bucketsModified().size());
+ EXPECT_EQUAL(f._ready.bucket(2), f.bucketsModified()[1]);
+ f._bmj.scanAndMove(1, 4); // move bucket 4
+ EXPECT_TRUE(f._bmj.done());
+ EXPECT_EQUAL(7u, f.docsMoved().size());
+ EXPECT_EQUAL(3u, f.bucketsModified().size());
+ EXPECT_EQUAL(f._notReady.bucket(4), f.bucketsModified()[2]);
+}
+
+
+TEST_F("require that re-frozen thawed bucket is not moved until re-thawed", ControllerFixture)
+{
+ // bucket 1 should be moved but is re-frozen
+ f.addReady(f._ready.bucket(2));
+ f.addFrozen(f._ready.bucket(1));
+ f._bmj.scanAndMove(1, 0); // scan, delay frozen bucket 1
+ EXPECT_FALSE(f._bmj.done());
+ EXPECT_EQUAL(0u, f.docsMoved().size());
+ EXPECT_EQUAL(0u, f.bucketsModified().size());
+ EXPECT_EQUAL(1u, f.calcAsked().size());
+ EXPECT_EQUAL(f._ready.bucket(1), f.calcAsked()[0]);
+ f.remFrozen(f._ready.bucket(1));
+ f.addFrozen(f._ready.bucket(1));
+ f._bmj.scanAndMove(1, 0); // scan, but nothing to move
+ EXPECT_FALSE(f._bmj.done());
+ EXPECT_EQUAL(0u, f.docsMoved().size());
+ EXPECT_EQUAL(0u, f.bucketsModified().size());
+ EXPECT_EQUAL(3u, f.calcAsked().size());
+ EXPECT_EQUAL(f._ready.bucket(1), f.calcAsked()[1]);
+ EXPECT_EQUAL(f._ready.bucket(2), f.calcAsked()[2]);
+ f.remFrozen(f._ready.bucket(1));
+ f._bmj.scanAndMove(3, 4); // move delayed and thawed bucket 1
+ EXPECT_FALSE(f._bmj.done());
+ EXPECT_EQUAL(3u, f.docsMoved().size());
+ EXPECT_EQUAL(1u, f.bucketsModified().size());
+ EXPECT_EQUAL(f._ready.bucket(1), f.bucketsModified()[0]);
+ EXPECT_EQUAL(4u, f.calcAsked().size());
+ EXPECT_EQUAL(f._ready.bucket(1), f.calcAsked()[3]);
+ f._bmj.scanAndMove(2, 0); // scan the rest
+ EXPECT_TRUE(f._bmj.done());
+ EXPECT_EQUAL(3u, f.docsMoved().size());
+ EXPECT_EQUAL(1u, f.bucketsModified().size());
+ EXPECT_EQUAL(6u, f.calcAsked().size());
+}
+
+
+TEST_F("require that thawed bucket is not moved if new calculator does not say so", ControllerFixture)
+{
+ // bucket 3 should be moved
+ f.addReady(f._ready.bucket(1));
+ f.addReady(f._ready.bucket(2));
+ f.addReady(f._notReady.bucket(3));
+ f.addFrozen(f._notReady.bucket(3));
+ f._bmj.scanAndMove(4, 3); // scan all, delay frozen bucket 3
+ f.remFrozen(f._notReady.bucket(3));
+ EXPECT_FALSE(f._bmj.done());
+ EXPECT_EQUAL(0u, f.docsMoved().size());
+ EXPECT_EQUAL(0u, f.bucketsModified().size());
+ EXPECT_EQUAL(4u, f.calcAsked().size());
+ f.changeCalc();
+ f.remReady(f._notReady.bucket(3));
+ f._bmj.scanAndMove(0, 3); // consider delayed bucket 3
+ EXPECT_EQUAL(0u, f.docsMoved().size());
+ EXPECT_EQUAL(0u, f.bucketsModified().size());
+ EXPECT_EQUAL(1u, f.calcAsked().size());
+ EXPECT_EQUAL(f._notReady.bucket(3), f.calcAsked()[0]);
+}
+
+
+TEST_F("require that current bucket mover is cancelled if bucket is frozen", ControllerFixture)
+{
+ // bucket 3 should be moved
+ f.addReady(f._ready.bucket(1));
+ f.addReady(f._ready.bucket(2));
+ f.addReady(f._notReady.bucket(3));
+ f._bmj.scanAndMove(3, 1); // move 1 doc from bucket 3
+ EXPECT_FALSE(f._bmj.done());
+ EXPECT_EQUAL(1u, f.docsMoved().size());
+ EXPECT_EQUAL(0u, f.bucketsModified().size());
+ EXPECT_EQUAL(3u, f.calcAsked().size());
+ EXPECT_EQUAL(f._ready.bucket(1), f.calcAsked()[0]);
+ EXPECT_EQUAL(f._ready.bucket(2), f.calcAsked()[1]);
+ EXPECT_EQUAL(f._notReady.bucket(3), f.calcAsked()[2]);
+
+ f.addFrozen(f._notReady.bucket(3));
+ f._bmj.scanAndMove(1, 3); // done scanning
+ EXPECT_FALSE(f._bmj.done());
+ EXPECT_EQUAL(1u, f.docsMoved().size());
+ EXPECT_EQUAL(0u, f.bucketsModified().size());
+ EXPECT_EQUAL(3u, f.calcAsked().size());
+
+ f._bmj.scanAndMove(1, 3); // done scanning
+ f.remFrozen(f._notReady.bucket(3));
+ EXPECT_FALSE(f._bmj.done());
+ EXPECT_EQUAL(1u, f.docsMoved().size());
+ EXPECT_EQUAL(0u, f.bucketsModified().size());
+ EXPECT_EQUAL(4u, f.calcAsked().size());
+
+ EXPECT_EQUAL(f._notReady.bucket(4), f.calcAsked()[3]);
+ f._bmj.scanAndMove(0, 2); // move all docs from bucket 3 again
+ EXPECT_TRUE(f._bmj.done());
+ EXPECT_EQUAL(3u, f.docsMoved().size());
+ EXPECT_EQUAL(1u, f.bucketsModified().size());
+ EXPECT_EQUAL(f._notReady.bucket(3), f.bucketsModified()[0]);
+ EXPECT_EQUAL(5u, f.calcAsked().size());
+ EXPECT_EQUAL(f._notReady.bucket(3), f.calcAsked()[4]);
+}
+
+
+TEST_F("require that current bucket mover is not cancelled if another bucket is frozen", ControllerFixture)
+{
+ // bucket 3 and 4 should be moved
+ f.addReady(f._ready.bucket(1));
+ f.addReady(f._ready.bucket(2));
+ f.addReady(f._notReady.bucket(3));
+ f.addReady(f._notReady.bucket(4));
+ f._bmj.scanAndMove(3, 1); // move 1 doc from bucket 3
+ EXPECT_FALSE(f._bmj.done());
+ EXPECT_EQUAL(1u, f.docsMoved().size());
+ EXPECT_EQUAL(0u, f.bucketsModified().size());
+ EXPECT_EQUAL(3u, f.calcAsked().size());
+ f.addFrozen(f._notReady.bucket(4));
+ f._bmj.scanAndMove(1, 2); // move rest of docs from bucket 3
+ EXPECT_FALSE(f._bmj.done());
+ EXPECT_EQUAL(2u, f.docsMoved().size());
+ EXPECT_EQUAL(1u, f.bucketsModified().size());
+ EXPECT_EQUAL(f._notReady.bucket(3), f.bucketsModified()[0]);
+ EXPECT_EQUAL(3u, f.calcAsked().size());
+}
+
+
+TEST_F("require that active bucket is not moved from ready to not ready until being not active", ControllerFixture)
+{
+ // bucket 1 should be moved but is active
+ f.addReady(f._ready.bucket(2));
+ f.activateBucket(f._ready.bucket(1));
+ f._bmj.scanAndMove(4, 3); // scan all, delay active bucket 1
+ EXPECT_TRUE(f._bmj.done());
+ EXPECT_EQUAL(0u, f.docsMoved().size());
+ EXPECT_EQUAL(0u, f.bucketsModified().size());
+
+ f.deactivateBucket(f._ready.bucket(1));
+ EXPECT_FALSE(f._bmj.done());
+ f._bmj.scanAndMove(0, 3); // move delayed and de-activated bucket 1
+ EXPECT_TRUE(f._bmj.done());
+ EXPECT_EQUAL(3u, f.docsMoved().size());
+ EXPECT_EQUAL(1u, f.bucketsModified().size());
+ EXPECT_EQUAL(f._ready.bucket(1), f.bucketsModified()[0]);
+}
+
+
+TEST_F("require that de-activated bucket is moved before other buckets", OnlyReadyControllerFixture)
+{
+ // bucket 1, 2, 3 should be moved (but bucket 1 is active)
+ f.addReady(f._ready.bucket(4));
+ f.activateBucket(f._ready.bucket(1));
+ f._bmj.scanAndMove(2, 4); // delay bucket 1, move bucket 2
+ EXPECT_FALSE(f._bmj.done());
+ EXPECT_EQUAL(2u, f.docsMoved().size());
+ EXPECT_EQUAL(1u, f.bucketsModified().size());
+ EXPECT_EQUAL(f._ready.bucket(2), f.bucketsModified()[0]);
+
+ f.deactivateBucket(f._ready.bucket(1));
+ f._bmj.scanAndMove(2, 4); // move de-activated bucket 1
+ EXPECT_FALSE(f._bmj.done());
+ EXPECT_EQUAL(3u, f.docsMoved().size());
+ EXPECT_EQUAL(2u, f.bucketsModified().size());
+ EXPECT_EQUAL(f._ready.bucket(1), f.bucketsModified()[1]);
+
+ f._bmj.scanAndMove(2, 4); // move bucket 3
+ // EXPECT_TRUE(f._bmj.done()); // TODO(geirst): fix this
+ EXPECT_EQUAL(6u, f.docsMoved().size());
+ EXPECT_EQUAL(3u, f.bucketsModified().size());
+ EXPECT_EQUAL(f._ready.bucket(3), f.bucketsModified()[2]);
+}
+
+
+TEST_F("require that de-activated bucket is not moved if new calculator does not say so", ControllerFixture)
+{
+ // bucket 1 should be moved
+ f.addReady(f._ready.bucket(2));
+ f.activateBucket(f._ready.bucket(1));
+ f._bmj.scanAndMove(4, 3); // scan all, delay active bucket 1
+ EXPECT_EQUAL(0u, f.docsMoved().size());
+ EXPECT_EQUAL(0u, f.bucketsModified().size());
+
+ f.deactivateBucket(f._ready.bucket(1));
+ f.addReady(f._ready.bucket(1));
+ f.changeCalc();
+    f._bmj.scanAndMove(0, 3); // consider delayed bucket 1
+ EXPECT_EQUAL(0u, f.docsMoved().size());
+ EXPECT_EQUAL(0u, f.bucketsModified().size());
+ EXPECT_EQUAL(1u, f.calcAsked().size());
+ EXPECT_EQUAL(f._ready.bucket(1), f.calcAsked()[0]);
+}
+
+
+TEST_F("require that de-activated bucket is not moved if frozen as well", ControllerFixture)
+{
+ // bucket 1 should be moved
+ f.addReady(f._ready.bucket(2));
+ f.activateBucket(f._ready.bucket(1));
+ f._bmj.scanAndMove(4, 3); // scan all, delay active bucket 1
+ EXPECT_EQUAL(0u, f.docsMoved().size());
+ EXPECT_EQUAL(0u, f.bucketsModified().size());
+
+ f.addFrozen(f._ready.bucket(1));
+ f.deactivateBucket(f._ready.bucket(1));
+ f._bmj.scanAndMove(0, 3); // bucket 1 de-activated but frozen
+ EXPECT_EQUAL(0u, f.docsMoved().size());
+ EXPECT_EQUAL(0u, f.bucketsModified().size());
+
+ f.remFrozen(f._ready.bucket(1));
+ f._bmj.scanAndMove(0, 3); // handle thawed bucket 1
+ EXPECT_EQUAL(3u, f.docsMoved().size());
+ EXPECT_EQUAL(1u, f.bucketsModified().size());
+ EXPECT_EQUAL(f._ready.bucket(1), f.bucketsModified()[0]);
+}
+
+
+TEST_F("require that thawed bucket is not moved if active as well", ControllerFixture)
+{
+ // bucket 1 should be moved
+ f.addReady(f._ready.bucket(2));
+ f.addFrozen(f._ready.bucket(1));
+ f._bmj.scanAndMove(4, 3); // scan all, delay frozen bucket 1
+ EXPECT_EQUAL(0u, f.docsMoved().size());
+ EXPECT_EQUAL(0u, f.bucketsModified().size());
+
+ f.activateBucket(f._ready.bucket(1));
+ f.remFrozen(f._ready.bucket(1));
+ f._bmj.scanAndMove(0, 3); // bucket 1 thawed but active
+ EXPECT_EQUAL(0u, f.docsMoved().size());
+ EXPECT_EQUAL(0u, f.bucketsModified().size());
+
+ f.deactivateBucket(f._ready.bucket(1));
+ f._bmj.scanAndMove(0, 3); // handle de-activated bucket 1
+ EXPECT_EQUAL(3u, f.docsMoved().size());
+ EXPECT_EQUAL(1u, f.bucketsModified().size());
+ EXPECT_EQUAL(f._ready.bucket(1), f.bucketsModified()[0]);
+}
+
+
+TEST_MAIN()
+{
+ TEST_RUN_ALL();
+}
+
diff --git a/searchcore/src/tests/proton/documentdb/documentdb_test.cpp b/searchcore/src/tests/proton/documentdb/documentdb_test.cpp
new file mode 100644
index 00000000000..cba08197b56
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/documentdb_test.cpp
@@ -0,0 +1,218 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("documentdb_test");
+
+#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/messagebus/emptyreply.h>
+#include <vespa/messagebus/testlib/receptor.h>
+#include <vespa/searchcommon/common/schema.h>
+#include <vespa/searchcore/proton/attribute/flushableattribute.h>
+#include <vespa/searchcore/proton/docsummary/summaryflushtarget.h>
+#include <vespa/searchcore/proton/documentmetastore/documentmetastoreflushtarget.h>
+#include <vespa/searchcore/proton/flushengine/threadedflushtarget.h>
+#include <vespa/searchcore/proton/server/document_db_explorer.h>
+#include <vespa/searchcore/proton/server/documentdb.h>
+#include <vespa/searchcore/proton/server/memoryconfigstore.h>
+#include <vespa/searchcore/proton/metrics/job_tracked_flush_target.h>
+#include <vespa/searchcore/proton/metrics/metricswireservice.h>
+#include <vespa/searchcorespi/index/indexflushtarget.h>
+#include <vespa/searchlib/index/dummyfileheadercontext.h>
+#include <vespa/searchlib/transactionlog/translogserver.h>
+#include <tests/proton/common/dummydbowner.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+using document::DocumentType;
+using document::DocumentTypeRepo;
+using search::index::Schema;
+using search::transactionlog::TransLogServer;
+using namespace proton;
+using namespace vespalib::slime;
+using search::TuneFileDocumentDB;
+using document::DocumenttypesConfig;
+using search::index::DummyFileHeaderContext;
+using searchcorespi::index::IndexFlushTarget;
+using vespa::config::search::core::ProtonConfig;
+using vespalib::Slime;
+
+namespace {
+
+class LocalTransport : public FeedToken::ITransport {
+ mbus::Receptor _receptor;
+
+public:
+ void send(mbus::Reply::UP reply) {
+ fprintf(stderr, "in local transport.");
+ _receptor.handleReply(std::move(reply));
+ }
+
+ mbus::Reply::UP getReply() {
+ return _receptor.getReply(10000);
+ }
+};
+
+struct Fixture {
+ DummyWireService _dummy;
+ DummyDBOwner _dummyDBOwner;
+ vespalib::ThreadStackExecutor _summaryExecutor;
+ DocumentDB::SP _db;
+ DummyFileHeaderContext _fileHeaderContext;
+ TransLogServer _tls;
+ matching::QueryLimiter _queryLimiter;
+ vespalib::Clock _clock;
+
+ Fixture();
+};
+
+Fixture::Fixture()
+ : _summaryExecutor(8, 128*1024),
+ _tls("tmp", 9014, ".", _fileHeaderContext) {
+
+ DocumentDBConfig::DocumenttypesConfigSP documenttypesConfig(new DocumenttypesConfig());
+ DocumentType docType("typea", 0);
+ DocumentTypeRepo::SP repo(new DocumentTypeRepo(docType));
+ TuneFileDocumentDB::SP tuneFileDocumentDB(new TuneFileDocumentDB);
+ config::DirSpec spec("cfg");
+ DocumentDBConfigHelper mgr(spec, "typea");
+ BootstrapConfig::SP
+ b(new BootstrapConfig(1,
+ documenttypesConfig,
+ repo,
+ BootstrapConfig::ProtonConfigSP(new ProtonConfig()),
+ tuneFileDocumentDB));
+ mgr.forwardConfig(b);
+ mgr.nextGeneration(0);
+ _db.reset(new DocumentDB(".", mgr.getConfig(), "tcp/localhost:9014",
+ _queryLimiter, _clock, DocTypeName("typea"),
+ ProtonConfig(),
+ _dummyDBOwner, _summaryExecutor, _summaryExecutor, NULL, _dummy, _fileHeaderContext,
+ ConfigStore::UP(new MemoryConfigStore),
+ std::make_shared<vespalib::ThreadStackExecutor>
+ (16, 128 * 1024)));
+ _db->start();
+ _db->waitForOnlineState();
+}
+
+const IFlushTarget *
+extractRealFlushTarget(const IFlushTarget *target)
+{
+ const JobTrackedFlushTarget *tracked =
+ dynamic_cast<const JobTrackedFlushTarget*>(target);
+ if (tracked != nullptr) {
+ const ThreadedFlushTarget *threaded =
+ dynamic_cast<const ThreadedFlushTarget*>(&tracked->getTarget());
+ if (threaded != nullptr) {
+ return threaded->getFlushTarget().get();
+ }
+ }
+ return nullptr;
+}
+
+TEST_F("requireThatIndexFlushTargetIsUsed", Fixture) {
+ std::vector<IFlushTarget::SP> targets = f._db->getFlushTargets();
+ ASSERT_TRUE(!targets.empty());
+ const IndexFlushTarget *index = 0;
+ for (size_t i = 0; i < targets.size(); ++i) {
+ const IFlushTarget *target = extractRealFlushTarget(targets[i].get());
+ if (target != NULL) {
+ index = dynamic_cast<const IndexFlushTarget *>(target);
+ }
+ if (index) {
+ break;
+ }
+ }
+ ASSERT_TRUE(index);
+}
+
+template <typename Target>
+size_t getNumTargets(const std::vector<IFlushTarget::SP> & targets)
+{
+ size_t retval = 0;
+ for (size_t i = 0; i < targets.size(); ++i) {
+ const IFlushTarget *target = extractRealFlushTarget(targets[i].get());
+ if (dynamic_cast<const Target*>(target) == NULL) {
+ continue;
+ }
+ retval++;
+ }
+ return retval;
+}
+
+TEST_F("requireThatFlushTargetsAreNamedBySubDocumentDB", Fixture) {
+ std::vector<IFlushTarget::SP> targets = f._db->getFlushTargets();
+ ASSERT_TRUE(!targets.empty());
+ for (const IFlushTarget::SP & target : f._db->getFlushTargets()) {
+ vespalib::string name = target->getName();
+ EXPECT_TRUE((name.find("0.ready.") == 0) ||
+ (name.find("1.removed.") == 0) ||
+ (name.find("2.notready.") == 0));
+ }
+}
+
+TEST_F("requireThatAttributeFlushTargetsAreUsed", Fixture) {
+ std::vector<IFlushTarget::SP> targets = f._db->getFlushTargets();
+ ASSERT_TRUE(!targets.empty());
+ size_t numAttrs = getNumTargets<FlushableAttribute>(targets);
+ // attr1 defined in attributes.cfg
+ EXPECT_EQUAL(1u, numAttrs);
+}
+
+TEST_F("requireThatDocumentMetaStoreFlushTargetIsUsed", Fixture) {
+ std::vector<IFlushTarget::SP> targets = f._db->getFlushTargets();
+ ASSERT_TRUE(!targets.empty());
+ size_t numMetaStores =
+ getNumTargets<DocumentMetaStoreFlushTarget>(targets);
+ // document meta store
+ EXPECT_EQUAL(3u, numMetaStores);
+}
+
+TEST_F("requireThatSummaryFlushTargetsIsUsed", Fixture) {
+ std::vector<IFlushTarget::SP> targets = f._db->getFlushTargets();
+ ASSERT_TRUE(!targets.empty());
+ size_t num = getNumTargets<SummaryFlushTarget>(targets);
+ EXPECT_EQUAL(3u, num);
+}
+
+TEST_F("requireThatCorrectStatusIsReported", Fixture) {
+ StatusReport::UP report(f._db->reportStatus());
+ EXPECT_EQUAL("documentdb:typea", report->getComponent());
+ EXPECT_EQUAL(StatusReport::UPOK, report->getState());
+ EXPECT_EQUAL("", report->getMessage());
+}
+
+TEST_F("requireThatStateIsReported", Fixture)
+{
+ Slime slime;
+ SlimeInserter inserter(slime);
+ DocumentDBExplorer(f._db).get_state(inserter, false);
+
+ EXPECT_EQUAL(
+ "{\n"
+ " \"documentType\": \"typea\",\n"
+ " \"status\": {\n"
+ " \"state\": \"ONLINE\",\n"
+ " \"configState\": \"OK\"\n"
+ " },\n"
+ " \"documents\": {\n"
+ " \"active\": 0,\n"
+ " \"indexed\": 0,\n"
+ " \"stored\": 0,\n"
+ " \"removed\": 0\n"
+ " }\n"
+ "}\n",
+ slime.toString());
+}
+
+TEST_F("require that session manager can be explored", Fixture)
+{
+ EXPECT_TRUE(DocumentDBExplorer(f._db).get_child("session").get() != nullptr);
+}
+
+} // namespace
+
+TEST_MAIN() {
+ DummyFileHeaderContext::setCreator("documentdb_test");
+ FastOS_File::MakeDirectory("typea");
+ TEST_RUN_ALL();
+ FastOS_FileInterface::EmptyAndRemoveDirectory("typea");
+}
diff --git a/searchcore/src/tests/proton/documentdb/documentdb_test.sh b/searchcore/src/tests/proton/documentdb/documentdb_test.sh
new file mode 100644
index 00000000000..272ecacbd8b
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/documentdb_test.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+$VALGRIND ./searchcore_documentdb_test_app
+rm -rf typea tmp
diff --git a/searchcore/src/tests/proton/documentdb/documentdbconfig/.gitignore b/searchcore/src/tests/proton/documentdb/documentdbconfig/.gitignore
new file mode 100644
index 00000000000..18a34a296b9
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/documentdbconfig/.gitignore
@@ -0,0 +1 @@
+searchcore_documentdbconfig_test_app
diff --git a/searchcore/src/tests/proton/documentdb/documentdbconfig/CMakeLists.txt b/searchcore/src/tests/proton/documentdb/documentdbconfig/CMakeLists.txt
new file mode 100644
index 00000000000..b9105fc9c91
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/documentdbconfig/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_documentdbconfig_test_app
+ SOURCES
+ documentdbconfig_test.cpp
+ DEPENDS
+ searchcore_server
+)
+vespa_add_test(NAME searchcore_documentdbconfig_test_app COMMAND searchcore_documentdbconfig_test_app)
diff --git a/searchcore/src/tests/proton/documentdb/documentdbconfig/DESC b/searchcore/src/tests/proton/documentdb/documentdbconfig/DESC
new file mode 100644
index 00000000000..e2893dff557
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/documentdbconfig/DESC
@@ -0,0 +1 @@
+DocumentDBConfig test. Take a look at documentdbconfig_test.cpp for details.
diff --git a/searchcore/src/tests/proton/documentdb/documentdbconfig/FILES b/searchcore/src/tests/proton/documentdb/documentdbconfig/FILES
new file mode 100644
index 00000000000..5298158fdbd
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/documentdbconfig/FILES
@@ -0,0 +1 @@
+documentdbconfig_test.cpp
diff --git a/searchcore/src/tests/proton/documentdb/documentdbconfig/documentdbconfig_test.cpp b/searchcore/src/tests/proton/documentdb/documentdbconfig/documentdbconfig_test.cpp
new file mode 100644
index 00000000000..5f9de9ae545
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/documentdbconfig/documentdbconfig_test.cpp
@@ -0,0 +1,70 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("documentdbconfig_test");
+
+#include <vespa/searchcore/proton/server/documentdbconfig.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+using namespace document;
+using namespace proton;
+using namespace search;
+using namespace search::index;
+using namespace vespa::config::search;
+using std::shared_ptr;
+using std::make_shared;
+
+typedef shared_ptr<DocumentDBConfig> DDBCSP;
+
+namespace
+{
+
+DDBCSP
+getConfig(int64_t generation, const Schema::SP &schema,
+ shared_ptr<DocumentTypeRepo> repo,
+ const RankProfilesConfig &rankProfiles)
+{
+ return make_shared<DocumentDBConfig>(
+ generation,
+ make_shared<RankProfilesConfig>(rankProfiles),
+ make_shared<IndexschemaConfig>(),
+ make_shared<AttributesConfig>(),
+ make_shared<SummaryConfig>(),
+ make_shared<SummarymapConfig>(),
+ make_shared<summary::JuniperrcConfig>(),
+ make_shared<DocumenttypesConfig>(),
+ repo,
+ make_shared<TuneFileDocumentDB>(),
+ schema,
+ make_shared<DocumentDBMaintenanceConfig>(),
+ "client", "test");
+}
+
+}
+
+TEST("Test that makeReplayConfig drops unneeded configs")
+{
+ RankProfilesConfigBuilder rp;
+ using DDBC = DocumentDBConfig;
+ shared_ptr<DocumentTypeRepo> repo(make_shared<DocumentTypeRepo>());
+ Schema::SP schema(make_shared<Schema>());
+ DDBCSP cfg0 = getConfig(4, schema, repo, rp);
+ rp.rankprofile.resize(1);
+ RankProfilesConfigBuilder::Rankprofile &rpr = rp.rankprofile.back();
+ rpr.name = "dummy";
+ DDBCSP cfg1 = getConfig(4, schema, repo, rp);
+ EXPECT_FALSE(*cfg0 == *cfg1);
+ DDBCSP cfg2 = DocumentDBConfig::makeReplayConfig(cfg1);
+ EXPECT_TRUE(*cfg0 == *cfg2);
+ EXPECT_TRUE(cfg0->getOriginalConfig().get() == nullptr);
+ EXPECT_TRUE(cfg1->getOriginalConfig().get() == nullptr);
+ EXPECT_TRUE(cfg2->getOriginalConfig().get() == cfg1.get());
+ EXPECT_TRUE(DDBC::preferOriginalConfig(cfg0).get() == cfg0.get());
+ EXPECT_TRUE(DDBC::preferOriginalConfig(cfg1).get() == cfg1.get());
+ EXPECT_TRUE(DDBC::preferOriginalConfig(cfg2).get() == cfg1.get());
+ DDBCSP cfg3;
+ EXPECT_TRUE(DDBC::preferOriginalConfig(cfg3).get() == nullptr);
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/documentdb/documentdbconfigscout/.gitignore b/searchcore/src/tests/proton/documentdb/documentdbconfigscout/.gitignore
new file mode 100644
index 00000000000..482e85e5db0
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/documentdbconfigscout/.gitignore
@@ -0,0 +1 @@
+searchcore_documentdbconfigscout_test_app
diff --git a/searchcore/src/tests/proton/documentdb/documentdbconfigscout/CMakeLists.txt b/searchcore/src/tests/proton/documentdb/documentdbconfigscout/CMakeLists.txt
new file mode 100644
index 00000000000..e1dea56782d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/documentdbconfigscout/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_documentdbconfigscout_test_app
+ SOURCES
+ documentdbconfigscout_test.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_attribute
+)
+vespa_add_test(NAME searchcore_documentdbconfigscout_test_app COMMAND searchcore_documentdbconfigscout_test_app)
diff --git a/searchcore/src/tests/proton/documentdb/documentdbconfigscout/DESC b/searchcore/src/tests/proton/documentdb/documentdbconfigscout/DESC
new file mode 100644
index 00000000000..6585b4bc2b2
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/documentdbconfigscout/DESC
@@ -0,0 +1 @@
+DocumentDBConfigScout test. Take a look at documentdbconfigscout_test.cpp for details.
diff --git a/searchcore/src/tests/proton/documentdb/documentdbconfigscout/FILES b/searchcore/src/tests/proton/documentdb/documentdbconfigscout/FILES
new file mode 100644
index 00000000000..38b76884ae0
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/documentdbconfigscout/FILES
@@ -0,0 +1 @@
+documentdbconfigscout_test.cpp
diff --git a/searchcore/src/tests/proton/documentdb/documentdbconfigscout/documentdbconfigscout_test.cpp b/searchcore/src/tests/proton/documentdb/documentdbconfigscout/documentdbconfigscout_test.cpp
new file mode 100644
index 00000000000..e935fff1431
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/documentdbconfigscout/documentdbconfigscout_test.cpp
@@ -0,0 +1,264 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("documentdbconfigscout_test");
+
+#include <vespa/searchcore/proton/server/documentdbconfig.h>
+#include <vespa/searchcore/proton/server/documentdbconfigscout.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+using namespace document;
+using namespace proton;
+using namespace search;
+using namespace search::index;
+using namespace vespa::config::search;
+using std::shared_ptr;
+using std::make_shared;
+
+typedef shared_ptr<DocumentDBConfig> DDBCSP;
+
+namespace
+{
+
+DDBCSP
+getConfig(int64_t generation, const Schema::SP &schema,
+ shared_ptr<DocumentTypeRepo> repo,
+ const AttributesConfig &attributes)
+{
+ return make_shared<DocumentDBConfig>(
+ generation,
+ make_shared<RankProfilesConfig>(),
+ make_shared<IndexschemaConfig>(),
+ make_shared<AttributesConfig>(attributes),
+ make_shared<SummaryConfig>(),
+ make_shared<SummarymapConfig>(),
+ make_shared<summary::JuniperrcConfig>(),
+ make_shared<DocumenttypesConfig>(),
+ repo,
+ make_shared<TuneFileDocumentDB>(),
+ schema,
+ make_shared<DocumentDBMaintenanceConfig>(),
+ "client", "test");
+}
+
+
+bool
+assertDefaultAttribute(const AttributesConfig::Attribute &attribute,
+ const vespalib::string &name)
+{
+ if (!EXPECT_EQUAL(name, attribute.name)) {
+ return false;
+ }
+ if (!EXPECT_FALSE(attribute.fastsearch)) {
+ return false;
+ }
+ if (!EXPECT_FALSE(attribute.huge)) {
+ return false;
+ }
+ if (!EXPECT_FALSE(attribute.enablebitvectors)) {
+ return false;
+ }
+ if (!EXPECT_FALSE(attribute.enableonlybitvector)) {
+ return false;
+ }
+ return true;
+}
+
+
+bool
+assertFastSearchAttribute(const AttributesConfig::Attribute &attribute,
+ const vespalib::string &name)
+{
+ if (!EXPECT_EQUAL(name, attribute.name)) {
+ return false;
+ }
+ if (!EXPECT_TRUE(attribute.fastsearch)) {
+ return false;
+ }
+ if (!EXPECT_FALSE(attribute.huge)) {
+ return false;
+ }
+ if (!EXPECT_FALSE(attribute.enablebitvectors)) {
+ return false;
+ }
+ if (!EXPECT_FALSE(attribute.enableonlybitvector)) {
+ return false;
+ }
+ return true;
+}
+
+
+bool
+assertFastSearchAndMoreAttribute(const AttributesConfig::Attribute &attribute,
+ const vespalib::string &name)
+{
+ if (!EXPECT_EQUAL(name, attribute.name)) {
+ return false;
+ }
+ if (!EXPECT_TRUE(attribute.fastsearch)) {
+ return false;
+ }
+ if (!EXPECT_TRUE(attribute.huge)) {
+ return false;
+ }
+ if (!EXPECT_TRUE(attribute.enablebitvectors)) {
+ return false;
+ }
+ if (!EXPECT_TRUE(attribute.enableonlybitvector)) {
+ return false;
+ }
+ return true;
+}
+
+
+bool
+assertAttributes(const AttributesConfig::AttributeVector &attributes)
+{
+ if (!EXPECT_EQUAL(4u, attributes.size())) {
+ return false;
+ }
+ if (!assertDefaultAttribute(attributes[0], "a1")) {
+ return false;
+ }
+ if (!assertDefaultAttribute(attributes[1], "a2")) {
+ return false;
+ }
+ if (!assertDefaultAttribute(attributes[2], "a3")) {
+ return false;
+ }
+ if (!assertDefaultAttribute(attributes[3], "a4")) {
+ return false;
+ }
+ return true;
+}
+
+
+bool
+assertLiveAttributes(const AttributesConfig::AttributeVector &attributes)
+{
+ if (!EXPECT_EQUAL(5u, attributes.size())) {
+ return false;
+ }
+ if (!assertFastSearchAttribute(attributes[0], "a0")) {
+ return false;
+ }
+ if (!assertFastSearchAndMoreAttribute(attributes[1], "a1")) {
+ return false;
+ }
+ if (!assertFastSearchAttribute(attributes[2], "a2")) {
+ return false;
+ }
+ if (!assertFastSearchAttribute(attributes[3], "a3")) {
+ return false;
+ }
+ if (!assertFastSearchAttribute(attributes[4], "a4")) {
+ return false;
+ }
+ return true;
+}
+
+
+bool
+assertScoutedAttributes(const AttributesConfig::AttributeVector &attributes)
+{
+ if (!EXPECT_EQUAL(4u, attributes.size())) {
+ return false;
+ }
+ if (!assertFastSearchAndMoreAttribute(attributes[0], "a1")) {
+ return false;
+ }
+ if (!assertDefaultAttribute(attributes[1], "a2")) {
+ return false;
+ }
+ if (!assertDefaultAttribute(attributes[2], "a3")) {
+ return false;
+ }
+ if (!assertDefaultAttribute(attributes[3], "a4")) {
+ return false;
+ }
+ return true;
+}
+
+
+AttributesConfig::Attribute
+setupDefaultAttribute(const vespalib::string name)
+{
+ AttributesConfig::Attribute attribute;
+ attribute.name = name;
+ return attribute;
+}
+
+
+AttributesConfig::Attribute
+setupFastSearchAttribute(const vespalib::string name)
+{
+ AttributesConfig::Attribute attribute;
+ attribute.name = name;
+ attribute.fastsearch = true;
+ return attribute;
+}
+
+
+AttributesConfig::Attribute
+setupFastSearchAndMoreAttribute(const vespalib::string name)
+{
+ AttributesConfig::Attribute attribute;
+ attribute.name = name;
+ attribute.fastsearch = true;
+ attribute.huge = true;
+ attribute.enablebitvectors = true;
+ attribute.enableonlybitvector = true;
+ return attribute;
+}
+
+
+void
+setupDefaultAttributes(AttributesConfigBuilder::AttributeVector &attributes)
+{
+ attributes.push_back(setupDefaultAttribute("a1"));
+ attributes.push_back(setupDefaultAttribute("a2"));
+ attributes.push_back(setupDefaultAttribute("a3"));
+ attributes.push_back(setupDefaultAttribute("a4"));
+}
+
+
+void
+setupLiveAttributes(AttributesConfigBuilder::AttributeVector &attributes)
+{
+ attributes.push_back(setupFastSearchAttribute("a0"));
+ attributes.push_back(setupFastSearchAndMoreAttribute("a1"));
+ attributes.push_back(setupFastSearchAttribute("a2"));
+ attributes.back().datatype = AttributesConfig::Attribute::INT8;
+ attributes.push_back(setupFastSearchAttribute("a3"));
+ attributes.back().collectiontype = AttributesConfig::Attribute::ARRAY;
+ attributes.push_back(setupFastSearchAttribute("a4"));
+ attributes.back().createifnonexistent = true;
+}
+
+}
+
+TEST("Test that DocumentDBConfigScout::scout looks ahead")
+{
+ AttributesConfigBuilder attributes;
+ setupDefaultAttributes(attributes.attribute);
+
+ AttributesConfigBuilder liveAttributes;
+ setupLiveAttributes(liveAttributes.attribute);
+
+ shared_ptr<DocumentTypeRepo> repo(make_shared<DocumentTypeRepo>());
+ Schema::SP schema(make_shared<Schema>());
+ DDBCSP cfg = getConfig(4, schema, repo, attributes);
+ DDBCSP liveCfg = getConfig(4, schema, repo, liveAttributes);
+ EXPECT_FALSE(*cfg == *liveCfg);
+ DDBCSP scoutedCfg = DocumentDBConfigScout::scout(cfg, *liveCfg);
+ EXPECT_FALSE(*cfg == *scoutedCfg);
+ EXPECT_FALSE(*liveCfg == *scoutedCfg);
+
+ EXPECT_TRUE(assertAttributes(cfg->getAttributesConfig().attribute));
+ EXPECT_TRUE(assertLiveAttributes(liveCfg->getAttributesConfig().attribute));
+ EXPECT_TRUE(assertScoutedAttributes(scoutedCfg->getAttributesConfig().
+ attribute));
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/documentdb/feedhandler/.gitignore b/searchcore/src/tests/proton/documentdb/feedhandler/.gitignore
new file mode 100644
index 00000000000..b464e44d740
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/feedhandler/.gitignore
@@ -0,0 +1 @@
+searchcore_feedhandler_test_app
diff --git a/searchcore/src/tests/proton/documentdb/feedhandler/CMakeLists.txt b/searchcore/src/tests/proton/documentdb/feedhandler/CMakeLists.txt
new file mode 100644
index 00000000000..756c11f35b4
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/feedhandler/CMakeLists.txt
@@ -0,0 +1,18 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_feedhandler_test_app
+ SOURCES
+ feedhandler_test.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_bucketdb
+ searchcore_persistenceengine
+ searchcore_feedoperation
+ searchcore_matching
+ searchcore_attribute
+ searchcore_pcommon
+ searchcore_grouping
+ searchcore_proton_metrics
+ searchcore_util
+ searchcore_fconfig
+)
+vespa_add_test(NAME searchcore_feedhandler_test_app COMMAND sh feedhandler_test.sh)
diff --git a/searchcore/src/tests/proton/documentdb/feedhandler/DESC b/searchcore/src/tests/proton/documentdb/feedhandler/DESC
new file mode 100644
index 00000000000..c29d8c9a6e9
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/feedhandler/DESC
@@ -0,0 +1 @@
+feedhandler test. Take a look at feedhandler_test.cpp for details.
diff --git a/searchcore/src/tests/proton/documentdb/feedhandler/FILES b/searchcore/src/tests/proton/documentdb/feedhandler/FILES
new file mode 100644
index 00000000000..483ca5f0fcd
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/feedhandler/FILES
@@ -0,0 +1 @@
+feedhandler_test.cpp
diff --git a/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp b/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp
new file mode 100644
index 00000000000..8a7f646383f
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp
@@ -0,0 +1,748 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("feedhandler_test");
+#include <vespa/documentapi/messagebus/documentprotocol.h>
+#include <vespa/documentapi/messagebus/messages/documentreply.h>
+#include <vespa/documentapi/messagebus/messages/removedocumentreply.h>
+#include <vespa/documentapi/messagebus/messages/updatedocumentreply.h>
+#include <vespa/persistence/spi/result.h>
+#include <vespa/searchcore/proton/common/bucketfactory.h>
+#include <vespa/searchcore/proton/metrics/feed_metrics.h>
+#include <vespa/searchcore/proton/feedoperation/moveoperation.h>
+#include <vespa/searchcore/proton/feedoperation/wipehistoryoperation.h>
+#include <vespa/searchcore/proton/server/configstore.h>
+#include <vespa/searchcore/proton/server/executorthreadingservice.h>
+#include <vespa/searchcore/proton/server/feedhandler.h>
+#include <vespa/searchcore/proton/server/ddbstate.h>
+#include <vespa/searchcore/proton/test/dummy_feed_view.h>
+#include <vespa/searchlib/index/docbuilder.h>
+#include <vespa/searchlib/index/dummyfileheadercontext.h>
+#include <vespa/searchlib/transactionlog/translogclient.h>
+#include <vespa/searchlib/transactionlog/translogserver.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/util/closuretask.h>
+#include <vespa/searchcore/proton/bucketdb/bucketdbhandler.h>
+
+using document::BucketId;
+using document::Document;
+using document::DocumentId;
+using document::DocumentTypeRepo;
+using document::DocumentUpdate;
+using document::GlobalId;
+using documentapi::DocumentProtocol;
+using documentapi::DocumentReply;
+using documentapi::RemoveDocumentReply;
+using documentapi::UpdateDocumentReply;
+using mbus::Reply;
+using search::index::DocBuilder;
+using search::index::DummyFileHeaderContext;
+using search::index::Schema;
+using search::SerialNum;
+using search::transactionlog::TransLogServer;
+using storage::spi::PartitionId;
+using storage::spi::RemoveResult;
+using storage::spi::Result;
+using storage::spi::Timestamp;
+using storage::spi::UpdateResult;
+using vespalib::BlockingThreadStackExecutor;
+using vespalib::ThreadStackExecutor;
+using vespalib::ThreadStackExecutorBase;
+using vespalib::makeClosure;
+using vespalib::makeTask;
+using search::makeLambdaTask;
+using namespace proton;
+
+typedef std::unique_ptr<vespalib::CountDownLatch> CountDownLatchUP;
+
+namespace {
+
+struct Rendezvous {
+ vespalib::Gate enter;
+ vespalib::Gate leave;
+ vespalib::Gate gone;
+ typedef std::unique_ptr<Rendezvous> UP;
+ Rendezvous() : enter(), leave(), gone() {}
+ bool run(uint32_t timeout = 80000) {
+ enter.countDown();
+ bool retval = leave.await(timeout);
+ gone.countDown();
+ return retval;
+ }
+ bool waitForEnter(uint32_t timeout = 80000) {
+ return enter.await(timeout);
+ }
+ bool leaveAndWait(uint32_t timeout = 80000) {
+ leave.countDown();
+ return gone.await(timeout);
+ }
+ bool await(uint32_t timeout = 80000) {
+ if (waitForEnter(timeout)) {
+ return leaveAndWait(timeout);
+ }
+ return false;
+ }
+};
+
+
+struct MyOwner : public FeedHandler::IOwner
+{
+ bool rejected_config;
+ bool _allowPrune;
+ int wipe_history_count;
+
+ MyOwner()
+ :
+ rejected_config(false),
+ _allowPrune(false),
+ wipe_history_count(0)
+ {
+ }
+ virtual void performWipeHistory() { ++wipe_history_count; }
+ virtual void onTransactionLogReplayDone() {
+ LOG(info, "MyOwner::onTransactionLogReplayDone()");
+ }
+ virtual void enterRedoReprocessState() {}
+ virtual void onPerformPrune(SerialNum) {}
+ virtual bool isFeedBlockedByRejectedConfig() { return rejected_config; }
+
+ virtual bool
+ getAllowPrune(void) const
+ {
+ return _allowPrune;
+ }
+};
+
+
+struct MyResourceWriteFilter : public IResourceWriteFilter
+{
+ bool _acceptWriteOperation;
+ vespalib::string _message;
+ MyResourceWriteFilter()
+ : _acceptWriteOperation(true),
+ _message()
+ {}
+
+ virtual bool acceptWriteOperation() const override { return _acceptWriteOperation; }
+ virtual State getAcceptState() const override {
+ return IResourceWriteFilter::State(acceptWriteOperation(), _message);
+ }
+};
+
+
+struct MyReplayConfig : public IReplayConfig {
+ virtual void replayConfig(SerialNum) {}
+ virtual void replayWipeHistory(SerialNum, fastos::TimeStamp) {}
+};
+
+void ackToken(FeedToken *token) {
+ if (token != NULL) {
+ token->ack();
+ }
+}
+
+struct MyDocumentMetaStore {
+ struct Entry {
+ DbDocumentId _id;
+ DbDocumentId _prevId;
+ Timestamp _prevTimestamp;
+ Entry() : _id(0, 0), _prevId(0, 0), _prevTimestamp(0) {}
+ Entry(uint32_t lid, uint32_t prevLid, Timestamp prevTimestamp)
+ : _id(0, lid),
+ _prevId(0, prevLid),
+ _prevTimestamp(prevTimestamp)
+ {}
+ };
+ std::map<GlobalId, Entry> _pool;
+ std::map<GlobalId, Entry> _allocated;
+ MyDocumentMetaStore() : _pool(), _allocated() {}
+ MyDocumentMetaStore &insert(const GlobalId &gid, const Entry &e) {
+ _pool[gid] = e;
+ return *this;
+ }
+ MyDocumentMetaStore &allocate(const GlobalId &gid) {
+ auto itr = _pool.find(gid);
+ if (itr != _pool.end()) {
+ _allocated[gid] = itr->second;
+ }
+ return *this;
+ }
+ const Entry *get(const GlobalId &gid) const {
+ auto itr = _allocated.find(gid);
+ if (itr != _allocated.end()) {
+ return &itr->second;
+ }
+ return NULL;
+ }
+};
+
+struct MyFeedView : public test::DummyFeedView {
+ Rendezvous putRdz;
+ bool usePutRdz;
+ CountDownLatchUP putLatch;
+ MyDocumentMetaStore metaStore;
+ int put_count;
+ SerialNum put_serial;
+ int heartbeat_count;
+ int remove_count;
+ int move_count;
+ int prune_removed_count;
+ int update_count;
+ SerialNum update_serial;
+ MyFeedView(const DocumentTypeRepo::SP &dtr) :
+ test::DummyFeedView(dtr),
+ putRdz(),
+ usePutRdz(false),
+ putLatch(),
+ metaStore(),
+ put_count(0),
+ put_serial(0),
+ heartbeat_count(0),
+ remove_count(0),
+ move_count(0),
+ prune_removed_count(0),
+ update_count(0),
+ update_serial(0)
+ {
+ }
+ void resetPutLatch(uint32_t count) { putLatch.reset(new vespalib::CountDownLatch(count)); }
+ virtual void preparePut(PutOperation &op) {
+ prepareDocumentOperation(op, op.getDocument()->getId().getGlobalId());
+ }
+ void prepareDocumentOperation(DocumentOperation &op, const GlobalId &gid) {
+ const MyDocumentMetaStore::Entry *entry = metaStore.get(gid);
+ if (entry != NULL) {
+ op.setDbDocumentId(entry->_id);
+ op.setPrevDbDocumentId(entry->_prevId);
+ op.setPrevTimestamp(entry->_prevTimestamp);
+ }
+ }
+ virtual void handlePut(FeedToken *token, const PutOperation &putOp) {
+ LOG(info, "MyFeedView::handlePut(): docId(%s), putCount(%u), putLatchCount(%u)",
+ putOp.getDocument()->getId().toString().c_str(), put_count,
+ (putLatch.get() != NULL ? putLatch->getCount() : 0u));
+ if (usePutRdz) {
+ putRdz.run();
+ }
+ ++put_count;
+ put_serial = putOp.getSerialNum();
+ metaStore.allocate(putOp.getDocument()->getId().getGlobalId());
+ if (putLatch.get() != NULL) {
+ putLatch->countDown();
+ }
+ ackToken(token);
+ }
+ virtual void prepareUpdate(UpdateOperation &op) {
+ prepareDocumentOperation(op, op.getUpdate()->getId().getGlobalId());
+ }
+ virtual void handleUpdate(FeedToken *token, const UpdateOperation &op) {
+ ++update_count;
+ update_serial = op.getSerialNum();
+ ackToken(token);
+ }
+ virtual void handleRemove(FeedToken *token, const RemoveOperation &)
+ { ++remove_count; ackToken(token); }
+ virtual void handleMove(const MoveOperation &) { ++move_count; }
+ virtual void heartBeat(SerialNum) { ++heartbeat_count; }
+ virtual void handlePruneRemovedDocuments(
+ const PruneRemovedDocumentsOperation &) { ++prune_removed_count; }
+ virtual const ISimpleDocumentMetaStore *getDocumentMetaStorePtr() const {
+ return NULL;
+ }
+};
+
+
+struct SchemaContext {
+ Schema::SP schema;
+ std::unique_ptr<DocBuilder> builder;
+ SchemaContext() :
+ schema(new Schema()),
+ builder()
+ {
+ schema->addIndexField(Schema::IndexField("i1", Schema::STRING, Schema::SINGLE));
+ builder.reset(new DocBuilder(*schema));
+ }
+ DocTypeName getDocType() const {
+ return DocTypeName(builder->getDocumentType().getName());
+ }
+ const document::DocumentTypeRepo::SP &getRepo() const { return builder->getDocumentTypeRepo(); }
+};
+
+
+struct DocumentContext {
+ Document::SP doc;
+ BucketId bucketId;
+ DocumentContext(const vespalib::string &docId, DocBuilder &builder) :
+ doc(builder.startDocument(docId).endDocument().release()),
+ bucketId(BucketFactory::getBucketId(doc->getId()))
+ {
+ }
+};
+
+
+struct UpdateContext {
+ DocumentUpdate::SP update;
+ BucketId bucketId;
+ UpdateContext(const vespalib::string &docId, DocBuilder &builder) :
+ update(new DocumentUpdate(builder.getDocumentType(), DocumentId(docId))),
+ bucketId(BucketFactory::getBucketId(update->getId()))
+ {
+ }
+};
+
+
+struct MyTransport : public FeedToken::ITransport {
+ vespalib::Gate gate;
+ ResultUP result;
+ bool documentWasFound;
+ MyTransport() : gate(), result(), documentWasFound(false) {}
+ virtual void send(Reply::UP, ResultUP res, bool documentWasFound_, double) {
+ result = std::move(res);
+ documentWasFound = documentWasFound_;
+ gate.countDown();
+ }
+};
+
+Reply::UP getReply(uint32_t type) {
+ if (type == DocumentProtocol::REPLY_REMOVEDOCUMENT) {
+ return Reply::UP(new RemoveDocumentReply);
+ } else if (type == DocumentProtocol::REPLY_UPDATEDOCUMENT) {
+ return Reply::UP(new UpdateDocumentReply);
+ }
+ return Reply::UP(new DocumentReply(type));
+}
+
+struct FeedTokenContext {
+ MyTransport transport;
+ FeedToken::UP token_ap;
+ FeedToken &token;
+
+ FeedTokenContext(uint32_t type = 0) :
+ transport(),
+ token_ap(new FeedToken(transport, getReply(type))),
+ token(*token_ap) {
+ token.getReply().getTrace().setLevel(9);
+ }
+ bool await(uint32_t timeout = 80000)
+ { return transport.gate.await(timeout); }
+ const Result *getResult() {
+ if (transport.result.get()) {
+ return transport.result.get();
+ }
+ return &token.getResult();
+ }
+};
+
+
+struct PutContext {
+ FeedTokenContext tokenCtx;
+ DocumentContext docCtx;
+ typedef std::shared_ptr<PutContext> SP;
+ PutContext(const vespalib::string &docId, DocBuilder &builder) :
+ tokenCtx(DocumentProtocol::REPLY_PUTDOCUMENT),
+ docCtx(docId, builder)
+ {
+ }
+};
+
+
+struct PutHandler {
+ FeedHandler &handler;
+ DocBuilder &builder;
+ Timestamp timestamp;
+ std::vector<PutContext::SP> puts;
+ PutHandler(FeedHandler &fh, DocBuilder &db) :
+ handler(fh),
+ builder(db),
+ timestamp(0),
+ puts()
+ {
+ }
+ void put(const vespalib::string &docId) {
+ PutContext::SP pc(new PutContext(docId, builder));
+ FeedOperation::UP op(new PutOperation(pc->docCtx.bucketId,
+ timestamp, pc->docCtx.doc));
+ handler.handleOperation(pc->tokenCtx.token, std::move(op));
+ timestamp = Timestamp(timestamp + 1);
+ puts.push_back(pc);
+ }
+ bool await(uint32_t timeout = 80000) {
+ for (size_t i = 0; i < puts.size(); ++i) {
+ if (!puts[i]->tokenCtx.await(timeout)) {
+ return false;
+ }
+ }
+ return true;
+ }
+};
+
+
+struct MyFeedMetrics : public metrics::MetricSet
+{
+ PerDocTypeFeedMetrics _feed;
+
+ MyFeedMetrics(void)
+ : metrics::MetricSet("myfeedmetrics", "", "My feed metrics", NULL),
+ _feed(this)
+ {
+ }
+};
+
+
+struct MyTlsWriter : TlsWriter {
+ int store_count;
+ int erase_count;
+ bool erase_return;
+
+ MyTlsWriter() : store_count(0), erase_count(0), erase_return(true) {}
+ virtual void storeOperation(const FeedOperation &) { ++store_count; }
+ virtual bool erase(SerialNum) { ++erase_count; return erase_return; }
+
+ virtual SerialNum
+ sync(SerialNum syncTo)
+ {
+ return syncTo;
+ }
+};
+
+
+struct FeedHandlerFixture
+{
+ DummyFileHeaderContext _fileHeaderContext;
+ TransLogServer tls;
+ vespalib::string tlsSpec;
+ ExecutorThreadingService writeService;
+ SchemaContext schema;
+ MyOwner owner;
+ MyResourceWriteFilter writeFilter;
+ DDBState _state;
+ MyReplayConfig replayConfig;
+ MyFeedView feedView;
+ MyFeedMetrics feedMetrics;
+ MyTlsWriter tls_writer;
+ BucketDBOwner _bucketDB;
+ bucketdb::BucketDBHandler _bucketDBHandler;
+ FeedHandler handler;
+ FeedHandlerFixture()
+ : _fileHeaderContext(),
+ tls("mytls", 9016, "mytlsdir", _fileHeaderContext, 0x10000),
+ tlsSpec("tcp/localhost:9016"),
+ writeService(),
+ schema(),
+ owner(),
+ _state(),
+ replayConfig(),
+ feedView(schema.getRepo()),
+ _bucketDB(),
+ _bucketDBHandler(_bucketDB),
+ handler(writeService, tlsSpec, schema.getDocType(),
+ feedMetrics._feed, _state, owner, writeFilter, replayConfig, NULL, &tls_writer)
+ {
+ _state.enterLoadState();
+ _state.enterReplayTransactionLogState();
+ handler.setActiveFeedView(&feedView);
+ handler.setBucketDBHandler(&_bucketDBHandler);
+ handler.init(1);
+ }
+
+ ~FeedHandlerFixture()
+ {
+ writeService.sync();
+ }
+ template <class FunctionType>
+ inline void runAsMaster(FunctionType &&function) {
+ writeService.master().execute(makeLambdaTask(std::move(function)));
+ writeService.master().sync();
+ }
+ void syncMaster() {
+ writeService.master().sync();
+ }
+};
+
+
+struct MyConfigStore : ConfigStore {
+ virtual SerialNum getBestSerialNum() const { return 1; }
+ virtual SerialNum getOldestSerialNum() const { return 1; }
+ virtual void saveConfig(const DocumentDBConfig &,
+ const search::index::Schema &, SerialNum) {}
+ virtual void loadConfig(const DocumentDBConfig &, SerialNum,
+ DocumentDBConfig::SP &,
+ search::index::Schema::SP &) {}
+ virtual void removeInvalid() {}
+ void prune(SerialNum) {}
+ virtual bool hasValidSerial(SerialNum) const { return true; }
+ virtual SerialNum getPrevValidSerial(SerialNum) const { return 1; }
+ virtual void saveWipeHistoryConfig(SerialNum,
+ fastos::TimeStamp) {}
+ virtual void serializeConfig(SerialNum, vespalib::nbostream &) {}
+ virtual void deserializeConfig(SerialNum, vespalib::nbostream &) {}
+ virtual void setProtonConfig(const ProtonConfigSP &) override { }
+};
+
+
+struct ReplayTransactionLogContext {
+ IIndexWriter::SP iwriter;
+ MyConfigStore config_store;
+ DocumentDBConfig::SP cfgSnap;
+};
+
+
+TEST_F("require that heartBeat calls FeedView's heartBeat",
+ FeedHandlerFixture)
+{
+ f.runAsMaster([&]() { f.handler.heartBeat(); });
+ EXPECT_EQUAL(1, f.feedView.heartbeat_count);
+}
+
+TEST_F("require that rejected config disables operations and heartbeat",
+ FeedHandlerFixture)
+{
+ f.owner.rejected_config = true;
+ f.handler.changeToNormalFeedState();
+ f.owner._allowPrune = true;
+
+ DocumentContext doc_context("doc:test:foo", *f.schema.builder);
+ FeedOperation::UP op(new PutOperation(doc_context.bucketId,
+ Timestamp(10), doc_context.doc));
+ FeedTokenContext token1;
+ f.handler.performOperation(std::move(token1.token_ap), std::move(op));
+ EXPECT_EQUAL(0, f.feedView.put_count);
+ EXPECT_EQUAL(Result::PERMANENT_ERROR, token1.getResult()->getErrorCode());
+
+ FeedTokenContext token2(DocumentProtocol::REPLY_REMOVEDOCUMENT);
+ op.reset(new RemoveOperation(doc_context.bucketId, Timestamp(10),
+ doc_context.doc->getId()));
+ f.handler.performOperation(std::move(token2.token_ap), std::move(op));
+ EXPECT_EQUAL(0, f.feedView.remove_count);
+ EXPECT_TRUE(dynamic_cast<const RemoveResult *>(token2.getResult()));
+ EXPECT_EQUAL(Result::PERMANENT_ERROR, token2.getResult()->getErrorCode());
+
+ FeedTokenContext token3(DocumentProtocol::REPLY_UPDATEDOCUMENT);
+ op.reset(new UpdateOperation(doc_context.bucketId, Timestamp(10),
+ document::DocumentUpdate::SP()));
+ f.handler.performOperation(std::move(token3.token_ap), std::move(op));
+ EXPECT_EQUAL(0, f.feedView.update_count);
+ EXPECT_TRUE(dynamic_cast<const UpdateResult *>(token3.getResult()));
+ EXPECT_EQUAL(Result::PERMANENT_ERROR, token3.getResult()->getErrorCode());
+
+ f.runAsMaster([&]() { f.handler.heartBeat(); });
+ EXPECT_EQUAL(0, f.feedView.heartbeat_count);
+
+ EXPECT_EQUAL(0, f.tls_writer.store_count);
+}
+
+TEST_F("require that outdated remove is ignored", FeedHandlerFixture)
+{
+ DocumentContext doc_context("doc:test:foo", *f.schema.builder);
+ FeedOperation::UP op(new RemoveOperation(doc_context.bucketId,
+ Timestamp(10),
+ doc_context.doc->getId()));
+ static_cast<DocumentOperation &>(*op).setPrevDbDocumentId(DbDocumentId(4));
+ static_cast<DocumentOperation &>(*op).setPrevTimestamp(Timestamp(10000));
+ FeedTokenContext token_context(DocumentProtocol::REPLY_REMOVEDOCUMENT);
+ f.handler.performOperation(std::move(token_context.token_ap), std::move(op));
+ EXPECT_EQUAL(0, f.feedView.remove_count);
+ EXPECT_EQUAL(0, f.tls_writer.store_count);
+}
+
+TEST_F("require that outdated put is ignored", FeedHandlerFixture)
+{
+ DocumentContext doc_context("doc:test:foo", *f.schema.builder);
+ FeedOperation::UP op(new PutOperation(doc_context.bucketId,
+ Timestamp(10), doc_context.doc));
+ static_cast<DocumentOperation &>(*op).setPrevTimestamp(Timestamp(10000));
+ FeedTokenContext token_context;
+ f.handler.performOperation(std::move(token_context.token_ap), std::move(op));
+ EXPECT_EQUAL(0, f.feedView.put_count);
+ EXPECT_EQUAL(0, f.tls_writer.store_count);
+}
+
+void
+addLidToRemove(RemoveDocumentsOperation &op)
+{
+ LidVectorContext::LP lids(new LidVectorContext(42));
+ lids->addLid(4);
+ op.setLidsToRemove(0, lids);
+}
+
+
+TEST_F("require that WipeHistory calls owner", FeedHandlerFixture)
+{
+ MyTransport transport;
+ FeedTokenContext token_context;
+ f.handler.performOperation(std::move(token_context.token_ap),
+ FeedOperation::UP(new WipeHistoryOperation));
+ EXPECT_EQUAL(1, f.owner.wipe_history_count);
+ EXPECT_EQUAL(0, f.tls_writer.store_count); // Not stored in tls.
+}
+
+TEST_F("require that handleMove calls FeedView", FeedHandlerFixture)
+{
+ DocumentContext doc_context("doc:test:foo", *f.schema.builder);
+ MoveOperation op(doc_context.bucketId, Timestamp(2), doc_context.doc,
+ DbDocumentId(0, 2), 1);
+ op.setDbDocumentId(DbDocumentId(1, 2));
+ f.runAsMaster([&]() { f.handler.handleMove(op); });
+ EXPECT_EQUAL(1, f.feedView.move_count);
+ EXPECT_EQUAL(1, f.tls_writer.store_count);
+}
+
+TEST_F("require that performPruneRemovedDocuments calls FeedView",
+ FeedHandlerFixture)
+{
+ PruneRemovedDocumentsOperation op;
+ f.handler.performPruneRemovedDocuments(op);
+ EXPECT_EQUAL(0, f.feedView.prune_removed_count);
+ EXPECT_EQUAL(0, f.tls_writer.store_count);
+
+ addLidToRemove(op);
+ f.handler.performPruneRemovedDocuments(op);
+ EXPECT_EQUAL(1, f.feedView.prune_removed_count);
+ EXPECT_EQUAL(1, f.tls_writer.store_count);
+}
+
+TEST_F("require that failed prune throws", FeedHandlerFixture)
+{
+ f.tls_writer.erase_return = false;
+ EXPECT_EXCEPTION(f.handler.tlsPrune(10), vespalib::IllegalStateException,
+ "Failed to prune TLS to token 10.");
+}
+
+TEST_F("require that flush done calls prune", FeedHandlerFixture)
+{
+ f.handler.changeToNormalFeedState();
+ f.owner._allowPrune = true;
+ f.handler.flushDone(10);
+ f.syncMaster();
+ EXPECT_EQUAL(1, f.tls_writer.erase_count);
+ EXPECT_EQUAL(10u, f.handler.getPrunedSerialNum());
+}
+
+TEST_F("require that flush in init state delays pruning", FeedHandlerFixture)
+{
+ f.handler.flushDone(10);
+ f.syncMaster();
+ EXPECT_EQUAL(0, f.tls_writer.erase_count);
+ EXPECT_EQUAL(10u, f.handler.getPrunedSerialNum());
+}
+
+TEST_F("require that flush cannot unprune", FeedHandlerFixture)
+{
+ f.handler.flushDone(10);
+ f.syncMaster();
+ EXPECT_EQUAL(10u, f.handler.getPrunedSerialNum());
+
+ f.handler.flushDone(5); // Try to unprune.
+ f.syncMaster();
+ EXPECT_EQUAL(10u, f.handler.getPrunedSerialNum());
+}
+
+TEST_F("require that remove of unknown document with known data type "
+ "stores remove", FeedHandlerFixture)
+{
+ DocumentContext doc_context("id:test:searchdocument::foo",
+ *f.schema.builder);
+ FeedOperation::UP op(new RemoveOperation(doc_context.bucketId,
+ Timestamp(10),
+ doc_context.doc->getId()));
+ FeedTokenContext token_context(DocumentProtocol::REPLY_REMOVEDOCUMENT);
+ f.handler.performOperation(std::move(token_context.token_ap), std::move(op));
+ EXPECT_EQUAL(1, f.feedView.remove_count);
+ EXPECT_EQUAL(1, f.tls_writer.store_count);
+}
+
+TEST_F("require that partial update for non-existing document is tagged as such",
+ FeedHandlerFixture)
+{
+ UpdateContext upCtx("id:test:searchdocument::foo", *f.schema.builder);
+ FeedOperation::UP op(new UpdateOperation(upCtx.bucketId,
+ Timestamp(10),
+ upCtx.update));
+ FeedTokenContext token_context(DocumentProtocol::REPLY_UPDATEDOCUMENT);
+ f.handler.performOperation(std::move(token_context.token_ap), std::move(op));
+ const UpdateResult *result = static_cast<const UpdateResult *>(token_context.getResult());
+
+ EXPECT_FALSE(token_context.transport.documentWasFound);
+ EXPECT_EQUAL(0u, result->getExistingTimestamp());
+ EXPECT_EQUAL(0, f.feedView.put_count);
+ EXPECT_EQUAL(0, f.feedView.update_count);
+ EXPECT_EQUAL(0, f.tls_writer.store_count);
+}
+
+TEST_F("require that partial update for non-existing document is created if specified",
+ FeedHandlerFixture)
+{
+ f.handler.setSerialNum(15);
+ UpdateContext upCtx("id:test:searchdocument::foo", *f.schema.builder);
+ upCtx.update->setCreateIfNonExistent(true);
+ f.feedView.metaStore.insert(upCtx.update->getId().getGlobalId(),
+ MyDocumentMetaStore::Entry(5, 5, Timestamp(10)));
+ FeedOperation::UP op(new UpdateOperation(upCtx.bucketId,
+ Timestamp(10),
+ upCtx.update));
+ FeedTokenContext token_context(DocumentProtocol::REPLY_UPDATEDOCUMENT);
+ f.handler.performOperation(std::move(token_context.token_ap), std::move(op));
+ const UpdateResult *result = static_cast<const UpdateResult *>(token_context.getResult());
+
+ EXPECT_TRUE(token_context.transport.documentWasFound);
+ EXPECT_EQUAL(10u, result->getExistingTimestamp());
+ EXPECT_EQUAL(1, f.feedView.put_count);
+ EXPECT_EQUAL(16u, f.feedView.put_serial);
+ EXPECT_EQUAL(0, f.feedView.update_count);
+ EXPECT_EQUAL(0u, f.feedView.update_serial);
+ EXPECT_EQUAL(1u, f.feedView.metaStore._allocated.size());
+ EXPECT_EQUAL(1, f.tls_writer.store_count);
+}
+
+TEST_F("require that put is rejected if resource limit is reached", FeedHandlerFixture)
+{
+ f.writeFilter._acceptWriteOperation = false;
+ f.writeFilter._message = "Attribute resource limit reached";
+
+ DocumentContext docCtx("id:test:searchdocument::foo", *f.schema.builder);
+ FeedOperation::UP op = std::make_unique<PutOperation>(docCtx.bucketId, Timestamp(10), docCtx.doc);
+ FeedTokenContext token;
+ f.handler.performOperation(std::move(token.token_ap), std::move(op));
+ EXPECT_EQUAL(0, f.feedView.put_count);
+ EXPECT_EQUAL(Result::RESOURCE_EXHAUSTED, token.getResult()->getErrorCode());
+ EXPECT_EQUAL("Put operation rejected for document 'id:test:searchdocument::foo' of type 'searchdocument': 'Attribute resource limit reached'",
+ token.getResult()->getErrorMessage());
+}
+
+TEST_F("require that update is rejected if resource limit is reached", FeedHandlerFixture)
+{
+ f.writeFilter._acceptWriteOperation = false;
+ f.writeFilter._message = "Attribute resource limit reached";
+
+ UpdateContext updCtx("id:test:searchdocument::foo", *f.schema.builder);
+ FeedOperation::UP op = std::make_unique<UpdateOperation>(updCtx.bucketId, Timestamp(10), updCtx.update);
+ FeedTokenContext token(DocumentProtocol::REPLY_UPDATEDOCUMENT);
+ f.handler.performOperation(std::move(token.token_ap), std::move(op));
+ EXPECT_EQUAL(0, f.feedView.update_count);
+ EXPECT_TRUE(dynamic_cast<const UpdateResult *>(token.getResult()));
+ EXPECT_EQUAL(Result::RESOURCE_EXHAUSTED, token.getResult()->getErrorCode());
+ EXPECT_EQUAL("Update operation rejected for document 'id:test:searchdocument::foo' of type 'searchdocument': 'Attribute resource limit reached'",
+ token.getResult()->getErrorMessage());
+}
+
+TEST_F("require that remove is NOT rejected if resource limit is reached", FeedHandlerFixture)
+{
+ f.writeFilter._acceptWriteOperation = false;
+ f.writeFilter._message = "Attribute resource limit reached";
+
+ DocumentContext docCtx("id:test:searchdocument::foo", *f.schema.builder);
+ FeedOperation::UP op = std::make_unique<RemoveOperation>(docCtx.bucketId, Timestamp(10), docCtx.doc->getId());
+ FeedTokenContext token(DocumentProtocol::REPLY_REMOVEDOCUMENT);
+ f.handler.performOperation(std::move(token.token_ap), std::move(op));
+ EXPECT_EQUAL(1, f.feedView.remove_count);
+ EXPECT_EQUAL(Result::NONE, token.getResult()->getErrorCode());
+ EXPECT_EQUAL("", token.getResult()->getErrorMessage());
+}
+
+} // namespace
+
+TEST_MAIN()
+{
+ DummyFileHeaderContext::setCreator("feedhandler_test");
+ TEST_RUN_ALL();
+}
diff --git a/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.sh b/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.sh
new file mode 100644
index 00000000000..bc49b207155
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+# Run the feedhandler test binary (under valgrind when $VALGRIND is set),
+# then clean up the transaction-log and file-config directories it creates.
+$VALGRIND ./searchcore_feedhandler_test_app
+rm -rf mytlsdir
+rm -rf myfilecfg
diff --git a/searchcore/src/tests/proton/documentdb/feedview/.gitignore b/searchcore/src/tests/proton/documentdb/feedview/.gitignore
new file mode 100644
index 00000000000..596e11ac15a
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/feedview/.gitignore
@@ -0,0 +1 @@
+searchcore_feedview_test_app
diff --git a/searchcore/src/tests/proton/documentdb/feedview/CMakeLists.txt b/searchcore/src/tests/proton/documentdb/feedview/CMakeLists.txt
new file mode 100644
index 00000000000..0ea395a339a
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/feedview/CMakeLists.txt
@@ -0,0 +1,19 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+# Build and register the feedview unit test executable.
+vespa_add_executable(searchcore_feedview_test_app
+ SOURCES
+ feedview_test.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_index
+ searchcore_feedoperation
+ searchcore_matching
+ searchcore_attribute
+ searchcore_documentmetastore
+ searchcore_bucketdb
+ searchcore_pcommon
+ searchcore_grouping
+ searchcore_proton_metrics
+ searchcore_util
+ searchcore_fconfig
+)
+vespa_add_test(NAME searchcore_feedview_test_app COMMAND searchcore_feedview_test_app)
diff --git a/searchcore/src/tests/proton/documentdb/feedview/DESC b/searchcore/src/tests/proton/documentdb/feedview/DESC
new file mode 100644
index 00000000000..4c35f3d2a61
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/feedview/DESC
@@ -0,0 +1 @@
+feedview test. Take a look at feedview_test.cpp for details.
diff --git a/searchcore/src/tests/proton/documentdb/feedview/FILES b/searchcore/src/tests/proton/documentdb/feedview/FILES
new file mode 100644
index 00000000000..3f8ae0c6889
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/feedview/FILES
@@ -0,0 +1 @@
+feedview_test.cpp
diff --git a/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp b/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp
new file mode 100644
index 00000000000..94167b8216a
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp
@@ -0,0 +1,1211 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("feedview_test");
+#include <vespa/documentapi/messagebus/documentprotocol.h>
+#include <vespa/documentapi/messagebus/messages/documentreply.h>
+#include <vespa/documentapi/messagebus/messages/removedocumentreply.h>
+#include <vespa/documentapi/messagebus/messages/updatedocumentreply.h>
+#include <vespa/searchcore/proton/attribute/i_attribute_writer.h>
+#include <vespa/searchcore/proton/common/bucketfactory.h>
+#include <vespa/searchcore/proton/common/commit_time_tracker.h>
+#include <vespa/searchcore/proton/index/i_index_writer.h>
+#include <vespa/searchcore/proton/metrics/feed_metrics.h>
+#include <vespa/searchcore/proton/server/ifrozenbuckethandler.h>
+#include <vespa/searchcore/proton/server/executorthreadingservice.h>
+#include <vespa/searchcore/proton/server/searchable_feed_view.h>
+#include <vespa/searchcore/proton/server/isummaryadapter.h>
+#include <vespa/searchcore/proton/server/matchview.h>
+#include <vespa/searchcore/proton/documentmetastore/lidreusedelayer.h>
+#include <vespa/searchcore/proton/test/document_meta_store_context_observer.h>
+#include <vespa/searchcore/proton/test/dummy_document_store.h>
+#include <vespa/searchcore/proton/test/dummy_summary_manager.h>
+#include <vespa/searchcore/proton/test/mock_index_writer.h>
+#include <vespa/searchcore/proton/test/mock_index_manager.h>
+#include <vespa/searchcore/proton/test/thread_utils.h>
+#include <vespa/searchcore/proton/test/threading_service_observer.h>
+#include <vespa/searchlib/docstore/cachestats.h>
+#include <vespa/searchlib/docstore/idocumentstore.h>
+#include <vespa/searchlib/attribute/attributefactory.h>
+#include <vespa/searchlib/index/docbuilder.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/util/blockingthreadstackexecutor.h>
+#include <mutex>
+
+using documentapi::RemoveDocumentReply;
+using namespace proton;
+using document::BucketId;
+using document::DataType;
+using document::Document;
+using document::DocumentId;
+using document::DocumentUpdate;
+using documentapi::DocumentProtocol;
+using proton::matching::SessionManager;
+using search::index::DocBuilder;
+using search::index::Schema;
+using search::AttributeVector;
+using search::CacheStats;
+using search::DocumentMetaData;
+using search::SearchableStats;
+using searchcorespi::IndexSearchable;
+using storage::spi::BucketChecksum;
+using storage::spi::BucketInfo;
+using storage::spi::PartitionId;
+using storage::spi::Timestamp;
+using storage::spi::UpdateResult;
+using vespalib::tensor::TensorType;
+using fastos::TimeStamp;
+
+typedef SearchableFeedView::SerialNum SerialNum;
+typedef search::DocumentIdT DocumentIdT;
+typedef DocumentProtocol::MessageType MessageType;
+
+// Vector of local document ids with a chainable add() for concise test setup,
+// e.g. MyLidVector().add(1).add(2).
+struct MyLidVector : public std::vector<DocumentIdT>
+{
+ MyLidVector &add(DocumentIdT lid) { push_back(lid); return *this; }
+};
+
+
+const uint32_t subdb_id = 0;
+const vespalib::string indexAdapterTypeName = "index";
+const vespalib::string attributeAdapterTypeName = "attribute";
+
+// Records a comma-separated trace of put/remove/commit/ack events into a
+// string stream so tests can assert the exact ordering of feed operations.
+// Mutex-guarded because writers run on multiple executor threads.
+struct MyTracer
+{
+ vespalib::asciistream _os;
+ using Mutex = std::mutex;
+ using Guard = std::lock_guard<Mutex>;
+ Mutex _mutex;
+
+ MyTracer()
+ : _os(),
+ _mutex()
+ {
+ }
+
+ // Separator between trace entries; no leading comma on the first entry.
+ void addComma() { if (!_os.empty()) { _os << ","; } }
+
+ void traceAck(const ResultUP &result) {
+ Guard guard(_mutex);
+ addComma();
+ _os << "ack(";
+ if (result) {
+ _os << result->toString();
+ } else {
+ _os << "null";
+ }
+ _os << ")";
+ }
+
+ void tracePut(const vespalib::string &adapterType,
+ SerialNum serialNum, uint32_t lid, bool immediateCommit) {
+ Guard guard(_mutex);
+ addComma();
+ _os << "put(adapter=" << adapterType <<
+ ",serialNum=" << serialNum << ",lid=" << lid << ",commit=" << immediateCommit << ")";
+ }
+
+ void traceRemove(const vespalib::string &adapterType,
+ SerialNum serialNum, uint32_t lid, bool immediateCommit) {
+ Guard guard(_mutex);
+ addComma();
+ _os << "remove(adapter=" << adapterType <<
+ ",serialNum=" << serialNum << ",lid=" << lid << ",commit=" << immediateCommit << ")";
+ }
+
+ void traceCommit(const vespalib::string &adapterType, SerialNum serialNum) {
+ Guard guard(_mutex);
+ addComma();
+ _os << "commit(adapter=" << adapterType <<
+ ",serialNum=" << serialNum << ")";
+ }
+};
+
+// Bundles the metrics objects and persistent parameters a feed view needs,
+// wired for the given document type. baseDir is accepted but unused here.
+struct ParamsContext
+{
+ DocTypeName _docTypeName;
+ FeedMetrics _feedMetrics;
+ PerDocTypeFeedMetrics _metrics;
+ SearchableFeedView::PersistentParams _params;
+
+ ParamsContext(const vespalib::string &docType,
+ const vespalib::string &baseDir)
+ : _docTypeName(docType),
+ _feedMetrics(),
+ _metrics(&_feedMetrics),
+ _params(0,
+ 0,
+ _docTypeName,
+ _metrics,
+ subdb_id,
+ SubDbType::READY)
+ {
+ (void) baseDir; // kept in the signature for call-site symmetry; not used
+ }
+ const SearchableFeedView::PersistentParams &getParams() const { return _params; }
+};
+
+// Index writer stub: counts heartbeats/commits, records removed lids, and
+// forwards put/remove/commit events to the shared tracer for order assertions.
+struct MyIndexWriter : public test::MockIndexWriter
+{
+ MyLidVector _removes;
+ int _heartBeatCount;
+ uint32_t _commitCount;
+ MyTracer &_tracer;
+ MyIndexWriter(MyTracer &tracer)
+ : test::MockIndexWriter(IIndexManager::SP(new test::MockIndexManager())),
+ _removes(),
+ _heartBeatCount(0),
+ _commitCount(0),
+ _tracer(tracer)
+ {}
+ virtual void put(SerialNum serialNum, const document::Document &doc,
+ const DocumentIdT lid) override {
+ (void) doc;
+ _tracer.tracePut(indexAdapterTypeName, serialNum, lid, false);
+ }
+ virtual void remove(SerialNum serialNum, const search::DocumentIdT lid) override {
+ LOG(info, "MyIndexAdapter::remove(): serialNum(%" PRIu64 "), docId(%u)",
+ serialNum, lid);
+ _removes.push_back(lid);
+ _tracer.traceRemove(indexAdapterTypeName, serialNum, lid, false);
+ }
+ virtual void commit(SerialNum serialNum, OnWriteDoneType) override {
+ ++_commitCount;
+ _tracer.traceCommit(indexAdapterTypeName, serialNum);
+ }
+ virtual void heartBeat(SerialNum) override { ++_heartBeatCount; }
+};
+
+// In-memory document store: keeps documents in a lid-keyed map and remembers
+// the last sync token, so tests can check whether writes reached the store.
+struct MyDocumentStore : public test::DummyDocumentStore
+{
+ typedef std::map<DocumentIdT, document::Document::SP> DocMap;
+ DocMap _docs;
+ uint64_t _lastSyncToken;
+ MyDocumentStore()
+ : test::DummyDocumentStore("."),
+ _docs(),
+ _lastSyncToken(0)
+ {}
+ // Returns a clone of the stored document, or an empty UP when not found.
+ virtual Document::UP read(DocumentIdT lid, const document::DocumentTypeRepo &) const {
+ DocMap::const_iterator itr = _docs.find(lid);
+ if (itr != _docs.end()) {
+ Document::UP retval(itr->second->clone());
+ return retval;
+ }
+ return Document::UP();
+ }
+ virtual void write(uint64_t syncToken, const document::Document& doc, DocumentIdT lid) {
+ _lastSyncToken = syncToken;
+ _docs[lid] = Document::SP(doc.clone());
+ }
+ virtual void remove(uint64_t syncToken, DocumentIdT lid) {
+ _lastSyncToken = syncToken;
+ _docs.erase(lid);
+ }
+ virtual uint64_t initFlush(uint64_t syncToken) {
+ return syncToken;
+ }
+ virtual uint64_t lastSyncToken() const { return _lastSyncToken; }
+};
+
+// Summary manager stub whose backing store is the in-memory MyDocumentStore.
+struct MySummaryManager : public test::DummySummaryManager
+{
+ MyDocumentStore _store;
+ MySummaryManager() : _store() {}
+ virtual search::IDocumentStore &getBackingStore() { return _store; }
+};
+
+// Summary adapter stub: delegates puts/removes/reads to MyDocumentStore and
+// records removed lids; update() is a no-op (summary is not updated in place).
+struct MySummaryAdapter : public ISummaryAdapter
+{
+ ISummaryManager::SP _sumMgr;
+ MyDocumentStore &_store;
+ MyLidVector _removes;
+
+ MySummaryAdapter()
+ : _sumMgr(new MySummaryManager()),
+ _store(static_cast<MyDocumentStore &>(_sumMgr->getBackingStore())),
+ _removes()
+ {
+ }
+ virtual void put(SerialNum serialNum, const document::Document &doc,
+ const DocumentIdT lid) {
+ (void) serialNum; // NOTE(review): leftover cast — serialNum IS used below
+ _store.write(serialNum, doc, lid);
+ }
+ virtual void remove(SerialNum serialNum, const DocumentIdT lid) {
+ LOG(info,
+ "MySummaryAdapter::remove(): serialNum(%" PRIu64 "), docId(%u)",
+ serialNum, lid);
+ _store.remove(serialNum, lid);
+ _removes.push_back(lid);
+ }
+ virtual void update(SerialNum serialNum, const document::DocumentUpdate &upd,
+ const DocumentIdT lid, const document::DocumentTypeRepo &repo) {
+ (void) serialNum; (void) upd; (void) lid; (void) repo;
+ }
+ virtual void heartBeat(SerialNum) {}
+ virtual const search::IDocumentStore &getDocumentStore() const {
+ return _store;
+ }
+ virtual std::unique_ptr<document::Document> get(const search::DocumentIdT lid,
+ const document::DocumentTypeRepo &repo) {
+ return _store.read(lid, repo);
+ }
+};
+
+// Attribute writer stub: records the serial number, document id and lid of
+// the last put/update/remove, counts commits and heartbeats, and exposes a
+// small fixed set of attributes (int32 a1, predicate a2, tensor a3). Only
+// names present in _attrs are reported as writable attributes.
+struct MyAttributeWriter : public IAttributeWriter
+{
+ MyLidVector _removes;
+ SerialNum _putSerial;
+ DocumentId _putDocId;
+ DocumentIdT _putLid;
+ SerialNum _updateSerial;
+ DocumentId _updateDocId;
+ DocumentIdT _updateLid;
+ SerialNum _removeSerial;
+ DocumentIdT _removeLid;
+ int _heartBeatCount;
+ uint32_t _commitCount;
+ uint32_t _wantedLidLimit;
+ using AttrMap = std::map<vespalib::string,
+ std::shared_ptr<AttributeVector>>;
+ AttrMap _attrMap;
+ std::set<vespalib::string> _attrs;
+ proton::IAttributeManager::SP _mgr;
+ MyTracer &_tracer;
+ MyAttributeWriter(MyTracer &tracer)
+ : _removes(),
+ _putSerial(0),
+ _putDocId(),
+ _putLid(0),
+ _updateSerial(0),
+ _updateDocId(),
+ _updateLid(0),
+ _removeSerial(0),
+ _removeLid(0),
+ _heartBeatCount(0),
+ _commitCount(0),
+ _wantedLidLimit(0),
+ _attrMap(),
+ _attrs(),
+ _mgr(),
+ _tracer(tracer)
+ {
+ search::attribute::Config cfg(search::attribute::BasicType::INT32);
+ _attrMap["a1"] = search::AttributeFactory::createAttribute("test", cfg);
+ search::attribute::Config
+ cfg2(search::attribute::BasicType::PREDICATE);
+ _attrMap["a2"] = search::AttributeFactory::createAttribute("test2",
+ cfg2);
+ search::attribute::Config cfg3(search::attribute::BasicType::TENSOR);
+ cfg3.setTensorType(TensorType::fromSpec("tensor(x[10])"));
+ _attrMap["a3"] = search::AttributeFactory::createAttribute("test3",
+ cfg3);
+ }
+ virtual std::vector<AttributeVector *>
+ getWritableAttributes() const override {
+ return std::vector<AttributeVector *>();
+ }
+ // Only attributes explicitly registered in _attrs are considered writable.
+ virtual AttributeVector *getWritableAttribute(const vespalib::string &attrName) const override {
+ if (_attrs.count(attrName) == 0) {
+ return nullptr;
+ }
+ AttrMap::const_iterator itr = _attrMap.find(attrName);
+ return ((itr == _attrMap.end()) ? nullptr : itr->second.get());
+ }
+ virtual void put(SerialNum serialNum, const document::Document &doc, DocumentIdT lid,
+ bool immediateCommit, OnWriteDoneType) override {
+ _putSerial = serialNum;
+ _putDocId = doc.getId();
+ _putLid = lid;
+ _tracer.tracePut(attributeAdapterTypeName, serialNum, lid, immediateCommit);
+ if (immediateCommit) {
+ ++_commitCount;
+ }
+ }
+ virtual void remove(SerialNum serialNum, DocumentIdT lid,
+ bool immediateCommit, OnWriteDoneType) override {
+ _removeSerial = serialNum;
+ _removeLid = lid;
+ _tracer.traceRemove(attributeAdapterTypeName, serialNum, lid, immediateCommit);
+ if (immediateCommit) {
+ ++_commitCount;
+ }
+ }
+ // Batch remove: traces each lid but does not bump _commitCount.
+ virtual void remove(const LidVector & lidsToRemove, SerialNum serialNum,
+ bool immediateCommit, OnWriteDoneType) override {
+ for (uint32_t lid : lidsToRemove) {
+ LOG(info, "MyAttributeAdapter::remove(): serialNum(%" PRIu64 "), docId(%u)", serialNum, lid);
+ _removes.push_back(lid);
+ _tracer.traceRemove(attributeAdapterTypeName, serialNum, lid, immediateCommit);
+ }
+ }
+ virtual void update(SerialNum serialNum, const document::DocumentUpdate &upd,
+ DocumentIdT lid, bool, OnWriteDoneType) override {
+ _updateSerial = serialNum;
+ _updateDocId = upd.getId();
+ _updateLid = lid;
+ }
+ virtual void heartBeat(SerialNum) override { ++_heartBeatCount; }
+ virtual void compactLidSpace(uint32_t wantedLidLimit, SerialNum serialNum) override {
+ (void) serialNum;
+ _wantedLidLimit = wantedLidLimit;
+ }
+ virtual const proton::IAttributeManager::SP &getAttributeManager() const override {
+ return _mgr;
+ }
+ void commit(SerialNum serialNum, OnWriteDoneType) override {
+ (void) serialNum; ++_commitCount;
+ _tracer.traceCommit(attributeAdapterTypeName, serialNum);
+ }
+
+ virtual void onReplayDone(uint32_t docIdLimit) override
+ {
+ (void) docIdLimit;
+ }
+};
+
+// FeedToken transport stub: captures the ack result, traces it, and opens a
+// gate so the test thread can await() the asynchronous acknowledgement.
+struct MyTransport : public FeedToken::ITransport
+{
+ ResultUP lastResult;
+ vespalib::Gate _gate;
+ MyTracer &_tracer;
+ MyTransport(MyTracer &tracer) : lastResult(), _gate(), _tracer(tracer) {}
+ virtual void send(mbus::Reply::UP reply,
+ ResultUP result,
+ bool documentWasFound,
+ double latency_ms) {
+ // NOTE(review): comma operator between the last two casts — works, but a
+ // semicolon was presumably intended.
+ (void) reply; (void) documentWasFound, (void) latency_ms;
+ lastResult = std::move(result);
+ _tracer.traceAck(lastResult);
+ _gate.countDown();
+ }
+ void await() { _gate.await(); }
+};
+
+
+// Result handler stub: opens a gate when a result arrives so tests can await
+// completion of asynchronous operations.
+struct MyResultHandler : public IGenericResultHandler
+{
+ vespalib::Gate _gate;
+ MyResultHandler() : _gate() {}
+ virtual void handle(const storage::spi::Result &) {
+ _gate.countDown();
+ }
+ void await() { _gate.await(); }
+};
+
+// Builds the test schema (index field i1; attributes a1/a2/a3 matching the
+// MyAttributeWriter attribute set; summary field s1) and a DocBuilder for it.
+struct SchemaContext
+{
+ Schema::SP _schema;
+ std::unique_ptr<DocBuilder> _builder;
+ SchemaContext() :
+ _schema(new Schema()),
+ _builder()
+ {
+ _schema->addIndexField(Schema::IndexField("i1", Schema::STRING, Schema::SINGLE));
+ _schema->addAttributeField(Schema::AttributeField("a1", Schema::STRING, Schema::SINGLE));
+ _schema->addAttributeField(Schema::AttributeField("a2", Schema::BOOLEANTREE, Schema::SINGLE));
+ _schema->addAttributeField(Schema::AttributeField("a3", Schema::TENSOR, Schema::SINGLE));
+ _schema->addSummaryField(Schema::SummaryField("s1", Schema::STRING, Schema::SINGLE));
+ _builder.reset(new DocBuilder(*_schema));
+ }
+ const document::DocumentTypeRepo::SP &getRepo() const { return _builder->getDocumentTypeRepo(); }
+};
+
+// One test document plus the derived artifacts feed operations need: an empty
+// DocumentUpdate for it, its bucket id (derived from the gid), and a timestamp.
+// The s1 summary field is set to the document id string.
+struct DocumentContext
+{
+ Document::SP doc;
+ DocumentUpdate::SP upd;
+ BucketId bid;
+ Timestamp ts;
+ typedef std::vector<DocumentContext> List;
+ DocumentContext(const vespalib::string &docId, uint64_t timestamp, DocBuilder &builder) :
+ doc(builder.startDocument(docId)
+ .startSummaryField("s1").addStr(docId).endField()
+ .endDocument().release()),
+ upd(new DocumentUpdate(builder.getDocumentType(), doc->getId())),
+ bid(BucketFactory::getNumBucketBits(),
+ doc->getId().getGlobalId().convertToBucketId().getRawId()),
+ ts(timestamp)
+ {
+ }
+ // Appends an (empty) field update for the named field to the update.
+ void addFieldUpdate(DocBuilder &builder,
+ const vespalib::string &fieldName) {
+ const document::Field &field =
+ builder.getDocumentType().getField(fieldName);
+ upd->addUpdate(document::FieldUpdate(field));
+ }
+};
+
+namespace {
+
+// Creates the messagebus reply matching the given message type: specific
+// reply classes for update/remove, generic DocumentReply otherwise.
+mbus::Reply::UP
+createReply(MessageType mtype)
+{
+ if (mtype == DocumentProtocol::REPLY_UPDATEDOCUMENT) {
+ return mbus::Reply::UP(new documentapi::UpdateDocumentReply);
+ } else if (mtype == DocumentProtocol::REPLY_REMOVEDOCUMENT) {
+ return mbus::Reply::UP(new documentapi::RemoveDocumentReply);
+ } else {
+ return mbus::Reply::UP(new documentapi::DocumentReply(mtype));
+ }
+}
+
+} // namespace
+
+// Pairs a MyTransport with a FeedToken carrying the reply type under test,
+// so the test can await and inspect the operation's acknowledgement.
+struct FeedTokenContext
+{
+ MyTransport mt;
+ FeedToken ft;
+ typedef std::shared_ptr<FeedTokenContext> SP;
+ typedef std::vector<SP> List;
+ FeedTokenContext(MyTracer &tracer, MessageType mtype) :
+ mt(tracer),
+ ft(mt, createReply(mtype))
+ {
+ }
+};
+
+// Common fixture for the feed view tests: owns the mock writers/adapters, the
+// observed document meta store and threading service, and offers synchronous
+// helpers (putAndWait, updateAndWait, removeAndWait, ...) that run feed
+// operations in the master thread and wait for completion. Subclasses supply
+// the concrete feed view via getFeedView().
+struct FixtureBase
+{
+ MyTracer _tracer;
+ IIndexWriter::SP iw;
+ ISummaryAdapter::SP sa;
+ IAttributeWriter::SP aw;
+ MyIndexWriter &miw;
+ MySummaryAdapter &msa;
+ MyAttributeWriter &maw;
+ SchemaContext sc;
+ DocIdLimit _docIdLimit;
+ DocumentMetaStoreContext::SP _dmscReal;
+ test::DocumentMetaStoreContextObserver::SP _dmsc;
+ ParamsContext pc;
+ ExecutorThreadingService _writeServiceReal;
+ test::ThreadingServiceObserver _writeService;
+ documentmetastore::LidReuseDelayer _lidReuseDelayer;
+ CommitTimeTracker _commitTimeTracker;
+ SerialNum serial;
+ FixtureBase(TimeStamp visibilityDelay) :
+ _tracer(),
+ iw(new MyIndexWriter(_tracer)),
+ sa(new MySummaryAdapter),
+ aw(new MyAttributeWriter(_tracer)),
+ miw(static_cast<MyIndexWriter&>(*iw)),
+ msa(static_cast<MySummaryAdapter&>(*sa)),
+ maw(static_cast<MyAttributeWriter&>(*aw)),
+ sc(),
+ _docIdLimit(0u),
+ _dmscReal(new DocumentMetaStoreContext(std::make_shared<BucketDBOwner>())),
+ _dmsc(new test::DocumentMetaStoreContextObserver(*_dmscReal)),
+ pc(sc._builder->getDocumentType().getName(), "fileconfig_test"),
+ _writeServiceReal(),
+ _writeService(_writeServiceReal),
+ _lidReuseDelayer(_writeService, _dmsc->get()),
+ _commitTimeTracker(visibilityDelay),
+ serial(0)
+ {
+ _dmsc->constructFreeList();
+ // With no visibility delay, lids are reused immediately on commit.
+ _lidReuseDelayer.setImmediateCommit(visibilityDelay == 0);
+ }
+
+ virtual ~FixtureBase() {
+ _writeServiceReal.sync();
+ }
+
+ void syncMaster() {
+ _writeService.master().sync();
+ }
+
+ void syncIndex() {
+ _writeService.sync();
+ }
+
+ void sync() {
+ _writeServiceReal.sync();
+ }
+
+ const test::DocumentMetaStoreObserver &metaStoreObserver() {
+ return _dmsc->getObserver();
+ }
+
+ const test::ThreadingServiceObserver &writeServiceObserver() {
+ return _writeService;
+ }
+
+ // Runs func in the master (write) thread and waits for it to finish.
+ template <typename FunctionType>
+ void runInMaster(FunctionType func) {
+ test::runInMaster(_writeService, func);
+ }
+
+ virtual IFeedView &getFeedView() = 0;
+
+ const IDocumentMetaStore &getMetaStore() const {
+ return _dmsc->get();
+ }
+
+ BucketDBOwner::Guard getBucketDB() const {
+ return getMetaStore().getBucketDB().takeGuard();
+ }
+
+ DocumentMetaData getMetaData(const DocumentContext &doc_) const {
+ return getMetaStore().getMetaData(doc_.doc->getId().getGlobalId());
+ }
+
+ DocBuilder &getBuilder() { return *sc._builder; }
+
+ DocumentContext doc(const vespalib::string &docId, uint64_t timestamp) {
+ return DocumentContext(docId, timestamp, getBuilder());
+ }
+
+ DocumentContext doc1(uint64_t timestamp = 10) {
+ return doc("doc:test:1", timestamp);
+ }
+
+ // prepare + assign next serial number + handle, for each operation type.
+ void performPut(FeedToken *token, PutOperation &op) {
+ getFeedView().preparePut(op);
+ op.setSerialNum(++serial);
+ getFeedView().handlePut(token, op);
+ }
+
+ void putAndWait(const DocumentContext::List &docs) {
+ for (size_t i = 0; i < docs.size(); ++i) {
+ putAndWait(docs[i]);
+ }
+ }
+
+ void putAndWait(const DocumentContext &docCtx) {
+ FeedTokenContext token(_tracer, DocumentProtocol::REPLY_PUTDOCUMENT);
+ PutOperation op(docCtx.bid, docCtx.ts, docCtx.doc);
+ runInMaster([&] () { performPut(&token.ft, op); });
+ }
+
+ void performUpdate(FeedToken *token, UpdateOperation &op) {
+ getFeedView().prepareUpdate(op);
+ op.setSerialNum(++serial);
+ getFeedView().handleUpdate(token, op);
+ }
+
+ void updateAndWait(const DocumentContext &docCtx) {
+ FeedTokenContext token(_tracer, DocumentProtocol::REPLY_UPDATEDOCUMENT);
+ UpdateOperation op(docCtx.bid, docCtx.ts, docCtx.upd);
+ runInMaster([&] () { performUpdate(&token.ft, op); });
+ }
+
+ // Removes go through only if prepare found an existing (or previous) doc;
+ // otherwise the token is acked directly without touching the feed view.
+ void performRemove(FeedToken *token, RemoveOperation &op) {
+ getFeedView().prepareRemove(op);
+ if (op.getValidNewOrPrevDbdId()) {
+ op.setSerialNum(++serial);
+ getFeedView().handleRemove(token, op);
+ } else {
+ if (token != NULL) {
+ token->ack(op.getType(), pc._metrics);
+ }
+ }
+ }
+
+ void removeAndWait(const DocumentContext &docCtx) {
+ FeedTokenContext token(_tracer, DocumentProtocol::REPLY_REMOVEDOCUMENT);
+ RemoveOperation op(docCtx.bid, docCtx.ts, docCtx.doc->getId());
+ runInMaster([&] () { performRemove(&token.ft, op); });
+ }
+
+ void removeAndWait(const DocumentContext::List &docs) {
+ for (size_t i = 0; i < docs.size(); ++i) {
+ removeAndWait(docs[i]);
+ }
+ }
+ void performDeleteBucket(DeleteBucketOperation &op) {
+ getFeedView().prepareDeleteBucket(op);
+ op.setSerialNum(++serial);
+ getFeedView().handleDeleteBucket(op);
+ }
+
+ void performForceCommit() { getFeedView().forceCommit(serial); }
+ void forceCommitAndWait() { runInMaster([&]() { performForceCommit(); }); }
+
+ // Compares the expected event trace against what MyTracer recorded.
+ bool assertTrace(const vespalib::string &exp) {
+ return EXPECT_EQUAL(exp, _tracer._os.str());
+ }
+
+ DocumentContext::List
+ makeDummyDocs(uint32_t first, uint32_t count, uint64_t tsfirst) {
+ DocumentContext::List docs;
+ for (uint32_t i = 0; i < count; ++i) {
+ uint32_t id = first + i;
+ uint64_t ts = tsfirst + i;
+ vespalib::asciistream os;
+ os << "doc:test:" << id;
+ docs.push_back(doc(os.str(), ts));
+ }
+ return docs;
+ }
+
+ void performCompactLidSpace(uint32_t wantedLidLimit) {
+ auto &fv = getFeedView();
+ CompactLidSpaceOperation op(0, wantedLidLimit);
+ op.setSerialNum(++serial);
+ fv.handleCompactLidSpace(op);
+ }
+ void compactLidSpaceAndWait(uint32_t wantedLidLimit) {
+ runInMaster([&] () { performCompactLidSpace(wantedLidLimit); });
+ }
+};
+
+// Fixture around a SearchableFeedView (summary + attributes + index writer);
+// marks the view as having indexed fields so lid reuse is delayed correctly.
+struct SearchableFeedViewFixture : public FixtureBase
+{
+ SearchableFeedView fv;
+ SearchableFeedViewFixture(TimeStamp visibilityDelay = 0) :
+ FixtureBase(visibilityDelay),
+ fv(StoreOnlyFeedView::Context(sa,
+ sc._schema,
+ _dmsc,
+ sc.getRepo(),
+ _writeService,
+ _lidReuseDelayer,
+ _commitTimeTracker),
+ pc.getParams(),
+ FastAccessFeedView::Context(aw, _docIdLimit),
+ SearchableFeedView::Context(iw))
+ {
+ runInMaster([&]() { _lidReuseDelayer.setHasIndexedFields(true); });
+ }
+ virtual IFeedView &getFeedView() { return fv; }
+};
+
+// Fixture around a FastAccessFeedView (summary + attributes, no index writer).
+struct FastAccessFeedViewFixture : public FixtureBase
+{
+ FastAccessFeedView fv;
+ FastAccessFeedViewFixture(TimeStamp visibilityDelay = 0) :
+ FixtureBase(visibilityDelay),
+ fv(StoreOnlyFeedView::Context(sa,
+ sc._schema,
+ _dmsc,
+ sc.getRepo(),
+ _writeService,
+ _lidReuseDelayer,
+ _commitTimeTracker),
+ pc.getParams(),
+ FastAccessFeedView::Context(aw, _docIdLimit))
+ {
+ }
+ virtual IFeedView &getFeedView() { return fv; }
+};
+
+// Asserts that the meta store entry for the given lid exists and carries the
+// expected bucket id and timestamp.
+void
+assertBucketInfo(const BucketId &ebid,
+ const Timestamp &ets,
+ uint32_t lid,
+ const IDocumentMetaStore &metaStore)
+{
+ document::GlobalId gid;
+ EXPECT_TRUE(metaStore.getGid(lid, gid));
+ search::DocumentMetaData meta = metaStore.getMetaData(gid);
+ EXPECT_TRUE(meta.valid());
+ BucketId abid; // NOTE(review): unused local — apparent leftover
+ EXPECT_EQUAL(ebid, meta.bucketId);
+ Timestamp ats; // NOTE(review): unused local — apparent leftover
+ EXPECT_EQUAL(ets, meta.timestamp);
+}
+
+// Asserts the two lid vectors have the same size and that every expected lid
+// occurs in the actual vector (order-insensitive).
+void
+assertLidVector(const MyLidVector &exp, const MyLidVector &act)
+{
+ EXPECT_EQUAL(exp.size(), act.size());
+ for (size_t i = 0; i < exp.size(); ++i) {
+ EXPECT_TRUE(std::find(act.begin(), act.end(), exp[i]) != act.end());
+ }
+}
+
+// Asserts the attribute writer recorded the expected last update triple.
+// NOTE(review): 'adapter' is taken by value, copying the whole stub on every
+// call — a const reference would suffice.
+void
+assertAttributeUpdate(SerialNum serialNum,
+ const document::DocumentId &docId,
+ DocumentIdT lid,
+ MyAttributeWriter adapter)
+{
+ EXPECT_EQUAL(serialNum, adapter._updateSerial);
+ EXPECT_EQUAL(docId, adapter._updateDocId);
+ EXPECT_EQUAL(lid, adapter._updateLid);
+}
+
+
+// put() must register the document in the meta store with correct bucket info
+// and a non-zero bucket checksum.
+TEST_F("require that put() updates document meta store with bucket info",
+ SearchableFeedViewFixture)
+{
+ DocumentContext dc = f.doc1();
+ f.putAndWait(dc);
+
+ assertBucketInfo(dc.bid, dc.ts, 1, f.getMetaStore());
+ // TODO: rewrite to use getBucketInfo() when available
+ BucketInfo bucketInfo = f.getBucketDB()->get(dc.bid);
+ EXPECT_EQUAL(1u, bucketInfo.getDocumentCount());
+ EXPECT_NOT_EQUAL(bucketInfo.getChecksum(), BucketChecksum(0));
+}
+
+// put() must reach the attribute writer with serial/docid/lid and bump the
+// doc id limit past the assigned lid.
+TEST_F("require that put() calls attribute adapter", SearchableFeedViewFixture)
+{
+ DocumentContext dc = f.doc1();
+ EXPECT_EQUAL(0u, f._docIdLimit.get());
+ f.putAndWait(dc);
+
+ EXPECT_EQUAL(1u, f.maw._putSerial);
+ EXPECT_EQUAL(DocumentId("doc:test:1"), f.maw._putDocId);
+ EXPECT_EQUAL(1u, f.maw._putLid);
+ EXPECT_EQUAL(2u, f._docIdLimit.get()); // limit is one past the highest lid
+}
+
+// update() must refresh the meta store timestamp and change the bucket
+// checksum (while keeping the document count unchanged).
+TEST_F("require that update() updates document meta store with bucket info",
+ SearchableFeedViewFixture)
+{
+ DocumentContext dc1 = f.doc1(10);
+ DocumentContext dc2 = f.doc1(20);
+ f.putAndWait(dc1);
+ BucketChecksum bcs = f.getBucketDB()->get(dc1.bid).getChecksum();
+ f.updateAndWait(dc2);
+
+ assertBucketInfo(dc1.bid, Timestamp(20), 1, f.getMetaStore());
+ // TODO: rewrite to use getBucketInfo() when available
+ BucketInfo bucketInfo = f.getBucketDB()->get(dc1.bid);
+ EXPECT_EQUAL(1u, bucketInfo.getDocumentCount());
+ EXPECT_NOT_EQUAL(bucketInfo.getChecksum(), bcs);
+ EXPECT_NOT_EQUAL(bucketInfo.getChecksum(), BucketChecksum(0));
+}
+
+// update() must be forwarded to the attribute writer with the next serial.
+TEST_F("require that update() calls attribute adapter", SearchableFeedViewFixture)
+{
+ DocumentContext dc1 = f.doc1(10);
+ DocumentContext dc2 = f.doc1(20);
+ f.putAndWait(dc1);
+ f.updateAndWait(dc2);
+
+ assertAttributeUpdate(2u, DocumentId("doc:test:1"), 1u, f.maw);
+}
+
+// remove() must drop the document from the meta store (removes are not
+// remembered) and restore the bucket checksum to its pre-put value.
+TEST_F("require that remove() updates document meta store with bucket info",
+ SearchableFeedViewFixture)
+{
+ DocumentContext dc1 = f.doc("userdoc:test:1:1", 10);
+ DocumentContext dc2 = f.doc("userdoc:test:1:2", 11);
+ f.putAndWait(dc1);
+ BucketChecksum bcs1 = f.getBucketDB()->get(dc1.bid).getChecksum();
+ f.putAndWait(dc2);
+ BucketChecksum bcs2 = f.getBucketDB()->get(dc2.bid).getChecksum();
+ f.removeAndWait(DocumentContext("userdoc:test:1:2", 20, f.getBuilder()));
+
+ assertBucketInfo(dc1.bid, Timestamp(10), 1, f.getMetaStore());
+ EXPECT_FALSE(f.getMetaStore().validLid(2)); // don't remember remove
+ // TODO: rewrite to use getBucketInfo() when available
+ BucketInfo bucketInfo = f.getBucketDB()->get(dc1.bid);
+ EXPECT_EQUAL(1u, bucketInfo.getDocumentCount());
+ EXPECT_NOT_EQUAL(bucketInfo.getChecksum(), bcs2);
+ EXPECT_EQUAL(bucketInfo.getChecksum(), bcs1);
+}
+
+// remove() must reach the attribute writer with the next serial and the lid
+// of the previously put document.
+TEST_F("require that remove() calls attribute adapter", SearchableFeedViewFixture)
+{
+ DocumentContext dc1 = f.doc1(10);
+ DocumentContext dc2 = f.doc1(20);
+ f.putAndWait(dc1);
+ f.removeAndWait(dc2);
+
+ EXPECT_EQUAL(2u, f.maw._removeSerial);
+ EXPECT_EQUAL(1u, f.maw._removeLid);
+}
+
+// Asserts the observed execute counts on the master and index threads;
+// returns false on first mismatch so callers can wrap in EXPECT_TRUE.
+bool
+assertThreadObserver(uint32_t masterExecuteCnt,
+ uint32_t indexExecuteCnt,
+ const test::ThreadingServiceObserver &observer)
+{
+ if (!EXPECT_EQUAL(masterExecuteCnt, observer.masterObserver().getExecuteCnt())) return false;
+ if (!EXPECT_EQUAL(indexExecuteCnt, observer.indexObserver().getExecuteCnt())) return false;
+ return true;
+}
+
+// remove() must trigger removeComplete() on the meta store, routed through
+// the index thread and back to the master thread (checked via execute counts).
+TEST_F("require that remove() calls removeComplete() via delayed thread service",
+ SearchableFeedViewFixture)
+{
+ EXPECT_TRUE(assertThreadObserver(1, 0, f.writeServiceObserver()));
+ f.putAndWait(f.doc1(10));
+ // put index fields handled in index thread
+ EXPECT_TRUE(assertThreadObserver(2, 1, f.writeServiceObserver()));
+ f.removeAndWait(f.doc1(20));
+ // remove index fields handled in index thread
+ // delayed remove complete handled in same index thread, then master thread
+ EXPECT_TRUE(assertThreadObserver(4, 2, f.writeServiceObserver()));
+ EXPECT_EQUAL(1u, f.metaStoreObserver()._removeCompleteCnt);
+ EXPECT_EQUAL(1u, f.metaStoreObserver()._removeCompleteLid);
+}
+
+// handleDeleteBucket() must remove exactly the documents in the deleted
+// bucket (user 1) from meta store, index, summary and attributes, leaving
+// the other bucket (user 2) intact.
+TEST_F("require that handleDeleteBucket() removes documents", SearchableFeedViewFixture)
+{
+ DocumentContext::List docs;
+ docs.push_back(f.doc("userdoc:test:1:1", 10));
+ docs.push_back(f.doc("userdoc:test:1:2", 11));
+ docs.push_back(f.doc("userdoc:test:1:3", 12));
+ docs.push_back(f.doc("userdoc:test:2:1", 13));
+ docs.push_back(f.doc("userdoc:test:2:2", 14));
+
+ f.putAndWait(docs);
+
+ DocumentIdT lid;
+ EXPECT_TRUE(f.getMetaStore().getLid(docs[0].doc->getId().getGlobalId(), lid));
+ EXPECT_EQUAL(1u, lid);
+ EXPECT_TRUE(f.getMetaStore().getLid(docs[1].doc->getId().getGlobalId(), lid));
+ EXPECT_EQUAL(2u, lid);
+ EXPECT_TRUE(f.getMetaStore().getLid(docs[2].doc->getId().getGlobalId(), lid));
+ EXPECT_EQUAL(3u, lid);
+
+ // delete bucket for user 1
+ DeleteBucketOperation op(docs[0].bid);
+ f.runInMaster([&] () { f.performDeleteBucket(op); });
+
+ EXPECT_EQUAL(0u, f.getBucketDB()->get(docs[0].bid).getDocumentCount());
+ EXPECT_EQUAL(2u, f.getBucketDB()->get(docs[3].bid).getDocumentCount());
+ EXPECT_FALSE(f.getMetaStore().getLid(docs[0].doc->getId().getGlobalId(), lid));
+ EXPECT_FALSE(f.getMetaStore().getLid(docs[1].doc->getId().getGlobalId(), lid));
+ EXPECT_FALSE(f.getMetaStore().getLid(docs[2].doc->getId().getGlobalId(), lid));
+ MyLidVector exp = MyLidVector().add(1).add(2).add(3);
+ assertLidVector(exp, f.miw._removes);
+ assertLidVector(exp, f.msa._removes);
+ assertLidVector(exp, f.maw._removes);
+}
+
+// Shared post-condition for the "removes are not remembered" test: after
+// removing docs[0] (lid 1) and docs[3] (lid 4), three documents remain and
+// lids 1 and 4 are gone from index, summary and document store.
+void
+assertPostConditionAfterRemoves(const DocumentContext::List &docs,
+ SearchableFeedViewFixture &f)
+{
+ EXPECT_EQUAL(3u, f.getMetaStore().getNumUsedLids());
+ EXPECT_FALSE(f.getMetaData(docs[0]).valid());
+ EXPECT_TRUE(f.getMetaData(docs[1]).valid());
+ EXPECT_FALSE(f.getMetaData(docs[1]).removed);
+ EXPECT_TRUE(f.getMetaData(docs[2]).valid());
+ EXPECT_FALSE(f.getMetaData(docs[2]).removed);
+ EXPECT_FALSE(f.getMetaData(docs[3]).valid());
+ EXPECT_TRUE(f.getMetaData(docs[4]).valid());
+ EXPECT_FALSE(f.getMetaData(docs[4]).removed);
+
+ assertLidVector(MyLidVector().add(1).add(4), f.miw._removes);
+ assertLidVector(MyLidVector().add(1).add(4), f.msa._removes);
+ MyDocumentStore::DocMap &sdocs = f.msa._store._docs;
+ EXPECT_EQUAL(3u, sdocs.size());
+ EXPECT_TRUE(sdocs.find(1) == sdocs.end());
+ EXPECT_TRUE(sdocs.find(4) == sdocs.end());
+}
+
+// Removed documents leave no trace: repeated removes are no-ops, and re-added
+// documents get freshly reused lids (1 and 4) with correct stored content.
+TEST_F("require that removes are not remembered", SearchableFeedViewFixture)
+{
+ DocumentContext::List docs;
+ docs.push_back(f.doc("userdoc:test:1:1", 10));
+ docs.push_back(f.doc("userdoc:test:1:2", 11));
+ docs.push_back(f.doc("userdoc:test:1:3", 12));
+ docs.push_back(f.doc("userdoc:test:2:1", 13));
+ docs.push_back(f.doc("userdoc:test:2:2", 14));
+
+ f.putAndWait(docs);
+ f.removeAndWait(docs[0]);
+ f.removeAndWait(docs[3]);
+ assertPostConditionAfterRemoves(docs, f);
+
+ // try to remove again : should have little effect
+ f.removeAndWait(docs[0]);
+ f.removeAndWait(docs[3]);
+ assertPostConditionAfterRemoves(docs, f);
+
+ // re-add docs
+ f.putAndWait(docs[3]);
+ f.putAndWait(docs[0]);
+ EXPECT_EQUAL(5u, f.getMetaStore().getNumUsedLids());
+ EXPECT_TRUE(f.getMetaData(docs[0]).valid());
+ EXPECT_TRUE(f.getMetaData(docs[1]).valid());
+ EXPECT_TRUE(f.getMetaData(docs[2]).valid());
+ EXPECT_TRUE(f.getMetaData(docs[3]).valid());
+ EXPECT_TRUE(f.getMetaData(docs[4]).valid());
+ EXPECT_FALSE(f.getMetaData(docs[0]).removed);
+ EXPECT_FALSE(f.getMetaData(docs[1]).removed);
+ EXPECT_FALSE(f.getMetaData(docs[2]).removed);
+ EXPECT_FALSE(f.getMetaData(docs[3]).removed);
+ EXPECT_FALSE(f.getMetaData(docs[4]).removed);
+ EXPECT_EQUAL(5u, f.msa._store._docs.size());
+ // docs[3] was re-added first, so it reuses lid 1; docs[0] reuses lid 4.
+ const Document::SP &doc1 = f.msa._store._docs[1];
+ EXPECT_EQUAL(docs[3].doc->getId(), doc1->getId());
+ EXPECT_EQUAL(docs[3].doc->getId().toString(),
+ doc1->getValue("s1")->toString());
+ const Document::SP &doc4 = f.msa._store._docs[4];
+ EXPECT_EQUAL(docs[0].doc->getId(), doc4->getId());
+ EXPECT_EQUAL(docs[0].doc->getId().toString(),
+ doc4->getValue("s1")->toString());
+ EXPECT_EQUAL(5u, f.msa._store._docs.size());
+
+ f.removeAndWait(docs[0]);
+ f.removeAndWait(docs[3]);
+ EXPECT_EQUAL(3u, f.msa._store._docs.size());
+}
+
+TEST_F("require that heartbeat propagates to index- and attributeadapter",
+ SearchableFeedViewFixture)
+{
+ f.runInMaster([&] () { f.fv.heartBeat(2); });
+ EXPECT_EQUAL(1, f.miw._heartBeatCount);
+ EXPECT_EQUAL(1, f.maw._heartBeatCount);
+}
+
+template <typename Fixture>
+void putDocumentAndUpdate(Fixture &f, const vespalib::string &fieldName)
+{
+ DocumentContext dc1 = f.doc1();
+ f.putAndWait(dc1);
+ EXPECT_EQUAL(1u, f.msa._store._lastSyncToken);
+
+ DocumentContext dc2("doc:test:1", 20, f.getBuilder());
+ dc2.addFieldUpdate(f.getBuilder(), fieldName);
+ f.updateAndWait(dc2);
+}
+
+template <typename Fixture>
+void requireThatUpdateOnlyUpdatesAttributeAndNotDocumentStore(Fixture &f)
+{
+ putDocumentAndUpdate(f, "a1");
+
+ EXPECT_EQUAL(1u, f.msa._store._lastSyncToken); // document store not updated
+ assertAttributeUpdate(2u, DocumentId("doc:test:1"), 1, f.maw);
+}
+
+TEST_F("require that update() to fast-access attribute only updates attribute and not document store",
+ FastAccessFeedViewFixture)
+{
+ f.maw._attrs.insert("a1"); // mark a1 as fast-access attribute field
+ requireThatUpdateOnlyUpdatesAttributeAndNotDocumentStore(f);
+}
+
+TEST_F("require that update() to attribute only updates attribute and not document store",
+ SearchableFeedViewFixture)
+{
+ f.maw._attrs.insert("a1"); // mark a1 as attribute field
+ requireThatUpdateOnlyUpdatesAttributeAndNotDocumentStore(f);
+}
+
+TEST_F("require that update to non fast-access attribute also updates document store",
+ FastAccessFeedViewFixture)
+{
+ putDocumentAndUpdate(f, "a1");
+
+ EXPECT_EQUAL(2u, f.msa._store._lastSyncToken); // document store updated
+ assertAttributeUpdate(2u, DocumentId("doc:test:1"), 1, f.maw);
+}
+
+template <typename Fixture>
+void requireThatUpdateUpdatesAttributeAndDocumentStore(Fixture &f,
+ const vespalib::string &
+ fieldName)
+{
+ putDocumentAndUpdate(f, fieldName);
+
+ EXPECT_EQUAL(2u, f.msa._store._lastSyncToken); // document store updated
+ assertAttributeUpdate(2u, DocumentId("doc:test:1"), 1, f.maw);
+}
+
+TEST_F("require that update() to fast-access predicate attribute updates attribute and document store",
+ FastAccessFeedViewFixture)
+{
+ f.maw._attrs.insert("a2"); // mark a2 as fast-access attribute field
+ requireThatUpdateUpdatesAttributeAndDocumentStore(f, "a2");
+}
+
+TEST_F("require that update() to predicate attribute updates attribute and document store",
+ SearchableFeedViewFixture)
+{
+ f.maw._attrs.insert("a2"); // mark a2 as attribute field
+ requireThatUpdateUpdatesAttributeAndDocumentStore(f, "a2");
+}
+
+TEST_F("require that update() to fast-access tensor attribute updates attribute and document store",
+ FastAccessFeedViewFixture)
+{
+ f.maw._attrs.insert("a3"); // mark a3 as fast-access attribute field
+ requireThatUpdateUpdatesAttributeAndDocumentStore(f, "a3");
+}
+
+TEST_F("require that update() to tensor attribute updates attribute and document store",
+ SearchableFeedViewFixture)
+{
+ f.maw._attrs.insert("a3"); // mark a3 as attribute field
+ requireThatUpdateUpdatesAttributeAndDocumentStore(f, "a3");
+}
+
+TEST_F("require that compactLidSpace() propagates to document meta store and "
+ "blocks lid space shrinkage until generation is no longer used",
+ SearchableFeedViewFixture)
+{
+ EXPECT_TRUE(assertThreadObserver(1, 0, f.writeServiceObserver()));
+ CompactLidSpaceOperation op(0, 99);
+ op.setSerialNum(1);
+ f.runInMaster([&] () { f.fv.handleCompactLidSpace(op); });
+ // performIndexForceCommit in index thread, then completion callback
+ // in master thread.
+ EXPECT_TRUE(assertThreadObserver(3, 1, f.writeServiceObserver()));
+ EXPECT_EQUAL(99u, f.metaStoreObserver()._compactLidSpaceLidLimit);
+ EXPECT_EQUAL(1u, f.metaStoreObserver()._holdUnblockShrinkLidSpaceCnt);
+ EXPECT_EQUAL(99u, f._docIdLimit.get());
+}
+
+TEST_F("require that compactLidSpace() doesn't propagate to "
+ "document meta store and "
+ "blocks lid space shrinkage until generation is no longer used",
+ SearchableFeedViewFixture)
+{
+ EXPECT_TRUE(assertThreadObserver(1, 0, f.writeServiceObserver()));
+ CompactLidSpaceOperation op(0, 99);
+ op.setSerialNum(0);
+ f.runInMaster([&] () { f.fv.handleCompactLidSpace(op); });
+ // Delayed holdUnblockShrinkLidSpace() in index thread, then master thread
+ EXPECT_TRUE(assertThreadObserver(2, 0, f.writeServiceObserver()));
+ EXPECT_EQUAL(0u, f.metaStoreObserver()._compactLidSpaceLidLimit);
+ EXPECT_EQUAL(0u, f.metaStoreObserver()._holdUnblockShrinkLidSpaceCnt);
+}
+
+TEST_F("require that compactLidSpace() propagates to attributeadapter",
+ FastAccessFeedViewFixture)
+{
+ f.runInMaster([&] () { f.fv.handleCompactLidSpace(CompactLidSpaceOperation(0, 99)); });
+ EXPECT_EQUAL(99u, f.maw._wantedLidLimit);
+}
+
+
+TEST_F("require that commit is called if visibility delay is 0",
+ SearchableFeedViewFixture)
+{
+ DocumentContext dc = f.doc1();
+ f.putAndWait(dc);
+ EXPECT_EQUAL(1u, f.miw._commitCount);
+ EXPECT_EQUAL(1u, f.maw._commitCount);
+ f.removeAndWait(dc);
+ EXPECT_EQUAL(2u, f.miw._commitCount);
+ EXPECT_EQUAL(2u, f.maw._commitCount);
+ f.assertTrace("put(adapter=attribute,serialNum=1,lid=1,commit=1),"
+ "put(adapter=index,serialNum=1,lid=1,commit=0),"
+ "commit(adapter=index,serialNum=1),"
+ "ack(Result(0, )),"
+ "remove(adapter=attribute,serialNum=2,lid=1,commit=1),"
+ "remove(adapter=index,serialNum=2,lid=1,commit=0),"
+ "commit(adapter=index,serialNum=2),"
+ "ack(Result(0, ))");
+}
+
+const TimeStamp LONG_DELAY(TimeStamp::Seconds(60.0));
+const TimeStamp SHORT_DELAY(TimeStamp::Seconds(0.5));
+
+TEST_F("require that commit is not called when inside a commit interval",
+ SearchableFeedViewFixture(LONG_DELAY))
+{
+ DocumentContext dc = f.doc1();
+ f.putAndWait(dc);
+ EXPECT_EQUAL(0u, f.miw._commitCount);
+ EXPECT_EQUAL(0u, f.maw._commitCount);
+ EXPECT_EQUAL(0u, f._docIdLimit.get());
+ f.removeAndWait(dc);
+ EXPECT_EQUAL(0u, f.miw._commitCount);
+ EXPECT_EQUAL(0u, f.maw._commitCount);
+ EXPECT_EQUAL(0u, f._docIdLimit.get());
+ f.assertTrace("ack(Result(0, )),"
+ "put(adapter=attribute,serialNum=1,lid=1,commit=0),"
+ "put(adapter=index,serialNum=1,lid=1,commit=0),"
+ "ack(Result(0, )),"
+ "remove(adapter=attribute,serialNum=2,lid=1,commit=0),"
+ "remove(adapter=index,serialNum=2,lid=1,commit=0)");
+}
+
+TEST_F("require that commit is called when crossing a commit interval",
+ SearchableFeedViewFixture(SHORT_DELAY))
+{
+ FastOS_Thread::Sleep(SHORT_DELAY.ms() + 10);
+ DocumentContext dc = f.doc1();
+ f.putAndWait(dc);
+ EXPECT_EQUAL(1u, f.miw._commitCount);
+ EXPECT_EQUAL(1u, f.maw._commitCount);
+ EXPECT_EQUAL(2u, f._docIdLimit.get());
+ FastOS_Thread::Sleep(SHORT_DELAY.ms() + 10);
+ f.removeAndWait(dc);
+ EXPECT_EQUAL(2u, f.miw._commitCount);
+ EXPECT_EQUAL(2u, f.maw._commitCount);
+ f.assertTrace("ack(Result(0, )),"
+ "put(adapter=attribute,serialNum=1,lid=1,commit=1),"
+ "put(adapter=index,serialNum=1,lid=1,commit=0),"
+ "commit(adapter=index,serialNum=1),"
+ "ack(Result(0, )),"
+ "remove(adapter=attribute,serialNum=2,lid=1,commit=1),"
+ "remove(adapter=index,serialNum=2,lid=1,commit=0),"
+ "commit(adapter=index,serialNum=2)");
+}
+
+
+TEST_F("require that commit is not implicitly called after "
+ "handover to maintenance job",
+ SearchableFeedViewFixture(SHORT_DELAY))
+{
+ f._commitTimeTracker.setReplayDone();
+ FastOS_Thread::Sleep(SHORT_DELAY.ms() + 10);
+ DocumentContext dc = f.doc1();
+ f.putAndWait(dc);
+ EXPECT_EQUAL(0u, f.miw._commitCount);
+ EXPECT_EQUAL(0u, f.maw._commitCount);
+ EXPECT_EQUAL(0u, f._docIdLimit.get());
+ FastOS_Thread::Sleep(SHORT_DELAY.ms() + 10);
+ f.removeAndWait(dc);
+ EXPECT_EQUAL(0u, f.miw._commitCount);
+ EXPECT_EQUAL(0u, f.maw._commitCount);
+ EXPECT_EQUAL(0u, f._docIdLimit.get());
+ f.assertTrace("ack(Result(0, )),"
+ "put(adapter=attribute,serialNum=1,lid=1,commit=0),"
+ "put(adapter=index,serialNum=1,lid=1,commit=0),"
+ "ack(Result(0, )),"
+ "remove(adapter=attribute,serialNum=2,lid=1,commit=0),"
+ "remove(adapter=index,serialNum=2,lid=1,commit=0)");
+}
+
+TEST_F("require that forceCommit updates docid limit",
+ SearchableFeedViewFixture(LONG_DELAY))
+{
+ f._commitTimeTracker.setReplayDone();
+ DocumentContext dc = f.doc1();
+ f.putAndWait(dc);
+ EXPECT_EQUAL(0u, f.miw._commitCount);
+ EXPECT_EQUAL(0u, f.maw._commitCount);
+ EXPECT_EQUAL(0u, f._docIdLimit.get());
+ f.forceCommitAndWait();
+ EXPECT_EQUAL(1u, f.miw._commitCount);
+ EXPECT_EQUAL(1u, f.maw._commitCount);
+ EXPECT_EQUAL(2u, f._docIdLimit.get());
+ f.assertTrace("ack(Result(0, )),"
+ "put(adapter=attribute,serialNum=1,lid=1,commit=0),"
+ "put(adapter=index,serialNum=1,lid=1,commit=0),"
+ "commit(adapter=attribute,serialNum=1),"
+ "commit(adapter=index,serialNum=1)");
+}
+
+TEST_F("require that forceCommit updates docid limit during shrink",
+ SearchableFeedViewFixture(LONG_DELAY))
+{
+ f._commitTimeTracker.setReplayDone();
+ f.putAndWait(f.makeDummyDocs(0, 3, 1000));
+ EXPECT_EQUAL(0u, f._docIdLimit.get());
+ f.forceCommitAndWait();
+ EXPECT_EQUAL(4u, f._docIdLimit.get());
+ f.removeAndWait(f.makeDummyDocs(1, 2, 2000));
+ EXPECT_EQUAL(4u, f._docIdLimit.get());
+ f.forceCommitAndWait();
+ EXPECT_EQUAL(4u, f._docIdLimit.get());
+ f.compactLidSpaceAndWait(2);
+ EXPECT_EQUAL(2u, f._docIdLimit.get());
+ f.forceCommitAndWait();
+ EXPECT_EQUAL(2u, f._docIdLimit.get());
+ f.putAndWait(f.makeDummyDocs(1, 1, 3000));
+ EXPECT_EQUAL(2u, f._docIdLimit.get());
+ f.forceCommitAndWait();
+ EXPECT_EQUAL(3u, f._docIdLimit.get());
+}
+
+TEST_MAIN()
+{
+ TEST_RUN_ALL();
+}
+
diff --git a/searchcore/src/tests/proton/documentdb/fileconfigmanager/.gitignore b/searchcore/src/tests/proton/documentdb/fileconfigmanager/.gitignore
new file mode 100644
index 00000000000..7aac27360eb
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/fileconfigmanager/.gitignore
@@ -0,0 +1,5 @@
+config-mycfg.cpp
+config-mycfg.h
+/out
+/out2
+searchcore_fileconfigmanager_test_app
diff --git a/searchcore/src/tests/proton/documentdb/fileconfigmanager/CMakeLists.txt b/searchcore/src/tests/proton/documentdb/fileconfigmanager/CMakeLists.txt
new file mode 100644
index 00000000000..458607e66c5
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/fileconfigmanager/CMakeLists.txt
@@ -0,0 +1,11 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_fileconfigmanager_test_app
+ SOURCES
+ fileconfigmanager_test.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_pcommon
+ searchcore_fconfig
+)
+vespa_add_test(NAME searchcore_fileconfigmanager_test_app COMMAND sh fileconfigmanager_test.sh)
+vespa_generate_config(searchcore_fileconfigmanager_test_app mycfg.def)
diff --git a/searchcore/src/tests/proton/documentdb/fileconfigmanager/DESC b/searchcore/src/tests/proton/documentdb/fileconfigmanager/DESC
new file mode 100644
index 00000000000..ab2f0ed8b46
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/fileconfigmanager/DESC
@@ -0,0 +1 @@
+fileconfigmanager test. Take a look at fileconfigmanager_test.cpp for details.
diff --git a/searchcore/src/tests/proton/documentdb/fileconfigmanager/FILES b/searchcore/src/tests/proton/documentdb/fileconfigmanager/FILES
new file mode 100644
index 00000000000..842440c7182
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/fileconfigmanager/FILES
@@ -0,0 +1 @@
+fileconfigmanager_test.cpp
diff --git a/searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/attributes.cfg b/searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/attributes.cfg
new file mode 100644
index 00000000000..fa887fb404e
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/attributes.cfg
@@ -0,0 +1,2 @@
+attribute[1]
+attribute[0].name "afield"
diff --git a/searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/documenttypes.cfg b/searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/documenttypes.cfg
new file mode 100644
index 00000000000..9cfeb00111a
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/documenttypes.cfg
@@ -0,0 +1,15 @@
+documenttype[1]
+documenttype[0].bodystruct -1270491200
+documenttype[0].headerstruct 306916075
+documenttype[0].id -877171244
+documenttype[0].name "test"
+documenttype[0].version 0
+documenttype[0].datatype[2]
+documenttype[0].datatype[0].id -1270491200
+documenttype[0].datatype[0].type STRUCT
+documenttype[0].datatype[0].sstruct.name "test.body"
+documenttype[0].datatype[0].sstruct.version 0
+documenttype[0].datatype[1].id 306916075
+documenttype[0].datatype[1].type STRUCT
+documenttype[0].datatype[1].sstruct.name "test.header"
+documenttype[0].datatype[1].sstruct.version 0
diff --git a/searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/indexschema.cfg b/searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/indexschema.cfg
new file mode 100644
index 00000000000..fdd519542db
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/indexschema.cfg
@@ -0,0 +1,3 @@
+indexfield[1]
+indexfield[0].name "ifield"
+
diff --git a/searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/juniperrc.cfg b/searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/juniperrc.cfg
new file mode 100644
index 00000000000..8f89b73e22d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/juniperrc.cfg
@@ -0,0 +1,2 @@
+override[1]
+override[0].fieldname "jfield"
diff --git a/searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/mycfg.cfg b/searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/mycfg.cfg
new file mode 100644
index 00000000000..09e75cc45f8
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/mycfg.cfg
@@ -0,0 +1 @@
+myField "foo"
diff --git a/searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/rank-profiles.cfg b/searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/rank-profiles.cfg
new file mode 100644
index 00000000000..a8ed6c47477
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/rank-profiles.cfg
@@ -0,0 +1,2 @@
+rankprofile[1]
+rankprofile[0].name "default"
diff --git a/searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/summary.cfg b/searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/summary.cfg
new file mode 100644
index 00000000000..02e3b0cdafe
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/summary.cfg
@@ -0,0 +1,7 @@
+defaultsummaryid 1
+classes[1]
+classes[0].id 1
+classes[0].name "sclass"
+classes[0].fields[1]
+classes[0].fields[0].name "sfield"
+classes[0].fields[0].type "longstring"
diff --git a/searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/summarymap.cfg b/searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/summarymap.cfg
new file mode 100644
index 00000000000..17ce68e3319
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/fileconfigmanager/cfg/summarymap.cfg
@@ -0,0 +1,3 @@
+override[1]
+override[0].field "ofield"
+override[0].command "empty"
diff --git a/searchcore/src/tests/proton/documentdb/fileconfigmanager/fileconfigmanager_test.cpp b/searchcore/src/tests/proton/documentdb/fileconfigmanager/fileconfigmanager_test.cpp
new file mode 100644
index 00000000000..0960ff6b2fe
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/fileconfigmanager/fileconfigmanager_test.cpp
@@ -0,0 +1,322 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("fileconfigmanager_test");
+
+#include "config-mycfg.h"
+#include <vespa/searchcore/proton/server/fileconfigmanager.h>
+#include <vespa/searchcore/proton/server/documentdbconfigmanager.h>
+#include <vespa/vespalib/io/fileutil.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/util/closure.h>
+#include <vespa/searchcore/proton/common/schemautil.h>
+
+using namespace config;
+using namespace document;
+using namespace proton;
+using namespace search::index;
+using namespace search;
+using namespace vespa::config::search::core;
+using namespace vespa::config::search;
+
+typedef DocumentDBConfigHelper DBCM;
+typedef DocumentDBConfig::DocumenttypesConfigSP DocumenttypesConfigSP;
+using vespalib::nbostream;
+
+vespalib::string myId("myconfigid");
+
+namespace
+{
+
+DocumentDBConfig::SP
+getConfig(int64_t generation, const Schema::SP &schema)
+{
+ typedef DocumentDBConfig::RankProfilesConfigSP RankProfilesConfigSP;
+ typedef DocumentDBConfig::IndexschemaConfigSP IndexschemaConfigSP;
+ typedef DocumentDBConfig::AttributesConfigSP AttributesConfigSP;
+ typedef DocumentDBConfig::SummaryConfigSP SummaryConfigSP;
+ typedef DocumentDBConfig::SummarymapConfigSP SummarymapConfigSP;
+ typedef DocumentDBConfig::JuniperrcConfigSP JuniperrcConfigSP;
+ typedef DocumentDBConfig::DocumenttypesConfigSP DocumenttypesConfigSP;
+
+ RankProfilesConfigSP rp(new vespa::config::search::RankProfilesConfig);
+ IndexschemaConfigSP is(new vespa::config::search::IndexschemaConfig);
+ AttributesConfigSP a(new vespa::config::search::AttributesConfig);
+ SummaryConfigSP s(new vespa::config::search::SummaryConfig);
+ SummarymapConfigSP sm(new vespa::config::search::SummarymapConfig);
+ JuniperrcConfigSP j(new vespa::config::search::summary::JuniperrcConfig);
+ DocumenttypesConfigSP dt(new document::DocumenttypesConfig);
+ document::DocumentTypeRepo::SP dtr(new document::DocumentTypeRepo);
+ search::TuneFileDocumentDB::SP tf(new search::TuneFileDocumentDB);
+ DocumentDBMaintenanceConfig::SP ddbm(new DocumentDBMaintenanceConfig);
+
+ return DocumentDBConfig::SP(
+ new DocumentDBConfig(
+ generation,
+ rp,
+ is,
+ a,
+ s,
+ sm,
+ j,
+ dt,
+ dtr,
+ tf,
+ schema,
+ ddbm,
+ "client", "test"));
+}
+
+Schema::SP
+getSchema(int step)
+{
+ Schema::SP schema(new Schema);
+ schema->addIndexField(Schema::IndexField("foo1", Schema::STRING));
+ if (step < 2) {
+ schema->addIndexField(Schema::IndexField("foo2", Schema::STRING));
+ }
+ if (step < 1) {
+ schema->addIndexField(Schema::IndexField("foo3", Schema::STRING));
+ }
+ return schema;
+}
+
+ }
+
+DocumentDBConfig::SP
+makeBaseConfigSnapshot()
+{
+ config::DirSpec spec("cfg");
+ ConfigKeySet extraKeySet;
+ extraKeySet.add<MycfgConfig>("");
+ DBCM dbcm(spec, "test", extraKeySet);
+ DocumenttypesConfigSP dtcfg(config::ConfigGetter<DocumenttypesConfig>::getConfig("", spec).release());
+ BootstrapConfig::SP b(new BootstrapConfig(1,
+ dtcfg,
+ DocumentTypeRepo::SP(new DocumentTypeRepo(*dtcfg)),
+ BootstrapConfig::ProtonConfigSP(new ProtonConfig()),
+ TuneFileDocumentDB::SP(new TuneFileDocumentDB())));
+ dbcm.forwardConfig(b);
+ dbcm.nextGeneration(0);
+ DocumentDBConfig::SP snap = dbcm.getConfig();
+ snap->setConfigId(myId);
+ ASSERT_TRUE(snap.get() != NULL);
+ return snap;
+}
+
+Schema
+makeHistorySchema()
+{
+ Schema hs;
+ hs.addIndexField(Schema::IndexField("history", Schema::STRING));
+ return hs;
+}
+
+void
+saveBaseConfigSnapshot(const DocumentDBConfig &snap, const Schema &history, SerialNum num)
+{
+ FileConfigManager cm("out", myId, snap.getDocTypeName());
+ cm.saveConfig(snap, history, num);
+}
+
+
+DocumentDBConfig::SP
+makeEmptyConfigSnapshot(void)
+{
+ return DocumentDBConfig::SP(new DocumentDBConfig(
+ 0,
+ DocumentDBConfig::RankProfilesConfigSP(),
+ DocumentDBConfig::IndexschemaConfigSP(),
+ DocumentDBConfig::AttributesConfigSP(),
+ DocumentDBConfig::SummaryConfigSP(),
+ DocumentDBConfig::SummarymapConfigSP(),
+ DocumentDBConfig::JuniperrcConfigSP(),
+ DocumenttypesConfigSP(),
+ DocumentTypeRepo::SP(),
+ TuneFileDocumentDB::SP(
+ new TuneFileDocumentDB()),
+ Schema::SP(),
+ DocumentDBMaintenanceConfig::SP(),
+ "client", "test"));
+}
+
+void incInt(int *i, const DocumentType&) { ++*i; }
+
+void
+assertEqualExtraConfigs(const DocumentDBConfig &expSnap, const DocumentDBConfig &actSnap)
+{
+ const ConfigSnapshot &exp = expSnap.getExtraConfigs();
+ const ConfigSnapshot &act = actSnap.getExtraConfigs();
+ EXPECT_EQUAL(1u, exp.size());
+ EXPECT_EQUAL(1u, act.size());
+ std::unique_ptr<MycfgConfig> expCfg = exp.getConfig<MycfgConfig>("");
+ std::unique_ptr<MycfgConfig> actCfg = act.getConfig<MycfgConfig>("");
+ EXPECT_EQUAL("foo", expCfg->myField);
+ EXPECT_EQUAL("foo", actCfg->myField);
+}
+
+void
+assertEqualSnapshot(const DocumentDBConfig &exp, const DocumentDBConfig &act)
+{
+ EXPECT_TRUE(exp.getRankProfilesConfig() == act.getRankProfilesConfig());
+ EXPECT_TRUE(exp.getIndexschemaConfig() == act.getIndexschemaConfig());
+ EXPECT_TRUE(exp.getAttributesConfig() == act.getAttributesConfig());
+ EXPECT_TRUE(exp.getSummaryConfig() == act.getSummaryConfig());
+ EXPECT_TRUE(exp.getSummarymapConfig() == act.getSummarymapConfig());
+ EXPECT_TRUE(exp.getJuniperrcConfig() == act.getJuniperrcConfig());
+ int expTypeCount = 0;
+ int actTypeCount = 0;
+ exp.getDocumentTypeRepoSP()->forEachDocumentType(
+ *vespalib::makeClosure(incInt, &expTypeCount));
+ act.getDocumentTypeRepoSP()->forEachDocumentType(
+ *vespalib::makeClosure(incInt, &actTypeCount));
+ EXPECT_EQUAL(expTypeCount, actTypeCount);
+ EXPECT_TRUE(*exp.getSchemaSP() == *act.getSchemaSP());
+ EXPECT_EQUAL(expTypeCount, actTypeCount);
+ EXPECT_EQUAL(exp.getConfigId(), act.getConfigId());
+ assertEqualExtraConfigs(exp, act);
+}
+
+TEST_FF("requireThatConfigCanBeSavedAndLoaded", DocumentDBConfig::SP(makeBaseConfigSnapshot()),
+ Schema(makeHistorySchema()))
+{
+ saveBaseConfigSnapshot(*f1, f2, 20);
+ DocumentDBConfig::SP esnap(makeEmptyConfigSnapshot());
+ Schema::SP ehs;
+ {
+ FileConfigManager cm("out", myId, "dummy");
+ cm.loadConfig(*esnap, 20, esnap, ehs);
+ }
+ assertEqualSnapshot(*f1, *esnap);
+ EXPECT_TRUE(f2 == *ehs);
+}
+
+TEST_FF("requireThatConfigCanBeSerializedAndDeserialized", DocumentDBConfig::SP(makeBaseConfigSnapshot()),
+ Schema(makeHistorySchema()))
+{
+ saveBaseConfigSnapshot(*f1, f2, 30);
+ nbostream stream;
+ {
+ FileConfigManager cm("out", myId, "dummy");
+ cm.serializeConfig(30, stream);
+ }
+ {
+ FileConfigManager cm("out", myId, "dummy");
+ cm.deserializeConfig(40, stream);
+ }
+ DocumentDBConfig::SP fsnap(makeEmptyConfigSnapshot());
+ Schema::SP fhs;
+ {
+ FileConfigManager cm("out", myId, "dummy");
+ cm.loadConfig(*fsnap, 40, fsnap, fhs);
+ }
+ assertEqualSnapshot(*f1, *fsnap);
+ EXPECT_TRUE(f2 == *fhs);
+ EXPECT_EQUAL("dummy", fsnap->getDocTypeName());
+}
+
+TEST_FF("requireThatWipeHistoryCanBeSaved", DocumentDBConfig::SP(makeBaseConfigSnapshot()),
+ Schema(makeHistorySchema()))
+{
+ saveBaseConfigSnapshot(*f1, f2, 50);
+ {
+ FileConfigManager cm("out", myId, "dummy");
+ cm.saveWipeHistoryConfig(60, 0);
+ }
+ DocumentDBConfig::SP gsnap(makeEmptyConfigSnapshot());
+ Schema::SP ghs;
+ {
+ FileConfigManager cm("out", myId, "dummy");
+ cm.loadConfig(*gsnap, 60, gsnap, ghs);
+ }
+ assertEqualSnapshot(*f1, *gsnap);
+ EXPECT_TRUE(f2 != *ghs);
+ EXPECT_TRUE(!f2.empty());
+ EXPECT_TRUE(ghs->empty());
+}
+
+
+TEST("require that wipe history clears only portions of history")
+{
+ FileConfigManager cm("out2", myId, "dummy");
+ Schema::SP schema(getSchema(0));
+ Schema::SP history(new Schema);
+ DocumentDBConfig::SP config(getConfig(5, schema));
+ cm.saveConfig(*config, *history, 5);
+ Schema::SP oldSchema(schema);
+ schema = getSchema(1);
+ config = getConfig(6, schema);
+ history = SchemaUtil::makeHistorySchema(*schema, *oldSchema, *history,
+ 100);
+ cm.saveConfig(*config, *history, 10);
+ oldSchema = schema;
+ schema = getSchema(2);
+ config = getConfig(7, schema);
+ history = SchemaUtil::makeHistorySchema(*schema, *oldSchema, *history,
+ 200);
+ cm.saveConfig(*config, *history, 15);
+ cm.saveWipeHistoryConfig(20, 50);
+ cm.saveWipeHistoryConfig(25, 100);
+ cm.saveWipeHistoryConfig(30, 150);
+ cm.saveWipeHistoryConfig(35, 200);
+ cm.saveWipeHistoryConfig(40, 250);
+ DocumentDBConfig::SP oldconfig(config);
+ cm.loadConfig(*oldconfig, 20, config, history);
+ EXPECT_EQUAL(2u, history->getNumIndexFields());
+ oldconfig = config;
+ cm.loadConfig(*oldconfig, 25, config, history);
+ EXPECT_EQUAL(2u, history->getNumIndexFields());
+ oldconfig = config;
+ cm.loadConfig(*oldconfig, 30, config, history);
+ EXPECT_EQUAL(1u, history->getNumIndexFields());
+ oldconfig = config;
+ cm.loadConfig(*oldconfig, 35, config, history);
+ EXPECT_EQUAL(1u, history->getNumIndexFields());
+ oldconfig = config;
+ cm.loadConfig(*oldconfig, 40, config, history);
+ EXPECT_EQUAL(0u, history->getNumIndexFields());
+}
+
+TEST_FF("requireThatConfigCanBeLoadedWithoutExtraConfigsDataFile", DocumentDBConfig::SP(makeBaseConfigSnapshot()),
+ Schema(makeHistorySchema()))
+{
+ saveBaseConfigSnapshot(*f1, f2, 70);
+ EXPECT_TRUE(vespalib::unlink("out/config-70/extraconfigs.dat"));
+ DocumentDBConfig::SP esnap(makeEmptyConfigSnapshot());
+ Schema::SP ehs;
+ {
+ FileConfigManager cm("out", myId, "dummy");
+ cm.loadConfig(*esnap, 70, esnap, ehs);
+ }
+ EXPECT_EQUAL(0u, esnap->getExtraConfigs().size());
+}
+
+
+TEST_FF("requireThatVisibilityDelayIsPropagated",
+ DocumentDBConfig::SP(makeBaseConfigSnapshot()),
+ Schema(makeHistorySchema()))
+{
+ saveBaseConfigSnapshot(*f1, f2, 80);
+ DocumentDBConfig::SP esnap(makeEmptyConfigSnapshot());
+ Schema::SP ehs;
+ {
+ ProtonConfigBuilder protonConfigBuilder;
+ ProtonConfigBuilder::Documentdb ddb;
+ ddb.inputdoctypename = "dummy";
+ ddb.visibilitydelay = 61.0;
+ protonConfigBuilder.documentdb.push_back(ddb);
+ protonConfigBuilder.maxvisibilitydelay = 100.0;
+ FileConfigManager cm("out", myId, "dummy");
+ using ProtonConfigSP = BootstrapConfig::ProtonConfigSP;
+ cm.setProtonConfig(
+ ProtonConfigSP(new ProtonConfig(protonConfigBuilder)));
+ cm.loadConfig(*esnap, 70, esnap, ehs);
+ }
+ EXPECT_EQUAL(0u, esnap->getExtraConfigs().size());
+ EXPECT_EQUAL(61.0, esnap->getMaintenanceConfigSP()->getVisibilityDelay().sec());
+}
+
+
+
+TEST_MAIN() { TEST_RUN_ALL(); }
+
diff --git a/searchcore/src/tests/proton/documentdb/fileconfigmanager/fileconfigmanager_test.sh b/searchcore/src/tests/proton/documentdb/fileconfigmanager/fileconfigmanager_test.sh
new file mode 100644
index 00000000000..4d1279a8413
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/fileconfigmanager/fileconfigmanager_test.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+rm -rf out
+rm -rf out2
+$VALGRIND ./searchcore_fileconfigmanager_test_app
diff --git a/searchcore/src/tests/proton/documentdb/fileconfigmanager/mycfg.def b/searchcore/src/tests/proton/documentdb/fileconfigmanager/mycfg.def
new file mode 100644
index 00000000000..d31c1b61f07
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/fileconfigmanager/mycfg.def
@@ -0,0 +1,4 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+namespace=config
+
+myField string default=""
diff --git a/searchcore/src/tests/proton/documentdb/job_tracked_maintenance_job/.gitignore b/searchcore/src/tests/proton/documentdb/job_tracked_maintenance_job/.gitignore
new file mode 100644
index 00000000000..1e657f33c1a
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/job_tracked_maintenance_job/.gitignore
@@ -0,0 +1 @@
+searchcore_job_tracked_maintenance_job_test_app
diff --git a/searchcore/src/tests/proton/documentdb/job_tracked_maintenance_job/CMakeLists.txt b/searchcore/src/tests/proton/documentdb/job_tracked_maintenance_job/CMakeLists.txt
new file mode 100644
index 00000000000..3b81994e7da
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/job_tracked_maintenance_job/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_job_tracked_maintenance_job_test_app
+ SOURCES
+ job_tracked_maintenance_job_test.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_proton_metrics
+)
+vespa_add_test(NAME searchcore_job_tracked_maintenance_job_test_app COMMAND searchcore_job_tracked_maintenance_job_test_app)
diff --git a/searchcore/src/tests/proton/documentdb/job_tracked_maintenance_job/DESC b/searchcore/src/tests/proton/documentdb/job_tracked_maintenance_job/DESC
new file mode 100644
index 00000000000..4ba7520eab8
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/job_tracked_maintenance_job/DESC
@@ -0,0 +1,2 @@
+job tracked maintenance job test. Take a look at job_tracked_maintenance_job_test.cpp for details.
+
diff --git a/searchcore/src/tests/proton/documentdb/job_tracked_maintenance_job/FILES b/searchcore/src/tests/proton/documentdb/job_tracked_maintenance_job/FILES
new file mode 100644
index 00000000000..a871a1fa8aa
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/job_tracked_maintenance_job/FILES
@@ -0,0 +1 @@
+job_tracked_maintenance_job_test.cpp
diff --git a/searchcore/src/tests/proton/documentdb/job_tracked_maintenance_job/job_tracked_maintenance_job_test.cpp b/searchcore/src/tests/proton/documentdb/job_tracked_maintenance_job/job_tracked_maintenance_job_test.cpp
new file mode 100644
index 00000000000..e483bc35b96
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/job_tracked_maintenance_job/job_tracked_maintenance_job_test.cpp
@@ -0,0 +1,134 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("job_tracked_maintenance_test");
+
+#include <vespa/searchcore/proton/server/job_tracked_maintenance_job.h>
+#include <vespa/searchcore/proton/test/simple_job_tracker.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/util/closuretask.h>
+#include <vespa/vespalib/util/threadstackexecutor.h>
+#include <vespa/vespalib/util/sync.h>
+
+using namespace proton;
+using namespace vespalib;
+using test::SimpleJobTracker;
+typedef std::unique_ptr<Gate> GateUP;
+typedef std::vector<GateUP> GateVector;
+
+GateVector
+getGateVector(size_t size)
+{
+ GateVector retval;
+ for (size_t i = 0; i < size; ++i) {
+ retval.push_back(std::move(GateUP(new Gate())));
+ }
+ return retval;
+}
+
+struct MyMaintenanceJob : public IMaintenanceJob
+{
+ GateVector _runGates;
+ size_t _runIdx;
+ MyMaintenanceJob(size_t numRuns)
+ : IMaintenanceJob("myjob", 10, 20),
+ _runGates(getGateVector(numRuns)),
+ _runIdx(0)
+ {}
+ void block() {
+ setBlocked(true);
+ }
+ virtual bool run() {
+ _runGates[_runIdx++]->await(5000);
+ return _runIdx == _runGates.size();
+ }
+};
+
+struct Fixture
+{
+ SimpleJobTracker::SP _tracker;
+ IMaintenanceJob::UP _job;
+ MyMaintenanceJob *_myJob;
+ IMaintenanceJob::UP _trackedJob;
+ bool _runRetval;
+ GateVector _runGates;
+ size_t _runIdx;
+ ThreadStackExecutor _exec;
+ Fixture(size_t numRuns = 1)
+ : _tracker(new SimpleJobTracker(1)),
+ _job(new MyMaintenanceJob(numRuns)),
+ _myJob(static_cast<MyMaintenanceJob *>(_job.get())),
+ _trackedJob(new JobTrackedMaintenanceJob(_tracker, std::move(_job))),
+ _runRetval(false),
+ _runGates(getGateVector(numRuns)),
+ _runIdx(0),
+ _exec(1, 64000)
+ {
+ }
+ void runJob() {
+ _runRetval = _trackedJob->run();
+ _runGates[_runIdx++]->countDown();
+ }
+ void assertTracker(size_t startedGateCount, size_t endedGateCount) {
+ EXPECT_EQUAL(startedGateCount, _tracker->_started.getCount());
+ EXPECT_EQUAL(endedGateCount, _tracker->_ended.getCount());
+ }
+ void runJobAndWait(size_t runIdx, size_t startedGateCount, size_t endedGateCount) {
+ _exec.execute(makeTask(makeClosure(this, &Fixture::runJob)));
+ _tracker->_started.await(5000);
+ assertTracker(startedGateCount, endedGateCount);
+ _myJob->_runGates[runIdx]->countDown();
+ _runGates[runIdx]->await(5000);
+ }
+};
+
+TEST_F("require that maintenance job name, delay and interval are preserved", Fixture)
+{
+ EXPECT_EQUAL("myjob", f._trackedJob->getName());
+ EXPECT_EQUAL(10, f._trackedJob->getDelay());
+ EXPECT_EQUAL(20, f._trackedJob->getInterval());
+}
+
+TEST_F("require that maintenance job that needs 1 run is tracked", Fixture)
+{
+ f.assertTracker(1, 1);
+ f.runJobAndWait(0, 0, 1);
+ f.assertTracker(0, 0);
+ EXPECT_TRUE(f._runRetval);
+}
+
+TEST_F("require that maintenance job that needs several runs is tracked", Fixture(2))
+{
+ f.assertTracker(1, 1);
+ f.runJobAndWait(0, 0, 1);
+ f.assertTracker(0, 1);
+ EXPECT_FALSE(f._runRetval);
+
+ f.runJobAndWait(1, 0, 1);
+ f.assertTracker(0, 0);
+ EXPECT_TRUE(f._runRetval);
+}
+
+TEST_F("require that maintenance job that is destroyed is tracked", Fixture(2))
+{
+ f.assertTracker(1, 1);
+ f.runJobAndWait(0, 0, 1);
+ f.assertTracker(0, 1);
+ EXPECT_FALSE(f._runRetval);
+
+ f._trackedJob.reset();
+ f.assertTracker(0, 0);
+}
+
+TEST_F("require that block calls are sent to underlying job", Fixture)
+{
+ EXPECT_FALSE(f._trackedJob->isBlocked());
+ f._myJob->block();
+ EXPECT_TRUE(f._myJob->isBlocked());
+ EXPECT_TRUE(f._trackedJob->isBlocked());
+ f._trackedJob->unBlock();
+ EXPECT_FALSE(f._myJob->isBlocked());
+ EXPECT_FALSE(f._trackedJob->isBlocked());
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/documentdb/lid_space_compaction/.gitignore b/searchcore/src/tests/proton/documentdb/lid_space_compaction/.gitignore
new file mode 100644
index 00000000000..c031fe6605d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/lid_space_compaction/.gitignore
@@ -0,0 +1 @@
+searchcore_lid_space_compaction_test_app
diff --git a/searchcore/src/tests/proton/documentdb/lid_space_compaction/CMakeLists.txt b/searchcore/src/tests/proton/documentdb/lid_space_compaction/CMakeLists.txt
new file mode 100644
index 00000000000..938e0dc7baf
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/lid_space_compaction/CMakeLists.txt
@@ -0,0 +1,13 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+# Builds and registers the lid space compaction unit test executable.
+vespa_add_executable(searchcore_lid_space_compaction_test_app
+ SOURCES
+ lid_space_compaction_test.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_initializer
+ searchcore_feedoperation
+ searchcore_documentmetastore
+ searchcore_bucketdb
+ searchcore_pcommon
+)
+vespa_add_test(NAME searchcore_lid_space_compaction_test_app COMMAND searchcore_lid_space_compaction_test_app)
diff --git a/searchcore/src/tests/proton/documentdb/lid_space_compaction/DESC b/searchcore/src/tests/proton/documentdb/lid_space_compaction/DESC
new file mode 100644
index 00000000000..b361d232d13
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/lid_space_compaction/DESC
@@ -0,0 +1,2 @@
+Test for lid space compaction. Take a look at lid_space_compaction_test.cpp for details.
+
diff --git a/searchcore/src/tests/proton/documentdb/lid_space_compaction/FILES b/searchcore/src/tests/proton/documentdb/lid_space_compaction/FILES
new file mode 100644
index 00000000000..48fa9ef87c6
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/lid_space_compaction/FILES
@@ -0,0 +1 @@
+lid_space_compaction_test.cpp
diff --git a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_compaction_test.cpp b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_compaction_test.cpp
new file mode 100644
index 00000000000..aa0ab2ebfb1
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_compaction_test.cpp
@@ -0,0 +1,450 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("lid_space_compaction_test");
+
+#include <vespa/searchcore/proton/server/i_lid_space_compaction_handler.h>
+#include <vespa/searchcore/proton/server/lid_space_compaction_handler.h>
+#include <vespa/searchcore/proton/server/lid_space_compaction_job.h>
+#include <vespa/searchcore/proton/test/test.h>
+#include <vespa/searchlib/index/docbuilder.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/searchcore/proton/server/ifrozenbuckethandler.h>
+
+using namespace document;
+using namespace proton;
+using namespace search;
+using namespace search::index;
+using namespace vespalib;
+using storage::spi::Timestamp;
+
+// Shared constants for the tests below. SUBDB_ID is also the sub db id that
+// MySubDb passes to DummyDocumentSubDb further down.
+const uint32_t SUBDB_ID = 2;
+const double JOB_DELAY = 1.0;
+const uint32_t ALLOWED_LID_BLOAT = 1;
+const double ALLOWED_LID_BLOAT_FACTOR = 0.3;
+const uint32_t MAX_DOCS_TO_SCAN = 100;
+const vespalib::string DOC_ID = "id:test:searchdocument::0";
+const BucketId BUCKET_ID_1(1);
+const BucketId BUCKET_ID_2(2);
+const Timestamp TIMESTAMP_1(1);
+const GlobalId GID_1;
+
+// Lid-list helpers used by the fixtures: a LidPair is (highestUsedLid, lowestFreeLid).
+typedef std::vector<uint32_t> LidVector;
+typedef std::pair<uint32_t, uint32_t> LidPair;
+typedef std::vector<LidPair> LidPairVector;
+
+// Iterates over a fixed lid list, mimicking a document scan. next() steps
+// past lids <= compactLidLimit (at most maxDocsToScan steps per call) and
+// returns meta data for the first lid above the limit; when the list is
+// exhausted the iterator becomes invalid and a default (invalid)
+// DocumentMetaData is returned.
+struct MyScanIterator : public IDocumentScanIterator
+{
+ LidVector _lids;
+ LidVector::const_iterator _itr;
+ bool _validItr;
+ MyScanIterator(const LidVector &lids) : _lids(lids), _itr(_lids.begin()), _validItr(true) {}
+ virtual bool valid() const {
+ return _validItr;
+ }
+ virtual search::DocumentMetaData next(uint32_t compactLidLimit,
+ uint32_t maxDocsToScan,
+ bool retry) {
+ // On a retry stay on the lid returned last time; otherwise advance
+ // past it (except on the very first call, when _itr is at begin()).
+ if (!retry && _itr != _lids.begin()) {
+ ++_itr;
+ }
+ // Skip lids already at or below the compact limit, bounded by maxDocsToScan.
+ for (uint32_t i = 0; i < maxDocsToScan && _itr != _lids.end() && (*_itr) <= compactLidLimit;
+ ++i, ++_itr) {}
+ if (_itr != _lids.end()) {
+ uint32_t lid = *_itr;
+ if (lid > compactLidLimit) {
+ return search::DocumentMetaData(lid, TIMESTAMP_1, BUCKET_ID_1, GID_1);
+ }
+ } else {
+ _validItr = false;
+ }
+ return search::DocumentMetaData();
+ }
+};
+
+// Test double for ILidSpaceCompactionHandler: serves pre-loaded lid usage
+// stats and scan iterators to the job under test, and records the move /
+// compact-lid-space operations the job hands back so tests can assert on them.
+struct MyHandler : public ILidSpaceCompactionHandler
+{
+ std::vector<LidUsageStats> _stats; // one entry consumed per getLidStatus() round (indexed by _handleMoveCnt)
+ std::vector<LidVector> _lids; // lid sets handed out via getIterator()
+ mutable uint32_t _moveFromLid;
+ mutable uint32_t _moveToLid;
+ uint32_t _handleMoveCnt;
+ uint32_t _wantedSubDbId;
+ uint32_t _wantedLidLimit;
+ mutable uint32_t _iteratorCnt;
+
+ MyHandler()
+ : _stats(),
+ _lids(),
+ _moveFromLid(0),
+ _moveToLid(0),
+ _handleMoveCnt(0),
+ _wantedSubDbId(0),
+ _wantedLidLimit(0),
+ _iteratorCnt(0)
+ {}
+ virtual vespalib::string getName() const {
+ return "myhandler";
+ }
+ // Use the shared constant instead of a duplicated magic number so the
+ // tests cannot silently diverge if SUBDB_ID is ever changed.
+ virtual uint32_t getSubDbId() const { return SUBDB_ID; }
+ virtual LidUsageStats getLidStatus() const {
+ ASSERT_TRUE(_handleMoveCnt < _stats.size());
+ return _stats[_handleMoveCnt];
+ }
+ virtual IDocumentScanIterator::UP getIterator() const {
+ ASSERT_TRUE(_iteratorCnt < _lids.size());
+ return IDocumentScanIterator::UP(new MyScanIterator(_lids[_iteratorCnt++]));
+ }
+ virtual MoveOperation::UP createMoveOperation(const search::DocumentMetaData &document,
+ uint32_t moveToLid) const {
+ ASSERT_TRUE(document.lid > moveToLid);
+ _moveFromLid = document.lid;
+ _moveToLid = moveToLid;
+ return MoveOperation::UP(new MoveOperation());
+ }
+ virtual void handleMove(const MoveOperation &) {
+ ++_handleMoveCnt;
+ }
+ virtual void handleCompactLidSpace(const CompactLidSpaceOperation &op) {
+ _wantedSubDbId = op.getSubDbId();
+ _wantedLidLimit = op.getLidLimit();
+ }
+};
+
+// Counts MOVE and COMPACT_LID_SPACE operations passed to storeOperation(),
+// so tests can verify that the job persists the operations it creates.
+struct MyStorer : public IOperationStorer
+{
+ uint32_t _moveCnt;
+ uint32_t _compactCnt;
+ MyStorer()
+ : _moveCnt(0),
+ _compactCnt(0)
+ {}
+ virtual void storeOperation(FeedOperation &op) {
+ if (op.getType() == FeedOperation::MOVE) {
+ ++ _moveCnt;
+ } else if (op.getType() == FeedOperation::COMPACT_LID_SPACE) {
+ ++_compactCnt;
+ }
+ }
+};
+
+// Simulates one frozen bucket: acquireExclusiveBucket() yields an empty
+// guard (= bucket unavailable) for the configured _bucket, and a real guard
+// for any other bucket.
+struct MyFrozenBucketHandler : public IFrozenBucketHandler
+{
+ BucketId _bucket;
+ MyFrozenBucketHandler() : _bucket() {}
+ virtual ExclusiveBucketGuard::UP acquireExclusiveBucket(BucketId bucket) override {
+ return (_bucket == bucket)
+ ? ExclusiveBucketGuard::UP()
+ : std::make_unique<ExclusiveBucketGuard>(bucket);
+ }
+ virtual void addListener(IBucketFreezeListener *) override { }
+ virtual void removeListener(IBucketFreezeListener *) override { }
+};
+
+// Minimal feed view; only forwards the document type repo to the dummy base.
+struct MyFeedView : public test::DummyFeedView
+{
+ MyFeedView(const DocumentTypeRepo::SP &repo)
+ : test::DummyFeedView(repo)
+ {
+ }
+};
+
+// Document store stub: read() records the requested lid and returns a clone
+// of the single pre-configured document.
+struct MyDocumentStore : public test::DummyDocumentStore
+{
+ Document::SP _readDoc;
+ mutable uint32_t _readLid;
+ MyDocumentStore() : _readDoc(), _readLid(0) {}
+ virtual document::Document::UP
+ read(search::DocumentIdT lid, const document::DocumentTypeRepo &) const {
+ _readLid = lid;
+ // NOTE(review): assumes _readDoc is set before the first read()
+ // (HandlerFixture does this in its ctor); a null _readDoc would crash.
+ return Document::UP(_readDoc->clone());
+ }
+};
+
+// Summary manager stub exposing MyDocumentStore as its backing store.
+struct MySummaryManager : public test::DummySummaryManager
+{
+ MyDocumentStore _store;
+ MySummaryManager() : _store() {}
+ virtual search::IDocumentStore &getBackingStore() { return _store; }
+};
+
+// Document sub db stub wired with MySummaryManager, identified by SUBDB_ID,
+// and serving a fresh MyFeedView on every getFeedView() call.
+struct MySubDb : public test::DummyDocumentSubDb
+{
+ DocumentTypeRepo::SP _repo;
+ MySubDb(const DocumentTypeRepo::SP &repo,
+ std::shared_ptr<BucketDBOwner> bucketDB)
+ : test::DummyDocumentSubDb(bucketDB, SUBDB_ID),
+ _repo(repo)
+ {
+ _summaryManager.reset(new MySummaryManager());
+ }
+ virtual IFeedView::SP getFeedView() const {
+ return IFeedView::SP(new MyFeedView(_repo));
+ }
+};
+
+// Fixture assembling a LidSpaceCompactionJob with all collaborators stubbed.
+// addStats()/addMultiStats() pre-load the handler with lid usage snapshots
+// (one per expected getLidStatus() round) and the lid sets to scan.
+struct JobFixture
+{
+ MyHandler _handler;
+ MyStorer _storer;
+ MyFrozenBucketHandler _frozenHandler;
+ LidSpaceCompactionJob _job;
+ JobFixture(uint32_t allowedLidBloat = ALLOWED_LID_BLOAT,
+ double allowedLidBloatFactor = ALLOWED_LID_BLOAT_FACTOR,
+ uint32_t maxDocsToScan = MAX_DOCS_TO_SCAN)
+ : _handler(),
+ _job(DocumentDBLidSpaceCompactionConfig(JOB_DELAY,
+ allowedLidBloat, allowedLidBloatFactor, maxDocsToScan),
+ _handler, _storer, _frozenHandler)
+ {
+ }
+ JobFixture &addStats(uint32_t docIdLimit,
+ const LidVector &usedLids,
+ const LidPairVector &usedFreePairs) {
+ return addMultiStats(docIdLimit, {usedLids}, usedFreePairs);
+ }
+ // Each (highestUsedLid, lowestFreeLid) pair becomes one LidUsageStats
+ // snapshot; numDocs is taken from the first lid set.
+ // NOTE(review): assumes usedLidsVector is non-empty (all callers pass at
+ // least one lid set).
+ JobFixture &addMultiStats(uint32_t docIdLimit,
+ const std::vector<LidVector> &usedLidsVector,
+ const LidPairVector &usedFreePairs) {
+ uint32_t numDocs = usedLidsVector[0].size();
+ for (auto pair : usedFreePairs) {
+ uint32_t highestUsedLid = pair.first;
+ uint32_t lowestFreeLid = pair.second;
+ _handler._stats.push_back(LidUsageStats
+ (docIdLimit, numDocs, lowestFreeLid, highestUsedLid));
+ }
+ _handler._lids = usedLidsVector;
+ return *this;
+ }
+ JobFixture &addStats(uint32_t docIdLimit,
+ uint32_t numDocs,
+ uint32_t lowestFreeLid,
+ uint32_t highestUsedLid) {
+ _handler._stats.push_back(LidUsageStats
+ (docIdLimit, numDocs, lowestFreeLid, highestUsedLid));
+ return *this;
+ }
+ bool run() {
+ return _job.run();
+ }
+ // run() returning false means the job wants another invocation (scan not done).
+ JobFixture &endScan() {
+ EXPECT_FALSE(run());
+ return *this;
+ }
+ // run() returning true means the job is done for now (compaction issued).
+ JobFixture &compact() {
+ EXPECT_TRUE(run());
+ return *this;
+ }
+};
+
+// Fixture for testing LidSpaceCompactionHandler against a real sub db stub;
+// pre-loads the document store with a single document for DOC_ID.
+struct HandlerFixture
+{
+ DocBuilder _docBuilder;
+ std::shared_ptr<BucketDBOwner> _bucketDB;
+ MySubDb _subDb;
+ MySummaryManager &_summaryMgr;
+ MyDocumentStore &_docStore;
+ LidSpaceCompactionHandler _handler;
+ HandlerFixture()
+ : _docBuilder(Schema()),
+ _bucketDB(std::make_shared<BucketDBOwner>()),
+ _subDb(_docBuilder.getDocumentTypeRepo(), _bucketDB),
+ // Safe cast: MySubDb's ctor installs a MySummaryManager.
+ _summaryMgr(static_cast<MySummaryManager &>(*_subDb.getSummaryManager())),
+ _docStore(_summaryMgr._store),
+ _handler(_subDb, "test")
+ {
+ _docStore._readDoc = _docBuilder.startDocument(DOC_ID).endDocument();
+ }
+};
+
+// Asserts the combined observable state of handler and storer after a number
+// of job runs; returns false on the first mismatch so callers can wrap it in
+// EXPECT_TRUE for a single pass/fail.
+bool
+assertJobContext(uint32_t moveToLid,
+ uint32_t moveFromLid,
+ uint32_t handleMoveCnt,
+ uint32_t wantedLidLimit,
+ uint32_t compactStoreCnt,
+ const JobFixture &f)
+{
+ if (!EXPECT_EQUAL(moveToLid, f._handler._moveToLid)) return false;
+ if (!EXPECT_EQUAL(moveFromLid, f._handler._moveFromLid)) return false;
+ if (!EXPECT_EQUAL(handleMoveCnt, f._handler._handleMoveCnt)) return false;
+ // Every handled move must also have been stored.
+ if (!EXPECT_EQUAL(handleMoveCnt, f._storer._moveCnt)) return false;
+ if (!EXPECT_EQUAL(wantedLidLimit, f._handler._wantedLidLimit)) return false;
+ if (!EXPECT_EQUAL(compactStoreCnt, f._storer._compactCnt)) return false;
+ return true;
+}
+
+// Job-level tests: each addStats() pair is (highestUsedLid, lowestFreeLid);
+// the job decides per snapshot whether to move a document or compact.
+TEST_F("require that handler name is used as part of job name", JobFixture)
+{
+ EXPECT_EQUAL("lid_space_compaction.myhandler", f._job.getName());
+}
+
+TEST_F("require that no move operation is created if lid bloat factor is below limit", JobFixture)
+{
+ // 20% bloat < 30% allowed bloat
+ f.addStats(10, {1,3,4,5,6,7,9}, {{9,2}});
+ EXPECT_TRUE(f.run());
+ EXPECT_TRUE(assertJobContext(0, 0, 0, 0, 0, f));
+}
+
+TEST("require that no move operation is created if lid bloat is below limit")
+{
+ JobFixture f(3, 0.1);
+ // 20% bloat >= 10% allowed bloat BUT lid bloat (2) < allowed lid bloat (3)
+ f.addStats(10, {1,3,4,5,6,7,9}, {{9,2}});
+ EXPECT_TRUE(f.run());
+ EXPECT_TRUE(assertJobContext(0, 0, 0, 0, 0, f));
+}
+
+TEST_F("require that no move operation is created and compaction is initiated", JobFixture)
+{
+ // no documents to move: lowestFreeLid(7) > highestUsedLid(6)
+ f.addStats(10, {1,2,3,4,5,6}, {{6,7}});
+
+ // must scan to find that no documents should be moved
+ f.endScan().compact();
+ EXPECT_TRUE(assertJobContext(0, 0, 0, 7, 1, f));
+}
+
+TEST_F("require that 1 move operation is created and compaction is initiated", JobFixture)
+{
+ f.addStats(10, {1,3,4,5,6,9},
+ {{9,2}, // 30% bloat: move 9 -> 2
+ {6,7}}); // no documents to move
+
+ EXPECT_FALSE(f.run()); // scan
+ EXPECT_TRUE(assertJobContext(2, 9, 1, 0, 0, f));
+ f.endScan().compact();
+ EXPECT_TRUE(assertJobContext(2, 9, 1, 7, 1, f));
+}
+
+TEST_F("require that job returns false when multiple move operations or compaction are needed",
+ JobFixture)
+{
+ f.addStats(10, {1,5,6,9,8,7},
+ {{9,2}, // 30% bloat: move 9 -> 2
+ {8,3}, // move 8 -> 3
+ {7,4}, // move 7 -> 4
+ {6,7}}); // no documents to move
+
+ EXPECT_FALSE(f.run());
+ EXPECT_TRUE(assertJobContext(2, 9, 1, 0, 0, f));
+ EXPECT_FALSE(f.run());
+ EXPECT_TRUE(assertJobContext(3, 8, 2, 0, 0, f));
+ EXPECT_FALSE(f.run());
+ EXPECT_TRUE(assertJobContext(4, 7, 3, 0, 0, f));
+ f.endScan().compact();
+ EXPECT_TRUE(assertJobContext(4, 7, 3, 7, 1, f));
+}
+
+// Blocking and rescanning behavior of the job.
+TEST_F("require that job is blocked if trying to move document for frozen bucket", JobFixture)
+{
+ f._frozenHandler._bucket = BUCKET_ID_1;
+ EXPECT_FALSE(f._job.isBlocked());
+ f.addStats(10, {1,3,4,5,6,9}, {{9,2}}); // 30% bloat: try to move 9 -> 2
+ f.addStats(0, 0, 0, 0);
+
+ EXPECT_TRUE(f.run()); // bucket frozen
+ EXPECT_TRUE(assertJobContext(0, 0, 0, 0, 0, f));
+ EXPECT_TRUE(f._job.isBlocked());
+
+ f._frozenHandler._bucket = BUCKET_ID_2;
+ f._job.unBlock();
+
+ EXPECT_FALSE(f.run()); // unblocked
+ EXPECT_TRUE(assertJobContext(2, 9, 1, 0, 0, f));
+ EXPECT_FALSE(f._job.isBlocked());
+}
+
+// maxDocsToScan = 3: a single run may end before reaching the target lid.
+TEST_F("require that job handles invalid document meta data when max docs are scanned",
+ JobFixture(ALLOWED_LID_BLOAT, ALLOWED_LID_BLOAT_FACTOR, 3))
+{
+ f.addStats(10, {1,3,4,5,6,9},
+ {{9,2}, // 30% bloat: move 9 -> 2
+ {6,7}}); // no documents to move
+
+ EXPECT_FALSE(f.run()); // does not find 9 in first scan
+ EXPECT_TRUE(assertJobContext(0, 0, 0, 0, 0, f));
+ EXPECT_FALSE(f.run()); // move 9 -> 2
+ EXPECT_TRUE(assertJobContext(2, 9, 1, 0, 0, f));
+ f.endScan().compact();
+ EXPECT_TRUE(assertJobContext(2, 9, 1, 7, 1, f));
+}
+
+TEST_F("require that job can restart documents scan if lid bloat is still to large",
+ JobFixture(ALLOWED_LID_BLOAT, ALLOWED_LID_BLOAT_FACTOR, 3))
+{
+ f.addMultiStats(10, {{1,3,4,5,6,9},{1,2,4,5,6,8}},
+ {{9,2}, // 30% bloat: move 9 -> 2
+ {8,3}, // move 8 -> 3 (this should trigger rescan as the set of used docs have changed)
+ {6,7}}); // no documents to move
+
+ EXPECT_FALSE(f.run()); // does not find 9 in first scan
+ EXPECT_EQUAL(1u, f._handler._iteratorCnt);
+ // We simulate that the set of used docs have changed between these 2 runs
+ EXPECT_FALSE(f.run()); // move 9 -> 2
+ f.endScan();
+ EXPECT_TRUE(assertJobContext(2, 9, 1, 0, 0, f));
+ EXPECT_EQUAL(2u, f._handler._iteratorCnt);
+ EXPECT_FALSE(f.run()); // does not find 8 in first scan
+ EXPECT_FALSE(f.run()); // move 8 -> 3
+ EXPECT_TRUE(assertJobContext(3, 8, 2, 0, 0, f));
+ f.endScan().compact();
+ EXPECT_TRUE(assertJobContext(3, 8, 2, 7, 1, f));
+}
+
+// Handler-level tests plus held-lid edge cases for the job.
+TEST_F("require that handler uses doctype and subdb name", HandlerFixture)
+{
+ EXPECT_EQUAL("test.dummysubdb", f._handler.getName());
+}
+
+TEST_F("require that createMoveOperation() works as expected", HandlerFixture)
+{
+ const uint32_t moveToLid = 5;
+ const uint32_t moveFromLid = 10;
+ const BucketId bucketId(100);
+ const Timestamp timestamp(200);
+ DocumentMetaData document(moveFromLid, timestamp, bucketId, GlobalId());
+ MoveOperation::UP op = f._handler.createMoveOperation(document, moveToLid);
+ EXPECT_EQUAL(10u, f._docStore._readLid);
+ EXPECT_EQUAL(DbDocumentId(SUBDB_ID, moveFromLid).toString(),
+ op->getPrevDbDocumentId().toString()); // source
+ EXPECT_EQUAL(DbDocumentId(SUBDB_ID, moveToLid).toString(),
+ op->getDbDocumentId().toString()); // target
+ EXPECT_EQUAL(DocumentId(DOC_ID), op->getDocument()->getId());
+ EXPECT_EQUAL(bucketId, op->getBucketId());
+ EXPECT_EQUAL(timestamp, op->getTimestamp());
+}
+
+
+TEST_F("require that held lid is not considered free, blocks job", JobFixture)
+{
+ // Lid 1 on hold or pendingHold, i.e. neither free nor used.
+ f.addMultiStats(3, {{2}}, {{2, 3}});
+ EXPECT_TRUE(f.run());
+ EXPECT_TRUE(assertJobContext(0, 0, 0, 0, 0, f));
+}
+
+TEST_F("require that held lid is not considered free, only compact", JobFixture)
+{
+ // Lid 1 on hold or pendingHold, i.e. neither free nor used.
+ f.addMultiStats(10, {{2}}, {{2, 3}});
+ EXPECT_FALSE(f.run());
+ EXPECT_TRUE(assertJobContext(0, 0, 0, 0, 0, f));
+ f.compact();
+ EXPECT_TRUE(assertJobContext(0, 0, 0, 3, 1, f));
+}
+
+TEST_F("require that held lids are not considered free, one move", JobFixture)
+{
+ // Lids 1,2,3 on hold or pendingHold, i.e. neither free nor used.
+ f.addMultiStats(10, {{5}}, {{5, 4}, {4, 5}});
+ EXPECT_FALSE(f.run());
+ EXPECT_TRUE(assertJobContext(4, 5, 1, 0, 0, f));
+ f.endScan().compact();
+ EXPECT_TRUE(assertJobContext(4, 5, 1, 5, 1, f));
+}
+
+TEST_MAIN()
+{
+ TEST_RUN_ALL();
+}
diff --git a/searchcore/src/tests/proton/documentdb/maintenancecontroller/.gitignore b/searchcore/src/tests/proton/documentdb/maintenancecontroller/.gitignore
new file mode 100644
index 00000000000..7ce70f9cbcd
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/maintenancecontroller/.gitignore
@@ -0,0 +1,2 @@
+searchcore_frozenbucketsmap_test_app
+searchcore_maintenancecontroller_test_app
diff --git a/searchcore/src/tests/proton/documentdb/maintenancecontroller/CMakeLists.txt b/searchcore/src/tests/proton/documentdb/maintenancecontroller/CMakeLists.txt
new file mode 100644
index 00000000000..4f26a79f0eb
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/maintenancecontroller/CMakeLists.txt
@@ -0,0 +1,38 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+# Builds and registers the maintenance controller test executable.
+vespa_add_executable(searchcore_maintenancecontroller_test_app
+ SOURCES
+ maintenancecontroller_test.cpp
+ DEPENDS
+ searchcore_test
+ searchcore_server
+ searchcore_feedoperation
+ searchcore_matching
+ searchcore_attribute
+ searchcore_documentmetastore
+ searchcore_bucketdb
+ searchcore_pcommon
+ searchcore_persistenceengine
+ searchcore_grouping
+ searchcore_proton_metrics
+ searchcore_util
+ searchcore_fconfig
+)
+vespa_add_test(NAME searchcore_maintenancecontroller_test_app COMMAND searchcore_maintenancecontroller_test_app)
+# Builds and registers the frozen buckets map race test executable.
+vespa_add_executable(searchcore_frozenbucketsmap_test_app
+ SOURCES
+ frozenbucketsmap_test.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_feedoperation
+ searchcore_matching
+ searchcore_attribute
+ searchcore_documentmetastore
+ searchcore_bucketdb
+ searchcore_pcommon
+ searchcore_persistenceengine
+ searchcore_grouping
+ searchcore_proton_metrics
+ searchcore_util
+ searchcore_fconfig
+)
+vespa_add_test(NAME searchcore_frozenbucketsmap_test_app COMMAND searchcore_frozenbucketsmap_test_app)
diff --git a/searchcore/src/tests/proton/documentdb/maintenancecontroller/DESC b/searchcore/src/tests/proton/documentdb/maintenancecontroller/DESC
new file mode 100644
index 00000000000..ad4e910a6f1
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/maintenancecontroller/DESC
@@ -0,0 +1,2 @@
+Maintenance controller test. Take a look at maintenancecontroller_test.cpp
+for details.
diff --git a/searchcore/src/tests/proton/documentdb/maintenancecontroller/FILES b/searchcore/src/tests/proton/documentdb/maintenancecontroller/FILES
new file mode 100644
index 00000000000..4bd439640f1
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/maintenancecontroller/FILES
@@ -0,0 +1,2 @@
+maintenancecontroller_test.cpp
+frozenbucketsmap_test.cpp
diff --git a/searchcore/src/tests/proton/documentdb/maintenancecontroller/frozenbucketsmap_test.cpp b/searchcore/src/tests/proton/documentdb/maintenancecontroller/frozenbucketsmap_test.cpp
new file mode 100644
index 00000000000..5dc72d02b15
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/maintenancecontroller/frozenbucketsmap_test.cpp
@@ -0,0 +1,86 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("frozenbucketsmap_test");
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/searchcore/proton/server/frozenbuckets.h>
+#include <vespa/vespalib/util/threadstackexecutor.h>
+
+using namespace proton;
+using document::BucketId;
+
+// Common state for the reader/writer tasks that race on a FrozenBucketsMap:
+// the bucket to operate on and the number of iterations to perform.
+class RWTask : public vespalib::Executor::Task {
+public:
+ RWTask(FrozenBucketsMap & m, BucketId b, size_t count) : _b(b), _m(m), _count(count) {}
+protected:
+ const BucketId _b;
+ FrozenBucketsMap & _m;
+ const size_t _count;
+};
+
+// Repeatedly freezes and thaws the bucket; counts the iterations where
+// thawBucket() signals contention. The count is logged on destruction.
+class Reader : public RWTask {
+public:
+ Reader(FrozenBucketsMap & m, BucketId b, size_t count) :
+ RWTask(m, b, count),
+ numContended(0)
+ {}
+ ~Reader() {
+ // %zu: numContended is size_t; %ld has the wrong type/signedness on
+ // platforms where size_t is not long.
+ LOG(info, "NumContended = %zu", numContended);
+ }
+ void run() override {
+ for (size_t i(0); i < _count; i++) {
+ _m.freezeBucket(_b);
+ if (_m.thawBucket(_b)) {
+ numContended++;
+ }
+ }
+ }
+ size_t numContended;
+};
+
+// Repeatedly tries to acquire the bucket exclusively; counts successes and
+// failures, and verifies on destruction that every iteration was accounted for.
+class Writer : public RWTask {
+public:
+ Writer(FrozenBucketsMap & m, BucketId b, size_t count) :
+ RWTask(m, b, count),
+ numFailed(0),
+ numSucces(0)
+ {}
+ ~Writer() {
+ EXPECT_EQUAL(_count, numSucces + numFailed);
+ // %zu: both counters are size_t; %ld has the wrong type/signedness on
+ // platforms where size_t is not long.
+ LOG(info, "NumSuccess = %zu, NumFailed = %zu", numSucces, numFailed);
+ }
+ void run() override {
+ for (size_t i(0); i < _count; i++) {
+ IFrozenBucketHandler::ExclusiveBucketGuard::UP guard = _m.acquireExclusiveBucket(_b);
+ if (guard) {
+ numSucces++;
+ } else {
+ numFailed++;
+ }
+ }
+ }
+ size_t numFailed;
+ size_t numSucces;
+};
+
+// Stress test: several readers freeze/thaw the same bucket while a writer
+// tries to acquire it exclusively; sync() joins all tasks before teardown.
+TEST("Race reader and writer on FrozenBucketsMap") {
+ FrozenBucketsMap m;
+ BucketId a(8, 6);
+ constexpr size_t NUM_READERS = 3;
+ constexpr size_t NUM_WRITERS = 1;
+ constexpr size_t READER_COUNT = 1000000;
+ constexpr size_t WRITER_COUNT = 1000000;
+ vespalib::ThreadStackExecutor executor(NUM_READERS+NUM_WRITERS, 0x10000);
+ // execute() returns any task it could not accept; expect all to be taken.
+ for (size_t i(0); i < NUM_READERS; i++) {
+ EXPECT_FALSE(bool(executor.execute(std::make_unique<Reader>(m, a, READER_COUNT))));
+ }
+ for (size_t i(0); i < NUM_WRITERS; i++) {
+ EXPECT_FALSE(bool(executor.execute(std::make_unique<Writer>(m, a, WRITER_COUNT))));
+ }
+ executor.sync();
+}
+
+TEST_MAIN()
+{
+ TEST_RUN_ALL();
+}
diff --git a/searchcore/src/tests/proton/documentdb/maintenancecontroller/maintenancecontroller_test.cpp b/searchcore/src/tests/proton/documentdb/maintenancecontroller/maintenancecontroller_test.cpp
new file mode 100644
index 00000000000..0513d8f45d9
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/maintenancecontroller/maintenancecontroller_test.cpp
@@ -0,0 +1,1472 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("maintenancecontroller_test");
+#include <vespa/searchcore/proton/test/test.h>
+#include <vespa/searchlib/index/docbuilder.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/searchcore/proton/attribute/attribute_usage_filter.h>
+#include <vespa/searchcore/proton/attribute/i_attribute_manager.h>
+#include <vespa/searchcore/proton/common/doctypename.h>
+#include <vespa/searchcore/proton/common/feedtoken.h>
+#include <vespa/searchcore/proton/server/idocumentmovehandler.h>
+#include <vespa/searchcore/proton/server/executor_thread_service.h>
+#include <vespa/searchcore/proton/server/i_operation_storer.h>
+#include <vespa/searchcore/proton/server/ipruneremoveddocumentshandler.h>
+#include <vespa/searchcore/proton/server/iheartbeathandler.h>
+#include <vespa/searchcore/proton/server/maintenance_controller_explorer.h>
+#include <vespa/searchcore/proton/server/maintenance_jobs_injector.h>
+#include <vespa/searchcore/proton/server/maintenancecontroller.h>
+#include <vespa/searchcore/proton/server/ibucketmodifiedhandler.h>
+#include <vespa/searchlib/attribute/attributeguard.h>
+#include <vespa/searchlib/attribute/attributecontext.h>
+#include <vespa/searchlib/common/idocumentmetastore.h>
+#include <vespa/vespalib/util/closuretask.h>
+#include <vespa/vespalib/util/sync.h>
+#include <vespa/searchcore/proton/feedoperation/moveoperation.h>
+#include <vespa/searchcore/proton/test/clusterstatehandler.h>
+#include <vespa/searchcore/proton/test/buckethandler.h>
+#include <vespa/vespalib/util/threadstackexecutor.h>
+
+using namespace proton;
+using namespace vespalib::slime;
+using document::BucketId;
+using document::Document;
+using document::DocumentId;
+using fastos::ClockSystem;
+using fastos::TimeStamp;
+using search::AttributeGuard;
+using search::DocumentIdT;
+using search::DocumentMetaData;
+using search::SerialNum;
+using storage::spi::BucketInfo;
+using storage::spi::Timestamp;
+using vespalib::makeTask;
+using vespalib::makeClosure;
+using vespalib::Slime;
+using proton::matching::ISessionCachePruner;
+
+typedef BucketId::List BucketIdVector;
+typedef std::set<BucketId> BucketIdSet;
+
+// Generous timeouts so waits in this test do not flake on slow machines.
+constexpr int TIMEOUT_MS = 60000;
+constexpr double TIMEOUT_SEC = 60.0;
+
+namespace
+{
+
+// Stores the calling thread's id into *threadId; presumably used to verify
+// which thread executes a task (callers are outside this chunk).
+void
+sampleThreadId(FastOS_ThreadId *threadId)
+{
+ *threadId = FastOS_Thread::GetCurrentThreadId();
+}
+
+} // namespace
+
+
+// In-memory document sub db used by the maintenance controller test: keeps
+// documents in a lid-indexed map and maintains a real DocumentMetaStore so
+// put/remove/move and prune operations behave like the production sub db.
+class MyDocumentSubDB
+{
+ typedef std::map<DocumentIdT, Document::SP> DocMap;
+ DocMap _docs;
+ uint32_t _subDBId;
+ DocumentMetaStore::SP _metaStoreSP;
+ DocumentMetaStore & _metaStore;
+ const document::DocumentTypeRepo::SP &_repo;
+ const DocTypeName &_docTypeName;
+
+public:
+ MyDocumentSubDB(uint32_t subDBId,
+ SubDbType subDbType,
+ const document::DocumentTypeRepo::SP &repo,
+ std::shared_ptr<BucketDBOwner> bucketDB,
+ const DocTypeName &docTypeName)
+ : _docs(),
+ _subDBId(subDBId),
+ _metaStoreSP(std::make_shared<DocumentMetaStore>(bucketDB,
+ DocumentMetaStore::getFixedName(),
+ search::GrowStrategy(),
+ DocumentMetaStore::IGidCompare::SP(new DocumentMetaStore::DefaultGidCompare),
+ subDbType)),
+ _metaStore(*_metaStoreSP),
+ _repo(repo),
+ _docTypeName(docTypeName)
+ {
+ _metaStore.constructFreeList();
+ }
+
+ uint32_t
+ getSubDBId(void) const
+ {
+ return _subDBId;
+ }
+
+ // Returns a clone of the document at lid, or an empty UP if not present.
+ Document::UP
+ getDocument(DocumentIdT lid) const
+ {
+ DocMap::const_iterator it(_docs.find(lid));
+ if (it != _docs.end()) {
+ return Document::UP(it->second->clone());
+ } else {
+ return Document::UP();
+ }
+ }
+
+ // The methods below are defined later in this file (outside this chunk).
+ MaintenanceDocumentSubDB
+ getSubDB(void);
+
+ void
+ handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &op);
+
+ void
+ handlePut(PutOperation &op);
+
+ void
+ handleRemove(RemoveOperation &op);
+
+ void
+ prepareMove(MoveOperation &op);
+
+ void
+ handleMove(const MoveOperation &op);
+
+ uint32_t
+ getNumUsedLids(void) const;
+
+ uint32_t
+ getDocumentCount(void) const
+ {
+ return _docs.size();
+ }
+
+ void setBucketState(const BucketId &bucket, bool active) {
+ _metaStore.setBucketState(bucket, active);
+ }
+
+ const IDocumentMetaStore &
+ getMetaStore() const
+ {
+ return _metaStore;
+ }
+};
+
+
+// Document retriever that delegates getDocument() to MyDocumentSubDB; the
+// remaining interface methods are either unused (abort()) or return empty
+// defaults.
+struct MyDocumentRetriever : public DocumentRetrieverBaseForTest
+{
+ MyDocumentSubDB &_subDB;
+
+ MyDocumentRetriever(MyDocumentSubDB &subDB)
+ : _subDB(subDB)
+ {
+ }
+
+ virtual const document::DocumentTypeRepo &
+ getDocumentTypeRepo(void) const
+ {
+ abort();
+ }
+
+ virtual void
+ getBucketMetaData(const storage::spi::Bucket &,
+ DocumentMetaData::Vector &) const
+ {
+ abort();
+ }
+ virtual DocumentMetaData
+ getDocumentMetaData(const DocumentId &) const
+ {
+ return DocumentMetaData();
+ }
+
+ virtual Document::UP
+ getDocument(DocumentIdT lid) const
+ {
+ return _subDB.getDocument(lid);
+ }
+
+ virtual CachedSelect::SP
+ parseSelect(const vespalib::string &) const
+ {
+ return CachedSelect::SP();
+ }
+};
+
+
+// Records every bucket reported as modified so tests can inspect the list.
+struct MyBucketModifiedHandler : public IBucketModifiedHandler
+{
+ BucketIdVector _modified;
+ virtual void notifyBucketModified(const BucketId &bucket) {
+ // The previous std::find() result was never used (dead code); simply
+ // record the bucket. TODO(review): confirm whether a uniqueness assert
+ // was intended here instead.
+ _modified.push_back(bucket);
+ }
+ void reset() { _modified.clear(); }
+};
+
+
+// Session cache pruner stub: only records that pruneTimedOutSessions() was
+// called; the timestamp argument is ignored.
+struct MySessionCachePruner : public ISessionCachePruner
+{
+ bool isInvoked;
+ MySessionCachePruner() : isInvoked(false) { }
+ void pruneTimedOutSessions(fastos::TimeStamp current) {
+ (void) current;
+ isInvoked = true;
+ }
+};
+
+
+// Combined feed handler stub implementing the maintenance-facing handler
+// interfaces. Records heart beats and the wipe time limit, tracks a serial
+// number, and remembers the executor thread id so handlers can verify they
+// are invoked on the right thread. Member functions are defined later in
+// this file (outside this chunk).
+class MyFeedHandler : public IDocumentMoveHandler,
+ public IPruneRemovedDocumentsHandler,
+ public IHeartBeatHandler,
+ public IWipeOldRemovedFieldsHandler,
+ public IOperationStorer
+{
+ FastOS_ThreadId _executorThreadId;
+ std::vector<MyDocumentSubDB *> _subDBs;
+ SerialNum _serialNum;
+ uint32_t _heartBeats;
+ fastos::TimeStamp _wipeTimeLimit;
+public:
+ MyFeedHandler(FastOS_ThreadId &executorThreadId);
+
+ virtual
+ ~MyFeedHandler();
+
+ bool
+ isExecutorThread(void);
+
+ virtual void
+ handleMove(MoveOperation &op);
+
+ virtual void
+ performPruneRemovedDocuments(PruneRemovedDocumentsOperation &op);
+
+ virtual void
+ heartBeat(void);
+
+ virtual void
+ wipeOldRemovedFields(TimeStamp wipeTimeLimit);
+
+ void
+ setSubDBs(const std::vector<MyDocumentSubDB *> &subDBs);
+
+ SerialNum
+ incSerialNum(void)
+ {
+ return ++_serialNum;
+ }
+
+ // Implements IOperationStorer
+ virtual void
+ storeOperation(FeedOperation &op);
+
+ uint32_t
+ getHeartBeats(void)
+ {
+ return _heartBeats;
+ }
+
+ fastos::TimeStamp
+ getWipeTimeLimit()
+ {
+ return _wipeTimeLimit;
+ }
+};
+
+
+// Single executor that exposes its worker thread id and idle checks so tests
+// can wait for queued maintenance tasks to complete. Member functions are
+// defined later in this file (outside this chunk).
+class MyExecutor: public vespalib::ThreadStackExecutor
+{
+public:
+ FastOS_ThreadId _threadId;
+
+ MyExecutor(void);
+
+ virtual
+ ~MyExecutor(void);
+
+ bool
+ isIdle(void);
+
+ bool
+ waitIdle(double timeout);
+};
+
+
+// RAII helper: freezes the bucket on construction and thaws it on
+// destruction, keeping freeze/thaw calls balanced in the tests.
+class MyFrozenBucket
+{
+ IBucketFreezer &_freezer;
+ BucketId _bucketId;
+public:
+ typedef std::unique_ptr<MyFrozenBucket> UP;
+
+ MyFrozenBucket(IBucketFreezer &freezer,
+ const BucketId &bucketId)
+ : _freezer(freezer),
+ _bucketId(bucketId)
+ {
+ _freezer.freezeBucket(_bucketId);
+ }
+
+ ~MyFrozenBucket(void)
+ {
+ _freezer.thawBucket(_bucketId);
+ }
+};
+
+// Maintenance job stub: every run() counts down a latch and increments a run
+// counter, then reports completion (returns true).
+struct MySimpleJob : public IMaintenanceJob
+{
+ vespalib::CountDownLatch _latch;
+ size_t _runCnt;
+
+ MySimpleJob(double delay,
+ double interval,
+ uint32_t finishCount)
+ : IMaintenanceJob("my_job", delay, interval),
+ _latch(finishCount),
+ _runCnt(0)
+ {
+ }
+ // Exposes the protected setBlocked() to the tests.
+ void block() { setBlocked(true); }
+ virtual bool run() {
+ LOG(info, "MySimpleJob::run()");
+ _latch.countDown();
+ ++_runCnt;
+ return true;
+ }
+};
+
+// Like MySimpleJob, but run() only reports completion once the latch has
+// reached zero, forcing the controller to reschedule it until then.
+struct MySplitJob : public MySimpleJob
+{
+ MySplitJob(double delay,
+ double interval,
+ uint32_t finishCount)
+ : MySimpleJob(delay, interval, finishCount)
+ {
+ }
+ virtual bool run() {
+ LOG(info, "MySplitJob::run()");
+ _latch.countDown();
+ ++_runCnt;
+ return _latch.getCount() == 0;
+ }
+};
+
+// Job that never finishes: each run() sleeps briefly and asks to be run
+// again; _firstRun lets the test wait until the job has started at least once.
+struct MyLongRunningJob : public IMaintenanceJob
+{
+ vespalib::Gate _firstRun;
+
+ MyLongRunningJob(double delay,
+ double interval)
+ : IMaintenanceJob("long_running_job", delay, interval),
+ _firstRun()
+ {
+ }
+ // Exposes the protected setBlocked() to the tests.
+ void block() { setBlocked(true); }
+ virtual bool run() {
+ _firstRun.countDown();
+ usleep(10000);
+ return false;
+ }
+};
+
+
+// Attribute manager stub for wiring only: every method the maintenance
+// controller is not expected to call aborts, so an unexpected call fails
+// loudly; asyncForEachAttribute() alone is a harmless no-op.
+struct MyAttributeManager : public proton::IAttributeManager
+{
+ virtual AttributeGuard::UP
+ getAttribute(const string &) const override {
+ abort();
+ }
+
+ virtual AttributeGuard::UP
+ getAttributeStableEnum(const string &) const override {
+ abort();
+ }
+
+ virtual void
+ getAttributeList(std::vector<AttributeGuard> &) const override {
+ abort();
+ }
+
+ virtual search::attribute::IAttributeContext::UP
+ createContext() const override {
+ abort();
+ }
+
+ virtual IAttributeManager::SP
+ create(const AttributeCollectionSpec &) const override {
+ abort();
+ }
+
+ virtual std::vector<searchcorespi::IFlushTarget::SP>
+ getFlushTargets() const override {
+ abort();
+ }
+
+ virtual search::SerialNum
+ getFlushedSerialNum(const vespalib::string &) const override {
+ abort();
+ }
+
+ virtual search::SerialNum getOldestFlushedSerialNumber() const override {
+ abort();
+ }
+
+ virtual search::SerialNum
+ getNewestFlushedSerialNumber() const override {
+ abort();
+ }
+
+ virtual void
+ getAttributeListAll(std::vector<search::AttributeGuard> &)
+ const override {
+ abort();
+ }
+
+ virtual void
+ wipeHistory(const search::index::Schema &) override {
+ abort();
+ }
+
+ virtual const IAttributeFactory::SP &
+ getFactory() const override {
+ abort();
+ }
+
+ virtual search::ISequencedTaskExecutor &
+ getAttributeFieldWriter() const override {
+ abort();
+ }
+
+ virtual search::AttributeVector *
+ getWritableAttribute(const vespalib::string &) const override {
+ abort();
+ }
+
+ virtual const std::vector<search::AttributeVector *> &
+ getWritableAttributes() const override {
+ abort();
+ }
+
+ virtual void
+ asyncForEachAttribute(std::shared_ptr<IAttributeFunctor>)
+ const override {
+ }
+
+ virtual ExclusiveAttributeReadAccessor::UP
+ getExclusiveReadAccessor(const vespalib::string &) const override {
+ abort();
+ }
+};
+
+// Test fixture that wires a MaintenanceController to stub sub DBs, a stub
+// feed handler and stub attribute managers. All controller work is funneled
+// through the single-threaded MyExecutor; each public method below schedules
+// its matching perform*() counterpart on that thread and waits, so callers
+// observe completed state transitions.
+class MaintenanceControllerFixture : public ICommitable
+{
+public:
+    MyExecutor                    _executor;
+    ExecutorThreadService         _threadService;
+    DocTypeName                   _docTypeName;
+    test::UserDocumentsBuilder    _builder;
+    std::shared_ptr<BucketDBOwner> _bucketDB;
+    test::BucketStateCalculator::SP _calc;
+    test::ClusterStateHandler     _clusterStateHandler;
+    test::BucketHandler           _bucketHandler;
+    MyBucketModifiedHandler       _bmc;
+    MyDocumentSubDB               _ready;
+    MyDocumentSubDB               _removed;
+    MyDocumentSubDB               _notReady;
+    MySessionCachePruner          _gsp;
+    MyFeedHandler                 _fh;
+    DocumentDBMaintenanceConfig::SP _mcCfg;
+    bool                          _injectDefaultJobs;
+    DocumentDBJobTrackers         _jobTrackers;
+    std::shared_ptr<proton::IAttributeManager> _readyAttributeManager;
+    std::shared_ptr<proton::IAttributeManager> _notReadyAttributeManager;
+    AttributeUsageFilter          _attributeUsageFilter;
+    MaintenanceController         _mc;
+
+    MaintenanceControllerFixture(void);
+
+    virtual
+    ~MaintenanceControllerFixture(void);
+
+    void
+    syncSubDBs(void);
+
+    // ICommitable no-ops: the tests do not verify commit behavior here.
+    void commit() override {
+    }
+
+    void commitAndWait() override {
+    }
+
+    void
+    performSyncSubDBs(void);
+
+    void
+    notifyClusterStateChanged(void);
+
+    void
+    performNotifyClusterStateChanged(void);
+
+    void
+    startMaintenance(void);
+
+    void injectMaintenanceJobs();
+
+    void
+    performStartMaintenance(void);
+
+    void
+    stopMaintenance(void);
+
+    void
+    forwardMaintenanceConfig(void);
+
+    void
+    performForwardMaintenanceConfig(void);
+
+    void
+    insertDocs(const test::UserDocuments &docs,
+               MyDocumentSubDB &subDb);
+
+    void
+    removeDocs(const test::UserDocuments &docs,
+               Timestamp timestamp);
+
+    // Rebuild the maintenance config with a new prune-removed-documents
+    // setting (all other settings copied from the current config) and push
+    // it to the controller.
+    void
+    setPruneConfig(const DocumentDBPruneRemovedDocumentsConfig &pruneConfig)
+    {
+        DocumentDBMaintenanceConfig::SP
+            newCfg(new DocumentDBMaintenanceConfig(
+                           pruneConfig,
+                           _mcCfg->getHeartBeatConfig(),
+                           _mcCfg->getWipeOldRemovedFieldsConfig(),
+                           _mcCfg->getSessionCachePruneInterval(),
+                           _mcCfg->getVisibilityDelay(),
+                           _mcCfg->getLidSpaceCompactionConfig(),
+                           _mcCfg->getAttributeUsageFilterConfig(),
+                           _mcCfg->getAttributeUsageSampleInterval()));
+        _mcCfg = newCfg;
+        forwardMaintenanceConfig();
+    }
+
+    // Same pattern as setPruneConfig(), replacing only the heartbeat config.
+    void
+    setHeartBeatConfig(const DocumentDBHeartBeatConfig &heartBeatConfig)
+    {
+        DocumentDBMaintenanceConfig::SP
+            newCfg(new DocumentDBMaintenanceConfig(
+                           _mcCfg->getPruneRemovedDocumentsConfig(),
+                           heartBeatConfig,
+                           _mcCfg->getWipeOldRemovedFieldsConfig(),
+                           _mcCfg->getSessionCachePruneInterval(),
+                           _mcCfg->getVisibilityDelay(),
+                           _mcCfg->getLidSpaceCompactionConfig(),
+                           _mcCfg->getAttributeUsageFilterConfig(),
+                           _mcCfg->getAttributeUsageSampleInterval()));
+        _mcCfg = newCfg;
+        forwardMaintenanceConfig();
+    }
+
+    // Same pattern, replacing only the wipe-old-removed-fields config.
+    void
+    setWipeOldRemovedFieldsConfig(const DocumentDBWipeOldRemovedFieldsConfig &wipeConfig)
+    {
+        DocumentDBMaintenanceConfig::SP
+            newCfg(new DocumentDBMaintenanceConfig(
+                           _mcCfg->getPruneRemovedDocumentsConfig(),
+                           _mcCfg->getHeartBeatConfig(),
+                           wipeConfig,
+                           _mcCfg->getSessionCachePruneInterval(),
+                           _mcCfg->getVisibilityDelay(),
+                           _mcCfg->getLidSpaceCompactionConfig(),
+                           _mcCfg->getAttributeUsageFilterConfig(),
+                           _mcCfg->getAttributeUsageSampleInterval()));
+        _mcCfg = newCfg;
+        forwardMaintenanceConfig();
+    }
+
+
+    // Same pattern, replacing only the grouping session prune interval.
+    void
+    setGroupingSessionPruneInterval(double groupingSessionPruneInterval)
+    {
+        DocumentDBMaintenanceConfig::SP
+            newCfg(new DocumentDBMaintenanceConfig(
+                           _mcCfg->getPruneRemovedDocumentsConfig(),
+                           _mcCfg->getHeartBeatConfig(),
+                           _mcCfg->getWipeOldRemovedFieldsConfig(),
+                           groupingSessionPruneInterval,
+                           _mcCfg->getVisibilityDelay(),
+                           _mcCfg->getLidSpaceCompactionConfig(),
+                           _mcCfg->getAttributeUsageFilterConfig(),
+                           _mcCfg->getAttributeUsageSampleInterval()));
+        _mcCfg = newCfg;
+        forwardMaintenanceConfig();
+    }
+
+
+    void
+    performNotifyBucketStateChanged(document::BucketId bucketId,
+                                    BucketInfo::ActiveState newState)
+    {
+        _bucketHandler.notifyBucketStateChanged(bucketId, newState);
+    }
+
+    // Schedules the bucket state change on the executor thread and waits
+    // for it to be processed before returning.
+    void
+    notifyBucketStateChanged(const document::BucketId &bucketId,
+                             BucketInfo::ActiveState newState)
+    {
+        _executor.execute(makeTask(makeClosure(this,
+                                               &MaintenanceControllerFixture::
+                                               performNotifyBucketStateChanged,
+                                               bucketId, newState)));
+        _executor.sync();
+    }
+};
+
+
+// Bundle this sub DB's meta store, a freshly created document retriever
+// and the sub DB id into the value object consumed by the controller.
+MaintenanceDocumentSubDB
+MyDocumentSubDB::getSubDB(void)
+{
+    return MaintenanceDocumentSubDB(
+            _metaStoreSP,
+            IDocumentRetriever::SP(new MyDocumentRetriever(*this)),
+            _subDBId);
+}
+
+
+// Apply a prune operation: batch-remove the listed lids from the meta
+// store, commit at the operation's serial number, and drop the matching
+// entries from the local document map.
+void
+MyDocumentSubDB::handlePruneRemovedDocuments(
+        const PruneRemovedDocumentsOperation &op)
+{
+    // Pruning is only valid for the removed sub DB (id 1).
+    assert(_subDBId == 1u);
+    const LidVectorContext &lidCtx = *op.getLidsToRemove();
+    const LidVectorContext::LidVector &lids = lidCtx.getLidVector();
+    _metaStore.removeBatch(lids, lidCtx.getDocIdLimit());
+    _metaStore.removeBatchComplete(lids);
+    _metaStore.commit(op.getSerialNum());
+    for (search::DocumentIdT lid : lids) {
+        _docs.erase(lid);
+    }
+}
+
+
+// Apply a put operation to this sub DB: insert the document into the meta
+// store and the local map when the operation targets this sub DB, and drop
+// the previous version when the document has moved away from here.
+void
+MyDocumentSubDB::handlePut(PutOperation &op)
+{
+    const SerialNum serialNum = op.getSerialNum();
+    const Document::SP &doc = op.getDocument();
+    const DocumentId &docId = doc->getId();
+    const document::GlobalId &gid = docId.getGlobalId();
+    bool needCommit = false;
+
+    if (op.getValidDbdId(_subDBId)) {
+        typedef DocumentMetaStore::Result PutRes;
+
+        // The operation pre-assigned the lid; the meta store must agree.
+        PutRes putRes(_metaStore.put(gid,
+                                     op.getBucketId(),
+                                     op.getTimestamp(),
+                                     op.getLid()));
+        assert(putRes.ok());
+        assert(op.getLid() == putRes._lid);
+        _docs[op.getLid()] = doc;
+        needCommit = true;
+    }
+    if (op.getValidPrevDbdId(_subDBId) && op.changedDbdId()) {
+        // The previous version lived here but the document moved to
+        // another sub DB: remove the old lid and local copy.
+        assert(_metaStore.validLid(op.getPrevLid()));
+        const RawDocumentMetaData &meta(_metaStore.getRawMetaData(op.getPrevLid()));
+        assert((_subDBId == 1u) == op.getPrevMarkedAsRemoved());
+        assert(meta.getGid() == gid);
+        (void) meta;
+
+        bool remres = _metaStore.remove(op.getPrevLid());
+        assert(remres);
+        (void) remres;
+        _metaStore.removeComplete(op.getPrevLid());
+
+        _docs.erase(op.getPrevLid());
+        needCommit = true;
+    }
+    if (needCommit) {
+        _metaStore.commit(serialNum, serialNum);
+    }
+}
+
+
+// Apply a remove operation. In the removed sub DB a remove is represented
+// as a put of an empty document (tombstone); the previous version is
+// dropped when it lived in this sub DB and the document changed sub DB.
+void
+MyDocumentSubDB::handleRemove(RemoveOperation &op)
+{
+    const SerialNum serialNum = op.getSerialNum();
+    const DocumentId &docId = op.getDocumentId();
+    const document::GlobalId &gid = docId.getGlobalId();
+    bool needCommit = false;
+
+    if (op.getValidDbdId(_subDBId)) {
+        typedef DocumentMetaStore::Result PutRes;
+
+        PutRes putRes(_metaStore.put(gid,
+                                     op.getBucketId(),
+                                     op.getTimestamp(),
+                                     op.getLid()));
+        assert(putRes.ok());
+        assert(op.getLid() == putRes._lid);
+        // Store an empty document of the right type as the tombstone body.
+        const document::DocumentType *docType =
+            _repo->getDocumentType(_docTypeName.getName());
+        Document::UP doc(new Document(*docType, docId));
+        doc->setRepo(*_repo);
+        _docs[op.getLid()] = std::move(doc);
+        needCommit = true;
+    }
+    if (op.getValidPrevDbdId(_subDBId) && op.changedDbdId()) {
+        // Previous version lived here and has moved away: drop it.
+        assert(_metaStore.validLid(op.getPrevLid()));
+        const RawDocumentMetaData &meta(_metaStore.getRawMetaData(op.getPrevLid()));
+        assert((_subDBId == 1u) == op.getPrevMarkedAsRemoved());
+        assert(meta.getGid() == gid);
+        (void) meta;
+
+        bool remres = _metaStore.remove(op.getPrevLid());
+        assert(remres);
+        (void) remres;
+
+        _metaStore.removeComplete(op.getPrevLid());
+        _docs.erase(op.getPrevLid());
+        needCommit = true;
+    }
+    if (needCommit) {
+        _metaStore.commit(serialNum, serialNum);
+    }
+}
+
+
+// Assign the target lid for a move into this sub DB. The document's gid
+// must not already be present here; inspect() reserves/returns the lid
+// that the subsequent handleMove() will use.
+void
+MyDocumentSubDB::prepareMove(MoveOperation &op)
+{
+    const DocumentId &docId = op.getDocument()->getId();
+    const document::GlobalId &gid = docId.getGlobalId();
+    DocumentMetaStore::Result inspectResult = _metaStore.inspect(gid);
+    assert(!inspectResult._found);
+    op.setDbDocumentId(DbDocumentId(_subDBId, inspectResult._lid));
+}
+
+
+// Apply a move operation. The same operation is handed to both the source
+// and the target sub DB: the target branch inserts the document, the
+// source branch removes the previous version.
+void
+MyDocumentSubDB::handleMove(const MoveOperation &op)
+{
+    const SerialNum serialNum = op.getSerialNum();
+    const Document::SP &doc = op.getDocument();
+    const DocumentId &docId = doc->getId();
+    const document::GlobalId &gid = docId.getGlobalId();
+    bool needCommit = false;
+
+    if (op.getValidDbdId(_subDBId)) {
+        typedef DocumentMetaStore::Result PutRes;
+
+        PutRes putRes(_metaStore.put(gid,
+                                     op.getBucketId(),
+                                     op.getTimestamp(),
+                                     op.getLid()));
+        assert(putRes.ok());
+        assert(op.getLid() == putRes._lid);
+        _docs[op.getLid()] = doc;
+        needCommit = true;
+    }
+    if (op.getValidPrevDbdId(_subDBId)) {
+        // This sub DB is the move source: remove the old version.
+        assert(_metaStore.validLid(op.getPrevLid()));
+        const RawDocumentMetaData &meta(_metaStore.getRawMetaData(op.getPrevLid()));
+        assert((_subDBId == 1u) == op.getPrevMarkedAsRemoved());
+        assert(meta.getGid() == gid);
+        (void) meta;
+
+        bool remres = _metaStore.remove(op.getPrevLid());
+        assert(remres);
+        (void) remres;
+
+        _metaStore.removeComplete(op.getPrevLid());
+        _docs.erase(op.getPrevLid());
+        needCommit = true;
+    }
+    if (needCommit) {
+        _metaStore.commit(serialNum, serialNum);
+    }
+}
+
+
+// Number of lids currently in use in this sub DB's meta store.
+uint32_t
+MyDocumentSubDB::getNumUsedLids(void) const
+{
+    return _metaStore.getNumUsedLids();
+}
+
+
+// Remembers the executor thread id so handler methods can assert they are
+// only invoked from the maintenance executor thread.
+MyFeedHandler::MyFeedHandler(FastOS_ThreadId &executorThreadId)
+    : IDocumentMoveHandler(),
+      IPruneRemovedDocumentsHandler(),
+      IHeartBeatHandler(),
+      _executorThreadId(executorThreadId),
+      _subDBs(),
+      _serialNum(0u),
+      _heartBeats(0u),
+      _wipeTimeLimit()
+{
+}
+
+
+// Nothing to release; _subDBs holds non-owning pointers.
+MyFeedHandler::~MyFeedHandler(void)
+{
+}
+
+
+// True when the calling thread is the executor thread whose id was
+// sampled at construction time.
+bool
+MyFeedHandler::isExecutorThread(void)
+{
+    return FastOS_Thread::CompareThreadIds(_executorThreadId,
+                                           FastOS_Thread::GetCurrentThreadId());
+}
+
+
+// Execute a document move: prepare the target lid in the destination sub
+// DB, assign a serial number, then apply the operation to both destination
+// and source sub DBs. Must run on the executor thread.
+void
+MyFeedHandler::handleMove(MoveOperation &op)
+{
+    assert(isExecutorThread());
+    assert(op.getValidPrevDbdId());
+    _subDBs[op.getSubDbId()]->prepareMove(op);
+    assert(op.getValidDbdId());
+    assert(op.getSubDbId() != op.getPrevSubDbId());
+    // Check for wrong magic numbers: moves never involve the removed
+    // sub DB (id 1), and both ids must be in range.
+    assert(op.getSubDbId() != 1u);
+    assert(op.getPrevSubDbId() != 1u);
+    assert(op.getSubDbId() < _subDBs.size());
+    assert(op.getPrevSubDbId() < _subDBs.size());
+    storeOperation(op);
+    _subDBs[op.getSubDbId()]->handleMove(op);
+    _subDBs[op.getPrevSubDbId()]->handleMove(op);
+}
+
+
+// Forward a prune operation to the removed sub DB, but only when there is
+// actually something to remove. Must run on the executor thread.
+void
+MyFeedHandler::performPruneRemovedDocuments(PruneRemovedDocumentsOperation &op)
+{
+    assert(isExecutorThread());
+    if (op.getLidsToRemove()->getNumLids() != 0u) {
+        storeOperation(op);
+        // magic number: index 1 is the removed sub DB.
+        _subDBs[1u]->handlePruneRemovedDocuments(op);
+    }
+}
+
+
+// Count heartbeats so tests can verify the heartbeat job is scheduled.
+void
+MyFeedHandler::heartBeat(void)
+{
+    assert(isExecutorThread());
+    ++_heartBeats;
+}
+
+
+// Record the wipe time limit so tests can verify the wipe job fired with
+// the expected cutoff.
+void
+MyFeedHandler::wipeOldRemovedFields(fastos::TimeStamp wipeTimeLimit)
+{
+    assert(isExecutorThread());
+    _wipeTimeLimit = wipeTimeLimit;
+}
+
+
+// Install the (non-owning) sub DB pointers, indexed by sub DB id.
+void
+MyFeedHandler::setSubDBs(const std::vector<MyDocumentSubDB *> &subDBs)
+{
+    _subDBs = subDBs;
+}
+
+
+// Stamp the operation with the next serial number; no persistence here.
+void
+MyFeedHandler::storeOperation(FeedOperation &op)
+{
+    op.setSerialNum(incSerialNum());
+}
+
+
+// Single-threaded executor; runs one task up front to capture the worker
+// thread's id into _threadId for later identity checks.
+MyExecutor::MyExecutor(void)
+    : vespalib::ThreadStackExecutor(1, 128 * 1024),
+      _threadId()
+{
+    execute(makeTask(makeClosure(&sampleThreadId, &_threadId)));
+    sync();
+}
+
+
+// Base class shuts down the worker thread.
+MyExecutor::~MyExecutor(void)
+{
+}
+
+
+// Returns true when no new tasks were accepted across a sync.
+// NOTE(review): the discarded getStats() call appears intended to reset
+// the accepted-task counter so the post-sync snapshot only counts tasks
+// accepted since — confirm against ThreadStackExecutor::getStats()
+// semantics.
+bool
+MyExecutor::isIdle(void)
+{
+    (void) getStats();
+    sync();
+    Stats stats(getStats());
+    return stats.acceptedTasks == 0u;
+}
+
+
+// Busy-wait until the executor is idle or the timeout (seconds) expires.
+// Returns false on timeout, true once idle.
+bool
+MyExecutor::waitIdle(double timeout)
+{
+    FastOS_Time start;
+    start.SetNow();
+    const double deadline = start.Secs() + timeout;
+    while (!isIdle()) {
+        FastOS_Time now;
+        now.SetNow();
+        if (now.Secs() >= deadline) {
+            return false;
+        }
+    }
+    return true;
+}
+
+
+// Builds the three sub DBs (ready=0, removed=1, notReady=2), registers
+// them with the stub feed handler, and syncs them into the controller.
+MaintenanceControllerFixture::MaintenanceControllerFixture(void)
+    : _executor(),
+      _threadService(_executor),
+      _docTypeName("searchdocument"), // must match document builder
+      _builder(),
+      _bucketDB(std::make_shared<BucketDBOwner>()),
+      _calc(new test::BucketStateCalculator()),
+      _clusterStateHandler(),
+      _bucketHandler(),
+      _bmc(),
+      _ready(0u, SubDbType::READY, _builder.getRepo(), _bucketDB, _docTypeName),
+      _removed(1u, SubDbType::REMOVED, _builder.getRepo(), _bucketDB,
+               _docTypeName),
+      _notReady(2u, SubDbType::NOTREADY, _builder.getRepo(), _bucketDB,
+                _docTypeName),
+      _gsp(),
+      _fh(_executor._threadId),
+      _mcCfg(new DocumentDBMaintenanceConfig),
+      _injectDefaultJobs(true),
+      _jobTrackers(),
+      _readyAttributeManager(std::make_shared<MyAttributeManager>()),
+      _notReadyAttributeManager(std::make_shared<MyAttributeManager>()),
+      _attributeUsageFilter(),
+      _mc(_threadService, _docTypeName)
+{
+    std::vector<MyDocumentSubDB *> subDBs;
+    subDBs.push_back(&_ready);
+    subDBs.push_back(&_removed);
+    subDBs.push_back(&_notReady);
+    _fh.setSubDBs(subDBs);
+    syncSubDBs();
+}
+
+
+// Stop maintenance before members are torn down.
+MaintenanceControllerFixture::~MaintenanceControllerFixture(void)
+{
+    stopMaintenance();
+}
+
+
+// Run performSyncSubDBs() on the executor thread and wait for completion.
+void
+MaintenanceControllerFixture::syncSubDBs(void)
+{
+    _executor.execute(makeTask(makeClosure(this,
+                                           &MaintenanceControllerFixture::
+                                           performSyncSubDBs)));
+    _executor.sync();
+}
+
+
+// Executor-thread body: hand the controller current views of the sub DBs.
+void
+MaintenanceControllerFixture::performSyncSubDBs(void)
+{
+    _mc.syncSubDBs(_ready.getSubDB(),
+                   _removed.getSubDB(),
+                   _notReady.getSubDB());
+}
+
+
+// Run performNotifyClusterStateChanged() on the executor thread and wait.
+void
+MaintenanceControllerFixture::notifyClusterStateChanged(void)
+{
+    _executor.execute(makeTask(makeClosure(this,
+                                           &MaintenanceControllerFixture::
+                                           performNotifyClusterStateChanged)));
+    _executor.sync();
+}
+
+
+// Executor-thread body: publish the current bucket state calculator.
+void
+MaintenanceControllerFixture::performNotifyClusterStateChanged(void)
+{
+    _clusterStateHandler.notifyClusterStateChanged(_calc);
+}
+
+
+// Run performStartMaintenance() on the executor thread and wait.
+void
+MaintenanceControllerFixture::startMaintenance(void)
+{
+    _executor.execute(makeTask(makeClosure(this,
+                                           &MaintenanceControllerFixture::
+                                           performStartMaintenance)));
+    _executor.sync();
+}
+
+// Register the production set of maintenance jobs, unless a test has
+// disabled this to register only its own jobs (_injectDefaultJobs=false).
+void
+MaintenanceControllerFixture::injectMaintenanceJobs()
+{
+    if (_injectDefaultJobs) {
+        ILidSpaceCompactionHandler::Vector lscHandlers;
+        MaintenanceJobsInjector::injectJobs(_mc, *_mcCfg, _fh, _gsp, _fh,
+                lscHandlers, _fh, _mc, _docTypeName.getName(),
+                _fh, _fh, _bmc, _clusterStateHandler, _bucketHandler,
+                _calc, _jobTrackers, *this,
+                _readyAttributeManager,
+                _notReadyAttributeManager,
+                _attributeUsageFilter);
+    }
+}
+
+// Executor-thread body: inject jobs, then start the controller.
+void
+MaintenanceControllerFixture::performStartMaintenance(void)
+{
+    injectMaintenanceJobs();
+    _mc.start(_mcCfg);
+}
+
+
+// Stop the controller and drain any tasks still queued on the executor.
+void
+MaintenanceControllerFixture::stopMaintenance(void)
+{
+    _mc.stop();
+    _executor.sync();
+}
+
+
+// Run performForwardMaintenanceConfig() on the executor thread and wait.
+void
+MaintenanceControllerFixture::forwardMaintenanceConfig(void)
+{
+    _executor.execute(makeTask(makeClosure(this,
+                                           &MaintenanceControllerFixture::
+                                           performForwardMaintenanceConfig)));
+    _executor.sync();
+}
+
+
+// Executor-thread body: kill the current jobs, re-inject them, and hand
+// the controller the new config.
+void
+MaintenanceControllerFixture::performForwardMaintenanceConfig(void)
+{
+    _mc.killJobs();
+    injectMaintenanceJobs();
+    _mc.newConfig(_mcCfg);
+}
+
+
+// Feed every test document as a put operation into the given sub DB,
+// assigning serial numbers through the stub feed handler.
+void
+MaintenanceControllerFixture::insertDocs(const test::UserDocuments &docs,
+                                         MyDocumentSubDB &subDb)
+{
+    for (const auto &bucketEntry : docs) {
+        const test::BucketDocuments &bucketDocs = bucketEntry.second;
+        for (const test::Document &testDoc : bucketDocs.getDocs()) {
+            PutOperation op(testDoc.getBucket(),
+                            testDoc.getTimestamp(),
+                            testDoc.getDoc());
+            op.setDbDocumentId(DbDocumentId(subDb.getSubDBId(),
+                                            testDoc.getLid()));
+            _fh.storeOperation(op);
+            subDb.handlePut(op);
+        }
+    }
+}
+
+
+// Feed a remove operation (with the given timestamp) for every test
+// document into the removed sub DB.
+void
+MaintenanceControllerFixture::removeDocs(const test::UserDocuments &docs,
+                                         Timestamp timestamp)
+{
+    for (const auto &bucketEntry : docs) {
+        const test::BucketDocuments &bucketDocs = bucketEntry.second;
+        for (const test::Document &testDoc : bucketDocs.getDocs()) {
+            RemoveOperation op(testDoc.getBucket(),
+                               timestamp,
+                               testDoc.getDoc()->getId());
+            op.setDbDocumentId(DbDocumentId(_removed.getSubDBId(),
+                                            testDoc.getLid()));
+            _fh.storeOperation(op);
+            _removed.handleRemove(op);
+        }
+    }
+}
+
+// Verifies that documents migrate between the ready and notReady sub DBs
+// as buckets are marked ready, and that a frozen bucket defers its moves
+// until it is thawed.
+TEST_F("require that bucket move controller is active",
+       MaintenanceControllerFixture)
+{
+    f._builder.createDocs(1, 1, 4); // 3 docs
+    f._builder.createDocs(2, 4, 6); // 2 docs
+    test::UserDocuments readyDocs(f._builder.getDocs());
+    BucketId bucketId1(readyDocs.getBucket(1));
+    BucketId bucketId2(readyDocs.getBucket(2));
+    f.insertDocs(readyDocs, f._ready);
+    f._builder.clearDocs();
+    f._builder.createDocs(3, 1, 3); // 2 docs
+    f._builder.createDocs(4, 3, 6); // 3 docs
+    test::UserDocuments notReadyDocs(f._builder.getDocs());
+    BucketId bucketId3(notReadyDocs.getBucket(3));
+    BucketId bucketId4(notReadyDocs.getBucket(4));
+    f.insertDocs(notReadyDocs, f._notReady);
+    f._builder.clearDocs();
+    f.notifyClusterStateChanged();
+    EXPECT_TRUE(f._executor.isIdle());
+    EXPECT_EQUAL(5u, f._ready.getNumUsedLids());
+    EXPECT_EQUAL(5u, f._ready.getDocumentCount());
+    EXPECT_EQUAL(5u, f._notReady.getNumUsedLids());
+    EXPECT_EQUAL(5u, f._notReady.getDocumentCount());
+    // No bucket is ready yet: everything ends up in notReady.
+    f.startMaintenance();
+    ASSERT_TRUE(f._executor.waitIdle(TIMEOUT_SEC));
+    EXPECT_EQUAL(0u, f._ready.getNumUsedLids());
+    EXPECT_EQUAL(0u, f._ready.getDocumentCount());
+    EXPECT_EQUAL(10u, f._notReady.getNumUsedLids());
+    EXPECT_EQUAL(10u, f._notReady.getDocumentCount());
+    // Marking bucket 1 ready moves its 3 docs back to ready.
+    f._calc->addReady(bucketId1);
+    f.notifyClusterStateChanged();
+    ASSERT_TRUE(f._executor.waitIdle(TIMEOUT_SEC));
+    EXPECT_EQUAL(3u, f._ready.getNumUsedLids());
+    EXPECT_EQUAL(3u, f._ready.getDocumentCount());
+    EXPECT_EQUAL(7u, f._notReady.getNumUsedLids());
+    EXPECT_EQUAL(7u, f._notReady.getDocumentCount());
+    // Bucket 2 is frozen: its 2 docs stay put until the bucket is thawed.
+    MyFrozenBucket::UP frozen2(new MyFrozenBucket(f._mc, bucketId2));
+    f._calc->addReady(bucketId2);
+    f._calc->addReady(bucketId4);
+    f.notifyClusterStateChanged();
+    ASSERT_TRUE(f._executor.waitIdle(TIMEOUT_SEC));
+    EXPECT_EQUAL(6u, f._ready.getNumUsedLids());
+    EXPECT_EQUAL(6u, f._ready.getDocumentCount());
+    EXPECT_EQUAL(4u, f._notReady.getNumUsedLids());
+    EXPECT_EQUAL(4u, f._notReady.getDocumentCount());
+    frozen2.reset();
+    ASSERT_TRUE(f._executor.waitIdle(TIMEOUT_SEC));
+    EXPECT_EQUAL(8u, f._ready.getNumUsedLids());
+    EXPECT_EQUAL(8u, f._ready.getDocumentCount());
+    EXPECT_EQUAL(2u, f._notReady.getNumUsedLids());
+    EXPECT_EQUAL(2u, f._notReady.getDocumentCount());
+}
+
+// Verifies that the prune-removed-documents job removes only tombstones
+// older than the configured age, and that pruning of a frozen bucket is
+// deferred until the bucket is thawed.
+TEST_F("require that document pruner is active",
+       MaintenanceControllerFixture)
+{
+    // Timestamps use microsecond resolution (tshz ticks per second);
+    // remTime is an hour in the past, keepTime an hour in the future.
+    uint64_t tshz = 1000000;
+    uint64_t now = static_cast<uint64_t>(time(0)) * tshz;
+    Timestamp remTime(static_cast<Timestamp::Type>(now - 3600 * tshz));
+    Timestamp keepTime(static_cast<Timestamp::Type>(now + 3600 * tshz));
+    f._builder.createDocs(1, 1, 4); // 3 docs
+    f._builder.createDocs(2, 4, 6); // 2 docs
+    test::UserDocuments keepDocs(f._builder.getDocs());
+    BucketId bucketId1(keepDocs.getBucket(1));
+    BucketId bucketId2(keepDocs.getBucket(2));
+    f.removeDocs(keepDocs, keepTime);
+    f._builder.clearDocs();
+    f._builder.createDocs(3, 6, 8); // 2 docs
+    f._builder.createDocs(4, 8, 11); // 3 docs
+    test::UserDocuments removeDocs(f._builder.getDocs());
+    BucketId bucketId3(removeDocs.getBucket(3));
+    BucketId bucketId4(removeDocs.getBucket(4));
+    f.removeDocs(removeDocs, remTime);
+    f.notifyClusterStateChanged();
+    EXPECT_TRUE(f._executor.isIdle());
+    EXPECT_EQUAL(10u, f._removed.getNumUsedLids());
+    EXPECT_EQUAL(10u, f._removed.getDocumentCount());
+    f.startMaintenance();
+    ASSERT_TRUE(f._executor.waitIdle(TIMEOUT_SEC));
+    EXPECT_EQUAL(10u, f._removed.getNumUsedLids());
+    EXPECT_EQUAL(10u, f._removed.getDocumentCount());
+    // With bucket 3 frozen, pruning (age 900s, interval 0.2s) cannot touch
+    // anything yet.
+    MyFrozenBucket::UP frozen3(new MyFrozenBucket(f._mc, bucketId3));
+    f.setPruneConfig(DocumentDBPruneRemovedDocumentsConfig(0.2, 900.0));
+    for (uint32_t i = 0; i < 6; ++i) {
+        FastOS_Thread::Sleep(100);
+        ASSERT_TRUE(f._executor.waitIdle(TIMEOUT_SEC));
+        if (f._removed.getNumUsedLids() != 10u)
+            break;
+    }
+    EXPECT_EQUAL(10u, f._removed.getNumUsedLids());
+    EXPECT_EQUAL(10u, f._removed.getDocumentCount());
+    // Thawing lets the pruner remove the 5 expired tombstones.
+    frozen3.reset();
+    for (uint32_t i = 0; i < 600; ++i) {
+        FastOS_Thread::Sleep(100);
+        ASSERT_TRUE(f._executor.waitIdle(TIMEOUT_SEC));
+        if (f._removed.getNumUsedLids() != 10u)
+            break;
+    }
+    EXPECT_EQUAL(5u, f._removed.getNumUsedLids());
+    EXPECT_EQUAL(5u, f._removed.getDocumentCount());
+}
+
+// Verifies that configuring a heartbeat interval makes the heartbeat job
+// fire (observed via the stub feed handler's counter).
+TEST_F("require that heartbeats are scheduled",
+       MaintenanceControllerFixture)
+{
+    f.notifyClusterStateChanged();
+    f.startMaintenance();
+    f.setHeartBeatConfig(DocumentDBHeartBeatConfig(0.2));
+    // Poll for up to 60s; the 0.2s interval should fire long before that.
+    for (uint32_t i = 0; i < 600; ++i) {
+        FastOS_Thread::Sleep(100);
+        if (f._fh.getHeartBeats() != 0u)
+            break;
+    }
+    EXPECT_GREATER(f._fh.getHeartBeats(), 0u);
+}
+
+// Verifies that configuring a session prune interval makes the session
+// cache pruner run (observed via the stub pruner's flag).
+TEST_F("require that periodic session prunings are scheduled",
+       MaintenanceControllerFixture)
+{
+    ASSERT_FALSE(f._gsp.isInvoked);
+    f.notifyClusterStateChanged();
+    f.startMaintenance();
+    f.setGroupingSessionPruneInterval(0.2);
+    // Poll for up to 60s; the 0.2s interval should fire long before that.
+    for (uint32_t i = 0; i < 600; ++i) {
+        FastOS_Thread::Sleep(100);
+        if (f._gsp.isInvoked) {
+            break;
+        }
+    }
+    ASSERT_TRUE(f._gsp.isInvoked);
+}
+
+// Verifies that the wipe-old-removed-fields job runs and passes a time
+// limit close to now minus the configured age (100s), within a fuzz
+// allowance for scheduling delay.
+TEST_F("require that wipe old removed fields are scheduled",
+       MaintenanceControllerFixture)
+{
+    f.notifyClusterStateChanged();
+    f.startMaintenance();
+    TimeStamp now0 = TimeStamp(ClockSystem::now());
+    f.setWipeOldRemovedFieldsConfig(DocumentDBWipeOldRemovedFieldsConfig(0.2, 100));
+    TimeStamp now = TimeStamp(ClockSystem::now());
+    TimeStamp expWipeTimeLimit = now - TimeStamp(100 * TimeStamp::SEC);
+    TimeStamp wtLim;
+    // Poll until the stub feed handler records a non-zero wipe time limit.
+    for (uint32_t i = 0; i < 600; ++i) {
+        FastOS_Thread::Sleep(100);
+        wtLim = f._fh.getWipeTimeLimit();
+        if (wtLim.sec() != 0u) {
+            break;
+        }
+    }
+    TimeStamp now1 = TimeStamp(ClockSystem::now());
+    // fuzz = wall time elapsed during the test, tolerated in the compare.
+    double fuzz = now1.sec() - now0.sec();
+    LOG(info,
+        "WipeOldRemovedFields: "
+        "now(%" PRIu64 "), "
+        "expWipeTimeLimit(%" PRIu64 "), "
+        "actWipeTimeLimit(%" PRIu64 "), "
+        "fuzz(%05.3f)",
+        (uint64_t)now.sec(),
+        (uint64_t)expWipeTimeLimit.sec(),
+        (uint64_t)wtLim.sec(),
+        fuzz);
+    EXPECT_APPROX(expWipeTimeLimit.sec(), wtLim.sec(), 4u + fuzz);
+}
+
+// Verifies that an active bucket is pinned in the ready sub DB even when
+// the cluster state says it should move, and that activation state
+// changes move its documents back and forth accordingly.
+TEST_F("require that active bucket is not moved until de-activated", MaintenanceControllerFixture)
+{
+    f._builder.createDocs(1, 1, 4); // 3 docs
+    f._builder.createDocs(2, 4, 6); // 2 docs
+    test::UserDocuments readyDocs(f._builder.getDocs());
+    f.insertDocs(readyDocs, f._ready);
+    f._builder.clearDocs();
+    f._builder.createDocs(3, 1, 3); // 2 docs
+    f._builder.createDocs(4, 3, 6); // 3 docs
+    test::UserDocuments notReadyDocs(f._builder.getDocs());
+    f.insertDocs(notReadyDocs, f._notReady);
+    f._builder.clearDocs();
+
+    // bucket 1 (active) should be moved from ready to not ready according to cluster state
+    f._calc->addReady(readyDocs.getBucket(2));
+    f._ready.setBucketState(readyDocs.getBucket(1), true);
+
+    f.notifyClusterStateChanged();
+    EXPECT_TRUE(f._executor.isIdle());
+    EXPECT_EQUAL(5u, f._ready.getNumUsedLids());
+    EXPECT_EQUAL(5u, f._ready.getDocumentCount());
+    EXPECT_EQUAL(5u, f._notReady.getNumUsedLids());
+    EXPECT_EQUAL(5u, f._notReady.getDocumentCount());
+
+    // While active, bucket 1's 3 docs must stay in ready.
+    f.startMaintenance();
+    ASSERT_TRUE(f._executor.waitIdle(TIMEOUT_SEC));
+    EXPECT_EQUAL(5u, f._ready.getNumUsedLids());
+    EXPECT_EQUAL(5u, f._ready.getDocumentCount());
+    EXPECT_EQUAL(5u, f._notReady.getNumUsedLids());
+    EXPECT_EQUAL(5u, f._notReady.getDocumentCount());
+
+    // de-activate bucket 1
+    f._ready.setBucketState(readyDocs.getBucket(1), false);
+    f.notifyBucketStateChanged(readyDocs.getBucket(1), BucketInfo::NOT_ACTIVE);
+    ASSERT_TRUE(f._executor.waitIdle(TIMEOUT_SEC));
+    EXPECT_EQUAL(2u, f._ready.getNumUsedLids());
+    EXPECT_EQUAL(2u, f._ready.getDocumentCount());
+    EXPECT_EQUAL(8u, f._notReady.getNumUsedLids());
+    EXPECT_EQUAL(8u, f._notReady.getDocumentCount());
+
+    // re-activate bucket 1
+    f._ready.setBucketState(readyDocs.getBucket(1), true);
+    f.notifyBucketStateChanged(readyDocs.getBucket(1), BucketInfo::ACTIVE);
+    ASSERT_TRUE(f._executor.waitIdle(TIMEOUT_SEC));
+    EXPECT_EQUAL(5u, f._ready.getNumUsedLids());
+    EXPECT_EQUAL(5u, f._ready.getDocumentCount());
+    EXPECT_EQUAL(5u, f._notReady.getNumUsedLids());
+    EXPECT_EQUAL(5u, f._notReady.getDocumentCount());
+
+    // de-activate bucket 1
+    f._ready.setBucketState(readyDocs.getBucket(1), false);
+    f.notifyBucketStateChanged(readyDocs.getBucket(1), BucketInfo::NOT_ACTIVE);
+    ASSERT_TRUE(f._executor.waitIdle(TIMEOUT_SEC));
+    EXPECT_EQUAL(2u, f._ready.getNumUsedLids());
+    EXPECT_EQUAL(2u, f._ready.getDocumentCount());
+    EXPECT_EQUAL(8u, f._notReady.getNumUsedLids());
+    EXPECT_EQUAL(8u, f._notReady.getDocumentCount());
+
+    // re-activate bucket 1
+    f._ready.setBucketState(readyDocs.getBucket(1), true);
+    f.notifyBucketStateChanged(readyDocs.getBucket(1), BucketInfo::ACTIVE);
+    ASSERT_TRUE(f._executor.waitIdle(TIMEOUT_SEC));
+    EXPECT_EQUAL(5u, f._ready.getNumUsedLids());
+    EXPECT_EQUAL(5u, f._ready.getDocumentCount());
+    EXPECT_EQUAL(5u, f._notReady.getNumUsedLids());
+    EXPECT_EQUAL(5u, f._notReady.getDocumentCount());
+}
+
+// Verifies that a registered job is run repeatedly until its latch
+// (3 counts) reaches zero.
+TEST_F("require that a simple maintenance job is executed", MaintenanceControllerFixture)
+{
+    IMaintenanceJob::UP job(new MySimpleJob(0.2, 0.2, 3));
+    MySimpleJob &myJob = static_cast<MySimpleJob &>(*job);
+    f._mc.registerJob(std::move(job));
+    f._injectDefaultJobs = false;
+    f.startMaintenance();
+    bool done = myJob._latch.await(TIMEOUT_MS);
+    EXPECT_TRUE(done);
+    EXPECT_EQUAL(0u, myJob._latch.getCount());
+}
+
+// Verifies that a job whose run() returns before completing (split job)
+// is re-run until its latch reaches zero, despite a long interval.
+TEST_F("require that a split maintenance job is executed", MaintenanceControllerFixture)
+{
+    IMaintenanceJob::UP job(new MySplitJob(0.2, TIMEOUT_SEC * 2, 3));
+    MySplitJob &myJob = static_cast<MySplitJob &>(*job);
+    f._mc.registerJob(std::move(job));
+    f._injectDefaultJobs = false;
+    f.startMaintenance();
+    bool done = myJob._latch.await(TIMEOUT_MS);
+    EXPECT_TRUE(done);
+    EXPECT_EQUAL(0u, myJob._latch.getCount());
+}
+
+// Verifies that thawing a bucket unblocks a blocked job — but only when
+// the thaw is contended (someone tried to acquire the frozen bucket) —
+// and that a non-blocked zero-latch job still never runs.
+TEST_F("require that a blocked job is unblocked and executed after thaw bucket",
+       MaintenanceControllerFixture)
+{
+    IMaintenanceJob::UP job1(new MySimpleJob(TIMEOUT_SEC * 2, TIMEOUT_SEC * 2, 1));
+    MySimpleJob &myJob1 = static_cast<MySimpleJob &>(*job1);
+    IMaintenanceJob::UP job2(new MySimpleJob(TIMEOUT_SEC * 2, TIMEOUT_SEC * 2, 0));
+    MySimpleJob &myJob2 = static_cast<MySimpleJob &>(*job2);
+    f._mc.registerJob(std::move(job1));
+    f._mc.registerJob(std::move(job2));
+    f._injectDefaultJobs = false;
+    f.startMaintenance();
+
+    myJob1.block();
+    EXPECT_TRUE(myJob1.isBlocked());
+    EXPECT_FALSE(myJob2.isBlocked());
+    IBucketFreezer &ibf = f._mc;
+    // An uncontended freeze/thaw cycle must NOT unblock the job.
+    ibf.freezeBucket(BucketId(1));
+    ibf.thawBucket(BucketId(1));
+    EXPECT_TRUE(myJob1.isBlocked());
+    ibf.freezeBucket(BucketId(1));
+    IFrozenBucketHandler & fbh = f._mc;
+    // This is to simulate contention, as that is required for notification on thawed buckets.
+    EXPECT_FALSE(fbh.acquireExclusiveBucket(BucketId(1)));
+    ibf.thawBucket(BucketId(1));
+    f._executor.sync();
+    EXPECT_FALSE(myJob1.isBlocked());
+    EXPECT_FALSE(myJob2.isBlocked());
+    bool done1 = myJob1._latch.await(TIMEOUT_MS);
+    EXPECT_TRUE(done1);
+    FastOS_Thread::Sleep(2000);
+    EXPECT_EQUAL(0u, myJob2._runCnt);
+}
+
+// Verifies that a job blocked before maintenance starts is never run.
+TEST_F("require that blocked jobs are not executed", MaintenanceControllerFixture)
+{
+    IMaintenanceJob::UP job(new MySimpleJob(0.2, 0.2, 0));
+    MySimpleJob &myJob = static_cast<MySimpleJob &>(*job);
+    myJob.block();
+    f._mc.registerJob(std::move(job));
+    f._injectDefaultJobs = false;
+    f.startMaintenance();
+    // Give the 0.2s-interval scheduler ample time to (wrongly) run it.
+    FastOS_Thread::Sleep(2000);
+    EXPECT_EQUAL(0u, myJob._runCnt);
+}
+
+// Verifies that the state explorer reports all registered jobs and flags
+// the one currently executing (the long-running job sleeps in run()).
+TEST_F("require that maintenance controller state list jobs", MaintenanceControllerFixture)
+{
+    {
+        IMaintenanceJob::UP job1(new MySimpleJob(TIMEOUT_SEC * 2, TIMEOUT_SEC * 2, 0));
+        IMaintenanceJob::UP job2(new MyLongRunningJob(0.2, 0.2));
+        MyLongRunningJob &longRunningJob = static_cast<MyLongRunningJob &>(*job2);
+        f._mc.registerJob(std::move(job1));
+        f._mc.registerJob(std::move(job2));
+        f._injectDefaultJobs = false;
+        f.startMaintenance();
+        // Wait until the long-running job has actually started running.
+        longRunningJob._firstRun.await(TIMEOUT_MS);
+    }
+
+    MaintenanceControllerExplorer explorer(f._mc.getJobList());
+    Slime state;
+    SlimeInserter inserter(state);
+    explorer.get_state(inserter, true);
+
+    Inspector &runningJobs = state.get()["runningJobs"];
+    EXPECT_EQUAL(1u, runningJobs.children());
+    EXPECT_EQUAL("long_running_job", runningJobs[0]["name"].asString().make_string());
+
+    Inspector &allJobs = state.get()["allJobs"];
+    EXPECT_EQUAL(2u, allJobs.children());
+    EXPECT_EQUAL("my_job", allJobs[0]["name"].asString().make_string());
+    EXPECT_EQUAL("long_running_job", allJobs[1]["name"].asString().make_string());
+}
+
+// Exercises FrozenBucketsMap reference counting: exclusive access is only
+// granted when a bucket is not frozen, and thawBucket() returns true only
+// when the last freeze is released.
+TEST("Verify FrozenBucketsMap interface") {
+    FrozenBucketsMap m;
+    BucketId a(8, 6);
+    {
+        // Unfrozen bucket: exclusive access granted.
+        auto guard = m.acquireExclusiveBucket(a);
+        EXPECT_TRUE(bool(guard));
+        EXPECT_EQUAL(a, guard->getBucket());
+    }
+    // Freeze twice; only the final thaw reports the bucket fully thawed.
+    m.freezeBucket(a);
+    EXPECT_FALSE(m.thawBucket(a));
+    m.freezeBucket(a);
+    {
+        // Frozen bucket: exclusive access denied.
+        auto guard = m.acquireExclusiveBucket(a);
+        EXPECT_FALSE(bool(guard));
+    }
+    EXPECT_TRUE(m.thawBucket(a));
+    // Triple freeze: the first two thaws return false, the last true.
+    m.freezeBucket(a);
+    m.freezeBucket(a);
+    m.freezeBucket(a);
+    {
+        auto guard = m.acquireExclusiveBucket(a);
+        EXPECT_FALSE(bool(guard));
+    }
+    EXPECT_FALSE(m.thawBucket(a));
+    EXPECT_FALSE(m.thawBucket(a));
+    EXPECT_TRUE(m.thawBucket(a));
+    {
+        // Fully thawed again: exclusive access granted once more.
+        auto guard = m.acquireExclusiveBucket(a);
+        EXPECT_TRUE(bool(guard));
+        EXPECT_EQUAL(a, guard->getBucket());
+    }
+}
+
+// Test entry point: run all registered tests.
+TEST_MAIN()
+{
+    TEST_RUN_ALL();
+}
diff --git a/searchcore/src/tests/proton/documentdb/storeonlyfeedview/.gitignore b/searchcore/src/tests/proton/documentdb/storeonlyfeedview/.gitignore
new file mode 100644
index 00000000000..eaabc7b9279
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/storeonlyfeedview/.gitignore
@@ -0,0 +1,4 @@
+Makefile
+.depend
+*_test
+searchcore_storeonlyfeedview_test_app
diff --git a/searchcore/src/tests/proton/documentdb/storeonlyfeedview/CMakeLists.txt b/searchcore/src/tests/proton/documentdb/storeonlyfeedview/CMakeLists.txt
new file mode 100644
index 00000000000..294360b51ba
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/storeonlyfeedview/CMakeLists.txt
@@ -0,0 +1,13 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_storeonlyfeedview_test_app
+ SOURCES
+ storeonlyfeedview_test.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_feedoperation
+ searchcore_documentmetastore
+ searchcore_bucketdb
+ searchcore_proton_metrics
+ searchcore_pcommon
+)
+vespa_add_test(NAME searchcore_storeonlyfeedview_test_app COMMAND searchcore_storeonlyfeedview_test_app)
diff --git a/searchcore/src/tests/proton/documentdb/storeonlyfeedview/storeonlyfeedview_test.cpp b/searchcore/src/tests/proton/documentdb/storeonlyfeedview/storeonlyfeedview_test.cpp
new file mode 100644
index 00000000000..fe9251e1193
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/storeonlyfeedview/storeonlyfeedview_test.cpp
@@ -0,0 +1,289 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Unit tests for storeonlyfeedview.
+
+#include <vespa/log/log.h>
+LOG_SETUP("storeonlyfeedview_test");
+#include <vespa/fastos/fastos.h>
+
+#include <vespa/document/base/documentid.h>
+#include <vespa/document/base/globalid.h>
+#include <vespa/document/bucket/bucketid.h>
+#include <vespa/document/datatype/datatype.h>
+#include <vespa/searchcommon/common/schema.h>
+#include <vespa/searchcore/proton/metrics/feed_metrics.h>
+#include <vespa/searchcore/proton/server/executorthreadingservice.h>
+#include <vespa/searchcore/proton/server/storeonlyfeedview.h>
+#include <vespa/searchcore/proton/documentmetastore/lidreusedelayer.h>
+#include <vespa/searchcore/proton/test/thread_utils.h>
+#include <vespa/searchcore/proton/common/commit_time_tracker.h>
+#include <vespa/searchlib/index/docbuilder.h>
+#include <vespa/searchlib/common/serialnum.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+using document::BucketId;
+using document::DataType;
+using document::Document;
+using document::DocumentId;
+using document::DocumentTypeRepo;
+using document::DocumentUpdate;
+using document::GlobalId;
+using search::DocumentIdT;
+using search::index::DocBuilder;
+using search::index::Schema;
+using search::SerialNum;
+using storage::spi::Timestamp;
+using vespalib::make_string;
+using namespace proton;
+
+namespace {
+
+class MySummaryAdapter : public ISummaryAdapter {
+ int &_rm_count;
+ int &_put_count;
+ int &_heartbeat_count;
+
+public:
+ MySummaryAdapter(int &remove_count, int &put_count, int &heartbeat_count)
+ : _rm_count(remove_count),
+ _put_count(put_count),
+ _heartbeat_count(heartbeat_count) {
+ }
+ virtual void put(SerialNum, const Document &, DocumentIdT)
+ { ++ _put_count; }
+ virtual void remove(SerialNum, DocumentIdT) { ++_rm_count; }
+ virtual void update(SerialNum, const DocumentUpdate &, DocumentIdT,
+ const DocumentTypeRepo &) {}
+
+ virtual void heartBeat(SerialNum) { ++_heartbeat_count; }
+ virtual const search::IDocumentStore &getDocumentStore() const
+ { return *reinterpret_cast<const search::IDocumentStore *>(0); }
+
+ virtual Document::UP get(const DocumentIdT, const DocumentTypeRepo &)
+ { return Document::UP(); }
+};
+
+DocumentTypeRepo::SP myGetDocumentTypeRepo() {
+ Schema schema;
+ DocBuilder builder(schema);
+ DocumentTypeRepo::SP repo = builder.getDocumentTypeRepo();
+ ASSERT_TRUE(repo.get());
+ return repo;
+}
+
+struct MyMinimalFeedView : StoreOnlyFeedView {
+ typedef std::unique_ptr<MyMinimalFeedView> UP;
+
+ int removeAttributes_count;
+ int removeIndexedFields_count;
+ int heartBeatAttributes_count;
+ int heartBeatIndexedFields_count;
+
+ MyMinimalFeedView(const ISummaryAdapter::SP &summary_adapter,
+ const DocumentMetaStore::SP &meta_store,
+ searchcorespi::index::IThreadingService &writeService,
+ documentmetastore::ILidReuseDelayer &lidReuseDelayer,
+ CommitTimeTracker &commitTimeTracker,
+ const PersistentParams &params) :
+ StoreOnlyFeedView(StoreOnlyFeedView::Context(summary_adapter,
+ search::index::Schema::SP(),
+ DocumentMetaStoreContext::SP(
+ new DocumentMetaStoreContext(meta_store)),
+ myGetDocumentTypeRepo(),
+ writeService,
+ lidReuseDelayer,
+ commitTimeTracker),
+ params),
+ removeAttributes_count(0),
+ removeIndexedFields_count(0),
+ heartBeatAttributes_count(0),
+ heartBeatIndexedFields_count(0) {
+ }
+ virtual void removeAttributes(SerialNum s, const LidVector &l,
+ bool immediateCommit, OnWriteDoneType onWriteDone) override {
+ StoreOnlyFeedView::removeAttributes(s, l, immediateCommit, onWriteDone);
+ ++removeAttributes_count;
+ }
+ virtual void removeIndexedFields(SerialNum s, const LidVector &l,
+ bool immediateCommit,
+ OnWriteDoneType onWriteDone) override {
+ StoreOnlyFeedView::removeIndexedFields(s, l,
+ immediateCommit, onWriteDone);
+ ++removeIndexedFields_count;
+ }
+ virtual void heartBeatIndexedFields(SerialNum s) override {
+ StoreOnlyFeedView::heartBeatIndexedFields(s);
+ ++heartBeatIndexedFields_count;
+ }
+ virtual void heartBeatAttributes(SerialNum s) override {
+ StoreOnlyFeedView::heartBeatAttributes(s);
+ ++heartBeatAttributes_count;
+ }
+};
+
+const uint32_t subdb_id = 0;
+
+struct Fixture {
+ int remove_count;
+ int put_count;
+ int heartbeat_count;
+ DocumentMetaStore::SP meta_store;
+ ExecutorThreadingService writeService;
+ documentmetastore::LidReuseDelayer _lidReuseDelayer;
+ CommitTimeTracker _commitTimeTracker;
+ MyMinimalFeedView::UP feedview;
+
+ Fixture(SubDbType subDbType = SubDbType::READY)
+ : remove_count(0),
+ put_count(0),
+ heartbeat_count(0),
+ meta_store(new DocumentMetaStore(std::make_shared<BucketDBOwner>(),
+ DocumentMetaStore::getFixedName(),
+ search::GrowStrategy(),
+ DocumentMetaStore::IGidCompare::SP(
+ new DocumentMetaStore::
+ DefaultGidCompare),
+ subDbType)),
+ writeService(),
+ _lidReuseDelayer(writeService, *meta_store),
+ _commitTimeTracker(fastos::TimeStamp()),
+ feedview() {
+ PerDocTypeFeedMetrics metrics(0);
+ StoreOnlyFeedView::PersistentParams
+ params(0, 0, DocTypeName("foo"), metrics, subdb_id,
+ subDbType);
+ meta_store->constructFreeList();
+ ISummaryAdapter::SP adapter(new MySummaryAdapter(
+ remove_count, put_count, heartbeat_count));
+ feedview.reset(new MyMinimalFeedView(adapter, meta_store, writeService,
+ _lidReuseDelayer,
+ _commitTimeTracker, params));
+ }
+
+ ~Fixture() {
+ writeService.sync();
+ }
+
+ void addSingleDocToMetaStore(uint32_t expected_lid) {
+ typedef DocumentMetaStore::Result Result;
+ DocumentId id(make_string("groupdoc:test:foo:%d", expected_lid));
+ Result inspect = meta_store->inspect(id.getGlobalId());
+ EXPECT_EQUAL(expected_lid,
+ meta_store->put(id.getGlobalId(),
+ id.getGlobalId().convertToBucketId(),
+ Timestamp(10), inspect.getLid()).getLid());
+ }
+
+ void addDocsToMetaStore(int count) {
+ for (int i = 1; i <= count; ++i) {
+ addSingleDocToMetaStore(i);
+ EXPECT_TRUE(meta_store->validLid(i));
+ }
+ }
+
+ template <typename FunctionType>
+ void runInMaster(FunctionType func) {
+ test::runInMaster(writeService, func);
+ }
+
+};
+
+TEST_F("require that prepareMove sets target db document id", Fixture)
+{
+ Document::SP doc(new Document);
+ MoveOperation op(BucketId(20, 42), Timestamp(10), doc, 1, subdb_id + 1);
+ f.runInMaster([&] () { f.feedview->prepareMove(op); });
+
+ DbDocumentId target_id = op.getDbDocumentId();
+ EXPECT_EQUAL(subdb_id, target_id.getSubDbId());
+ EXPECT_EQUAL(1u, target_id.getLid());
+}
+
+TEST_F("require that handleMove adds document to target "
+ "and removes it from source", Fixture)
+{
+ Document::SP doc(new Document);
+ MoveOperation op(doc->getId().getGlobalId().convertToBucketId(),
+ Timestamp(10), doc,
+ DbDocumentId(subdb_id + 1, 1), subdb_id);
+ op.setSerialNum(1);
+ EXPECT_EQUAL(0, f.put_count);
+ f.runInMaster([&] () { f.feedview->prepareMove(op); });
+ f.runInMaster([&] () { f.feedview->handleMove(op); });
+ EXPECT_EQUAL(1, f.put_count);
+ uint32_t lid = op.getDbDocumentId().getLid();
+ EXPECT_TRUE(f.meta_store->validLid(lid));
+
+ // Change the MoveOperation so this is the source sub db.
+ op.setDbDocumentId(DbDocumentId(subdb_id + 1, lid));
+ op.setPrevDbDocumentId(DbDocumentId(subdb_id, lid));
+ EXPECT_EQUAL(0, f.remove_count);
+ f.runInMaster([&] () { f.feedview->handleMove(op); });
+ EXPECT_FALSE(f.meta_store->validLid(lid));
+ EXPECT_EQUAL(1, f.remove_count);
+}
+
+
+TEST_F("require that handleMove handles move within same subdb", Fixture)
+{
+ Document::SP doc(new Document);
+ DocumentId doc1id("groupdoc:test:foo:1");
+ f.runInMaster([&] () { f.meta_store->put(doc1id.getGlobalId(),
+ doc1id.getGlobalId().convertToBucketId(),
+ Timestamp(9), 1); });
+ f.runInMaster([&] () { f.meta_store->put(doc->getId().getGlobalId(),
+ doc->getId().getGlobalId().convertToBucketId(),
+ Timestamp(10), 2); });
+ f.runInMaster([&] () { f.meta_store->remove(1); });
+ f.meta_store->removeComplete(1);
+ MoveOperation op(doc->getId().getGlobalId().convertToBucketId(),
+ Timestamp(10), doc,
+ DbDocumentId(subdb_id, 2), subdb_id);
+ op.setTargetLid(1);
+ op.setSerialNum(1);
+ EXPECT_EQUAL(0, f.put_count);
+ EXPECT_EQUAL(0, f.remove_count);
+ f.runInMaster([&] () { f.feedview->handleMove(op); });
+ EXPECT_EQUAL(1, f.put_count);
+ EXPECT_EQUAL(1, f.remove_count);
+ uint32_t lid = op.getDbDocumentId().getLid();
+ EXPECT_TRUE(f.meta_store->validLid(lid));
+}
+
+
+TEST_F("require that prune removed documents removes documents",
+ Fixture(SubDbType::REMOVED))
+{
+ f.addDocsToMetaStore(3);
+
+ LidVectorContext::LP lids(new LidVectorContext(4));
+ lids->addLid(1);
+ lids->addLid(3);
+ PruneRemovedDocumentsOperation op(lids->getDocIdLimit(), subdb_id);
+ op.setLidsToRemove(lids);
+ op.setSerialNum(1); // allows use of meta store.
+ f.runInMaster([&] () { f.feedview->handlePruneRemovedDocuments(op); });
+
+ EXPECT_EQUAL(2, f.remove_count);
+ EXPECT_FALSE(f.meta_store->validLid(1));
+ EXPECT_TRUE(f.meta_store->validLid(2));
+ EXPECT_FALSE(f.meta_store->validLid(3));
+ EXPECT_EQUAL(0, f.feedview->removeAttributes_count);
+ EXPECT_EQUAL(0, f.feedview->removeIndexedFields_count);
+}
+
+TEST_F("require that heartbeat propagates and commits meta store", Fixture)
+{
+ EXPECT_EQUAL(0u, f.meta_store->getStatus().getLastSyncToken());
+ EXPECT_EQUAL(0, f.feedview->heartBeatIndexedFields_count);
+ EXPECT_EQUAL(0, f.feedview->heartBeatAttributes_count);
+ EXPECT_EQUAL(0, f.heartbeat_count);
+ f.runInMaster([&] () { f.feedview->heartBeat(2); });
+ EXPECT_EQUAL(2u, f.meta_store->getStatus().getLastSyncToken());
+ EXPECT_EQUAL(1, f.feedview->heartBeatIndexedFields_count);
+ EXPECT_EQUAL(1, f.feedview->heartBeatAttributes_count);
+ EXPECT_EQUAL(1, f.heartbeat_count);
+}
+
+} // namespace
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/documentmetastore/.gitignore b/searchcore/src/tests/proton/documentmetastore/.gitignore
new file mode 100644
index 00000000000..619f7adbc6c
--- /dev/null
+++ b/searchcore/src/tests/proton/documentmetastore/.gitignore
@@ -0,0 +1,6 @@
+.depend
+Makefile
+gidmapattribute_test
+/documentmetastore2.dat
+/documentmetastore3.dat
+searchcore_documentmetastore_test_app
diff --git a/searchcore/src/tests/proton/documentmetastore/CMakeLists.txt b/searchcore/src/tests/proton/documentmetastore/CMakeLists.txt
new file mode 100644
index 00000000000..fbaa86cafc8
--- /dev/null
+++ b/searchcore/src/tests/proton/documentmetastore/CMakeLists.txt
@@ -0,0 +1,13 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_documentmetastore_test_app
+ SOURCES
+ documentmetastore_test.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_documentmetastore
+ searchcore_bucketdb
+ searchcore_attribute
+ searchcore_feedoperation
+ searchcore_fconfig
+)
+vespa_add_test(NAME searchcore_documentmetastore_test_app COMMAND sh documentmetastore_test.sh)
diff --git a/searchcore/src/tests/proton/documentmetastore/DESC b/searchcore/src/tests/proton/documentmetastore/DESC
new file mode 100644
index 00000000000..7a7bdaae267
--- /dev/null
+++ b/searchcore/src/tests/proton/documentmetastore/DESC
@@ -0,0 +1 @@
+documentmetastore test. Take a look at documentmetastore_test.cpp for details.
diff --git a/searchcore/src/tests/proton/documentmetastore/FILES b/searchcore/src/tests/proton/documentmetastore/FILES
new file mode 100644
index 00000000000..29d56a32b24
--- /dev/null
+++ b/searchcore/src/tests/proton/documentmetastore/FILES
@@ -0,0 +1 @@
+documentmetastore_test.cpp
diff --git a/searchcore/src/tests/proton/documentmetastore/documentmetastore_test.cpp b/searchcore/src/tests/proton/documentmetastore/documentmetastore_test.cpp
new file mode 100644
index 00000000000..e1e9f58fc14
--- /dev/null
+++ b/searchcore/src/tests/proton/documentmetastore/documentmetastore_test.cpp
@@ -0,0 +1,1878 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("documentmetastore_test");
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/document/base/documentid.h>
+#include <vespa/searchcore/proton/documentmetastore/documentmetastore.h>
+#include <vespa/searchcore/proton/documentmetastore/documentmetastoreflushtarget.h>
+#include <vespa/searchcore/proton/bucketdb/bucketdbhandler.h>
+#include <vespa/searchlib/attribute/attributefilesavetarget.h>
+#include <vespa/searchlib/fef/matchdatalayout.h>
+#include <vespa/searchlib/queryeval/blueprint.h>
+#include <vespa/searchlib/queryeval/searchiterator.h>
+#include <vespa/searchlib/queryeval/simpleresult.h>
+#include <vespa/searchlib/common/tunefileinfo.h>
+#include <vespa/searchlib/index/dummyfileheadercontext.h>
+#include <vespa/vespalib/util/threadstackexecutor.h>
+#include <vespa/vespalib/io/fileutil.h>
+#include <vespa/searchcore/proton/server/itlssyncer.h>
+
+using namespace document;
+using search::AttributeVector;
+using search::AttributeGuard;
+using search::AttributeFileSaveTarget;
+using search::DocumentMetaData;
+using vespalib::GenerationHandler;
+using vespalib::GenerationHolder;
+using search::GrowStrategy;
+using search::LidUsageStats;
+using search::QueryTermSimple;
+using search::SingleValueBitNumericAttribute;
+using search::fef::MatchData;
+using search::fef::MatchDataLayout;
+using search::fef::TermFieldMatchData;
+using search::queryeval::Blueprint;
+using search::queryeval::SearchIterator;
+using search::queryeval::SimpleResult;
+using storage::spi::Timestamp;
+using storage::spi::BucketChecksum;
+using storage::spi::BucketInfo;
+using search::TuneFileAttributes;
+using search::index::DummyFileHeaderContext;
+using proton::bucketdb::BucketState;
+
+namespace proton {
+
+class DummyTlsSyncer : public ITlsSyncer
+{
+public:
+ virtual ~DummyTlsSyncer() = default;
+
+ virtual void sync() override { }
+};
+
+class ReverseGidCompare : public DocumentMetaStore::IGidCompare
+{
+ GlobalId::BucketOrderCmp _comp;
+public:
+ ReverseGidCompare(void)
+ : IGidCompare(),
+ _comp()
+ {
+ }
+
+ virtual bool
+ operator()(const GlobalId &lhs, const GlobalId &rhs) const
+ {
+ return _comp(rhs, lhs);
+ }
+};
+
+
+struct BoolVector : public std::vector<bool> {
+ BoolVector() : std::vector<bool>() {}
+ BoolVector(size_t sz) : std::vector<bool>(sz) {}
+ BoolVector &T() { push_back(true); return *this; }
+ BoolVector &F() { push_back(false); return *this; }
+
+ uint32_t
+ countTrue(void) const
+ {
+ uint32_t res(0);
+ for (uint32_t i = 0; i < size(); ++i)
+ if ((*this)[i])
+ ++res;
+ return res;
+ }
+};
+
+typedef DocumentMetaStore::Result PutRes;
+typedef DocumentMetaStore::Result Result;
+
+BucketDBOwner::SP
+createBucketDB()
+{
+ return std::make_shared<BucketDBOwner>();
+}
+
+bool
+assertPut(const BucketId &bucketId,
+ const Timestamp &timestamp,
+ uint32_t lid,
+ const GlobalId &gid,
+ DocumentMetaStore &dms)
+{
+ Result inspect = dms.inspect(gid);
+ PutRes putRes;
+ if (!EXPECT_TRUE((putRes = dms.put(gid, bucketId, timestamp, inspect.getLid())).
+ ok())) return false;
+ return EXPECT_EQUAL(lid, putRes.getLid());
+}
+
+bool
+compare(const GlobalId &lhs, const GlobalId &rhs)
+{
+ return EXPECT_EQUAL(lhs.toString(), rhs.toString());
+}
+
+bool
+assertGid(const GlobalId &exp, uint32_t lid, const DocumentMetaStore &dms)
+{
+ GlobalId act;
+ if (!EXPECT_TRUE(dms.getGid(lid, act))) return false;
+ return compare(exp, act);
+}
+
+bool
+assertGid(const GlobalId &exp,
+ uint32_t lid,
+ const DocumentMetaStore &dms,
+ const BucketId &expBucketId,
+ const Timestamp &expTimestamp)
+{
+ GlobalId act;
+ BucketId bucketId;
+ Timestamp timestamp(1);
+ if (!EXPECT_TRUE(dms.getGid(lid, act)))
+ return false;
+ if (!compare(exp, act))
+ return false;
+ DocumentMetaData meta = dms.getMetaData(act);
+ if (!EXPECT_TRUE(meta.valid()))
+ return false;
+ bucketId = meta.bucketId;
+ timestamp = meta.timestamp;
+ if (!EXPECT_EQUAL(expBucketId.getRawId(), bucketId.getRawId()))
+ return false;
+ if (!EXPECT_EQUAL(expBucketId.getId(), bucketId.getId()))
+ return false;
+ if (!EXPECT_EQUAL(expTimestamp, timestamp))
+ return false;
+ return true;
+}
+
+bool
+assertLid(uint32_t exp, const GlobalId &gid, const DocumentMetaStore &dms)
+{
+ uint32_t act;
+ if (!EXPECT_TRUE(dms.getLid(gid, act))) return false;
+ return EXPECT_EQUAL(exp, act);
+}
+
+bool
+assertMetaData(const DocumentMetaData &exp, const DocumentMetaData &act)
+{
+ if (!EXPECT_EQUAL(exp.lid, act.lid)) return false;
+ if (!EXPECT_EQUAL(exp.timestamp, act.timestamp)) return false;
+ if (!EXPECT_EQUAL(exp.bucketId, act.bucketId)) return false;
+ if (!EXPECT_EQUAL(exp.gid, act.gid)) return false;
+ if (!EXPECT_EQUAL(exp.removed, act.removed)) return false;
+ return true;
+}
+
+bool
+assertActiveLids(const BoolVector &exp, const SingleValueBitNumericAttribute &act)
+{
+ // lid 0 is reserved
+ if (!EXPECT_EQUAL(exp.size() + 1, act.getNumDocs())) return false;
+ for (size_t i = 0; i < exp.size(); ++i) {
+ if (!EXPECT_EQUAL((exp[i] ? 1 : 0), act.getInt(i + 1))) return false;
+ }
+ return true;
+}
+
+bool
+assertBlackList(const SimpleResult &exp, Blueprint::UP blackListBlueprint, bool strict)
+{
+ MatchDataLayout mdl;
+ MatchData::UP md = mdl.createMatchData();
+ blackListBlueprint->fetchPostings(strict);
+ SearchIterator::UP sb = blackListBlueprint->createSearch(*md, strict);
+ SimpleResult act;
+ act.search(*sb);
+ return EXPECT_EQUAL(exp, act);
+}
+
+bool
+assertSearchResult(const SimpleResult &exp, const DocumentMetaStore &dms,
+ const vespalib::string &term, const QueryTermSimple::SearchTerm &termType,
+ bool strict, uint32_t docIdLimit = 100)
+{
+ AttributeVector::SearchContext::UP sc =
+ dms.getSearch(QueryTermSimple::UP(new QueryTermSimple(term, termType)),
+ AttributeVector::SearchContext::Params());
+ TermFieldMatchData tfmd;
+ SearchIterator::UP sb = sc->createIterator(&tfmd, strict);
+ SimpleResult act;
+ if (strict) {
+ act.search(*sb);
+ } else {
+ act.search(*sb, docIdLimit);
+ }
+ return EXPECT_EQUAL(exp, act);
+}
+
+bool
+assertBucketInfo(uint32_t expDocCount,
+ uint32_t expMetaCount,
+ const BucketInfo &act)
+{
+ if (!EXPECT_EQUAL(expDocCount, act.getDocumentCount()))
+ return false;
+ if (!EXPECT_EQUAL(expMetaCount, act.getEntryCount()))
+ return false;
+ return true;
+}
+
+GlobalId gid1("111111111111");
+GlobalId gid2("222222222222");
+GlobalId gid3("333333333333");
+GlobalId gid4("444444444444");
+GlobalId gid5("555555555555");
+const uint32_t minNumBits = 8u;
+BucketId bucketId1(minNumBits,
+ gid1.convertToBucketId().getRawId());
+BucketId bucketId2(minNumBits,
+ gid2.convertToBucketId().getRawId());
+BucketId bucketId3(minNumBits,
+ gid3.convertToBucketId().getRawId());
+BucketId bucketId4(minNumBits,
+ gid4.convertToBucketId().getRawId());
+BucketId bucketId5(minNumBits,
+ gid5.convertToBucketId().getRawId());
+Timestamp time1(1u);
+Timestamp time2(2u);
+Timestamp time3(42u);
+Timestamp time4(82u);
+Timestamp time5(141u);
+
+uint32_t
+addGid(DocumentMetaStore &dms, const GlobalId &gid, const BucketId &bid, Timestamp timestamp = Timestamp())
+{
+ Result inspect = dms.inspect(gid);
+ PutRes putRes;
+ EXPECT_TRUE((putRes = dms.put(gid, bid, timestamp, inspect.getLid())).ok());
+ return putRes.getLid();
+}
+
+uint32_t
+addGid(DocumentMetaStore &dms, const GlobalId &gid, Timestamp timestamp = Timestamp())
+{
+ BucketId bid(minNumBits, gid.convertToBucketId().getRawId());
+ return addGid(dms, gid, bid, timestamp);
+}
+
+void
+putGid(DocumentMetaStore &dms, const GlobalId &gid, uint32_t lid, Timestamp timestamp = Timestamp())
+{
+ BucketId bid(minNumBits, gid.convertToBucketId().getRawId());
+ EXPECT_TRUE(dms.put(gid, bid, timestamp, lid).ok());
+}
+
+TEST("require that removed documents are bucketized to bucket 0")
+{
+ DocumentMetaStore dms(createBucketDB());
+ dms.constructFreeList();
+ EXPECT_EQUAL(1u, dms.getNumDocs());
+ EXPECT_EQUAL(0u, dms.getNumUsedLids());
+
+ vespalib::GenerationHandler::Guard guard = dms.getGuard();
+ EXPECT_EQUAL(0ul, dms.getBucketOf(guard, 1));
+ EXPECT_TRUE(assertPut(bucketId1, time1, 1, gid1, dms));
+ EXPECT_EQUAL(bucketId1.getId(), dms.getBucketOf(guard, 1));
+ EXPECT_TRUE(assertPut(bucketId2, time2, 2, gid2, dms));
+ EXPECT_EQUAL(bucketId2.getId(), dms.getBucketOf(guard, 2));
+ EXPECT_TRUE(dms.remove(1));
+ EXPECT_EQUAL(0ul, dms.getBucketOf(guard, 1));
+ EXPECT_EQUAL(bucketId2.getId(), dms.getBucketOf(guard, 2));
+}
+
+TEST("requireThatGidsCanBeInsertedAndRetrieved")
+{
+ DocumentMetaStore dms(createBucketDB());
+ dms.constructFreeList();
+ // put()
+ EXPECT_EQUAL(1u, dms.getNumDocs());
+ EXPECT_EQUAL(0u, dms.getNumUsedLids());
+ EXPECT_TRUE(assertPut(bucketId1, time1, 1, gid1, dms));
+ EXPECT_EQUAL(2u, dms.getNumDocs());
+ EXPECT_EQUAL(1u, dms.getNumUsedLids());
+ EXPECT_TRUE(assertPut(bucketId2, time2, 2, gid2, dms));
+ EXPECT_EQUAL(3u, dms.getNumDocs());
+ EXPECT_EQUAL(2u, dms.getNumUsedLids());
+ // gid1 already inserted
+ EXPECT_TRUE(assertPut(bucketId1, time1, 1, gid1, dms));
+ // gid2 already inserted
+ EXPECT_TRUE(assertPut(bucketId2, time2, 2, gid2, dms));
+
+
+ // getGid()
+ GlobalId gid;
+ EXPECT_TRUE(assertGid(gid1, 1, dms));
+ EXPECT_TRUE(assertGid(gid2, 2, dms));
+ EXPECT_TRUE(!dms.getGid(3, gid));
+
+ // getLid()
+ uint32_t lid = 0;
+ EXPECT_TRUE(assertLid(1, gid1, dms));
+ EXPECT_TRUE(assertLid(2, gid2, dms));
+ EXPECT_TRUE(!dms.getLid(gid3, lid));
+}
+
+TEST("requireThatGidsCanBeCleared")
+{
+ DocumentMetaStore dms(createBucketDB());
+ GlobalId gid;
+ uint32_t lid = 0u;
+ dms.constructFreeList();
+ addGid(dms, gid1, bucketId1, time1);
+ EXPECT_TRUE(assertGid(gid1, 1, dms));
+ EXPECT_TRUE(assertLid(1, gid1, dms));
+ EXPECT_EQUAL(1u, dms.getNumUsedLids());
+ EXPECT_TRUE(dms.remove(1));
+ dms.removeComplete(1);
+ EXPECT_EQUAL(0u, dms.getNumUsedLids());
+ EXPECT_TRUE(!dms.getGid(1, gid));
+ EXPECT_TRUE(!dms.getLid(gid1, lid));
+ // reuse lid
+ addGid(dms, gid2, bucketId2, time2);
+ EXPECT_TRUE(assertGid(gid2, 1, dms));
+ EXPECT_TRUE(assertLid(1, gid2, dms));
+ EXPECT_EQUAL(1u, dms.getNumUsedLids());
+ EXPECT_TRUE(dms.remove(1));
+ dms.removeComplete(1);
+ EXPECT_EQUAL(0u, dms.getNumUsedLids());
+ EXPECT_TRUE(!dms.getGid(1, gid));
+ EXPECT_TRUE(!dms.getLid(gid2, lid));
+ EXPECT_TRUE(!dms.remove(1)); // not used
+ EXPECT_TRUE(!dms.remove(2)); // outside range
+}
+
+TEST("requireThatGenerationHandlingIsWorking")
+{
+ AttributeVector::SP av(new DocumentMetaStore(createBucketDB()));
+ DocumentMetaStore * dms = static_cast<DocumentMetaStore *>(av.get());
+ dms->constructFreeList();
+ const GenerationHandler & gh = dms->getGenerationHandler();
+ EXPECT_EQUAL(1u, gh.getCurrentGeneration());
+ addGid(*dms, gid1, bucketId1, time1);
+ EXPECT_EQUAL(2u, gh.getCurrentGeneration());
+ EXPECT_EQUAL(0u, gh.getGenerationRefCount());
+ {
+ AttributeGuard g1(av);
+ EXPECT_EQUAL(1u, gh.getGenerationRefCount());
+ {
+ AttributeGuard g2(av);
+ EXPECT_EQUAL(2u, gh.getGenerationRefCount());
+ }
+ EXPECT_EQUAL(1u, gh.getGenerationRefCount());
+ }
+ EXPECT_EQUAL(0u, gh.getGenerationRefCount());
+ dms->remove(1);
+ dms->removeComplete(1);
+ EXPECT_EQUAL(4u, gh.getCurrentGeneration());
+}
+
+TEST("requireThatBasicFreeListIsWorking")
+{
+ GenerationHolder genHold;
+ LidStateVector freeLids(100, 100, genHold, true, false);
+ LidHoldList list;
+ EXPECT_TRUE(freeLids.empty());
+ EXPECT_EQUAL(0u, freeLids.count());
+ EXPECT_EQUAL(0u, list.size());
+
+ list.add(10, 10);
+ EXPECT_TRUE(freeLids.empty());
+ EXPECT_EQUAL(0u, freeLids.count());
+ EXPECT_EQUAL(1u, list.size());
+
+ list.add(20, 20);
+ list.add(30, 30);
+ EXPECT_TRUE(freeLids.empty());
+ EXPECT_EQUAL(0u, freeLids.count());
+ EXPECT_EQUAL(3u, list.size());
+
+ list.trimHoldLists(20, freeLids);
+ EXPECT_FALSE(freeLids.empty());
+ EXPECT_EQUAL(1u, freeLids.count());
+
+ EXPECT_EQUAL(10u, freeLids.getLowest());
+ freeLids.clearBit(10);
+ EXPECT_TRUE(freeLids.empty());
+ EXPECT_EQUAL(0u, freeLids.count());
+ EXPECT_EQUAL(2u, list.size());
+
+ list.trimHoldLists(31, freeLids);
+ EXPECT_FALSE(freeLids.empty());
+ EXPECT_EQUAL(2u, freeLids.count());
+
+ EXPECT_EQUAL(20u, freeLids.getLowest());
+ freeLids.clearBit(20);
+ EXPECT_FALSE(freeLids.empty());
+ EXPECT_EQUAL(1u, freeLids.count());
+ EXPECT_EQUAL(0u, list.size());
+
+ EXPECT_EQUAL(30u, freeLids.getLowest());
+ freeLids.clearBit(30);
+ EXPECT_TRUE(freeLids.empty());
+ EXPECT_EQUAL(0u, list.size());
+ EXPECT_EQUAL(0u, freeLids.count());
+}
+
+void
+assertLidStateVector(const std::vector<uint32_t> &expLids, uint32_t lowest, uint32_t highest,
+ const LidStateVector &actLids)
+{
+ if (!expLids.empty()) {
+ EXPECT_EQUAL(expLids.size(), actLids.count());
+ uint32_t trueBit = 0;
+ for (auto i : expLids) {
+ EXPECT_TRUE(actLids.testBit(i));
+ trueBit = actLids.getNextTrueBit(trueBit);
+ EXPECT_EQUAL(i, trueBit);
+ ++trueBit;
+ }
+ trueBit = actLids.getNextTrueBit(trueBit);
+ EXPECT_EQUAL(actLids.size(), trueBit);
+ EXPECT_EQUAL(lowest, actLids.getLowest());
+ EXPECT_EQUAL(highest, actLids.getHighest());
+ } else {
+ EXPECT_TRUE(actLids.empty());
+ }
+}
+
+TEST("requireThatLidStateVectorResizingIsWorking")
+{
+ GenerationHolder genHold;
+ LidStateVector lids(1000, 1000, genHold, true, true);
+ lids.setBit(3);
+ lids.setBit(150);
+ lids.setBit(270);
+ lids.setBit(310);
+ lids.setBit(440);
+ lids.setBit(780);
+ lids.setBit(930);
+ assertLidStateVector({3,150,270,310,440,780,930}, 3, 930, lids);
+
+ lids.resizeVector(1500, 1500);
+ assertLidStateVector({3,150,270,310,440,780,930}, 3, 930, lids);
+ lids.clearBit(3);
+ assertLidStateVector({150,270,310,440,780,930}, 150, 930, lids);
+ lids.clearBit(150);
+ assertLidStateVector({270,310,440,780,930}, 270, 930, lids);
+ lids.setBit(170);
+ assertLidStateVector({170,270,310,440,780,930}, 170, 930, lids);
+ lids.setBit(1490);
+ assertLidStateVector({170,270,310,440,780,930,1490}, 170, 1490, lids);
+
+ lids.resizeVector(2000, 2000);
+ assertLidStateVector({170,270,310,440,780,930,1490}, 170, 1490, lids);
+ lids.clearBit(170);
+ assertLidStateVector({270,310,440,780,930,1490}, 270, 1490, lids);
+ lids.clearBit(270);
+ assertLidStateVector({310,440,780,930,1490}, 310, 1490, lids);
+ lids.setBit(1990);
+ assertLidStateVector({310,440,780,930,1490,1990}, 310, 1990, lids);
+ lids.clearBit(310);
+ assertLidStateVector({440,780,930,1490,1990}, 440, 1990, lids);
+ lids.clearBit(440);
+ assertLidStateVector({780,930,1490,1990}, 780, 1990, lids);
+ lids.clearBit(780);
+ assertLidStateVector({930,1490,1990}, 930, 1990, lids);
+ lids.clearBit(930);
+ assertLidStateVector({1490,1990}, 1490, 1990, lids);
+ lids.clearBit(1490);
+ assertLidStateVector({1990}, 1990, 1990, lids);
+ lids.clearBit(1990);
+ assertLidStateVector({}, 0, 0, lids);
+
+ genHold.clearHoldLists();
+}
+
+TEST("requireThatLidAndGidSpaceIsReused")
+{
+ AttributeVector::SP av(new DocumentMetaStore(createBucketDB()));
+ DocumentMetaStore * dms = static_cast<DocumentMetaStore *>(av.get());
+ dms->constructFreeList();
+ EXPECT_EQUAL(1u, dms->getNumDocs());
+ EXPECT_EQUAL(0u, dms->getNumUsedLids());
+ EXPECT_TRUE(assertPut(bucketId1, time1, 1, gid1, *dms)); // -> gen 1
+ EXPECT_EQUAL(2u, dms->getNumDocs());
+ EXPECT_EQUAL(1u, dms->getNumUsedLids());
+ EXPECT_TRUE(assertPut(bucketId2, time2, 2, gid2, *dms)); // -> gen 2
+ EXPECT_EQUAL(3u, dms->getNumDocs());
+ EXPECT_EQUAL(2u, dms->getNumUsedLids());
+ dms->remove(2); // -> gen 3
+ dms->removeComplete(2); // -> gen 4
+ EXPECT_EQUAL(3u, dms->getNumDocs());
+ EXPECT_EQUAL(1u, dms->getNumUsedLids());
+ // -> gen 5 (reuse of lid 2)
+ EXPECT_TRUE(assertPut(bucketId3, time3, 2, gid3, *dms));
+ EXPECT_EQUAL(3u, dms->getNumDocs());
+ EXPECT_EQUAL(2u, dms->getNumUsedLids()); // reuse
+ EXPECT_TRUE(assertGid(gid3, 2, *dms));
+ {
+ AttributeGuard g1(av); // guard on gen 5
+ dms->remove(2);
+ dms->removeComplete(2);
+ EXPECT_EQUAL(3u, dms->getNumDocs());
+ EXPECT_EQUAL(1u, dms->getNumUsedLids()); // lid 2 free but guarded
+ EXPECT_TRUE(assertPut(bucketId4, time4, 3, gid4, *dms));
+ EXPECT_EQUAL(4u, dms->getNumDocs()); // generation guarded, new lid
+ EXPECT_EQUAL(2u, dms->getNumUsedLids());
+ EXPECT_TRUE(assertGid(gid4, 3, *dms));
+ }
+ EXPECT_TRUE(assertPut(bucketId5, time5, 4, gid5, *dms));
+ EXPECT_EQUAL(5u, dms->getNumDocs()); // reuse blocked by previous guard. released at end of put()
+ EXPECT_EQUAL(3u, dms->getNumUsedLids());
+ EXPECT_TRUE(assertGid(gid5, 4, *dms));
+ EXPECT_TRUE(assertPut(bucketId2, time2, 2, gid2, *dms)); // reuse of lid 2
+ EXPECT_EQUAL(5u, dms->getNumDocs());
+ EXPECT_EQUAL(4u, dms->getNumUsedLids());
+ EXPECT_TRUE(assertGid(gid2, 2, *dms));
+}
+
+GlobalId
+createGid(uint32_t lid)
+{
+ DocumentId docId(vespalib::make_string("doc:id:%u", lid));
+ return docId.getGlobalId();
+}
+
+GlobalId
+createGid(uint32_t userId, uint32_t lid)
+{
+ DocumentId docId(vespalib::make_string("userdoc:id:%u:%u", userId, lid));
+ return docId.getGlobalId();
+}
+
+TEST("requireThatWeCanStoreBucketIdAndTimestamp")
+{
+ DocumentMetaStore dms(createBucketDB());
+ uint32_t numLids = 1000;
+ uint32_t bkBits = UINT32_C(20);
+ uint64_t tsbias = UINT64_C(2000000000000);
+
+ dms.constructFreeList();
+ for (uint32_t lid = 1; lid <= numLids; ++lid) {
+ GlobalId gid = createGid(lid);
+ BucketId bucketId(gid.convertToBucketId());
+ bucketId.setUsedBits(bkBits);
+ uint32_t addLid = addGid(dms, gid, bucketId, Timestamp(lid + tsbias));
+ EXPECT_EQUAL(lid, addLid);
+ }
+ for (uint32_t lid = 1; lid <= numLids; ++lid) {
+ GlobalId gid = createGid(lid);
+ BucketId bucketId(gid.convertToBucketId());
+ bucketId.setUsedBits(bkBits);
+ EXPECT_TRUE(assertGid(gid, lid, dms, bucketId,
+ Timestamp(lid + tsbias)));
+ EXPECT_TRUE(assertLid(lid, gid, dms));
+ }
+}
+
+TEST("requireThatGidsCanBeSavedAndLoaded")
+{
+ DocumentMetaStore dms1(createBucketDB());
+ uint32_t numLids = 1000;
+ uint32_t bkBits = UINT32_C(20);
+ uint64_t tsbias = UINT64_C(2000000000000);
+ std::vector<uint32_t> removeLids;
+ removeLids.push_back(10);
+ removeLids.push_back(20);
+ removeLids.push_back(100);
+ removeLids.push_back(500);
+ dms1.constructFreeList();
+ for (uint32_t lid = 1; lid <= numLids; ++lid) {
+ GlobalId gid = createGid(lid);
+ BucketId bucketId(gid.convertToBucketId());
+ bucketId.setUsedBits(bkBits);
+ uint32_t addLid = addGid(dms1, gid, bucketId, Timestamp(lid + tsbias));
+ EXPECT_EQUAL(lid, addLid);
+ }
+ for (size_t i = 0; i < removeLids.size(); ++i) {
+ dms1.remove(removeLids[i]);
+ dms1.removeComplete(removeLids[i]);
+ }
+ uint64_t expSaveBytesSize = DocumentMetaStore::minHeaderLen +
+ (1000 - 4) * DocumentMetaStore::entrySize;
+ EXPECT_EQUAL(expSaveBytesSize, dms1.getEstimatedSaveByteSize());
+ TuneFileAttributes tuneFileAttributes;
+ DummyFileHeaderContext fileHeaderContext;
+ AttributeFileSaveTarget saveTarget(tuneFileAttributes, fileHeaderContext);
+ EXPECT_TRUE(dms1.saveAs("documentmetastore2", saveTarget));
+
+ DocumentMetaStore dms2(createBucketDB(), "documentmetastore2");
+ EXPECT_TRUE(dms2.load());
+ dms2.constructFreeList();
+ EXPECT_EQUAL(numLids + 1, dms2.getNumDocs());
+ EXPECT_EQUAL(numLids - 4, dms2.getNumUsedLids()); // 4 removed
+ for (uint32_t lid = 1; lid <= numLids; ++lid) {
+ GlobalId gid = createGid(lid);
+ BucketId bucketId(gid.convertToBucketId());
+ bucketId.setUsedBits(bkBits);
+ if (std::count(removeLids.begin(), removeLids.end(), lid) == 0) {
+ EXPECT_TRUE(assertGid(gid, lid, dms2, bucketId,
+ Timestamp(lid + tsbias)));
+ EXPECT_TRUE(assertLid(lid, gid, dms2));
+ } else {
+ LOG(info, "Lid %u was removed before saving", lid);
+ uint32_t myLid;
+ GlobalId myGid;
+ EXPECT_TRUE(!dms2.getGid(lid, myGid));
+ EXPECT_TRUE(!dms2.getLid(gid, myLid));
+ }
+ }
+ // check we can re-use from free list after load
+ for (size_t i = 0; i < removeLids.size(); ++i) {
+ LOG(info, "Re-use remove lid %u", removeLids[i]);
+ GlobalId gid = createGid(removeLids[i]);
+ BucketId bucketId(bkBits,
+ gid.convertToBucketId().getRawId());
+ // re-use removeLid[i]
+ uint32_t addLid = addGid(dms2, gid, bucketId, Timestamp(43u + i));
+ EXPECT_EQUAL(removeLids[i], addLid);
+ EXPECT_EQUAL(numLids + 1, dms2.getNumDocs());
+ EXPECT_EQUAL(numLids - (3 - i), dms2.getNumUsedLids());
+ }
+}
+
+// Verify that getStatus() counters (numDocs, numValues, allocated, used)
+// grow monotonically as gids are added to the meta store.
+TEST("requireThatStatsAreUpdated")
+{
+ DocumentMetaStore dms(createBucketDB());
+ dms.constructFreeList();
+ // Lower bound on per-document footprint: one lid (uint32_t) + raw gid bytes.
+ size_t perGidUsed = sizeof(uint32_t) + GlobalId::LENGTH;
+ // Empty store still reports one doc/value (lid 0 is reserved).
+ EXPECT_EQUAL(1u, dms.getStatus().getNumDocs());
+ EXPECT_EQUAL(1u, dms.getStatus().getNumValues());
+ uint64_t lastAllocated = dms.getStatus().getAllocated();
+ uint64_t lastUsed = dms.getStatus().getUsed();
+ EXPECT_GREATER(lastAllocated, perGidUsed);
+ EXPECT_GREATER(lastUsed, perGidUsed);
+
+ // NOTE(review): presumably waits out some time-based internal sampling
+ // before the next status read -- confirm why 2200 ms specifically.
+ FastOS_Thread::Sleep(2200);
+ addGid(dms, gid1, bucketId1, time1);
+ EXPECT_EQUAL(2u, dms.getStatus().getNumDocs());
+ EXPECT_EQUAL(2u, dms.getStatus().getNumValues());
+ // Allocated never shrinks; used must strictly grow with each added gid.
+ EXPECT_GREATER_EQUAL(dms.getStatus().getAllocated(), lastAllocated);
+ EXPECT_GREATER_EQUAL(dms.getStatus().getAllocated(), lastUsed);
+ EXPECT_GREATER(dms.getStatus().getUsed(), lastUsed);
+ EXPECT_GREATER(dms.getStatus().getUsed(), 2 * perGidUsed);
+ lastAllocated = dms.getStatus().getAllocated();
+ lastUsed = dms.getStatus().getUsed();
+
+ addGid(dms, gid2, bucketId2, time2);
+ dms.commit(true);
+ EXPECT_EQUAL(3u, dms.getStatus().getNumDocs());
+ EXPECT_EQUAL(3u, dms.getStatus().getNumValues());
+ EXPECT_GREATER_EQUAL(dms.getStatus().getAllocated(), lastAllocated);
+ EXPECT_GREATER_EQUAL(dms.getStatus().getAllocated(), lastUsed);
+ EXPECT_GREATER(dms.getStatus().getUsed(), lastUsed);
+ EXPECT_GREATER(dms.getStatus().getUsed(), 3 * perGidUsed);
+ LOG(info,
+ "stats after 2 gids added: allocated %d, used is %d > %d (3 * %d)",
+ static_cast<int>(dms.getStatus().getAllocated()),
+ static_cast<int>(dms.getStatus().getUsed()),
+ static_cast<int>(3 * perGidUsed),
+ static_cast<int>(perGidUsed));
+}
+
+// Verify put()/remove() semantics before constructFreeList() is called:
+// explicit lids may be assigned out of order, conflicting puts throw,
+// and a pre-construct remove is re-applied when the free list is built.
+TEST("requireThatWeCanPutAndRemoveBeforeFreeListConstruct")
+{
+ DocumentMetaStore dms(createBucketDB());
+ // Putting lid 4 first grows the store to 5 docs (lid 0 is reserved).
+ EXPECT_TRUE(dms.put(gid4, bucketId4, time4, 4).ok());
+ EXPECT_TRUE(assertLid(4, gid4, dms));
+ EXPECT_TRUE(assertGid(gid4, 4, dms));
+ EXPECT_EQUAL(1u, dms.getNumUsedLids());
+ EXPECT_EQUAL(5u, dms.getNumDocs());
+ EXPECT_TRUE(dms.put(gid1, bucketId1, time1, 1).ok());
+ // already there, nothing changes
+ EXPECT_TRUE(dms.put(gid1, bucketId1, time1, 1).ok());
+ EXPECT_TRUE(assertLid(1, gid1, dms));
+ EXPECT_TRUE(assertGid(gid1, 1, dms));
+ EXPECT_EQUAL(2u, dms.getNumUsedLids());
+ EXPECT_EQUAL(5u, dms.getNumDocs());
+ // gid1 already there with lid 1
+ EXPECT_EXCEPTION(!dms.put(gid1, bucketId1, time1, 2).ok(),
+ vespalib::IllegalStateException,
+ "gid found, but using another lid");
+ EXPECT_EXCEPTION(!dms.put(gid5, bucketId5, time5, 1).ok(),
+ vespalib::IllegalStateException,
+ "gid not found, but lid is used by another gid");
+ // Failed puts must leave existing mappings untouched.
+ EXPECT_TRUE(assertLid(1, gid1, dms));
+ EXPECT_TRUE(assertGid(gid1, 1, dms));
+ EXPECT_EQUAL(2u, dms.getNumUsedLids());
+ EXPECT_EQUAL(5u, dms.getNumDocs());
+ EXPECT_TRUE(dms.remove(4)); // -> goes to free list. cleared and re-applied in constructFreeList().
+ uint32_t lid;
+ GlobalId gid;
+ EXPECT_TRUE(!dms.getLid(gid4, lid));
+ EXPECT_TRUE(!dms.getGid(4, gid));
+ EXPECT_EQUAL(1u, dms.getNumUsedLids());
+ EXPECT_EQUAL(5u, dms.getNumDocs());
+ dms.constructFreeList();
+ EXPECT_EQUAL(1u, dms.getNumUsedLids());
+ EXPECT_EQUAL(5u, dms.getNumDocs());
+ // Fresh puts after free-list construction fill lids 2 and 3.
+ EXPECT_TRUE(assertPut(bucketId2, time2, 2, gid2, dms));
+ EXPECT_TRUE(assertPut(bucketId3, time3, 3, gid3, dms));
+ EXPECT_EQUAL(3u, dms.getNumUsedLids());
+ EXPECT_EQUAL(5u, dms.getNumDocs());
+}
+
+// Verify that frozen-iteration order follows the gid comparator: a store
+// built with ReverseGidCompare must yield the exact reverse of the
+// default store's iteration order for the same set of gids.
+TEST("requireThatWeCanSortGids")
+{
+ DocumentMetaStore dms(createBucketDB());
+ DocumentMetaStore rdms(createBucketDB(),
+ DocumentMetaStore::getFixedName(),
+ GrowStrategy(),
+ DocumentMetaStore::IGidCompare::SP(
+ new ReverseGidCompare));
+
+ dms.constructFreeList();
+ rdms.constructFreeList();
+ uint32_t numLids = 1000;
+ for (uint32_t lid = 1; lid <= numLids; ++lid) {
+ GlobalId gid = createGid(lid);
+ BucketId bucketId(minNumBits,
+ gid.convertToBucketId().getRawId());
+ // The same gid must receive the same lid in both stores.
+ uint32_t addLid = addGid(dms, gid, bucketId, Timestamp(0u));
+ EXPECT_EQUAL(lid, addLid);
+ uint32_t addLid2 = addGid(rdms, gid, bucketId, Timestamp(0u));
+ EXPECT_EQUAL(lid, addLid2);
+ }
+ // Collect lids in each store's frozen iteration order.
+ std::vector<uint32_t> lids;
+ std::vector<uint32_t> rlids;
+ for (DocumentMetaStore::ConstIterator it = dms.beginFrozen(); it.valid(); ++it)
+ lids.push_back(it.getKey());
+ for (DocumentMetaStore::ConstIterator rit = rdms.beginFrozen();
+ rit.valid(); ++rit)
+ rlids.push_back(rit.getKey());
+ EXPECT_EQUAL(numLids, lids.size());
+ EXPECT_EQUAL(numLids, rlids.size());
+ // The reverse-compare store must iterate in exactly reversed order.
+ for (uint32_t i = 0; i < numLids; ++i) {
+ EXPECT_EQUAL(lids[numLids - 1 - i], rlids[i]);
+ }
+}
+
+// Verify that per-bucket document counts and checksums in the bucket DB
+// match a model map maintained alongside the puts, overwrites and removes.
+TEST("requireThatBasicBucketInfoWorks")
+{
+ DocumentMetaStore dms(createBucketDB());
+ typedef std::pair<BucketId, GlobalId> Elem;
+ typedef std::map<Elem, Timestamp> Map;
+ Map m; // model: (bucket, gid) -> expected timestamp
+ uint32_t numLids = 2000;
+ dms.constructFreeList();
+ // Insert all lids, recording expected timestamps in the model.
+ for (uint32_t lid = 1; lid <= numLids; ++lid) {
+ GlobalId gid = createGid(lid);
+ Timestamp timestamp(UINT64_C(123456789) * lid);
+ BucketId bucketId(minNumBits,
+ gid.convertToBucketId().getRawId());
+ uint32_t addLid = addGid(dms, gid, bucketId, timestamp);
+ EXPECT_EQUAL(lid, addLid);
+ m[std::make_pair(bucketId, gid)] = timestamp;
+ }
+ // Overwrite every 7th lid with a new timestamp; checksums must follow.
+ for (uint32_t lid = 2; lid <= numLids; lid += 7) {
+ GlobalId gid = createGid(lid);
+ Timestamp timestamp(UINT64_C(14735) * lid);
+ BucketId bucketId(minNumBits,
+ gid.convertToBucketId().getRawId());
+ uint32_t addLid = addGid(dms, gid, bucketId, timestamp);
+ EXPECT_EQUAL(lid, addLid);
+ m[std::make_pair(bucketId, gid)] = timestamp;
+ }
+ // Remove every 5th lid (offset 3) from store and model alike.
+ for (uint32_t lid = 3; lid <= numLids; lid += 5) {
+ GlobalId gid = createGid(lid);
+ BucketId bucketId(minNumBits,
+ gid.convertToBucketId().getRawId());
+ EXPECT_TRUE(dms.remove(lid));
+ dms.removeComplete(lid);
+ m.erase(std::make_pair(bucketId, gid));
+ }
+ assert(!m.empty());
+ // Walk the model (sorted by bucket first) and compare each bucket's
+ // accumulated count/checksum against the bucket DB.
+ BucketChecksum cksum;
+ BucketId prevBucket = m.begin()->first.first;
+ uint32_t cnt = 0u;
+ uint32_t maxcnt = 0u;
+ BucketDBOwner::Guard bucketDB = dms.getBucketDB().takeGuard();
+ for (Map::const_iterator i = m.begin(), ie = m.end(); i != ie; ++i) {
+ if (i->first.first == prevBucket) {
+ cksum = BucketChecksum(cksum +
+ BucketState::calcChecksum(i->first.second,
+ i->second));
+ ++cnt;
+ } else {
+ // Bucket boundary: verify the finished bucket, then restart.
+ BucketInfo bi = bucketDB->get(prevBucket);
+ EXPECT_EQUAL(cnt, bi.getDocumentCount());
+ EXPECT_EQUAL(cksum, bi.getChecksum());
+ prevBucket = i->first.first;
+ cksum = BucketState::calcChecksum(i->first.second,
+ i->second);
+ maxcnt = std::max(maxcnt, cnt);
+ cnt = 1u;
+ }
+ }
+ // Verify the final bucket, which the loop above never closes.
+ maxcnt = std::max(maxcnt, cnt);
+ BucketInfo bi = bucketDB->get(prevBucket);
+ EXPECT_EQUAL(cnt, bi.getDocumentCount());
+ EXPECT_EQUAL(cksum, bi.getChecksum());
+ LOG(info, "Largest bucket: %u elements", maxcnt);
+}
+
+// Verify that getLids() returns exactly the lids inserted into each
+// bucket, and returns nothing once those lids have been removed.
+TEST("requireThatWeCanRetrieveListOfLidsFromBucketId")
+{
+ typedef std::vector<uint32_t> LidVector;
+ typedef std::map<BucketId, LidVector> Map;
+ DocumentMetaStore dms(createBucketDB());
+ const uint32_t bucketBits = 2; // -> 4 buckets
+ uint32_t numLids = 1000;
+ Map m;
+
+ dms.constructFreeList();
+ // insert global ids
+ for (uint32_t lid = 1; lid <= numLids; ++lid) {
+ GlobalId gid = createGid(lid);
+ BucketId bucketId(bucketBits,
+ gid.convertToBucketId().getRawId());
+ uint32_t addLid = addGid(dms, gid, bucketId, Timestamp(0));
+ EXPECT_EQUAL(lid, addLid);
+ m[bucketId].push_back(lid);
+ }
+
+ // Verify that bucket id x has y lids
+ EXPECT_EQUAL(4u, m.size());
+ for (Map::const_iterator itr = m.begin(); itr != m.end(); ++itr) {
+ const BucketId &bucketId = itr->first;
+ const LidVector &expLids = itr->second;
+ LOG(info, "Verify that bucket id '%s' has %zu lids",
+ bucketId.toString().c_str(), expLids.size());
+ LidVector actLids;
+ dms.getLids(bucketId, actLids);
+ EXPECT_EQUAL(expLids.size(), actLids.size());
+ // Membership check only; getLids() makes no ordering promise here.
+ for (size_t i = 0; i < expLids.size(); ++i) {
+ EXPECT_TRUE(std::find(actLids.begin(), actLids.end(), expLids[i]) != actLids.end());
+ }
+ }
+
+ // Remove and verify empty buckets
+ for (Map::const_iterator itr = m.begin(); itr != m.end(); ++itr) {
+ const BucketId &bucketId = itr->first;
+ const LidVector &expLids = itr->second;
+ for (size_t i = 0; i < expLids.size(); ++i) {
+ EXPECT_TRUE(dms.remove(expLids[i]));
+ dms.removeComplete(expLids[i]);
+ }
+ LOG(info, "Verify that bucket id '%s' has 0 lids", bucketId.toString().c_str());
+ LidVector actLids;
+ dms.getLids(bucketId, actLids);
+ EXPECT_TRUE(actLids.empty());
+ }
+}
+
+// Strict weak ordering on DocumentMetaData by ascending lid; used to give
+// bucket query results a deterministic order before asserting on them.
+struct Comparator {
+ bool operator()(const DocumentMetaData &a, const DocumentMetaData &b) const {
+ return a.lid < b.lid;
+ }
+};
+
+// Fixture with nine gids spread over three user ids (10, 20, 30), which
+// map to the three buckets bid1, bid2 and bid3. Gids [0..6] are the
+// default working set; [7] and [8] are extras added by individual tests.
+struct UserDocFixture {
+ std::shared_ptr<BucketDBOwner> _bucketDB;
+ DocumentMetaStore dms;
+ std::vector<GlobalId> gids;
+ BucketId bid1;
+ BucketId bid2;
+ BucketId bid3;
+ bucketdb::BucketDBHandler _bucketDBHandler;
+ UserDocFixture()
+ : _bucketDB(createBucketDB()),
+ dms(_bucketDB), gids(), bid1(), bid2(), bid3(),
+ _bucketDBHandler(*_bucketDB)
+ {
+ _bucketDBHandler.addDocumentMetaStore(&dms, 0);
+ gids.push_back(createGid(10, 1));
+ gids.push_back(createGid(10, 2));
+ gids.push_back(createGid(20, 3));
+ gids.push_back(createGid(10, 4));
+ gids.push_back(createGid(10, 5));
+ gids.push_back(createGid(20, 6));
+ gids.push_back(createGid(20, 7));
+ gids.push_back(createGid(30, 8)); // extra
+ gids.push_back(createGid(10, 9)); // extra
+ // 3 users -> 3 buckets
+ bid1 = BucketId(minNumBits, gids[0].convertToBucketId().getRawId());
+ bid2 = BucketId(minNumBits, gids[2].convertToBucketId().getRawId());
+ bid3 = BucketId(minNumBits, gids[7].convertToBucketId().getRawId());
+ }
+ // Add gid expecting it to be assigned expLid; timestamp = expLid + const.
+ void addGlobalId(const GlobalId &gid, uint32_t expLid, uint32_t timestampConst = 100) {
+ uint32_t actLid = addGid(dms, gid, Timestamp(expLid + timestampConst));
+ EXPECT_EQUAL(expLid, actLid);
+ }
+ // Put gid at an explicit lid; timestamp = lid + const.
+ void putGlobalId(const GlobalId &gid, uint32_t lid, uint32_t timestampConst = 100) {
+ putGid(dms, gid, lid, Timestamp(lid + timestampConst));
+ }
+ // NOTE(review): noinline attribute's purpose is not evident here --
+ // presumably to keep test stack traces readable; confirm before removing.
+ void addGlobalIds(size_t numGids=7) __attribute__((noinline));
+};
+
+// Add the first numGids fixture gids; lids are expected to be handed out
+// sequentially starting at 1.
+void
+UserDocFixture::addGlobalIds(size_t numGids) {
+ uint32_t expLid = 1;
+ for (size_t idx = 0; idx < numGids; ++idx, ++expLid) {
+ addGlobalId(gids[idx], expLid);
+ }
+}
+
+// Verify that getMetaData() returns the full (lid, timestamp, bucket, gid)
+// tuples for every document in a bucket, and nothing for an empty bucket.
+TEST("requireThatWeCanRetrieveListOfMetaDataFromBucketId")
+{
+ UserDocFixture f;
+ { // empty bucket
+ DocumentMetaData::Vector result;
+ f.dms.getMetaData(f.bid1, result);
+ EXPECT_EQUAL(0u, result.size());
+ }
+ f.dms.constructFreeList();
+ f.addGlobalIds();
+ { // verify bucket 1
+ DocumentMetaData::Vector result;
+ f.dms.getMetaData(f.bid1, result);
+ // Sort by lid: getMetaData() ordering is not asserted directly.
+ std::sort(result.begin(), result.end(), Comparator());
+ EXPECT_EQUAL(4u, result.size());
+ EXPECT_TRUE(assertMetaData(DocumentMetaData(1, Timestamp(101), f.bid1,
+ f.gids[0]), result[0]));
+ EXPECT_TRUE(assertMetaData(DocumentMetaData(2, Timestamp(102), f.bid1,
+ f.gids[1]), result[1]));
+ EXPECT_TRUE(assertMetaData(DocumentMetaData(4, Timestamp(104), f.bid1,
+ f.gids[3]), result[2]));
+ EXPECT_TRUE(assertMetaData(DocumentMetaData(5, Timestamp(105), f.bid1,
+ f.gids[4]), result[3]));
+ }
+ { // verify bucket 2
+ DocumentMetaData::Vector result;
+ f.dms.getMetaData(f.bid2, result);
+ std::sort(result.begin(), result.end(), Comparator());
+ EXPECT_EQUAL(3u, result.size());
+ EXPECT_TRUE(assertMetaData(DocumentMetaData(3, Timestamp(103), f.bid2,
+ f.gids[2]), result[0]));
+ EXPECT_TRUE(assertMetaData(DocumentMetaData(6, Timestamp(106), f.bid2,
+ f.gids[5]), result[1]));
+ EXPECT_TRUE(assertMetaData(DocumentMetaData(7, Timestamp(107), f.bid2,
+ f.gids[6]), result[2]));
+ }
+}
+
+// Verify that setBucketState() toggles the active bit for all lids in a
+// bucket, that newly added documents inherit their bucket's active state,
+// and that getNumActiveLids() tracks the active-lid bitvector.
+TEST("requireThatBucketStateCanBeUpdated")
+{
+ UserDocFixture f;
+ f.dms.constructFreeList();
+ EXPECT_EQUAL(1u, f.dms.getActiveLids().getNumDocs()); // lid 0 is reserved
+
+ f.addGlobalIds();
+ // All seven lids start inactive.
+ EXPECT_TRUE(assertActiveLids(BoolVector().F().F().F().F().F().F().F(), f.dms.getActiveLids()));
+ EXPECT_EQUAL(0u, f.dms.getNumActiveLids());
+ EXPECT_FALSE(f.dms.getBucketDB().takeGuard()->get(f.bid1).isActive());
+ EXPECT_FALSE(f.dms.getBucketDB().takeGuard()->get(f.bid2).isActive());
+
+ // Activating bid1 activates its four lids (1, 2, 4, 5).
+ f.dms.setBucketState(f.bid1, true);
+ EXPECT_TRUE(assertActiveLids(BoolVector().T().T().F().T().T().F().F(), f.dms.getActiveLids()));
+ EXPECT_EQUAL(4u, f.dms.getNumActiveLids());
+ EXPECT_TRUE(f.dms.getBucketDB().takeGuard()->get(f.bid1).isActive());
+ EXPECT_FALSE(f.dms.getBucketDB().takeGuard()->get(f.bid2).isActive());
+
+ f.dms.setBucketState(f.bid2, true);
+ EXPECT_TRUE(assertActiveLids(BoolVector().T().T().T().T().T().T().T(), f.dms.getActiveLids()));
+ EXPECT_EQUAL(7u, f.dms.getNumActiveLids());
+ EXPECT_TRUE(f.dms.getBucketDB().takeGuard()->get(f.bid1).isActive());
+ EXPECT_TRUE(f.dms.getBucketDB().takeGuard()->get(f.bid2).isActive());
+
+ // New docs inherit the active state of the bucket they land in.
+ f.addGlobalId(createGid(30, 8), 8);
+ f.addGlobalId(createGid(10, 9), 9); // bid1 is active so added document should be active as well
+ EXPECT_TRUE(assertActiveLids(BoolVector().T().T().T().T().T().T().T().F().T(), f.dms.getActiveLids()));
+ EXPECT_EQUAL(8u, f.dms.getNumActiveLids());
+ EXPECT_TRUE(f.dms.getBucketDB().takeGuard()->get(f.bid1).isActive());
+ EXPECT_TRUE(f.dms.getBucketDB().takeGuard()->get(f.bid2).isActive());
+ EXPECT_FALSE(f.dms.getBucketDB().takeGuard()->get(f.bid3).isActive());
+
+ // Deactivating bid1 clears its lids (1, 2, 4, 5 and the new 9).
+ f.dms.setBucketState(f.bid1, false);
+ EXPECT_TRUE(assertActiveLids(BoolVector().F().F().T().F().F().T().T().F().F(), f.dms.getActiveLids()));
+ EXPECT_EQUAL(3u, f.dms.getNumActiveLids());
+ EXPECT_FALSE(f.dms.getBucketDB().takeGuard()->get(f.bid1).isActive());
+ EXPECT_TRUE(f.dms.getBucketDB().takeGuard()->get(f.bid2).isActive());
+ EXPECT_FALSE(f.dms.getBucketDB().takeGuard()->get(f.bid3).isActive());
+
+ f.dms.setBucketState(f.bid2, false);
+ EXPECT_TRUE(assertActiveLids(BoolVector().F().F().F().F().F().F().F().F().F(), f.dms.getActiveLids()));
+ EXPECT_EQUAL(0u, f.dms.getNumActiveLids());
+ EXPECT_FALSE(f.dms.getBucketDB().takeGuard()->get(f.bid1).isActive());
+ EXPECT_FALSE(f.dms.getBucketDB().takeGuard()->get(f.bid2).isActive());
+ EXPECT_FALSE(f.dms.getBucketDB().takeGuard()->get(f.bid3).isActive());
+}
+
+
+// Verify that removing a lid clears its active bit, and that a lid reused
+// from the free list gets the active state of its NEW bucket, not the old one.
+TEST("requireThatRemovedLidsAreClearedAsActive")
+{
+ UserDocFixture f;
+ f.dms.constructFreeList();
+ f.addGlobalIds(2);
+ f.dms.setBucketState(f.bid1, true);
+ EXPECT_TRUE(assertActiveLids(BoolVector().T().T(), f.dms.getActiveLids()));
+ EXPECT_EQUAL(2u, f.dms.getNumActiveLids());
+ f.dms.remove(2);
+ f.dms.removeComplete(2);
+ EXPECT_TRUE(assertActiveLids(BoolVector().T().F(), f.dms.getActiveLids()));
+ EXPECT_EQUAL(1u, f.dms.getNumActiveLids());
+ // Reuse lid 2 for a doc in inactive bid2: must stay inactive.
+ f.addGlobalId(f.gids[2], 2); // from bid2
+ EXPECT_TRUE(assertActiveLids(BoolVector().T().F(), f.dms.getActiveLids()));
+ EXPECT_EQUAL(1u, f.dms.getNumActiveLids());
+ f.dms.remove(2);
+ f.dms.removeComplete(2);
+ // Reuse lid 2 for a doc in active bid1: must become active.
+ f.addGlobalId(f.gids[3], 2); // from bid1
+ EXPECT_TRUE(assertActiveLids(BoolVector().T().T(), f.dms.getActiveLids()));
+ EXPECT_EQUAL(2u, f.dms.getNumActiveLids());
+}
+
+// Verify the black-list blueprint: with only bid1 active it must hit the
+// lids of the non-active bucket (3, 6, 7); with all buckets active it is empty.
+TEST("requireThatBlackListBlueprintIsCreated")
+{
+ UserDocFixture f;
+ f.dms.constructFreeList();
+ f.addGlobalIds();
+
+ f.dms.setBucketState(f.bid1, true);
+ EXPECT_TRUE(assertBlackList(SimpleResult().addHit(3).addHit(6).addHit(7),
+ f.dms.createBlackListBlueprint(), true));
+
+ f.dms.setBucketState(f.bid2, true);
+ EXPECT_TRUE(assertBlackList(SimpleResult(),
+ f.dms.createBlackListBlueprint(), true));
+}
+
+// Verify that the bucket DB's per-bucket document and entry counts follow
+// adds and removes (here both counters move together).
+TEST("requireThatDocumentAndMetaEntryCountIsUpdated")
+{
+ UserDocFixture f;
+ f.dms.constructFreeList();
+ EXPECT_EQUAL(0u, f.dms.getBucketDB().takeGuard()->get(f.bid1).getDocumentCount());
+ EXPECT_EQUAL(0u, f.dms.getBucketDB().takeGuard()->get(f.bid1).getEntryCount());
+ EXPECT_EQUAL(0u, f.dms.getBucketDB().takeGuard()->get(f.bid2).getDocumentCount());
+ EXPECT_EQUAL(0u, f.dms.getBucketDB().takeGuard()->get(f.bid2).getEntryCount());
+ f.addGlobalIds();
+ // Fixture layout: 4 docs land in bid1, 3 in bid2.
+ EXPECT_EQUAL(4u, f.dms.getBucketDB().takeGuard()->get(f.bid1).getDocumentCount());
+ EXPECT_EQUAL(4u, f.dms.getBucketDB().takeGuard()->get(f.bid1).getEntryCount());
+ EXPECT_EQUAL(3u, f.dms.getBucketDB().takeGuard()->get(f.bid2).getDocumentCount());
+ EXPECT_EQUAL(3u, f.dms.getBucketDB().takeGuard()->get(f.bid2).getEntryCount());
+ f.dms.remove(3); // from bid2
+ f.dms.removeComplete(3);
+ EXPECT_EQUAL(4u, f.dms.getBucketDB().takeGuard()->get(f.bid1).getDocumentCount());
+ EXPECT_EQUAL(4u, f.dms.getBucketDB().takeGuard()->get(f.bid1).getEntryCount());
+ EXPECT_EQUAL(2u, f.dms.getBucketDB().takeGuard()->get(f.bid2).getDocumentCount());
+ EXPECT_EQUAL(2u, f.dms.getBucketDB().takeGuard()->get(f.bid2).getEntryCount());
+}
+
+// Verify that a bucket stays in the bucket DB while it has entries, survives
+// becoming empty, and disappears only when handleDeleteBucket() is called.
+TEST("requireThatEmptyBucketsAreRemoved")
+{
+ UserDocFixture f;
+ f.dms.constructFreeList();
+ EXPECT_FALSE(f.dms.getBucketDB().takeGuard()->hasBucket(f.bid1));
+ EXPECT_FALSE(f.dms.getBucketDB().takeGuard()->hasBucket(f.bid2));
+ f.addGlobalIds(3);
+ EXPECT_TRUE(f.dms.getBucketDB().takeGuard()->hasBucket(f.bid1));
+ EXPECT_TRUE(f.dms.getBucketDB().takeGuard()->hasBucket(f.bid2));
+ f.dms.remove(3); // from bid2
+ f.dms.removeComplete(3);
+ // Empty bucket is kept until explicitly deleted.
+ EXPECT_TRUE(f.dms.getBucketDB().takeGuard()->hasBucket(f.bid1));
+ EXPECT_TRUE(f.dms.getBucketDB().takeGuard()->hasBucket(f.bid2));
+ EXPECT_EQUAL(0u, f.dms.getBucketDB().takeGuard()->get(f.bid2).getEntryCount());
+ f._bucketDBHandler.handleDeleteBucket(f.bid2);
+ EXPECT_FALSE(f.dms.getBucketDB().takeGuard()->hasBucket(f.bid2));
+ f.dms.remove(1); // from bid1
+ f.dms.removeComplete(1);
+ EXPECT_TRUE(f.dms.getBucketDB().takeGuard()->hasBucket(f.bid1));
+ EXPECT_FALSE(f.dms.getBucketDB().takeGuard()->hasBucket(f.bid2));
+ f.dms.remove(2); // from bid1
+ f.dms.removeComplete(2);
+ EXPECT_TRUE(f.dms.getBucketDB().takeGuard()->hasBucket(f.bid1));
+ EXPECT_EQUAL(0u, f.dms.getBucketDB().takeGuard()->get(f.bid1).getEntryCount());
+ f._bucketDBHandler.handleDeleteBucket(f.bid1);
+ EXPECT_FALSE(f.dms.getBucketDB().takeGuard()->hasBucket(f.bid1));
+ EXPECT_FALSE(f.dms.getBucketDB().takeGuard()->hasBucket(f.bid2));
+}
+
+
+// One test document: a lid, its derived gid, and that gid's bucket id
+// precomputed at 1, 2 and 3 used bits (for split/join scenarios).
+struct GlobalIdEntry {
+ uint32_t lid;
+ GlobalId gid;
+ BucketId bid1;
+ BucketId bid2;
+ BucketId bid3;
+ GlobalIdEntry(uint32_t lid_) :
+ lid(lid_),
+ gid(createGid(lid_)),
+ bid1(1, gid.convertToBucketId().getRawId()),
+ bid2(2, gid.convertToBucketId().getRawId()),
+ bid3(3, gid.convertToBucketId().getRawId())
+ {}
+};
+
+typedef std::vector<GlobalIdEntry> GlobalIdVector;
+
+// Fixture with an empty DocumentMetaStore and bucket ids at used-bits
+// 1, 2 and 3 forming a containment hierarchy, plus a bucket DB handler
+// wired to the store for split/join/create/delete-bucket tests.
+struct SplitAndJoinEmptyFixture
+{
+ DocumentMetaStore dms;
+ BucketId bid10;
+ BucketId bid11;
+ BucketId bid20; // contained in bid10
+ BucketId bid21; // contained in bid11
+ BucketId bid22; // contained in bid10
+ BucketId bid23; // contained in bid11
+ BucketId bid30; // contained in bid10 and bid20
+ BucketId bid32; // contained in bid10 and bid22
+ BucketId bid34; // contained in bid10 and bid20
+ BucketId bid36; // contained in bid10 and bid22
+ bucketdb::BucketDBHandler _bucketDBHandler;
+
+ // C++-style empty parameter list (was C-style "(void)").
+ SplitAndJoinEmptyFixture()
+ : dms(createBucketDB()),
+ bid10(1, 0), bid11(1, 1),
+ bid20(2, 0), bid21(2, 1), bid22(2, 2), bid23(2, 3),
+ bid30(3, 0), bid32(3, 2), bid34(3, 4), bid36(3, 6),
+ _bucketDBHandler(dms.getBucketDB())
+ {
+ _bucketDBHandler.addDocumentMetaStore(&dms, 0);
+ }
+
+ // Snapshot of the bucket DB entry for the given bucket.
+ BucketInfo
+ getInfo(const BucketId &bid) const
+ {
+ return dms.getBucketDB().takeGuard()->get(bid);
+ }
+};
+
+// Extends the empty fixture with 31 documents grouped per bucket at each
+// used-bits level, plus helpers to insert them at 1-bit or 2-bit buckets
+// (optionally diverting one 3-bit sub-bucket to the other level).
+struct SplitAndJoinFixture : public SplitAndJoinEmptyFixture
+{
+ typedef std::map<BucketId, GlobalIdVector> BucketMap;
+ GlobalIdVector gids;
+ BucketMap bid1s;
+ BucketMap bid2s;
+ BucketMap bid3s;
+ const GlobalIdVector *bid10Gids;
+ const GlobalIdVector *bid11Gids;
+ const GlobalIdVector *bid21Gids;
+ const GlobalIdVector *bid23Gids;
+ const GlobalIdVector *bid30Gids;
+ const GlobalIdVector *bid32Gids;
+ SplitAndJoinFixture()
+ : SplitAndJoinEmptyFixture(),
+ gids(),
+ bid1s(), bid2s(), bid3s(),
+ bid10Gids(), bid11Gids(), bid21Gids(), bid23Gids(),
+ bid30Gids(), bid32Gids()
+ {
+ // Group the 31 entries by their bucket at each used-bits level.
+ for (uint32_t i = 1; i <= 31; ++i) {
+ gids.push_back(GlobalIdEntry(i));
+ bid1s[gids.back().bid1].push_back(gids.back());
+ bid2s[gids.back().bid2].push_back(gids.back());
+ bid3s[gids.back().bid3].push_back(gids.back());
+ }
+ // 31 sequential gids must cover every bucket at 1, 2 and 3 bits.
+ ASSERT_EQUAL(2u, bid1s.size());
+ ASSERT_EQUAL(4u, bid2s.size());
+ ASSERT_EQUAL(8u, bid3s.size());
+ bid10Gids = &bid1s[bid10];
+ bid11Gids = &bid1s[bid11];
+ bid21Gids = &bid2s[bid21];
+ bid23Gids = &bid2s[bid23];
+ bid30Gids = &bid3s[bid30];
+ bid32Gids = &bid3s[bid32];
+ }
+ // Insert every entry under its 1-bit bucket.
+ void insertGids1() {
+ for (size_t i = 0; i < gids.size(); ++i) {
+ EXPECT_TRUE(dms.put(gids[i].gid, gids[i].bid1, Timestamp(0),
+ gids[i].lid).ok());
+ }
+ }
+ // Insert every entry under its 2-bit bucket.
+ void insertGids2() {
+ for (size_t i = 0; i < gids.size(); ++i) {
+ EXPECT_TRUE(dms.put(gids[i].gid, gids[i].bid2, Timestamp(0),
+ gids[i].lid).ok());
+ }
+ }
+
+ // Insert under 1-bit buckets, except entries whose 3-bit bucket equals
+ // alt, which go under their 2-bit bucket (creates an overlapping target).
+ void
+ insertGids1Mostly(const BucketId &alt)
+ {
+ for (size_t i = 0; i < gids.size(); ++i) {
+ const GlobalIdEntry &g(gids[i]);
+ BucketId b(g.bid3 == alt ? g.bid2 : g.bid1);
+ EXPECT_TRUE(dms.put(g.gid, b, Timestamp(0), g.lid).ok());
+ }
+ }
+
+ // Insert under 2-bit buckets, except entries whose 3-bit bucket equals
+ // alt, which go under their 1-bit bucket.
+ void
+ insertGids2Mostly(const BucketId &alt)
+ {
+ for (size_t i = 0; i < gids.size(); ++i) {
+ const GlobalIdEntry &g(gids[i]);
+ BucketId b(g.bid3 == alt ? g.bid1 : g.bid2);
+ EXPECT_TRUE(dms.put(g.gid, b, Timestamp(0), g.lid).ok());
+ }
+ }
+};
+
+
+// Build a BoolVector of size sz where position (lid - 1) is set for every
+// entry in gids; all other positions remain false.
+BoolVector
+getBoolVector(const GlobalIdVector &gids, size_t sz)
+{
+ BoolVector result(sz);
+ for (size_t idx = 0; idx < gids.size(); ++idx) {
+ const uint32_t lid = gids[idx].lid;
+ ASSERT_TRUE(lid > 0u && lid <= sz);
+ result[lid - 1] = true;
+ }
+ return result;
+}
+
+
+// Like getBoolVector(), but entries whose 3-bit bucket equals skip are
+// left false (their lid bound is still validated).
+BoolVector
+getBoolVectorFiltered(const GlobalIdVector &gids, size_t sz,
+ const BucketId &skip)
+{
+ BoolVector result(sz);
+ for (size_t idx = 0; idx < gids.size(); ++idx) {
+ const GlobalIdEntry &entry(gids[idx]);
+ const uint32_t lid = entry.lid;
+ ASSERT_TRUE(lid > 0u && lid <= sz);
+ if (entry.bid3 != skip) {
+ result[lid - 1] = true;
+ }
+ }
+ return result;
+}
+
+// Verify that splitting bid11 into bid21/bid23 empties the source bucket
+// and conserves total entry and document counts across the targets.
+TEST("requireThatBucketInfoIsCorrectAfterSplit")
+{
+ SplitAndJoinFixture f;
+ f.insertGids1();
+ BucketInfo bi10 = f.getInfo(f.bid10);
+ BucketInfo bi11 = f.getInfo(f.bid11);
+ LOG(info, "%s: %s", f.bid10.toString().c_str(), bi10.toString().c_str());
+ LOG(info, "%s: %s", f.bid11.toString().c_str(), bi11.toString().c_str());
+ EXPECT_TRUE(assertBucketInfo(f.bid10Gids->size(), f.bid10Gids->size(), bi10));
+ EXPECT_TRUE(assertBucketInfo(f.bid11Gids->size(), f.bid11Gids->size(), bi11));
+ EXPECT_NOT_EQUAL(bi10.getEntryCount(), bi11.getEntryCount());
+ EXPECT_EQUAL(31u, bi10.getEntryCount() + bi11.getEntryCount());
+
+ f._bucketDBHandler.handleSplit(10, f.bid11, f.bid21, f.bid23);
+
+ // Source bid11 must be empty; bid10 untouched; targets carry the docs.
+ BucketInfo nbi10 = f.getInfo(f.bid10);
+ BucketInfo nbi11 = f.getInfo(f.bid11);
+ BucketInfo bi21 = f.getInfo(f.bid21);
+ BucketInfo bi23 = f.getInfo(f.bid23);
+ LOG(info, "%s: %s", f.bid10.toString().c_str(), nbi10.toString().c_str());
+ LOG(info, "%s: %s", f.bid11.toString().c_str(), nbi11.toString().c_str());
+ LOG(info, "%s: %s", f.bid21.toString().c_str(), bi21.toString().c_str());
+ LOG(info, "%s: %s", f.bid23.toString().c_str(), bi23.toString().c_str());
+ EXPECT_TRUE(assertBucketInfo(f.bid10Gids->size(),
+ f.bid10Gids->size(),
+ nbi10));
+ EXPECT_TRUE(assertBucketInfo(0u, 0u, nbi11));
+ EXPECT_TRUE(assertBucketInfo(f.bid21Gids->size(),
+ f.bid21Gids->size(),
+ bi21));
+ EXPECT_TRUE(assertBucketInfo(f.bid23Gids->size(),
+ f.bid23Gids->size(),
+ bi23));
+ EXPECT_EQUAL(bi11.getEntryCount(),
+ bi21.getEntryCount() + bi23.getEntryCount());
+ EXPECT_EQUAL(bi11.getDocumentCount(),
+ bi21.getDocumentCount() +
+ bi23.getDocumentCount());
+}
+
+// Verify active-state propagation through handleSplit(): targets inherit
+// the source's state, and pre-existing overlapping targets are forced to
+// the source's state (their lids flip accordingly).
+TEST("requireThatActiveStateIsPreservedAfterSplit")
+{
+ { // non-active bucket
+ SplitAndJoinFixture f;
+ f.insertGids1();
+ EXPECT_FALSE(f.getInfo(f.bid10).isActive());
+ f._bucketDBHandler.handleSplit(10, f.bid10, f.bid20, f.bid22);
+ EXPECT_FALSE(f.getInfo(f.bid20).isActive());
+ EXPECT_FALSE(f.getInfo(f.bid22).isActive());
+ assertActiveLids(BoolVector(31), f.dms.getActiveLids());
+ EXPECT_EQUAL(0u, f.dms.getNumActiveLids());
+ }
+ { // active bucket
+ SplitAndJoinFixture f;
+ f.insertGids1();
+ f.dms.setBucketState(f.bid10, true);
+ EXPECT_TRUE(f.getInfo(f.bid10).isActive());
+ f._bucketDBHandler.handleSplit(10, f.bid10, f.bid20, f.bid22);
+ EXPECT_TRUE(f.getInfo(f.bid20).isActive());
+ EXPECT_TRUE(f.getInfo(f.bid22).isActive());
+ assertActiveLids(getBoolVector(*f.bid10Gids, 31),
+ f.dms.getActiveLids());
+ EXPECT_EQUAL(f.bid10Gids->size(), f.dms.getNumActiveLids());
+ }
+ { // non-active source, active overlapping target1
+ SplitAndJoinFixture f;
+ f.insertGids1Mostly(f.bid30);
+ EXPECT_FALSE(f.getInfo(f.bid10).isActive());
+ EXPECT_FALSE(f.getInfo(f.bid20).isActive());
+ assertActiveLids(BoolVector(31), f.dms.getActiveLids());
+ EXPECT_EQUAL(0u, f.dms.getNumActiveLids());
+ f.dms.setBucketState(f.bid20, true);
+ EXPECT_TRUE(f.getInfo(f.bid20).isActive());
+ assertActiveLids(getBoolVector(*f.bid30Gids, 31),
+ f.dms.getActiveLids());
+ EXPECT_EQUAL(f.bid30Gids->size(), f.dms.getNumActiveLids());
+ // Split from the inactive source deactivates the target again.
+ f._bucketDBHandler.handleSplit(10, f.bid10, f.bid20, f.bid22);
+ EXPECT_FALSE(f.getInfo(f.bid20).isActive());
+ EXPECT_FALSE(f.getInfo(f.bid22).isActive());
+ assertActiveLids(BoolVector(31), f.dms.getActiveLids());
+ EXPECT_EQUAL(0u, f.dms.getNumActiveLids());
+ }
+ { // non-active source, active overlapping target2
+ SplitAndJoinFixture f;
+ f.insertGids1Mostly(f.bid32);
+ EXPECT_FALSE(f.getInfo(f.bid10).isActive());
+ EXPECT_FALSE(f.getInfo(f.bid20).isActive());
+ assertActiveLids(BoolVector(31), f.dms.getActiveLids());
+ EXPECT_EQUAL(0u, f.dms.getNumActiveLids());
+ f.dms.setBucketState(f.bid22, true);
+ EXPECT_TRUE(f.getInfo(f.bid22).isActive());
+ assertActiveLids(getBoolVector(*f.bid32Gids, 31),
+ f.dms.getActiveLids());
+ EXPECT_EQUAL(f.bid32Gids->size(), f.dms.getNumActiveLids());
+ f._bucketDBHandler.handleSplit(10, f.bid10, f.bid20, f.bid22);
+ EXPECT_FALSE(f.getInfo(f.bid20).isActive());
+ EXPECT_FALSE(f.getInfo(f.bid22).isActive());
+ assertActiveLids(BoolVector(31), f.dms.getActiveLids());
+ EXPECT_EQUAL(0u, f.dms.getNumActiveLids());
+ }
+ { // active source, non-active overlapping target1
+ SplitAndJoinFixture f;
+ f.insertGids1Mostly(f.bid30);
+ f.dms.setBucketState(f.bid10, true);
+ EXPECT_TRUE(f.getInfo(f.bid10).isActive());
+ EXPECT_FALSE(f.getInfo(f.bid20).isActive());
+ BoolVector filtered(getBoolVectorFiltered(*f.bid10Gids, 31, f.bid30));
+ assertActiveLids(filtered, f.dms.getActiveLids());
+ EXPECT_EQUAL(filtered.countTrue(), f.dms.getNumActiveLids());
+ // Split from the active source activates both targets.
+ f._bucketDBHandler.handleSplit(10, f.bid10, f.bid20, f.bid22);
+ EXPECT_TRUE(f.getInfo(f.bid20).isActive());
+ EXPECT_TRUE(f.getInfo(f.bid22).isActive());
+ assertActiveLids(getBoolVector(*f.bid10Gids, 31),
+ f.dms.getActiveLids());
+ EXPECT_EQUAL(f.bid10Gids->size(), f.dms.getNumActiveLids());
+ }
+ { // active source, non-active overlapping target2
+ SplitAndJoinFixture f;
+ f.insertGids1Mostly(f.bid32);
+ f.dms.setBucketState(f.bid10, true);
+ EXPECT_TRUE(f.getInfo(f.bid10).isActive());
+ EXPECT_FALSE(f.getInfo(f.bid20).isActive());
+ BoolVector filtered(getBoolVectorFiltered(*f.bid10Gids, 31, f.bid32));
+ assertActiveLids(filtered, f.dms.getActiveLids());
+ EXPECT_EQUAL(filtered.countTrue(), f.dms.getNumActiveLids());
+ f._bucketDBHandler.handleSplit(10, f.bid10, f.bid20, f.bid22);
+ EXPECT_TRUE(f.getInfo(f.bid20).isActive());
+ EXPECT_TRUE(f.getInfo(f.bid22).isActive());
+ assertActiveLids(getBoolVector(*f.bid10Gids, 31),
+ f.dms.getActiveLids());
+ EXPECT_EQUAL(f.bid10Gids->size(), f.dms.getNumActiveLids());
+ }
+}
+
+// Verify that splitting a bucket with no documents still propagates the
+// source's active state to both (empty) targets.
+TEST("requireThatActiveStateIsPreservedAfterEmptySplit")
+{
+ { // non-active bucket
+ SplitAndJoinEmptyFixture f;
+ f._bucketDBHandler.handleCreateBucket(f.bid10);
+ EXPECT_FALSE(f.getInfo(f.bid10).isActive());
+ f._bucketDBHandler.handleSplit(10, f.bid10, f.bid20, f.bid22);
+ EXPECT_FALSE(f.getInfo(f.bid20).isActive());
+ EXPECT_FALSE(f.getInfo(f.bid22).isActive());
+ }
+ { // active bucket
+ SplitAndJoinEmptyFixture f;
+ f._bucketDBHandler.handleCreateBucket(f.bid10);
+ f.dms.setBucketState(f.bid10, true);
+ EXPECT_TRUE(f.getInfo(f.bid10).isActive());
+ f._bucketDBHandler.handleSplit(10, f.bid10, f.bid20, f.bid22);
+ EXPECT_TRUE(f.getInfo(f.bid20).isActive());
+ EXPECT_TRUE(f.getInfo(f.bid22).isActive());
+ }
+}
+
+// Verify that joining bid21 and bid23 into bid11 empties both sources and
+// conserves total entry and document counts in the target.
+TEST("requireThatBucketInfoIsCorrectAfterJoin")
+{
+ SplitAndJoinFixture f;
+ f.insertGids2();
+ BucketInfo bi21 = f.getInfo(f.bid21);
+ BucketInfo bi23 = f.getInfo(f.bid23);
+ LOG(info, "%s: %s", f.bid21.toString().c_str(), bi21.toString().c_str());
+ LOG(info, "%s: %s", f.bid23.toString().c_str(), bi23.toString().c_str());
+ EXPECT_TRUE(assertBucketInfo(f.bid21Gids->size(), f.bid21Gids->size(), bi21));
+ EXPECT_TRUE(assertBucketInfo(f.bid23Gids->size(), f.bid23Gids->size(), bi23));
+ EXPECT_NOT_EQUAL(bi21.getEntryCount(), bi23.getEntryCount());
+ EXPECT_EQUAL(f.bid11Gids->size(), bi21.getEntryCount() + bi23.getEntryCount());
+
+ f._bucketDBHandler.handleJoin(10, f.bid21, f.bid23, f.bid11);
+ // Sources must be empty; target holds the combined counts.
+ BucketInfo bi11 = f.getInfo(f.bid11);
+ BucketInfo nbi21 = f.getInfo(f.bid21);
+ BucketInfo nbi23 = f.getInfo(f.bid23);
+ LOG(info, "%s: %s", f.bid11.toString().c_str(), bi11.toString().c_str());
+ LOG(info, "%s: %s", f.bid21.toString().c_str(), nbi21.toString().c_str());
+ LOG(info, "%s: %s", f.bid23.toString().c_str(), nbi23.toString().c_str());
+ EXPECT_TRUE(assertBucketInfo(f.bid11Gids->size(),
+ f.bid11Gids->size(), bi11));
+ EXPECT_TRUE(assertBucketInfo(0u, 0u, nbi21));
+ EXPECT_TRUE(assertBucketInfo(0u, 0u, nbi23));
+ EXPECT_EQUAL(bi21.getEntryCount() + bi23.getEntryCount(),
+ bi11.getEntryCount());
+ EXPECT_EQUAL(bi21.getDocumentCount() +
+ bi23.getDocumentCount(),
+ bi11.getDocumentCount());
+}
+
+// Verify active-state propagation through handleJoin(): the target becomes
+// active iff at least one source was active, and a pre-existing target's
+// own state is overridden by the sources' combined state.
+TEST("requireThatActiveStateIsPreservedAfterJoin")
+{
+ { // non-active buckets
+ SplitAndJoinFixture f;
+ f.insertGids2();
+ EXPECT_FALSE(f.getInfo(f.bid20).isActive());
+ EXPECT_FALSE(f.getInfo(f.bid22).isActive());
+
+ f._bucketDBHandler.handleJoin(10, f.bid20, f.bid22, f.bid10);
+ EXPECT_FALSE(f.getInfo(f.bid10).isActive());
+ assertActiveLids(BoolVector(31), f.dms.getActiveLids());
+ EXPECT_EQUAL(0u, f.dms.getNumActiveLids());
+ }
+ { // active buckets
+ SplitAndJoinFixture f;
+ f.insertGids2();
+ f.dms.setBucketState(f.bid20, true);
+ f.dms.setBucketState(f.bid22, true);
+ EXPECT_TRUE(f.getInfo(f.bid20).isActive());
+ EXPECT_TRUE(f.getInfo(f.bid22).isActive());
+
+ f._bucketDBHandler.handleJoin(10, f.bid20, f.bid22, f.bid10);
+ EXPECT_TRUE(f.getInfo(f.bid10).isActive());
+ assertActiveLids(getBoolVector(*f.bid10Gids, 31),
+ f.dms.getActiveLids());
+ EXPECT_EQUAL(f.bid10Gids->size(), f.dms.getNumActiveLids());
+ }
+ { // 1 active bucket
+ SplitAndJoinFixture f;
+ f.insertGids2();
+ f.dms.setBucketState(f.bid20, true);
+ EXPECT_TRUE(f.getInfo(f.bid20).isActive());
+ EXPECT_FALSE(f.getInfo(f.bid22).isActive());
+
+ // One active source is enough: target ends up fully active.
+ f._bucketDBHandler.handleJoin(10, f.bid20, f.bid22, f.bid10);
+ EXPECT_TRUE(f.getInfo(f.bid10).isActive());
+ assertActiveLids(getBoolVector(*f.bid10Gids, 31),
+ f.dms.getActiveLids());
+ EXPECT_EQUAL(f.bid10Gids->size(), f.dms.getNumActiveLids());
+ }
+ { // 1 active bucket
+ SplitAndJoinFixture f;
+ f.insertGids2();
+ f.dms.setBucketState(f.bid22, true);
+ EXPECT_FALSE(f.getInfo(f.bid20).isActive());
+ EXPECT_TRUE(f.getInfo(f.bid22).isActive());
+
+ f._bucketDBHandler.handleJoin(10, f.bid20, f.bid22, f.bid10);
+ EXPECT_TRUE(f.getInfo(f.bid10).isActive());
+ assertActiveLids(getBoolVector(*f.bid10Gids, 31),
+ f.dms.getActiveLids());
+ EXPECT_EQUAL(f.bid10Gids->size(), f.dms.getNumActiveLids());
+ }
+ { // non-active buckets, active target
+ SplitAndJoinFixture f;
+ f.insertGids2Mostly(f.bid30);
+ f.dms.setBucketState(f.bid10, true);
+ EXPECT_TRUE(f.getInfo(f.bid10).isActive());
+ EXPECT_FALSE(f.getInfo(f.bid20).isActive());
+ EXPECT_FALSE(f.getInfo(f.bid22).isActive());
+ assertActiveLids(getBoolVector(*f.bid30Gids, 31),
+ f.dms.getActiveLids());
+ EXPECT_EQUAL(f.bid30Gids->size(), f.dms.getNumActiveLids());
+
+ // Joining inactive sources deactivates the previously active target.
+ f._bucketDBHandler.handleJoin(10, f.bid20, f.bid22, f.bid10);
+ EXPECT_FALSE(f.getInfo(f.bid10).isActive());
+ assertActiveLids(BoolVector(31), f.dms.getActiveLids());
+ EXPECT_EQUAL(0u, f.dms.getNumActiveLids());
+ }
+ { // non-active buckets, active target
+ SplitAndJoinFixture f;
+ f.insertGids2Mostly(f.bid32);
+ f.dms.setBucketState(f.bid10, true);
+ EXPECT_TRUE(f.getInfo(f.bid10).isActive());
+ EXPECT_FALSE(f.getInfo(f.bid20).isActive());
+ EXPECT_FALSE(f.getInfo(f.bid22).isActive());
+ assertActiveLids(getBoolVector(*f.bid32Gids, 31),
+ f.dms.getActiveLids());
+ EXPECT_EQUAL(f.bid32Gids->size(), f.dms.getNumActiveLids());
+
+ f._bucketDBHandler.handleJoin(10, f.bid20, f.bid22, f.bid10);
+ EXPECT_FALSE(f.getInfo(f.bid10).isActive());
+ assertActiveLids(BoolVector(31), f.dms.getActiveLids());
+ EXPECT_EQUAL(0u, f.dms.getNumActiveLids());
+ }
+ { // active buckets, non-active target
+ SplitAndJoinFixture f;
+ f.insertGids2Mostly(f.bid30);
+ f.dms.setBucketState(f.bid20, true);
+ f.dms.setBucketState(f.bid22, true);
+ EXPECT_FALSE(f.getInfo(f.bid10).isActive());
+ EXPECT_TRUE(f.getInfo(f.bid20).isActive());
+ EXPECT_TRUE(f.getInfo(f.bid22).isActive());
+ BoolVector filtered(getBoolVectorFiltered(*f.bid10Gids, 31, f.bid30));
+ assertActiveLids(filtered, f.dms.getActiveLids());
+ EXPECT_EQUAL(filtered.countTrue(), f.dms.getNumActiveLids());
+
+ f._bucketDBHandler.handleJoin(10, f.bid20, f.bid22, f.bid10);
+ EXPECT_TRUE(f.getInfo(f.bid10).isActive());
+ assertActiveLids(getBoolVector(*f.bid10Gids, 31),
+ f.dms.getActiveLids());
+ EXPECT_EQUAL(f.bid10Gids->size(), f.dms.getNumActiveLids());
+ }
+ { // active buckets, non-active target
+ SplitAndJoinFixture f;
+ f.insertGids2Mostly(f.bid32);
+ f.dms.setBucketState(f.bid20, true);
+ f.dms.setBucketState(f.bid22, true);
+ EXPECT_FALSE(f.getInfo(f.bid10).isActive());
+ EXPECT_TRUE(f.getInfo(f.bid20).isActive());
+ EXPECT_TRUE(f.getInfo(f.bid22).isActive());
+ BoolVector filtered(getBoolVectorFiltered(*f.bid10Gids, 31, f.bid32));
+ assertActiveLids(filtered, f.dms.getActiveLids());
+ EXPECT_EQUAL(filtered.countTrue(), f.dms.getNumActiveLids());
+
+ f._bucketDBHandler.handleJoin(10, f.bid20, f.bid22, f.bid10);
+ EXPECT_TRUE(f.getInfo(f.bid10).isActive());
+ assertActiveLids(getBoolVector(*f.bid10Gids, 31),
+ f.dms.getActiveLids());
+ EXPECT_EQUAL(f.bid10Gids->size(), f.dms.getNumActiveLids());
+ }
+}
+
+// Verifies that bucket active-state is carried over to the join target
+// when joining two buckets that contain no documents: the target is active
+// iff at least one of the (empty) sources was active.
+TEST("requireThatActiveStateIsPreservedAfterEmptyJoin")
+{
+    { // both sources non-active -> target non-active
+        SplitAndJoinEmptyFixture f;
+        f._bucketDBHandler.handleCreateBucket(f.bid20);
+        f._bucketDBHandler.handleCreateBucket(f.bid22);
+        EXPECT_FALSE(f.getInfo(f.bid20).isActive());
+        EXPECT_FALSE(f.getInfo(f.bid22).isActive());
+
+        f._bucketDBHandler.handleJoin(10, f.bid20, f.bid22, f.bid10);
+        EXPECT_FALSE(f.getInfo(f.bid10).isActive());
+    }
+    { // both sources active -> target active
+        SplitAndJoinEmptyFixture f;
+        f._bucketDBHandler.handleCreateBucket(f.bid20);
+        f._bucketDBHandler.handleCreateBucket(f.bid22);
+        f.dms.setBucketState(f.bid20, true);
+        f.dms.setBucketState(f.bid22, true);
+        EXPECT_TRUE(f.getInfo(f.bid20).isActive());
+        EXPECT_TRUE(f.getInfo(f.bid22).isActive());
+
+        f._bucketDBHandler.handleJoin(10, f.bid20, f.bid22, f.bid10);
+        EXPECT_TRUE(f.getInfo(f.bid10).isActive());
+    }
+    { // only one source active -> target still active
+        SplitAndJoinEmptyFixture f;
+        f._bucketDBHandler.handleCreateBucket(f.bid20);
+        f._bucketDBHandler.handleCreateBucket(f.bid22);
+        f.dms.setBucketState(f.bid20, true);
+        EXPECT_TRUE(f.getInfo(f.bid20).isActive());
+        EXPECT_FALSE(f.getInfo(f.bid22).isActive());
+
+        f._bucketDBHandler.handleJoin(10, f.bid20, f.bid22, f.bid10);
+        EXPECT_TRUE(f.getInfo(f.bid10).isActive());
+    }
+}
+
+// Verifies active-lid bookkeeping when overlapping buckets (bid10 contains
+// bid20/bid30) are activated and deactivated independently: a lid is active
+// while ANY bucket covering it is active.
+TEST("requireThatOverlappingBucketActiveStateWorks")
+{
+    SplitAndJoinFixture f;
+    f.insertGids1Mostly(f.bid30);
+    // Nothing activated yet.
+    assertActiveLids(BoolVector(31), f.dms.getActiveLids());
+    EXPECT_EQUAL(0u, f.dms.getNumActiveLids());
+    // Activating the outer bucket activates its lids except those that
+    // were inserted under bid30.
+    f.dms.setBucketState(f.bid10, true);
+    BoolVector filtered(getBoolVectorFiltered(*f.bid10Gids, 31, f.bid30));
+    assertActiveLids(filtered, f.dms.getActiveLids());
+    EXPECT_EQUAL(filtered.countTrue(), f.dms.getNumActiveLids());
+    // Activating bid20 as well covers the full bid10 gid set.
+    f.dms.setBucketState(f.bid20, true);
+    assertActiveLids(getBoolVector(*f.bid10Gids, 31),
+                     f.dms.getActiveLids());
+    EXPECT_EQUAL(f.bid10Gids->size(), f.dms.getNumActiveLids());
+    // Deactivating bid10 leaves only the bid30 lids active.
+    f.dms.setBucketState(f.bid10, false);
+    assertActiveLids(getBoolVector(*f.bid30Gids, 31),
+                     f.dms.getActiveLids());
+    EXPECT_EQUAL(f.bid30Gids->size(), f.dms.getNumActiveLids());
+    // Deactivating the last bucket clears all active lids.
+    f.dms.setBucketState(f.bid20, false);
+    assertActiveLids(BoolVector(31), f.dms.getActiveLids());
+    EXPECT_EQUAL(0u, f.dms.getNumActiveLids());
+}
+
+// Fixture with a DocumentMetaStore configured as a REMOVED sub-db, wired to
+// a BucketDBHandler so bucket create/join operations can be exercised
+// against it.
+struct RemovedFixture
+{
+    std::shared_ptr<BucketDBOwner> _bucketDB;    // shared bucket DB
+    DocumentMetaStore dms;                       // store under test (SubDbType::REMOVED)
+    bucketdb::BucketDBHandler _bucketDBHandler;  // routes bucket ops to dms
+
+    RemovedFixture(void)
+        : _bucketDB(createBucketDB()),
+          dms(_bucketDB,
+              DocumentMetaStore::getFixedName(),
+              search::GrowStrategy(),
+              DocumentMetaStore::IGidCompare::SP(new DocumentMetaStore::DefaultGidCompare),
+              SubDbType::REMOVED),
+          _bucketDBHandler(dms.getBucketDB())
+    {
+        // Register the meta store so the handler forwards bucket changes.
+        _bucketDBHandler.addDocumentMetaStore(&dms, 0);
+    }
+
+    // Returns a snapshot of the bucket info for the given bucket id.
+    BucketInfo
+    getInfo(const BucketId &bid) const
+    {
+        return dms.getBucketDB().takeGuard()->get(bid);
+    }
+};
+
+// Verifies that remove works in a REMOVED sub-db after the same gid has been
+// re-added under a different bucket: the second add reuses lid 1, and
+// remove()/removeComplete() on that lid must still succeed.
+TEST("requireThatRemoveChangedBucketWorks")
+{
+    RemovedFixture f;
+    GlobalIdEntry g(1);
+    f.dms.constructFreeList();
+    f._bucketDBHandler.handleCreateBucket(g.bid1);
+    uint32_t addLid1 = addGid(f.dms, g.gid, g.bid1, Timestamp(0));
+    EXPECT_EQUAL(1u, addLid1);
+    uint32_t addLid2 = addGid(f.dms, g.gid, g.bid2, Timestamp(0));
+    // Use EXPECT_EQUAL (not EXPECT_TRUE on a comparison) so a failure prints
+    // both values, consistent with the addLid1 check above.
+    EXPECT_EQUAL(1u, addLid2);
+    EXPECT_TRUE(f.dms.remove(1u));
+    f.dms.removeComplete(1u);
+}
+
+// Verifies LidUsageStats (lid limit, used count, lowest free lid, highest
+// used lid) through a sequence of puts and removes.
+TEST("requireThatGetLidUsageStatsWorks")
+{
+    DocumentMetaStore dms(createBucketDB());
+    dms.constructFreeList();
+
+    // Fresh store: limit 1, nothing used.
+    LidUsageStats s = dms.getLidUsageStats();
+    EXPECT_EQUAL(1u, s.getLidLimit());
+    EXPECT_EQUAL(0u, s.getUsedLids());
+    EXPECT_EQUAL(1u, s.getLowestFreeLid());
+    EXPECT_EQUAL(0u, s.getHighestUsedLid());
+
+    // Lids 1..3 are handed out sequentially; limit grows with each put.
+    putGid(dms, createGid(1), 1);
+
+    s = dms.getLidUsageStats();
+    EXPECT_EQUAL(2u, s.getLidLimit());
+    EXPECT_EQUAL(1u, s.getUsedLids());
+    EXPECT_EQUAL(2u, s.getLowestFreeLid());
+    EXPECT_EQUAL(1u, s.getHighestUsedLid());
+
+    putGid(dms, createGid(2), 2);
+
+    s = dms.getLidUsageStats();
+    EXPECT_EQUAL(3u, s.getLidLimit());
+    EXPECT_EQUAL(2u, s.getUsedLids());
+    EXPECT_EQUAL(3u, s.getLowestFreeLid());
+    EXPECT_EQUAL(2u, s.getHighestUsedLid());
+
+
+    putGid(dms, createGid(3), 3);
+
+    s = dms.getLidUsageStats();
+    EXPECT_EQUAL(4u, s.getLidLimit());
+    EXPECT_EQUAL(3u, s.getUsedLids());
+    EXPECT_EQUAL(4u, s.getLowestFreeLid());
+    EXPECT_EQUAL(3u, s.getHighestUsedLid());
+
+    // Removing lid 1 frees the lowest lid; limit is unchanged.
+    dms.remove(1);
+    dms.removeComplete(1);
+
+    s = dms.getLidUsageStats();
+    EXPECT_EQUAL(4u, s.getLidLimit());
+    EXPECT_EQUAL(2u, s.getUsedLids());
+    EXPECT_EQUAL(1u, s.getLowestFreeLid());
+    EXPECT_EQUAL(3u, s.getHighestUsedLid());
+
+    // Removing lid 3 lowers the highest used lid to 2.
+    dms.remove(3);
+    dms.removeComplete(3);
+
+    s = dms.getLidUsageStats();
+    EXPECT_EQUAL(4u, s.getLidLimit());
+    EXPECT_EQUAL(1u, s.getUsedLids());
+    EXPECT_EQUAL(1u, s.getLowestFreeLid());
+    EXPECT_EQUAL(2u, s.getHighestUsedLid());
+
+    // Removing the last lid empties the store; limit still stays at 4.
+    dms.remove(2);
+    dms.removeComplete(2);
+
+    s = dms.getLidUsageStats();
+    EXPECT_EQUAL(4u, s.getLidLimit());
+    EXPECT_EQUAL(0u, s.getUsedLids());
+    EXPECT_EQUAL(1u, s.getLowestFreeLid());
+    EXPECT_EQUAL(0u, s.getHighestUsedLid());
+}
+
+// Checks that a LidUsageStats built from (lidLimit, usedLids) reports the
+// expected lid bloat. Returns false on mismatch so callers can wrap the
+// call in EXPECT_TRUE for better failure attribution.
+bool
+assertLidBloat(uint32_t expBloat, uint32_t lidLimit, uint32_t usedLids)
+{
+    LidUsageStats stats(lidLimit, usedLids, 0, 0);
+    return EXPECT_EQUAL(expBloat, stats.getLidBloat());
+}
+
+// Verifies LidUsageStats::getLidBloat(): bloat = limit - used - 1 (reserved
+// lid 0), clamped so an empty/full minimal store reports zero bloat.
+TEST("require that LidUsageStats::getLidBloat() works")
+{
+    // Wrap the helper in EXPECT_TRUE so its bool result is not discarded,
+    // matching how assertLidSpace() is used elsewhere in this file.
+    EXPECT_TRUE(assertLidBloat(4, 10, 5));
+    EXPECT_TRUE(assertLidBloat(0, 1, 0));
+    EXPECT_TRUE(assertLidBloat(0, 1, 1));
+}
+
+// Verifies that move() relocates a document to a freed lid: after removing
+// lid 1 and moving lid 2 onto it, gid2 resolves to lid 1.
+TEST("requireThatMoveWorks")
+{
+    DocumentMetaStore dms(createBucketDB());
+    GlobalId gid;
+    uint32_t lid = 0u;
+    dms.constructFreeList();
+
+    EXPECT_EQUAL(1u, dms.getNumDocs());
+    EXPECT_EQUAL(0u, dms.getNumUsedLids());
+    // Two puts occupy lids 1 and 2.
+    EXPECT_TRUE(assertPut(bucketId1, time1, 1u, gid1, dms));
+    EXPECT_EQUAL(2u, dms.getNumDocs());
+    EXPECT_EQUAL(1u, dms.getNumUsedLids());
+    EXPECT_TRUE(assertPut(bucketId2, time2, 2u, gid2, dms));
+    EXPECT_EQUAL(3u, dms.getNumDocs());
+    EXPECT_EQUAL(2u, dms.getNumUsedLids());
+    EXPECT_TRUE(dms.getGid(1u, gid));
+    EXPECT_TRUE(dms.getLid(gid2, lid));
+    EXPECT_EQUAL(gid1, gid);
+    EXPECT_EQUAL(2u, lid);
+    // Free lid 1, then move the document at lid 2 into it.
+    EXPECT_TRUE(dms.remove(1));
+    dms.removeComplete(1u);
+    EXPECT_EQUAL(1u, dms.getNumUsedLids());
+    dms.move(2u, 1u);
+    dms.removeComplete(2u);
+    // gid2 is now reachable through lid 1.
+    EXPECT_TRUE(dms.getGid(1u, gid));
+    EXPECT_TRUE(dms.getLid(gid2, lid));
+    EXPECT_EQUAL(gid2, gid);
+    EXPECT_EQUAL(1u, lid);
+}
+
+// Checks the full lid-space state of a DocumentMetaStore in one call:
+// total docs, committed doc-id limit, used lids, and the want/can shrink
+// flags. Returns false on the first mismatch.
+bool
+assertLidSpace(uint32_t numDocs,
+               uint32_t committedDocIdLimit,
+               uint32_t numUsedLids,
+               bool wantShrinkLidSpace,
+               bool canShrinkLidSpace,
+               const DocumentMetaStore &dms)
+{
+    if (!EXPECT_EQUAL(numDocs, dms.getNumDocs())) return false;
+    if (!EXPECT_EQUAL(committedDocIdLimit, dms.getCommittedDocIdLimit())) return false;
+    if (!EXPECT_EQUAL(numUsedLids, dms.getNumUsedLids())) return false;
+    if (!EXPECT_EQUAL(wantShrinkLidSpace, dms.wantShrinkLidSpace())) return false;
+    if (!EXPECT_EQUAL(canShrinkLidSpace, dms.canShrinkLidSpace())) return false;
+    return true;
+}
+
+// Fills the store with lids [1, endLid), one distinct gid per lid with an
+// increasing timestamp, then verifies the resulting lid space.
+void
+populate(uint32_t endLid, DocumentMetaStore &dms)
+{
+    for (uint32_t lid = 1; lid < endLid; ++lid) {
+        GlobalId gid = createGid(lid);
+        putGid(dms, gid, lid, Timestamp(10000 + lid));
+    }
+    EXPECT_TRUE(assertLidSpace(endLid, endLid, endLid - 1, false, false, dms));
+}
+
+// Removes lids [shrinkTarget, startLid] in descending order.
+// NOTE(review): lid is unsigned, so shrinkTarget == 0 would wrap around and
+// loop forever; all current callers pass shrinkTarget >= 1.
+void
+remove(uint32_t startLid, uint32_t shrinkTarget, DocumentMetaStore &dms)
+{
+    for (uint32_t lid = startLid; lid >= shrinkTarget; --lid) {
+        dms.remove(lid);
+        dms.removeComplete(lid);
+    }
+}
+
+// Verifies the shrink protocol: compactLidSpace() sets the want-shrink flag,
+// holdUnblockShrinkLidSpace() makes shrinking possible, and shrinkLidSpace()
+// actually reduces the number of docs to the target.
+TEST("requireThatShrinkWorks")
+{
+    DocumentMetaStore dms(createBucketDB());
+    dms.constructFreeList();
+
+    populate(10, dms);
+
+    // Free the upper lids so the space above shrinkTarget is unused.
+    uint32_t shrinkTarget = 5;
+    remove(9, shrinkTarget, dms);
+    EXPECT_TRUE(assertLidSpace(10, 10, shrinkTarget - 1, false, false, dms));
+
+    // Compacting lowers the committed limit and flags want-shrink.
+    dms.compactLidSpace(shrinkTarget);
+    EXPECT_TRUE(assertLidSpace(10, shrinkTarget, shrinkTarget - 1, true, false, dms));
+
+    // Unblocking makes the shrink possible (can-shrink becomes true).
+    dms.holdUnblockShrinkLidSpace();
+    EXPECT_TRUE(assertLidSpace(10, shrinkTarget, shrinkTarget - 1, true, true, dms));
+
+    // The shrink itself reduces numDocs to the target.
+    dms.shrinkLidSpace();
+    EXPECT_TRUE(assertLidSpace(shrinkTarget, shrinkTarget, shrinkTarget - 1, false, false, dms));
+}
+
+
+// Verifies that lid-space shrinking can be driven through the flush target:
+// the target reports a memory gain only once shrinking is possible, and
+// running its flush task performs the shrink.
+TEST("requireThatShrinkViaFlushTargetWorks")
+{
+    DocumentMetaStore::SP dms(new DocumentMetaStore(createBucketDB()));
+    dms->constructFreeList();
+    TuneFileAttributes tuneFileAttributes;
+    DummyFileHeaderContext fileHeaderContext;
+    DummyTlsSyncer dummyTlsSyncer;
+    // Start from a clean flush directory.
+    vespalib::rmdir("dmsflush", true);
+    vespalib::mkdir("dmsflush");
+    IFlushTarget::SP ft(new DocumentMetaStoreFlushTarget(dms,
+                                                         dummyTlsSyncer,
+                                                         "dmsflush",
+                                                         tuneFileAttributes,
+                                                         fileHeaderContext));
+
+    populate(10, *dms);
+
+    uint32_t shrinkTarget = 5;
+    remove(9, shrinkTarget, *dms);
+    EXPECT_TRUE(assertLidSpace(10, 10, shrinkTarget - 1, false, false, *dms));
+    // No compaction yet => no memory gain to flush.
+    EXPECT_EQUAL(ft->getApproxMemoryGain().getBefore(),
+                 ft->getApproxMemoryGain().getAfter());
+
+    dms->compactLidSpace(shrinkTarget);
+    EXPECT_TRUE(assertLidSpace(10, shrinkTarget, shrinkTarget - 1, true, false, *dms));
+    EXPECT_EQUAL(ft->getApproxMemoryGain().getBefore(),
+                 ft->getApproxMemoryGain().getAfter());
+    // The guard keeps a generation alive, blocking the shrink below.
+    AttributeGuard::UP g(new AttributeGuard(dms));
+
+    dms->holdUnblockShrinkLidSpace();
+    EXPECT_TRUE(assertLidSpace(10, shrinkTarget, shrinkTarget - 1, true, false, *dms));
+    EXPECT_EQUAL(ft->getApproxMemoryGain().getBefore(),
+                 ft->getApproxMemoryGain().getAfter());
+
+    // Releasing the guard and reclaiming old generations enables the shrink,
+    // which the flush target now reports as a positive memory gain.
+    g.reset();
+    dms->removeAllOldGenerations();
+    EXPECT_TRUE(assertLidSpace(10, shrinkTarget, shrinkTarget - 1, true, true, *dms));
+    EXPECT_TRUE(ft->getApproxMemoryGain().getBefore() >
+                ft->getApproxMemoryGain().getAfter());
+
+    // Run the flush task; it performs the shrink.
+    vespalib::ThreadStackExecutor exec(1, 128 * 1024);
+    vespalib::Executor::Task::UP task = ft->initFlush(11);
+    exec.execute(std::move(task));
+    exec.sync();
+    exec.shutdown();
+    EXPECT_TRUE(assertLidSpace(shrinkTarget, shrinkTarget, shrinkTarget - 1, false, false, *dms));
+    EXPECT_EQUAL(ft->getApproxMemoryGain().getBefore(),
+                 ft->getApproxMemoryGain().getAfter());
+}
+
+}
+
+// Test driver: runs all TEST cases defined above.
+TEST_MAIN()
+{
+    TEST_RUN_ALL();
+}
diff --git a/searchcore/src/tests/proton/documentmetastore/documentmetastore_test.sh b/searchcore/src/tests/proton/documentmetastore/documentmetastore_test.sh
new file mode 100644
index 00000000000..03fb18363f1
--- /dev/null
+++ b/searchcore/src/tests/proton/documentmetastore/documentmetastore_test.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+# Run the documentmetastore unit test (optionally under valgrind via
+# $VALGRIND) and clean up files/directories the test leaves behind.
+$VALGRIND ./searchcore_documentmetastore_test_app
+rm -rf documentmetastore*.dat
+rm -rf dmsflush
diff --git a/searchcore/src/tests/proton/documentmetastore/lidreusedelayer/.gitignore b/searchcore/src/tests/proton/documentmetastore/lidreusedelayer/.gitignore
new file mode 100644
index 00000000000..3af6b83638f
--- /dev/null
+++ b/searchcore/src/tests/proton/documentmetastore/lidreusedelayer/.gitignore
@@ -0,0 +1 @@
+searchcore_lidreusedelayer_test_app
diff --git a/searchcore/src/tests/proton/documentmetastore/lidreusedelayer/CMakeLists.txt b/searchcore/src/tests/proton/documentmetastore/lidreusedelayer/CMakeLists.txt
new file mode 100644
index 00000000000..5e333c74b2f
--- /dev/null
+++ b/searchcore/src/tests/proton/documentmetastore/lidreusedelayer/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_lidreusedelayer_test_app
+ SOURCES
+ lidreusedelayer_test.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_documentmetastore
+)
+vespa_add_test(NAME searchcore_lidreusedelayer_test_app COMMAND searchcore_lidreusedelayer_test_app)
diff --git a/searchcore/src/tests/proton/documentmetastore/lidreusedelayer/DESC b/searchcore/src/tests/proton/documentmetastore/lidreusedelayer/DESC
new file mode 100644
index 00000000000..73e755c73c6
--- /dev/null
+++ b/searchcore/src/tests/proton/documentmetastore/lidreusedelayer/DESC
@@ -0,0 +1 @@
+LID reuse delayer test. Take a look at lidreusedelayer_test.cpp for details.
diff --git a/searchcore/src/tests/proton/documentmetastore/lidreusedelayer/FILES b/searchcore/src/tests/proton/documentmetastore/lidreusedelayer/FILES
new file mode 100644
index 00000000000..4965f6577f6
--- /dev/null
+++ b/searchcore/src/tests/proton/documentmetastore/lidreusedelayer/FILES
@@ -0,0 +1 @@
+lidreusedelayer_test.cpp
diff --git a/searchcore/src/tests/proton/documentmetastore/lidreusedelayer/lidreusedelayer_test.cpp b/searchcore/src/tests/proton/documentmetastore/lidreusedelayer/lidreusedelayer_test.cpp
new file mode 100644
index 00000000000..ceecaad7d5b
--- /dev/null
+++ b/searchcore/src/tests/proton/documentmetastore/lidreusedelayer/lidreusedelayer_test.cpp
@@ -0,0 +1,325 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("lidreusedelayer_test");
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/searchcore/proton/documentmetastore/i_store.h>
+#include <vespa/searchcore/proton/documentmetastore/lidreusedelayer.h>
+#include <vespa/searchcore/proton/server/executorthreadingservice.h>
+#include <vespa/searchcore/proton/test/thread_utils.h>
+#include <vespa/searchcore/proton/test/threading_service_observer.h>
+#include <vespa/searchlib/common/lambdatask.h>
+
+using search::makeLambdaTask;
+
+namespace proton {
+
+namespace
+{
+
+// Returns true iff the master and index executors of the observed threading
+// service have executed exactly the expected number of tasks.
+bool
+assertThreadObserver(uint32_t masterExecuteCnt,
+                     uint32_t indexExecuteCnt,
+                     const test::ThreadingServiceObserver &observer)
+{
+    if (!EXPECT_EQUAL(masterExecuteCnt,
+                      observer.masterObserver().getExecuteCnt())) {
+        return false;
+    }
+    if (!EXPECT_EQUAL(indexExecuteCnt,
+                      observer.indexObserver().getExecuteCnt())) {
+        return false;
+    }
+    return true;
+}
+
+}
+
+// Minimal documentmetastore::IStore stub: most operations are no-ops that
+// report success; removeComplete()/removeBatchComplete() count their
+// invocations so tests can assert how the LidReuseDelayer recycled lids.
+class MyMetaStore : public documentmetastore::IStore
+{
+public:
+    bool _freeListActive;               // toggled by tests; gates delayed reuse
+    uint32_t _removeCompleteCount;      // # single-lid completions
+    uint32_t _removeBatchCompleteCount; // # batch completions
+    uint32_t _removeCompleteLids;       // total lids completed (single + batch)
+
+    MyMetaStore()
+        : _freeListActive(false),
+          _removeCompleteCount(0),
+          _removeBatchCompleteCount(0),
+          _removeCompleteLids(0)
+    {
+    }
+
+    virtual ~MyMetaStore() { }
+
+    virtual Result inspectExisting(const GlobalId &) const override
+    {
+        return Result();
+    }
+
+    virtual Result inspect(const GlobalId &) override
+    {
+        return Result();
+    }
+
+    virtual Result put(const GlobalId &, const BucketId &, const Timestamp &,
+                       DocId) override
+    {
+        return Result();
+    }
+
+    virtual bool updateMetaData(DocId, const BucketId &,
+                                const Timestamp &) override
+    {
+        return true;
+    }
+
+    virtual bool remove(DocId) override
+    {
+        return true;
+    }
+
+    virtual void removeComplete(DocId) override
+    {
+        ++_removeCompleteCount;
+        ++_removeCompleteLids;
+    }
+
+    virtual void move(DocId, DocId) override
+    {
+    }
+
+    virtual bool validLid(DocId) const override
+    {
+        return true;
+    }
+
+    virtual void removeBatch(const std::vector<DocId> &,
+                             const DocId) override
+    {
+    }
+
+    virtual void
+    removeBatchComplete(const std::vector<DocId> &lidsToRemove) override
+    {
+        ++_removeBatchCompleteCount;
+        _removeCompleteLids += lidsToRemove.size();
+    }
+
+    // Never expected to be called by these tests.
+    virtual const RawDocumentMetaData &getRawMetaData(DocId) const override
+    {
+        abort();
+    }
+
+    virtual bool getFreeListActive() const override
+    {
+        return _freeListActive;
+    }
+
+    // Returns true iff the recorded completion counters match expectations.
+    bool
+    assertWork(uint32_t expRemoveCompleteCount,
+               uint32_t expRemoveBatchCompleteCount,
+               uint32_t expRemoveCompleteLids)
+    {
+        if (!EXPECT_EQUAL(expRemoveCompleteCount, _removeCompleteCount)) {
+            return false;
+        }
+        if (!EXPECT_EQUAL(expRemoveBatchCompleteCount,
+                          _removeBatchCompleteCount)) {
+            return false;
+        }
+        if (!EXPECT_EQUAL(expRemoveCompleteLids, _removeCompleteLids)) {
+            return false;
+        }
+        return true;
+    }
+};
+
+// Test fixture wiring a LidReuseDelayer to an observed threading service and
+// the MyMetaStore stub. Helpers run work in the master thread and emulate
+// the index->master round trip used to recycle lids.
+class Fixture
+{
+public:
+    ExecutorThreadingService _writeServiceReal;
+    test::ThreadingServiceObserver _writeService; // counts executed tasks
+    MyMetaStore _store;
+    documentmetastore::LidReuseDelayer _lidReuseDelayer;
+
+    Fixture()
+        : _writeServiceReal(),
+          _writeService(_writeServiceReal),
+          _store(),
+          _lidReuseDelayer(_writeService, _store)
+    {
+    }
+
+    // Runs func synchronously in the master thread.
+    template <typename FunctionType>
+    void runInMaster(FunctionType func) {
+        test::runInMaster(_writeService, func);
+    }
+
+    // Completes recycled lids in the store; single lids use the single-lid
+    // path, multiple lids the batch path.
+    void
+    cycledLids(const std::vector<uint32_t> &lids)
+    {
+        if (lids.size() == 1) {
+            _store.removeComplete(lids[0]);
+        } else {
+            _store.removeBatchComplete(lids);
+        }
+    }
+
+    // Hops back to the master thread to complete the lids.
+    // The lambda captures lids by value since it outlives this frame.
+    void
+    performCycleLids(const std::vector<uint32_t> &lids)
+    {
+        _writeService.master().execute(
+                makeLambdaTask([=]() { cycledLids(lids);}));
+    }
+
+    // Emulates the index->master round trip a real commit would perform.
+    void
+    cycleLids(const std::vector<uint32_t> &lids)
+    {
+        if (lids.empty())
+            return;
+        _writeService.index().execute(
+                makeLambdaTask([=]() { performCycleLids(lids);}));
+    }
+
+    // Asks the delayer (in the master thread) to delay reuse of one lid.
+    bool
+    delayReuse(uint32_t lid)
+    {
+        bool res = false;
+        runInMaster([&] () { res = _lidReuseDelayer.delayReuse(lid); } );
+        return res;
+    }
+
+    // Asks the delayer (in the master thread) to delay reuse of many lids.
+    bool
+    delayReuse(const std::vector<uint32_t> &lids)
+    {
+        bool res = false;
+        runInMaster([&] () { res = _lidReuseDelayer.delayReuse(lids); });
+        return res;
+    }
+
+    void setImmediateCommit(bool immediateCommit) {
+        runInMaster([&] () { _lidReuseDelayer.
+                            setImmediateCommit(immediateCommit); } );
+    }
+
+    void setHasIndexedFields(bool hasIndexedFields) {
+        runInMaster([&] () { _lidReuseDelayer.
+                            setHasIndexedFields(hasIndexedFields); } );
+    }
+
+    // Drains the delayer's pending lids and cycles them back to the store.
+    void commit() {
+        runInMaster([&] () { cycleLids(_lidReuseDelayer.getReuseLids()); });
+    }
+
+    void
+    sync()
+    {
+        _writeService.sync();
+    }
+
+    void
+    scheduleDelayReuseLid(uint32_t lid)
+    {
+        runInMaster([&] () { cycleLids({ lid }); });
+    }
+
+    void
+    scheduleDelayReuseLids(const std::vector<uint32_t> &lids)
+    {
+        runInMaster([&] () { cycleLids(lids); });
+    }
+};
+
+
+// With the free list inactive, delayReuse() must decline and the store must
+// see no completions.
+TEST_F("require that nothing happens before free list is active", Fixture)
+{
+    f.setHasIndexedFields(true);
+    EXPECT_FALSE(f.delayReuse(4));
+    EXPECT_FALSE(f.delayReuse({ 5, 6}));
+    EXPECT_TRUE(f._store.assertWork(0, 0, 0));
+    EXPECT_TRUE(assertThreadObserver(3, 0, f._writeService));
+}
+
+
+// A single delayed lid is completed via the single-lid path once the
+// index->master cycle runs.
+TEST_F("require that single lid is delayed", Fixture)
+{
+    f._store._freeListActive = true;
+    f.setHasIndexedFields(true);
+    EXPECT_TRUE(f.delayReuse(4));
+    f.scheduleDelayReuseLid(4);
+    EXPECT_TRUE(f._store.assertWork(1, 0, 1));
+    EXPECT_TRUE(assertThreadObserver(4, 1, f._writeService));
+}
+
+
+// A delayed lid vector is completed via the batch path.
+TEST_F("require that lid vector is delayed", Fixture)
+{
+    f._store._freeListActive = true;
+    f.setHasIndexedFields(true);
+    EXPECT_TRUE(f.delayReuse({ 5, 6, 7}));
+    f.scheduleDelayReuseLids({ 5, 6, 7});
+    EXPECT_TRUE(f._store.assertWork(0, 1, 3));
+    EXPECT_TRUE(assertThreadObserver(4, 1, f._writeService));
+}
+
+
+// Without immediate commit, delayed lids accumulate in the delayer and are
+// completed as one batch on commit().
+TEST_F("require that reuse can be batched", Fixture)
+{
+    f._store._freeListActive = true;
+    f.setHasIndexedFields(true);
+    f.setImmediateCommit(false);
+    EXPECT_FALSE(f.delayReuse(4));
+    EXPECT_FALSE(f.delayReuse({ 5, 6, 7}));
+    EXPECT_TRUE(f._store.assertWork(0, 0, 0));
+    EXPECT_TRUE(assertThreadObserver(4, 0, f._writeService));
+    // Commit drains all four lids in a single batch completion.
+    f.commit();
+    EXPECT_TRUE(f._store.assertWork(0, 1, 4));
+    EXPECT_TRUE(assertThreadObserver(6, 1, f._writeService));
+    // New delays after the commit are buffered again.
+    EXPECT_FALSE(f.delayReuse(8));
+    EXPECT_FALSE(f.delayReuse({ 9, 10}));
+    EXPECT_TRUE(f._store.assertWork(0, 1, 4));
+    EXPECT_TRUE(assertThreadObserver(8, 1, f._writeService));
+}
+
+
+// A one-element lid vector is completed through the single-lid path
+// (removeComplete), not the batch path.
+TEST_F("require that single element array is optimized", Fixture)
+{
+    f._store._freeListActive = true;
+    f.setHasIndexedFields(true);
+    f.setImmediateCommit(false);
+    EXPECT_FALSE(f.delayReuse({ 4}));
+    EXPECT_TRUE(f._store.assertWork(0, 0, 0));
+    EXPECT_TRUE(assertThreadObserver(3, 0, f._writeService));
+    f.commit();
+    f.setImmediateCommit(true);
+    // One lid committed => single-lid completion, no batch.
+    EXPECT_TRUE(f._store.assertWork(1, 0, 1));
+    EXPECT_TRUE(assertThreadObserver(6, 1, f._writeService));
+    EXPECT_TRUE(f.delayReuse({ 8}));
+    f.scheduleDelayReuseLids({ 8});
+    EXPECT_TRUE(f._store.assertWork(2, 0, 2));
+    EXPECT_TRUE(assertThreadObserver(9, 2, f._writeService));
+}
+
+
+// Without indexed fields there is nothing to wait for: delayReuse() declines
+// and the lids are completed in the store right away.
+TEST_F("require that lids are reused faster with no indexed fields", Fixture)
+{
+    f._store._freeListActive = true;
+    f.setHasIndexedFields(false);
+    EXPECT_FALSE(f.delayReuse(4));
+    EXPECT_TRUE(f._store.assertWork(1, 0, 1));
+    EXPECT_TRUE(assertThreadObserver(2, 0, f._writeService));
+    EXPECT_FALSE(f.delayReuse({ 5, 6, 7}));
+    EXPECT_TRUE(f._store.assertWork(1, 1, 4));
+    EXPECT_TRUE(assertThreadObserver(3, 0, f._writeService));
+}
+
+}
+
+
+
+// Test driver: runs all TEST_F cases defined above.
+TEST_MAIN()
+{
+    TEST_RUN_ALL();
+}
diff --git a/searchcore/src/tests/proton/feed_and_search/.gitignore b/searchcore/src/tests/proton/feed_and_search/.gitignore
new file mode 100644
index 00000000000..83587d9de8b
--- /dev/null
+++ b/searchcore/src/tests/proton/feed_and_search/.gitignore
@@ -0,0 +1,8 @@
+.depend
+Makefile
+feed_and_search_test
+test_index
+test_index2
+test_index3
+test_index4
+searchcore_feed_and_search_test_app
diff --git a/searchcore/src/tests/proton/feed_and_search/CMakeLists.txt b/searchcore/src/tests/proton/feed_and_search/CMakeLists.txt
new file mode 100644
index 00000000000..b6d1258a386
--- /dev/null
+++ b/searchcore/src/tests/proton/feed_and_search/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_feed_and_search_test_app
+ SOURCES
+ feed_and_search.cpp
+ DEPENDS
+ searchcore_util
+)
+vespa_add_test(NAME searchcore_feed_and_search_test_app COMMAND searchcore_feed_and_search_test_app)
diff --git a/searchcore/src/tests/proton/feed_and_search/DESC b/searchcore/src/tests/proton/feed_and_search/DESC
new file mode 100644
index 00000000000..0ae466eb2c9
--- /dev/null
+++ b/searchcore/src/tests/proton/feed_and_search/DESC
@@ -0,0 +1 @@
+feed_and_search test. Take a look at feed_and_search.cpp for details.
diff --git a/searchcore/src/tests/proton/feed_and_search/FILES b/searchcore/src/tests/proton/feed_and_search/FILES
new file mode 100644
index 00000000000..cd160110672
--- /dev/null
+++ b/searchcore/src/tests/proton/feed_and_search/FILES
@@ -0,0 +1 @@
+feed_and_search.cpp
diff --git a/searchcore/src/tests/proton/feed_and_search/feed_and_search.cpp b/searchcore/src/tests/proton/feed_and_search/feed_and_search.cpp
new file mode 100644
index 00000000000..ccd80f6bdf3
--- /dev/null
+++ b/searchcore/src/tests/proton/feed_and_search/feed_and_search.cpp
@@ -0,0 +1,241 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("feed_and_search_test");
+
+#include <vespa/document/datatype/datatype.h>
+#include <vespa/document/fieldvalue/document.h>
+#include <vespa/document/fieldvalue/fieldvalue.h>
+#include <vespa/searchlib/memoryindex/memoryindex.h>
+#include <vespa/searchlib/diskindex/diskindex.h>
+#include <vespa/searchlib/diskindex/indexbuilder.h>
+#include <vespa/searchlib/fef/fef.h>
+#include <vespa/searchlib/index/docbuilder.h>
+#include <vespa/searchlib/index/dummyfileheadercontext.h>
+#include <vespa/searchlib/query/base.h>
+#include <vespa/searchlib/query/tree/simplequery.h>
+#include <vespa/searchlib/queryeval/blueprint.h>
+#include <vespa/searchlib/queryeval/searchiterator.h>
+#include <vespa/searchlib/queryeval/fake_requestcontext.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <sstream>
+#include <vespa/searchlib/diskindex/fusion.h>
+#include <vespa/searchlib/common/documentsummary.h>
+#include <vespa/searchlib/common/sequencedtaskexecutor.h>
+
+using document::DataType;
+using document::Document;
+using document::FieldValue;
+using search::DocumentIdT;
+using search::TuneFileIndexing;
+using search::TuneFileSearch;
+using search::diskindex::DiskIndex;
+using search::diskindex::IndexBuilder;
+using search::diskindex::SelectorArray;
+using search::fef::FieldPositionsIterator;
+using search::fef::MatchData;
+using search::fef::MatchDataLayout;
+using search::fef::TermFieldHandle;
+using search::fef::TermFieldMatchData;
+using search::index::DocBuilder;
+using search::index::Schema;
+using search::index::DummyFileHeaderContext;
+using search::memoryindex::MemoryIndex;
+using search::query::SimpleStringTerm;
+using search::queryeval::Blueprint;
+using search::queryeval::FieldSpec;
+using search::queryeval::FieldSpecList;
+using search::queryeval::SearchIterator;
+using search::queryeval::Searchable;
+using search::queryeval::FakeRequestContext;
+using std::ostringstream;
+using vespalib::string;
+using search::docsummary::DocumentSummary;
+
+namespace {
+
+// Legacy TestApp-style test class; current_state tracks the phase name so
+// DumpState can report where a failure occurred.
+class Test : public vespalib::TestApp {
+    const char *current_state;
+    void DumpState(bool) {
+        fprintf(stderr, "%s: ERROR: in %s\n", GetName(), current_state);
+    }
+
+    void requireThatMemoryIndexCanBeDumpedAndSearched();
+
+    // Searches `source` for `term` and verifies doc_id is the only hit.
+    void testSearch(Searchable &source,
+                    const string &term, uint32_t doc_id);
+
+public:
+    int Main();
+};
+
+// Record the phase name (for DumpState) before invoking the test function.
+#define TEST_CALL(func) \
+    current_state = #func; \
+    func();
+
+int
+Test::Main()
+{
+    TEST_INIT("feed_and_search_test");
+
+    // Tag created index files with the test binary name, when available.
+    if (_argc > 0) {
+        DummyFileHeaderContext::setCreator(_argv[0]);
+    }
+    TEST_CALL(requireThatMemoryIndexCanBeDumpedAndSearched);
+
+    TEST_DONE();
+}
+
+// Test corpus: one indexed string field; two documents, each containing the
+// shared "noise" token plus one unique word.
+const string field_name = "string_field";
+const string noise = "noise";
+const string word1 = "foo";
+const string word2 = "bar";
+const DocumentIdT doc_id1 = 1;
+const DocumentIdT doc_id2 = 2;
+const uint32_t field_id = 1;
+
+// Builds the test schema: a single indexed STRING field.
+Schema getSchema() {
+    Schema schema;
+    schema.addIndexField(Schema::IndexField(field_name,
+                                            Schema::STRING));
+    return schema;
+}
+
+// Builds a document "doc::<id>" whose indexed field contains the noise
+// token followed by the given word.
+Document::UP buildDocument(DocBuilder & doc_builder, int id,
+                           const string &word) {
+    ostringstream ost;
+    ost << "doc::" << id;
+    doc_builder.startDocument(ost.str());
+    doc_builder.startIndexField(field_name)
+        .addStr(noise).addStr(word).endField();
+    return doc_builder.endDocument();
+}
+
+// Performs a search using a Searchable.
+void Test::testSearch(Searchable &source,
+ const string &term, uint32_t doc_id)
+{
+ FakeRequestContext requestContext;
+ uint32_t fieldId = 0;
+ MatchDataLayout mdl;
+ TermFieldHandle handle = mdl.allocTermField(fieldId);
+ MatchData::UP match_data = mdl.createMatchData();
+
+ SimpleStringTerm node(term, field_name, 0, search::query::Weight(0));
+ Blueprint::UP result = source.createBlueprint(requestContext,
+ FieldSpecList().add(FieldSpec(field_name, 0, handle)), node);
+ result->fetchPostings(true);
+ SearchIterator::UP search_iterator =
+ result->createSearch(*match_data, true);
+ search_iterator->initFullRange();
+ ASSERT_TRUE(search_iterator.get());
+ ASSERT_TRUE(search_iterator->seek(doc_id));
+ EXPECT_EQUAL(doc_id, search_iterator->getDocId());
+ search_iterator->unpack(doc_id);
+ FieldPositionsIterator it =
+ match_data->resolveTermField(handle)->getIterator();
+ ASSERT_TRUE(it.valid());
+ EXPECT_EQUAL(1u, it.size());
+ EXPECT_EQUAL(1u, it.getPosition()); // All hits are at pos 1 in this index
+
+ EXPECT_TRUE(!search_iterator->seek(doc_id + 1));
+ EXPECT_TRUE(search_iterator->isAtEnd());
+}
+
+// Creates a memory index, inserts documents, performs a few
+// searches, dumps the index to disk, and performs the searches
+// again.
+// Creates a memory index, inserts documents, performs a few
+// searches, dumps the index to disk, and performs the searches
+// again. Also runs three fusion passes (copy, all-docs-removed output,
+// all-docs-removed input) and searches the fused copy.
+void Test::requireThatMemoryIndexCanBeDumpedAndSearched() {
+    Schema schema = getSchema();
+    search::SequencedTaskExecutor indexFieldInverter(2);
+    search::SequencedTaskExecutor indexFieldWriter(2);
+    MemoryIndex memory_index(schema, indexFieldInverter, indexFieldWriter);
+    DocBuilder doc_builder(schema);
+
+    Document::UP doc = buildDocument(doc_builder, doc_id1, word1);
+    memory_index.insertDocument(doc_id1, *doc.get());
+
+    doc = buildDocument(doc_builder, doc_id2, word2);
+    memory_index.insertDocument(doc_id2, *doc.get());
+    // Commit with no completion callback, then wait for the writers.
+    memory_index.commit(std::shared_ptr<search::IDestructorCallback>());
+    indexFieldWriter.sync();
+
+    testSearch(memory_index, word1, doc_id1);
+    testSearch(memory_index, word2, doc_id2);
+
+    // Dump the memory index to disk.
+    const string index_dir = "test_index";
+    IndexBuilder index_builder(schema);
+    index_builder.setPrefix(index_dir);
+    const uint32_t docIdLimit = memory_index.getDocIdLimit();
+    const uint64_t num_words = memory_index.getNumWords();
+    search::TuneFileIndexing tuneFileIndexing;
+    DummyFileHeaderContext fileHeaderContext;
+    index_builder.open(docIdLimit, num_words, tuneFileIndexing,
+                       fileHeaderContext);
+    memory_index.dump(index_builder);
+    index_builder.close();
+
+    // Fusion test. Keep all documents to get an "indentical" copy.
+    const string index_dir2 = "test_index2";
+    std::vector<string> fusionInputs;
+    fusionInputs.push_back(index_dir);
+    uint32_t fusionDocIdLimit = 0;
+    typedef search::diskindex::Fusion FastS_Fusion;
+    bool fret1 = DocumentSummary::readDocIdLimit(index_dir, fusionDocIdLimit);
+    ASSERT_TRUE(fret1);
+    SelectorArray selector(fusionDocIdLimit, 0);
+    bool fret2 = FastS_Fusion::merge(schema,
+                                     index_dir2,
+                                     fusionInputs,
+                                     selector,
+                                     false /* dynamicKPosOccFormat */,
+                                     tuneFileIndexing,
+                                     fileHeaderContext);
+    ASSERT_TRUE(fret2);
+
+    // Fusion test with all docs removed in output (doesn't affect word list)
+    const string index_dir3 = "test_index3";
+    fusionInputs.clear();
+    fusionInputs.push_back(index_dir);
+    fusionDocIdLimit = 0;
+    bool fret3 = DocumentSummary::readDocIdLimit(index_dir, fusionDocIdLimit);
+    ASSERT_TRUE(fret3);
+    SelectorArray selector2(fusionDocIdLimit, 1);
+    bool fret4 = FastS_Fusion::merge(schema,
+                                     index_dir3,
+                                     fusionInputs,
+                                     selector2,
+                                     false /* dynamicKPosOccFormat */,
+                                     tuneFileIndexing,
+                                     fileHeaderContext);
+    ASSERT_TRUE(fret4);
+
+    // Fusion test with all docs removed in input (affects word list)
+    const string index_dir4 = "test_index4";
+    fusionInputs.clear();
+    fusionInputs.push_back(index_dir3);
+    fusionDocIdLimit = 0;
+    bool fret5 = DocumentSummary::readDocIdLimit(index_dir3, fusionDocIdLimit);
+    ASSERT_TRUE(fret5);
+    SelectorArray selector3(fusionDocIdLimit, 0);
+    bool fret6 = FastS_Fusion::merge(schema,
+                                     index_dir4,
+                                     fusionInputs,
+                                     selector3,
+                                     false /* dynamicKPosOccFormat */,
+                                     tuneFileIndexing,
+                                     fileHeaderContext);
+    ASSERT_TRUE(fret6);
+
+    // Re-run the searches against the dumped index and its fused copy.
+    DiskIndex disk_index(index_dir);
+    ASSERT_TRUE(disk_index.setup(TuneFileSearch()));
+    testSearch(disk_index, word1, doc_id1);
+    testSearch(disk_index, word2, doc_id2);
+    DiskIndex disk_index2(index_dir2);
+    ASSERT_TRUE(disk_index2.setup(TuneFileSearch()));
+    testSearch(disk_index2, word1, doc_id1);
+    testSearch(disk_index2, word2, doc_id2);
+}
+} // namespace
+
+TEST_APPHOOK(Test);
diff --git a/searchcore/src/tests/proton/feedoperation/.gitignore b/searchcore/src/tests/proton/feedoperation/.gitignore
new file mode 100644
index 00000000000..695cdf3365d
--- /dev/null
+++ b/searchcore/src/tests/proton/feedoperation/.gitignore
@@ -0,0 +1,5 @@
+*_test
+.depend
+Makefile
+
+searchcore_feedoperation_test_app
diff --git a/searchcore/src/tests/proton/feedoperation/CMakeLists.txt b/searchcore/src/tests/proton/feedoperation/CMakeLists.txt
new file mode 100644
index 00000000000..fc47a3bde85
--- /dev/null
+++ b/searchcore/src/tests/proton/feedoperation/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_feedoperation_test_app
+ SOURCES
+ feedoperation_test.cpp
+ DEPENDS
+ searchcore_feedoperation
+ searchcore_pcommon
+)
+vespa_add_test(NAME searchcore_feedoperation_test_app COMMAND searchcore_feedoperation_test_app)
diff --git a/searchcore/src/tests/proton/feedoperation/feedoperation_test.cpp b/searchcore/src/tests/proton/feedoperation/feedoperation_test.cpp
new file mode 100644
index 00000000000..804927a0e03
--- /dev/null
+++ b/searchcore/src/tests/proton/feedoperation/feedoperation_test.cpp
@@ -0,0 +1,172 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Unit tests for feedoperation.
+
+#include <vespa/log/log.h>
+LOG_SETUP("feedoperation_test");
+#include <vespa/fastos/fastos.h>
+
+#include <vespa/document/base/documentid.h>
+#include <vespa/document/bucket/bucketid.h>
+#include <vespa/document/datatype/datatype.h>
+#include <vespa/document/fieldvalue/document.h>
+#include <vespa/document/update/documentupdate.h>
+#include <persistence/spi/types.h>
+#include <vespa/searchcore/proton/feedoperation/compact_lid_space_operation.h>
+#include <vespa/searchcore/proton/feedoperation/deletebucketoperation.h>
+#include <vespa/searchcore/proton/feedoperation/joinbucketsoperation.h>
+#include <vespa/searchcore/proton/feedoperation/moveoperation.h>
+#include <vespa/searchcore/proton/feedoperation/newconfigoperation.h>
+#include <vespa/searchcore/proton/feedoperation/noopoperation.h>
+#include <vespa/searchcore/proton/feedoperation/pruneremoveddocumentsoperation.h>
+#include <vespa/searchcore/proton/feedoperation/putoperation.h>
+#include <vespa/searchcore/proton/feedoperation/removeoperation.h>
+#include <vespa/searchcore/proton/feedoperation/splitbucketoperation.h>
+#include <vespa/searchcore/proton/feedoperation/spoolerreplayoperation.h>
+#include <vespa/searchcore/proton/feedoperation/updateoperation.h>
+#include <vespa/searchcore/proton/feedoperation/wipehistoryoperation.h>
+#include <vespa/searchlib/query/base.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+using document::BucketId;
+using document::DataType;
+using document::Document;
+using document::DocumentId;
+using document::DocumentUpdate;
+using search::DocumentIdT;
+using storage::spi::Timestamp;
+using namespace proton;
+
+namespace {
+
+struct MyStreamHandler : NewConfigOperation::IStreamHandler { // no-op stream handler: NewConfigOperation requires one, but these tests never use the config payload
+    typedef NewConfigOperation::SerialNum SerialNum;
+    virtual void serializeConfig(SerialNum, vespalib::nbostream &) {} // intentionally empty
+    virtual void deserializeConfig(SerialNum, vespalib::nbostream &) {} // intentionally empty
+};
+
+TEST("require that toString() on derived classes are meaningful")
+{
+    BucketId bucket_id1(42); // 42 = 0x2a; bucket ids below are rendered in hex
+    BucketId bucket_id2(43); // 0x2b
+    BucketId bucket_id3(44); // 0x2c
+    Timestamp timestamp(10);
+    Document::SP doc(new Document);
+    DbDocumentId db_doc_id;
+    uint32_t sub_db_id = 1;
+    MyStreamHandler stream_handler;
+    DocumentIdT doc_id_limit = 15;
+    DocumentId doc_id("doc:foo:bar");
+    DocumentUpdate::SP update(new DocumentUpdate(*DataType::DOCUMENT, doc_id));
+
+    EXPECT_EQUAL("DeleteBucket(BucketId(0x0000000000000000), serialNum=0)",
+                 DeleteBucketOperation().toString()); // default-constructed: zero bucket, serial 0
+    EXPECT_EQUAL("DeleteBucket(BucketId(0x000000000000002a), serialNum=0)",
+                 DeleteBucketOperation(bucket_id1).toString());
+
+    EXPECT_EQUAL("JoinBuckets("
+                 "source1=BucketId(0x0000000000000000), "
+                 "source2=BucketId(0x0000000000000000), "
+                 "target=BucketId(0x0000000000000000), serialNum=0)",
+                 JoinBucketsOperation().toString());
+    EXPECT_EQUAL("JoinBuckets("
+                 "source1=BucketId(0x000000000000002a), "
+                 "source2=BucketId(0x000000000000002b), "
+                 "target=BucketId(0x000000000000002c), serialNum=0)",
+                 JoinBucketsOperation(bucket_id1, bucket_id2, bucket_id3)
+                     .toString());
+
+    EXPECT_EQUAL("Move(NULL, BucketId(0x0000000000000000), timestamp=0, dbdId=(subDbId=0, lid=0), "
+                 "prevDbdId=(subDbId=0, lid=0), prevMarkedAsRemoved=false, prevTimestamp=0, serialNum=0)",
+                 MoveOperation().toString()); // "NULL": no document attached
+    EXPECT_EQUAL("Move(null::, BucketId(0x000000000000002a), timestamp=10, dbdId=(subDbId=1, lid=0), "
+                 "prevDbdId=(subDbId=0, lid=0), prevMarkedAsRemoved=false, prevTimestamp=0, serialNum=0)",
+                 MoveOperation(bucket_id1, timestamp, doc,
+                               db_doc_id, sub_db_id).toString()); // "null::": doc exists but has the default (empty) id
+
+    EXPECT_EQUAL("NewConfig(serialNum=64)",
+                 NewConfigOperation(64, stream_handler).toString());
+
+    EXPECT_EQUAL("Noop(serialNum=32)", NoopOperation(32).toString());
+
+    EXPECT_EQUAL("PruneRemovedDocuments(limitLid=0, subDbId=0, "
+                 "serialNum=0)",
+                 PruneRemovedDocumentsOperation().toString());
+    EXPECT_EQUAL("PruneRemovedDocuments(limitLid=15, subDbId=1, "
+                 "serialNum=0)",
+                 PruneRemovedDocumentsOperation(
+                         doc_id_limit, sub_db_id).toString());
+
+    EXPECT_EQUAL("Put(NULL, BucketId(0x0000000000000000), timestamp=0, dbdId=(subDbId=0, lid=0), "
+                 "prevDbdId=(subDbId=0, lid=0), prevMarkedAsRemoved=false, prevTimestamp=0, serialNum=0)",
+                 PutOperation().toString());
+    EXPECT_EQUAL("Put(null::, BucketId(0x000000000000002a), timestamp=10, dbdId=(subDbId=0, lid=0), "
+                 "prevDbdId=(subDbId=0, lid=0), prevMarkedAsRemoved=false, prevTimestamp=0, serialNum=0)",
+                 PutOperation(bucket_id1, timestamp, doc).toString());
+
+    EXPECT_EQUAL("Remove(null::, BucketId(0x0000000000000000), timestamp=0, dbdId=(subDbId=0, lid=0), "
+                 "prevDbdId=(subDbId=0, lid=0), prevMarkedAsRemoved=false, prevTimestamp=0, serialNum=0)",
+                 RemoveOperation().toString());
+    EXPECT_EQUAL("Remove(doc:foo:bar, BucketId(0x000000000000002a), timestamp=10, dbdId=(subDbId=0, lid=0), "
+                 "prevDbdId=(subDbId=0, lid=0), prevMarkedAsRemoved=false, prevTimestamp=0, serialNum=0)",
+                 RemoveOperation(bucket_id1, timestamp, doc_id).toString());
+
+    EXPECT_EQUAL("SplitBucket("
+                 "source=BucketId(0x0000000000000000), "
+                 "target1=BucketId(0x0000000000000000), "
+                 "target2=BucketId(0x0000000000000000), serialNum=0)",
+                 SplitBucketOperation().toString());
+    EXPECT_EQUAL("SplitBucket("
+                 "source=BucketId(0x000000000000002a), "
+                 "target1=BucketId(0x000000000000002b), "
+                 "target2=BucketId(0x000000000000002c), serialNum=0)",
+                 SplitBucketOperation(bucket_id1, bucket_id2, bucket_id3)
+                     .toString());
+
+    EXPECT_EQUAL("SpoolerReplayStart(spoolerSerialNum=0, serialNum=0)",
+                 SpoolerReplayStartOperation().toString());
+    EXPECT_EQUAL("SpoolerReplayStart(spoolerSerialNum=20, serialNum=10)",
+                 SpoolerReplayStartOperation(10, 20).toString()); // args: (serialNum, spoolerSerialNum)
+
+    EXPECT_EQUAL("SpoolerReplayComplete(spoolerSerialNum=0, serialNum=0)",
+                 SpoolerReplayCompleteOperation().toString());
+    EXPECT_EQUAL("SpoolerReplayComplete(spoolerSerialNum=2, serialNum=1)",
+                 SpoolerReplayCompleteOperation(1, 2).toString());
+
+    EXPECT_EQUAL("Update(NULL, BucketId(0x0000000000000000), timestamp=0, dbdId=(subDbId=0, lid=0), "
+                 "prevDbdId=(subDbId=0, lid=0), prevMarkedAsRemoved=false, prevTimestamp=0, serialNum=0)",
+                 UpdateOperation().toString());
+    EXPECT_EQUAL("Update(doc:foo:bar, BucketId(0x000000000000002a), timestamp=10, dbdId=(subDbId=0, lid=0), "
+                 "prevDbdId=(subDbId=0, lid=0), prevMarkedAsRemoved=false, prevTimestamp=0, serialNum=0)",
+                 UpdateOperation(bucket_id1, timestamp, update).toString());
+
+    EXPECT_EQUAL("WipeHistory(wipeTimeLimit=0, serialNum=0)",
+                 WipeHistoryOperation().toString());
+    EXPECT_EQUAL("WipeHistory(wipeTimeLimit=20, serialNum=10)",
+                 WipeHistoryOperation(10, 20).toString());
+    EXPECT_EQUAL("CompactLidSpace(subDbId=2, lidLimit=99, serialNum=0)",
+                 CompactLidSpaceOperation(2, 99).toString());
+}
+
+TEST("require that serialize/deserialize works for CompactLidSpaceOperation")
+{
+    vespalib::nbostream stream; // round-trip buffer shared between the two scopes
+    {
+        CompactLidSpaceOperation op(2, 99); // (subDbId, lidLimit)
+        EXPECT_EQUAL(FeedOperation::COMPACT_LID_SPACE, op.getType());
+        EXPECT_EQUAL(2u, op.getSubDbId());
+        EXPECT_EQUAL(99u, op.getLidLimit());
+        op.serialize(stream);
+    }
+    {
+        const document::DocumentTypeRepo *repo = NULL; // NOTE(review): deserialized via *repo below with repo == NULL — only safe if deserialize never dereferences the repo for this operation type; confirm
+        CompactLidSpaceOperation op;
+        op.deserialize(stream, *repo);
+        EXPECT_EQUAL(FeedOperation::COMPACT_LID_SPACE, op.getType());
+        EXPECT_EQUAL(2u, op.getSubDbId());
+        EXPECT_EQUAL(99u, op.getLidLimit());
+    }
+}
+
+} // namespace
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/feedtoken/.gitignore b/searchcore/src/tests/proton/feedtoken/.gitignore
new file mode 100644
index 00000000000..eee829c3cbf
--- /dev/null
+++ b/searchcore/src/tests/proton/feedtoken/.gitignore
@@ -0,0 +1,4 @@
+.depend
+Makefile
+feedtoken_test
+searchcore_feedtoken_test_app
diff --git a/searchcore/src/tests/proton/feedtoken/CMakeLists.txt b/searchcore/src/tests/proton/feedtoken/CMakeLists.txt
new file mode 100644
index 00000000000..328d872f668
--- /dev/null
+++ b/searchcore/src/tests/proton/feedtoken/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_feedtoken_test_app
+ SOURCES
+ feedtoken.cpp
+ DEPENDS
+ searchcore_pcommon
+ searchcore_proton_metrics
+)
+vespa_add_test(NAME searchcore_feedtoken_test_app COMMAND searchcore_feedtoken_test_app)
diff --git a/searchcore/src/tests/proton/feedtoken/DESC b/searchcore/src/tests/proton/feedtoken/DESC
new file mode 100644
index 00000000000..6fd2921a8c4
--- /dev/null
+++ b/searchcore/src/tests/proton/feedtoken/DESC
@@ -0,0 +1 @@
+feedtoken test. Take a look at feedtoken.cpp for details.
diff --git a/searchcore/src/tests/proton/feedtoken/FILES b/searchcore/src/tests/proton/feedtoken/FILES
new file mode 100644
index 00000000000..052aab3d388
--- /dev/null
+++ b/searchcore/src/tests/proton/feedtoken/FILES
@@ -0,0 +1 @@
+feedtoken.cpp
diff --git a/searchcore/src/tests/proton/feedtoken/feedtoken.cpp b/searchcore/src/tests/proton/feedtoken/feedtoken.cpp
new file mode 100644
index 00000000000..bd7d0b9cf6c
--- /dev/null
+++ b/searchcore/src/tests/proton/feedtoken/feedtoken.cpp
@@ -0,0 +1,158 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("feedtoken_test");
+
+#include <vespa/messagebus/emptyreply.h>
+#include <vespa/messagebus/testlib/receptor.h>
+#include <vespa/documentapi/messagebus/messages/removedocumentreply.h>
+#include <vespa/searchcore/proton/common/feedtoken.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+using namespace proton;
+
+class LocalTransport : public FeedToken::ITransport { // test double: captures the reply delivered by a FeedToken in-process
+private:
+    mbus::Receptor _receptor;  // holds the reply handed to send()
+    double _latency_ms;        // last latency reported by send()
+
+public:
+    LocalTransport()
+        : _receptor(),
+          _latency_ms(0.0)
+    {
+        // empty
+    }
+
+    void send(mbus::Reply::UP reply,
+              ResultUP,
+              bool,
+              double latency_ms) { // invoked by the FeedToken on ack()/fail(); result and success flag are ignored here
+        _receptor.handleReply(std::move(reply));
+        _latency_ms = latency_ms;
+    }
+
+    mbus::Reply::UP getReply() { // fetch the captured reply (ownership transferred to caller)
+        return _receptor.getReply();
+    }
+
+    double getLatencyMs() const {
+        return _latency_ms;
+    }
+};
+
+class Test : public vespalib::TestApp { // test driver for FeedToken behavior
+private:
+    void testAck();
+    void testAutoReply();
+    void testFail();
+    void testHandover();
+    void testIntegrity();
+    void testTrace();
+
+public:
+    int Main() {
+        TEST_INIT("feedtoken_test");
+
+        testAck(); TEST_FLUSH();
+// testAutoReply(); TEST_FLUSH(); // NOTE(review): disabled — confirm whether destroying an un-acked token still auto-fails the reply
+        testFail(); TEST_FLUSH();
+        testHandover(); TEST_FLUSH();
+// testIntegrity(); TEST_FLUSH(); // NOTE(review): disabled — confirm whether the ctor still throws on a null reply
+        testTrace(); TEST_FLUSH();
+
+        TEST_DONE();
+    }
+};
+
+TEST_APPHOOK(Test);
+
+void
+Test::testAck()
+{
+    LocalTransport transport;
+    mbus::Reply::UP msg(new documentapi::RemoveDocumentReply());
+    FeedToken token(transport, std::move(msg));
+    token.ack(); // ack must forward the reply to the transport without adding errors
+    mbus::Reply::UP reply = transport.getReply();
+    ASSERT_TRUE(reply.get() != NULL);
+    EXPECT_TRUE(!reply->hasErrors());
+}
+
+void
+Test::testAutoReply()
+{
+    mbus::Receptor receptor;
+    mbus::Reply::UP reply(new documentapi::RemoveDocumentReply());
+    reply->pushHandler(receptor); // route the reply back to the receptor when the token lets go of it
+    {
+        LocalTransport transport;
+        FeedToken token(transport, std::move(reply));
+    } // token destroyed without ack() — expected to auto-reply with an error
+    reply = receptor.getReply(0);
+    ASSERT_TRUE(reply.get() != NULL);
+    EXPECT_TRUE(reply->hasErrors());
+}
+
+void
+Test::testFail()
+{
+    LocalTransport transport;
+    mbus::Reply::UP reply(new documentapi::RemoveDocumentReply());
+    FeedToken token(transport, std::move(reply));
+    token.fail(69, "6699"); // arbitrary error code and message; must round-trip unchanged
+    reply = transport.getReply();
+    ASSERT_TRUE(reply.get() != NULL);
+    EXPECT_EQUAL(1u, reply->getNumErrors());
+    EXPECT_EQUAL(69u, reply->getError(0).getCode());
+    EXPECT_EQUAL("6699", reply->getError(0).getMessage());
+}
+
+void
+Test::testHandover()
+{
+    struct MyHandover { // passes the token through by value to exercise copy/assign semantics
+        static FeedToken handover(FeedToken token) {
+            return token;
+        }
+    };
+
+    LocalTransport transport;
+    mbus::Reply::UP reply(new documentapi::RemoveDocumentReply());
+
+    FeedToken token(transport, std::move(reply));
+    token = MyHandover::handover(token); // copying/reassigning must not trigger an auto-reply
+    token.ack();
+    reply = transport.getReply();
+    ASSERT_TRUE(reply.get() != NULL);
+    EXPECT_TRUE(!reply->hasErrors());
+}
+
+void
+Test::testIntegrity()
+{
+    LocalTransport transport;
+    try {
+        FeedToken token(transport, mbus::Reply::UP()); // null reply is an invalid argument
+        EXPECT_TRUE(false); // should throw an exception
+    } catch (vespalib::IllegalArgumentException &e) {
+        (void)e; // expected
+    }
+}
+
+void
+Test::testTrace()
+{
+    LocalTransport transport;
+    mbus::Reply::UP reply(new documentapi::RemoveDocumentReply());
+
+    FeedToken token(transport, std::move(reply));
+    token.trace(0, "foo"); // trace at level 0 so it is always recorded
+    token.ack();
+    reply = transport.getReply();
+    ASSERT_TRUE(reply.get() != NULL);
+    EXPECT_TRUE(!reply->hasErrors());
+    std::string trace = reply->getTrace().toString();
+    fprintf(stderr, "%s", trace.c_str()); // dump for debugging; assertion below is the actual check
+    EXPECT_TRUE(trace.find("foo") != std::string::npos);
+}
diff --git a/searchcore/src/tests/proton/flushengine/.gitignore b/searchcore/src/tests/proton/flushengine/.gitignore
new file mode 100644
index 00000000000..65d6633a4d1
--- /dev/null
+++ b/searchcore/src/tests/proton/flushengine/.gitignore
@@ -0,0 +1,2 @@
+/vlog.txt
+searchcore_flushengine_test_app
diff --git a/searchcore/src/tests/proton/flushengine/CMakeLists.txt b/searchcore/src/tests/proton/flushengine/CMakeLists.txt
new file mode 100644
index 00000000000..4fc59180946
--- /dev/null
+++ b/searchcore/src/tests/proton/flushengine/CMakeLists.txt
@@ -0,0 +1,13 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_flushengine_test_app
+ SOURCES
+ flushengine.cpp
+ DEPENDS
+ searchcore_flushengine
+ searchcore_pcommon
+)
+vespa_add_test(
+ NAME searchcore_flushengine_test_app
+ COMMAND searchcore_flushengine_test_app
+ ENVIRONMENT "VESPA_LOG_LEVEL=all;VESPA_LOG_TARGET=file:vlog.txt"
+)
diff --git a/searchcore/src/tests/proton/flushengine/DESC b/searchcore/src/tests/proton/flushengine/DESC
new file mode 100644
index 00000000000..87a9f388463
--- /dev/null
+++ b/searchcore/src/tests/proton/flushengine/DESC
@@ -0,0 +1 @@
+flushengine test. Take a look at flushengine.cpp for details.
diff --git a/searchcore/src/tests/proton/flushengine/FILES b/searchcore/src/tests/proton/flushengine/FILES
new file mode 100644
index 00000000000..5156cba47e7
--- /dev/null
+++ b/searchcore/src/tests/proton/flushengine/FILES
@@ -0,0 +1 @@
+flushengine.cpp
diff --git a/searchcore/src/tests/proton/flushengine/flushengine.cpp b/searchcore/src/tests/proton/flushengine/flushengine.cpp
new file mode 100644
index 00000000000..59b86671a0d
--- /dev/null
+++ b/searchcore/src/tests/proton/flushengine/flushengine.cpp
@@ -0,0 +1,605 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("flushengine_test");
+
+#include <vespa/searchcore/proton/flushengine/cachedflushtarget.h>
+#include <vespa/searchcore/proton/flushengine/flush_engine_explorer.h>
+#include <vespa/searchcore/proton/flushengine/flushengine.h>
+#include <vespa/searchcore/proton/flushengine/threadedflushtarget.h>
+#include <vespa/searchcore/proton/flushengine/tls_stats_map.h>
+#include <vespa/searchcore/proton/flushengine/i_tls_stats_factory.h>
+#include <vespa/searchcore/proton/server/igetserialnum.h>
+#include <vespa/searchcore/proton/test/dummy_flush_handler.h>
+#include <vespa/searchcore/proton/test/dummy_flush_target.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/data/slime/slime.h>
+#include <vespa/vespalib/util/sync.h>
+#include <memory>
+
+// --------------------------------------------------------------------------------
+//
+// Setup.
+//
+// --------------------------------------------------------------------------------
+
+using namespace proton;
+using namespace vespalib::slime;
+using searchcorespi::FlushTask;
+using vespalib::Slime;
+
+const long LONG_TIMEOUT = 66666;
+const long SHORT_TIMEOUT = 1;
+const uint32_t IINTERVAL = 1000;
+
+class SimpleExecutor : public vespalib::Executor { // executor stub: runs each task synchronously on the caller's thread
+public:
+    vespalib::Gate _done; // opened once a task has been executed
+
+public:
+    SimpleExecutor()
+        : _done()
+    {
+        // empty
+    }
+
+    Task::UP
+    execute(Task::UP task)
+    {
+        task->run(); // inline, no thread handoff
+        _done.countDown();
+        return Task::UP(); // empty UP: task was accepted, nothing rejected
+    }
+};
+
+class SimpleGetSerialNum : public IGetSerialNum
+{ // stub: always reports serial number 0
+    virtual search::SerialNum getSerialNum() const override {
+        return 0u;
+    }
+};
+
+class SimpleTlsStatsFactory : public flushengine::ITlsStatsFactory
+{ // stub: produces an empty TLS-stats map so the engine has no per-domain stats
+    virtual flushengine::TlsStatsMap create() override {
+        vespalib::hash_map<vespalib::string, flushengine::TlsStats> map;
+        return flushengine::TlsStatsMap(std::move(map));
+    }
+};
+
+typedef std::vector<IFlushTarget::SP> Targets;
+
+class SimpleHandler : public test::DummyFlushHandler { // flush handler stub exposing its targets and recording flushDone() calls
+public:
+    Targets _targets;                  // targets reported by getFlushTargets()
+    search::SerialNum _oldestSerial;   // highest oldestSerial seen in flushDone()
+    search::SerialNum _currentSerial;  // value reported by getCurrentSerialNumber()
+    vespalib::CountDownLatch _done;    // opens after one flushDone() per target
+
+public:
+    typedef std::shared_ptr<SimpleHandler> SP;
+
+    SimpleHandler(const Targets &targets, const std::string &name = "anon",
+                  search::SerialNum currentSerial = -1) // NB: -1 wraps to the max unsigned SerialNum, i.e. "everything is behind"
+        : test::DummyFlushHandler(name),
+          _targets(targets),
+          _oldestSerial(0),
+          _currentSerial(currentSerial),
+          _done(targets.size())
+    {
+        // empty
+    }
+
+    search::SerialNum
+    getCurrentSerialNumber() const override
+    {
+        LOG(info, "SimpleHandler(%s)::getCurrentSerialNumber()",
+            getName().c_str());
+        return _currentSerial;
+    }
+
+    std::vector<IFlushTarget::SP>
+    getFlushTargets() override
+    {
+        LOG(info, "SimpleHandler(%s)::getFlushTargets()",
+            getName().c_str());
+        return _targets;
+    }
+
+    void
+    flushDone(search::SerialNum oldestSerial) override
+    {
+        LOG(info, "SimpleHandler(%s)::flushDone(%" PRIu64 ")",
+            getName().c_str(), oldestSerial);
+        _oldestSerial = std::max(_oldestSerial, oldestSerial); // keep the largest value reported so far
+        _done.countDown();
+    }
+
+};
+
+class SimpleTask : public searchcorespi::FlushTask { // flush task whose progress is observable/controllable through gates
+public:
+    vespalib::Gate &_start;   // opened when run() begins
+    vespalib::Gate &_done;    // opened when run() completes
+    vespalib::Gate *_proceed; // if non-NULL, run() blocks here until opened by the test
+
+public:
+    SimpleTask(vespalib::Gate &start,
+               vespalib::Gate &done,
+               vespalib::Gate *proceed)
+        : _start(start), _done(done), _proceed(proceed)
+    {
+        // empty
+    }
+
+    void run() { // NOTE(review): could be marked 'override' for consistency with the rest of the file
+        _start.countDown();
+        if (_proceed != NULL) {
+            _proceed->await(); // park until the test releases us
+        }
+        _done.countDown();
+    }
+
+    virtual search::SerialNum
+    getFlushSerial(void) const
+    {
+        return 0u;
+    }
+};
+
+class SimpleTarget : public test::DummyFlushTarget { // flush target stub; exposes gates so tests can observe/steer the flush lifecycle
+public:
+    search::SerialNum _flushedSerial; // reported by getFlushedSerialNum()
+    vespalib::Gate _proceed;   // releases the flush task (see SimpleTask)
+    vespalib::Gate _initDone;  // opened when initFlush() has been called
+    vespalib::Gate _taskStart; // opened when the flush task starts running
+    vespalib::Gate _taskDone;  // opened when the flush task finishes
+    Task::UP _task;            // task handed out by initFlush()
+
+public:
+    typedef std::shared_ptr<SimpleTarget> SP;
+
+    SimpleTarget(Task::UP task, const std::string &name) : // use an externally supplied flush task
+        test::DummyFlushTarget(name),
+        _flushedSerial(0),
+        _proceed(),
+        _initDone(),
+        _taskStart(),
+        _taskDone(),
+        _task(std::move(task))
+    {
+    }
+
+    SimpleTarget(const std::string &name = "anon", search::SerialNum flushedSerial = 0, bool proceedImmediately = true) :
+        test::DummyFlushTarget(name),
+        _flushedSerial(flushedSerial),
+        _proceed(),
+        _initDone(),
+        _taskStart(),
+        _taskDone(),
+        _task(new SimpleTask(_taskStart, _taskDone, &_proceed))
+    {
+        if (proceedImmediately) {
+            _proceed.countDown(); // task will run to completion without test intervention
+        }
+    }
+
+    virtual Time
+    getLastFlushTime() const override { return fastos::ClockSystem::now(); }
+
+    virtual SerialNum
+    getFlushedSerialNum() const override
+    {
+        LOG(info, "SimpleTarget(%s)::getFlushedSerialNum()",
+            getName().c_str());
+        return _flushedSerial;
+    }
+
+    virtual Task::UP
+    initFlush(SerialNum currentSerial) override
+    {
+        LOG(info, "SimpleTarget(%s)::initFlush(%" PRIu64 ")",
+            getName().c_str(), currentSerial);
+        _initDone.countDown();
+        return std::move(_task); // hands ownership out: a later initFlush() returns an empty UP, i.e. refuses the flush
+    }
+
+};
+
+class AssertedTarget : public SimpleTarget { // target that aborts (LOG_ASSERT) if either getter is invoked more than once — proves CachedFlushTarget caches
+public:
+    mutable bool _mgain;  // set once getApproxMemoryGain() has been called
+    mutable bool _serial; // set once getFlushedSerialNum() has been called
+
+public:
+    typedef std::shared_ptr<AssertedTarget> SP;
+
+    AssertedTarget(const std::string &name = "anon")
+        : SimpleTarget(name),
+          _mgain(false),
+          _serial(false)
+    {
+        // empty
+    }
+
+    virtual MemoryGain
+    getApproxMemoryGain() const override // 'override' added: matches the convention used elsewhere in this file and lets the compiler verify the signature
+    {
+        LOG_ASSERT(_mgain == false); // at most one call allowed; repeats must be served from CachedFlushTarget's cache
+        _mgain = true;
+        return SimpleTarget::getApproxMemoryGain();
+    }
+
+    virtual search::SerialNum
+    getFlushedSerialNum() const override // 'override' added, same rationale
+    {
+        LOG_ASSERT(_serial == false);
+        _serial = true;
+        return SimpleTarget::getFlushedSerialNum();
+    }
+};
+
+class SimpleStrategy : public IFlushStrategy { // strategy stub: flush order is exactly the order of _targets
+public:
+    std::vector<IFlushTarget::SP> _targets; // desired flush order, set directly by the tests
+
+    struct CompareTarget { // orders flush contexts by their target's position in _targets
+        CompareTarget(const SimpleStrategy &flush) : _flush(flush) { }
+        bool operator () (const FlushContext::SP &lhs, const FlushContext::SP &rhs) const {
+            return _flush.compare(lhs->getTarget(), rhs->getTarget());
+        }
+        const SimpleStrategy &_flush;
+    };
+
+    virtual FlushContext::List getFlushTargets(const FlushContext::List &targetList,
+                                               const flushengine::TlsStatsMap &) const override {
+        FlushContext::List fv(targetList);
+        std::sort(fv.begin(), fv.end(), CompareTarget(*this));
+        return fv;
+    }
+
+    bool
+    compare(const IFlushTarget::SP &lhs, const IFlushTarget::SP &rhs) const
+    {
+        LOG(info, "SimpleStrategy::compare(%p, %p)", lhs.get(), rhs.get());
+        return indexOf(lhs) < indexOf(rhs);
+    }
+
+
+public:
+    typedef std::shared_ptr<SimpleStrategy> SP;
+
+    SimpleStrategy()
+    {
+        // empty
+    }
+
+    uint32_t
+    indexOf(const IFlushTarget::SP &target) const
+    {
+        IFlushTarget *raw = target.get();
+        CachedFlushTarget *cached = dynamic_cast<CachedFlushTarget*>(raw);
+        if (cached != NULL) {
+            raw = cached->getFlushTarget().get(); // compare against the wrapped target, not the cache decorator
+        }
+        for (uint32_t i = 0, len = _targets.size(); i < len; ++i) {
+            if (raw == _targets[i].get()) {
+                LOG(info, "Index of target %p is %d.", raw, i);
+                return i;
+            }
+        }
+        LOG(info, "Target %p not found.", raw);
+        return -1; // wraps to UINT32_MAX: unknown targets sort last
+    }
+};
+
+class ConstantFlushStrategy : public SimpleStrategy { // SimpleStrategy carrying a fixed millisecond value
+public:
+    uint64_t _millis; // NOTE(review): never read anywhere in this file — confirm it is still needed
+
+public:
+    ConstantFlushStrategy(uint64_t millis) : SimpleStrategy(), _millis(millis) { }
+    typedef std::shared_ptr<ConstantFlushStrategy> SP;
+};
+
+// --------------------------------------------------------------------------------
+//
+// Tests.
+//
+// --------------------------------------------------------------------------------
+
+class AppendTask : public FlushTask // flush task that records its name into a shared list, so tests can assert execution order
+{
+public:
+    AppendTask(const vespalib::string & name, std::vector<vespalib::string> & list, vespalib::Gate & done) :
+        _list(list),
+        _done(done),
+        _name(name)
+    { }
+    void run() {
+        _list.push_back(_name); // record execution order
+        _done.countDown();
+    }
+    virtual search::SerialNum
+    getFlushSerial(void) const
+    {
+        return 0u;
+    }
+    std::vector<vespalib::string> & _list; // shared order log (owned by the test)
+    vespalib::Gate & _done;                // opened when this task has run
+    vespalib::string _name;
+};
+
+
+struct Fixture // common test setup: a FlushEngine wired to the stub strategy and stats factory
+{
+    std::shared_ptr<flushengine::ITlsStatsFactory> tlsStatsFactory;
+    SimpleStrategy::SP strategy; // tests push targets into strategy->_targets before engine.start()
+    FlushEngine engine;
+
+    Fixture(uint32_t numThreads, uint32_t idleIntervalMS)
+        : tlsStatsFactory(std::make_shared<SimpleTlsStatsFactory>()),
+          strategy(std::make_shared<SimpleStrategy>()),
+          engine(tlsStatsFactory, strategy, numThreads, idleIntervalMS, false)
+    {
+    }
+};
+
+
+TEST_F("require that strategy controls flush target", Fixture(1, IINTERVAL))
+{
+    vespalib::Gate fooG, barG;
+    std::vector<vespalib::string> order;
+    FlushTask::UP fooT(new AppendTask("foo", order, fooG));
+    FlushTask::UP barT(new AppendTask("bar", order, barG));
+    SimpleTarget::SP foo(new SimpleTarget(std::move(fooT), "foo"));
+    SimpleTarget::SP bar(new SimpleTarget(std::move(barT), "bar"));
+    f.strategy->_targets.push_back(foo); // strategy order: foo before bar
+    f.strategy->_targets.push_back(bar);
+
+    SimpleHandler::SP handler(new SimpleHandler({bar, foo})); // handler reports the reverse order; the strategy must win
+    DocTypeName dtnvanon("anon");
+    f.engine.putFlushHandler(dtnvanon, handler);
+    f.engine.start();
+
+    EXPECT_TRUE(fooG.await(LONG_TIMEOUT));
+    EXPECT_TRUE(barG.await(LONG_TIMEOUT));
+    EXPECT_EQUAL(2u, order.size());
+    EXPECT_EQUAL("foo", order[0]);
+    EXPECT_EQUAL("bar", order[1]);
+}
+
+TEST_F("require that zero handlers does not core", Fixture(2, 50))
+{
+    f.engine.start(); // engine with no handlers must start (and shut down) cleanly
+}
+
+TEST_F("require that zero targets does not core", Fixture(2, 50))
+{
+    DocTypeName dtnvfoo("foo");
+    DocTypeName dtnvbar("bar");
+    f.engine.putFlushHandler(dtnvfoo,
+                             IFlushHandler::SP(new SimpleHandler({}, "foo"))); // handler with an empty target list
+    f.engine.putFlushHandler(dtnvbar,
+                             IFlushHandler::SP(new SimpleHandler({}, "bar")));
+    f.engine.start();
+}
+
+TEST_F("require that oldest serial is found", Fixture(1, IINTERVAL))
+{
+    SimpleTarget::SP foo(new SimpleTarget("foo", 10)); // flushed up to serial 10
+    SimpleTarget::SP bar(new SimpleTarget("bar", 20)); // flushed up to serial 20
+    f.strategy->_targets.push_back(foo);
+    f.strategy->_targets.push_back(bar);
+
+    SimpleHandler::SP handler(new SimpleHandler({foo, bar}, "anon", 25));
+    DocTypeName dtnvanon("anon");
+    f.engine.putFlushHandler(dtnvanon, handler);
+    f.engine.start();
+
+    EXPECT_TRUE(handler->_done.await(LONG_TIMEOUT));
+    EXPECT_EQUAL(20ul, handler->_oldestSerial); // min across targets after both flushed = 20
+}
+
+TEST_F("require that oldest serial is found in group", Fixture(2, IINTERVAL))
+{
+    SimpleTarget::SP fooT1(new SimpleTarget("fooT1", 10));
+    SimpleTarget::SP fooT2(new SimpleTarget("fooT2", 20));
+    SimpleTarget::SP barT1(new SimpleTarget("barT1", 5));
+    SimpleTarget::SP barT2(new SimpleTarget("barT2", 15));
+    f.strategy->_targets.push_back(fooT1);
+    f.strategy->_targets.push_back(fooT2);
+    f.strategy->_targets.push_back(barT1);
+    f.strategy->_targets.push_back(barT2);
+
+    SimpleHandler::SP fooH(new SimpleHandler({fooT1, fooT2}, "fooH", 25));
+    DocTypeName dtnvfoo("foo");
+    f.engine.putFlushHandler(dtnvfoo, fooH);
+
+    SimpleHandler::SP barH(new SimpleHandler({barT1, barT2}, "barH", 20));
+    DocTypeName dtnvbar("bar");
+    f.engine.putFlushHandler(dtnvbar, barH);
+
+    f.engine.start();
+
+    EXPECT_TRUE(fooH->_done.await(LONG_TIMEOUT));
+    EXPECT_EQUAL(20ul, fooH->_oldestSerial); // oldest serial is tracked per handler, not globally
+    EXPECT_TRUE(barH->_done.await(LONG_TIMEOUT));
+    EXPECT_EQUAL(15ul, barH->_oldestSerial);
+}
+
+TEST_F("require that target can refuse flush", Fixture(2, IINTERVAL))
+{
+    SimpleTarget::SP target(new SimpleTarget());
+    SimpleHandler::SP handler(new SimpleHandler({target}));
+    target->_task = searchcorespi::FlushTask::UP(); // empty task: initFlush() will return nothing, i.e. refuse
+    DocTypeName dtnvanon("anon");
+    f.engine.putFlushHandler(dtnvanon, handler);
+    f.engine.start();
+
+    EXPECT_TRUE(target->_initDone.await(LONG_TIMEOUT));
+    EXPECT_TRUE(!target->_taskDone.await(SHORT_TIMEOUT));  // no task may run
+    EXPECT_TRUE(!handler->_done.await(SHORT_TIMEOUT));     // and flushDone must not be reported
+}
+
+TEST_F("require that targets are flushed when nothing new to flush",
+       Fixture(2, IINTERVAL))
+{
+    SimpleTarget::SP target(new SimpleTarget("anon", 5)); // oldest unflushed serial num = 5
+    SimpleHandler::SP handler(new SimpleHandler({target}, "anon", 4)); // current serial num = 4
+    DocTypeName dtnvanon("anon");
+    f.engine.putFlushHandler(dtnvanon, handler);
+    f.engine.start();
+
+    EXPECT_TRUE(target->_initDone.await(LONG_TIMEOUT)); // flush must still be initiated even though flushed serial > current serial
+    EXPECT_TRUE(target->_taskDone.await(LONG_TIMEOUT));
+    EXPECT_TRUE(handler->_done.await(LONG_TIMEOUT));
+}
+
+TEST_F("require that flushing targets are skipped", Fixture(2, IINTERVAL))
+{
+    SimpleTarget::SP foo(new SimpleTarget("foo"));
+    SimpleTarget::SP bar(new SimpleTarget("bar"));
+    f.strategy->_targets.push_back(foo);
+    f.strategy->_targets.push_back(bar);
+
+    SimpleHandler::SP handler(new SimpleHandler({bar, foo}));
+    DocTypeName dtnvanon("anon");
+    f.engine.putFlushHandler(dtnvanon, handler);
+    f.engine.start();
+
+    EXPECT_TRUE(foo->_taskDone.await(LONG_TIMEOUT));
+    EXPECT_TRUE(bar->_taskDone.await(LONG_TIMEOUT)); /* this is the key check */
+}
+
+TEST_F("require that updated targets are not skipped", Fixture(2, IINTERVAL))
+{
+    SimpleTarget::SP target(new SimpleTarget("target", 1));
+    f.strategy->_targets.push_back(target);
+
+    SimpleHandler::SP handler(new SimpleHandler({target}, "handler", 0)); // current serial 0 < target's flushed serial 1
+    DocTypeName dtnvhandler("handler");
+    f.engine.putFlushHandler(dtnvhandler, handler);
+    f.engine.start();
+
+    EXPECT_TRUE(target->_taskDone.await(LONG_TIMEOUT));
+}
+
+TEST("require that threaded target works")
+{
+    SimpleExecutor executor;
+    SimpleGetSerialNum getSerialNum;
+    IFlushTarget::SP target(new SimpleTarget());
+    target.reset(new ThreadedFlushTarget(executor, getSerialNum, target)); // wrap: initFlush must go through the executor
+
+    EXPECT_FALSE(executor._done.await(SHORT_TIMEOUT)); // nothing executed before initFlush
+    EXPECT_TRUE(target->initFlush(0).get() != NULL);
+    EXPECT_TRUE(executor._done.await(LONG_TIMEOUT));   // initFlush was dispatched via the executor
+}
+
+TEST("require that cached target works")
+{
+    IFlushTarget::SP target(new AssertedTarget()); // aborts if a getter is called twice
+    target.reset(new CachedFlushTarget(target));
+    for (uint32_t i = 0; i < 2; ++i) { // second iteration must be served from the cache
+        EXPECT_EQUAL(0l, target->getApproxMemoryGain().getBefore());
+        EXPECT_EQUAL(0l, target->getApproxMemoryGain().getAfter());
+        EXPECT_EQUAL(0ul, target->getFlushedSerialNum());
+    }
+}
+
+TEST_F("require that trigger flush works", Fixture(2, IINTERVAL))
+{
+    SimpleTarget::SP target(new SimpleTarget("target", 1));
+    f.strategy->_targets.push_back(target);
+
+    SimpleHandler::SP handler(new SimpleHandler({target}, "handler", 9));
+    DocTypeName dtnvhandler("handler");
+    f.engine.putFlushHandler(dtnvhandler, handler);
+    f.engine.start();
+    f.engine.triggerFlush(); // explicit trigger instead of waiting for the idle interval
+    EXPECT_TRUE(target->_initDone.await(LONG_TIMEOUT));
+    EXPECT_TRUE(target->_taskDone.await(LONG_TIMEOUT));
+}
+
+bool
+asserCorrectHandlers(const FlushEngine::FlushMetaSet & current1, const std::vector<const char *> & targets) // NOTE(review): name has a typo ("asser" -> "assert"); rename together with its single caller below
+{
+    bool retval(targets.size() == current1.size()); // sizes must match AND names must match in iteration order
+    FlushEngine::FlushMetaSet::const_iterator curr(current1.begin());
+    if (retval) {
+        for (const char * target : targets) {
+            if (target != (curr++)->getName()) {
+                return false;
+            }
+        }
+    }
+    return retval;
+}
+
+void
+assertThatHandlersInCurrentSet(FlushEngine & engine, const std::vector<const char *> & targets) // polls until the engine's currently-flushing set equals 'targets'
+{
+    FlushEngine::FlushMetaSet current1 = engine.getCurrentlyFlushingSet();
+    while ((current1.size() < targets.size()) || !asserCorrectHandlers(current1, targets)) { // NOTE(review): no timeout — a failing expectation hangs the test instead of failing it
+        FastOS_Thread::Sleep(1);
+        current1 = engine.getCurrentlyFlushingSet();
+    }
+}
+
+TEST_F("require that concurrency works", Fixture(2, 1))
+{
+    SimpleTarget::SP target1(new SimpleTarget("target1", 1, false)); // 'false': tasks block until _proceed is opened
+    SimpleTarget::SP target2(new SimpleTarget("target2", 2, false));
+    SimpleTarget::SP target3(new SimpleTarget("target3", 3, false));
+    SimpleHandler::SP handler(new SimpleHandler({target1, target2, target3}, "handler", 9));
+    DocTypeName dtnvhandler("handler");
+    f.engine.putFlushHandler(dtnvhandler, handler);
+    f.engine.start();
+    EXPECT_TRUE(target1->_initDone.await(LONG_TIMEOUT));
+    EXPECT_TRUE(target2->_initDone.await(LONG_TIMEOUT));
+    EXPECT_TRUE(!target3->_initDone.await(SHORT_TIMEOUT)); // both threads busy: third flush must wait
+    assertThatHandlersInCurrentSet(f.engine, {"handler.target1", "handler.target2"});
+    EXPECT_TRUE(!target3->_initDone.await(SHORT_TIMEOUT));
+    target1->_proceed.countDown(); // free one thread
+    EXPECT_TRUE(target1->_taskDone.await(LONG_TIMEOUT));
+    assertThatHandlersInCurrentSet(f.engine, {"handler.target2", "handler.target3"});
+    target3->_proceed.countDown(); // release the rest so the fixture can shut down
+    target2->_proceed.countDown();
+}
+
+TEST_F("require that state explorer can list flush targets", Fixture(1, 1))
+{
+    SimpleTarget::SP target = std::make_shared<SimpleTarget>("target1", 100, false); // blocked mid-flush so it shows up as "flushing"
+    f.engine.putFlushHandler(DocTypeName("handler"),
+                             std::make_shared<SimpleHandler>(
+                                     Targets({target, std::make_shared<SimpleTarget>("target2", 50, true)}),
+                                     "handler", 9));
+    f.engine.start();
+    target->_initDone.await(LONG_TIMEOUT);
+    target->_taskStart.await(LONG_TIMEOUT); // target1 is now actively flushing
+
+    FlushEngineExplorer explorer(f.engine);
+    Slime state;
+    SlimeInserter inserter(state);
+    explorer.get_state(inserter, true);
+
+    Inspector &all = state.get()["allTargets"];
+    EXPECT_EQUAL(2u, all.children());
+    EXPECT_EQUAL("handler.target2", all[0]["name"].asString().make_string()); // listed with the lowest flushed serial first
+    EXPECT_EQUAL(50, all[0]["flushedSerialNum"].asLong());
+    EXPECT_EQUAL("handler.target1", all[1]["name"].asString().make_string());
+    EXPECT_EQUAL(100, all[1]["flushedSerialNum"].asLong());
+
+    Inspector &flushing = state.get()["flushingTargets"];
+    EXPECT_EQUAL(1u, flushing.children());
+    EXPECT_EQUAL("handler.target1", flushing[0]["name"].asString().make_string());
+
+    target->_proceed.countDown(); // unblock so the engine can stop cleanly
+    target->_taskDone.await(LONG_TIMEOUT);
+}
+
+TEST_MAIN() // test runner entry point
+{
+    TEST_RUN_ALL();
+}
diff --git a/searchcore/src/tests/proton/flushengine/prepare_restart_flush_strategy/CMakeLists.txt b/searchcore/src/tests/proton/flushengine/prepare_restart_flush_strategy/CMakeLists.txt
new file mode 100644
index 00000000000..a4bff892ffa
--- /dev/null
+++ b/searchcore/src/tests/proton/flushengine/prepare_restart_flush_strategy/CMakeLists.txt
@@ -0,0 +1,12 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_flushengine_prepare_restart_flush_strategy_test_app
+ SOURCES
+ prepare_restart_flush_strategy_test.cpp
+ DEPENDS
+ searchcorespi
+ searchcore_flushengine
+)
+vespa_add_test(
+ NAME searchcore_flushengine_prepare_restart_flush_strategy_test_app
+ COMMAND searchcore_flushengine_prepare_restart_flush_strategy_test_app
+)
diff --git a/searchcore/src/tests/proton/flushengine/prepare_restart_flush_strategy/DESC b/searchcore/src/tests/proton/flushengine/prepare_restart_flush_strategy/DESC
new file mode 100644
index 00000000000..77474a51fbb
--- /dev/null
+++ b/searchcore/src/tests/proton/flushengine/prepare_restart_flush_strategy/DESC
@@ -0,0 +1 @@
+prepare_restart_flush_strategy test. Take a look at prepare_restart_flush_strategy_test.cpp for details.
diff --git a/searchcore/src/tests/proton/flushengine/prepare_restart_flush_strategy/FILES b/searchcore/src/tests/proton/flushengine/prepare_restart_flush_strategy/FILES
new file mode 100644
index 00000000000..35ad6c54f3c
--- /dev/null
+++ b/searchcore/src/tests/proton/flushengine/prepare_restart_flush_strategy/FILES
@@ -0,0 +1 @@
+prepare_restart_flush_strategy_test.cpp
diff --git a/searchcore/src/tests/proton/flushengine/prepare_restart_flush_strategy/prepare_restart_flush_strategy_test.cpp b/searchcore/src/tests/proton/flushengine/prepare_restart_flush_strategy/prepare_restart_flush_strategy_test.cpp
new file mode 100644
index 00000000000..ac3dbb8fed2
--- /dev/null
+++ b/searchcore/src/tests/proton/flushengine/prepare_restart_flush_strategy/prepare_restart_flush_strategy_test.cpp
@@ -0,0 +1,297 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+#include <vespa/searchcore/proton/flushengine/prepare_restart_flush_strategy.h>
+#include <vespa/searchcore/proton/flushengine/flush_target_candidates.h>
+#include <vespa/searchcore/proton/flushengine/tls_stats_map.h>
+#include <vespa/searchcore/proton/test/dummy_flush_handler.h>
+#include <vespa/searchcore/proton/test/dummy_flush_target.h>
+
+using namespace proton;
+using search::SerialNum;
+
+using SimpleFlushHandler = test::DummyFlushHandler;
+using FlushCandidatesList = std::vector<FlushTargetCandidates>;
+using Config = PrepareRestartFlushStrategy::Config;
+
+const Config DEFAULT_CFG(2.0, 4.0);
+
+// Flush target stub: reports a fixed flushed serial number and a fixed
+// approximation of bytes to write to disk; all other behavior is inherited
+// from test::DummyFlushTarget.
+struct SimpleFlushTarget : public test::DummyFlushTarget
+{
+    SerialNum flushedSerial;   // returned by getFlushedSerialNum()
+    uint64_t approxDiskBytes;  // returned by getApproxBytesToWriteToDisk()
+    SimpleFlushTarget(const vespalib::string &name,
+                      SerialNum flushedSerial_,
+                      uint64_t approxDiskBytes_)
+        : test::DummyFlushTarget(name),
+          flushedSerial(flushedSerial_),
+          approxDiskBytes(approxDiskBytes_)
+    {}
+    // Variant that also lets the test control the target type (e.g. GC).
+    SimpleFlushTarget(const vespalib::string &name,
+                      const Type &type,
+                      SerialNum flushedSerial_,
+                      uint64_t approxDiskBytes_)
+        : test::DummyFlushTarget(name, type, Component::OTHER),
+          flushedSerial(flushedSerial_),
+          approxDiskBytes(approxDiskBytes_)
+    {}
+    virtual SerialNum getFlushedSerialNum() const override {
+        return flushedSerial;
+    }
+    virtual uint64_t getApproxBytesToWriteToDisk() const override {
+        return approxDiskBytes;
+    }
+};
+
+// Fluent builder for a FlushContext::List. Contexts added under the same
+// handler name share a single SimpleFlushHandler instance.
+class ContextsBuilder
+{
+private:
+    FlushContext::List _result;
+    std::map<vespalib::string, IFlushHandler::SP> _handlers;
+
+    // Returns the handler registered under handlerName, creating it on first use.
+    IFlushHandler::SP createAndGetHandler(const vespalib::string &handlerName) {
+        auto itr = _handlers.find(handlerName);
+        if (itr != _handlers.end()) {
+            return itr->second;
+        }
+        IFlushHandler::SP handler = std::make_shared<SimpleFlushHandler>(handlerName);
+        _handlers.insert(std::make_pair(handlerName, handler));
+        return handler;
+    }
+
+public:
+    ContextsBuilder() : _result(), _handlers() {}
+    // Adds a context for a SimpleFlushTarget with the given type and stats.
+    ContextsBuilder &add(const vespalib::string &handlerName,
+                         const vespalib::string &targetName,
+                         IFlushTarget::Type targetType,
+                         SerialNum flushedSerial,
+                         uint64_t approxDiskBytes) {
+        IFlushHandler::SP handler = createAndGetHandler(handlerName);
+        IFlushTarget::SP target = std::make_shared<SimpleFlushTarget>(targetName,
+                                                                      targetType,
+                                                                      flushedSerial,
+                                                                      approxDiskBytes);
+        _result.push_back(std::make_shared<FlushContext>(handler, target, 0, 0));
+        return *this;
+    }
+    // Convenience: FLUSH-type target under an explicit handler.
+    ContextsBuilder &add(const vespalib::string &handlerName,
+                         const vespalib::string &targetName,
+                         SerialNum flushedSerial,
+                         uint64_t approxDiskBytes) {
+        return add(handlerName, targetName, IFlushTarget::Type::FLUSH, flushedSerial, approxDiskBytes);
+    }
+    // Convenience: FLUSH-type target under the default "handler1".
+    ContextsBuilder &add(const vespalib::string &targetName,
+                         SerialNum flushedSerial,
+                         uint64_t approxDiskBytes) {
+        return add("handler1", targetName, IFlushTarget::Type::FLUSH, flushedSerial, approxDiskBytes);
+    }
+    // Convenience: GC-type target under the default "handler1".
+    ContextsBuilder &addGC(const vespalib::string &targetName,
+                           SerialNum flushedSerial,
+                           uint64_t approxDiskBytes) {
+        return add("handler1", targetName, IFlushTarget::Type::GC, flushedSerial, approxDiskBytes);
+    }
+    FlushContext::List build() const { return _result; }
+};
+
+// Fluent builder for FlushTargetCandidates. Defaults: all given contexts are
+// candidates, TLS stats are (1000 bytes, serials 11..110), and costs come
+// from DEFAULT_CFG.
+class CandidatesBuilder
+{
+private:
+    const FlushContext::List *_sortedFlushContexts;
+    size_t _numCandidates;
+    flushengine::TlsStats _tlsStats;
+    Config _cfg;
+
+public:
+    CandidatesBuilder(const FlushContext::List &sortedFlushContexts)
+        : _sortedFlushContexts(&sortedFlushContexts),
+          _numCandidates(sortedFlushContexts.size()),
+          _tlsStats(1000, 11, 110),
+          _cfg(DEFAULT_CFG)
+    {}
+    CandidatesBuilder &flushContexts(const FlushContext::List &sortedFlushContexts) {
+        _sortedFlushContexts = &sortedFlushContexts;
+        _numCandidates = sortedFlushContexts.size();
+        return *this;
+    }
+    CandidatesBuilder &numCandidates(size_t numCandidates) {
+        _numCandidates = numCandidates;
+        return *this;
+    }
+    // Rebuilds the TLS stats with a new last serial number, keeping the byte
+    // count and first serial unchanged.
+    CandidatesBuilder &replayEnd(SerialNum replayEndSerial) {
+        flushengine::TlsStats oldTlsStats = _tlsStats;
+        _tlsStats = flushengine::TlsStats(oldTlsStats.getNumBytes(),
+                                          oldTlsStats.getFirstSerial(),
+                                          replayEndSerial);
+        return *this;
+    }
+    FlushTargetCandidates build() const {
+        return FlushTargetCandidates(*_sortedFlushContexts,
+                                     _numCandidates,
+                                     _tlsStats,
+                                     _cfg);
+    }
+};
+
+// Test fixture: an empty context list plus a builder wired to it; tests swap
+// in their own contexts via builder.flushContexts().
+struct CandidatesFixture
+{
+    FlushContext::List emptyContexts;
+    CandidatesBuilder builder;
+    CandidatesFixture() : emptyContexts(), builder(emptyContexts) {}
+};
+
+// Replay cost = (bytes still to replay) * tlsReplayCost (2.0). With 1000
+// bytes covering serials 11..110, a full replay costs 1000 * 2.0 = 2000.
+TEST_F("require that tls replay cost is correct for 100% replay", CandidatesFixture)
+{
+    EXPECT_EQUAL(2000, f.builder.replayEnd(110).build().getTlsReplayCost());
+}
+
+// Only target2 (flushed serial 35) stays unflushed -> 75% of the TLS remains.
+TEST_F("require that tls replay cost is correct for 75% replay", CandidatesFixture)
+{
+    FlushContext::List contexts = ContextsBuilder().add("target1", 10, 0).add("target2", 35, 0).build();
+    EXPECT_EQUAL(1500, f.builder.flushContexts(contexts).numCandidates(1).replayEnd(110).
+                 build().getTlsReplayCost());
+}
+
+// Only target2 (flushed serial 85) stays unflushed -> 25% of the TLS remains.
+TEST_F("require that tls replay cost is correct for 25% replay", CandidatesFixture)
+{
+    FlushContext::List contexts = ContextsBuilder().add("target1", 10, 0).add("target2", 85, 0).build();
+    EXPECT_EQUAL(500, f.builder.flushContexts(contexts).numCandidates(1).replayEnd(110).
+                 build().getTlsReplayCost());
+}
+
+TEST_F("require that tls replay cost is correct for zero operations to replay", CandidatesFixture)
+{
+    EXPECT_EQUAL(0, f.builder.replayEnd(10).build().getTlsReplayCost());
+}
+
+TEST_F("require that flush cost is correct for zero flush targets", CandidatesFixture)
+{
+    EXPECT_EQUAL(0, f.builder.build().getFlushTargetsWriteCost());
+}
+
+// Write cost = sum(approxDiskBytes) * flushTargetsWriteCost (4.0):
+// (1000 + 2000) * 4.0 = 12000.
+TEST_F("require that flush cost is sum of flush targets", CandidatesFixture)
+{
+    FlushContext::List contexts = ContextsBuilder().add("target1", 20, 1000).add("target2", 30, 2000).build();
+    EXPECT_EQUAL(12000, f.builder.flushContexts(contexts).build().getFlushTargetsWriteCost());
+}
+
+
+// Returns the TLS stats used by the flush strategy tests: both handlers cover
+// serial numbers 11..110; handler1 holds 1000 bytes, handler2 holds 2000.
+flushengine::TlsStatsMap
+defaultTransactionLogStats()
+{
+    flushengine::TlsStatsMap::Map result;
+    result.insert(std::make_pair("handler1", flushengine::TlsStats(1000, 11, 110)));
+    result.insert(std::make_pair("handler2", flushengine::TlsStats(2000, 11, 110)));
+    // Returning the local by name already treats it as an rvalue, so the Map
+    // is moved into the TlsStatsMap; an explicit std::move here is redundant
+    // (flagged by -Wredundant-move / clang-tidy performance checks).
+    return result;
+}
+
+// Fixture holding a PrepareRestartFlushStrategy with the default cost config
+// and the default per-handler TLS stats.
+struct FlushStrategyFixture
+{
+    flushengine::TlsStatsMap _tlsStatsMap;
+    PrepareRestartFlushStrategy strategy;
+    FlushStrategyFixture()
+        : _tlsStatsMap(defaultTransactionLogStats()),
+          strategy(DEFAULT_CFG)
+    {}
+    // Delegates to the strategy under test.
+    FlushContext::List getFlushTargets(const FlushContext::List &targetList,
+                                       const flushengine::TlsStatsMap &tlsStatsMap) const {
+        return strategy.getFlushTargets(targetList, tlsStatsMap);
+    }
+};
+
+// Renders the target names of a context list as "[name1,name2,...]".
+vespalib::string
+toString(const FlushContext::List &flushContexts)
+{
+    std::ostringstream oss;
+    oss << "[";
+    bool comma = false;
+    for (const auto &flushContext : flushContexts) {
+        if (comma) {
+            oss << ",";
+        }
+        oss << flushContext->getTarget()->getName();
+        comma = true;
+    }
+    oss << "]";
+    return oss.str();
+}
+
+// Asserts that the contexts' target names render exactly as 'expected'
+// (order-sensitive).
+void
+assertFlushContexts(const vespalib::string &expected, const FlushContext::List &actual)
+{
+    EXPECT_EQUAL(expected, toString(actual));
+}
+
+/**
+ * For the following tests the content of the TLS is as follows:
+ * - handler1: serial numbers 10 -> 110, 1000 bytes
+ * - handler2: serial numbers 10 -> 110, 2000 bytes
+ *
+ * The cost config is: tlsReplayCost=2.0, flushTargetsWriteCost=4.0.
+ * The cost of replaying the complete TLS is then:
+ * - handler1: 1000*2.0 = 2000
+ * - handler2: 2000*2.0 = 4000
+ *
+ * With 3 flush targets that has getApproxBytesToWriteToDisk=167,
+ * the total write cost is 3*167*4.0 = 2004.
+ *
+ * This should give the baseline for understanding the following tests:
+ */
+
+// Flushing all three targets costs 3*167*4.0 = 2004, which exceeds the full
+// replay cost of 2000, so the cheapest plan is to flush nothing.
+TEST_F("require that the best strategy is flushing 0 targets", FlushStrategyFixture)
+{
+    FlushContext::List targets = f.getFlushTargets(ContextsBuilder().
+            add("foo", 10, 167).add("bar", 10, 167).add("baz", 10, 167).build(), f._tlsStatsMap);
+    TEST_DO(assertFlushContexts("[]", targets));
+}
+
+// Write cost 3*166*4.0 = 1992 is below the replay cost 2000, so all targets
+// are flushed.
+TEST_F("require that the best strategy is flushing all targets", FlushStrategyFixture)
+{
+    FlushContext::List targets = f.getFlushTargets(ContextsBuilder().
+            add("foo", 10, 166).add("bar", 10, 166).add("baz", 10, 166).build(), f._tlsStatsMap);
+    TEST_DO(assertFlushContexts("[bar,baz,foo]", targets));
+}
+
+TEST_F("require that the best strategy is flushing all targets (with different unflushed serial)", FlushStrategyFixture)
+{
+    FlushContext::List targets = f.getFlushTargets(ContextsBuilder().
+            add("foo", 10, 166).add("bar", 11, 166).add("baz", 12, 166).build(), f._tlsStatsMap);
+    TEST_DO(assertFlushContexts("[foo,bar,baz]", targets));
+}
+
+TEST_F("require that the best strategy is flushing 1 target", FlushStrategyFixture)
+{
+    FlushContext::List targets = f.getFlushTargets(ContextsBuilder().
+            add("foo", 10, 249).add("bar", 60, 125).add("baz", 60, 125).build(), f._tlsStatsMap);
+    TEST_DO(assertFlushContexts("[foo]", targets));
+}
+
+TEST_F("require that the best strategy is flushing 2 targets", FlushStrategyFixture)
+{
+    FlushContext::List targets = f.getFlushTargets(ContextsBuilder().
+            add("foo", 10, 124).add("bar", 11, 124).add("baz", 60, 251).build(), f._tlsStatsMap);
+    TEST_DO(assertFlushContexts("[foo,bar]", targets));
+}
+
+TEST_F("require that GC flush targets are removed", FlushStrategyFixture)
+{
+    FlushContext::List targets = f.getFlushTargets(ContextsBuilder().
+            addGC("foo", 10, 124).add("bar", 11, 124).add("baz", 60, 251).build(), f._tlsStatsMap);
+    TEST_DO(assertFlushContexts("[bar]", targets));
+}
+
+TEST_F("require that flush targets for different flush handlers are treated independently", FlushStrategyFixture)
+{
+    // best strategy for handler1 is flushing 1 target (foo)
+    // best strategy for handler2 is flushing 2 targets (baz,quz)
+    FlushContext::List targets = f.getFlushTargets(ContextsBuilder().
+            add("handler1", "foo", 10, 249).add("handler1", "bar", 60, 251).
+            add("handler2", "baz", 10, 499).add("handler2", "quz", 60, 499).build(), f._tlsStatsMap);
+    TEST_DO(assertFlushContexts("[foo,baz,quz]", targets));
+}
+
+
+TEST_MAIN()
+{
+    TEST_RUN_ALL();
+}
diff --git a/searchcore/src/tests/proton/index/.gitignore b/searchcore/src/tests/proton/index/.gitignore
new file mode 100644
index 00000000000..00f61bc687d
--- /dev/null
+++ b/searchcore/src/tests/proton/index/.gitignore
@@ -0,0 +1,4 @@
+searchcore_diskindexcleaner_test_app
+searchcore_fusionrunner_test_app
+searchcore_indexcollection_test_app
+searchcore_indexmanager_test_app
diff --git a/searchcore/src/tests/proton/index/CMakeLists.txt b/searchcore/src/tests/proton/index/CMakeLists.txt
new file mode 100644
index 00000000000..46cbf4117ac
--- /dev/null
+++ b/searchcore/src/tests/proton/index/CMakeLists.txt
@@ -0,0 +1,33 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_indexmanager_test_app
+ SOURCES
+ indexmanager_test.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_index
+ searchcore_flushengine
+ searchcore_pcommon
+ searchcore_util
+)
+vespa_add_executable(searchcore_fusionrunner_test_app
+ SOURCES
+ fusionrunner_test.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_index
+ searchcore_pcommon
+ searchcore_util
+)
+vespa_add_executable(searchcore_diskindexcleaner_test_app
+ SOURCES
+ diskindexcleaner_test.cpp
+ DEPENDS
+ searchcore_index
+)
+vespa_add_executable(searchcore_indexcollection_test_app
+ SOURCES
+ indexcollection_test.cpp
+ DEPENDS
+ searchcore_index
+)
+vespa_add_test(NAME searchcore_index_test COMMAND sh index_test.sh)
diff --git a/searchcore/src/tests/proton/index/diskindexcleaner_test.cpp b/searchcore/src/tests/proton/index/diskindexcleaner_test.cpp
new file mode 100644
index 00000000000..e462ba17dba
--- /dev/null
+++ b/searchcore/src/tests/proton/index/diskindexcleaner_test.cpp
@@ -0,0 +1,159 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Unit tests for diskindexcleaner.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("diskindexcleaner_test");
+
+#include <vespa/searchcorespi/index/activediskindexes.h>
+#include <vespa/searchcorespi/index/diskindexcleaner.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <string>
+#include <vector>
+
+using std::string;
+using std::vector;
+using namespace searchcorespi::index;
+
+namespace {
+
+// Driver for the disk index cleaner tests; each require* method builds a
+// fresh directory layout and checks what DiskIndexCleaner removes.
+class Test : public vespalib::TestApp {
+    void requireThatAllIndexesOlderThanLastFusionIsRemoved();
+    void requireThatIndexesInUseAreNotRemoved();
+    void requireThatInvalidFlushIndexesAreRemoved();
+    void requireThatInvalidFusionIndexesAreRemoved();
+    void requireThatRemoveDontTouchNewIndexes();
+
+public:
+    int Main();
+};
+
+// All test indexes are created below this directory.
+const string index_dir = "test_data";
+
+// Recursively deletes the test directory and its contents.
+void removeTestData() {
+    FastOS_FileInterface::EmptyAndRemoveDirectory(index_dir.c_str());
+}
+
+int
+Test::Main()
+{
+    TEST_INIT("diskindexcleaner_test");
+
+    // Start from a clean slate in case a previous run left data behind.
+    TEST_DO(removeTestData());
+
+    TEST_DO(requireThatAllIndexesOlderThanLastFusionIsRemoved());
+    TEST_DO(requireThatIndexesInUseAreNotRemoved());
+    TEST_DO(requireThatInvalidFlushIndexesAreRemoved());
+    TEST_DO(requireThatInvalidFusionIndexesAreRemoved());
+    TEST_DO(requireThatRemoveDontTouchNewIndexes());
+
+    TEST_DO(removeTestData());
+
+    TEST_DONE();
+}
+
+// Creates <index_dir>/<name> containing an empty serial.dat file; the
+// presence of serial.dat is what the tests later delete to mark an index
+// as invalid.
+void createIndex(const string &name) {
+    FastOS_FileInterface::MakeDirIfNotPresentOrExit(index_dir.c_str());
+    const string dir_name = index_dir + "/" + name;
+    FastOS_FileInterface::MakeDirIfNotPresentOrExit(dir_name.c_str());
+    const string serial_file = dir_name + "/serial.dat";
+    FastOS_File file(serial_file.c_str());
+    file.OpenWriteOnlyTruncate();
+}
+
+// Returns the names of all "index.*" subdirectories currently present.
+vector<string> readIndexes() {
+    vector<string> indexes;
+    FastOS_DirectoryScan dir_scan(index_dir.c_str());
+    while (dir_scan.ReadNext()) {
+        string name = dir_scan.GetName();
+        if (!dir_scan.IsDirectory() || name.find("index.") != 0) {
+            continue;
+        }
+        indexes.push_back(name);
+    }
+    return indexes;
+}
+
+// True if v is an element of c (linear search). Takes both arguments by
+// const reference; the original passed the whole container (and value) by
+// value, copying a vector<string> on every call.
+template <class Container>
+bool contains(const Container &c, const typename Container::value_type &v) {
+    return find(c.begin(), c.end(), v) != c.end();
+}
+
+// Standard layout: two fusion generations interleaved with flush indexes.
+// The newest fusion is 2, so flush.3 and flush.4 are newer than it.
+void createIndexes() {
+    createIndex("index.flush.0");
+    createIndex("index.flush.1");
+    createIndex("index.fusion.1");
+    createIndex("index.flush.2");
+    createIndex("index.fusion.2");
+    createIndex("index.flush.3");
+    createIndex("index.flush.4");
+}
+
+void Test::requireThatAllIndexesOlderThanLastFusionIsRemoved() {
+    createIndexes();
+    ActiveDiskIndexes active_indexes;
+    DiskIndexCleaner::clean(index_dir, active_indexes);
+    vector<string> indexes = readIndexes();
+    // Only the newest fusion (2) and the flush indexes after it survive.
+    EXPECT_EQUAL(3u, indexes.size());
+    EXPECT_TRUE(contains(indexes, "index.fusion.2"));
+    EXPECT_TRUE(contains(indexes, "index.flush.3"));
+    EXPECT_TRUE(contains(indexes, "index.flush.4"));
+}
+
+void Test::requireThatIndexesInUseAreNotRemoved() {
+    createIndexes();
+    ActiveDiskIndexes active_indexes;
+    active_indexes.setActive(index_dir + "/index.fusion.1");
+    active_indexes.setActive(index_dir + "/index.flush.2");
+    DiskIndexCleaner::clean(index_dir, active_indexes);
+    vector<string> indexes = readIndexes();
+    // Old but still-active indexes must be kept.
+    EXPECT_TRUE(contains(indexes, "index.fusion.1"));
+    EXPECT_TRUE(contains(indexes, "index.flush.2"));
+
+    active_indexes.notActive(index_dir + "/index.fusion.1");
+    active_indexes.notActive(index_dir + "/index.flush.2");
+    DiskIndexCleaner::clean(index_dir, active_indexes);
+    indexes = readIndexes();
+    // Once deactivated they are removed by the next cleaning pass.
+    EXPECT_TRUE(!contains(indexes, "index.fusion.1"));
+    EXPECT_TRUE(!contains(indexes, "index.flush.2"));
+}
+
+void Test::requireThatInvalidFlushIndexesAreRemoved() {
+    createIndexes();
+    // Deleting serial.dat invalidates the newest flush index.
+    FastOS_File((index_dir + "/index.flush.4/serial.dat").c_str()).Delete();
+    ActiveDiskIndexes active_indexes;
+    DiskIndexCleaner::clean(index_dir, active_indexes);
+    vector<string> indexes = readIndexes();
+    EXPECT_EQUAL(2u, indexes.size());
+    EXPECT_TRUE(contains(indexes, "index.fusion.2"));
+    EXPECT_TRUE(contains(indexes, "index.flush.3"));
+}
+
+void Test::requireThatInvalidFusionIndexesAreRemoved() {
+    createIndexes();
+    // With fusion.2 invalid, fusion.1 becomes the last valid fusion, so
+    // flush.2 .. flush.4 are kept as well.
+    FastOS_File((index_dir + "/index.fusion.2/serial.dat").c_str()).Delete();
+    ActiveDiskIndexes active_indexes;
+    DiskIndexCleaner::clean(index_dir, active_indexes);
+    vector<string> indexes = readIndexes();
+    EXPECT_EQUAL(4u, indexes.size());
+    EXPECT_TRUE(contains(indexes, "index.fusion.1"));
+    EXPECT_TRUE(contains(indexes, "index.flush.2"));
+    EXPECT_TRUE(contains(indexes, "index.flush.3"));
+    EXPECT_TRUE(contains(indexes, "index.flush.4"));
+}
+
+void Test::requireThatRemoveDontTouchNewIndexes() {
+    createIndexes();
+    FastOS_File((index_dir + "/index.flush.4/serial.dat").c_str()).Delete();
+    ActiveDiskIndexes active_indexes;
+    // removeOldIndexes must keep the invalid-but-new flush.4, unlike clean().
+    DiskIndexCleaner::removeOldIndexes(index_dir, active_indexes);
+    vector<string> indexes = readIndexes();
+    EXPECT_EQUAL(3u, indexes.size());
+    EXPECT_TRUE(contains(indexes, "index.fusion.2"));
+    EXPECT_TRUE(contains(indexes, "index.flush.3"));
+    EXPECT_TRUE(contains(indexes, "index.flush.4"));
+}
+
+} // namespace
+
+TEST_APPHOOK(Test);
diff --git a/searchcore/src/tests/proton/index/fusionrunner_test.cpp b/searchcore/src/tests/proton/index/fusionrunner_test.cpp
new file mode 100644
index 00000000000..dd2bcee97b2
--- /dev/null
+++ b/searchcore/src/tests/proton/index/fusionrunner_test.cpp
@@ -0,0 +1,328 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Unit tests for fusionrunner.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("fusionrunner_test");
+
+#include <vespa/document/fieldvalue/document.h>
+#include <vespa/document/fieldvalue/fieldvalue.h>
+#include <vespa/searchlib/memoryindex/memoryindex.h>
+#include <vespa/searchcore/proton/index/indexmanager.h>
+#include <vespa/searchcorespi/index/fusionrunner.h>
+#include <vespa/searchcorespi/index/fusionspec.h>
+#include <vespa/searchlib/attribute/fixedsourceselector.h>
+#include <vespa/searchlib/diskindex/diskindex.h>
+#include <vespa/searchlib/diskindex/indexbuilder.h>
+#include <vespa/searchlib/fef/matchdata.h>
+#include <vespa/searchlib/fef/matchdatalayout.h>
+#include <vespa/searchlib/fef/termfieldmatchdata.h>
+#include <vespa/searchlib/index/docbuilder.h>
+#include <vespa/searchlib/index/dummyfileheadercontext.h>
+#include <vespa/searchlib/query/tree/simplequery.h>
+#include <vespa/searchlib/queryeval/fake_requestcontext.h>
+#include <vespa/searchcore/proton/server/executorthreadingservice.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <set>
+
+using document::Document;
+using document::FieldValue;
+using search::FixedSourceSelector;
+using search::diskindex::DiskIndex;
+using search::diskindex::IndexBuilder;
+using search::fef::MatchData;
+using search::fef::MatchDataLayout;
+using search::fef::TermFieldHandle;
+using search::fef::TermFieldMatchData;
+using search::index::DocBuilder;
+using search::index::Schema;
+using search::index::DummyFileHeaderContext;
+using search::memoryindex::MemoryIndex;
+using search::query::SimpleStringTerm;
+using search::queryeval::Blueprint;
+using search::queryeval::FieldSpec;
+using search::queryeval::FieldSpecList;
+using search::queryeval::ISourceSelector;
+using search::queryeval::SearchIterator;
+using search::queryeval::FakeRequestContext;
+using std::set;
+using std::string;
+using namespace proton;
+using search::TuneFileAttributes;
+using search::TuneFileIndexing;
+using search::TuneFileIndexManager;
+using search::TuneFileSearch;
+using searchcorespi::index::FusionRunner;
+using searchcorespi::index::FusionSpec;
+using proton::ExecutorThreadingService;
+
+namespace {
+
+// Runs a single test method bracketed by setUp() and tearDown().
+#define TEST_CALL(func) \
+    setUp();            \
+    func;               \
+    tearDown()
+
+// Driver for the fusion runner tests. Each test runs between setUp() and
+// tearDown() via the TEST_CALL macro.
+class Test : public vespalib::TestApp {
+    std::unique_ptr<FusionRunner> _fusion_runner;
+    FixedSourceSelector::UP _selector;
+    FusionSpec _fusion_spec;
+    DummyFileHeaderContext _fileHeaderContext;
+    ExecutorThreadingService _threadingService;
+    IndexManager::MaintainerOperations _ops;
+
+    void setUp();
+    void tearDown();
+
+    // Builds a disk index (flush or fusion) with 4 documents under dir.
+    void createIndex(const string &dir, uint32_t id, bool fusion = false);
+    // Verifies that searching the fused index for 'term' finds ids[0..size-1].
+    void checkResults(uint32_t fusion_id, const uint32_t *ids, size_t size);
+
+    void requireThatNoDiskIndexesGiveId0();
+    void requireThatOneDiskIndexCausesCopy();
+    void requireThatTwoDiskIndexesCauseFusion();
+    void requireThatFusionCanRunOnMultipleDiskIndexes();
+    void requireThatOldFusionIndexCanBePartOfNewFusion();
+    void requireThatSelectorsCanBeRebased();
+
+public:
+    Test()
+        : _fusion_runner(),
+          _selector(),
+          _fusion_spec(),
+          _fileHeaderContext(),
+          _threadingService(),
+          _ops(_fileHeaderContext,
+               TuneFileIndexManager(), 0,
+               _threadingService)
+    {
+    }
+    int Main();
+};
+
+int
+Test::Main()
+{
+    TEST_INIT("fusionrunner_test");
+
+    // Tag written index files with the test binary's name when available.
+    if (_argc > 0) {
+        DummyFileHeaderContext::setCreator(_argv[0]);
+    }
+    TEST_CALL(requireThatNoDiskIndexesGiveId0());
+    TEST_CALL(requireThatOneDiskIndexCausesCopy());
+    TEST_CALL(requireThatTwoDiskIndexesCauseFusion());
+    TEST_CALL(requireThatFusionCanRunOnMultipleDiskIndexes());
+    TEST_CALL(requireThatOldFusionIndexCanBePartOfNewFusion());
+    TEST_CALL(requireThatSelectorsCanBeRebased());
+
+    TEST_DONE();
+}
+
+const string base_dir = "fusion_test_data";
+const string field_name = "field_name";
+// The word that checkResults() searches for; every index contains it once.
+const string term = "foo";
+// Disk index ids used by the tests; each id also serves as its doc-id base.
+const uint32_t disk_id[] = { 1, 2, 21, 42 };
+
+// Schema with a single string index field.
+Schema getSchema() {
+    Schema schema;
+    schema.addIndexField(
+            Schema::IndexField(field_name, Schema::STRING));
+    return schema;
+}
+
+// Creates a fresh FusionRunner, source selector, and fusion spec on top of
+// an emptied base directory.
+void Test::setUp() {
+    FastOS_FileInterface::EmptyAndRemoveDirectory(base_dir.c_str());
+    _fusion_runner.reset(new FusionRunner(base_dir, getSchema(),
+                                          TuneFileAttributes(),
+                                          _fileHeaderContext));
+    const string selector_base = base_dir + "/index.flush.0/selector";
+    _selector.reset(new FixedSourceSelector(0, selector_base));
+    _fusion_spec = FusionSpec();
+}
+
+// Removes all index data produced by the test and drops the selector.
+void Test::tearDown() {
+    FastOS_FileInterface::EmptyAndRemoveDirectory(base_dir.c_str());
+    // reset() instead of reset(0): no need to spell out the null pointer.
+    _selector.reset();
+}
+
+// Builds a single-field document "doc::<id>" containing the given word.
+Document::UP buildDocument(DocBuilder & doc_builder, int id, const string &word) {
+    vespalib::asciistream ost;
+    ost << "doc::" << id;
+    doc_builder.startDocument(ost.str());
+    doc_builder.startIndexField(field_name).addStr(word).endField();
+    return doc_builder.endDocument();
+}
+
+// Inserts a document into the memory index, commits it, and records its
+// source id in the selector.
+void addDocument(DocBuilder & doc_builder, MemoryIndex &index, ISourceSelector &selector,
+                 uint8_t index_id, uint32_t docid, const string &word) {
+    Document::UP doc = buildDocument(doc_builder, docid, word);
+    index.insertDocument(docid, *doc);
+    index.commit(std::shared_ptr<search::IDestructorCallback>());
+    selector.setSource(docid, index_id);
+}
+
+// Builds a disk index under dir: indexes 4 documents (term, "bar", "baz",
+// "qux") into a memory index, dumps it with IndexBuilder, and saves the
+// selector state next to the index. Also registers the id in _fusion_spec
+// (as the last fusion id, or as an additional flush id).
+void Test::createIndex(const string &dir, uint32_t id, bool fusion) {
+    FastOS_FileInterface::MakeDirIfNotPresentOrExit(dir.c_str());
+    vespalib::asciistream ost;
+    if (fusion) {
+        ost << dir << "/index.fusion." << id;
+        _fusion_spec.last_fusion_id = id;
+    } else {
+        ost << dir << "/index.flush." << id;
+        _fusion_spec.flush_ids.push_back(id);
+    }
+    const string index_dir = ost.str();
+
+    Schema schema = getSchema();
+    DocBuilder doc_builder(schema);
+    MemoryIndex memory_index(schema, _threadingService.indexFieldInverter(),
+                             _threadingService.indexFieldWriter());
+    addDocument(doc_builder, memory_index, *_selector, id, id + 0, term);
+    addDocument(doc_builder, memory_index, *_selector, id, id + 1, "bar");
+    addDocument(doc_builder, memory_index, *_selector, id, id + 2, "baz");
+    addDocument(doc_builder, memory_index, *_selector, id, id + 3, "qux");
+    // Make sure all inverted data is visible before dumping to disk.
+    _threadingService.indexFieldWriter().sync();
+
+    const uint32_t docIdLimit =
+        std::min(memory_index.getDocIdLimit(), _selector->getDocIdLimit());
+    IndexBuilder index_builder(schema);
+    index_builder.setPrefix(index_dir);
+    TuneFileIndexing tuneFileIndexing;
+    TuneFileAttributes tuneFileAttributes;
+    index_builder.open(docIdLimit, memory_index.getNumWords(),
+                       tuneFileIndexing,
+                       _fileHeaderContext);
+    memory_index.dump(index_builder);
+    index_builder.close();
+
+    _selector->extractSaveInfo(index_dir + "/selector")->
+        save(tuneFileAttributes, _fileHeaderContext);
+}
+
+// Scans dir for "index.fusion.<id>" subdirectories and returns the ids.
+set<uint32_t> readFusionIds(const string &dir) {
+    set<uint32_t> ids;
+    FastOS_DirectoryScan dir_scan(dir.c_str());
+    while (dir_scan.ReadNext()) {
+        if (!dir_scan.IsDirectory()) {
+            continue;
+        }
+        vespalib::string name = dir_scan.GetName();
+        const vespalib::string prefix("index.fusion.");
+        vespalib::string::size_type pos = name.find(prefix);
+        if (pos != 0) {
+            continue;
+        }
+        vespalib::string idString = name.substr(prefix.size());
+        vespalib::asciistream ist(idString);
+        uint32_t id;
+        ist >> id;
+        ids.insert(id);
+    }
+    return ids;
+}
+
+// Returns the directory path of the fusion index with the given id.
+vespalib::string getFusionIndexName(uint32_t fusion_id) {
+    vespalib::asciistream ost;
+    ost << base_dir << "/index.fusion." << fusion_id;
+    return ost.str();
+}
+
+// Opens the fused disk index and verifies that a search for 'term' can seek
+// to each of the given doc ids.
+void Test::checkResults(uint32_t fusion_id, const uint32_t *ids, size_t size) {
+    FakeRequestContext requestContext;
+    DiskIndex disk_index(getFusionIndexName(fusion_id));
+    ASSERT_TRUE(disk_index.setup(TuneFileSearch()));
+    uint32_t fieldId = 0;
+
+    MatchDataLayout mdl;
+    TermFieldHandle handle = mdl.allocTermField(fieldId);
+    MatchData::UP match_data = mdl.createMatchData();
+
+    FieldSpec field(field_name, fieldId, handle);
+    FieldSpecList fields;
+    fields.add(field);
+
+    search::queryeval::Searchable &searchable = disk_index;
+    SimpleStringTerm node(term, field_name, fieldId, search::query::Weight(0));
+    Blueprint::UP blueprint = searchable.createBlueprint(requestContext, fields, node);
+    blueprint->fetchPostings(true);
+    SearchIterator::UP search = blueprint->createSearch(*match_data, true);
+    search->initFullRange();
+    for (size_t i = 0; i < size; ++i) {
+        EXPECT_TRUE(search->seek(ids[i]));
+    }
+}
+
+void Test::requireThatNoDiskIndexesGiveId0() {
+    uint32_t fusion_id = _fusion_runner->fuse(_fusion_spec, 0u, _ops);
+    EXPECT_EQUAL(0u, fusion_id);
+}
+
+void Test::requireThatOneDiskIndexCausesCopy() {
+    createIndex(base_dir, disk_id[0]);
+    uint32_t fusion_id = _fusion_runner->fuse(_fusion_spec, 0u, _ops);
+    // A single flush index is turned into a fusion index with the same id.
+    EXPECT_EQUAL(disk_id[0], fusion_id);
+    set<uint32_t> fusion_ids = readFusionIds(base_dir);
+    ASSERT_TRUE(!fusion_ids.empty());
+    EXPECT_EQUAL(1u, fusion_ids.size());
+    EXPECT_EQUAL(fusion_id, *fusion_ids.begin());
+
+    checkResults(fusion_id, disk_id, 1);
+}
+
+void Test::requireThatTwoDiskIndexesCauseFusion() {
+    createIndex(base_dir, disk_id[0]);
+    createIndex(base_dir, disk_id[1]);
+    uint32_t fusion_id = _fusion_runner->fuse(_fusion_spec, 0u, _ops);
+    // The fusion gets the id of the newest flush index.
+    EXPECT_EQUAL(disk_id[1], fusion_id);
+    set<uint32_t> fusion_ids = readFusionIds(base_dir);
+    ASSERT_TRUE(!fusion_ids.empty());
+    EXPECT_EQUAL(1u, fusion_ids.size());
+    EXPECT_EQUAL(fusion_id, *fusion_ids.begin());
+
+    checkResults(fusion_id, disk_id, 2);
+}
+
+void Test::requireThatFusionCanRunOnMultipleDiskIndexes() {
+    createIndex(base_dir, disk_id[0]);
+    createIndex(base_dir, disk_id[1]);
+    createIndex(base_dir, disk_id[2]);
+    createIndex(base_dir, disk_id[3]);
+    uint32_t fusion_id = _fusion_runner->fuse(_fusion_spec, 0u, _ops);
+    EXPECT_EQUAL(disk_id[3], fusion_id);
+    set<uint32_t> fusion_ids = readFusionIds(base_dir);
+    ASSERT_TRUE(!fusion_ids.empty());
+    EXPECT_EQUAL(1u, fusion_ids.size());
+    EXPECT_EQUAL(fusion_id, *fusion_ids.begin());
+
+    checkResults(fusion_id, disk_id, 4);
+}
+
+void Test::requireThatOldFusionIndexCanBePartOfNewFusion() {
+    createIndex(base_dir, disk_id[0], true);
+    createIndex(base_dir, disk_id[1]);
+    uint32_t fusion_id = _fusion_runner->fuse(_fusion_spec, 0u, _ops);
+    EXPECT_EQUAL(disk_id[1], fusion_id);
+    set<uint32_t> fusion_ids = readFusionIds(base_dir);
+    ASSERT_TRUE(!fusion_ids.empty());
+    // Both the old fusion index and the new fusion result are on disk.
+    EXPECT_EQUAL(2u, fusion_ids.size());
+    EXPECT_EQUAL(disk_id[0], *fusion_ids.begin());
+    EXPECT_EQUAL(fusion_id, *(++fusion_ids.begin()));
+
+    checkResults(fusion_id, disk_id, 2);
+}
+
+void Test::requireThatSelectorsCanBeRebased() {
+    createIndex(base_dir, disk_id[0]);
+    createIndex(base_dir, disk_id[1]);
+    uint32_t fusion_id = _fusion_runner->fuse(_fusion_spec, 0u, _ops);
+
+    // Start a new fusion generation based on the previous fusion result.
+    _fusion_spec.flush_ids.clear();
+    _fusion_spec.last_fusion_id = fusion_id;
+    createIndex(base_dir, disk_id[2]);
+    fusion_id = _fusion_runner->fuse(_fusion_spec, 0u, _ops);
+
+    checkResults(fusion_id, disk_id, 3);
+}
+
+} // namespace
+
+TEST_APPHOOK(Test);
diff --git a/searchcore/src/tests/proton/index/index_test.sh b/searchcore/src/tests/proton/index/index_test.sh
new file mode 100644
index 00000000000..91c271128fe
--- /dev/null
+++ b/searchcore/src/tests/proton/index/index_test.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+set -e
+
+# Run each index test binary, optionally under valgrind when $VALGRIND is set.
+$VALGRIND ./searchcore_diskindexcleaner_test_app
+$VALGRIND ./searchcore_fusionrunner_test_app
+$VALGRIND ./searchcore_indexcollection_test_app
+$VALGRIND ./searchcore_indexmanager_test_app
diff --git a/searchcore/src/tests/proton/index/index_writer/.gitignore b/searchcore/src/tests/proton/index/index_writer/.gitignore
new file mode 100644
index 00000000000..bbada541cf0
--- /dev/null
+++ b/searchcore/src/tests/proton/index/index_writer/.gitignore
@@ -0,0 +1 @@
+searchcore_index_writer_test_app
diff --git a/searchcore/src/tests/proton/index/index_writer/CMakeLists.txt b/searchcore/src/tests/proton/index/index_writer/CMakeLists.txt
new file mode 100644
index 00000000000..88db11dbdba
--- /dev/null
+++ b/searchcore/src/tests/proton/index/index_writer/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_index_writer_test_app
+ SOURCES
+ index_writer_test.cpp
+ DEPENDS
+ searchcore_index
+ searchcore_pcommon
+)
+vespa_add_test(NAME searchcore_index_writer_test_app COMMAND searchcore_index_writer_test_app)
diff --git a/searchcore/src/tests/proton/index/index_writer/DESC b/searchcore/src/tests/proton/index/index_writer/DESC
new file mode 100644
index 00000000000..ea5596d3039
--- /dev/null
+++ b/searchcore/src/tests/proton/index/index_writer/DESC
@@ -0,0 +1 @@
+index writer test. Take a look at index_writer_test.cpp for details.
diff --git a/searchcore/src/tests/proton/index/index_writer/FILES b/searchcore/src/tests/proton/index/index_writer/FILES
new file mode 100644
index 00000000000..3b26c7f84ad
--- /dev/null
+++ b/searchcore/src/tests/proton/index/index_writer/FILES
@@ -0,0 +1 @@
+index_writer_test.cpp
diff --git a/searchcore/src/tests/proton/index/index_writer/index_writer_test.cpp b/searchcore/src/tests/proton/index/index_writer/index_writer_test.cpp
new file mode 100644
index 00000000000..1ff5a6e8649
--- /dev/null
+++ b/searchcore/src/tests/proton/index/index_writer/index_writer_test.cpp
@@ -0,0 +1,117 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("index_writer_test");
+#include <vespa/vespalib/testkit/testapp.h>
+
+#include <vespa/searchcore/proton/index/index_writer.h>
+#include <vespa/searchcore/proton/test/mock_index_manager.h>
+#include <vespa/searchlib/index/docbuilder.h>
+
+using namespace proton;
+using namespace search;
+using namespace search::index;
+using namespace searchcorespi;
+
+using document::Document;
+
+std::string
+toString(const std::vector<SerialNum> &vec)
+{
+ std::ostringstream oss;
+ for (size_t i = 0; i < vec.size(); ++i) {
+ if (i > 0) oss << ",";
+ oss << vec[i];
+ }
+ return oss.str();
+}
+
+struct MyIndexManager : public test::MockIndexManager
+{
+ typedef std::map<uint32_t, std::vector<SerialNum> > LidMap;
+ LidMap puts;
+ LidMap removes;
+ SerialNum current;
+ SerialNum flushed;
+ SerialNum commitSerial;
+ MyIndexManager() : puts(), removes(), current(0), flushed(0),
+ commitSerial(0)
+ {
+ }
+ std::string getPut(uint32_t lid) {
+ return toString(puts[lid]);
+ }
+ std::string getRemove(uint32_t lid) {
+ return toString(removes[lid]);
+ }
+ // Implements IIndexManager
+ virtual void putDocument(uint32_t lid, const Document &,
+ SerialNum serialNum) override {
+ puts[lid].push_back(serialNum);
+ }
+ virtual void removeDocument(uint32_t lid,
+ SerialNum serialNum) override {
+ removes[lid].push_back(serialNum);
+ }
+ virtual void commit(SerialNum serialNum,
+ OnWriteDoneType) override {
+ commitSerial = serialNum;
+ }
+ virtual SerialNum getCurrentSerialNum() const override {
+ return current;
+ }
+ virtual SerialNum getFlushedSerialNum() const override {
+ return flushed;
+ }
+};
+
+struct Fixture
+{
+ IIndexManager::SP iim;
+ MyIndexManager &mim;
+ IndexWriter iw;
+ Schema schema;
+ DocBuilder builder;
+ Document::UP dummyDoc;
+ Fixture()
+ : iim(new MyIndexManager()),
+ mim(static_cast<MyIndexManager &>(*iim)),
+ iw(iim),
+ schema(),
+ builder(schema),
+ dummyDoc(createDoc(1234)) // The content of this document is not used
+ {
+ }
+ Document::UP createDoc(uint32_t lid) {
+ builder.startDocument(vespalib::make_string("doc:test:%u", lid));
+ return builder.endDocument();
+ }
+ void put(SerialNum serialNum, const search::DocumentIdT lid) {
+ iw.put(serialNum, *dummyDoc, lid);
+ iw.commit(serialNum, std::shared_ptr<IDestructorCallback>());
+ }
+ void remove(SerialNum serialNum, const search::DocumentIdT lid) {
+ iw.remove(serialNum, lid);
+ iw.commit(serialNum, std::shared_ptr<IDestructorCallback>());
+ }
+};
+
+TEST_F("require that index adapter ignores old operations", Fixture)
+{
+ f.mim.flushed = 10;
+ f.put(8, 1);
+ f.remove(9, 2);
+ EXPECT_EQUAL("", f.mim.getPut(1));
+ EXPECT_EQUAL("", f.mim.getRemove(2));
+}
+
+TEST_F("require that commit is forwarded to index manager", Fixture)
+{
+ f.iw.commit(10, std::shared_ptr<IDestructorCallback>());
+ EXPECT_EQUAL(10u, f.mim.commitSerial);
+}
+
+TEST_MAIN()
+{
+ TEST_RUN_ALL();
+}
diff --git a/searchcore/src/tests/proton/index/indexcollection_test.cpp b/searchcore/src/tests/proton/index/indexcollection_test.cpp
new file mode 100644
index 00000000000..f27b9c86260
--- /dev/null
+++ b/searchcore/src/tests/proton/index/indexcollection_test.cpp
@@ -0,0 +1,129 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("indexcollection_test");
+
+#include <vespa/searchcore/proton/matching/fakesearchcontext.h>
+#include <vespa/searchcorespi/index/indexcollection.h>
+#include <vespa/searchcorespi/index/warmupindexcollection.h>
+#include <vespa/searchlib/queryeval/fake_searchable.h>
+#include <vespa/searchlib/attribute/fixedsourceselector.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/util/threadstackexecutor.h>
+
+using search::queryeval::ISourceSelector;
+using search::queryeval::FakeSearchable;
+using search::FixedSourceSelector;
+using namespace proton;
+using namespace searchcorespi;
+
+namespace {
+
+class Test : public vespalib::TestApp,
+ public IWarmupDone
+{
+ std::shared_ptr<ISourceSelector> _selector;
+ std::shared_ptr<IndexSearchable> _source1;
+ std::shared_ptr<IndexSearchable> _source2;
+ std::shared_ptr<IndexSearchable> _fusion_source;
+ vespalib::ThreadStackExecutor _executor;
+ std::shared_ptr<IndexSearchable> _warmup;
+
+ void requireThatSearchablesCanBeAppended(IndexCollection::UP fsc);
+ void requireThatSearchablesCanBeReplaced(IndexCollection::UP fsc);
+ void requireThatReplaceAndRenumberUpdatesCollectionAfterFusion();
+ IndexCollection::UP createWarmup(const IndexCollection::SP & prev, const IndexCollection::SP & next);
+ virtual void warmupDone(ISearchableIndexCollection::SP current) {
+ (void) current;
+ }
+
+public:
+ Test() : _selector(new FixedSourceSelector(0, "fs1")),
+ _source1(new FakeIndexSearchable),
+ _source2(new FakeIndexSearchable),
+ _fusion_source(new FakeIndexSearchable),
+ _executor(1, 128*1024),
+ _warmup(new FakeIndexSearchable)
+ {}
+
+ int Main();
+};
+
+
+IndexCollection::UP
+Test::createWarmup(const IndexCollection::SP & prev, const IndexCollection::SP & next)
+{
+ return IndexCollection::UP(new WarmupIndexCollection(1.0, prev, next, *_warmup, _executor, *this));
+}
+
+int
+Test::Main()
+{
+ TEST_INIT("indexcollection_test");
+
+ TEST_DO(requireThatSearchablesCanBeAppended(IndexCollection::UP(new IndexCollection(_selector))));
+ TEST_DO(requireThatSearchablesCanBeReplaced(IndexCollection::UP(new IndexCollection(_selector))));
+ TEST_DO(requireThatReplaceAndRenumberUpdatesCollectionAfterFusion());
+ {
+ IndexCollection::SP prev(new IndexCollection(_selector));
+ IndexCollection::SP next(new IndexCollection(_selector));
+ requireThatSearchablesCanBeAppended(createWarmup(prev, next));
+ EXPECT_EQUAL(0u, prev->getSourceCount());
+ EXPECT_EQUAL(1u, next->getSourceCount());
+ }
+ {
+ IndexCollection::SP prev(new IndexCollection(_selector));
+ IndexCollection::SP next(new IndexCollection(_selector));
+ requireThatSearchablesCanBeReplaced(createWarmup(prev, next));
+ EXPECT_EQUAL(0u, prev->getSourceCount());
+ EXPECT_EQUAL(1u, next->getSourceCount());
+ }
+
+ TEST_DONE();
+}
+
+void Test::requireThatSearchablesCanBeAppended(IndexCollection::UP fsc) {
+ const uint32_t id = 42;
+
+ fsc->append(id, _source1);
+ EXPECT_EQUAL(1u, fsc->getSourceCount());
+ EXPECT_EQUAL(id, fsc->getSourceId(0));
+}
+
+void Test::requireThatSearchablesCanBeReplaced(IndexCollection::UP fsc) {
+ const uint32_t id = 42;
+
+ fsc->append(id, _source1);
+ EXPECT_EQUAL(1u, fsc->getSourceCount());
+ EXPECT_EQUAL(id, fsc->getSourceId(0));
+ EXPECT_EQUAL(_source1.get(), &fsc->getSearchable(0));
+
+ fsc->replace(id, _source2);
+ EXPECT_EQUAL(1u, fsc->getSourceCount());
+ EXPECT_EQUAL(id, fsc->getSourceId(0));
+ EXPECT_EQUAL(_source2.get(), &fsc->getSearchable(0));
+}
+
+void Test::requireThatReplaceAndRenumberUpdatesCollectionAfterFusion() {
+ IndexCollection fsc(_selector);
+
+ fsc.append(0, _source1);
+ fsc.append(1, _source1);
+ fsc.append(2, _source1);
+ fsc.append(3, _source2);
+ EXPECT_EQUAL(4u, fsc.getSourceCount());
+
+ const uint32_t id_diff = 2;
+ IndexCollection::UP new_fsc =
+ IndexCollection::replaceAndRenumber(
+ _selector, fsc, id_diff, _fusion_source);
+ EXPECT_EQUAL(2u, new_fsc->getSourceCount());
+ EXPECT_EQUAL(0u, new_fsc->getSourceId(0));
+ EXPECT_EQUAL(_fusion_source.get(), &new_fsc->getSearchable(0));
+ EXPECT_EQUAL(1u, new_fsc->getSourceId(1));
+ EXPECT_EQUAL(_source2.get(), &new_fsc->getSearchable(1));
+}
+
+} // namespace
+
+TEST_APPHOOK(Test);
diff --git a/searchcore/src/tests/proton/index/indexmanager_test.cpp b/searchcore/src/tests/proton/index/indexmanager_test.cpp
new file mode 100644
index 00000000000..95f5a3b50ce
--- /dev/null
+++ b/searchcore/src/tests/proton/index/indexmanager_test.cpp
@@ -0,0 +1,690 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Unit tests for IndexManager.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("indexmanager_test");
+
+#include <vespa/document/fieldvalue/document.h>
+#include <vespa/document/fieldvalue/fieldvalue.h>
+#include <vespa/searchcore/proton/index/indexmanager.h>
+#include <vespa/searchcore/proton/server/executorthreadingservice.h>
+#include <vespa/searchcorespi/index/indexcollection.h>
+#include <vespa/searchcorespi/index/indexflushtarget.h>
+#include <vespa/searchcorespi/index/indexfusiontarget.h>
+#include <vespa/searchlib/index/docbuilder.h>
+#include <vespa/searchlib/index/dummyfileheadercontext.h>
+#include <vespa/searchlib/memoryindex/dictionary.h>
+#include <vespa/searchlib/memoryindex/documentinverter.h>
+#include <vespa/searchlib/memoryindex/fieldinverter.h>
+#include <vespa/searchlib/memoryindex/ordereddocumentinserter.h>
+#include <vespa/searchlib/memoryindex/compact_document_words_store.h>
+#include <vespa/searchlib/queryeval/isourceselector.h>
+#include <vespa/searchlib/common/serialnum.h>
+#include <vespa/searchlib/util/dirtraverse.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/util/sync.h>
+#include <vespa/vespalib/util/threadstackexecutor.h>
+#include <vespa/vespalib/util/blockingthreadstackexecutor.h>
+#include <vespa/vespalib/io/fileutil.h>
+#include <vespa/searchlib/common/sequencedtaskexecutor.h>
+#include <set>
+
+using document::Document;
+using document::FieldValue;
+using search::btree::EntryRef;
+using search::index::DocBuilder;
+using search::index::Schema;
+using search::index::DummyFileHeaderContext;
+using search::memoryindex::Dictionary;
+using search::memoryindex::CompactDocumentWordsStore;
+using search::queryeval::Source;
+using search::SequencedTaskExecutor;
+using search::SerialNum;
+using std::set;
+using std::string;
+using vespalib::Gate;
+using vespalib::Monitor;
+using vespalib::MonitorGuard;
+using namespace proton;
+using namespace searchcorespi;
+using namespace searchcorespi::index;
+using search::TuneFileIndexing;
+using search::TuneFileIndexManager;
+using search::TuneFileAttributes;
+using vespalib::BlockingThreadStackExecutor;
+using vespalib::ThreadStackExecutor;
+using search::makeLambdaTask;
+
+namespace {
+
+class IndexManagerDummyReconfigurer : public searchcorespi::IIndexManager::Reconfigurer
+{
+ virtual bool
+ reconfigure(vespalib::Closure0<bool>::UP closure)
+ {
+ bool ret = true;
+ if (closure.get() != NULL)
+ ret = closure->call(); // Perform index manager reconfiguration now
+ return ret;
+ }
+
+};
+
+const string index_dir = "test_data";
+const string field_name = "field";
+const uint32_t docid = 1;
+
+Schema getSchema() {
+ Schema schema;
+ schema.addIndexField(Schema::IndexField(field_name, Schema::STRING));
+ return schema;
+}
+
+void removeTestData() {
+ FastOS_FileInterface::EmptyAndRemoveDirectory(index_dir.c_str());
+}
+
+Document::UP buildDocument(DocBuilder &doc_builder, int id,
+ const string &word) {
+ vespalib::asciistream ost;
+ ost << "doc::" << id;
+ doc_builder.startDocument(ost.str());
+ doc_builder.startIndexField(field_name).addStr(word).endField();
+ return doc_builder.endDocument();
+}
+
+std::shared_ptr<search::IDestructorCallback> emptyDestructorCallback;
+
+struct Fixture {
+ SerialNum _serial_num;
+ IndexManagerDummyReconfigurer _reconfigurer;
+ DummyFileHeaderContext _fileHeaderContext;
+ ExecutorThreadingService _writeService;
+ std::unique_ptr<IndexManager> _index_manager;
+ Schema _schema;
+ DocBuilder _builder;
+
+ Fixture()
+ : _serial_num(0),
+ _reconfigurer(),
+ _fileHeaderContext(),
+ _writeService(),
+ _index_manager(),
+ _schema(getSchema()),
+ _builder(_schema)
+ {
+ removeTestData();
+ vespalib::mkdir(index_dir, false);
+ _writeService.sync();
+ resetIndexManager();
+ }
+
+ ~Fixture() {
+ _writeService.shutdown();
+ }
+
+ template <class FunctionType>
+ inline void runAsMaster(FunctionType &&function) {
+ _writeService.master().execute(makeLambdaTask(std::move(function)));
+ _writeService.master().sync();
+ }
+ template <class FunctionType>
+ inline void runAsIndex(FunctionType &&function) {
+ _writeService.index().execute(makeLambdaTask(std::move(function)));
+ _writeService.index().sync();
+ }
+ void flushIndexManager();
+ Document::UP addDocument(uint32_t docid);
+ void resetIndexManager();
+ void removeDocument(uint32_t docId, SerialNum serialNum) {
+ runAsIndex([&]() { _index_manager->removeDocument(docId, serialNum);
+ _index_manager->commit(serialNum,
+ emptyDestructorCallback);
+ });
+ _writeService.indexFieldWriter().sync();
+ }
+};
+
+void Fixture::flushIndexManager() {
+ vespalib::Executor::Task::UP task;
+ SerialNum serialNum = _index_manager->getCurrentSerialNum();
+ auto &maintainer = _index_manager->getMaintainer();
+ runAsMaster([&]() { task = maintainer.initFlush(serialNum, NULL); });
+ if (task.get()) {
+ task->run();
+ }
+}
+
+Document::UP Fixture::addDocument(uint32_t id) {
+ Document::UP doc = buildDocument(_builder, id, "foo");
+ SerialNum serialNum = ++_serial_num;
+ runAsIndex([&]() { _index_manager->putDocument(id, *doc, serialNum);
+ _index_manager->commit(serialNum,
+ emptyDestructorCallback); });
+ _writeService.indexFieldWriter().sync();
+ return doc;
+}
+
+void Fixture::resetIndexManager() {
+ _index_manager.reset(0);
+ _index_manager.reset(
+ new IndexManager(index_dir, 0.0, 2, 0, getSchema(), getSchema(),
+ _reconfigurer, _writeService, _writeService.getMasterExecutor(),
+ TuneFileIndexManager(), TuneFileAttributes(),
+ _fileHeaderContext));
+}
+
+TEST_F("requireThatEmptyMemoryIndexIsNotFlushed", Fixture) {
+ IIndexCollection::SP sources = f._index_manager->getMaintainer().getSourceCollection();
+ EXPECT_EQUAL(1u, sources->getSourceCount());
+
+ f.flushIndexManager();
+
+ sources = f._index_manager->getMaintainer().getSourceCollection();
+ EXPECT_EQUAL(1u, sources->getSourceCount());
+}
+
+TEST_F("requireThatEmptyMemoryIndexIsFlushedIfSourceSelectorChanged", Fixture)
+{
+ IIndexCollection::SP sources = f._index_manager->getMaintainer().getSourceCollection();
+ EXPECT_EQUAL(1u, sources->getSourceCount());
+
+ f.removeDocument(docid, 42);
+ f.flushIndexManager();
+
+ sources = f._index_manager->getMaintainer().getSourceCollection();
+ EXPECT_EQUAL(2u, sources->getSourceCount());
+}
+
+set<uint32_t> readDiskIds(const string &dir, const string &type) {
+ set<uint32_t> ids;
+ FastOS_DirectoryScan dir_scan(dir.c_str());
+ while (dir_scan.ReadNext()) {
+ if (!dir_scan.IsDirectory()) {
+ continue;
+ }
+ string name = dir_scan.GetName();
+ const string flush_prefix("index." + type + ".");
+ string::size_type pos = name.find(flush_prefix);
+ if (pos != 0) {
+ continue;
+ }
+ vespalib::string idString(name.substr(flush_prefix.size()));
+ vespalib::asciistream ist(idString);
+ uint32_t id;
+ ist >> id;
+ ids.insert(id);
+ }
+ return ids;
+}
+
+TEST_F("requireThatMemoryIndexIsFlushed", Fixture) {
+ FastOS_StatInfo stat;
+ {
+ f.addDocument(docid);
+
+ IIndexCollection::SP sources =
+ f._index_manager->getMaintainer().getSourceCollection();
+ EXPECT_EQUAL(1u, sources->getSourceCount());
+ EXPECT_EQUAL(1u, sources->getSourceId(0));
+
+ IndexFlushTarget target(f._index_manager->getMaintainer());
+ EXPECT_EQUAL(0, target.getLastFlushTime().time());
+ vespalib::Executor::Task::UP flushTask;
+ f.runAsMaster([&]() { flushTask = target.initFlush(1); });
+ flushTask->run();
+ EXPECT_TRUE(FastOS_File::Stat("test_data/index.flush.1", &stat));
+ EXPECT_EQUAL(stat._modifiedTime, target.getLastFlushTime().time());
+
+ sources = f._index_manager->getMaintainer().getSourceCollection();
+ EXPECT_EQUAL(2u, sources->getSourceCount());
+ EXPECT_EQUAL(1u, sources->getSourceId(0));
+ EXPECT_EQUAL(2u, sources->getSourceId(1));
+
+ set<uint32_t> disk_ids = readDiskIds(index_dir, "flush");
+ ASSERT_TRUE(disk_ids.size() == 1);
+ EXPECT_EQUAL(1u, *disk_ids.begin());
+
+ FlushStats stats = target.getLastFlushStats();
+ EXPECT_EQUAL("test_data/index.flush.1", stats.getPath());
+ EXPECT_EQUAL(7u, stats.getPathElementsToLog());
+ }
+ { // verify last flush time when loading disk index
+ f.resetIndexManager();
+ IndexFlushTarget target(f._index_manager->getMaintainer());
+ EXPECT_EQUAL(stat._modifiedTime, target.getLastFlushTime().time());
+
+ // updated serial number & flush time when nothing to flush
+ FastOS_Thread::Sleep(8000);
+ fastos::TimeStamp now = fastos::ClockSystem::now();
+ vespalib::Executor::Task::UP task;
+ f.runAsMaster([&]() { task = target.initFlush(2); });
+ EXPECT_TRUE(task.get() == NULL);
+ EXPECT_EQUAL(2u, target.getFlushedSerialNum());
+ EXPECT_LESS(stat._modifiedTime, target.getLastFlushTime().time());
+ EXPECT_APPROX(now.time(), target.getLastFlushTime().time(), 8);
+ }
+}
+
+TEST_F("requireThatMultipleFlushesGivesMultipleIndexes", Fixture) {
+ size_t flush_count = 10;
+ for (size_t i = 0; i < flush_count; ++i) {
+ f.addDocument(docid);
+ f.flushIndexManager();
+ }
+ set<uint32_t> disk_ids = readDiskIds(index_dir, "flush");
+ EXPECT_EQUAL(flush_count, disk_ids.size());
+ uint32_t i = 1;
+ for (set<uint32_t>::iterator it = disk_ids.begin(); it != disk_ids.end();
+ ++it) {
+ EXPECT_EQUAL(i++, *it);
+ }
+}
+
+TEST_F("requireThatMaxFlushesSetsUrgent", Fixture) {
+ size_t flush_count = 20;
+ for (size_t i = 0; i < flush_count; ++i) {
+ f.addDocument(docid);
+ f.flushIndexManager();
+ }
+ IndexFusionTarget target(f._index_manager->getMaintainer());
+ EXPECT_TRUE(target.needUrgentFlush());
+}
+
+uint32_t getSource(const IIndexCollection &sources, uint32_t id) {
+ return sources.getSourceSelector().createIterator()->getSource(id);
+}
+
+TEST_F("requireThatPutDocumentUpdatesSelector", Fixture) {
+ f.addDocument(docid);
+ IIndexCollection::SP sources = f._index_manager->getMaintainer().getSourceCollection();
+ EXPECT_EQUAL(1u, getSource(*sources, docid));
+ f.flushIndexManager();
+ f.addDocument(docid + 1);
+ sources = f._index_manager->getMaintainer().getSourceCollection();
+ EXPECT_EQUAL(1u, getSource(*sources, docid));
+ EXPECT_EQUAL(2u, getSource(*sources, docid + 1));
+}
+
+TEST_F("requireThatRemoveDocumentUpdatesSelector", Fixture) {
+ Document::UP doc = f.addDocument(docid);
+ IIndexCollection::SP sources = f._index_manager->getMaintainer().getSourceCollection();
+ EXPECT_EQUAL(1u, getSource(*sources, docid));
+ f.flushIndexManager();
+ f.removeDocument(docid, ++f._serial_num);
+ sources = f._index_manager->getMaintainer().getSourceCollection();
+ EXPECT_EQUAL(2u, getSource(*sources, docid));
+}
+
+TEST_F("requireThatSourceSelectorIsFlushed", Fixture) {
+ f.addDocument(docid);
+ f.flushIndexManager();
+ FastOS_File file((index_dir + "/index.flush.1/selector.dat").c_str());
+ ASSERT_TRUE(file.OpenReadOnlyExisting());
+}
+
+TEST_F("requireThatFlushStatsAreCalculated", Fixture) {
+ Schema schema(getSchema());
+ Dictionary dict(schema);
+ SequencedTaskExecutor invertThreads(2);
+ SequencedTaskExecutor pushThreads(2);
+ search::memoryindex::DocumentInverter inverter(schema, invertThreads,
+ pushThreads);
+
+ uint64_t fixed_index_size = dict.getMemoryUsage().allocatedBytes();
+ uint64_t index_size = dict.getMemoryUsage().allocatedBytes() - fixed_index_size;
+ /// Must account for both docid 0 being reserved and the extra after.
+ uint64_t selector_size = (1) * sizeof(Source);
+ EXPECT_EQUAL(index_size, f._index_manager->getMaintainer().getFlushStats().memory_before_bytes -
+ f._index_manager->getMaintainer().getFlushStats().memory_after_bytes);
+ EXPECT_EQUAL(0u, f._index_manager->getMaintainer().getFlushStats().disk_write_bytes);
+ EXPECT_EQUAL(0u, f._index_manager->getMaintainer().getFlushStats().cpu_time_required);
+
+ Document::UP doc = f.addDocument(docid);
+ inverter.invertDocument(docid, *doc);
+ invertThreads.sync();
+ inverter.pushDocuments(dict,
+ std::shared_ptr<search::IDestructorCallback>());
+ pushThreads.sync();
+ index_size = dict.getMemoryUsage().allocatedBytes() - fixed_index_size;
+
+ /// Must account for both docid 0 being reserved and the extra after.
+ selector_size = (docid + 1) * sizeof(Source);
+ EXPECT_EQUAL(index_size,
+ f._index_manager->getMaintainer().getFlushStats().memory_before_bytes -
+ f._index_manager->getMaintainer().getFlushStats().memory_after_bytes);
+ EXPECT_EQUAL(selector_size + index_size,
+ f._index_manager->getMaintainer().getFlushStats().disk_write_bytes);
+ EXPECT_EQUAL(selector_size * (3+1) + index_size,
+ f._index_manager->getMaintainer().getFlushStats().cpu_time_required);
+
+ doc = f.addDocument(docid + 10);
+ inverter.invertDocument(docid + 10, *doc);
+ doc = f.addDocument(docid + 100);
+ inverter.invertDocument(docid + 100, *doc);
+ invertThreads.sync();
+ inverter.pushDocuments(dict,
+ std::shared_ptr<search::IDestructorCallback>());
+ pushThreads.sync();
+ index_size = dict.getMemoryUsage().allocatedBytes() - fixed_index_size;
+ /// Must account for both docid 0 being reserved and the extra after.
+ selector_size = (docid + 100 + 1) * sizeof(Source);
+ EXPECT_EQUAL(index_size,
+ f._index_manager->getMaintainer().getFlushStats().memory_before_bytes -
+ f._index_manager->getMaintainer().getFlushStats().memory_after_bytes);
+ EXPECT_EQUAL(selector_size + index_size,
+ f._index_manager->getMaintainer().getFlushStats().disk_write_bytes);
+ EXPECT_EQUAL(selector_size * (3+1) + index_size,
+ f._index_manager->getMaintainer().getFlushStats().cpu_time_required);
+}
+
+TEST_F("requireThatFusionStatsAreCalculated", Fixture) {
+ f.addDocument(docid);
+ EXPECT_EQUAL(0u, f._index_manager->getMaintainer().getFusionStats().diskUsage);
+ f.flushIndexManager();
+ ASSERT_TRUE(f._index_manager->getMaintainer().getFusionStats().diskUsage > 0);
+}
+
+TEST_F("requireThatPutDocumentUpdatesSerialNum", Fixture) {
+ f._serial_num = 0;
+ EXPECT_EQUAL(0u, f._index_manager->getCurrentSerialNum());
+ f.addDocument(docid);
+ EXPECT_EQUAL(1u, f._index_manager->getCurrentSerialNum());
+}
+
+TEST_F("requireThatRemoveDocumentUpdatesSerialNum", Fixture) {
+ f._serial_num = 0;
+ Document::UP doc = f.addDocument(docid);
+ EXPECT_EQUAL(1u, f._index_manager->getCurrentSerialNum());
+ f.removeDocument(docid, ++f._serial_num);
+ EXPECT_EQUAL(2u, f._index_manager->getCurrentSerialNum());
+}
+
+TEST_F("requireThatFlushUpdatesSerialNum", Fixture) {
+ f._serial_num = 0;
+ f.addDocument(docid);
+ EXPECT_EQUAL(1u, f._index_manager->getCurrentSerialNum());
+ EXPECT_EQUAL(0u, f._index_manager->getFlushedSerialNum());
+ f.flushIndexManager();
+ EXPECT_EQUAL(1u, f._index_manager->getCurrentSerialNum());
+ EXPECT_EQUAL(1u, f._index_manager->getFlushedSerialNum());
+}
+
+TEST_F("requireThatFusionUpdatesIndexes", Fixture) {
+ for (size_t i = 0; i < 10; ++i) {
+ f.addDocument(docid + i);
+ f.flushIndexManager();
+ }
+ uint32_t ids[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
+ IIndexCollection::SP
+ source_list(f._index_manager->getMaintainer().getSourceCollection());
+ EXPECT_EQUAL(10u + 1, source_list->getSourceCount()); // disk + mem
+ EXPECT_EQUAL(ids[2], getSource(*source_list, docid + 2));
+ EXPECT_EQUAL(ids[6], getSource(*source_list, docid + 6));
+
+ FusionSpec fusion_spec;
+ fusion_spec.flush_ids.assign(ids, ids + 4);
+ f._index_manager->getMaintainer().runFusion(fusion_spec);
+
+ set<uint32_t> fusion_ids = readDiskIds(index_dir, "fusion");
+ EXPECT_EQUAL(1u, fusion_ids.size());
+ EXPECT_EQUAL(ids[3], *fusion_ids.begin());
+
+ source_list = f._index_manager->getMaintainer().getSourceCollection();
+ EXPECT_EQUAL(10u + 1 - 4 + 1, source_list->getSourceCount());
+ EXPECT_EQUAL(0u, getSource(*source_list, docid + 2));
+ EXPECT_EQUAL(3u, getSource(*source_list, docid + 6));
+}
+
+TEST_F("requireThatFlushTriggersFusion", Fixture) {
+ const uint32_t fusion_trigger = 5;
+ f.resetIndexManager();
+
+ for (size_t i = 1; i <= fusion_trigger; ++i) {
+ f.addDocument(docid);
+ f.flushIndexManager();
+ }
+ IFlushTarget::SP target(new IndexFusionTarget(f._index_manager->getMaintainer()));
+ target->initFlush(0)->run();
+ f.addDocument(docid);
+ f.flushIndexManager();
+ set<uint32_t> fusion_ids = readDiskIds(index_dir, "fusion");
+ EXPECT_EQUAL(1u, fusion_ids.size());
+ EXPECT_EQUAL(5u, *fusion_ids.begin());
+ set<uint32_t> flush_ids = readDiskIds(index_dir, "flush");
+ EXPECT_EQUAL(1u, flush_ids.size());
+ EXPECT_EQUAL(6u, *flush_ids.begin());
+}
+
+TEST_F("requireThatFusionTargetIsSetUp", Fixture) {
+ f.addDocument(docid);
+ f.flushIndexManager();
+ f.addDocument(docid);
+ f.flushIndexManager();
+ IFlushTarget::List lst(f._index_manager->getFlushTargets());
+ EXPECT_EQUAL(2u, lst.size());
+ IFlushTarget::SP target(lst.at(1));
+ EXPECT_EQUAL("memoryindex.fusion", target->getName());
+ EXPECT_FALSE(target->needUrgentFlush());
+ f.addDocument(docid);
+ f.flushIndexManager();
+ lst = f._index_manager->getFlushTargets();
+ EXPECT_EQUAL(2u, lst.size());
+ target = lst.at(1);
+ EXPECT_EQUAL("memoryindex.fusion", target->getName());
+ EXPECT_TRUE(target->needUrgentFlush());
+}
+
+TEST_F("requireThatFusionCleansUpOldIndexes", Fixture) {
+ f.addDocument(docid);
+ f.flushIndexManager();
+ // hold reference to index.flush.1
+ IIndexCollection::SP fsc = f._index_manager->getMaintainer().getSourceCollection();
+
+ f.addDocument(docid + 1);
+ f.flushIndexManager();
+
+ set<uint32_t> flush_ids = readDiskIds(index_dir, "flush");
+ EXPECT_EQUAL(2u, flush_ids.size());
+
+ FusionSpec fusion_spec;
+ fusion_spec.flush_ids.push_back(1);
+ fusion_spec.flush_ids.push_back(2);
+ f._index_manager->getMaintainer().runFusion(fusion_spec);
+
+ flush_ids = readDiskIds(index_dir, "flush");
+ EXPECT_EQUAL(1u, flush_ids.size());
+ EXPECT_EQUAL(1u, *flush_ids.begin());
+
+ fsc.reset();
+ f._index_manager->getMaintainer().removeOldDiskIndexes();
+ flush_ids = readDiskIds(index_dir, "flush");
+ EXPECT_EQUAL(0u, flush_ids.size());
+}
+
+bool contains(const IIndexCollection &fsc, uint32_t id) {
+ set<uint32_t> ids;
+ for (size_t i = 0; i < fsc.getSourceCount(); ++i) {
+ ids.insert(fsc.getSourceId(i));
+ }
+ return ids.find(id) != ids.end();
+}
+
+bool indexExists(const string &type, uint32_t id) {
+ set<uint32_t> disk_ids = readDiskIds(index_dir, type);
+ return disk_ids.find(id) != disk_ids.end();
+}
+
+TEST_F("requireThatDiskIndexesAreLoadedOnStartup", Fixture) {
+ f.addDocument(docid);
+ f.flushIndexManager();
+ f._index_manager.reset(0);
+
+ ASSERT_TRUE(indexExists("flush", 1));
+ f.resetIndexManager();
+
+ IIndexCollection::SP fsc = f._index_manager->getMaintainer().getSourceCollection();
+ EXPECT_EQUAL(2u, fsc->getSourceCount());
+ EXPECT_TRUE(contains(*fsc, 1u));
+ EXPECT_TRUE(contains(*fsc, 2u));
+ EXPECT_EQUAL(1u, getSource(*fsc, docid));
+ fsc.reset();
+
+
+ f.addDocument(docid + 1);
+ f.flushIndexManager();
+ ASSERT_TRUE(indexExists("flush", 2));
+ FusionSpec fusion_spec;
+ fusion_spec.flush_ids.push_back(1);
+ fusion_spec.flush_ids.push_back(2);
+ f._index_manager->getMaintainer().runFusion(fusion_spec);
+ f._index_manager.reset(0);
+
+ ASSERT_TRUE(!indexExists("flush", 1));
+ ASSERT_TRUE(!indexExists("flush", 2));
+ ASSERT_TRUE(indexExists("fusion", 2));
+ f.resetIndexManager();
+
+ fsc = f._index_manager->getMaintainer().getSourceCollection();
+ EXPECT_EQUAL(2u, fsc->getSourceCount());
+ EXPECT_TRUE(contains(*fsc, 0u));
+ EXPECT_TRUE(contains(*fsc, 1u));
+ EXPECT_EQUAL(0u, getSource(*fsc, docid));
+ EXPECT_EQUAL(0u, getSource(*fsc, docid + 1));
+ /// Must account for both docid 0 being reserved and the extra after.
+ EXPECT_EQUAL(docid + 2, fsc->getSourceSelector().getDocIdLimit());
+ fsc.reset();
+
+
+ f.addDocument(docid + 2);
+ f.flushIndexManager();
+ f._index_manager.reset(0);
+
+ ASSERT_TRUE(indexExists("fusion", 2));
+ ASSERT_TRUE(indexExists("flush", 3));
+ f.resetIndexManager();
+
+ fsc = f._index_manager->getMaintainer().getSourceCollection();
+ EXPECT_EQUAL(3u, fsc->getSourceCount());
+ EXPECT_TRUE(contains(*fsc, 0u));
+ EXPECT_TRUE(contains(*fsc, 1u));
+ EXPECT_TRUE(contains(*fsc, 2u));
+ EXPECT_EQUAL(0u, getSource(*fsc, docid));
+ EXPECT_EQUAL(0u, getSource(*fsc, docid + 1));
+ EXPECT_EQUAL(1u, getSource(*fsc, docid + 2));
+ fsc.reset();
+}
+
+TEST_F("requireThatExistingIndexesAreToBeFusionedOnStartup", Fixture) {
+ f.addDocument(docid);
+ f.flushIndexManager();
+ f.addDocument(docid + 1);
+ f.flushIndexManager();
+ f.resetIndexManager();
+
+ IFlushTarget::SP target(new IndexFusionTarget(f._index_manager->getMaintainer()));
+ target->initFlush(0)->run();
+ f.addDocument(docid);
+ f.flushIndexManager();
+
+ set<uint32_t> fusion_ids = readDiskIds(index_dir, "fusion");
+ EXPECT_EQUAL(1u, fusion_ids.size());
+ EXPECT_EQUAL(2u, *fusion_ids.begin());
+}
+
+TEST_F("requireThatSerialNumberIsWrittenOnFlush", Fixture) {
+ f.addDocument(docid);
+ f.flushIndexManager();
+ FastOS_File file((index_dir + "/index.flush.1/serial.dat").c_str());
+ EXPECT_TRUE(file.OpenReadOnly());
+}
+
+TEST_F("requireThatSerialNumberIsCopiedOnFusion", Fixture) {
+ f.addDocument(docid);
+ f.flushIndexManager();
+ f.addDocument(docid);
+ f.flushIndexManager();
+ FusionSpec fusion_spec;
+ fusion_spec.flush_ids.push_back(1);
+ fusion_spec.flush_ids.push_back(2);
+ f._index_manager->getMaintainer().runFusion(fusion_spec);
+ FastOS_File file((index_dir + "/index.fusion.2/serial.dat").c_str());
+ EXPECT_TRUE(file.OpenReadOnly());
+}
+
+TEST_F("requireThatSerialNumberIsReadOnLoad", Fixture) {
+ f.addDocument(docid);
+ f.flushIndexManager();
+ EXPECT_EQUAL(f._serial_num, f._index_manager->getFlushedSerialNum());
+ f.resetIndexManager();
+ EXPECT_EQUAL(f._serial_num, f._index_manager->getFlushedSerialNum());
+
+ f.addDocument(docid);
+ f.flushIndexManager();
+ f.addDocument(docid);
+ f.flushIndexManager();
+ search::SerialNum serial = f._serial_num;
+ f.addDocument(docid);
+ f.resetIndexManager();
+ EXPECT_EQUAL(serial, f._index_manager->getFlushedSerialNum());
+}
+
+void crippleFusion(uint32_t fusionId) {
+ vespalib::asciistream ost;
+ ost << index_dir << "/index.flush." << fusionId << "/serial.dat";
+ FastOS_File(ost.str().c_str()).Delete();
+}
+
+TEST_F("requireThatFailedFusionIsRetried", Fixture) {
+ f.resetIndexManager();
+
+ f.addDocument(docid);
+ f.flushIndexManager();
+ f.addDocument(docid);
+ f.flushIndexManager();
+
+ crippleFusion(2);
+
+ IndexFusionTarget target(f._index_manager->getMaintainer());
+ vespalib::Executor::Task::UP fusionTask = target.initFlush(1);
+ fusionTask->run();
+
+ FusionSpec spec = f._index_manager->getMaintainer().getFusionSpec();
+ set<uint32_t> fusion_ids = readDiskIds(index_dir, "fusion");
+ EXPECT_TRUE(fusion_ids.empty());
+ EXPECT_EQUAL(0u, spec.last_fusion_id);
+ EXPECT_EQUAL(2u, spec.flush_ids.size());
+ EXPECT_EQUAL(1u, spec.flush_ids[0]);
+ EXPECT_EQUAL(2u, spec.flush_ids[1]);
+}
+
+TEST_F("require that wipeHistory updates schema on disk", Fixture) {
+ Schema empty_schema;
+ f.addDocument(docid);
+ f.flushIndexManager();
+ f.runAsMaster([&]() { f._index_manager->setSchema(empty_schema,
+ empty_schema); });
+ f.addDocument(docid);
+ f.flushIndexManager();
+
+ Schema s;
+ s.loadFromFile("test_data/index.flush.1/schema.txt");
+ EXPECT_EQUAL(1u, s.getNumIndexFields());
+
+ f.runAsMaster([&]() { f._index_manager->wipeHistory(f._serial_num,
+ empty_schema); });
+
+ s.loadFromFile("test_data/index.flush.1/schema.txt");
+ EXPECT_EQUAL(0u, s.getNumIndexFields());
+}
+
+
+} // namespace
+
+TEST_MAIN() {
+ TEST_DO(removeTestData());
+ DummyFileHeaderContext::setCreator("indexmanager_test");
+ TEST_RUN_ALL();
+ TEST_DO(removeTestData());
+}
diff --git a/searchcore/src/tests/proton/initializer/.gitignore b/searchcore/src/tests/proton/initializer/.gitignore
new file mode 100644
index 00000000000..486dbb0b11e
--- /dev/null
+++ b/searchcore/src/tests/proton/initializer/.gitignore
@@ -0,0 +1 @@
+searchcore_task_runner_test_app
diff --git a/searchcore/src/tests/proton/initializer/CMakeLists.txt b/searchcore/src/tests/proton/initializer/CMakeLists.txt
new file mode 100644
index 00000000000..0f20324cae3
--- /dev/null
+++ b/searchcore/src/tests/proton/initializer/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_task_runner_test_app
+ SOURCES
+ task_runner_test.cpp
+ DEPENDS
+ searchcore_initializer
+)
+vespa_add_test(NAME searchcore_task_runner_test_app COMMAND searchcore_task_runner_test_app)
diff --git a/searchcore/src/tests/proton/initializer/DESC b/searchcore/src/tests/proton/initializer/DESC
new file mode 100644
index 00000000000..be2743a181e
--- /dev/null
+++ b/searchcore/src/tests/proton/initializer/DESC
@@ -0,0 +1 @@
+TaskRunner test. Take a look at task_runner_test.cpp for details.
diff --git a/searchcore/src/tests/proton/initializer/FILES b/searchcore/src/tests/proton/initializer/FILES
new file mode 100644
index 00000000000..bbbbe1c2d86
--- /dev/null
+++ b/searchcore/src/tests/proton/initializer/FILES
@@ -0,0 +1 @@
+task_runner_test.cpp
diff --git a/searchcore/src/tests/proton/initializer/task_runner_test.cpp b/searchcore/src/tests/proton/initializer/task_runner_test.cpp
new file mode 100644
index 00000000000..afa807fd0e6
--- /dev/null
+++ b/searchcore/src/tests/proton/initializer/task_runner_test.cpp
@@ -0,0 +1,141 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("task_runner_test");
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/searchcore/proton/initializer/initializer_task.h>
+#include <vespa/searchcore/proton/initializer/task_runner.h>
+#include <vespa/vespalib/util/threadstackexecutor.h>
+#include <vespa/vespalib/stllike/string.h>
+#include <mutex>
+
+using proton::initializer::InitializerTask;
+using proton::initializer::TaskRunner;
+
+struct TestLog
+{
+ std::mutex _lock;
+ vespalib::string _log;
+ using UP = std::unique_ptr<TestLog>;
+
+ TestLog()
+ : _lock(),
+ _log()
+ {
+ }
+
+ void append(vespalib::string str) {
+ std::lock_guard<std::mutex> guard(_lock);
+ _log += str;
+ }
+
+ vespalib::string result() const { return _log; }
+};
+
+class NamedTask : public InitializerTask
+{
+protected:
+ vespalib::string _name;
+ TestLog &_log;
+public:
+ NamedTask(const vespalib::string &name, TestLog &log)
+ : _name(name),
+ _log(log)
+ {
+ }
+
+ virtual void run() { _log.append(_name); }
+};
+
+
+struct TestJob {
+ TestLog::UP _log;
+ InitializerTask::SP _root;
+
+ TestJob(TestLog::UP log, InitializerTask::SP root)
+ : _log(std::move(log)),
+ _root(std::move(root))
+ {
+ }
+
+ static TestJob setupCDependsOnAandB()
+ {
+ TestLog::UP log = std::make_unique<TestLog>();
+ InitializerTask::SP A(std::make_shared<NamedTask>("A", *log));
+ InitializerTask::SP B(std::make_shared<NamedTask>("B", *log));
+ InitializerTask::SP C(std::make_shared<NamedTask>("C", *log));
+ C->addDependency(A);
+ C->addDependency(B);
+ return TestJob(std::move(log), std::move(C));
+ }
+
+ static TestJob setupDiamond()
+ {
+ TestLog::UP log = std::make_unique<TestLog>();
+ InitializerTask::SP A(std::make_shared<NamedTask>("A", *log));
+ InitializerTask::SP B(std::make_shared<NamedTask>("B", *log));
+ InitializerTask::SP C(std::make_shared<NamedTask>("C", *log));
+ InitializerTask::SP D(std::make_shared<NamedTask>("D", *log));
+ C->addDependency(A);
+ C->addDependency(B);
+ A->addDependency(D);
+ B->addDependency(D);
+ return TestJob(std::move(log), std::move(C));
+ }
+};
+
+
+struct Fixture
+{
+ vespalib::ThreadStackExecutor _executor;
+ TaskRunner _taskRunner;
+
+ Fixture(uint32_t numThreads = 1)
+ : _executor(numThreads, 128 * 1024),
+ _taskRunner(_executor)
+ {
+ }
+
+ void run(const InitializerTask::SP &task) { _taskRunner.runTask(task); }
+};
+
+
+TEST_F("1 thread, 2 dependees, 1 depender", Fixture(1))
+{
+ TestJob job = TestJob::setupCDependsOnAandB();
+ f.run(job._root);
+ EXPECT_EQUAL("ABC", job._log->result());
+}
+
+TEST_F("1 thread, dag graph", Fixture(1))
+{
+ for (int iter = 0; iter < 1000; ++iter) {
+ TestJob job = TestJob::setupDiamond();
+ f.run(job._root);
+ EXPECT_EQUAL("DABC", job._log->result());
+ }
+}
+
+TEST_F("multiple threads, dag graph", Fixture(10))
+{
+ int dabc_count = 0;
+ int dbac_count = 0;
+ for (int iter = 0; iter < 1000; ++iter) {
+ TestJob job = TestJob::setupDiamond();
+ f.run(job._root);
+ vespalib::string result = job._log->result();
+ EXPECT_TRUE("DABC" == result || "DBAC" == result);
+ if ("DABC" == result) {
+ ++dabc_count;
+ }
+ if ("DBAC" == result) {
+ ++dbac_count;
+ }
+ }
+ LOG(info, "dabc=%d, dbac=%d", dabc_count, dbac_count);
+}
+
+TEST_MAIN()
+{
+ TEST_RUN_ALL();
+}
diff --git a/searchcore/src/tests/proton/matchengine/.gitignore b/searchcore/src/tests/proton/matchengine/.gitignore
new file mode 100644
index 00000000000..4e2a3a6df8d
--- /dev/null
+++ b/searchcore/src/tests/proton/matchengine/.gitignore
@@ -0,0 +1,6 @@
+.depend
+Makefile
+matchengine_test
+query_test
+queryenvbuilder_test
+searchcore_matchengine_test_app
diff --git a/searchcore/src/tests/proton/matchengine/CMakeLists.txt b/searchcore/src/tests/proton/matchengine/CMakeLists.txt
new file mode 100644
index 00000000000..32c0b47ae4b
--- /dev/null
+++ b/searchcore/src/tests/proton/matchengine/CMakeLists.txt
@@ -0,0 +1,10 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_matchengine_test_app
+ SOURCES
+ matchengine.cpp
+ DEPENDS
+ searchcore_matchengine
+ searchcore_matching
+ searchcore_pcommon
+)
+vespa_add_test(NAME searchcore_matchengine_test_app COMMAND searchcore_matchengine_test_app)
diff --git a/searchcore/src/tests/proton/matchengine/DESC b/searchcore/src/tests/proton/matchengine/DESC
new file mode 100644
index 00000000000..1530502e2de
--- /dev/null
+++ b/searchcore/src/tests/proton/matchengine/DESC
@@ -0,0 +1 @@
+matchengine test. Take a look at matchengine.cpp for details.
diff --git a/searchcore/src/tests/proton/matchengine/FILES b/searchcore/src/tests/proton/matchengine/FILES
new file mode 100644
index 00000000000..91961877ee2
--- /dev/null
+++ b/searchcore/src/tests/proton/matchengine/FILES
@@ -0,0 +1 @@
+matchengine.cpp
diff --git a/searchcore/src/tests/proton/matchengine/matchengine.cpp b/searchcore/src/tests/proton/matchengine/matchengine.cpp
new file mode 100644
index 00000000000..617c2f81b74
--- /dev/null
+++ b/searchcore/src/tests/proton/matchengine/matchengine.cpp
@@ -0,0 +1,214 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("matchengine_test");
+
+#include <vespa/searchcore/proton/matchengine/matchengine.h>
+#include <vespa/vespalib/data/slime/slime.h>
+#include <vespa/vespalib/testkit/test_kit.h>
+
+using namespace proton;
+using namespace search::engine;
+using namespace vespalib::slime;
+using vespalib::Slime;
+
+class MySearchHandler : public ISearchHandler {
+ size_t _numHits;
+ std::string _name;
+ std::string _reply;
+public:
+ MySearchHandler(size_t numHits = 0,
+ const std::string & name = "my",
+ const std::string & reply = "myreply") :
+ _numHits(numHits), _name(name), _reply(reply) {}
+ virtual DocsumReply::UP getDocsums(const DocsumRequest &) {
+ return DocsumReply::UP(new DocsumReply);
+ }
+
+ virtual search::engine::SearchReply::UP match(
+ const ISearchHandler::SP &,
+ const search::engine::SearchRequest &,
+ vespalib::ThreadBundle &) const {
+ SearchReply::UP retval(new SearchReply);
+ for (size_t i = 0; i < _numHits; ++i) {
+ retval->hits.push_back(SearchReply::Hit());
+ }
+ return retval;
+ }
+};
+
+class LocalSearchClient : public SearchClient {
+private:
+ vespalib::Monitor _monitor;
+ SearchReply::UP _reply;
+
+public:
+ void searchDone(SearchReply::UP reply) {
+ vespalib::MonitorGuard guard(_monitor);
+ _reply = std::move(reply);
+ guard.broadcast();
+ }
+
+ SearchReply::UP getReply(uint32_t millis) {
+ vespalib::MonitorGuard guard(_monitor);
+ vespalib::TimedWaiter waiter(guard, millis);
+ while (_reply.get() == NULL && waiter.hasTime()) {
+ waiter.wait();
+ }
+ return std::move(_reply);
+ }
+};
+
+TEST("requireThatSearchesExecute")
+{
+ int numMatcherThreads = 16;
+ MatchEngine engine(numMatcherThreads, 1, 7);
+ engine.setOnline();
+ engine.setNodeUp(true);
+
+ MySearchHandler::SP handler(new MySearchHandler);
+ DocTypeName dtnvfoo("foo");
+ engine.putSearchHandler(dtnvfoo, handler);
+
+ LocalSearchClient client;
+ SearchRequest::Source request(new SearchRequest());
+ SearchReply::UP reply = engine.search(std::move(request), client);
+ EXPECT_TRUE(reply.get() == NULL);
+
+ reply = client.getReply(10000);
+ EXPECT_TRUE(reply.get() != NULL);
+}
+
+bool
+assertSearchReply(MatchEngine & engine, const std::string & searchDocType, size_t expHits)
+{
+ SearchRequest *request = new SearchRequest();
+ request->propertiesMap.lookupCreate(search::MapNames::MATCH).add("documentdb.searchdoctype", searchDocType);
+ LocalSearchClient client;
+ engine.search(SearchRequest::Source(request), client);
+ SearchReply::UP reply = client.getReply(10000);
+ return EXPECT_EQUAL(expHits, reply->hits.size());
+}
+
+TEST("requireThatCorrectHandlerIsUsed")
+{
+ MatchEngine engine(1, 1, 7);
+ engine.setOnline();
+ engine.setNodeUp(true);
+ ISearchHandler::SP h1(new MySearchHandler(2));
+ ISearchHandler::SP h2(new MySearchHandler(4));
+ ISearchHandler::SP h3(new MySearchHandler(6));
+ DocTypeName dtnvfoo("foo");
+ DocTypeName dtnvbar("bar");
+ DocTypeName dtnvbaz("baz");
+ engine.putSearchHandler(dtnvfoo, h1);
+ engine.putSearchHandler(dtnvbar, h2);
+ engine.putSearchHandler(dtnvbaz, h3);
+
+ EXPECT_TRUE(assertSearchReply(engine, "foo", 2));
+ EXPECT_TRUE(assertSearchReply(engine, "bar", 4));
+ EXPECT_TRUE(assertSearchReply(engine, "baz", 6));
+ EXPECT_TRUE(assertSearchReply(engine, "not", 4)); // uses the first (sorted on name)
+}
+
+struct ObserveBundleMatchHandler : MySearchHandler {
+ typedef std::shared_ptr<ObserveBundleMatchHandler> SP;
+ mutable size_t bundleSize;
+ ObserveBundleMatchHandler() : bundleSize(0) {}
+
+ virtual search::engine::SearchReply::UP match(
+ const ISearchHandler::SP &,
+ const search::engine::SearchRequest &,
+ vespalib::ThreadBundle &threadBundle) const
+ {
+ bundleSize = threadBundle.size();
+ return SearchReply::UP(new SearchReply);
+ }
+};
+
+TEST("requireThatBundlesAreUsed")
+{
+ MatchEngine engine(15, 5, 7);
+ engine.setOnline();
+ engine.setNodeUp(true);
+
+ ObserveBundleMatchHandler::SP handler(new ObserveBundleMatchHandler());
+ DocTypeName dtnvfoo("foo");
+ engine.putSearchHandler(dtnvfoo, handler);
+
+ LocalSearchClient client;
+ SearchRequest::Source request(new SearchRequest());
+ engine.search(std::move(request), client);
+ SearchReply::UP reply = client.getReply(10000);
+ EXPECT_EQUAL(7u, reply->getDistributionKey());
+ EXPECT_EQUAL(5u, handler->bundleSize);
+}
+
+TEST("requireThatHandlersCanBeRemoved")
+{
+ MatchEngine engine(1, 1, 7);
+ engine.setOnline();
+ engine.setNodeUp(true);
+ ISearchHandler::SP h(new MySearchHandler(1));
+ DocTypeName docType("foo");
+ engine.putSearchHandler(docType, h);
+
+ ISearchHandler::SP r = engine.getSearchHandler(docType);
+ EXPECT_TRUE(r.get() != NULL);
+ EXPECT_TRUE(h.get() == r.get());
+
+ r = engine.removeSearchHandler(docType);
+ EXPECT_TRUE(r.get() != NULL);
+ EXPECT_TRUE(h.get() == r.get());
+
+ r = engine.getSearchHandler(docType);
+ EXPECT_TRUE(r.get() == NULL);
+}
+
+TEST("requireThatEngineCanBeSetOffline")
+{
+ MatchEngine engine(1, 1, 7);
+ engine.setNodeUp(true);
+ engine.setOnline();
+ engine.setInService();
+ ASSERT_TRUE(engine.isOnline());
+ engine.setOffline();
+ ASSERT_FALSE(engine.isOnline());
+ engine.setOnline();
+ ASSERT_TRUE(engine.isOnline());
+ engine.setOutOfService();
+ ASSERT_FALSE(engine.isOnline());
+}
+
+TEST("requireThatEmptySearchReplyIsReturnedWhenEngineIsClosed")
+{
+ MatchEngine engine(1, 1, 7);
+ engine.setOnline();
+ engine.setNodeUp(true);
+ engine.close();
+ LocalSearchClient client;
+ SearchRequest::Source request(new SearchRequest());
+ SearchReply::UP reply = engine.search(std::move(request), client);
+ EXPECT_TRUE(reply.get() != NULL);
+ EXPECT_EQUAL(0u, reply->hits.size());
+ EXPECT_EQUAL(7u, reply->getDistributionKey());
+}
+
+TEST("requireThatStateIsReported")
+{
+ MatchEngine engine(1, 1, 7);
+
+ Slime slime;
+ SlimeInserter inserter(slime);
+ engine.get_state(inserter, false);
+ EXPECT_EQUAL(
+ "{\n"
+ " \"status\": {\n"
+ " \"state\": \"OFFLINE\",\n"
+ " \"message\": \"Search interface is offline\"\n"
+ " }\n"
+ "}\n",
+ slime.toString());
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/matching/.cvsignore b/searchcore/src/tests/proton/matching/.cvsignore
new file mode 100644
index 00000000000..75b0a127c8f
--- /dev/null
+++ b/searchcore/src/tests/proton/matching/.cvsignore
@@ -0,0 +1,3 @@
+.depend
+Makefile
+matching_test
diff --git a/searchcore/src/tests/proton/matching/.gitignore b/searchcore/src/tests/proton/matching/.gitignore
new file mode 100644
index 00000000000..c9789272a35
--- /dev/null
+++ b/searchcore/src/tests/proton/matching/.gitignore
@@ -0,0 +1,14 @@
+.depend
+Makefile
+matching_test
+query_test
+querynodes_test
+resolveviewvisitor_test
+termdataextractor_test
+searchcore_matching_stats_test_app
+searchcore_matching_test_app
+searchcore_query_test_app
+searchcore_querynodes_test_app
+searchcore_resolveviewvisitor_test_app
+searchcore_sessionmanager_test_app
+searchcore_termdataextractor_test_app
diff --git a/searchcore/src/tests/proton/matching/CMakeLists.txt b/searchcore/src/tests/proton/matching/CMakeLists.txt
new file mode 100644
index 00000000000..8007ff0344d
--- /dev/null
+++ b/searchcore/src/tests/proton/matching/CMakeLists.txt
@@ -0,0 +1,60 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_matching_test_app
+ SOURCES
+ matching_test.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_fconfig
+ searchcore_matching
+ searchcore_feedoperation
+ searchcore_documentmetastore
+ searchcore_bucketdb
+ searchcore_pcommon
+ searchcore_grouping
+ searchcore_util
+)
+vespa_add_test(NAME searchcore_matching_test_app COMMAND searchcore_matching_test_app)
+vespa_add_executable(searchcore_sessionmanager_test_app
+ SOURCES
+ sessionmanager_test.cpp
+ DEPENDS
+ searchcore_matching
+ searchcore_grouping
+)
+vespa_add_test(NAME searchcore_sessionmanager_test_app COMMAND searchcore_sessionmanager_test_app)
+vespa_add_executable(searchcore_matching_stats_test_app
+ SOURCES
+ matching_stats_test.cpp
+ DEPENDS
+ searchcore_matching
+)
+vespa_add_test(NAME searchcore_matching_stats_test_app COMMAND searchcore_matching_stats_test_app)
+vespa_add_executable(searchcore_query_test_app
+ SOURCES
+ query_test.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_matching
+)
+vespa_add_test(NAME searchcore_query_test_app COMMAND searchcore_query_test_app)
+vespa_add_executable(searchcore_termdataextractor_test_app
+ SOURCES
+ termdataextractor_test.cpp
+ DEPENDS
+ searchcore_matching
+)
+vespa_add_test(NAME searchcore_termdataextractor_test_app COMMAND searchcore_termdataextractor_test_app)
+vespa_add_executable(searchcore_resolveviewvisitor_test_app
+ SOURCES
+ resolveviewvisitor_test.cpp
+ DEPENDS
+ searchcore_matching
+)
+vespa_add_test(NAME searchcore_resolveviewvisitor_test_app COMMAND searchcore_resolveviewvisitor_test_app)
+vespa_add_executable(searchcore_querynodes_test_app
+ SOURCES
+ querynodes_test.cpp
+ DEPENDS
+ searchcore_matching
+)
+vespa_add_test(NAME searchcore_querynodes_test_app COMMAND searchcore_querynodes_test_app)
diff --git a/searchcore/src/tests/proton/matching/DESC b/searchcore/src/tests/proton/matching/DESC
new file mode 100644
index 00000000000..435b17f333e
--- /dev/null
+++ b/searchcore/src/tests/proton/matching/DESC
@@ -0,0 +1 @@
+matching test. Take a look at matching_test.cpp for details.
diff --git a/searchcore/src/tests/proton/matching/FILES b/searchcore/src/tests/proton/matching/FILES
new file mode 100644
index 00000000000..0213f77d899
--- /dev/null
+++ b/searchcore/src/tests/proton/matching/FILES
@@ -0,0 +1 @@
+matching_test.cpp
diff --git a/searchcore/src/tests/proton/matching/docid_range_scheduler/.gitignore b/searchcore/src/tests/proton/matching/docid_range_scheduler/.gitignore
new file mode 100644
index 00000000000..8de390797da
--- /dev/null
+++ b/searchcore/src/tests/proton/matching/docid_range_scheduler/.gitignore
@@ -0,0 +1,3 @@
+/docid_range_scheduler_bench
+searchcore_docid_range_scheduler_test_app
+searchcore_docid_range_scheduler_bench_app
diff --git a/searchcore/src/tests/proton/matching/docid_range_scheduler/CMakeLists.txt b/searchcore/src/tests/proton/matching/docid_range_scheduler/CMakeLists.txt
new file mode 100644
index 00000000000..3892ac41b92
--- /dev/null
+++ b/searchcore/src/tests/proton/matching/docid_range_scheduler/CMakeLists.txt
@@ -0,0 +1,15 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_docid_range_scheduler_test_app
+ SOURCES
+ docid_range_scheduler_test.cpp
+ DEPENDS
+ searchcore_matching
+)
+vespa_add_test(NAME searchcore_docid_range_scheduler_test_app COMMAND searchcore_docid_range_scheduler_test_app)
+vespa_add_executable(searchcore_docid_range_scheduler_bench_app
+ SOURCES
+ docid_range_scheduler_bench.cpp
+ DEPENDS
+ searchcore_matching
+)
+vespa_add_test(NAME searchcore_docid_range_scheduler_bench_app COMMAND searchcore_docid_range_scheduler_bench_app BENCHMARK)
diff --git a/searchcore/src/tests/proton/matching/docid_range_scheduler/docid_range_scheduler_bench.cpp b/searchcore/src/tests/proton/matching/docid_range_scheduler/docid_range_scheduler_bench.cpp
new file mode 100644
index 00000000000..848743e0f23
--- /dev/null
+++ b/searchcore/src/tests/proton/matching/docid_range_scheduler/docid_range_scheduler_bench.cpp
@@ -0,0 +1,226 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/searchcore/proton/matching/docid_range_scheduler.h>
+#include <vespa/vespalib/util/benchmark_timer.h>
+#include <vespa/vespalib/util/stringfmt.h>
+
+using namespace proton::matching;
+using namespace vespalib;
+
+//-----------------------------------------------------------------------------
+
+size_t do_work(size_t cost) __attribute__((noinline));
+size_t do_work(size_t cost) {
+ size_t result = 0;
+ size_t loop_cnt = 42;
+ for (size_t n = 0; n < cost; ++n) {
+ result += (cost * n);
+ for (size_t i = 0; i < loop_cnt; ++i) {
+ result += (cost * n * i);
+ for (size_t j = 0; j < loop_cnt; ++j) {
+ result += (cost * n * i * j);
+ for (size_t k = 0; k < loop_cnt; ++k) {
+ result += (cost * n * i * j * k);
+ }
+ }
+ }
+ }
+ return result;
+}
+
+//-----------------------------------------------------------------------------
+
+TEST("measure do_work overhead for different cost inputs") {
+ for (size_t cost: {0, 1, 10, 100, 1000}) {
+ BenchmarkTimer timer(1.0);
+ while (timer.has_budget()) {
+ timer.before();
+ (void) do_work(cost);
+ timer.after();
+ }
+ double min_time_s = timer.min_time();
+ fprintf(stderr, "const %zu: %g us\n", cost, min_time_s * 1000.0 * 1000.0);
+ }
+}
+
+//-----------------------------------------------------------------------------
+
+struct Work {
+ typedef std::unique_ptr<Work> UP;
+ virtual vespalib::string desc() const = 0;
+ virtual void perform(uint32_t docid) const = 0;
+ virtual ~Work() {}
+};
+
+struct UniformWork : public Work {
+ size_t cost;
+ UniformWork(size_t cost_in) : cost(cost_in) {}
+ vespalib::string desc() const override { return make_string("uniform(%zu)", cost); }
+ void perform(uint32_t) const override { (void) do_work(cost); }
+};
+
+struct TriangleWork : public Work {
+ size_t div;
+ TriangleWork(size_t div_in) : div(div_in) {}
+ vespalib::string desc() const override { return make_string("triangle(docid/%zu)", div); }
+ void perform(uint32_t docid) const override { (void) do_work(docid/div); }
+};
+
+struct SpikeWork : public Work {
+ uint32_t begin;
+ uint32_t end;
+ size_t cost;
+ SpikeWork(uint32_t begin_in, uint32_t end_in, size_t cost_in)
+ : begin(begin_in), end(end_in), cost(cost_in) {}
+ vespalib::string desc() const override { return make_string("spike(%u,%u,%zu)", begin, end, cost); }
+ void perform(uint32_t docid) const override {
+ if ((docid >= begin) && (docid < end)) {
+ (void) do_work(cost);
+ }
+ }
+};
+
+struct WorkList {
+ std::vector<Work::UP> work_list;
+ WorkList() : work_list() {
+ work_list.push_back(std::make_unique<UniformWork>(10));
+ work_list.push_back(std::make_unique<TriangleWork>(4878));
+ work_list.push_back(std::make_unique<SpikeWork>(1, 10001, 100));
+ work_list.push_back(std::make_unique<SpikeWork>(1, 1001, 1000));
+ work_list.push_back(std::make_unique<SpikeWork>(1, 101, 10000));
+ work_list.push_back(std::make_unique<SpikeWork>(1, 11, 100000));
+ work_list.push_back(std::make_unique<SpikeWork>(90001, 100001, 100));
+ work_list.push_back(std::make_unique<SpikeWork>(99001, 100001, 1000));
+ work_list.push_back(std::make_unique<SpikeWork>(99901, 100001, 10000));
+ work_list.push_back(std::make_unique<SpikeWork>(99991, 100001, 100000));
+ }
+};
+
+//-----------------------------------------------------------------------------
+
+struct SchedulerFactory {
+ typedef std::unique_ptr<SchedulerFactory> UP;
+ virtual vespalib::string desc() const = 0;
+ virtual DocidRangeScheduler::UP create(uint32_t docid_limit) const = 0;
+ virtual ~SchedulerFactory() {}
+};
+
+struct PartitionSchedulerFactory : public SchedulerFactory {
+ size_t num_threads;
+ PartitionSchedulerFactory(size_t num_threads_in) : num_threads(num_threads_in) {}
+ vespalib::string desc() const override { return make_string("partition(threads:%zu)", num_threads); }
+ DocidRangeScheduler::UP create(uint32_t docid_limit) const override {
+ return std::make_unique<PartitionDocidRangeScheduler>(num_threads, docid_limit);
+ }
+};
+
+struct TaskSchedulerFactory : public SchedulerFactory {
+ size_t num_threads;
+ size_t num_tasks;
+ TaskSchedulerFactory(size_t num_threads_in, size_t num_tasks_in)
+ : num_threads(num_threads_in), num_tasks(num_tasks_in) {}
+ vespalib::string desc() const override { return make_string("task(threads:%zu,num_tasks:%zu)", num_threads, num_tasks); }
+ DocidRangeScheduler::UP create(uint32_t docid_limit) const override {
+ return std::make_unique<TaskDocidRangeScheduler>(num_threads, num_tasks, docid_limit);
+ }
+};
+
+struct AdaptiveSchedulerFactory : public SchedulerFactory {
+ size_t num_threads;
+ size_t min_task;
+ AdaptiveSchedulerFactory(size_t num_threads_in, size_t min_task_in)
+ : num_threads(num_threads_in), min_task(min_task_in) {}
+ vespalib::string desc() const override { return make_string("adaptive(threads:%zu,min_task:%zu)", num_threads, min_task); }
+ DocidRangeScheduler::UP create(uint32_t docid_limit) const override {
+ return std::make_unique<AdaptiveDocidRangeScheduler>(num_threads, min_task, docid_limit);
+ }
+};
+
+struct SchedulerList {
+ std::vector<SchedulerFactory::UP> factory_list;
+ SchedulerList(size_t num_threads) : factory_list() {
+ factory_list.push_back(std::make_unique<PartitionSchedulerFactory>(num_threads));
+ factory_list.push_back(std::make_unique<TaskSchedulerFactory>(num_threads, num_threads));
+ factory_list.push_back(std::make_unique<TaskSchedulerFactory>(num_threads, 64));
+ factory_list.push_back(std::make_unique<TaskSchedulerFactory>(num_threads, 256));
+ factory_list.push_back(std::make_unique<TaskSchedulerFactory>(num_threads, 1024));
+ factory_list.push_back(std::make_unique<TaskSchedulerFactory>(num_threads, 4096));
+ factory_list.push_back(std::make_unique<AdaptiveSchedulerFactory>(num_threads, 1000));
+ factory_list.push_back(std::make_unique<AdaptiveSchedulerFactory>(num_threads, 100));
+ factory_list.push_back(std::make_unique<AdaptiveSchedulerFactory>(num_threads, 10));
+ factory_list.push_back(std::make_unique<AdaptiveSchedulerFactory>(num_threads, 1));
+ }
+};
+
+//-----------------------------------------------------------------------------
+
+void worker(DocidRangeScheduler &scheduler, const Work &work, size_t thread_id) {
+ IdleObserver observer = scheduler.make_idle_observer();
+ if (observer.is_always_zero()) {
+ for (DocidRange range = scheduler.first_range(thread_id);
+ !range.empty();
+ range = scheduler.next_range(thread_id))
+ {
+ do_work(10); // represents init-range cost
+ for (uint32_t docid = range.begin; docid < range.end; ++docid) {
+ work.perform(docid);
+ }
+ }
+ } else {
+ for (DocidRange range = scheduler.first_range(thread_id);
+ !range.empty();
+ range = scheduler.next_range(thread_id))
+ {
+ do_work(10); // represents init-range cost
+ for (uint32_t docid = range.begin; docid < range.end; ++docid) {
+ work.perform(docid);
+ if (observer.get() > 0) {
+ range = scheduler.share_range(thread_id, DocidRange(docid, range.end));
+ }
+ }
+ }
+ }
+}
+
+//-----------------------------------------------------------------------------
+
+TEST_MT_FFF("benchmark different combinations of schedulers and work loads", 8,
+ DocidRangeScheduler::UP(), SchedulerList(num_threads), WorkList())
+{
+ if (thread_id == 0) {
+ fprintf(stderr, "Benchmarking with %zu threads:\n", num_threads);
+ }
+ for (size_t scheduler = 0; scheduler < f2.factory_list.size(); ++scheduler) {
+ for (size_t work = 0; work < f3.work_list.size(); ++work) {
+ if (thread_id == 0) {
+ fprintf(stderr, " scheduler: %s, work load: %s ",
+ f2.factory_list[scheduler]->desc().c_str(),
+ f3.work_list[work]->desc().c_str());
+ }
+ BenchmarkTimer timer(1.0);
+ for (size_t i = 0; i < 5; ++i) {
+ TEST_BARRIER();
+ if (thread_id == 0) {
+ f1 = f2.factory_list[scheduler]->create(100001);
+ }
+ TEST_BARRIER();
+ timer.before();
+ worker(*f1, *f3.work_list[work], thread_id);
+ TEST_BARRIER();
+ timer.after();
+ if (thread_id == 0) {
+ fprintf(stderr, ".");
+ }
+ }
+ if (thread_id == 0) {
+ fprintf(stderr, " real time: %g ms\n", timer.min_time() * 1000.0);
+ }
+ }
+ }
+}
+
+//-----------------------------------------------------------------------------
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/matching/docid_range_scheduler/docid_range_scheduler_test.cpp b/searchcore/src/tests/proton/matching/docid_range_scheduler/docid_range_scheduler_test.cpp
new file mode 100644
index 00000000000..6716e945a0d
--- /dev/null
+++ b/searchcore/src/tests/proton/matching/docid_range_scheduler/docid_range_scheduler_test.cpp
@@ -0,0 +1,286 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/searchcore/proton/matching/docid_range_scheduler.h>
+#include <chrono>
+#include <thread>
+
+using namespace proton::matching;
+
+void verify_range(DocidRange a, DocidRange b) {
+ EXPECT_EQUAL(a.begin, b.begin);
+ EXPECT_EQUAL(a.end, b.end);
+}
+
+//-----------------------------------------------------------------------------
+
+TEST("require that default docid range constructor creates an empty range") {
+ EXPECT_TRUE(DocidRange().empty());
+ EXPECT_EQUAL(DocidRange().size(), 0u);
+}
+
+TEST("require that docid range ensures end is not less than begin") {
+ EXPECT_EQUAL(DocidRange(10, 20).size(), 10u);
+ EXPECT_TRUE(!DocidRange(10, 20).empty());
+ EXPECT_EQUAL(DocidRange(10, 20).begin, 10u);
+ EXPECT_EQUAL(DocidRange(10, 20).end, 20u);
+ EXPECT_EQUAL(DocidRange(20, 10).size(), 0u);
+ EXPECT_TRUE(DocidRange(20, 10).empty());
+ EXPECT_EQUAL(DocidRange(20, 10).begin, 20u);
+ EXPECT_EQUAL(DocidRange(20, 10).end, 20u);
+}
+
+//-----------------------------------------------------------------------------
+
+TEST("require that default constructed IdleObserver is always zero") {
+ IdleObserver observer;
+ EXPECT_TRUE(observer.is_always_zero());
+ EXPECT_EQUAL(0u, observer.get());
+}
+
+TEST("require that IdleObserver can observe an atomic size_t value") {
+ std::atomic<size_t> idle(0);
+ IdleObserver observer(idle);
+ EXPECT_TRUE(!observer.is_always_zero());
+ EXPECT_EQUAL(0u, observer.get());
+ idle = 10;
+ EXPECT_EQUAL(10u, observer.get());
+}
+
+//-----------------------------------------------------------------------------
+
+TEST("require that the docid range splitter can split a docid range") {
+ DocidRangeSplitter splitter(DocidRange(1, 16), 4);
+ TEST_DO(verify_range(splitter.get(0), DocidRange(1, 5)));
+ TEST_DO(verify_range(splitter.get(1), DocidRange(5, 9)));
+ TEST_DO(verify_range(splitter.get(2), DocidRange(9, 13)));
+ TEST_DO(verify_range(splitter.get(3), DocidRange(13, 16)));
+}
+
+TEST("require that the docid range splitter can split an empty range") {
+ DocidRangeSplitter splitter(DocidRange(5, 5), 2);
+ TEST_DO(verify_range(splitter.get(0), DocidRange(5, 5)));
+ TEST_DO(verify_range(splitter.get(1), DocidRange(5, 5)));
+}
+
+TEST("require that the docid range splitter can split a range into more parts than values") {
+ DocidRangeSplitter splitter(DocidRange(1, 4), 4);
+ TEST_DO(verify_range(splitter.get(0), DocidRange(1, 2)));
+ TEST_DO(verify_range(splitter.get(1), DocidRange(2, 3)));
+ TEST_DO(verify_range(splitter.get(2), DocidRange(3, 4)));
+ TEST_DO(verify_range(splitter.get(3), DocidRange(4, 4)));
+}
+
+TEST("require that the docid range splitter gives empty ranges if accessed with too high index") {
+ DocidRangeSplitter splitter(DocidRange(1, 4), 3);
+ TEST_DO(verify_range(splitter.get(0), DocidRange(1, 2)));
+ TEST_DO(verify_range(splitter.get(1), DocidRange(2, 3)));
+ TEST_DO(verify_range(splitter.get(2), DocidRange(3, 4)));
+ TEST_DO(verify_range(splitter.get(3), DocidRange(4, 4)));
+ TEST_DO(verify_range(splitter.get(100), DocidRange(4, 4)));
+}
+
+//-----------------------------------------------------------------------------
+
+TEST("require that the partition scheduler acts as expected") {
+ PartitionDocidRangeScheduler scheduler(4, 16);
+ TEST_DO(verify_range(scheduler.total_span(0), DocidRange(1, 5)));
+ TEST_DO(verify_range(scheduler.total_span(1), DocidRange(5, 9)));
+ TEST_DO(verify_range(scheduler.total_span(2), DocidRange(9, 13)));
+ TEST_DO(verify_range(scheduler.total_span(3), DocidRange(13, 16)));
+ EXPECT_EQUAL(scheduler.total_size(0), 4u);
+ EXPECT_EQUAL(scheduler.total_size(1), 4u);
+ EXPECT_EQUAL(scheduler.total_size(2), 4u);
+ EXPECT_EQUAL(scheduler.total_size(3), 3u);
+ EXPECT_EQUAL(scheduler.unassigned_size(), 0u);
+ TEST_DO(verify_range(scheduler.first_range(0), DocidRange(1, 5)));
+ TEST_DO(verify_range(scheduler.first_range(1), DocidRange(5, 9)));
+ TEST_DO(verify_range(scheduler.first_range(2), DocidRange(9, 13)));
+ TEST_DO(verify_range(scheduler.first_range(3), DocidRange(13, 16)));
+ TEST_DO(verify_range(scheduler.next_range(0), DocidRange()));
+ TEST_DO(verify_range(scheduler.next_range(1), DocidRange()));
+ TEST_DO(verify_range(scheduler.next_range(2), DocidRange()));
+ TEST_DO(verify_range(scheduler.next_range(3), DocidRange()));
+}
+
+TEST("require that the partition scheduler protects against documents underflow") {
+ PartitionDocidRangeScheduler scheduler(2, 0);
+ TEST_DO(verify_range(scheduler.total_span(0), DocidRange(1,1)));
+ TEST_DO(verify_range(scheduler.total_span(1), DocidRange(1,1)));
+ EXPECT_EQUAL(scheduler.total_size(0), 0u);
+ EXPECT_EQUAL(scheduler.total_size(1), 0u);
+ EXPECT_EQUAL(scheduler.unassigned_size(), 0u);
+ TEST_DO(verify_range(scheduler.first_range(0), DocidRange(1,1)));
+ TEST_DO(verify_range(scheduler.first_range(1), DocidRange(1,1)));
+ TEST_DO(verify_range(scheduler.next_range(0), DocidRange()));
+ TEST_DO(verify_range(scheduler.next_range(1), DocidRange()));
+}
+
+//-----------------------------------------------------------------------------
+
+TEST("require that the task scheduler acts as expected") {
+ TaskDocidRangeScheduler scheduler(2, 5, 20);
+ EXPECT_EQUAL(scheduler.unassigned_size(), 19u);
+ TEST_DO(verify_range(scheduler.total_span(0), DocidRange(1, 20)));
+ TEST_DO(verify_range(scheduler.total_span(1), DocidRange(1, 20)));
+ EXPECT_EQUAL(scheduler.total_size(0), 0u);
+ EXPECT_EQUAL(scheduler.total_size(1), 0u);
+ TEST_DO(verify_range(scheduler.first_range(1), DocidRange(1, 5)));
+ TEST_DO(verify_range(scheduler.first_range(0), DocidRange(5, 9)));
+ TEST_DO(verify_range(scheduler.next_range(0), DocidRange(9, 13)));
+ EXPECT_EQUAL(scheduler.unassigned_size(), 7u);
+ TEST_DO(verify_range(scheduler.next_range(1), DocidRange(13, 17)));
+ TEST_DO(verify_range(scheduler.next_range(0), DocidRange(17, 20)));
+ TEST_DO(verify_range(scheduler.next_range(0), DocidRange(20, 20)));
+ TEST_DO(verify_range(scheduler.next_range(1), DocidRange(20, 20)));
+ EXPECT_EQUAL(scheduler.total_size(0), 11u);
+ EXPECT_EQUAL(scheduler.total_size(1), 8u);
+ EXPECT_EQUAL(scheduler.unassigned_size(), 0u);
+}
+
+TEST("require that the task scheduler protects against documents underflow") {
+ TaskDocidRangeScheduler scheduler(2, 4, 0);
+ TEST_DO(verify_range(scheduler.total_span(0), DocidRange(1,1)));
+ TEST_DO(verify_range(scheduler.total_span(1), DocidRange(1,1)));
+ EXPECT_EQUAL(scheduler.total_size(0), 0u);
+ EXPECT_EQUAL(scheduler.total_size(1), 0u);
+ EXPECT_EQUAL(scheduler.unassigned_size(), 0u);
+ TEST_DO(verify_range(scheduler.first_range(0), DocidRange(1,1)));
+ TEST_DO(verify_range(scheduler.first_range(1), DocidRange(1,1)));
+ TEST_DO(verify_range(scheduler.next_range(0), DocidRange(1,1)));
+ TEST_DO(verify_range(scheduler.next_range(1), DocidRange(1,1)));
+}
+
+//-----------------------------------------------------------------------------
+
+TEST("require that the adaptive scheduler starts by dividing the docid space equally") {
+ AdaptiveDocidRangeScheduler scheduler(4, 1, 16);
+ EXPECT_EQUAL(scheduler.total_size(0), 4u);
+ EXPECT_EQUAL(scheduler.total_size(1), 4u);
+ EXPECT_EQUAL(scheduler.total_size(2), 4u);
+ EXPECT_EQUAL(scheduler.total_size(3), 3u);
+ EXPECT_EQUAL(scheduler.unassigned_size(), 0u);
+ TEST_DO(verify_range(scheduler.first_range(0), DocidRange(1, 5)));
+ TEST_DO(verify_range(scheduler.first_range(1), DocidRange(5, 9)));
+ TEST_DO(verify_range(scheduler.first_range(2), DocidRange(9, 13)));
+ TEST_DO(verify_range(scheduler.first_range(3), DocidRange(13, 16)));
+}
+
+TEST("require that the adaptive scheduler reports the full span to all threads") {
+ AdaptiveDocidRangeScheduler scheduler(3, 1, 16);
+ TEST_DO(verify_range(scheduler.total_span(0), DocidRange(1,16)));
+ TEST_DO(verify_range(scheduler.total_span(1), DocidRange(1,16)));
+ TEST_DO(verify_range(scheduler.total_span(2), DocidRange(1,16)));
+}
+
+TEST_MT_F("require that the adaptive scheduler terminates when all workers request more work",
+ 4, AdaptiveDocidRangeScheduler(num_threads, 1, 16))
+{
+ (void) f1.first_range(thread_id);
+ DocidRange range = f1.next_range(thread_id);
+ EXPECT_TRUE(range.empty());
+}
+
+void wait_idle(const DocidRangeScheduler &scheduler, size_t wanted) {
+ IdleObserver observer = scheduler.make_idle_observer();
+ while (observer.get() != wanted) {
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+ }
+}
+
+TEST_MT_F("require that the adaptive scheduler enables threads to share work",
+ 3, AdaptiveDocidRangeScheduler(num_threads, 1, 28))
+{
+ DocidRange range = f1.first_range(thread_id);
+ if (thread_id == 0) {
+ TEST_DO(verify_range(range, DocidRange(1,10)));
+ } else if (thread_id == 1) {
+ TEST_DO(verify_range(range, DocidRange(10,19)));
+ } else {
+ TEST_DO(verify_range(range, DocidRange(19,28)));
+ }
+ EXPECT_EQUAL(f1.total_size(thread_id), 9u);
+ TEST_DO(verify_range(f1.share_range(thread_id, range), range));
+ TEST_BARRIER();
+ if (thread_id == 0) {
+ TEST_DO(verify_range(f1.next_range(thread_id), DocidRange(25,28)));
+ } else if (thread_id == 1) {
+ wait_idle(f1, 1);
+ TEST_DO(verify_range(f1.next_range(thread_id), DocidRange(22,25)));
+ } else {
+ wait_idle(f1, 2);
+ verify_range(f1.share_range(thread_id, range), DocidRange(19,22));
+ }
+ TEST_DO(verify_range(f1.next_range(thread_id), DocidRange()));
+ EXPECT_EQUAL(f1.total_size(0), 12u);
+ EXPECT_EQUAL(f1.total_size(1), 12u);
+ EXPECT_EQUAL(f1.total_size(2), 3u);
+}
+
+TEST("require that the adaptive scheduler protects against documents underflow") {
+ AdaptiveDocidRangeScheduler scheduler(2, 1, 0);
+ TEST_DO(verify_range(scheduler.first_range(0), DocidRange(1,1)));
+ TEST_DO(verify_range(scheduler.first_range(1), DocidRange(1,1)));
+ EXPECT_EQUAL(scheduler.total_size(0), 0u);
+ EXPECT_EQUAL(scheduler.total_size(1), 0u);
+ EXPECT_EQUAL(scheduler.unassigned_size(), 0u);
+}
+
+TEST_MT_F("require that the adaptive scheduler respects the minimal task size",
+ 2, AdaptiveDocidRangeScheduler(num_threads, 3, 21))
+{
+ EXPECT_EQUAL(f1.first_range(thread_id).size(), 10u);
+ if (thread_id == 0) {
+ TEST_DO(verify_range(f1.next_range(thread_id), DocidRange(18,21)));
+ TEST_DO(verify_range(f1.next_range(thread_id), DocidRange()));
+ } else {
+ wait_idle(f1, 1);
+ // a range with size 5 will not be split
+ TEST_DO(verify_range(f1.share_range(thread_id, DocidRange(16,21)), DocidRange(16,21)));
+ // a range with size 6 will be split
+ TEST_DO(verify_range(f1.share_range(thread_id, DocidRange(15,21)), DocidRange(15,18)));
+ TEST_DO(verify_range(f1.next_range(thread_id), DocidRange()));
+ }
+}
+
+TEST_MT_F("require that the adaptive scheduler will never split a task with size 1",
+ 2, AdaptiveDocidRangeScheduler(num_threads, 0, 21))
+{
+ EXPECT_EQUAL(f1.first_range(thread_id).size(), 10u);
+ if (thread_id == 0) {
+ TEST_DO(verify_range(f1.next_range(thread_id), DocidRange()));
+ } else {
+ IdleObserver observer = f1.make_idle_observer();
+ while (observer.get() == 0) {
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+ }
+ DocidRange small_range = DocidRange(20,21);
+ verify_range(f1.share_range(thread_id, small_range), small_range);
+ TEST_DO(verify_range(f1.next_range(thread_id), DocidRange()));
+ }
+}
+
+TEST_MT_F("require that the adaptive scheduler can leave idle workers alone due to minimal task size",
+ 3, AdaptiveDocidRangeScheduler(num_threads, 3, 28))
+{
+ EXPECT_EQUAL(f1.first_range(thread_id).size(), 9u);
+ if (thread_id == 0) {
+ TEST_DO(verify_range(f1.next_range(thread_id), DocidRange()));
+ } else if (thread_id == 1) {
+ wait_idle(f1, 1);
+ TEST_DO(verify_range(f1.next_range(thread_id), DocidRange(24,28)));
+ TEST_DO(verify_range(f1.next_range(thread_id), DocidRange()));
+ } else {
+ wait_idle(f1, 2);
+ verify_range(f1.share_range(thread_id, DocidRange(20,28)), DocidRange(20,24));
+ TEST_DO(verify_range(f1.next_range(thread_id), DocidRange()));
+ }
+ EXPECT_EQUAL(f1.total_size(0), 9u);
+ EXPECT_EQUAL(f1.total_size(1), 13u);
+ EXPECT_EQUAL(f1.total_size(2), 5u);
+}
+
+//-----------------------------------------------------------------------------
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/matching/match_loop_communicator/.gitignore b/searchcore/src/tests/proton/matching/match_loop_communicator/.gitignore
new file mode 100644
index 00000000000..c3797981bab
--- /dev/null
+++ b/searchcore/src/tests/proton/matching/match_loop_communicator/.gitignore
@@ -0,0 +1 @@
+searchcore_match_loop_communicator_test_app
diff --git a/searchcore/src/tests/proton/matching/match_loop_communicator/CMakeLists.txt b/searchcore/src/tests/proton/matching/match_loop_communicator/CMakeLists.txt
new file mode 100644
index 00000000000..513e002f064
--- /dev/null
+++ b/searchcore/src/tests/proton/matching/match_loop_communicator/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_match_loop_communicator_test_app
+ SOURCES
+ match_loop_communicator_test.cpp
+ DEPENDS
+ searchcore_matching
+)
+vespa_add_test(NAME searchcore_match_loop_communicator_test_app COMMAND searchcore_match_loop_communicator_test_app)
diff --git a/searchcore/src/tests/proton/matching/match_loop_communicator/FILES b/searchcore/src/tests/proton/matching/match_loop_communicator/FILES
new file mode 100644
index 00000000000..d2f1096aaa3
--- /dev/null
+++ b/searchcore/src/tests/proton/matching/match_loop_communicator/FILES
@@ -0,0 +1 @@
+match_loop_communicator_test.cpp
diff --git a/searchcore/src/tests/proton/matching/match_loop_communicator/match_loop_communicator_test.cpp b/searchcore/src/tests/proton/matching/match_loop_communicator/match_loop_communicator_test.cpp
new file mode 100644
index 00000000000..92139a1c027
--- /dev/null
+++ b/searchcore/src/tests/proton/matching/match_loop_communicator/match_loop_communicator_test.cpp
@@ -0,0 +1,118 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/searchcore/proton/matching/match_loop_communicator.h>
+#include <vespa/vespalib/util/box.h>
+
+using namespace proton::matching;
+
+using vespalib::Box;
+using vespalib::make_box;
+
+typedef MatchLoopCommunicator::Range Range;
+typedef MatchLoopCommunicator::RangePair RangePair;
+typedef MatchLoopCommunicator::feature_t feature_t;
+typedef MatchLoopCommunicator::Matches Matches;
+
+std::vector<feature_t> makeScores(size_t id) {
+ switch (id) {
+ case 0: return make_box<feature_t>(5.4, 4.4, 3.4, 2.4, 1.4);
+ case 1: return make_box<feature_t>(5.3, 4.3, 3.3, 2.3, 1.3);
+ case 2: return make_box<feature_t>(5.2, 4.2, 3.2, 2.2, 1.2);
+ case 3: return make_box<feature_t>(5.1, 4.1, 3.1, 2.1, 1.1);
+ case 4: return make_box<feature_t>(5.0, 4.0, 3.0, 2.0, 1.0);
+ }
+ return Box<feature_t>();
+}
+
+RangePair makeRanges(size_t id) {
+ switch (id) {
+ case 0: return std::make_pair(Range(5, 5), Range(7, 7));
+ case 1: return std::make_pair(Range(2, 2), Range(8, 8));
+ case 2: return std::make_pair(Range(3, 3), Range(6, 6));
+ case 3: return std::make_pair(Range(1, 1), Range(5, 5));
+ case 4: return std::make_pair(Range(4, 4), Range(9, 9));
+ }
+ return std::make_pair(Range(-50, -60), Range(60, 50));
+}
+
+TEST_F("require that selectBest gives appropriate results for single thread", MatchLoopCommunicator(num_threads, 3)) {
+ EXPECT_EQUAL(2u, f1.selectBest(make_box<feature_t>(5, 4)));
+ EXPECT_EQUAL(3u, f1.selectBest(make_box<feature_t>(5, 4, 3)));
+ EXPECT_EQUAL(3u, f1.selectBest(make_box<feature_t>(5, 4, 3, 2)));
+}
+
+TEST_MT_F("require that selectBest works with no hits", 10, MatchLoopCommunicator(num_threads, 10)) {
+ EXPECT_EQUAL(0u, f1.selectBest(Box<feature_t>()));
+}
+
+TEST_MT_F("require that selectBest works with too many hits from all threads", 5, MatchLoopCommunicator(num_threads, 13)) {
+ if (thread_id < 3) {
+ EXPECT_EQUAL(3u, f1.selectBest(makeScores(thread_id)));
+ } else {
+ EXPECT_EQUAL(2u, f1.selectBest(makeScores(thread_id)));
+ }
+}
+
+TEST_MT_F("require that selectBest works with some exhausted threads", 5, MatchLoopCommunicator(num_threads, 22)) {
+ if (thread_id < 2) {
+ EXPECT_EQUAL(5u, f1.selectBest(makeScores(thread_id)));
+ } else {
+ EXPECT_EQUAL(4u, f1.selectBest(makeScores(thread_id)));
+ }
+}
+
+TEST_MT_F("require that selectBest can select all hits from all threads", 5, MatchLoopCommunicator(num_threads, 100)) {
+ EXPECT_EQUAL(5u, f1.selectBest(makeScores(thread_id)));
+}
+
+TEST_MT_F("require that selectBest works with some empty threads", 10, MatchLoopCommunicator(num_threads, 7)) {
+ if (thread_id < 2) {
+ EXPECT_EQUAL(2u, f1.selectBest(makeScores(thread_id)));
+ } else if (thread_id < 5) {
+ EXPECT_EQUAL(1u, f1.selectBest(makeScores(thread_id)));
+ } else {
+ EXPECT_EQUAL(0u, f1.selectBest(makeScores(thread_id)));
+ }
+}
+
+TEST_F("require that rangeCover is identity function for single thread", MatchLoopCommunicator(num_threads, 5)) {
+ RangePair res = f1.rangeCover(std::make_pair(Range(2, 4), Range(3, 5)));
+ EXPECT_EQUAL(2, res.first.low);
+ EXPECT_EQUAL(4, res.first.high);
+ EXPECT_EQUAL(3, res.second.low);
+ EXPECT_EQUAL(5, res.second.high);
+}
+
+TEST_MT_F("require that rangeCover can mix ranges from multiple threads", 5, MatchLoopCommunicator(num_threads, 5)) {
+ RangePair res = f1.rangeCover(makeRanges(thread_id));
+ EXPECT_EQUAL(1, res.first.low);
+ EXPECT_EQUAL(5, res.first.high);
+ EXPECT_EQUAL(5, res.second.low);
+ EXPECT_EQUAL(9, res.second.high);
+}
+
+TEST_MT_F("require that invalid ranges are ignored", 10, MatchLoopCommunicator(num_threads, 5)) {
+ RangePair res = f1.rangeCover(makeRanges(thread_id));
+ EXPECT_EQUAL(1, res.first.low);
+ EXPECT_EQUAL(5, res.first.high);
+ EXPECT_EQUAL(5, res.second.low);
+ EXPECT_EQUAL(9, res.second.high);
+}
+
+TEST_MT_F("require that only invalid ranges produce default invalid range", 3, MatchLoopCommunicator(num_threads, 5)) {
+ RangePair res = f1.rangeCover(makeRanges(10));
+ Range expect;
+ EXPECT_FALSE(expect.isValid());
+ EXPECT_EQUAL(expect.low, res.first.low);
+ EXPECT_EQUAL(expect.high, res.first.high);
+ EXPECT_EQUAL(expect.low, res.second.low);
+ EXPECT_EQUAL(expect.high, res.second.high);
+}
+
+TEST_MT_F("require that count_matches will count hits and docs across threads", 4, MatchLoopCommunicator(num_threads, 5)) {
+ double freq = (0.0/10.0 + 1.0/11.0 + 2.0/12.0 + 3.0/13.0) / 4.0;
+ EXPECT_APPROX(freq, f1.estimate_match_frequency(Matches(thread_id, thread_id + 10)), 0.00001);
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/matching/match_phase_limiter/.gitignore b/searchcore/src/tests/proton/matching/match_phase_limiter/.gitignore
new file mode 100644
index 00000000000..69806654ee0
--- /dev/null
+++ b/searchcore/src/tests/proton/matching/match_phase_limiter/.gitignore
@@ -0,0 +1 @@
+searchcore_match_phase_limiter_test_app
diff --git a/searchcore/src/tests/proton/matching/match_phase_limiter/CMakeLists.txt b/searchcore/src/tests/proton/matching/match_phase_limiter/CMakeLists.txt
new file mode 100644
index 00000000000..78c16d1435d
--- /dev/null
+++ b/searchcore/src/tests/proton/matching/match_phase_limiter/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_match_phase_limiter_test_app
+ SOURCES
+ match_phase_limiter_test.cpp
+ DEPENDS
+ searchcore_matching
+)
+vespa_add_test(NAME searchcore_match_phase_limiter_test_app COMMAND searchcore_match_phase_limiter_test_app)
diff --git a/searchcore/src/tests/proton/matching/match_phase_limiter/FILES b/searchcore/src/tests/proton/matching/match_phase_limiter/FILES
new file mode 100644
index 00000000000..776925a0d69
--- /dev/null
+++ b/searchcore/src/tests/proton/matching/match_phase_limiter/FILES
@@ -0,0 +1 @@
+match_phase_limiter_test.cpp
diff --git a/searchcore/src/tests/proton/matching/match_phase_limiter/match_phase_limiter_test.cpp b/searchcore/src/tests/proton/matching/match_phase_limiter/match_phase_limiter_test.cpp
new file mode 100644
index 00000000000..35757cb43c7
--- /dev/null
+++ b/searchcore/src/tests/proton/matching/match_phase_limiter/match_phase_limiter_test.cpp
@@ -0,0 +1,361 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/searchcore/proton/matching/match_phase_limiter.h>
+#include <vespa/searchlib/queryeval/termasstring.h>
+#include <vespa/searchlib/queryeval/andsearchstrict.h>
+#include <vespa/searchlib/queryeval/fake_requestcontext.h>
+
+using namespace proton::matching;
+using search::queryeval::SearchIterator;
+using search::queryeval::Searchable;
+using search::queryeval::Blueprint;
+using search::queryeval::SimpleLeafBlueprint;
+using search::queryeval::FieldSpec;
+using search::queryeval::FieldSpecBaseList;
+using search::queryeval::AndSearchStrict;
+using search::queryeval::termAsString;
+using search::queryeval::FakeRequestContext;
+using search::fef::TermFieldMatchDataArray;
+
+//-----------------------------------------------------------------------------
+
+SearchIterator::UP prepare(SearchIterator * search)
+{
+ search->initFullRange();
+ return SearchIterator::UP(search);
+}
+
+struct MockSearch : SearchIterator {
+ FieldSpec spec;
+ vespalib::string term;
+ vespalib::Trinary _strict;
+ TermFieldMatchDataArray tfmda;
+ bool postings_fetched;
+ uint32_t last_seek = beginId();
+ uint32_t last_unpack = beginId();
+ MockSearch(const vespalib::string &term_in)
+ : spec(0, 0, 0), term(term_in), _strict(vespalib::Trinary::True), tfmda(), postings_fetched(false) {}
+ MockSearch(const FieldSpec &spec_in, const vespalib::string &term_in, bool strict_in,
+ const TermFieldMatchDataArray &tfmda_in, bool postings_fetched_in)
+ : spec(spec_in), term(term_in),
+ _strict(strict_in ? vespalib::Trinary::True : vespalib::Trinary::False),
+ tfmda(tfmda_in),
+ postings_fetched(postings_fetched_in) {}
+ void doSeek(uint32_t docid) override { last_seek = docid; setDocId(docid); }
+ void doUnpack(uint32_t docid) override { last_unpack = docid; }
+ vespalib::Trinary is_strict() const override { return _strict; }
+ bool strict() const { return (is_strict() == vespalib::Trinary::True); }
+};
+
+struct MockBlueprint : SimpleLeafBlueprint {
+ FieldSpec spec;
+ vespalib::string term;
+ bool postings_fetched = false;
+ bool postings_strict = false;
+ MockBlueprint(const FieldSpec &spec_in, const vespalib::string &term_in)
+ : SimpleLeafBlueprint(FieldSpecBaseList().add(spec_in)), spec(spec_in), term(term_in)
+ {
+ setEstimate(HitEstimate(756, false));
+ }
+ virtual SearchIterator::UP createLeafSearch(const TermFieldMatchDataArray &tfmda,
+ bool strict) const override
+ {
+ if (postings_fetched) {
+ EXPECT_EQUAL(postings_strict, strict);
+ }
+ return SearchIterator::UP(new MockSearch(spec, term, strict, tfmda,
+ postings_fetched));
+ }
+ virtual void fetchPostings(bool strict) override {
+ postings_strict = strict;
+ postings_fetched = true;
+ }
+};
+
+struct MockSearchable : Searchable {
+ size_t create_cnt = 0;
+ virtual Blueprint::UP createBlueprint(const search::queryeval::IRequestContext & requestContext,
+ const FieldSpec &field,
+ const search::query::Node &term) override
+ {
+ (void) requestContext;
+ ++create_cnt;
+ return Blueprint::UP(new MockBlueprint(field, termAsString(term)));
+ }
+};
+
+//-----------------------------------------------------------------------------
+
+TEST("require that match phase limit calculator gives expert values") {
+ MatchPhaseLimitCalculator calc(5000, 1, 0.2);
+ EXPECT_EQUAL(1000u, calc.sample_hits_per_thread(1));
+ EXPECT_EQUAL(100u, calc.sample_hits_per_thread(10));
+ EXPECT_EQUAL(10000u, calc.wanted_num_docs(0.5));
+ EXPECT_EQUAL(50000u, calc.wanted_num_docs(0.1));
+}
+
+TEST("require that match phase limit calculator can estimate hits") {
+ MatchPhaseLimitCalculator calc(0, 1, 0.2); // max hits not used
+ EXPECT_EQUAL(0u, calc.estimated_hits(0.0, 0));
+ EXPECT_EQUAL(0u, calc.estimated_hits(0.0, 1));
+ EXPECT_EQUAL(0u, calc.estimated_hits(0.0, 1000));
+ EXPECT_EQUAL(1u, calc.estimated_hits(1.0, 1));
+ EXPECT_EQUAL(10u, calc.estimated_hits(1.0, 10));
+ EXPECT_EQUAL(5u, calc.estimated_hits(0.5, 10));
+ EXPECT_EQUAL(500u, calc.estimated_hits(0.5, 1000));
+}
+
+TEST("require that match phase limit calculator has lower bound on global sample hits") {
+ MatchPhaseLimitCalculator calc(100, 1, 0.2);
+ EXPECT_EQUAL(128u, calc.sample_hits_per_thread(1));
+ EXPECT_EQUAL(4u, calc.sample_hits_per_thread(32));
+}
+
+TEST("require that match phase limit calculator has lower bound on thread sample hits") {
+ MatchPhaseLimitCalculator calc(5000, 1, 0.2);
+ EXPECT_EQUAL(1u, calc.sample_hits_per_thread(10000));
+}
+
+TEST("require that match phase limit calculator has lower bound on wanted hits") {
+ MatchPhaseLimitCalculator calc(100, 1, 0.2);
+ EXPECT_EQUAL(128u, calc.wanted_num_docs(1.0));
+}
+
+TEST("require that match phase limit calculator has upper bound on wanted hits") {
+ MatchPhaseLimitCalculator calc(100000000, 1, 0.2);
+ EXPECT_EQUAL(0x7fffFFFFu, calc.wanted_num_docs(0.0000001));
+}
+
+TEST("require that match phase limit calculator gives sane values with no hits") {
+ MatchPhaseLimitCalculator calc(100, 1, 0.2);
+ EXPECT_EQUAL(128u, calc.wanted_num_docs(1.0));
+ EXPECT_EQUAL(0x7fffFFFFu, calc.wanted_num_docs(0.000000001));
+ EXPECT_EQUAL(0x7fffFFFFu, calc.wanted_num_docs(0.000000001));
+}
+
+TEST("verify numbers used in matching test") {
+ MatchPhaseLimitCalculator calc(150, 1, 0.2);
+ EXPECT_EQUAL(1u, calc.sample_hits_per_thread(75));
+ EXPECT_EQUAL(176u, calc.wanted_num_docs(74.0 / 87.0));
+}
+
+TEST("require that max group size is calculated correctly") {
+ for (size_t min_groups: std::vector<size_t>({0, 1, 2, 3, 4, 10, 500})) {
+ for (size_t wanted_hits: std::vector<size_t>({0, 3, 321, 921})) {
+ MatchPhaseLimitCalculator calc(100, min_groups, 0.2);
+ if (min_groups == 0) {
+ EXPECT_EQUAL(wanted_hits, calc.max_group_size(wanted_hits));
+ } else {
+ EXPECT_EQUAL((wanted_hits / min_groups), calc.max_group_size(wanted_hits));
+ }
+ }
+ }
+}
+
+TEST("require that the attribute limiter works correctly") {
+ FakeRequestContext requestContext;
+ for (int i = 0; i <= 7; ++i) {
+ bool descending = (i & 1) != 0;
+ bool strict = (i & 2) != 0;
+ bool diverse = (i & 4) != 0;
+ MockSearchable searchable;
+ AttributeLimiter limiter(searchable, requestContext, "limiter_attribute", descending, "category", 10.0, AttributeLimiter::LOOSE);
+ EXPECT_EQUAL(0u, searchable.create_cnt);
+ EXPECT_FALSE(limiter.was_used());
+ SearchIterator::UP s1 = limiter.create_search(42, diverse ? 3 : 42, strict);
+ EXPECT_TRUE(limiter.was_used());
+ EXPECT_EQUAL(1u, searchable.create_cnt);
+ SearchIterator::UP s2 = limiter.create_search(42, diverse ? 3 : 42, strict);
+ EXPECT_EQUAL(1u, searchable.create_cnt);
+ MockSearch *ms = dynamic_cast<MockSearch*>(s1.get());
+ ASSERT_TRUE(ms != nullptr);
+ EXPECT_EQUAL("limiter_attribute", ms->spec.getName());
+ EXPECT_EQUAL(0u, ms->spec.getFieldId());
+ EXPECT_EQUAL(0u, ms->spec.getHandle());
+ EXPECT_EQUAL(strict, ms->strict());
+ EXPECT_TRUE(ms->postings_fetched);
+ if (descending) {
+ if (diverse) {
+ EXPECT_EQUAL("[;;-42;category;3;140;loose]", ms->term);
+ } else {
+ EXPECT_EQUAL("[;;-42]", ms->term);
+ }
+ } else {
+ if (diverse) {
+ EXPECT_EQUAL("[;;42;category;3;140;loose]", ms->term);
+ } else {
+ EXPECT_EQUAL("[;;42]", ms->term);
+ }
+ }
+ ASSERT_EQUAL(1u, ms->tfmda.size());
+ EXPECT_EQUAL(0u, ms->tfmda[0]->getFieldId());
+ }
+}
+
+TEST("require that no limiter has no behavior") {
+ NoMatchPhaseLimiter no_limiter;
+ MaybeMatchPhaseLimiter &limiter = no_limiter;
+ EXPECT_FALSE(limiter.is_enabled());
+ EXPECT_EQUAL(0u, limiter.sample_hits_per_thread(1));
+ SearchIterator::UP search = limiter.maybe_limit(prepare(new MockSearch("search")), 1.0, 100000000);
+ limiter.updateDocIdSpaceEstimate(1000, 9000);
+ EXPECT_EQUAL(std::numeric_limits<size_t>::max(), limiter.getDocIdSpaceEstimate());
+ MockSearch *ms = dynamic_cast<MockSearch*>(search.get());
+ ASSERT_TRUE(ms != nullptr);
+ EXPECT_EQUAL("search", ms->term);
+ EXPECT_FALSE(limiter.was_limited());
+}
+
+TEST("require that the match phase limiter may choose not to limit the query") {
+ FakeRequestContext requestContext;
+ MockSearchable searchable;
+ MatchPhaseLimiter yes_limiter(10000, searchable, requestContext, "limiter_attribute", 1000, true, 1.0, 0.2, 1.0, "", 1, 10.0, AttributeLimiter::LOOSE);
+ MaybeMatchPhaseLimiter &limiter = yes_limiter;
+ EXPECT_TRUE(limiter.is_enabled());
+ EXPECT_EQUAL(20u, limiter.sample_hits_per_thread(10));
+ SearchIterator::UP search = limiter.maybe_limit(prepare(new MockSearch("search")),
+ 0.005, 100000);
+ limiter.updateDocIdSpaceEstimate(1000, 9000);
+ EXPECT_EQUAL(10000u, limiter.getDocIdSpaceEstimate());
+ MockSearch *ms = dynamic_cast<MockSearch*>(search.get());
+ ASSERT_TRUE(ms != nullptr);
+ EXPECT_EQUAL("search", ms->term);
+ EXPECT_FALSE(limiter.was_limited());
+}
+
+struct MaxFilterCoverageLimiterFixture {
+
+ FakeRequestContext requestContext;
+ MockSearchable searchable;
+
+ MatchPhaseLimiter::UP getMaxFilterCoverageLimiter() {
+ MatchPhaseLimiter::UP yes_limiter(new MatchPhaseLimiter(10000, searchable, requestContext, "limiter_attribute", 10000, true, 0.05, 1.0, 1.0, "", 1, 10.0, AttributeLimiter::LOOSE));
+ MaybeMatchPhaseLimiter &limiter = *yes_limiter;
+ EXPECT_TRUE(limiter.is_enabled());
+ EXPECT_EQUAL(1000u, limiter.sample_hits_per_thread(10));
+ return yes_limiter;
+ }
+};
+
+TEST_F("require that the match phase limiter may choose not to limit the query when considering max-filter-coverage", MaxFilterCoverageLimiterFixture) {
+ MatchPhaseLimiter::UP limiterUP = f.getMaxFilterCoverageLimiter();
+ MaybeMatchPhaseLimiter & limiter = *limiterUP;
+ SearchIterator::UP search = limiter.maybe_limit(prepare(new MockSearch("search")), 0.10, 1900000);
+ limiter.updateDocIdSpaceEstimate(1000, 1899000);
+ EXPECT_EQUAL(1900000u, limiter.getDocIdSpaceEstimate());
+ MockSearch *ms = dynamic_cast<MockSearch *>(search.get());
+ ASSERT_TRUE(ms != nullptr);
+ EXPECT_EQUAL("search", ms->term);
+ EXPECT_FALSE(limiter.was_limited());
+}
+
+TEST_F("require that the match phase limiter may choose to limit the query even when considering max-filter-coverage", MaxFilterCoverageLimiterFixture) {
+ MatchPhaseLimiter::UP limiterUP = f.getMaxFilterCoverageLimiter();
+ MaybeMatchPhaseLimiter & limiter = *limiterUP;
+ SearchIterator::UP search = limiter.maybe_limit(prepare(new MockSearch("search")), 0.10, 2100000);
+ limiter.updateDocIdSpaceEstimate(1000, 2099000);
+ EXPECT_EQUAL(159684u, limiter.getDocIdSpaceEstimate());
+ LimitedSearch *strict_and = dynamic_cast<LimitedSearch*>(search.get());
+ ASSERT_TRUE(strict_and != nullptr);
+ const MockSearch *ms1 = dynamic_cast<const MockSearch*>(&strict_and->getFirst());
+ ASSERT_TRUE(ms1 != nullptr);
+ const MockSearch *ms2 = dynamic_cast<const MockSearch*>(&strict_and->getSecond());
+ ASSERT_TRUE(ms2 != nullptr);
+ EXPECT_EQUAL("[;;-100000]", ms1->term);
+ EXPECT_EQUAL("search", ms2->term);
+ EXPECT_TRUE(ms1->strict());
+ EXPECT_TRUE(ms2->strict());
+ EXPECT_TRUE(limiter.was_limited());
+}
+
+TEST("require that the match phase limiter is able to pre-limit the query") {
+ FakeRequestContext requestContext;
+ MockSearchable searchable;
+ MatchPhaseLimiter yes_limiter(10000, searchable, requestContext, "limiter_attribute", 500, true, 1.0, 0.2, 1.0, "", 1, 10.0, AttributeLimiter::LOOSE);
+ MaybeMatchPhaseLimiter &limiter = yes_limiter;
+ EXPECT_TRUE(limiter.is_enabled());
+ EXPECT_EQUAL(12u, limiter.sample_hits_per_thread(10));
+ SearchIterator::UP search = limiter.maybe_limit(prepare(new MockSearch("search")),
+ 0.1, 100000);
+ limiter.updateDocIdSpaceEstimate(1000, 9000);
+ EXPECT_EQUAL(1680u, limiter.getDocIdSpaceEstimate());
+ LimitedSearch *strict_and = dynamic_cast<LimitedSearch*>(search.get());
+ ASSERT_TRUE(strict_and != nullptr);
+ const MockSearch *ms1 = dynamic_cast<const MockSearch*>(&strict_and->getFirst());
+ ASSERT_TRUE(ms1 != nullptr);
+ const MockSearch *ms2 = dynamic_cast<const MockSearch*>(&strict_and->getSecond());
+ ASSERT_TRUE(ms2 != nullptr);
+ EXPECT_EQUAL("[;;-5000]", ms1->term);
+ EXPECT_EQUAL("search", ms2->term);
+ EXPECT_TRUE(ms1->strict());
+ EXPECT_TRUE(ms2->strict());
+ search->seek(100);
+ EXPECT_EQUAL(100u, ms1->last_seek);
+ EXPECT_EQUAL(100u, ms2->last_seek);
+ search->unpack(100);
+ EXPECT_EQUAL(0u, ms1->last_unpack); // will not unpack limiting term
+ EXPECT_EQUAL(100u, ms2->last_unpack);
+ EXPECT_TRUE(limiter.was_limited());
+}
+
+TEST("require that the match phase limiter is able to post-limit the query") {
+ MockSearchable searchable;
+ FakeRequestContext requestContext;
+ MatchPhaseLimiter yes_limiter(10000, searchable, requestContext,"limiter_attribute", 1500, true, 1.0, 0.2, 1.0, "", 1, 10.0, AttributeLimiter::LOOSE);
+ MaybeMatchPhaseLimiter &limiter = yes_limiter;
+ EXPECT_TRUE(limiter.is_enabled());
+ EXPECT_EQUAL(30u, limiter.sample_hits_per_thread(10));
+ SearchIterator::UP search = limiter.maybe_limit(prepare(new MockSearch("search")), 0.1, 100000);
+ limiter.updateDocIdSpaceEstimate(1000, 9000);
+ EXPECT_EQUAL(1680u, limiter.getDocIdSpaceEstimate());
+ LimitedSearch *strict_and = dynamic_cast<LimitedSearch*>(search.get());
+ ASSERT_TRUE(strict_and != nullptr);
+ const MockSearch *ms1 = dynamic_cast<const MockSearch*>(&strict_and->getFirst());
+ ASSERT_TRUE(ms1 != nullptr);
+ const MockSearch *ms2 = dynamic_cast<const MockSearch*>(&strict_and->getSecond());
+ ASSERT_TRUE(ms2 != nullptr);
+ EXPECT_EQUAL("search", ms1->term);
+ EXPECT_EQUAL("[;;-15000]", ms2->term);
+ EXPECT_TRUE(ms1->strict());
+ EXPECT_FALSE(ms2->strict());
+ search->seek(100);
+ EXPECT_EQUAL(100u, ms1->last_seek);
+ EXPECT_EQUAL(100u, ms2->last_seek);
+ search->unpack(100);
+ EXPECT_EQUAL(100u, ms1->last_unpack);
+ EXPECT_EQUAL(0u, ms2->last_unpack); // will not unpack limiting term
+ EXPECT_TRUE(limiter.was_limited());
+}
+
+void verifyDiversity(AttributeLimiter::DiversityCutoffStrategy strategy)
+{
+ MockSearchable searchable;
+ FakeRequestContext requestContext;
+ MatchPhaseLimiter yes_limiter(10000, searchable, requestContext,"limiter_attribute", 500, true, 1.0, 0.2, 1.0, "category", 10, 13.1, strategy);
+ MaybeMatchPhaseLimiter &limiter = yes_limiter;
+ SearchIterator::UP search = limiter.maybe_limit(prepare(new MockSearch("search")), 0.1, 100000);
+ limiter.updateDocIdSpaceEstimate(1000, 9000);
+ EXPECT_EQUAL(1680u, limiter.getDocIdSpaceEstimate());
+ LimitedSearch *strict_and = dynamic_cast<LimitedSearch*>(search.get());
+ ASSERT_TRUE(strict_and != nullptr);
+ const MockSearch *ms1 = dynamic_cast<const MockSearch*>(&strict_and->getFirst());
+ ASSERT_TRUE(ms1 != nullptr);
+ if (strategy == AttributeLimiter::LOOSE) {
+ EXPECT_EQUAL("[;;-5000;category;500;131;loose]", ms1->term);
+ } else if (strategy == AttributeLimiter::STRICT) {
+ EXPECT_EQUAL("[;;-5000;category;500;131;strict]", ms1->term);
+ } else {
+ ASSERT_TRUE(false);
+ }
+}
+
+TEST("require that the match phase limiter can use loose diversity") {
+ verifyDiversity(AttributeLimiter::LOOSE);
+}
+
+TEST("require that the match phase limiter can use strict diversity") {
+ verifyDiversity(AttributeLimiter::STRICT);
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/matching/matching_stats_test.cpp b/searchcore/src/tests/proton/matching/matching_stats_test.cpp
new file mode 100644
index 00000000000..237f283f042
--- /dev/null
+++ b/searchcore/src/tests/proton/matching/matching_stats_test.cpp
@@ -0,0 +1,151 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("matching_stats_test");
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/searchcore/proton/matching/matching_stats.h>
+
+using namespace proton::matching;
+
+TEST("requireThatDocCountsAddUp") {
+ MatchingStats stats;
+ EXPECT_EQUAL(0u, stats.docsMatched());
+ EXPECT_EQUAL(0u, stats.docsRanked());
+ EXPECT_EQUAL(0u, stats.docsReRanked());
+ EXPECT_EQUAL(0u, stats.queries());
+ EXPECT_EQUAL(0u, stats.limited_queries());
+ {
+ MatchingStats rhs;
+ EXPECT_EQUAL(&rhs.docsMatched(1000), &rhs);
+ EXPECT_EQUAL(&rhs.docsRanked(100), &rhs);
+ EXPECT_EQUAL(&rhs.docsReRanked(10), &rhs);
+ EXPECT_EQUAL(&rhs.queries(2), &rhs);
+ EXPECT_EQUAL(&rhs.limited_queries(1), &rhs);
+ EXPECT_EQUAL(&stats.add(rhs), &stats);
+ }
+ EXPECT_EQUAL(1000u, stats.docsMatched());
+ EXPECT_EQUAL(100u, stats.docsRanked());
+ EXPECT_EQUAL(10u, stats.docsReRanked());
+ EXPECT_EQUAL(2u, stats.queries());
+ EXPECT_EQUAL(1u, stats.limited_queries());
+ EXPECT_EQUAL(&stats.add(MatchingStats().docsMatched(1000).docsRanked(100)
+ .docsReRanked(10).queries(2).limited_queries(1)), &stats);
+ EXPECT_EQUAL(2000u, stats.docsMatched());
+ EXPECT_EQUAL(200u, stats.docsRanked());
+ EXPECT_EQUAL(20u, stats.docsReRanked());
+ EXPECT_EQUAL(4u, stats.queries());
+ EXPECT_EQUAL(2u, stats.limited_queries());
+}
+
+TEST("requireThatAverageTimesAreRecorded") {
+ MatchingStats stats;
+ EXPECT_APPROX(0.0, stats.matchTimeAvg(), 0.00001);
+ EXPECT_APPROX(0.0, stats.groupingTimeAvg(), 0.00001);
+ EXPECT_APPROX(0.0, stats.rerankTimeAvg(), 0.00001);
+ EXPECT_APPROX(0.0, stats.queryCollateralTimeAvg(), 0.00001);
+ EXPECT_APPROX(0.0, stats.queryLatencyAvg(), 0.00001);
+ EXPECT_EQUAL(0u, stats.matchTimeCount());
+ EXPECT_EQUAL(0u, stats.groupingTimeCount());
+ EXPECT_EQUAL(0u, stats.rerankTimeCount());
+ EXPECT_EQUAL(0u, stats.queryCollateralTimeCount());
+ EXPECT_EQUAL(0u, stats.queryLatencyCount());
+ stats.matchTime(0.01).groupingTime(0.1).rerankTime(0.5).queryCollateralTime(2.0).queryLatency(1.0);
+ EXPECT_APPROX(0.01, stats.matchTimeAvg(), 0.00001);
+ EXPECT_APPROX(0.1, stats.groupingTimeAvg(), 0.00001);
+ EXPECT_APPROX(0.5, stats.rerankTimeAvg(), 0.00001);
+ EXPECT_APPROX(2.0, stats.queryCollateralTimeAvg(), 0.00001);
+ EXPECT_APPROX(1.0, stats.queryLatencyAvg(), 0.00001);
+ stats.add(MatchingStats().matchTime(0.03).groupingTime(0.3).rerankTime(1.5).queryCollateralTime(6.0).queryLatency(3.0));
+ EXPECT_APPROX(0.02, stats.matchTimeAvg(), 0.00001);
+ EXPECT_APPROX(0.2, stats.groupingTimeAvg(), 0.00001);
+ EXPECT_APPROX(1.0, stats.rerankTimeAvg(), 0.00001);
+ EXPECT_APPROX(4.0, stats.queryCollateralTimeAvg(), 0.00001);
+ EXPECT_APPROX(2.0, stats.queryLatencyAvg(), 0.00001);
+ stats.add(MatchingStats().matchTime(0.05)
+ .groupingTime(0.5)
+ .rerankTime(2.5)
+ .queryCollateralTime(10.0)
+ .queryLatency(5.0));
+ stats.add(MatchingStats().matchTime(0.05).matchTime(0.03)
+ .groupingTime(0.5).groupingTime(0.3)
+ .rerankTime(2.5).rerankTime(1.5)
+ .queryCollateralTime(10.0).queryCollateralTime(6.0)
+ .queryLatency(5.0).queryLatency(3.0));
+ EXPECT_APPROX(0.03, stats.matchTimeAvg(), 0.00001);
+ EXPECT_APPROX(0.3, stats.groupingTimeAvg(), 0.00001);
+ EXPECT_APPROX(1.5, stats.rerankTimeAvg(), 0.00001);
+ EXPECT_APPROX(6.0, stats.queryCollateralTimeAvg(), 0.00001);
+ EXPECT_APPROX(3.0, stats.queryLatencyAvg(), 0.00001);
+ EXPECT_EQUAL(4u, stats.matchTimeCount());
+ EXPECT_EQUAL(4u, stats.groupingTimeCount());
+ EXPECT_EQUAL(4u, stats.rerankTimeCount());
+ EXPECT_EQUAL(4u, stats.queryCollateralTimeCount());
+ EXPECT_EQUAL(4u, stats.queryLatencyCount());
+}
+
+TEST("requireThatPartitionsAreAddedCorrectly") {
+ MatchingStats all1;
+ EXPECT_EQUAL(0u, all1.docsMatched());
+ EXPECT_EQUAL(0u, all1.getNumPartitions());
+
+ MatchingStats::Partition subPart;
+ subPart.docsMatched(3).docsRanked(2).docsReRanked(1)
+ .active_time(1.0).wait_time(0.5);
+ EXPECT_EQUAL(3u, subPart.docsMatched());
+ EXPECT_EQUAL(2u, subPart.docsRanked());
+ EXPECT_EQUAL(1u, subPart.docsReRanked());
+ EXPECT_EQUAL(1.0, subPart.active_time_avg());
+ EXPECT_EQUAL(0.5, subPart.wait_time_avg());
+ EXPECT_EQUAL(1u, subPart.active_time_count());
+ EXPECT_EQUAL(1u, subPart.wait_time_count());
+
+ all1.merge_partition(subPart, 0);
+ EXPECT_EQUAL(3u, all1.docsMatched());
+ EXPECT_EQUAL(2u, all1.docsRanked());
+ EXPECT_EQUAL(1u, all1.docsReRanked());
+ EXPECT_EQUAL(1u, all1.getNumPartitions());
+ EXPECT_EQUAL(3u, all1.getPartition(0).docsMatched());
+ EXPECT_EQUAL(2u, all1.getPartition(0).docsRanked());
+ EXPECT_EQUAL(1u, all1.getPartition(0).docsReRanked());
+ EXPECT_EQUAL(1.0, all1.getPartition(0).active_time_avg());
+ EXPECT_EQUAL(0.5, all1.getPartition(0).wait_time_avg());
+ EXPECT_EQUAL(1u, all1.getPartition(0).active_time_count());
+ EXPECT_EQUAL(1u, all1.getPartition(0).wait_time_count());
+
+ all1.merge_partition(subPart, 1);
+ EXPECT_EQUAL(6u, all1.docsMatched());
+ EXPECT_EQUAL(4u, all1.docsRanked());
+ EXPECT_EQUAL(2u, all1.docsReRanked());
+ EXPECT_EQUAL(2u, all1.getNumPartitions());
+ EXPECT_EQUAL(3u, all1.getPartition(1).docsMatched());
+ EXPECT_EQUAL(2u, all1.getPartition(1).docsRanked());
+ EXPECT_EQUAL(1u, all1.getPartition(1).docsReRanked());
+ EXPECT_EQUAL(1.0, all1.getPartition(1).active_time_avg());
+ EXPECT_EQUAL(0.5, all1.getPartition(1).wait_time_avg());
+ EXPECT_EQUAL(1u, all1.getPartition(1).active_time_count());
+ EXPECT_EQUAL(1u, all1.getPartition(1).wait_time_count());
+
+ all1.add(all1);
+ EXPECT_EQUAL(12u, all1.docsMatched());
+ EXPECT_EQUAL(8u, all1.docsRanked());
+ EXPECT_EQUAL(4u, all1.docsReRanked());
+ EXPECT_EQUAL(2u, all1.getNumPartitions());
+ EXPECT_EQUAL(6u, all1.getPartition(0).docsMatched());
+ EXPECT_EQUAL(4u, all1.getPartition(0).docsRanked());
+ EXPECT_EQUAL(2u, all1.getPartition(0).docsReRanked());
+ EXPECT_EQUAL(1.0, all1.getPartition(0).active_time_avg());
+ EXPECT_EQUAL(0.5, all1.getPartition(0).wait_time_avg());
+ EXPECT_EQUAL(2u, all1.getPartition(0).active_time_count());
+ EXPECT_EQUAL(2u, all1.getPartition(0).wait_time_count());
+ EXPECT_EQUAL(6u, all1.getPartition(1).docsMatched());
+ EXPECT_EQUAL(4u, all1.getPartition(1).docsRanked());
+ EXPECT_EQUAL(2u, all1.getPartition(1).docsReRanked());
+ EXPECT_EQUAL(1.0, all1.getPartition(1).active_time_avg());
+ EXPECT_EQUAL(0.5, all1.getPartition(1).wait_time_avg());
+ EXPECT_EQUAL(2u, all1.getPartition(1).active_time_count());
+ EXPECT_EQUAL(2u, all1.getPartition(1).wait_time_count());
+}
+
+TEST_MAIN() {
+ TEST_RUN_ALL();
+}
diff --git a/searchcore/src/tests/proton/matching/matching_test.cpp b/searchcore/src/tests/proton/matching/matching_test.cpp
new file mode 100644
index 00000000000..b650c983be0
--- /dev/null
+++ b/searchcore/src/tests/proton/matching/matching_test.cpp
@@ -0,0 +1,775 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("matching_test");
+#include <vespa/vespalib/testkit/testapp.h>
+
+#include <vespa/document/base/globalid.h>
+#include <initializer_list>
+#include <vespa/searchcommon/attribute/iattributecontext.h>
+#include <vespa/searchcore/proton/common/bucketfactory.h>
+#include <vespa/searchcore/proton/documentmetastore/documentmetastore.h>
+#include <vespa/searchcore/proton/matching/fakesearchcontext.h>
+#include <vespa/searchcore/proton/matching/isearchcontext.h>
+#include <vespa/searchcore/proton/matching/matcher.h>
+#include <vespa/searchcore/proton/matching/querynodes.h>
+#include <vespa/searchcore/proton/matching/sessionmanager.h>
+#include <vespa/searchcore/proton/matching/viewresolver.h>
+#include <vespa/searchlib/aggregation/aggregation.h>
+#include <vespa/searchlib/aggregation/grouping.h>
+#include <vespa/searchlib/aggregation/perdocexpression.h>
+#include <vespa/searchlib/attribute/extendableattributes.h>
+#include <vespa/searchlib/common/featureset.h>
+#include <vespa/searchlib/engine/docsumrequest.h>
+#include <vespa/searchlib/fef/properties.h>
+#include <vespa/searchlib/query/tree/querybuilder.h>
+#include <vespa/searchlib/query/tree/stackdumpcreator.h>
+#include <vespa/searchlib/queryeval/isourceselector.h>
+#include <vespa/vespalib/util/simple_thread_bundle.h>
+#include <vespa/searchcore/proton/matching/match_params.h>
+
+using namespace proton::matching;
+using namespace proton;
+using namespace search::aggregation;
+using namespace search::attribute;
+using namespace search::engine;
+using namespace search::expression;
+using namespace search::fef;
+using namespace search::grouping;
+using namespace search::index;
+using namespace search::query;
+using namespace search::queryeval;
+using namespace search;
+using storage::spi::Timestamp;
+
+void inject_match_phase_limiting(Properties &setup, const vespalib::string &attribute, size_t max_hits, bool descending)
+{
+ Properties cfg;
+ cfg.add(indexproperties::matchphase::DegradationAttribute::NAME, attribute);
+ cfg.add(indexproperties::matchphase::DegradationAscendingOrder::NAME, descending ? "false" : "true");
+ cfg.add(indexproperties::matchphase::DegradationMaxHits::NAME, vespalib::make_string("%zu", max_hits));
+ setup.import(cfg);
+}
+
+//-----------------------------------------------------------------------------
+
+const uint32_t NUM_DOCS = 1000;
+
+//-----------------------------------------------------------------------------
+
+class MyAttributeContext : public IAttributeContext
+{
+private:
+ typedef std::map<string, IAttributeVector *> Map;
+ Map _vectors;
+
+public:
+ const IAttributeVector *get(const string &name) const {
+ if (_vectors.find(name) == _vectors.end()) {
+ return 0;
+ }
+ return _vectors.find(name)->second;
+ }
+ virtual const IAttributeVector *
+ getAttribute(const string &name) const {
+ return get(name);
+ }
+ virtual const IAttributeVector *
+ getAttributeStableEnum(const string &name) const {
+ return get(name);
+ }
+ virtual void
+ getAttributeList(std::vector<const IAttributeVector *> & list) const {
+ Map::const_iterator pos = _vectors.begin();
+ Map::const_iterator end = _vectors.end();
+ for (; pos != end; ++pos) {
+ list.push_back(pos->second);
+ }
+ }
+ ~MyAttributeContext() {
+ Map::iterator pos = _vectors.begin();
+ Map::iterator end = _vectors.end();
+ for (; pos != end; ++pos) {
+ delete pos->second;
+ }
+ }
+
+ //-------------------------------------------------------------------------
+
+ void add(IAttributeVector *attr) {
+ _vectors[attr->getName()] = attr;
+ }
+};
+
+//-----------------------------------------------------------------------------
+
+struct MyWorld {
+ Schema schema;
+ Properties config;
+ FakeSearchContext searchContext;
+ MyAttributeContext attributeContext;
+ SessionManager::SP sessionManager;
+ DocumentMetaStore metaStore;
+ MatchingStats matchingStats;
+ vespalib::Clock clock;
+ QueryLimiter queryLimiter;
+
+ MyWorld()
+ : schema(),
+ config(),
+ searchContext(),
+ attributeContext(),
+ sessionManager(),
+ metaStore(std::make_shared<BucketDBOwner>()),
+ matchingStats(),
+ clock(),
+ queryLimiter()
+ {
+ }
+
+ void basicSetup(size_t heapSize=10, size_t arraySize=100) {
+ // schema
+ schema.addIndexField(Schema::IndexField("f1", Schema::STRING));
+ schema.addIndexField(Schema::IndexField("f2", Schema::STRING));
+ schema.addIndexField(Schema::IndexField("tensor_field", Schema::TENSOR));
+ schema.addAttributeField(Schema::AttributeField("a1", Schema::INT32));
+ schema.addAttributeField(Schema::AttributeField("a2", Schema::INT32));
+ schema.addAttributeField(Schema::AttributeField("predicate_field", Schema::BOOLEANTREE));
+
+ // config
+ config.add(indexproperties::rank::FirstPhase::NAME, "attribute(a1)");
+ config.add(indexproperties::hitcollector::HeapSize::NAME, (vespalib::asciistream() << heapSize).str());
+ config.add(indexproperties::hitcollector::ArraySize::NAME, (vespalib::asciistream() << arraySize).str());
+ config.add(indexproperties::summary::Feature::NAME, "attribute(a1)");
+ config.add(indexproperties::summary::Feature::NAME, "value(100)");
+ config.add(indexproperties::dump::IgnoreDefaultFeatures::NAME, "true");
+ config.add(indexproperties::dump::Feature::NAME, "attribute(a2)");
+
+ // search context
+ searchContext.setLimit(NUM_DOCS);
+ searchContext.addIdx(0).addIdx(1);
+ for (uint32_t i = 0; i < NUM_DOCS; ++i) {
+ searchContext.selector().setSource(i, i % 2); // even -> 0
+ // odd -> 1
+ }
+
+ // attribute context
+ {
+ SingleInt32ExtAttribute *attr = new SingleInt32ExtAttribute("a1");
+ AttributeVector::DocId docid;
+ for (uint32_t i = 0; i < NUM_DOCS; ++i) {
+ attr->addDoc(docid);
+ attr->add(i, docid); // value = docid
+ }
+ assert(docid + 1 == NUM_DOCS);
+ attributeContext.add(attr);
+ }
+ {
+ SingleInt32ExtAttribute *attr = new SingleInt32ExtAttribute("a2");
+ AttributeVector::DocId docid;
+ for (uint32_t i = 0; i < NUM_DOCS; ++i) {
+ attr->addDoc(docid);
+ attr->add(i * 2, docid); // value = docid * 2
+ }
+ assert(docid + 1 == NUM_DOCS);
+ attributeContext.add(attr);
+ }
+
+ // grouping
+ sessionManager = SessionManager::SP(new SessionManager(100));
+
+ // metaStore
+ for (uint32_t i = 0; i < NUM_DOCS; ++i) {
+ document::DocumentId docId(vespalib::make_string("doc::%u", i));
+ const document::GlobalId &gid = docId.getGlobalId();
+ typedef DocumentMetaStore::Result PutRes;
+ document::BucketId bucketId(BucketFactory::getBucketId(docId));
+ PutRes putRes(metaStore.put(gid,
+ bucketId,
+ Timestamp(0u),
+ i));
+ metaStore.setBucketState(bucketId, true);
+ }
+ }
+
+ void set_property(const vespalib::string &name, const vespalib::string &value) {
+ Properties cfg;
+ cfg.add(name, value);
+ config.import(cfg);
+ }
+
+ void setup_match_phase_limiting(const vespalib::string &attribute, size_t max_hits, bool descending)
+ {
+ inject_match_phase_limiting(config, attribute, max_hits, descending);
+ }
+
+ void add_match_phase_limiting_result(const vespalib::string &attribute, size_t want_docs,
+ bool descending, std::initializer_list<uint32_t> docs)
+ {
+ vespalib::string term = vespalib::make_string("[;;%s%zu]", descending ? "-" : "", want_docs);
+ FakeResult result;
+ for (uint32_t doc: docs) {
+ result.doc(doc);
+ }
+ searchContext.attr().addResult(attribute, term, result);
+ }
+
+ void setupSecondPhaseRanking() {
+ Properties cfg;
+ cfg.add(indexproperties::rank::SecondPhase::NAME, "attribute(a2)");
+ cfg.add(indexproperties::hitcollector::HeapSize::NAME, "3");
+ config.import(cfg);
+ }
+
+ void verbose_a1_result(const vespalib::string &term) {
+ FakeResult result;
+ for (uint32_t i = 15; i < NUM_DOCS; ++i) {
+ result.doc(i);
+ }
+ searchContext.attr().addResult("a1", term, result);
+ }
+
+ void basicResults() {
+ searchContext.idx(0).getFake().addResult("f1", "foo",
+ FakeResult()
+ .doc(10).doc(20).doc(30));
+ searchContext.idx(0).getFake().addResult(
+ "f1", "spread",
+ FakeResult()
+ .doc(100).doc(200).doc(300).doc(400).doc(500)
+ .doc(600).doc(700).doc(800).doc(900));
+ }
+
+ void setStackDump(Request &request, const vespalib::string &field,
+ const vespalib::string &term) {
+ QueryBuilder<ProtonNodeTypes> builder;
+ builder.addStringTerm(term, field, 1, search::query::Weight(1));
+ vespalib::string stack_dump =
+ StackDumpCreator::create(*builder.build());
+ request.stackDump.assign(stack_dump.data(),
+ stack_dump.data() + stack_dump.size());
+ }
+
+ SearchRequest::SP createSimpleRequest(const vespalib::string &field,
+ const vespalib::string &term)
+ {
+ SearchRequest::SP request(new SearchRequest);
+ request->setTimeout(60 * fastos::TimeStamp::SEC);
+ setStackDump(*request, field, term);
+ request->maxhits = 10;
+ return request;
+ }
+
+ struct MySearchHandler : ISearchHandler {
+ Matcher::SP _matcher;
+
+ MySearchHandler(Matcher::SP matcher) : _matcher(matcher) {}
+
+ virtual DocsumReply::UP getDocsums(const DocsumRequest &)
+ { return DocsumReply::UP(); }
+ virtual SearchReply::UP match(const ISearchHandler::SP &,
+ const SearchRequest &,
+ vespalib::ThreadBundle &) const
+ { return SearchReply::UP(); }
+ };
+
+ double get_first_phase_termwise_limit() {
+ Matcher matcher(schema, config, clock, queryLimiter, 0);
+ SearchRequest::SP request = createSimpleRequest("f1", "spread");
+ search::fef::Properties overrides;
+ MatchToolsFactory::UP match_tools_factory = matcher.create_match_tools_factory(
+ *request, searchContext, attributeContext, metaStore, overrides);
+ MatchTools::UP match_tools = match_tools_factory->createMatchTools();
+ RankProgram::UP rank_program = match_tools->first_phase_program();
+ return rank_program->match_data().get_termwise_limit();
+ }
+
+ SearchReply::UP performSearch(SearchRequest::SP req, size_t threads) {
+ Matcher::SP matcher(new Matcher(schema, config, clock, queryLimiter, 0));
+ SearchSession::OwnershipBundle owned_objects;
+ owned_objects.search_handler.reset(new MySearchHandler(matcher));
+ owned_objects.context.reset(new MatchContext(
+ IAttributeContext::UP(new MyAttributeContext),
+ ISearchContext::UP(new FakeSearchContext)));
+ vespalib::SimpleThreadBundle threadBundle(threads);
+ SearchReply::UP reply =
+ matcher->match(*req, threadBundle, searchContext, attributeContext,
+ *sessionManager, metaStore,
+ std::move(owned_objects));
+ matchingStats.add(matcher->getStats());
+ return reply;
+ }
+
+ DocsumRequest::SP createSimpleDocsumRequest(const vespalib::string & field,
+ const vespalib::string & term)
+ {
+ DocsumRequest::SP request(new DocsumRequest);
+ setStackDump(*request, field, term);
+
+ // match a subset of basic result + request for a non-hit (not
+ // sorted on docid)
+ request->hits.push_back(DocsumRequest::Hit());
+ request->hits.back().docid = 30;
+ request->hits.push_back(DocsumRequest::Hit());
+ request->hits.back().docid = 10;
+ request->hits.push_back(DocsumRequest::Hit());
+ request->hits.back().docid = 15;
+ return request;
+ }
+
+ std::unique_ptr<FieldInfo> get_field_info(const vespalib::string &field_name) {
+ Matcher::SP matcher(new Matcher(schema, config, clock, queryLimiter, 0));
+ const FieldInfo *field = matcher->get_index_env().getFieldByName(field_name);
+ if (field == nullptr) {
+ return std::unique_ptr<FieldInfo>(nullptr);
+ }
+ return std::make_unique<FieldInfo>(*field);
+ }
+
+ FeatureSet::SP getSummaryFeatures(DocsumRequest::SP req) {
+ Matcher matcher(schema, config, clock, queryLimiter, 0);
+ return matcher.getSummaryFeatures(*req, searchContext,
+ attributeContext, *sessionManager);
+ }
+
+ FeatureSet::SP getRankFeatures(DocsumRequest::SP req) {
+ Matcher matcher(schema, config, clock, queryLimiter, 0);
+ return matcher.getRankFeatures(*req, searchContext, attributeContext,
+ *sessionManager);
+ }
+
+};
+
+//-----------------------------------------------------------------------------
+//-----------------------------------------------------------------------------
+
+void verifyViewResolver(const ViewResolver &resolver) {
+ {
+ std::vector<vespalib::string> fields;
+ EXPECT_TRUE(resolver.resolve("foo", fields));
+ ASSERT_TRUE(fields.size() == 2u);
+ EXPECT_EQUAL("x", fields[0]);
+ EXPECT_EQUAL("y", fields[1]);
+ }
+ {
+ std::vector<vespalib::string> fields;
+ EXPECT_TRUE(resolver.resolve("bar", fields));
+ ASSERT_TRUE(fields.size() == 1u);
+ EXPECT_EQUAL("z", fields[0]);
+ }
+ {
+ std::vector<vespalib::string> fields;
+ EXPECT_TRUE(!resolver.resolve("baz", fields));
+ ASSERT_TRUE(fields.size() == 1u);
+ EXPECT_EQUAL("baz", fields[0]);
+ }
+}
+
+TEST("require that view resolver can be set up directly") {
+ ViewResolver resolver;
+ resolver.add("foo", "x").add("foo", "y").add("bar", "z");
+ TEST_DO(verifyViewResolver(resolver));
+}
+
+TEST("require that view resolver can be set up from schema") {
+ Schema schema;
+ Schema::FieldSet foo("foo");
+ foo.addField("x").addField("y");
+ Schema::FieldSet bar("bar");
+ bar.addField("z");
+ schema.addFieldSet(foo);
+ schema.addFieldSet(bar);
+ ViewResolver resolver = ViewResolver::createFromSchema(schema);
+ TEST_DO(verifyViewResolver(resolver));
+}
+
+//-----------------------------------------------------------------------------
+
+TEST("require that matching is performed (multi-threaded)") {
+ for (size_t threads = 1; threads <= 16; ++threads) {
+ MyWorld world;
+ world.basicSetup();
+ world.basicResults();
+ SearchRequest::SP request = world.createSimpleRequest("f1", "spread");
+ SearchReply::UP reply = world.performSearch(request, threads);
+ EXPECT_EQUAL(9u, world.matchingStats.docsMatched());
+ EXPECT_EQUAL(9u, reply->hits.size());
+ EXPECT_GREATER(world.matchingStats.matchTimeAvg(), 0.0000001);
+ }
+}
+
+TEST("require that matching also returns hits when only bitvector is used (multi-threaded)") {
+ for (size_t threads = 1; threads <= 16; ++threads) {
+ MyWorld world;
+ world.basicSetup(0, 0);
+ world.verbose_a1_result("all");
+ SearchRequest::SP request = world.createSimpleRequest("a1", "all");
+ SearchReply::UP reply = world.performSearch(request, threads);
+ EXPECT_EQUAL(985u, world.matchingStats.docsMatched());
+ EXPECT_EQUAL(10u, reply->hits.size());
+ EXPECT_GREATER(world.matchingStats.matchTimeAvg(), 0.0000001);
+ }
+}
+
+TEST("require that ranking is performed (multi-threaded)") {
+ for (size_t threads = 1; threads <= 16; ++threads) {
+ MyWorld world;
+ world.basicSetup();
+ world.basicResults();
+ SearchRequest::SP request = world.createSimpleRequest("f1", "spread");
+ SearchReply::UP reply = world.performSearch(request, threads);
+ EXPECT_EQUAL(9u, world.matchingStats.docsMatched());
+ EXPECT_EQUAL(9u, world.matchingStats.docsRanked());
+ EXPECT_EQUAL(0u, world.matchingStats.docsReRanked());
+ ASSERT_TRUE(reply->hits.size() == 9u);
+ EXPECT_EQUAL(document::DocumentId("doc::900").getGlobalId(), reply->hits[0].gid);
+ EXPECT_EQUAL(900.0, reply->hits[0].metric);
+ EXPECT_EQUAL(document::DocumentId("doc::800").getGlobalId(), reply->hits[1].gid);
+ EXPECT_EQUAL(800.0, reply->hits[1].metric);
+ EXPECT_EQUAL(document::DocumentId("doc::700").getGlobalId(), reply->hits[2].gid);
+ EXPECT_EQUAL(700.0, reply->hits[2].metric);
+ EXPECT_GREATER(world.matchingStats.matchTimeAvg(), 0.0000001);
+ EXPECT_EQUAL(0.0, world.matchingStats.rerankTimeAvg());
+ }
+}
+
+TEST("require that re-ranking is performed (multi-threaded)") {
+ for (size_t threads = 1; threads <= 16; ++threads) {
+ MyWorld world;
+ world.basicSetup();
+ world.setupSecondPhaseRanking();
+ world.basicResults();
+ SearchRequest::SP request = world.createSimpleRequest("f1", "spread");
+ SearchReply::UP reply = world.performSearch(request, threads);
+ EXPECT_EQUAL(9u, world.matchingStats.docsMatched());
+ EXPECT_EQUAL(9u, world.matchingStats.docsRanked());
+ EXPECT_EQUAL(3u, world.matchingStats.docsReRanked());
+ ASSERT_TRUE(reply->hits.size() == 9u);
+ EXPECT_EQUAL(document::DocumentId("doc::900").getGlobalId(), reply->hits[0].gid);
+ EXPECT_EQUAL(1800.0, reply->hits[0].metric);
+ EXPECT_EQUAL(document::DocumentId("doc::800").getGlobalId(), reply->hits[1].gid);
+ EXPECT_EQUAL(1600.0, reply->hits[1].metric);
+ EXPECT_EQUAL(document::DocumentId("doc::700").getGlobalId(), reply->hits[2].gid);
+ EXPECT_EQUAL(1400.0, reply->hits[2].metric);
+ EXPECT_EQUAL(document::DocumentId("doc::600").getGlobalId(), reply->hits[3].gid);
+ EXPECT_EQUAL(600.0, reply->hits[3].metric);
+ EXPECT_EQUAL(document::DocumentId("doc::500").getGlobalId(), reply->hits[4].gid);
+ EXPECT_EQUAL(500.0, reply->hits[4].metric);
+ EXPECT_GREATER(world.matchingStats.matchTimeAvg(), 0.0000001);
+ EXPECT_GREATER(world.matchingStats.rerankTimeAvg(), 0.0000001);
+ }
+}
+
+TEST("require that sortspec can be used (multi-threaded)") {
+ for (size_t threads = 1; threads <= 16; ++threads) {
+ MyWorld world;
+ world.basicSetup();
+ world.basicResults();
+ SearchRequest::SP request = world.createSimpleRequest("f1", "spread");
+ request->sortSpec = "+a1";
+ SearchReply::UP reply = world.performSearch(request, threads);
+ ASSERT_EQUAL(9u, reply->hits.size());
+ EXPECT_EQUAL(document::DocumentId("doc::100").getGlobalId(), reply->hits[0].gid);
+ EXPECT_EQUAL(0.0, reply->hits[0].metric);
+ EXPECT_EQUAL(document::DocumentId("doc::200").getGlobalId(), reply->hits[1].gid);
+ EXPECT_EQUAL(0.0, reply->hits[1].metric);
+ EXPECT_EQUAL(document::DocumentId("doc::300").getGlobalId(), reply->hits[2].gid);
+ EXPECT_EQUAL(0.0, reply->hits[2].metric);
+ }
+}
+
+TEST("require that grouping is performed (multi-threaded)") {
+ for (size_t threads = 1; threads <= 16; ++threads) {
+ MyWorld world;
+ world.basicSetup();
+ world.basicResults();
+ SearchRequest::SP request = world.createSimpleRequest("f1", "spread");
+ {
+ vespalib::nbostream buf;
+ vespalib::NBOSerializer os(buf);
+ uint32_t n = 1;
+ os << n;
+ Grouping grequest =
+ Grouping()
+ .setRoot(Group()
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("a1"))));
+ grequest.serialize(os);
+ request->groupSpec.assign(buf.c_str(), buf.c_str() + buf.size());
+ }
+ SearchReply::UP reply = world.performSearch(request, threads);
+ {
+ vespalib::nbostream buf(&reply->groupResult[0],
+ reply->groupResult.size());
+ vespalib::NBOSerializer is(buf);
+ uint32_t n;
+ is >> n;
+ EXPECT_EQUAL(1u, n);
+ Grouping gresult;
+ gresult.deserialize(is);
+ Grouping gexpect = Grouping()
+ .setRoot(Group()
+ .addResult(SumAggregationResult()
+ .setExpression(AttributeNode("a1"))
+ .setResult(Int64ResultNode(4500))));
+ EXPECT_EQUAL(gexpect.root().asString(), gresult.root().asString());
+ }
+ EXPECT_GREATER(world.matchingStats.groupingTimeAvg(), 0.0000001);
+ }
+}
+
+TEST("require that summary features are filled") {
+ MyWorld world;
+ world.basicSetup();
+ world.basicResults();
+ DocsumRequest::SP req = world.createSimpleDocsumRequest("f1", "foo");
+ FeatureSet::SP fs = world.getSummaryFeatures(req);
+ const feature_t * f = NULL;
+ EXPECT_EQUAL(2u, fs->numFeatures());
+ EXPECT_EQUAL("attribute(a1)", fs->getNames()[0]);
+ EXPECT_EQUAL("value(100)", fs->getNames()[1]);
+ EXPECT_EQUAL(2u, fs->numDocs());
+ f = fs->getFeaturesByDocId(10);
+ EXPECT_TRUE(f != NULL);
+ EXPECT_EQUAL(10, f[0]);
+ EXPECT_EQUAL(100, f[1]);
+ f = fs->getFeaturesByDocId(15);
+ EXPECT_TRUE(f == NULL);
+ f = fs->getFeaturesByDocId(30);
+ EXPECT_TRUE(f != NULL);
+ EXPECT_EQUAL(30, f[0]);
+ EXPECT_EQUAL(100, f[1]);
+}
+
+TEST("require that rank features are filled") {
+ MyWorld world;
+ world.basicSetup();
+ world.basicResults();
+ DocsumRequest::SP req = world.createSimpleDocsumRequest("f1", "foo");
+ FeatureSet::SP fs = world.getRankFeatures(req);
+ const feature_t * f = NULL;
+ EXPECT_EQUAL(1u, fs->numFeatures());
+ EXPECT_EQUAL("attribute(a2)", fs->getNames()[0]);
+ EXPECT_EQUAL(2u, fs->numDocs());
+ f = fs->getFeaturesByDocId(10);
+ EXPECT_TRUE(f != NULL);
+ EXPECT_EQUAL(20, f[0]);
+ f = fs->getFeaturesByDocId(15);
+ EXPECT_TRUE(f == NULL);
+ f = fs->getFeaturesByDocId(30);
+ EXPECT_TRUE(f != NULL);
+ EXPECT_EQUAL(60, f[0]);
+}
+
+TEST("require that search session can be cached") {
+ MyWorld world;
+ world.basicSetup();
+ world.basicResults();
+ SearchRequest::SP request = world.createSimpleRequest("f1", "foo");
+ request->propertiesMap.lookupCreate(search::MapNames::CACHES).add("query", "true");
+ request->sessionId.push_back('a');
+ EXPECT_EQUAL(0u, world.sessionManager->getSearchStats().numInsert);
+ SearchReply::UP reply = world.performSearch(request, 1);
+ EXPECT_EQUAL(1u, world.sessionManager->getSearchStats().numInsert);
+ SearchSession::SP session = world.sessionManager->pickSearch("a");
+ ASSERT_TRUE(session.get());
+ EXPECT_EQUAL(request->getTimeOfDoom(), session->getTimeOfDoom());
+ EXPECT_EQUAL("a", session->getSessionId());
+}
+
+TEST("require that getSummaryFeatures can use cached query setup") {
+ MyWorld world;
+ world.basicSetup();
+ world.basicResults();
+ SearchRequest::SP request = world.createSimpleRequest("f1", "foo");
+ request->propertiesMap.lookupCreate(search::MapNames::CACHES).add("query", "true");
+ request->sessionId.push_back('a');
+ world.performSearch(request, 1);
+
+ DocsumRequest::SP docsum_request(new DocsumRequest); // no stack dump
+ docsum_request->sessionId = request->sessionId;
+ docsum_request->
+ propertiesMap.lookupCreate(search::MapNames::CACHES).add("query", "true");
+ docsum_request->hits.push_back(DocsumRequest::Hit());
+ docsum_request->hits.back().docid = 30;
+
+ FeatureSet::SP fs = world.getSummaryFeatures(docsum_request);
+ ASSERT_EQUAL(2u, fs->numFeatures());
+ EXPECT_EQUAL("attribute(a1)", fs->getNames()[0]);
+ EXPECT_EQUAL("value(100)", fs->getNames()[1]);
+ ASSERT_EQUAL(1u, fs->numDocs());
+ const feature_t *f = fs->getFeaturesByDocId(30);
+ ASSERT_TRUE(f);
+ EXPECT_EQUAL(30, f[0]);
+ EXPECT_EQUAL(100, f[1]);
+
+ // getSummaryFeatures can be called multiple times.
+ fs = world.getSummaryFeatures(docsum_request);
+ ASSERT_EQUAL(2u, fs->numFeatures());
+ EXPECT_EQUAL("attribute(a1)", fs->getNames()[0]);
+ EXPECT_EQUAL("value(100)", fs->getNames()[1]);
+ ASSERT_EQUAL(1u, fs->numDocs());
+ f = fs->getFeaturesByDocId(30);
+ ASSERT_TRUE(f);
+ EXPECT_EQUAL(30, f[0]);
+ EXPECT_EQUAL(100, f[1]);
+}
+
+TEST("require that getSummaryFeatures prefers cached query setup") {
+ MyWorld world;
+ world.basicSetup();
+ world.basicResults();
+ SearchRequest::SP request = world.createSimpleRequest("f1", "spread");
+ request->propertiesMap.lookupCreate(search::MapNames::CACHES).add("query", "true");
+ request->sessionId.push_back('a');
+ world.performSearch(request, 1);
+
+ DocsumRequest::SP req = world.createSimpleDocsumRequest("f1", "foo");
+ req->sessionId = request->sessionId;
+ req->propertiesMap.lookupCreate(search::MapNames::CACHES).add("query", "true");
+ FeatureSet::SP fs = world.getSummaryFeatures(req);
+ EXPECT_EQUAL(2u, fs->numFeatures());
+ ASSERT_EQUAL(0u, fs->numDocs()); // "spread" has no hits
+
+ // Empty cache
+ auto pruneTime = fastos::ClockSystem::now() +
+ fastos::TimeStamp::MINUTE * 10;
+ world.sessionManager->pruneTimedOutSessions(pruneTime);
+
+ fs = world.getSummaryFeatures(req);
+ EXPECT_EQUAL(2u, fs->numFeatures());
+ ASSERT_EQUAL(2u, fs->numDocs()); // "foo" has two hits
+}
+
+TEST("require that match params are set up straight with ranking on") {
+ MatchParams p(1, 2, 4, 0.7, 0, 1, true, true);
+ ASSERT_EQUAL(1u, p.numDocs);
+ ASSERT_EQUAL(2u, p.heapSize);
+ ASSERT_EQUAL(4u, p.arraySize);
+ ASSERT_EQUAL(0.7, p.rankDropLimit);
+ ASSERT_EQUAL(0u, p.offset);
+ ASSERT_EQUAL(1u, p.hits);
+}
+
+TEST("require that match params are set up straight with ranking on arraySize is atleast the size of heapSize") {
+ MatchParams p(1, 6, 4, 0.7, 1, 1, true, true);
+ ASSERT_EQUAL(1u, p.numDocs);
+ ASSERT_EQUAL(6u, p.heapSize);
+ ASSERT_EQUAL(6u, p.arraySize);
+ ASSERT_EQUAL(0.7, p.rankDropLimit);
+ ASSERT_EQUAL(1u, p.offset);
+ ASSERT_EQUAL(1u, p.hits);
+}
+
+TEST("require that match params are set up straight with ranking on arraySize is atleast the size of hits+offset") {
+ MatchParams p(1, 6, 4, 0.7, 4, 4, true, true);
+ ASSERT_EQUAL(1u, p.numDocs);
+ ASSERT_EQUAL(6u, p.heapSize);
+ ASSERT_EQUAL(8u, p.arraySize);
+ ASSERT_EQUAL(0.7, p.rankDropLimit);
+ ASSERT_EQUAL(4u, p.offset);
+ ASSERT_EQUAL(4u, p.hits);
+}
+
+TEST("require that match params are set up straight with ranking off array and heap size is 0") {
+ MatchParams p(1, 6, 4, 0.7, 4, 4, true, false);
+ ASSERT_EQUAL(1u, p.numDocs);
+ ASSERT_EQUAL(0u, p.heapSize);
+ ASSERT_EQUAL(0u, p.arraySize);
+ ASSERT_EQUAL(0.7, p.rankDropLimit);
+ ASSERT_EQUAL(4u, p.offset);
+ ASSERT_EQUAL(4u, p.hits);
+}
+
+TEST("require that match phase limiting works") {
+ for (int s = 0; s <= 1; ++s) {
+ for (int i = 0; i <= 6; ++i) {
+ bool enable = (i != 0);
+ bool index_time = (i == 1) || (i == 2) || (i == 5) || (i == 6);
+ bool query_time = (i == 3) || (i == 4) || (i == 5) || (i == 6);
+ bool descending = (i == 2) || (i == 4) || (i == 6);
+ bool use_sorting = (s == 1);
+ size_t want_threads = 75;
+ MyWorld world;
+ world.basicSetup();
+ world.verbose_a1_result("all");
+ if (enable) {
+ if (index_time) {
+ if (query_time) {
+ // inject bogus setup to be overridden by query
+ world.setup_match_phase_limiting("limiter", 10, true);
+ } else {
+ world.setup_match_phase_limiting("limiter", 150, descending);
+ }
+ }
+ world.add_match_phase_limiting_result("limiter", 152, descending, {948, 951, 963, 987, 991, 994, 997});
+ }
+ SearchRequest::SP request = world.createSimpleRequest("a1", "all");
+ if (query_time) {
+ inject_match_phase_limiting(request->propertiesMap.lookupCreate(search::MapNames::RANK), "limiter", 150, descending);
+ }
+ if (use_sorting) {
+ request->sortSpec = "-a1";
+ }
+ SearchReply::UP reply = world.performSearch(request, want_threads);
+ ASSERT_EQUAL(10u, reply->hits.size());
+ if (enable) {
+ EXPECT_EQUAL(79u, reply->totalHitCount);
+ if (!use_sorting) {
+ EXPECT_EQUAL(997.0, reply->hits[0].metric);
+ EXPECT_EQUAL(994.0, reply->hits[1].metric);
+ EXPECT_EQUAL(991.0, reply->hits[2].metric);
+ EXPECT_EQUAL(987.0, reply->hits[3].metric);
+ EXPECT_EQUAL(974.0, reply->hits[4].metric);
+ EXPECT_EQUAL(963.0, reply->hits[5].metric);
+ EXPECT_EQUAL(961.0, reply->hits[6].metric);
+ EXPECT_EQUAL(951.0, reply->hits[7].metric);
+ EXPECT_EQUAL(948.0, reply->hits[8].metric);
+ EXPECT_EQUAL(935.0, reply->hits[9].metric);
+ }
+ } else {
+ EXPECT_EQUAL(985u, reply->totalHitCount);
+ if (!use_sorting) {
+ EXPECT_EQUAL(999.0, reply->hits[0].metric);
+ EXPECT_EQUAL(998.0, reply->hits[1].metric);
+ EXPECT_EQUAL(997.0, reply->hits[2].metric);
+ EXPECT_EQUAL(996.0, reply->hits[3].metric);
+ }
+ }
+ }
+ }
+}
+
+TEST("require that arithmetic used for rank drop limit works") {
+ double small = -HUGE_VAL;
+ double limit = -std::numeric_limits<feature_t>::quiet_NaN();
+ EXPECT_TRUE(!(small <= limit));
+}
+
+TEST("require that termwise limit is set correctly for first phase ranking program") {
+ MyWorld world;
+ world.basicSetup();
+ world.basicResults();
+ EXPECT_EQUAL(1.0, world.get_first_phase_termwise_limit());
+ world.set_property(indexproperties::matching::TermwiseLimit::NAME, "0.02");
+ EXPECT_EQUAL(0.02, world.get_first_phase_termwise_limit());
+}
+
+TEST("require that fields are tagged with data type") {
+ MyWorld world;
+ world.basicSetup();
+ auto int32_field = world.get_field_info("a1");
+ auto string_field = world.get_field_info("f1");
+ auto tensor_field = world.get_field_info("tensor_field");
+ auto predicate_field = world.get_field_info("predicate_field");
+ ASSERT_TRUE(bool(int32_field));
+ ASSERT_TRUE(bool(string_field));
+ ASSERT_TRUE(bool(tensor_field));
+ ASSERT_TRUE(bool(predicate_field));
+ EXPECT_EQUAL(int32_field->get_data_type(), FieldInfo::DataType::INT32);
+ EXPECT_EQUAL(string_field->get_data_type(), FieldInfo::DataType::STRING);
+ EXPECT_EQUAL(tensor_field->get_data_type(), FieldInfo::DataType::TENSOR);
+ EXPECT_EQUAL(predicate_field->get_data_type(), FieldInfo::DataType::BOOLEANTREE);
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/matching/partial_result/.gitignore b/searchcore/src/tests/proton/matching/partial_result/.gitignore
new file mode 100644
index 00000000000..0284be2ead8
--- /dev/null
+++ b/searchcore/src/tests/proton/matching/partial_result/.gitignore
@@ -0,0 +1 @@
+searchcore_partial_result_test_app
diff --git a/searchcore/src/tests/proton/matching/partial_result/CMakeLists.txt b/searchcore/src/tests/proton/matching/partial_result/CMakeLists.txt
new file mode 100644
index 00000000000..39c1679fc27
--- /dev/null
+++ b/searchcore/src/tests/proton/matching/partial_result/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_partial_result_test_app
+ SOURCES
+ partial_result_test.cpp
+ DEPENDS
+ searchcore_matching
+)
+vespa_add_test(NAME searchcore_partial_result_test_app COMMAND searchcore_partial_result_test_app)
diff --git a/searchcore/src/tests/proton/matching/partial_result/FILES b/searchcore/src/tests/proton/matching/partial_result/FILES
new file mode 100644
index 00000000000..cb7cdbd3bb6
--- /dev/null
+++ b/searchcore/src/tests/proton/matching/partial_result/FILES
@@ -0,0 +1 @@
+partial_result_test.cpp
diff --git a/searchcore/src/tests/proton/matching/partial_result/partial_result_test.cpp b/searchcore/src/tests/proton/matching/partial_result/partial_result_test.cpp
new file mode 100644
index 00000000000..48b92c5ae46
--- /dev/null
+++ b/searchcore/src/tests/proton/matching/partial_result/partial_result_test.cpp
@@ -0,0 +1,159 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/searchcore/proton/matching/partial_result.h>
+#include <vespa/vespalib/util/box.h>
+
+using proton::matching::PartialResult;
+using namespace vespalib;
+
+void checkMerge(const std::vector<double> &a, const std::vector<double> &b,
+ size_t maxHits, const std::vector<double> &expect)
+{
+ PartialResult res_a(maxHits, false);
+ PartialResult res_b(maxHits, false);
+ for (size_t i = 0; i < a.size(); ++i) {
+ res_a.add(search::RankedHit(i, a[i]));
+ }
+ res_a.totalHits(a.size());
+ for (size_t i = 0; i < b.size(); ++i) {
+ res_b.add(search::RankedHit(i, b[i]));
+ }
+ res_b.totalHits(b.size());
+ res_a.merge(res_b);
+ EXPECT_EQUAL(a.size() + b.size(), res_a.totalHits());
+ ASSERT_EQUAL(expect.size(), res_a.size());
+ for (size_t i = 0; i < expect.size(); ++i) {
+ EXPECT_EQUAL(expect[i], res_a.hit(i)._rankValue);
+ }
+}
+
+void checkMerge(const std::vector<std::string> &a, const std::vector<std::string> &b,
+ size_t maxHits, const std::vector<std::string> &expect)
+{
+ size_t len = 0;
+ PartialResult res_a(maxHits, true);
+ PartialResult res_b(maxHits, true);
+ len = 0;
+ for (size_t i = 0; i < a.size(); ++i) {
+ len += a[i].size();
+ res_a.add(search::RankedHit(i, 0.0), PartialResult::SortRef(a[i].data(), a[i].size()));
+ }
+ res_a.totalHits(a.size());
+ EXPECT_EQUAL(len, res_a.sortDataSize());
+ len = 0;
+ for (size_t i = 0; i < b.size(); ++i) {
+ len += b[i].size();
+ res_b.add(search::RankedHit(i, 0.0), PartialResult::SortRef(b[i].data(), b[i].size()));
+ }
+ res_b.totalHits(b.size());
+ EXPECT_EQUAL(len, res_b.sortDataSize());
+ res_a.merge(res_b);
+ EXPECT_EQUAL(a.size() + b.size(), res_a.totalHits());
+ ASSERT_EQUAL(expect.size(), res_a.size());
+ len = 0;
+ for (size_t i = 0; i < expect.size(); ++i) {
+ len += expect[i].size();
+ EXPECT_EQUAL(expect[i], std::string(res_a.sortData(i).first, res_a.sortData(i).second));
+ }
+ EXPECT_EQUAL(len, res_a.sortDataSize());
+}
+
+TEST("require that partial results can be created without sort data") {
+ PartialResult res(100, false);
+ EXPECT_EQUAL(0u, res.size());
+ EXPECT_EQUAL(100u, res.maxSize());
+ EXPECT_EQUAL(0u, res.totalHits());
+ EXPECT_FALSE(res.hasSortData());
+ EXPECT_EQUAL(0u, res.sortDataSize());
+ res.add(search::RankedHit(1, 10.0));
+ res.add(search::RankedHit(2, 5.0));
+ res.totalHits(1000);
+ EXPECT_EQUAL(1000u, res.totalHits());
+ ASSERT_EQUAL(2u, res.size());
+ EXPECT_EQUAL(1u, res.hit(0)._docId);
+ EXPECT_EQUAL(10.0, res.hit(0)._rankValue);
+ EXPECT_EQUAL(2u, res.hit(1)._docId);
+ EXPECT_EQUAL(5.0, res.hit(1)._rankValue);
+}
+
+TEST("require that partial results can be created with sort data") {
+ std::string str1("aaa");
+ std::string str2("bbb");
+ PartialResult res(100, true);
+ EXPECT_EQUAL(0u, res.size());
+ EXPECT_EQUAL(100u, res.maxSize());
+ EXPECT_EQUAL(0u, res.totalHits());
+ EXPECT_TRUE(res.hasSortData());
+ EXPECT_EQUAL(0u, res.sortDataSize());
+ res.add(search::RankedHit(1, 10.0), PartialResult::SortRef(str1.data(), str1.size()));
+ res.add(search::RankedHit(2, 5.0), PartialResult::SortRef(str2.data(), str2.size()));
+ res.totalHits(1000);
+ EXPECT_EQUAL(1000u, res.totalHits());
+ ASSERT_EQUAL(2u, res.size());
+ EXPECT_EQUAL(1u, res.hit(0)._docId);
+ EXPECT_EQUAL(10.0, res.hit(0)._rankValue);
+ EXPECT_EQUAL(str1.data(), res.sortData(0).first);
+ EXPECT_EQUAL(str1.size(), res.sortData(0).second);
+ EXPECT_EQUAL(2u, res.hit(1)._docId);
+ EXPECT_EQUAL(5.0, res.hit(1)._rankValue);
+ EXPECT_EQUAL(str2.data(), res.sortData(1).first);
+ EXPECT_EQUAL(str2.size(), res.sortData(1).second);
+}
+
+TEST("require that partial results without sort data are merged correctly") {
+ TEST_DO(checkMerge(make_box(5.0, 4.0, 3.0), make_box(4.5, 3.5), 3, make_box(5.0, 4.5, 4.0)));
+ TEST_DO(checkMerge(make_box(4.5, 3.5), make_box(5.0, 4.0, 3.0), 3, make_box(5.0, 4.5, 4.0)));
+ TEST_DO(checkMerge(make_box(1.0), make_box(2.0), 10, make_box(2.0, 1.0)));
+ TEST_DO(checkMerge(make_box(2.0), make_box(1.0), 10, make_box(2.0, 1.0)));
+ TEST_DO(checkMerge(std::vector<double>(), make_box(1.0), 10, make_box(1.0)));
+ TEST_DO(checkMerge(make_box(1.0), std::vector<double>(), 10, make_box(1.0)));
+ TEST_DO(checkMerge(std::vector<double>(), make_box(1.0), 0, std::vector<double>()));
+ TEST_DO(checkMerge(make_box(1.0), std::vector<double>(), 0, std::vector<double>()));
+ TEST_DO(checkMerge(std::vector<double>(), std::vector<double>(), 10, std::vector<double>()));
+}
+
+TEST("require that partial results with sort data are merged correctly") {
+ TEST_DO(checkMerge(make_box<std::string>("a", "c", "e"), make_box<std::string>("b", "d"), 3, make_box<std::string>("a", "b", "c")));
+ TEST_DO(checkMerge(make_box<std::string>("b", "d"), make_box<std::string>("a", "c", "e"), 3, make_box<std::string>("a", "b", "c")));
+ TEST_DO(checkMerge(make_box<std::string>("a"), make_box<std::string>("aa"), 10, make_box<std::string>("a", "aa")));
+ TEST_DO(checkMerge(make_box<std::string>("aa"), make_box<std::string>("a"), 10, make_box<std::string>("a", "aa")));
+ TEST_DO(checkMerge(std::vector<std::string>(), make_box<std::string>("a"), 10, make_box<std::string>("a")));
+ TEST_DO(checkMerge(make_box<std::string>("a"), std::vector<std::string>(), 10, make_box<std::string>("a")));
+ TEST_DO(checkMerge(std::vector<std::string>(), make_box<std::string>("a"), 0, std::vector<std::string>()));
+ TEST_DO(checkMerge(make_box<std::string>("a"), std::vector<std::string>(), 0, std::vector<std::string>()));
+ TEST_DO(checkMerge(std::vector<std::string>(), std::vector<std::string>(), 10, std::vector<std::string>()));
+}
+
+TEST("require that lower docid is preferred when sorting on rank") {
+ PartialResult res_a(1, false);
+ PartialResult res_b(1, false);
+ PartialResult res_c(1, false);
+ res_a.add(search::RankedHit(2, 1.0));
+ res_b.add(search::RankedHit(3, 1.0));
+ res_c.add(search::RankedHit(1, 1.0));
+ res_a.merge(res_b);
+ ASSERT_EQUAL(1u, res_a.size());
+ EXPECT_EQUAL(2u, res_a.hit(0)._docId);
+ res_a.merge(res_c);
+ ASSERT_EQUAL(1u, res_a.size());
+ EXPECT_EQUAL(1u, res_a.hit(0)._docId);
+}
+
+TEST("require that lower docid is preferred when using sortspec") {
+ std::string foo("foo");
+ PartialResult res_a(1, true);
+ PartialResult res_b(1, true);
+ PartialResult res_c(1, true);
+ res_a.add(search::RankedHit(2, 1.0), PartialResult::SortRef(foo.data(), foo.size()));
+ res_b.add(search::RankedHit(3, 1.0), PartialResult::SortRef(foo.data(), foo.size()));
+ res_c.add(search::RankedHit(1, 1.0), PartialResult::SortRef(foo.data(), foo.size()));
+ res_a.merge(res_b);
+ ASSERT_EQUAL(1u, res_a.size());
+ EXPECT_EQUAL(2u, res_a.hit(0)._docId);
+ res_a.merge(res_c);
+ ASSERT_EQUAL(1u, res_a.size());
+ EXPECT_EQUAL(1u, res_a.hit(0)._docId);
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/matching/query_test.cpp b/searchcore/src/tests/proton/matching/query_test.cpp
new file mode 100644
index 00000000000..caf52a5fca4
--- /dev/null
+++ b/searchcore/src/tests/proton/matching/query_test.cpp
@@ -0,0 +1,900 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Unit tests for query.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("query_test");
+
+#include <vespa/document/datatype/positiondatatype.h>
+#include <vespa/searchcore/proton/matching/fakesearchcontext.h>
+#include <vespa/searchcore/proton/matching/matchdatareservevisitor.h>
+#include <vespa/searchcore/proton/matching/blueprintbuilder.h>
+#include <vespa/searchcore/proton/matching/query.h>
+#include <vespa/searchcore/proton/matching/querynodes.h>
+#include <vespa/searchcore/proton/matching/resolveviewvisitor.h>
+#include <vespa/searchcore/proton/matching/termdataextractor.h>
+#include <vespa/searchcore/proton/matching/viewresolver.h>
+#include <vespa/searchlib/features/utils.h>
+#include <vespa/searchlib/fef/itermfielddata.h>
+#include <vespa/searchlib/fef/matchdata.h>
+#include <vespa/searchlib/fef/matchdatalayout.h>
+#include <vespa/searchlib/fef/test/indexenvironment.h>
+#include <vespa/searchlib/query/tree/customtypetermvisitor.h>
+#include <vespa/searchlib/query/tree/querybuilder.h>
+#include <vespa/searchlib/query/tree/stackdumpcreator.h>
+#include <vespa/searchlib/query/weight.h>
+#include <vespa/searchlib/queryeval/intermediate_blueprints.h>
+#include <vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.h>
+#include <vespa/searchlib/queryeval/leaf_blueprints.h>
+#include <vespa/searchlib/queryeval/searchiterator.h>
+#include <vespa/searchlib/queryeval/simpleresult.h>
+#include <vespa/searchlib/queryeval/fake_requestcontext.h>
+#include <vespa/searchlib/queryeval/termasstring.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vector>
+
+using document::PositionDataType;
+using search::fef::CollectionType;
+using search::fef::FieldInfo;
+using search::fef::FieldType;
+using search::fef::ITermData;
+using search::fef::ITermFieldData;
+using search::fef::IllegalHandle;
+using search::fef::MatchData;
+using search::fef::MatchDataLayout;
+using search::fef::TermFieldMatchData;
+using search::fef::TermFieldHandle;
+using search::query::CustomTypeTermVisitor;
+using search::query::Node;
+using search::query::QueryBuilder;
+using search::query::Range;
+using search::query::StackDumpCreator;
+using search::query::Weight;
+using search::queryeval::termAsString;
+using search::queryeval::Blueprint;
+using search::queryeval::FakeResult;
+using search::queryeval::FakeSearchable;
+using search::queryeval::FakeRequestContext;
+using search::queryeval::FieldSpec;
+using search::queryeval::FieldSpecList;
+using search::queryeval::Searchable;
+using search::queryeval::SearchIterator;
+using search::queryeval::SimpleBlueprint;
+using search::queryeval::SimpleResult;
+using search::queryeval::ParallelWeakAndBlueprint;
+using std::string;
+using std::vector;
+namespace fef_test = search::fef::test;
+
+namespace proton {
+namespace matching {
+namespace {
+
+class Test : public vespalib::TestApp {
+ MatchData::UP _match_data;
+ Blueprint::UP _blueprint;
+ FakeRequestContext _requestContext;
+
+ void setUp();
+ void tearDown();
+
+ void requireThatMatchDataIsReserved();
+ void requireThatMatchDataIsReservedForEachFieldInAView();
+ void requireThatTermsAreLookedUp();
+ void requireThatTermsAreLookedUpInMultipleFieldsFromAView();
+ void requireThatAttributeTermsAreLookedUpInAttributeSource();
+ void requireThatAttributeTermDataHandlesAreAllocated();
+ void requireThatTermDataIsFilledIn();
+
+ SearchIterator::UP getIterator(Node &node, ISearchContext &context);
+
+ void requireThatSingleIndexCanUseBlendingAsBlacklisting();
+ void requireThatIteratorsAreBuiltWithBlending();
+ void requireThatIteratorsAreBuiltForAllTermNodes();
+ void requireThatNearIteratorsCanBeBuilt();
+ void requireThatONearIteratorsCanBeBuilt();
+ void requireThatPhraseIteratorsCanBeBuilt();
+
+ void requireThatUnknownFieldActsEmpty();
+ void requireThatIllegalFieldsAreIgnored();
+ void requireThatQueryGluesEverythingTogether();
+ void requireThatQueryAddsLocation();
+ void requireThatQueryAddsLocationCutoff();
+ void requireThatFakeFieldSearchDumpsDiffer();
+ void requireThatNoDocsGiveZeroDocFrequency();
+ void requireThatWeakAndBlueprintsAreCreatedCorrectly();
+ void requireThatParallelWandBlueprintsAreCreatedCorrectly();
+ void requireThatBlackListBlueprintCanBeUsed();
+
+public:
+ int Main();
+};
+
+#define TEST_CALL(func) \
+ TEST_DO(setUp()); \
+ TEST_DO(func()); \
+ TEST_DO(tearDown())
+
+void Test::setUp() {
+ _match_data.reset(0);
+ _blueprint.reset(0);
+}
+
+void Test::tearDown() {
+ _match_data.reset(0);
+ _blueprint.reset(0);
+}
+
+const string field = "field";
+const string resolved_field1 = "resolved1";
+const string resolved_field2 = "resolved2";
+const string unknown_field = "unknown_field";
+const string float_term = "3.14";
+const string int_term = "42";
+const string prefix_term = "foo";
+const string string_term = "bar";
+const uint32_t string_id = 4;
+const Weight string_weight(4);
+const string substring_term = "baz";
+const string suffix_term = "qux";
+const string phrase_term = "quux";
+const Range range_term = Range(32, 47);
+const int doc_count = 100;
+const int field_id = 154;
+const uint32_t term_index = 23;
+const uint32_t term_count = 8;
+
+fef_test::IndexEnvironment plain_index_env;
+fef_test::IndexEnvironment resolved_index_env;
+fef_test::IndexEnvironment attribute_index_env;
+
+void setupIndexEnvironments()
+{
+ FieldInfo field_info(FieldType::INDEX, CollectionType::SINGLE, field, field_id);
+ plain_index_env.getFields().push_back(field_info);
+
+ FieldInfo field_info1(FieldType::INDEX, CollectionType::SINGLE, resolved_field1, field_id);
+ resolved_index_env.getFields().push_back(field_info1);
+ FieldInfo field_info2(FieldType::INDEX, CollectionType::SINGLE, resolved_field2, field_id + 1);
+ resolved_index_env.getFields().push_back(field_info2);
+
+ FieldInfo attr_info(FieldType::ATTRIBUTE, CollectionType::SINGLE, field, 0);
+ attribute_index_env.getFields().push_back(attr_info);
+}
+
+Node::UP buildQueryTree(const ViewResolver &resolver,
+ const search::fef::IIndexEnvironment &idxEnv)
+{
+ QueryBuilder<ProtonNodeTypes> query_builder;
+ query_builder.addOr(term_count);
+ query_builder.addNumberTerm(float_term, field, 0, Weight(0));
+ query_builder.addNumberTerm(int_term, field, 1, Weight(0));
+ query_builder.addPrefixTerm(prefix_term, field, 2, Weight(0));
+ query_builder.addRangeTerm(range_term, field, 3, Weight(0));
+ query_builder.addStringTerm(string_term, field, string_id, string_weight)
+ .setTermIndex(term_index);
+ query_builder.addSubstringTerm(substring_term, field, 5, Weight(0));
+ query_builder.addSuffixTerm(suffix_term, field, 6, Weight(0));
+ query_builder.addPhrase(2, field, 7, Weight(0));
+ query_builder.addStringTerm(phrase_term, field, 8, Weight(0));
+ query_builder.addStringTerm(phrase_term, field, 9, Weight(0));
+ Node::UP node = query_builder.build();
+
+ ResolveViewVisitor visitor(resolver, idxEnv);
+ node->accept(visitor);
+ return node;
+}
+
+void Test::requireThatMatchDataIsReserved() {
+ Node::UP node = buildQueryTree(ViewResolver(), plain_index_env);
+
+ MatchDataLayout mdl;
+ MatchDataReserveVisitor visitor(mdl);
+ node->accept(visitor);
+ MatchData::UP match_data = mdl.createMatchData();
+
+ EXPECT_EQUAL(term_count, match_data->getNumTermFields());
+}
+
+ViewResolver getViewResolver() {
+ ViewResolver resolver;
+ resolver.add(field, resolved_field1);
+ resolver.add(field, resolved_field2);
+ return resolver;
+}
+
+void Test::requireThatMatchDataIsReservedForEachFieldInAView() {
+ Node::UP node = buildQueryTree(getViewResolver(), resolved_index_env);
+
+ MatchDataLayout mdl;
+ MatchDataReserveVisitor visitor(mdl);
+ node->accept(visitor);
+ MatchData::UP match_data = mdl.createMatchData();
+
+ EXPECT_EQUAL(term_count * 2, match_data->getNumTermFields());
+}
+
+class LookupTestCheckerVisitor : public CustomTypeTermVisitor<ProtonNodeTypes>
+{
+ int Main() { return 0; }
+
+public:
+ template <class TermType>
+ void checkNode(const TermType &n, int estimatedHitCount, bool empty) {
+ EXPECT_EQUAL(empty, (estimatedHitCount == 0));
+ EXPECT_EQUAL((double)estimatedHitCount / doc_count, n.field(0).getDocFreq());
+ }
+
+ virtual void visit(ProtonNumberTerm &n) { checkNode(n, 1, false); }
+ virtual void visit(ProtonLocationTerm &n) { checkNode(n, 0, true); }
+ virtual void visit(ProtonPrefixTerm &n) { checkNode(n, 1, false); }
+ virtual void visit(ProtonRangeTerm &n) { checkNode(n, 2, false); }
+ virtual void visit(ProtonStringTerm &n) { checkNode(n, 2, false); }
+ virtual void visit(ProtonSubstringTerm &n) { checkNode(n, 0, true); }
+ virtual void visit(ProtonSuffixTerm &n) { checkNode(n, 2, false); }
+ virtual void visit(ProtonPhrase &n) { checkNode(n, 0, true); }
+ virtual void visit(ProtonWeightedSetTerm &) {}
+ virtual void visit(ProtonDotProduct &) {}
+ virtual void visit(ProtonWandTerm &) {}
+ virtual void visit(ProtonPredicateQuery &) {}
+ virtual void visit(ProtonRegExpTerm &) {}
+};
+
+void Test::requireThatTermsAreLookedUp() {
+ FakeRequestContext requestContext;
+ Node::UP node = buildQueryTree(ViewResolver(), plain_index_env);
+
+ FakeSearchContext context;
+ context.addIdx(1).addIdx(2);
+ context.idx(0).getFake()
+ .addResult(field, prefix_term, FakeResult().doc(1).pos(2))
+ .addResult(field, string_term,
+ FakeResult().doc(2).pos(3).doc(3).pos(4))
+ .addResult(field, termAsString(int_term),
+ FakeResult().doc(4).pos(5));
+ context.idx(1).getFake()
+ .addResult(field, string_term, FakeResult().doc(6).pos(7))
+ .addResult(field, suffix_term,
+ FakeResult().doc(7).pos(8).doc(8).pos(9))
+ .addResult(field, termAsString(float_term),
+ FakeResult().doc(9).pos(10))
+ .addResult(field, termAsString(int_term),
+ FakeResult().doc(10).pos(11))
+ .addResult(field, termAsString(range_term),
+ FakeResult().doc(12).pos(13).doc(13).pos(14));
+ context.setLimit(doc_count + 1);
+
+ MatchDataLayout mdl;
+ MatchDataReserveVisitor visitor(mdl);
+ node->accept(visitor);
+
+ Blueprint::UP blueprint = BlueprintBuilder::build(requestContext, *node, context);
+
+ LookupTestCheckerVisitor checker;
+ TEST_DO(node->accept(checker));
+}
+
+void Test::requireThatTermsAreLookedUpInMultipleFieldsFromAView() {
+ Node::UP node = buildQueryTree(getViewResolver(), resolved_index_env);
+
+ FakeRequestContext requestContext;
+ FakeSearchContext context;
+ context.addIdx(1).addIdx(2);
+ context.idx(0).getFake()
+ .addResult(resolved_field1, prefix_term,
+ FakeResult().doc(1).pos(2))
+ .addResult(resolved_field2, string_term,
+ FakeResult().doc(2).pos(3).doc(3).pos(4))
+ .addResult(resolved_field1, termAsString(int_term),
+ FakeResult().doc(4).pos(5));
+ context.idx(1).getFake()
+ .addResult(resolved_field1, string_term,
+ FakeResult().doc(6).pos(7))
+ .addResult(resolved_field2, suffix_term,
+ FakeResult().doc(7).pos(8).doc(8).pos(9))
+ .addResult(resolved_field1, termAsString(float_term),
+ FakeResult().doc(9).pos(10))
+ .addResult(resolved_field2, termAsString(int_term),
+ FakeResult().doc(10).pos(11))
+ .addResult(resolved_field1, termAsString(range_term),
+ FakeResult().doc(12).pos(13).doc(13).pos(14));
+ context.setLimit(doc_count + 1);
+
+ MatchDataLayout mdl;
+ MatchDataReserveVisitor visitor(mdl);
+ node->accept(visitor);
+
+ Blueprint::UP blueprint = BlueprintBuilder::build(requestContext, *node, context);
+
+ LookupTestCheckerVisitor checker;
+ TEST_DO(node->accept(checker));
+}
+
+void Test::requireThatAttributeTermsAreLookedUpInAttributeSource() {
+ const string term = "bar";
+ ProtonStringTerm node(term, field, 1, Weight(2));
+ node.resolve(ViewResolver(), attribute_index_env);
+
+ FakeRequestContext requestContext;
+ FakeSearchContext context;
+ context.addIdx(1);
+ context.attr().addResult(field, term, FakeResult().doc(1).pos(2));
+
+ MatchDataLayout mdl;
+ MatchDataReserveVisitor visitor(mdl);
+ node.accept(visitor);
+
+ Blueprint::UP blueprint = BlueprintBuilder::build(requestContext, node, context);
+
+ EXPECT_TRUE(!blueprint->getState().estimate().empty);
+ EXPECT_EQUAL(1u, blueprint->getState().estimate().estHits);
+}
+
+void Test::requireThatAttributeTermDataHandlesAreAllocated() {
+ const string term = "bar";
+ ProtonStringTerm node(term, field, 1, Weight(2));
+ node.resolve(ViewResolver(), attribute_index_env);
+
+ FakeSearchContext context;
+ FakeRequestContext requestContext;
+
+ MatchDataLayout mdl;
+ MatchDataReserveVisitor reserve_visitor(mdl);
+ node.accept(reserve_visitor);
+
+ Blueprint::UP blueprint = BlueprintBuilder::build(requestContext, node, context);
+
+ MatchData::UP match_data = mdl.createMatchData();
+
+ EXPECT_EQUAL(1u, match_data->getNumTermFields());
+ EXPECT_TRUE(node.field(0).attribute_field);
+}
+
+
+class SetUpTermDataTestCheckerVisitor
+ : public CustomTypeTermVisitor<ProtonNodeTypes>
+{
+ int Main() { return 0; }
+
+public:
+ virtual void visit(ProtonNumberTerm &) {}
+ virtual void visit(ProtonLocationTerm &) {}
+ virtual void visit(ProtonPrefixTerm &) {}
+ virtual void visit(ProtonRangeTerm &) {}
+
+ virtual void visit(ProtonStringTerm &n) {
+ const ITermData &term_data = n;
+ EXPECT_EQUAL(string_weight.percent(),
+ term_data.getWeight().percent());
+ EXPECT_EQUAL(1u, term_data.getPhraseLength());
+ EXPECT_EQUAL(-1u, term_data.getTermIndex());
+ EXPECT_EQUAL(string_id, term_data.getUniqueId());
+ EXPECT_EQUAL(term_data.numFields(), n.numFields());
+ for (size_t i = 0; i < term_data.numFields(); ++i) {
+ const ITermFieldData &term_field_data = term_data.field(i);
+ EXPECT_APPROX(2.0 / doc_count, term_field_data.getDocFreq(), 1.0e-6);
+ EXPECT_TRUE(!n.field(i).attribute_field);
+ EXPECT_EQUAL(field_id + i, term_field_data.getFieldId());
+ }
+ }
+
+ virtual void visit(ProtonSubstringTerm &) {}
+ virtual void visit(ProtonSuffixTerm &) {}
+ virtual void visit(ProtonPhrase &n) {
+ const ITermData &term_data = n;
+ EXPECT_EQUAL(2u, term_data.getPhraseLength());
+ }
+ virtual void visit(ProtonWeightedSetTerm &) {}
+ virtual void visit(ProtonDotProduct &) {}
+ virtual void visit(ProtonWandTerm &) {}
+ virtual void visit(ProtonPredicateQuery &) {}
+ virtual void visit(ProtonRegExpTerm &) {}
+};
+
+void Test::requireThatTermDataIsFilledIn() {
+ Node::UP node = buildQueryTree(getViewResolver(), resolved_index_env);
+
+ FakeRequestContext requestContext;
+ FakeSearchContext context;
+ context.addIdx(1);
+ context.idx(0).getFake().addResult(resolved_field1, string_term,
+ FakeResult().doc(1).pos(2).doc(5).pos(3));
+ context.setLimit(doc_count + 1);
+
+ MatchDataLayout mdl;
+ MatchDataReserveVisitor reserve_visitor(mdl);
+ node->accept(reserve_visitor);
+
+ Blueprint::UP blueprint = BlueprintBuilder::build(requestContext, *node, context);
+
+ TEST_DO(
+ SetUpTermDataTestCheckerVisitor checker;
+ node->accept(checker);
+ );
+}
+
+SearchIterator::UP Test::getIterator(Node &node, ISearchContext &context) {
+ MatchDataLayout mdl;
+ MatchDataReserveVisitor mdr_visitor(mdl);
+ node.accept(mdr_visitor);
+ _match_data = mdl.createMatchData();
+
+ _blueprint = BlueprintBuilder::build(_requestContext, node, context);
+
+ _blueprint->fetchPostings(true);
+ SearchIterator::UP search(_blueprint->createSearch(*_match_data, true));
+ search->initFullRange();
+ return search;
+}
+
+FakeIndexSearchable getFakeSearchable(const string &term, int doc1, int doc2) {
+ FakeIndexSearchable source;
+ source.getFake().addResult(field, term,
+ FakeResult().doc(doc1).pos(2).doc(doc2).pos(3));
+ return source;
+}
+
+void Test::requireThatSingleIndexCanUseBlendingAsBlacklisting() {
+ QueryBuilder<ProtonNodeTypes> builder;
+ builder.addStringTerm(string_term, field, 1, Weight(2))
+ .resolve(ViewResolver(), plain_index_env);
+ Node::UP node = builder.build();
+ ASSERT_TRUE(node.get());
+
+ FakeSearchContext context;
+ context.addIdx(1).idx(0) = getFakeSearchable(string_term, 2, 5);
+ context.selector().setSource(5, 1);
+
+ SearchIterator::UP iterator = getIterator(*node, context);
+ ASSERT_TRUE(iterator.get());
+ EXPECT_TRUE(!iterator->seek(1));
+ EXPECT_TRUE(!iterator->seek(2));
+ EXPECT_TRUE(iterator->seek(5));
+ iterator->unpack(5);
+}
+
+void Test::requireThatIteratorsAreBuiltWithBlending() {
+ QueryBuilder<ProtonNodeTypes> builder;
+ builder.addStringTerm(string_term, field, 1, Weight(2))
+ .resolve(ViewResolver(), plain_index_env);
+ Node::UP node = builder.build();
+ ASSERT_TRUE(node.get());
+
+ FakeSearchContext context;
+ context.addIdx(1).idx(0) = getFakeSearchable(string_term, 3, 7);
+ context.addIdx(0).idx(1) = getFakeSearchable(string_term, 2, 6);
+ context.selector().setSource(3, 1);
+ context.selector().setSource(7, 1);
+
+ SearchIterator::UP iterator = getIterator(*node, context);
+ ASSERT_TRUE(iterator.get());
+
+ EXPECT_TRUE(!iterator->seek(1));
+ EXPECT_TRUE(iterator->seek(2));
+ EXPECT_TRUE(iterator->seek(3));
+ EXPECT_TRUE(iterator->seek(6));
+ EXPECT_TRUE(iterator->seek(7));
+}
+
+void Test::requireThatIteratorsAreBuiltForAllTermNodes() {
+ Node::UP node = buildQueryTree(ViewResolver(), plain_index_env);
+ ASSERT_TRUE(node.get());
+
+ FakeSearchContext context(42);
+ context.addIdx(0).idx(0).getFake()
+ .addResult(field, termAsString(float_term),
+ FakeResult().doc(2).pos(2))
+ .addResult(field, termAsString(int_term),
+ FakeResult().doc(4).pos(2))
+ .addResult(field, prefix_term, FakeResult().doc(8).pos(2))
+ .addResult(field, termAsString(range_term),
+ FakeResult().doc(15).pos(2))
+ .addResult(field, string_term, FakeResult().doc(16).pos(2))
+ .addResult(field, substring_term, FakeResult().doc(23).pos(2))
+ .addResult(field, suffix_term, FakeResult().doc(42).pos(2));
+
+ SearchIterator::UP iterator = getIterator(*node, context);
+ ASSERT_TRUE(iterator.get());
+
+ EXPECT_TRUE(!iterator->seek(1));
+ EXPECT_TRUE(iterator->seek(2));
+ EXPECT_TRUE(iterator->seek(4));
+ EXPECT_TRUE(iterator->seek(8));
+ EXPECT_TRUE(iterator->seek(15));
+ EXPECT_TRUE(iterator->seek(16));
+ EXPECT_TRUE(iterator->seek(23));
+ EXPECT_TRUE(iterator->seek(42));
+}
+
+void Test::requireThatNearIteratorsCanBeBuilt() {
+ QueryBuilder<ProtonNodeTypes> builder;
+ builder.addNear(2, 4);
+ builder.addStringTerm(string_term, field, 1, Weight(2));
+ builder.addStringTerm(prefix_term, field, 1, Weight(2));
+ Node::UP node = builder.build();
+ ResolveViewVisitor resolver(ViewResolver(), plain_index_env);
+ node->accept(resolver);
+ ASSERT_TRUE(node.get());
+
+ FakeSearchContext context(8);
+ context.addIdx(0).idx(0).getFake()
+ .addResult(field, prefix_term, FakeResult()
+ .doc(4).pos(2).len(50).doc(8).pos(2).len(50))
+ .addResult(field, string_term, FakeResult()
+ .doc(4).pos(40).len(50).doc(8).pos(5).len(50));
+
+ SearchIterator::UP iterator = getIterator(*node, context);
+ ASSERT_TRUE(iterator.get());
+ EXPECT_TRUE(!iterator->seek(4));
+ EXPECT_TRUE(iterator->seek(8));
+}
+
+void Test::requireThatONearIteratorsCanBeBuilt() {
+ QueryBuilder<ProtonNodeTypes> builder;
+ builder.addONear(2, 4);
+ builder.addStringTerm(string_term, field, 1, Weight(2));
+ builder.addStringTerm(prefix_term, field, 1, Weight(2));
+ Node::UP node = builder.build();
+ ResolveViewVisitor resolver(ViewResolver(), plain_index_env);
+ node->accept(resolver);
+ ASSERT_TRUE(node.get());
+
+ FakeSearchContext context(8);
+ context.addIdx(0).idx(0).getFake()
+ .addResult(field, string_term, FakeResult()
+ .doc(4).pos(5).len(50).doc(8).pos(2).len(50))
+ .addResult(field, prefix_term, FakeResult()
+ .doc(4).pos(2).len(50).doc(8).pos(5).len(50));
+
+ SearchIterator::UP iterator = getIterator(*node, context);
+ ASSERT_TRUE(iterator.get());
+ EXPECT_TRUE(!iterator->seek(4));
+ EXPECT_TRUE(iterator->seek(8));
+}
+
+void Test::requireThatPhraseIteratorsCanBeBuilt() {
+ QueryBuilder<ProtonNodeTypes> builder;
+ builder.addPhrase(3, field, 0, Weight(42));
+ builder.addStringTerm(string_term, field, 1, Weight(2));
+ builder.addStringTerm(prefix_term, field, 1, Weight(2));
+ builder.addStringTerm(suffix_term, field, 1, Weight(2));
+ Node::UP node = builder.build();
+ ResolveViewVisitor resolver(ViewResolver(), plain_index_env);
+ node->accept(resolver);
+ ASSERT_TRUE(node.get());
+
+ FakeSearchContext context(9);
+ context.addIdx(0).idx(0).getFake()
+ .addResult(field, string_term, FakeResult()
+ .doc(4).pos(3).len(50)
+ .doc(5).pos(2).len(50)
+ .doc(8).pos(2).len(50)
+ .doc(9).pos(2).len(50))
+ .addResult(field, prefix_term, FakeResult()
+ .doc(4).pos(2).len(50)
+ .doc(5).pos(4).len(50)
+ .doc(8).pos(3).len(50))
+ .addResult(field, suffix_term, FakeResult()
+ .doc(4).pos(1).len(50)
+ .doc(5).pos(5).len(50)
+ .doc(8).pos(4).len(50));
+
+ SearchIterator::UP iterator = getIterator(*node, context);
+ ASSERT_TRUE(iterator.get());
+ EXPECT_TRUE(!iterator->seek(4));
+ EXPECT_TRUE(!iterator->seek(5));
+ EXPECT_TRUE(iterator->seek(8));
+ EXPECT_TRUE(!iterator->seek(9));
+ EXPECT_TRUE(iterator->isAtEnd());
+}
+
+void
+Test::requireThatUnknownFieldActsEmpty()
+{
+ FakeSearchContext context;
+ context.addIdx(0).idx(0).getFake()
+ .addResult(unknown_field, string_term, FakeResult()
+ .doc(4).pos(3).len(50)
+ .doc(5).pos(2).len(50));
+
+ ProtonNodeTypes::StringTerm
+ node(string_term, unknown_field, string_id, string_weight);
+ node.resolve(ViewResolver(), plain_index_env);
+
+ std::vector<const ITermData *> terms;
+ TermDataExtractor::extractTerms(node, terms);
+
+ SearchIterator::UP iterator = getIterator(node, context);
+
+ ASSERT_TRUE(EXPECT_EQUAL(1u, terms.size()));
+ EXPECT_EQUAL(0u, terms[0]->numFields());
+
+ ASSERT_TRUE(iterator.get());
+ EXPECT_TRUE(!iterator->seek(1));
+ EXPECT_TRUE(iterator->isAtEnd());
+}
+
+void
+Test::requireThatIllegalFieldsAreIgnored()
+{
+ ProtonNodeTypes::StringTerm
+ node(string_term, unknown_field, string_id, string_weight);
+ node.resolve(ViewResolver(), plain_index_env);
+
+ FakeRequestContext requestContext;
+ FakeSearchContext context;
+
+ MatchDataLayout mdl;
+ MatchDataReserveVisitor reserve_visitor(mdl);
+ node.accept(reserve_visitor);
+
+ Blueprint::UP blueprint = BlueprintBuilder::build(requestContext, node, context);
+
+ EXPECT_EQUAL(0u, node.numFields());
+
+ MatchData::UP match_data = mdl.createMatchData();
+ EXPECT_EQUAL(0u, match_data->getNumTermFields());
+}
+
+void Test::requireThatQueryGluesEverythingTogether() {
+ QueryBuilder<ProtonNodeTypes> builder;
+ builder.addStringTerm(string_term, field, 1, Weight(2));
+ string stack_dump = StackDumpCreator::create(*builder.build());
+
+ Query query;
+ query.buildTree(stack_dump, "", ViewResolver(), plain_index_env);
+ vector<const ITermData *> term_data;
+ query.extractTerms(term_data);
+ EXPECT_EQUAL(1u, term_data.size());
+
+ FakeRequestContext requestContext;
+ FakeSearchContext context;
+ context.setLimit(42);
+ MatchDataLayout mdl;
+ query.reserveHandles(requestContext, context, mdl);
+ MatchData::UP md = mdl.createMatchData();
+ EXPECT_EQUAL(1u, md->getNumTermFields());
+
+ query.optimize();
+ query.fetchPostings();
+ SearchIterator::UP search = query.createSearch(*md);
+ ASSERT_TRUE(search.get());
+}
+
+// Shared helper: builds a one-term query with a location specification
+// ("location:<loc_string>") against an index environment that also declares
+// the z-curve position attribute, then checks that the location contributes
+// an extra term field and appears in the dumped iterator tree.
+void checkQueryAddsLocation(Test &test, const string &loc_string) {
+ const string loc_field = "location";
+
+ fef_test::IndexEnvironment index_environment;
+ FieldInfo field_info(FieldType::INDEX, CollectionType::SINGLE, field, 0);
+ index_environment.getFields().push_back(field_info);
+ field_info = FieldInfo(FieldType::ATTRIBUTE, CollectionType::SINGLE,
+ PositionDataType::getZCurveFieldName(loc_field), 1);
+ index_environment.getFields().push_back(field_info);
+
+ QueryBuilder<ProtonNodeTypes> builder;
+ builder.addStringTerm(string_term, field, 1, Weight(2));
+ string stack_dump = StackDumpCreator::create(*builder.build());
+
+ Query query;
+ query.buildTree(stack_dump,
+ loc_field + ":" + loc_string,
+ ViewResolver(), index_environment);
+ vector<const ITermData *> term_data;
+ query.extractTerms(term_data);
+ test.EXPECT_EQUAL(1u, term_data.size());
+
+ FakeRequestContext requestContext;
+ FakeSearchContext context;
+ context.addIdx(0).setLimit(42);
+ MatchDataLayout mdl;
+ query.reserveHandles(requestContext, context, mdl);
+ MatchData::UP md = mdl.createMatchData();
+ // one handle for the string term + one for the location field
+ test.EXPECT_EQUAL(2u, md->getNumTermFields());
+
+ query.fetchPostings();
+ SearchIterator::UP search = query.createSearch(*md);
+ test.ASSERT_TRUE(search.get());
+ if (!test.EXPECT_NOT_EQUAL(string::npos, search->asString().find(loc_string))) {
+ // NOTE(review): format string has no trailing '\n'; diagnostic output
+ // may run together with subsequent stderr output.
+ fprintf(stderr, "search (missing loc_string): %s", search->asString().c_str());
+ }
+}
+
+// Circle-style location string: (dimensions,x,y,radius,...).
+void Test::requireThatQueryAddsLocation() {
+ checkQueryAddsLocation(*this, "(2,10,10,3,0,1,0,0)");
+}
+
+// Bounding-box style location string: [dimensions,x1,y1,x2,y2].
+void Test::requireThatQueryAddsLocationCutoff() {
+ checkQueryAddsLocation(*this, "[2,10,10,20,20]");
+}
+
+// Verifies that FakeSearchable iterator dumps are distinguishable when the
+// term, the field, or the searchable's tag differs — so that other tests
+// comparing asString() output are actually meaningful.
+void
+Test::requireThatFakeFieldSearchDumpsDiffer()
+{
+ FakeRequestContext requestContext;
+ uint32_t fieldId = 0;
+ MatchDataLayout mdl;
+ TermFieldHandle handle = mdl.allocTermField(fieldId);
+ MatchData::UP match_data = mdl.createMatchData();
+
+ FakeSearchable a;
+ FakeSearchable b;
+ a.tag("a");
+ b.tag("b");
+ // n1 is the reference; n2/n3 vary term and field respectively
+ ProtonStringTerm n1("term1", "field1", string_id, string_weight);
+ ProtonStringTerm n2("term2", "field1", string_id, string_weight);
+ ProtonStringTerm n3("term1", "field2", string_id, string_weight);
+
+ FieldSpecList fields1;
+ FieldSpecList fields2;
+ fields1.add(FieldSpec("field1", fieldId, handle));
+ fields2.add(FieldSpec("field2", fieldId, handle));
+
+ Blueprint::UP l1(a.createBlueprint(requestContext, fields1, n1)); // reference
+ Blueprint::UP l2(a.createBlueprint(requestContext, fields1, n2)); // term
+ Blueprint::UP l3(a.createBlueprint(requestContext, fields2, n3)); // field
+ Blueprint::UP l4(b.createBlueprint(requestContext, fields1, n1)); // tag
+
+ l1->fetchPostings(true);
+ l2->fetchPostings(true);
+ l3->fetchPostings(true);
+ l4->fetchPostings(true);
+
+ SearchIterator::UP s1(l1->createSearch(*match_data, true));
+ SearchIterator::UP s2(l2->createSearch(*match_data, true));
+ SearchIterator::UP s3(l3->createSearch(*match_data, true));
+ SearchIterator::UP s4(l4->createSearch(*match_data, true));
+
+ // each single-attribute variation must yield a different dump
+ EXPECT_NOT_EQUAL(s1->asString(), s2->asString());
+ EXPECT_NOT_EQUAL(s1->asString(), s3->asString());
+ EXPECT_NOT_EQUAL(s1->asString(), s4->asString());
+}
+
+// With an empty index (docid limit 0) the resolved term must still report
+// its field, but with a document frequency of exactly zero.
+void Test::requireThatNoDocsGiveZeroDocFrequency() {
+ ProtonStringTerm node(string_term, field, string_id, string_weight);
+ node.resolve(ViewResolver(), plain_index_env);
+ FakeSearchContext context;
+ FakeRequestContext requestContext;
+ context.setLimit(0);
+
+ MatchDataLayout mdl;
+ MatchDataReserveVisitor reserve_visitor(mdl);
+ node.accept(reserve_visitor);
+
+ Blueprint::UP blueprint = BlueprintBuilder::build(requestContext, node, context);
+
+ EXPECT_EQUAL(1u, node.numFields());
+ EXPECT_EQUAL(0.0, node.field(0).getDocFreq());
+}
+
+// A ProtonWeakAnd with two weighted string-term children must build into a
+// WeakAndBlueprint carrying the target hit count (N), the per-child weights
+// in order, and per-child hit estimates from the fake posting lists.
+void Test::requireThatWeakAndBlueprintsAreCreatedCorrectly() {
+ using search::queryeval::WeakAndBlueprint;
+
+ ProtonWeakAnd wand(123, "view");
+ wand.append(Node::UP(new ProtonStringTerm("foo", field, 0, Weight(3))));
+ wand.append(Node::UP(new ProtonStringTerm("bar", field, 0, Weight(7))));
+
+ ResolveViewVisitor resolve_visitor(ViewResolver(), plain_index_env);
+ wand.accept(resolve_visitor);
+
+ FakeRequestContext requestContext;
+ FakeSearchContext context;
+ // "foo" matches 2 docs, "bar" matches 3 — drives the estimates below
+ context.addIdx(0).idx(0).getFake()
+ .addResult(field, "foo", FakeResult().doc(1).doc(3))
+ .addResult(field, "bar", FakeResult().doc(2).doc(3).doc(4));
+
+ MatchDataLayout mdl;
+ MatchDataReserveVisitor reserve_visitor(mdl);
+ wand.accept(reserve_visitor);
+
+ Blueprint::UP blueprint = BlueprintBuilder::build(requestContext, wand, context);
+ WeakAndBlueprint *wbp = dynamic_cast<WeakAndBlueprint*>(blueprint.get());
+ // NOTE(review): comparison against 0 predates nullptr usage elsewhere in
+ // this file (see the parallel-wand test below which uses nullptr).
+ ASSERT_TRUE(wbp != 0);
+ ASSERT_EQUAL(2u, wbp->getWeights().size());
+ ASSERT_EQUAL(2u, wbp->childCnt());
+ EXPECT_EQUAL(123u, wbp->getN());
+ EXPECT_EQUAL(3u, wbp->getWeights()[0]);
+ EXPECT_EQUAL(7u, wbp->getWeights()[1]);
+ EXPECT_EQUAL(2u, wbp->getChild(0).getState().estimate().estHits);
+ EXPECT_EQUAL(3u, wbp->getChild(1).getState().estimate().estHits);
+}
+
+// A ProtonWandTerm against an attribute environment must build into a
+// ParallelWeakAndBlueprint that keeps the score threshold, threshold boost
+// factor, and the docid limit from the search context.
+void Test::requireThatParallelWandBlueprintsAreCreatedCorrectly() {
+ using search::queryeval::WeakAndBlueprint;
+
+ // args: field, id, weight, targetNumHits=123, scoreThreshold=9000,
+ // thresholdBoostFactor=1.25
+ ProtonWandTerm wand(field, 42, Weight(100), 123, 9000, 1.25);
+ wand.append(Node::UP(new ProtonStringTerm("foo", field, 0, Weight(3))));
+ wand.append(Node::UP(new ProtonStringTerm("bar", field, 0, Weight(7))));
+
+ ResolveViewVisitor resolve_visitor(ViewResolver(), attribute_index_env);
+ wand.accept(resolve_visitor);
+
+ FakeRequestContext requestContext;
+ FakeSearchContext context;
+ context.setLimit(1000);
+ context.addIdx(0).idx(0).getFake()
+ .addResult(field, "foo", FakeResult().doc(1).doc(3))
+ .addResult(field, "bar", FakeResult().doc(2).doc(3).doc(4));
+
+ MatchDataLayout mdl;
+ MatchDataReserveVisitor reserve_visitor(mdl);
+ wand.accept(reserve_visitor);
+
+ Blueprint::UP blueprint = BlueprintBuilder::build(requestContext, wand, context);
+ ParallelWeakAndBlueprint *wbp = dynamic_cast<ParallelWeakAndBlueprint*>(blueprint.get());
+ ASSERT_TRUE(wbp != nullptr);
+ EXPECT_EQUAL(9000, wbp->getScoreThreshold());
+ EXPECT_EQUAL(1.25, wbp->getThresholdBoostFactor());
+ EXPECT_EQUAL(1000u, wbp->get_docid_limit());
+}
+
+// A blacklist blueprint attached to the query must remove its hits (docs 3
+// and 9) from the term's result set (odd docs 1..11), leaving 1,5,7,11.
+void
+Test::requireThatBlackListBlueprintCanBeUsed()
+{
+ QueryBuilder<ProtonNodeTypes> builder;
+ builder.addStringTerm("foo", field, field_id, string_weight);
+ std::string stackDump = StackDumpCreator::create(*builder.build());
+
+ Query query;
+ query.buildTree(stackDump, "", ViewResolver(), plain_index_env);
+
+ FakeSearchContext context(42);
+ context.addIdx(0).idx(0).getFake()
+ .addResult(field, "foo", FakeResult().doc(1).doc(3).doc(5).doc(7).doc(9).doc(11));
+ context.setLimit(42);
+
+ query.setBlackListBlueprint(SimpleBlueprint::UP(new SimpleBlueprint(SimpleResult().addHit(3).addHit(9))));
+
+ FakeRequestContext requestContext;
+ MatchDataLayout mdl;
+ query.reserveHandles(requestContext, context, mdl);
+ MatchData::UP md = mdl.createMatchData();
+
+ query.optimize();
+ query.fetchPostings();
+ SearchIterator::UP search = query.createSearch(*md);
+ SimpleResult exp = SimpleResult().addHit(1).addHit(5).addHit(7).addHit(11);
+ SimpleResult act;
+ act.search(*search);
+ EXPECT_EQUAL(exp, act);
+}
+
+// Test driver: sets up the shared index environments once, then runs every
+// test case via TEST_CALL.
+int
+Test::Main()
+{
+ setupIndexEnvironments();
+
+ TEST_INIT("query_test");
+
+ TEST_CALL(requireThatMatchDataIsReserved);
+ TEST_CALL(requireThatMatchDataIsReservedForEachFieldInAView);
+ TEST_CALL(requireThatTermsAreLookedUp);
+ TEST_CALL(requireThatTermsAreLookedUpInMultipleFieldsFromAView);
+ TEST_CALL(requireThatAttributeTermsAreLookedUpInAttributeSource);
+ TEST_CALL(requireThatAttributeTermDataHandlesAreAllocated);
+ TEST_CALL(requireThatTermDataIsFilledIn);
+ TEST_CALL(requireThatSingleIndexCanUseBlendingAsBlacklisting);
+ TEST_CALL(requireThatIteratorsAreBuiltWithBlending);
+ TEST_CALL(requireThatIteratorsAreBuiltForAllTermNodes);
+ TEST_CALL(requireThatNearIteratorsCanBeBuilt);
+ TEST_CALL(requireThatONearIteratorsCanBeBuilt);
+ TEST_CALL(requireThatPhraseIteratorsCanBeBuilt);
+ TEST_CALL(requireThatUnknownFieldActsEmpty);
+ TEST_CALL(requireThatIllegalFieldsAreIgnored);
+ TEST_CALL(requireThatQueryGluesEverythingTogether);
+ TEST_CALL(requireThatQueryAddsLocation);
+ TEST_CALL(requireThatQueryAddsLocationCutoff);
+ TEST_CALL(requireThatFakeFieldSearchDumpsDiffer);
+ TEST_CALL(requireThatNoDocsGiveZeroDocFrequency);
+ TEST_CALL(requireThatWeakAndBlueprintsAreCreatedCorrectly);
+ TEST_CALL(requireThatParallelWandBlueprintsAreCreatedCorrectly);
+ TEST_CALL(requireThatBlackListBlueprintCanBeUsed);
+
+ TEST_DONE();
+}
+
+
+} // namespace
+} // namespace matching
+} // namespace proton
+
+TEST_APPHOOK(proton::matching::Test);
diff --git a/searchcore/src/tests/proton/matching/querynodes_test.cpp b/searchcore/src/tests/proton/matching/querynodes_test.cpp
new file mode 100644
index 00000000000..054b70f9b98
--- /dev/null
+++ b/searchcore/src/tests/proton/matching/querynodes_test.cpp
@@ -0,0 +1,486 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Unit tests for querynodes.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("querynodes_test");
+
+#include <vespa/searchcore/proton/matching/querynodes.h>
+
+#include <vespa/searchcore/proton/matching/fakesearchcontext.h>
+#include <vespa/searchcore/proton/matching/blueprintbuilder.h>
+#include <vespa/searchcore/proton/matching/matchdatareservevisitor.h>
+#include <vespa/searchcore/proton/matching/resolveviewvisitor.h>
+#include <vespa/searchcore/proton/matching/viewresolver.h>
+#include <vespa/searchlib/fef/fieldinfo.h>
+#include <vespa/searchlib/fef/fieldtype.h>
+#include <vespa/searchlib/fef/matchdata.h>
+#include <vespa/searchlib/fef/matchdatalayout.h>
+#include <vespa/searchlib/fef/termfieldmatchdata.h>
+#include <vespa/searchlib/fef/termfieldmatchdataarray.h>
+#include <vespa/searchlib/fef/test/indexenvironment.h>
+#include <vespa/searchlib/query/tree/node.h>
+#include <vespa/searchlib/query/tree/querybuilder.h>
+#include <vespa/searchlib/queryeval/blueprint.h>
+#include <vespa/searchlib/queryeval/isourceselector.h>
+#include <vespa/searchlib/queryeval/nearsearch.h>
+#include <vespa/searchlib/queryeval/orsearch.h>
+#include <vespa/searchlib/queryeval/andsearch.h>
+#include <vespa/searchlib/queryeval/andnotsearch.h>
+#include <vespa/searchlib/queryeval/ranksearch.h>
+#include <vespa/searchlib/queryeval/searchiterator.h>
+#include <vespa/searchlib/queryeval/simple_phrase_search.h>
+#include <vespa/searchlib/queryeval/sourceblendersearch.h>
+#include <vespa/searchlib/queryeval/fake_search.h>
+#include <vespa/searchlib/queryeval/fake_requestcontext.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <cstdarg>
+#include <string>
+#include <vector>
+#include <vespa/searchlib/attribute/singlenumericattribute.hpp>
+
+using search::fef::CollectionType;
+using search::fef::FieldInfo;
+using search::fef::FieldType;
+using search::fef::MatchData;
+using search::fef::MatchDataLayout;
+using search::fef::TermFieldMatchData;
+using search::fef::TermFieldHandle;
+using search::fef::TermFieldMatchDataArray;
+using search::fef::test::IndexEnvironment;
+using search::query::Node;
+using search::query::QueryBuilder;
+using search::queryeval::ISourceSelector;
+using search::queryeval::NearSearch;
+using search::queryeval::ONearSearch;
+using search::queryeval::OrSearch;
+using search::queryeval::AndSearch;
+using search::queryeval::AndNotSearch;
+using search::queryeval::RankSearch;
+using search::queryeval::Blueprint;
+using search::queryeval::SearchIterator;
+using search::queryeval::SourceBlenderSearch;
+using search::queryeval::FieldSpec;
+using search::queryeval::Searchable;
+using search::queryeval::FakeSearch;
+using search::queryeval::FakeResult;
+using search::queryeval::FakeRequestContext;
+using search::queryeval::SimplePhraseSearch;
+using std::string;
+using std::vector;
+using namespace proton::matching;
+namespace fef_test = search::fef::test;
+
+namespace {
+
+template <typename T> void checkTwoFieldsTwoAttributesTwoIndexes();
+template <typename T> void checkTwoFieldsTwoAttributesOneIndex();
+template <typename T> void checkOneFieldOneAttributeTwoIndexes();
+template <typename T> void checkOneFieldNoAttributesTwoIndexes();
+template <typename T> void checkTwoFieldsNoAttributesTwoIndexes();
+template <typename T> void checkOneFieldNoAttributesOneIndex();
+
+template <typename T> void checkProperBlending();
+template <typename T> void checkProperBlendingWithParent();
+
+const string term = "term";
+const string phrase_term1 = "hello";
+const string phrase_term2 = "world";
+const string view = "view";
+const uint32_t id = 3;
+const search::query::Weight weight(7);
+const string field[] = { "field1", "field2" };
+const string attribute[] = { "attribute1", "attribute2" };
+const string source_tag[] = { "Source 1", "Source 2" };
+const string attribute_tag = "Attribute source";
+const uint32_t distance = 13;
+
+// Fluent builder for multi-child search iterators (e.g. OrSearch): collect
+// raw child iterators with add(), then convert to a SearchIterator* to call
+// SearchType::create(children, strict).
+// NOTE(review): children are raw owning pointers; ownership transfers on
+// conversion. If the conversion operator is never invoked, the collected
+// children appear to leak — acceptable in this test, but confirm.
+template <class SearchType>
+class Create {
+ bool _strict;
+ typename SearchType::Children _children;
+
+public:
+ explicit Create(bool strict = true) : _strict(strict) {}
+
+ Create &add(SearchIterator *s) {
+ _children.push_back(s);
+ return *this;
+ }
+
+ operator SearchIterator *() const {
+ return SearchType::create(_children, _strict);
+ }
+};
+typedef Create<OrSearch> MyOr;
+
+// Minimal ISourceSelector stand-in: only supplies a dummy source-store
+// iterator for constructing SourceBlenderSearch in the expected trees.
+class ISourceSelectorDummy : public ISourceSelector
+{
+public:
+ static SourceStore _sourceStoreDummy;
+
+ static Iterator::UP
+ makeDummyIterator()
+ {
+ return Iterator::UP(new Iterator(_sourceStoreDummy));
+ }
+};
+
+// Shared backing store for all dummy iterators.
+ISourceSelector::SourceStore ISourceSelectorDummy::_sourceStoreDummy("foo");
+
+
+typedef uint32_t SourceId;
+// Fluent builder for SourceBlenderSearch: pair each child iterator with its
+// source id via add(), then convert to a SearchIterator* to create the
+// blender on top of a dummy source selector.
+class Blender {
+ bool _strict;
+ SourceBlenderSearch::Children _children;
+
+public:
+ explicit Blender(bool strict = true) : _strict(strict) {}
+
+ Blender &add(SourceId source_id, SearchIterator *search) {
+ _children.push_back(SourceBlenderSearch::Child(search, source_id));
+ return *this;
+ }
+
+ operator SearchIterator *() const {
+ return SourceBlenderSearch::create(
+ ISourceSelectorDummy::makeDummyIterator(), _children, _strict);
+ }
+};
+
+// Creates a FakeSearch leaf for (tag, field, term) with an empty result.
+// The TermFieldMatchData is a single function-local static shared by every
+// leaf — fine here because the tests only compare asString() dumps, never
+// execute the iterators.
+SearchIterator *getTerm(const string &trm, const string &fld, const string &tag) {
+ static TermFieldMatchData tmd;
+ TermFieldMatchDataArray tfmda;
+ tfmda.add(&tmd);
+ return new FakeSearch(tag, fld, trm, FakeResult(), tfmda);
+}
+
+// Builds the full resolve/reserve/blueprint/createSearch pipeline for a query
+// node against a configurable number of index fields, attribute fields and
+// index sources, and returns the resulting iterator tree as a string.
+// NOTE(review): the three counters have no initializers and there is no
+// constructor — callers must invoke all three setters before
+// getIteratorAsString(), otherwise the counts are indeterminate.
+class IteratorStructureTest {
+ int _field_count;
+ int _attribute_count;
+ int _index_count;
+
+public:
+ void setFieldCount(int count) { _field_count = count; }
+ void setAttributeCount(int count) { _attribute_count = count; }
+ void setIndexCount(int count) { _index_count = count; }
+
+ // Resolves "view" to the configured fields/attributes, builds a blueprint
+ // for the node, and dumps the strict search iterator it creates.
+ string getIteratorAsString(Node &node) {
+ ViewResolver resolver;
+ for (int i = 0; i < _field_count; ++i) {
+ resolver.add(view, field[i]);
+ }
+ for (int i = 0; i < _attribute_count; ++i) {
+ resolver.add(view, attribute[i]);
+ }
+
+ fef_test::IndexEnvironment index_environment;
+ uint32_t fieldId = 0;
+ for (int i = 0; i < _field_count; ++i) {
+ FieldInfo field_info(FieldType::INDEX, CollectionType::SINGLE, field[i], fieldId++);
+ index_environment.getFields().push_back(field_info);
+ }
+ for (int i = 0; i < _attribute_count; ++i) {
+ FieldInfo field_info(FieldType::ATTRIBUTE, CollectionType::SINGLE, attribute[i], fieldId++);
+ index_environment.getFields().push_back(field_info);
+ }
+
+ ResolveViewVisitor resolve_visitor(resolver, index_environment);
+ node.accept(resolve_visitor);
+
+ FakeSearchContext context;
+ context.attr().tag(attribute_tag);
+
+ // tag each index source so dumps reveal which source a leaf came from
+ for (int i = 0; i < _index_count; ++i) {
+ context.addIdx(i).idx(i).getFake().tag(source_tag[i]);
+ }
+
+ MatchDataLayout mdl;
+ FakeRequestContext requestContext;
+ MatchDataReserveVisitor reserve_visitor(mdl);
+ node.accept(reserve_visitor);
+ MatchData::UP match_data = mdl.createMatchData();
+
+ Blueprint::UP blueprint = BlueprintBuilder::build(requestContext, node, context);
+ blueprint->fetchPostings(true);
+ return blueprint->createSearch(*match_data, true)->asString();
+ }
+
+ // Tag-dispatched variant; specialized per query-node type below.
+ template <typename Tag> string getIteratorAsString();
+};
+
+typedef QueryBuilder<ProtonNodeTypes> QB;
+// Tag types: each adds its two-child intermediate node to a QueryBuilder.
+// Term is the leaf-only case and adds nothing (handled by specialization).
+struct Phrase {
+ void addToBuilder(QB& b) { b.addPhrase(2, view, id, weight); }
+};
+struct Near { void addToBuilder(QB& b) { b.addNear(2, distance); } };
+struct ONear { void addToBuilder(QB& b) { b.addONear(2, distance); } };
+struct Or { void addToBuilder(QB& b) { b.addOr(2); } };
+struct And { void addToBuilder(QB& b) { b.addAnd(2); } };
+struct AndNot { void addToBuilder(QB& b) { b.addAndNot(2); } };
+struct Rank { void addToBuilder(QB& b) { b.addRank(2); } };
+struct Term {};
+
+// Generic case: builds <Tag>(term1, term2) searching "view" and dumps it.
+template <typename Tag>
+string IteratorStructureTest::getIteratorAsString() {
+ QueryBuilder<ProtonNodeTypes> query_builder;
+ Tag().addToBuilder(query_builder);
+ query_builder.addStringTerm(phrase_term1, view, id, weight);
+ query_builder.addStringTerm(phrase_term2, view, id, weight);
+ Node::UP node = query_builder.build();
+ return getIteratorAsString(*node);
+}
+
+// Leaf case: a single string term, no intermediate node.
+template <>
+string IteratorStructureTest::getIteratorAsString<Term>() {
+ ProtonStringTerm node(term, view, id, weight);
+ return getIteratorAsString(node);
+}
+
+// Expected-tree leaf for one (field, tag): a plain fake term by default.
+template <typename T>
+SearchIterator *getLeaf(const string &fld, const string &tag) {
+ return getTerm(term, fld, tag);
+}
+
+// Phrase leaf: a SimplePhraseSearch over the two phrase terms. As with
+// getTerm, a shared static TermFieldMatchData suffices since only asString()
+// output is compared.
+template <>
+SearchIterator *getLeaf<Phrase>(const string &fld, const string &tag) {
+ SimplePhraseSearch::Children children;
+ children.push_back(getTerm(phrase_term1, fld, tag));
+ children.push_back(getTerm(phrase_term2, fld, tag));
+ static TermFieldMatchData tmd;
+ TermFieldMatchDataArray tfmda;
+ tfmda.add(&tmd).add(&tmd);
+ vector<uint32_t> eval_order(2);
+ return new SimplePhraseSearch(children, MatchData::UP(), tfmda, eval_order, tmd, true);
+}
+
+// Expected NEAR/ONEAR parent over two subtrees.
+template <typename NearType>
+SearchIterator *getNearParent(SearchIterator *a, SearchIterator *b) {
+ typename NearType::Children children;
+ children.push_back(a);
+ children.push_back(b);
+ TermFieldMatchDataArray data;
+ static TermFieldMatchData tmd;
+ // we only check how many term/field combinations
+ // are below the NearType parent:
+ // two terms searching in (two index fields + two attribute fields)
+ data.add(&tmd).add(&tmd).add(&tmd).add(&tmd)
+ .add(&tmd).add(&tmd).add(&tmd).add(&tmd);
+ return new NearType(children, data, distance, true);
+}
+
+// Expected AND/ANDNOT/OR/RANK parent over two subtrees (strict).
+template <typename SearchType>
+SearchIterator *getSimpleParent(SearchIterator *a, SearchIterator *b) {
+ typename SearchType::Children children;
+ children.push_back(a);
+ children.push_back(b);
+ return SearchType::create(children, true);
+}
+
+// Maps each intermediate-node tag to its expected parent iterator type.
+template <typename T>
+SearchIterator *getParent(SearchIterator *a, SearchIterator *b);
+
+template <>
+SearchIterator *getParent<Near>(SearchIterator *a, SearchIterator *b) {
+ return getNearParent<NearSearch>(a, b);
+}
+
+template <>
+SearchIterator *getParent<ONear>(SearchIterator *a, SearchIterator *b) {
+ return getNearParent<ONearSearch>(a, b);
+}
+
+template <>
+SearchIterator *getParent<Or>(SearchIterator *a, SearchIterator *b) {
+ return getSimpleParent<OrSearch>(a, b);
+}
+
+template <>
+SearchIterator *getParent<And>(SearchIterator *a, SearchIterator *b) {
+ return getSimpleParent<AndSearch>(a, b);
+}
+
+template <>
+SearchIterator *getParent<AndNot>(SearchIterator *a, SearchIterator *b) {
+ return getSimpleParent<AndNotSearch>(a, b);
+}
+
+template <>
+SearchIterator *getParent<Rank>(SearchIterator *a, SearchIterator *b) {
+ return getSimpleParent<RankSearch>(a, b);
+}
+
+// Whether both children of the parent are expected to be strict in the
+// produced tree; only OR keeps its second child strict here.
+template <typename T> bool bothStrict() { return false; }
+
+template <> bool bothStrict<Or>() { return true; }
+
+// 2 fields + 2 attributes + 2 indexes: attributes OR'ed directly with a
+// source blender that OR's both fields per source.
+template <typename T>
+void checkTwoFieldsTwoAttributesTwoIndexes() {
+ IteratorStructureTest structure_test;
+ structure_test.setFieldCount(2);
+ structure_test.setAttributeCount(2);
+ structure_test.setIndexCount(2);
+
+ SearchIterator::UP expected(
+ MyOr()
+ .add(getLeaf<T>(attribute[0], attribute_tag))
+ .add(getLeaf<T>(attribute[1], attribute_tag))
+ .add(Blender()
+ .add(SourceId(0), MyOr()
+ .add(getLeaf<T>(field[0], source_tag[0]))
+ .add(getLeaf<T>(field[1], source_tag[0])))
+ .add(SourceId(1), MyOr()
+ .add(getLeaf<T>(field[0], source_tag[1]))
+ .add(getLeaf<T>(field[1], source_tag[1])))));
+ EXPECT_EQUAL(expected->asString(), structure_test.getIteratorAsString<T>());
+}
+
+// 2 fields + 2 attributes + 1 index: the blender has a single source.
+template <typename T>
+void checkTwoFieldsTwoAttributesOneIndex() {
+ IteratorStructureTest structure_test;
+ structure_test.setFieldCount(2);
+ structure_test.setAttributeCount(2);
+ structure_test.setIndexCount(1);
+
+ SearchIterator::UP expected(
+ MyOr()
+ .add(getLeaf<T>(attribute[0], attribute_tag))
+ .add(getLeaf<T>(attribute[1], attribute_tag))
+ .add(Blender()
+ .add(SourceId(0), MyOr()
+ .add(getLeaf<T>(field[0], source_tag[0]))
+ .add(getLeaf<T>(field[1], source_tag[0])))));
+ EXPECT_EQUAL(expected->asString(), structure_test.getIteratorAsString<T>());
+}
+
+// 1 field + 1 attribute + 2 indexes: no per-source OR wrapper when a source
+// has only a single field.
+template <typename T>
+void checkOneFieldOneAttributeTwoIndexes() {
+ IteratorStructureTest structure_test;
+ structure_test.setFieldCount(1);
+ structure_test.setAttributeCount(1);
+ structure_test.setIndexCount(2);
+
+ SearchIterator::UP expected(
+ MyOr()
+ .add(getLeaf<T>(attribute[0], attribute_tag))
+ .add(Blender()
+ .add(SourceId(0),
+ getLeaf<T>(field[0], source_tag[0]))
+ .add(SourceId(1),
+ getLeaf<T>(field[0], source_tag[1]))));
+ EXPECT_EQUAL(expected->asString(), structure_test.getIteratorAsString<T>());
+}
+
+// 1 field + 0 attributes + 2 indexes: a bare source blender, no outer OR.
+template <typename T>
+void checkOneFieldNoAttributesTwoIndexes() {
+ IteratorStructureTest structure_test;
+ structure_test.setFieldCount(1);
+ structure_test.setAttributeCount(0);
+ structure_test.setIndexCount(2);
+
+ SearchIterator::UP expected(
+ Blender()
+ .add(SourceId(0), getLeaf<T>(field[0], source_tag[0]))
+ .add(SourceId(1), getLeaf<T>(field[0], source_tag[1])));
+ EXPECT_EQUAL(expected->asString(), structure_test.getIteratorAsString<T>());
+}
+
+// 2 fields + 0 attributes + 2 indexes: blender at the top, per-source OR.
+template <typename T>
+void checkTwoFieldsNoAttributesTwoIndexes() {
+ IteratorStructureTest structure_test;
+ structure_test.setFieldCount(2);
+ structure_test.setAttributeCount(0);
+ structure_test.setIndexCount(2);
+
+ SearchIterator::UP expected(
+ Blender()
+ .add(SourceId(0), MyOr()
+ .add(getLeaf<T>(field[0], source_tag[0]))
+ .add(getLeaf<T>(field[1], source_tag[0])))
+ .add(SourceId(1), MyOr()
+ .add(getLeaf<T>(field[0], source_tag[1]))
+ .add(getLeaf<T>(field[1], source_tag[1]))));
+ EXPECT_EQUAL(expected->asString(), structure_test.getIteratorAsString<T>());
+}
+
+// Minimal case, 1 field + 0 attributes + 1 index: a single-source blender
+// wrapping one leaf.
+template <typename T>
+void checkOneFieldNoAttributesOneIndex() {
+ IteratorStructureTest structure_test;
+ structure_test.setFieldCount(1);
+ structure_test.setAttributeCount(0);
+ structure_test.setIndexCount(1);
+
+ SearchIterator::UP expected(
+ Blender()
+ .add(SourceId(0), getLeaf<T>(field[0], source_tag[0])));
+ EXPECT_EQUAL(expected->asString(), structure_test.getIteratorAsString<T>());
+}
+
+// Runs all field/attribute/index combinations for a leaf-style node type.
+template <typename T>
+void checkProperBlending() {
+ TEST_DO(checkTwoFieldsTwoAttributesTwoIndexes<T>());
+ TEST_DO(checkTwoFieldsTwoAttributesOneIndex<T>());
+ TEST_DO(checkOneFieldOneAttributeTwoIndexes<T>());
+ TEST_DO(checkOneFieldNoAttributesTwoIndexes<T>());
+ TEST_DO(checkTwoFieldsNoAttributesTwoIndexes<T>());
+ TEST_DO(checkOneFieldNoAttributesOneIndex<T>());
+}
+
+// For intermediate node types: expects parent<T> over two blended subtrees,
+// one per term. The second subtree's strictness depends on the parent type
+// (only OR keeps both children strict — see bothStrict).
+template <typename T>
+void checkProperBlendingWithParent() {
+ IteratorStructureTest structure_test;
+ structure_test.setFieldCount(2);
+ structure_test.setAttributeCount(2);
+ structure_test.setIndexCount(2);
+
+ SearchIterator::UP expected(
+ getParent<T>(
+ MyOr()
+ .add(getTerm(phrase_term1, attribute[0], attribute_tag))
+ .add(getTerm(phrase_term1, attribute[1], attribute_tag))
+ .add(Blender()
+ .add(SourceId(0), MyOr()
+ .add(getTerm(phrase_term1, field[0], source_tag[0]))
+ .add(getTerm(phrase_term1, field[1], source_tag[0])))
+ .add(SourceId(1), MyOr()
+ .add(getTerm(phrase_term1, field[0], source_tag[1]))
+ .add(getTerm(phrase_term1, field[1], source_tag[1])))),
+ MyOr(bothStrict<T>())
+ .add(getTerm(phrase_term2, attribute[0], attribute_tag))
+ .add(getTerm(phrase_term2, attribute[1], attribute_tag))
+ .add(Blender(bothStrict<T>())
+ .add(SourceId(0), MyOr(bothStrict<T>())
+ .add(getTerm(phrase_term2, field[0], source_tag[0]))
+ .add(getTerm(phrase_term2, field[1], source_tag[0])))
+ .add(SourceId(1), MyOr(bothStrict<T>())
+ .add(getTerm(phrase_term2, field[0], source_tag[1]))
+ .add(getTerm(phrase_term2, field[1], source_tag[1]))))));
+ EXPECT_EQUAL(expected->asString(), structure_test.getIteratorAsString<T>());
+}
+
+// Leaf nodes: terms and phrases use the plain blending layout.
+TEST("requireThatTermNodeSearchIteratorsGetProperBlending") {
+ TEST_DO(checkProperBlending<Term>());
+}
+
+TEST("requireThatPhrasesGetProperBlending") {
+ TEST_DO(checkProperBlending<Phrase>());
+}
+
+// Intermediate nodes: blending happens beneath the NEAR/ONEAR/simple parent.
+TEST("requireThatNearGetProperBlending") {
+ TEST_DO(checkProperBlendingWithParent<Near>());
+}
+
+TEST("requireThatONearGetProperBlending") {
+ TEST_DO(checkProperBlendingWithParent<ONear>());
+}
+
+TEST("requireThatSimpleIntermediatesGetProperBlending") {
+ TEST_DO(checkProperBlendingWithParent<And>());
+ TEST_DO(checkProperBlendingWithParent<AndNot>());
+ TEST_DO(checkProperBlendingWithParent<Or>());
+ TEST_DO(checkProperBlendingWithParent<Rank>());
+}
+
+} // namespace
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/matching/resolveviewvisitor_test.cpp b/searchcore/src/tests/proton/matching/resolveviewvisitor_test.cpp
new file mode 100644
index 00000000000..212762389f0
--- /dev/null
+++ b/searchcore/src/tests/proton/matching/resolveviewvisitor_test.cpp
@@ -0,0 +1,142 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Unit tests for resolveviewvisitor.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("resolveviewvisitor_test");
+
+#include <vespa/searchlib/fef/test/indexenvironment.h>
+#include <vespa/searchcore/proton/matching/querynodes.h>
+#include <vespa/searchcore/proton/matching/resolveviewvisitor.h>
+#include <vespa/searchcore/proton/matching/viewresolver.h>
+#include <vespa/searchlib/query/tree/node.h>
+#include <vespa/searchlib/query/tree/querybuilder.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <string>
+
+namespace fef_test = search::fef::test;
+using search::fef::CollectionType;
+using search::fef::FieldInfo;
+using search::fef::FieldType;
+using search::fef::test::IndexEnvironment;
+using search::query::Node;
+using search::query::QueryBuilder;
+using std::string;
+using namespace proton::matching;
+
+namespace {
+
+const string term = "term";
+const string view = "view";
+const string field1 = "field1";
+const string field2 = "field2";
+const uint32_t id = 1;
+const search::query::Weight weight(2);
+
+// Returns a resolver mapping test_view to both field1 and field2.
+ViewResolver getResolver(const string &test_view) {
+ ViewResolver resolver;
+ resolver.add(test_view, field1);
+ resolver.add(test_view, field2);
+ return resolver;
+}
+
+// Test fixture: an index environment declaring field1 (id 0) and field2
+// (id 1) as single-value index fields.
+struct Fixture {
+ IndexEnvironment index_environment;
+
+ Fixture() {
+ index_environment.getFields().push_back(FieldInfo(
+ FieldType::INDEX, CollectionType::SINGLE, field1, 0));
+ index_environment.getFields().push_back(FieldInfo(
+ FieldType::INDEX, CollectionType::SINGLE, field2, 1));
+ }
+};
+
+// A term addressing a concrete field name (not a view) must resolve to
+// exactly that one field.
+TEST_F("requireThatFieldsResolveToThemselves", Fixture) {
+ ViewResolver resolver = getResolver(view);
+
+ QueryBuilder<ProtonNodeTypes> builder;
+ ProtonTermData &base = builder.addStringTerm(term, field1, id, weight);
+ Node::UP node = builder.build();
+
+ ResolveViewVisitor visitor(resolver, f.index_environment);
+ node->accept(visitor);
+
+ EXPECT_EQUAL(1u, base.numFields());
+ EXPECT_EQUAL(field1, base.field(0).field_name);
+}
+
+// Helper: a term addressing `alias` must resolve through `view_name` to
+// both field1 and field2.
+void checkResolveAlias(const string &view_name, const string &alias,
+ const Fixture &f) {
+ ViewResolver resolver = getResolver(view_name);
+
+ QueryBuilder<ProtonNodeTypes> builder;
+ ProtonTermData &base = builder.addStringTerm(term, alias, id, weight);
+ Node::UP node = builder.build();
+
+ ResolveViewVisitor visitor(resolver, f.index_environment);
+ node->accept(visitor);
+
+ ASSERT_EQUAL(2u, base.numFields());
+ EXPECT_EQUAL(field1, base.field(0).field_name);
+ EXPECT_EQUAL(field2, base.field(1).field_name);
+}
+
+TEST_F("requireThatViewsCanResolveToMultipleFields", Fixture) {
+ checkResolveAlias(view, view, f);
+}
+
+// An empty view string falls back to the "default" view.
+TEST_F("requireThatEmptyViewResolvesAsDefault", Fixture) {
+ const string default_view = "default";
+ const string empty_view = "";
+ checkResolveAlias(default_view, empty_view, f);
+}
+
+// Filter-field resolution: by default each resolved field inherits its
+// filter flag from the index environment; setPositionData(false) on the term
+// forces the filter flag on for every resolved field.
+TEST_F("requireThatWeCanForceFilterField", Fixture) {
+ ViewResolver resolver = getResolver(view);
+ // mark field2 (the last one added) as a filter field
+ f.index_environment.getFields().back().setFilter(true);
+ ResolveViewVisitor visitor(resolver, f.index_environment);
+
+ { // use filter field settings from index environment
+ QueryBuilder<ProtonNodeTypes> builder;
+ ProtonStringTerm &sterm =
+ builder.addStringTerm(term, view, id, weight);
+ Node::UP node = builder.build();
+ node->accept(visitor);
+ ASSERT_EQUAL(2u, sterm.numFields());
+ EXPECT_TRUE(!sterm.field(0).filter_field);
+ EXPECT_TRUE(sterm.field(1).filter_field);
+ }
+ { // force filter on all fields
+ QueryBuilder<ProtonNodeTypes> builder;
+ ProtonStringTerm &sterm =
+ builder.addStringTerm(term, view, id, weight);
+ sterm.setPositionData(false); // force filter
+ Node::UP node = builder.build();
+ node->accept(visitor);
+ ASSERT_EQUAL(2u, sterm.numFields());
+ EXPECT_TRUE(sterm.field(0).filter_field);
+ EXPECT_TRUE(sterm.field(1).filter_field);
+ }
+}
+
+// An EQUIV node collects the union of its children's resolved fields:
+// child 1 resolves view->field1, child 2 addresses field2 directly.
+TEST_F("require that equiv nodes resolve view from children", Fixture) {
+ ViewResolver resolver;
+ resolver.add(view, field1);
+
+ QueryBuilder<ProtonNodeTypes> builder;
+ ProtonTermData &base = builder.addEquiv(2, id, weight);
+ builder.addStringTerm(term, view, 42, weight);
+ builder.addStringTerm(term, field2, 43, weight);
+ Node::UP node = builder.build();
+
+ ResolveViewVisitor visitor(resolver, f.index_environment);
+ node->accept(visitor);
+
+ ASSERT_EQUAL(2u, base.numFields());
+ EXPECT_EQUAL(field1, base.field(0).field_name);
+ EXPECT_EQUAL(field2, base.field(1).field_name);
+}
+
+} // namespace
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/matching/sessionmanager_test.cpp b/searchcore/src/tests/proton/matching/sessionmanager_test.cpp
new file mode 100644
index 00000000000..078a6985fc4
--- /dev/null
+++ b/searchcore/src/tests/proton/matching/sessionmanager_test.cpp
@@ -0,0 +1,87 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Unit tests for sessionmanager.
+
+#include <vespa/log/log.h>
+LOG_SETUP("sessionmanager_test");
+#include <vespa/fastos/fastos.h>
+
+#include <vespa/searchcore/proton/matching/sessionmanager.h>
+#include <vespa/searchcore/proton/matching/session_manager_explorer.h>
+#include <vespa/searchcore/proton/matching/search_session.h>
+#include <vespa/vespalib/stllike/string.h>
+#include <vespa/vespalib/test/insertion_operators.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/data/slime/slime.h>
+
+using vespalib::string;
+using namespace proton;
+using namespace proton::matching;
+using vespalib::StateExplorer;
+
+namespace {
+
+// Asserts all five counters of a SessionManager::Stats snapshot at once.
+void checkStats(SessionManager::Stats stats, uint32_t numInsert,
+ uint32_t numPick, uint32_t numDropped, uint32_t numCached,
+ uint32_t numTimedout) {
+ EXPECT_EQUAL(numInsert, stats.numInsert);
+ EXPECT_EQUAL(numPick, stats.numPick);
+ EXPECT_EQUAL(numDropped, stats.numDropped);
+ EXPECT_EQUAL(numCached, stats.numCached);
+ EXPECT_EQUAL(numTimedout, stats.numTimedout);
+}
+
+
+// Exercises the SearchSession cache: insert, pick (removes from cache view
+// of pickable sessions while still counted as cached), re-insert, and
+// timeout-based pruning around the session's doom time (1000).
+// NOTE(review): getSearchStats() appears to return counters accumulated
+// since the previous snapshot — verify against SessionManager.
+TEST("require that SessionManager handles SearchSessions.") {
+ string session_id("foo");
+ fastos::TimeStamp doom(1000);
+ MatchToolsFactory::UP mtf;
+ SearchSession::OwnershipBundle owned_objects;
+ SearchSession::SP session(
+ new SearchSession(session_id, doom, std::move(mtf),
+ std::move(owned_objects)));
+
+ SessionManager session_manager(10);
+ TEST_DO(checkStats(session_manager.getSearchStats(), 0, 0, 0, 0, 0));
+ session_manager.insert(std::move(session));
+ TEST_DO(checkStats(session_manager.getSearchStats(), 1, 0, 0, 1, 0));
+ session = session_manager.pickSearch(session_id);
+ EXPECT_TRUE(session.get());
+ TEST_DO(checkStats(session_manager.getSearchStats(), 0, 1, 0, 1, 0));
+ session_manager.insert(std::move(session));
+ TEST_DO(checkStats(session_manager.getSearchStats(), 1, 0, 0, 1, 0));
+ // pruning before the doom time keeps the session cached...
+ session_manager.pruneTimedOutSessions(500);
+ TEST_DO(checkStats(session_manager.getSearchStats(), 0, 0, 0, 1, 0));
+ // ...pruning after it times the session out
+ session_manager.pruneTimedOutSessions(2000);
+ TEST_DO(checkStats(session_manager.getSearchStats(), 0, 0, 0, 0, 1));
+
+ session = session_manager.pickSearch(session_id);
+ EXPECT_FALSE(session.get());
+}
+
+// The state explorer exposes a "search" child; its brief state reports the
+// session count only, while full state also lists the individual sessions.
+TEST("require that SessionManager can be explored") {
+ fastos::TimeStamp doom(1000);
+ SessionManager session_manager(10);
+ session_manager.insert(SearchSession::SP(new SearchSession("foo", doom,
+ MatchToolsFactory::UP(), SearchSession::OwnershipBundle())));
+ session_manager.insert(SearchSession::SP(new SearchSession("bar", doom,
+ MatchToolsFactory::UP(), SearchSession::OwnershipBundle())));
+ session_manager.insert(SearchSession::SP(new SearchSession("baz", doom,
+ MatchToolsFactory::UP(), SearchSession::OwnershipBundle())));
+ SessionManagerExplorer explorer(session_manager);
+ EXPECT_EQUAL(std::vector<vespalib::string>({"search"}),
+ explorer.get_children_names());
+ std::unique_ptr<StateExplorer> search = explorer.get_child("search");
+ ASSERT_TRUE(search.get() != nullptr);
+ vespalib::Slime state;
+ vespalib::Slime full_state;
+ search->get_state(vespalib::slime::SlimeInserter(state), false);
+ search->get_state(vespalib::slime::SlimeInserter(full_state), true);
+ EXPECT_EQUAL(3, state.get()["numSessions"].asLong());
+ EXPECT_EQUAL(3, full_state.get()["numSessions"].asLong());
+ EXPECT_EQUAL(0u, state.get()["sessions"].entries());
+ EXPECT_EQUAL(3u, full_state.get()["sessions"].entries());
+}
+
+} // namespace
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/matching/termdataextractor_test.cpp b/searchcore/src/tests/proton/matching/termdataextractor_test.cpp
new file mode 100644
index 00000000000..d61267b7d31
--- /dev/null
+++ b/searchcore/src/tests/proton/matching/termdataextractor_test.cpp
@@ -0,0 +1,167 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Unit tests for TermDataExtractor.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("termdataextractor_test");
+
+#include <vespa/searchcore/proton/matching/querynodes.h>
+#include <vespa/searchcore/proton/matching/resolveviewvisitor.h>
+#include <vespa/searchcore/proton/matching/termdataextractor.h>
+#include <vespa/searchcore/proton/matching/viewresolver.h>
+#include <vespa/searchlib/fef/tablemanager.h>
+#include <vespa/searchlib/fef/itermdata.h>
+#include <vespa/searchlib/fef/test/indexenvironment.h>
+#include <vespa/searchlib/query/tree/location.h>
+#include <vespa/searchlib/query/tree/point.h>
+#include <vespa/searchlib/query/tree/querybuilder.h>
+#include <vespa/searchlib/query/weight.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <string>
+#include <vector>
+
+namespace fef_test = search::fef::test;
+using search::fef::CollectionType;
+using search::fef::FieldInfo;
+using search::fef::FieldType;
+using search::fef::ITermData;
+using search::fef::IIndexEnvironment;
+using search::query::Location;
+using search::query::Node;
+using search::query::Point;
+using search::query::QueryBuilder;
+using search::query::Range;
+using search::query::Weight;
+using std::string;
+using std::vector;
+using namespace proton::matching;
+
+namespace search { class AttributeManager; }
+
+namespace {
+
+class Test : public vespalib::TestApp {
+ void requireThatTermsAreAdded();
+ void requireThatAViewWithTwoFieldsGivesOneTermDataPerTerm();
+ void requireThatUnrankedTermsAreSkipped();
+ void requireThatNegativeTermsAreSkipped();
+
+public:
+ int Main();
+};
+
+int
+Test::Main()
+{
+ TEST_INIT("termdataextractor_test");
+
+ TEST_DO(requireThatTermsAreAdded());
+ TEST_DO(requireThatAViewWithTwoFieldsGivesOneTermDataPerTerm());
+ TEST_DO(requireThatUnrankedTermsAreSkipped());
+ TEST_DO(requireThatNegativeTermsAreSkipped());
+
+ TEST_DONE();
+}
+
+const string field = "field";
+const uint32_t id[] = { 10, 11, 12, 13, 14, 15, 16, 17, 18 };
+
+Node::UP getQuery(const ViewResolver &resolver)
+{
+ QueryBuilder<ProtonNodeTypes> query_builder;
+ query_builder.addAnd(8);
+ query_builder.addNumberTerm("0.0", field, id[0], Weight(0));
+ query_builder.addPrefixTerm("foo", field, id[1], Weight(0));
+ query_builder.addStringTerm("bar", field, id[2], Weight(0));
+ query_builder.addSubstringTerm("baz", field, id[3], Weight(0));
+ query_builder.addSuffixTerm("qux", field, id[4], Weight(0));
+ query_builder.addRangeTerm(Range(), field, id[5], Weight(0));
+ query_builder.addWeightedSetTerm(1, field, id[6], Weight(0));
+ {
+ // weighted token
+ query_builder.addStringTerm("bar", field, id[3], Weight(0));
+ }
+
+ query_builder.addLocationTerm(Location(Point(10, 10), 3, 0),
+ field, id[7], Weight(0));
+ Node::UP node = query_builder.build();
+
+ fef_test::IndexEnvironment index_environment;
+ index_environment.getFields().push_back(FieldInfo(FieldType::INDEX, CollectionType::SINGLE, field, 0));
+ index_environment.getFields().push_back(FieldInfo(FieldType::INDEX, CollectionType::SINGLE, "foo", 1));
+ index_environment.getFields().push_back(FieldInfo(FieldType::INDEX, CollectionType::SINGLE, "bar", 2));
+
+ ResolveViewVisitor visitor(resolver, index_environment);
+ node->accept(visitor);
+
+ return node;
+}
+
+void Test::requireThatTermsAreAdded() {
+ Node::UP node = getQuery(ViewResolver());
+
+ vector<const ITermData *> term_data;
+ TermDataExtractor::extractTerms(*node, term_data);
+ EXPECT_EQUAL(7u, term_data.size());
+ for (int i = 0; i < 7; ++i) {
+ EXPECT_EQUAL(id[i], term_data[i]->getUniqueId());
+ EXPECT_EQUAL(1u, term_data[i]->numFields());
+ }
+}
+
+void Test::requireThatAViewWithTwoFieldsGivesOneTermDataPerTerm() {
+ ViewResolver resolver;
+ resolver.add(field, "foo");
+ resolver.add(field, "bar");
+ Node::UP node = getQuery(resolver);
+
+ vector<const ITermData *> term_data;
+ TermDataExtractor::extractTerms(*node, term_data);
+ EXPECT_EQUAL(7u, term_data.size());
+ for (int i = 0; i < 7; ++i) {
+ EXPECT_EQUAL(id[i], term_data[i]->getUniqueId());
+ EXPECT_EQUAL(2u, term_data[i]->numFields());
+ }
+}
+
+void
+Test::requireThatUnrankedTermsAreSkipped()
+{
+ QueryBuilder<ProtonNodeTypes> query_builder;
+ query_builder.addAnd(2);
+ query_builder.addStringTerm("term1", field, id[0], Weight(0));
+ query_builder.addStringTerm("term2", field, id[1], Weight(0))
+ .setRanked(false);
+ Node::UP node = query_builder.build();
+
+ vector<const ITermData *> term_data;
+ TermDataExtractor::extractTerms(*node, term_data);
+ EXPECT_EQUAL(1u, term_data.size());
+ ASSERT_TRUE(term_data.size() >= 1);
+ EXPECT_EQUAL(id[0], term_data[0]->getUniqueId());
+}
+
+void
+Test::requireThatNegativeTermsAreSkipped()
+{
+ QueryBuilder<ProtonNodeTypes> query_builder;
+ query_builder.addAnd(2);
+ query_builder.addStringTerm("term1", field, id[0], Weight(0));
+ query_builder.addAndNot(2);
+ query_builder.addStringTerm("term2", field, id[1], Weight(0));
+ query_builder.addAndNot(2);
+ query_builder.addStringTerm("term3", field, id[2], Weight(0));
+ query_builder.addStringTerm("term4", field, id[3], Weight(0));
+ Node::UP node = query_builder.build();
+
+ vector<const ITermData *> term_data;
+ TermDataExtractor::extractTerms(*node, term_data);
+ EXPECT_EQUAL(2u, term_data.size());
+ ASSERT_TRUE(term_data.size() >= 2);
+ EXPECT_EQUAL(id[0], term_data[0]->getUniqueId());
+ EXPECT_EQUAL(id[1], term_data[1]->getUniqueId());
+}
+
+} // namespace
+
+TEST_APPHOOK(Test);
diff --git a/searchcore/src/tests/proton/metrics/documentdb_job_trackers/.gitignore b/searchcore/src/tests/proton/metrics/documentdb_job_trackers/.gitignore
new file mode 100644
index 00000000000..84c97c63aca
--- /dev/null
+++ b/searchcore/src/tests/proton/metrics/documentdb_job_trackers/.gitignore
@@ -0,0 +1 @@
+searchcore_documentdb_job_trackers_test_app
diff --git a/searchcore/src/tests/proton/metrics/documentdb_job_trackers/CMakeLists.txt b/searchcore/src/tests/proton/metrics/documentdb_job_trackers/CMakeLists.txt
new file mode 100644
index 00000000000..bf77c583468
--- /dev/null
+++ b/searchcore/src/tests/proton/metrics/documentdb_job_trackers/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_documentdb_job_trackers_test_app
+ SOURCES
+ documentdb_job_trackers_test.cpp
+ DEPENDS
+ searchcore_proton_metrics
+)
+vespa_add_test(NAME searchcore_documentdb_job_trackers_test_app COMMAND searchcore_documentdb_job_trackers_test_app)
diff --git a/searchcore/src/tests/proton/metrics/documentdb_job_trackers/DESC b/searchcore/src/tests/proton/metrics/documentdb_job_trackers/DESC
new file mode 100644
index 00000000000..ccd322886ea
--- /dev/null
+++ b/searchcore/src/tests/proton/metrics/documentdb_job_trackers/DESC
@@ -0,0 +1 @@
+documentdb job trackers test. Take a look at documentdb_job_trackers_test.cpp for details.
diff --git a/searchcore/src/tests/proton/metrics/documentdb_job_trackers/FILES b/searchcore/src/tests/proton/metrics/documentdb_job_trackers/FILES
new file mode 100644
index 00000000000..a63504feca2
--- /dev/null
+++ b/searchcore/src/tests/proton/metrics/documentdb_job_trackers/FILES
@@ -0,0 +1 @@
+documentdb_job_trackers_test.cpp
diff --git a/searchcore/src/tests/proton/metrics/documentdb_job_trackers/documentdb_job_trackers_test.cpp b/searchcore/src/tests/proton/metrics/documentdb_job_trackers/documentdb_job_trackers_test.cpp
new file mode 100644
index 00000000000..3269fe84dcd
--- /dev/null
+++ b/searchcore/src/tests/proton/metrics/documentdb_job_trackers/documentdb_job_trackers_test.cpp
@@ -0,0 +1,116 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("documentdb_job_trackers_test");
+
+#include <vespa/searchcore/proton/metrics/documentdb_job_trackers.h>
+#include <vespa/searchcore/proton/metrics/job_tracked_flush_target.h>
+#include <vespa/searchcore/proton/test/dummy_flush_target.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+using namespace proton;
+using namespace searchcorespi;
+
+constexpr double EPS = 0.000001;
+
+typedef IFlushTarget::Type FTT;
+typedef IFlushTarget::Component FTC;
+
+struct MFT : public test::DummyFlushTarget
+{
+ MFT(FTT type, FTC component) : test::DummyFlushTarget("", type, component) {}
+};
+
+struct AttributeFlush : public MFT { AttributeFlush() : MFT(FTT::SYNC, FTC::ATTRIBUTE) {} };
+struct MemoryIndexFlush : public MFT { MemoryIndexFlush() : MFT(FTT::FLUSH, FTC::INDEX) {} };
+struct DiskIndexFusion : public MFT { DiskIndexFusion() : MFT(FTT::GC, FTC::INDEX) {} };
+struct DocStoreFlush : public MFT { DocStoreFlush() : MFT(FTT::SYNC, FTC::DOCUMENT_STORE) {} };
+struct DocStoreCompaction : public MFT { DocStoreCompaction() : MFT(FTT::GC, FTC::DOCUMENT_STORE) {} };
+struct OtherFlush : public MFT { OtherFlush() : MFT(FTT::FLUSH, FTC::OTHER) {} };
+
+struct Fixture
+{
+ DocumentDBJobTrackers _trackers;
+ DocumentDBTaggedMetrics::JobMetrics _metrics;
+ Fixture()
+ : _trackers(),
+ _metrics(nullptr)
+ {
+ }
+};
+
+void
+startJobs(IJobTracker &tracker, uint32_t numJobs)
+{
+ for (uint32_t i = 0; i < numJobs; ++i) {
+ tracker.start();
+ }
+}
+
+TEST_F("require that job metrics are updated", Fixture)
+{
+ startJobs(f._trackers.getAttributeFlush(), 1);
+ startJobs(f._trackers.getMemoryIndexFlush(), 2);
+ startJobs(f._trackers.getDiskIndexFusion(), 3);
+ startJobs(f._trackers.getDocumentStoreFlush(), 4);
+ startJobs(f._trackers.getDocumentStoreCompact(), 5);
+ startJobs(*f._trackers.getBucketMove(), 6);
+ startJobs(*f._trackers.getLidSpaceCompact(), 7);
+ startJobs(*f._trackers.getRemovedDocumentsPrune(), 8);
+
+ // Update metrics 2 times to ensure that all jobs are running
+ // in the last interval we actually care about.
+ f._trackers.updateMetrics(f._metrics);
+ FastOS_Thread::Sleep(100);
+ f._trackers.updateMetrics(f._metrics);
+
+ EXPECT_APPROX(1.0, f._metrics.attributeFlush.getLast(), EPS);
+ EXPECT_APPROX(2.0, f._metrics.memoryIndexFlush.getLast(), EPS);
+ EXPECT_APPROX(3.0, f._metrics.diskIndexFusion.getLast(), EPS);
+ EXPECT_APPROX(4.0, f._metrics.documentStoreFlush.getLast(), EPS);
+ EXPECT_APPROX(5.0, f._metrics.documentStoreCompact.getLast(), EPS);
+ EXPECT_APPROX(6.0, f._metrics.bucketMove.getLast(), EPS);
+ EXPECT_APPROX(7.0, f._metrics.lidSpaceCompact.getLast(), EPS);
+ EXPECT_APPROX(8.0, f._metrics.removedDocumentsPrune.getLast(), EPS);
+ EXPECT_APPROX(36.0, f._metrics.total.getLast(), EPS);
+}
+
+bool
+assertFlushTarget(const IJobTracker &tracker, const IFlushTarget &target)
+{
+ const JobTrackedFlushTarget *tracked =
+ dynamic_cast<const JobTrackedFlushTarget *>(&target);
+ if (!EXPECT_TRUE(tracked != nullptr)) return false;
+ if (!EXPECT_EQUAL(&tracker, &tracked->getTracker())) return false;
+ return true;
+}
+
+TEST_F("require that known flush targets are tracked", Fixture)
+{
+ IFlushTarget::List input;
+ input.push_back(IFlushTarget::SP(new AttributeFlush()));
+ input.push_back(IFlushTarget::SP(new MemoryIndexFlush()));
+ input.push_back(IFlushTarget::SP(new DiskIndexFusion()));
+ input.push_back(IFlushTarget::SP(new DocStoreFlush()));
+ input.push_back(IFlushTarget::SP(new DocStoreCompaction()));
+
+ IFlushTarget::List output = f._trackers.trackFlushTargets(input);
+ EXPECT_EQUAL(5u, output.size());
+ EXPECT_TRUE(assertFlushTarget(f._trackers.getAttributeFlush(), *output[0]));
+ EXPECT_TRUE(assertFlushTarget(f._trackers.getMemoryIndexFlush(), *output[1]));
+ EXPECT_TRUE(assertFlushTarget(f._trackers.getDiskIndexFusion(), *output[2]));
+ EXPECT_TRUE(assertFlushTarget(f._trackers.getDocumentStoreFlush(), *output[3]));
+ EXPECT_TRUE(assertFlushTarget(f._trackers.getDocumentStoreCompact(), *output[4]));
+}
+
+TEST_F("require that un-known flush targets are not tracked", Fixture)
+{
+ IFlushTarget::List input;
+ input.push_back(IFlushTarget::SP(new OtherFlush()));
+
+ IFlushTarget::List output = f._trackers.trackFlushTargets(input);
+ EXPECT_EQUAL(1u, output.size());
+ EXPECT_EQUAL(&*output[0].get(), &*input[0]);
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/metrics/job_load_sampler/.gitignore b/searchcore/src/tests/proton/metrics/job_load_sampler/.gitignore
new file mode 100644
index 00000000000..2e02ec8191b
--- /dev/null
+++ b/searchcore/src/tests/proton/metrics/job_load_sampler/.gitignore
@@ -0,0 +1 @@
+searchcore_job_load_sampler_test_app
diff --git a/searchcore/src/tests/proton/metrics/job_load_sampler/CMakeLists.txt b/searchcore/src/tests/proton/metrics/job_load_sampler/CMakeLists.txt
new file mode 100644
index 00000000000..478a7201228
--- /dev/null
+++ b/searchcore/src/tests/proton/metrics/job_load_sampler/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_job_load_sampler_test_app
+ SOURCES
+ job_load_sampler_test.cpp
+ DEPENDS
+ searchcore_proton_metrics
+)
+vespa_add_test(NAME searchcore_job_load_sampler_test_app COMMAND searchcore_job_load_sampler_test_app)
diff --git a/searchcore/src/tests/proton/metrics/job_load_sampler/DESC b/searchcore/src/tests/proton/metrics/job_load_sampler/DESC
new file mode 100644
index 00000000000..966bcdf83f6
--- /dev/null
+++ b/searchcore/src/tests/proton/metrics/job_load_sampler/DESC
@@ -0,0 +1 @@
+job load sampler test. Take a look at job_load_sampler_test.cpp for details.
diff --git a/searchcore/src/tests/proton/metrics/job_load_sampler/FILES b/searchcore/src/tests/proton/metrics/job_load_sampler/FILES
new file mode 100644
index 00000000000..1112ae6c5da
--- /dev/null
+++ b/searchcore/src/tests/proton/metrics/job_load_sampler/FILES
@@ -0,0 +1 @@
+job_load_sampler_test.cpp
diff --git a/searchcore/src/tests/proton/metrics/job_load_sampler/job_load_sampler_test.cpp b/searchcore/src/tests/proton/metrics/job_load_sampler/job_load_sampler_test.cpp
new file mode 100644
index 00000000000..b8fa728927f
--- /dev/null
+++ b/searchcore/src/tests/proton/metrics/job_load_sampler/job_load_sampler_test.cpp
@@ -0,0 +1,95 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("job_load_sampler_test");
+
+#include <vespa/searchcore/proton/metrics/job_load_sampler.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+using namespace proton;
+
+constexpr double EPS = 0.000001;
+
+struct Fixture
+{
+ JobLoadSampler _sampler;
+ Fixture()
+ : _sampler(10)
+ {
+ }
+ Fixture &start(double now) {
+ _sampler.startJob(now);
+ return *this;
+ }
+ Fixture &end(double now) {
+ _sampler.endJob(now);
+ return *this;
+ }
+ double sample(double now) {
+ return _sampler.sampleLoad(now);
+ }
+};
+
+TEST_F("require that empty sampler gives 0 load", Fixture)
+{
+ EXPECT_APPROX(0.0, f.sample(11), EPS);
+}
+
+TEST_F("require that empty time interval gives 0 load", Fixture)
+{
+ EXPECT_APPROX(0.0, f.sample(10), EPS);
+}
+
+TEST_F("require that job that starts and ends in interval gets correct load", Fixture)
+{
+ f.start(12).end(17);
+ EXPECT_APPROX(0.5, f.sample(20), EPS);
+ EXPECT_APPROX(0.0, f.sample(21), EPS);
+}
+
+TEST_F("require that job that starts in interval gets correct load", Fixture)
+{
+ f.start(12);
+ EXPECT_APPROX(0.8, f.sample(20), EPS);
+ EXPECT_APPROX(1.0, f.sample(21), EPS);
+}
+
+TEST_F("require that job that ends in interval gets correct load", Fixture)
+{
+ f.start(12).sample(20);
+ f.end(27);
+ EXPECT_APPROX(0.7, f.sample(30), EPS);
+ EXPECT_APPROX(0.0, f.sample(31), EPS);
+}
+
+TEST_F("require that job that runs in complete interval gets correct load", Fixture)
+{
+ f.start(12).sample(20);
+ EXPECT_APPROX(1.0, f.sample(30), EPS);
+ EXPECT_APPROX(1.0, f.sample(31), EPS);
+}
+
+TEST_F("require that multiple jobs that starts and ends in interval gets correct load", Fixture)
+{
+ // job1: 12->17: 0.5
+ // job2: 14->16: 0.2
+ f.start(12).start(14).end(16).end(17);
+ EXPECT_APPROX(0.7, f.sample(20), EPS);
+}
+
+TEST_F("require that multiple jobs that starts and ends in several intervals gets correct load", Fixture)
+{
+ // job1: 12->22
+ // job2: 14->34
+ // job3: 25->45
+ f.start(12).start(14);
+ EXPECT_APPROX(1.4, f.sample(20), EPS);
+ f.end(22).start(25);
+ EXPECT_APPROX(1.7, f.sample(30), EPS);
+ f.end(34);
+ EXPECT_APPROX(1.4, f.sample(40), EPS);
+ f.end(45);
+ EXPECT_APPROX(0.5, f.sample(50), EPS);
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/metrics/job_tracked_flush/.gitignore b/searchcore/src/tests/proton/metrics/job_tracked_flush/.gitignore
new file mode 100644
index 00000000000..85e6097878b
--- /dev/null
+++ b/searchcore/src/tests/proton/metrics/job_tracked_flush/.gitignore
@@ -0,0 +1 @@
+searchcore_job_tracked_flush_test_app
diff --git a/searchcore/src/tests/proton/metrics/job_tracked_flush/CMakeLists.txt b/searchcore/src/tests/proton/metrics/job_tracked_flush/CMakeLists.txt
new file mode 100644
index 00000000000..f4544740f8e
--- /dev/null
+++ b/searchcore/src/tests/proton/metrics/job_tracked_flush/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_job_tracked_flush_test_app
+ SOURCES
+ job_tracked_flush_test.cpp
+ DEPENDS
+ searchcore_proton_metrics
+)
+vespa_add_test(NAME searchcore_job_tracked_flush_test_app COMMAND searchcore_job_tracked_flush_test_app)
diff --git a/searchcore/src/tests/proton/metrics/job_tracked_flush/DESC b/searchcore/src/tests/proton/metrics/job_tracked_flush/DESC
new file mode 100644
index 00000000000..b62528ff8b4
--- /dev/null
+++ b/searchcore/src/tests/proton/metrics/job_tracked_flush/DESC
@@ -0,0 +1,2 @@
+job tracked flush target/task test. Take a look at job_tracked_flush_test.cpp for details.
+
diff --git a/searchcore/src/tests/proton/metrics/job_tracked_flush/FILES b/searchcore/src/tests/proton/metrics/job_tracked_flush/FILES
new file mode 100644
index 00000000000..09f32789c94
--- /dev/null
+++ b/searchcore/src/tests/proton/metrics/job_tracked_flush/FILES
@@ -0,0 +1 @@
+job_tracked_flush_test.cpp
diff --git a/searchcore/src/tests/proton/metrics/job_tracked_flush/job_tracked_flush_test.cpp b/searchcore/src/tests/proton/metrics/job_tracked_flush/job_tracked_flush_test.cpp
new file mode 100644
index 00000000000..cf35ba0b505
--- /dev/null
+++ b/searchcore/src/tests/proton/metrics/job_tracked_flush/job_tracked_flush_test.cpp
@@ -0,0 +1,139 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("job_tracked_flush_test");
+
+#include <vespa/searchcore/proton/metrics/job_tracked_flush_target.h>
+#include <vespa/searchcore/proton/metrics/job_tracked_flush_task.h>
+#include <vespa/searchcore/proton/test/dummy_flush_target.h>
+#include <vespa/searchcore/proton/test/simple_job_tracker.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/util/closuretask.h>
+#include <vespa/vespalib/util/threadstackexecutor.h>
+#include <vespa/vespalib/util/sync.h>
+
+using namespace proton;
+using namespace searchcorespi;
+using search::SerialNum;
+using test::SimpleJobTracker;
+using vespalib::makeTask;
+using vespalib::makeClosure;
+using vespalib::CountDownLatch;
+using vespalib::Gate;
+using vespalib::ThreadStackExecutor;
+
+struct MyFlushTask : public searchcorespi::FlushTask
+{
+ Gate &_execGate;
+ MyFlushTask(Gate &execGate) : _execGate(execGate) {}
+
+ // Implements searchcorespi::FlushTask
+ virtual void run() {
+ _execGate.await(5000);
+ }
+ virtual search::SerialNum getFlushSerial() const { return 5; }
+};
+
+struct MyFlushTarget : public test::DummyFlushTarget
+{
+ typedef std::shared_ptr<MyFlushTarget> SP;
+ SerialNum _initFlushSerial;
+ Gate _execGate;
+ Gate _initGate;
+ MyFlushTarget()
+ : test::DummyFlushTarget("mytarget", Type::FLUSH, Component::OTHER),
+ _initFlushSerial(0),
+ _execGate(),
+ _initGate()
+ {}
+
+ // Implements searchcorespi::IFlushTarget
+ virtual FlushTask::UP initFlush(SerialNum currentSerial) {
+ if (currentSerial > 0) {
+ _initFlushSerial = currentSerial;
+ _initGate.await(5000);
+ return FlushTask::UP(new MyFlushTask(_execGate));
+ }
+ return FlushTask::UP();
+ }
+};
+
+struct Fixture
+{
+ SimpleJobTracker::SP _tracker;
+ MyFlushTarget::SP _target;
+ JobTrackedFlushTarget _trackedFlush;
+ FlushTask::UP _task;
+ Gate _taskGate;
+ ThreadStackExecutor _exec;
+ Fixture(uint32_t numJobTrackings = 1)
+ : _tracker(new SimpleJobTracker(numJobTrackings)),
+ _target(new MyFlushTarget()),
+ _trackedFlush(_tracker, _target),
+ _task(),
+ _taskGate(),
+ _exec(1, 64000)
+ {
+ }
+ void initFlush(SerialNum currentSerial) {
+ _task = _trackedFlush.initFlush(currentSerial);
+ _taskGate.countDown();
+ }
+};
+
+constexpr SerialNum FLUSH_SERIAL = 10;
+
+TEST_F("require that flush target name, type and component is preserved", Fixture)
+{
+ EXPECT_EQUAL("mytarget", f._trackedFlush.getName());
+ EXPECT_TRUE(IFlushTarget::Type::FLUSH == f._trackedFlush.getType());
+ EXPECT_TRUE(IFlushTarget::Component::OTHER == f._trackedFlush.getComponent());
+}
+
+TEST_F("require that flush task init is tracked", Fixture)
+{
+ EXPECT_EQUAL(1u, f._tracker->_started.getCount());
+ EXPECT_EQUAL(1u, f._tracker->_ended.getCount());
+
+ f._exec.execute(makeTask(makeClosure(&f, &Fixture::initFlush, FLUSH_SERIAL)));
+ f._tracker->_started.await(5000);
+ EXPECT_EQUAL(0u, f._tracker->_started.getCount());
+ EXPECT_EQUAL(1u, f._tracker->_ended.getCount());
+
+ f._target->_initGate.countDown();
+ f._taskGate.await(5000);
+ EXPECT_EQUAL(0u, f._tracker->_ended.getCount());
+ {
+ JobTrackedFlushTask *trackedTask = dynamic_cast<JobTrackedFlushTask *>(f._task.get());
+ EXPECT_TRUE(trackedTask != nullptr);
+ EXPECT_EQUAL(5u, trackedTask->getFlushSerial());
+ }
+ EXPECT_EQUAL(FLUSH_SERIAL, f._target->_initFlushSerial);
+}
+
+TEST_F("require that flush task execution is tracked", Fixture(2))
+{
+ f._exec.execute(makeTask(makeClosure(&f, &Fixture::initFlush, FLUSH_SERIAL)));
+ f._target->_initGate.countDown();
+ f._taskGate.await(5000);
+
+ EXPECT_EQUAL(1u, f._tracker->_started.getCount());
+ EXPECT_EQUAL(1u, f._tracker->_ended.getCount());
+
+ f._exec.execute(std::move(f._task));
+ f._tracker->_started.await(5000);
+ EXPECT_EQUAL(0u, f._tracker->_started.getCount());
+ EXPECT_EQUAL(1u, f._tracker->_ended.getCount());
+
+ f._target->_execGate.countDown();
+ f._tracker->_ended.await(5000);
+ EXPECT_EQUAL(0u, f._tracker->_ended.getCount());
+}
+
+TEST_F("require that nullptr flush task is not tracked", Fixture)
+{
+ FlushTask::UP task = f._trackedFlush.initFlush(0);
+ EXPECT_TRUE(task.get() == nullptr);
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/metrics/metrics_engine/.gitignore b/searchcore/src/tests/proton/metrics/metrics_engine/.gitignore
new file mode 100644
index 00000000000..98ae77cb458
--- /dev/null
+++ b/searchcore/src/tests/proton/metrics/metrics_engine/.gitignore
@@ -0,0 +1 @@
+searchcore_metrics_engine_test_app
diff --git a/searchcore/src/tests/proton/metrics/metrics_engine/CMakeLists.txt b/searchcore/src/tests/proton/metrics/metrics_engine/CMakeLists.txt
new file mode 100644
index 00000000000..e50e584e578
--- /dev/null
+++ b/searchcore/src/tests/proton/metrics/metrics_engine/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_metrics_engine_test_app
+ SOURCES
+ metrics_engine_test.cpp
+ DEPENDS
+ searchcore_flushengine
+ searchcore_proton_metrics
+)
+vespa_add_test(NAME searchcore_metrics_engine_test_app COMMAND searchcore_metrics_engine_test_app)
diff --git a/searchcore/src/tests/proton/metrics/metrics_engine/DESC b/searchcore/src/tests/proton/metrics/metrics_engine/DESC
new file mode 100644
index 00000000000..2efe31d45d3
--- /dev/null
+++ b/searchcore/src/tests/proton/metrics/metrics_engine/DESC
@@ -0,0 +1 @@
+metrics engine test. Take a look at metrics_engine_test.cpp for details.
diff --git a/searchcore/src/tests/proton/metrics/metrics_engine/FILES b/searchcore/src/tests/proton/metrics/metrics_engine/FILES
new file mode 100644
index 00000000000..ac033a53070
--- /dev/null
+++ b/searchcore/src/tests/proton/metrics/metrics_engine/FILES
@@ -0,0 +1 @@
+metrics_engine_test.cpp
diff --git a/searchcore/src/tests/proton/metrics/metrics_engine/metrics_engine_test.cpp b/searchcore/src/tests/proton/metrics/metrics_engine/metrics_engine_test.cpp
new file mode 100644
index 00000000000..a70ce5a5333
--- /dev/null
+++ b/searchcore/src/tests/proton/metrics/metrics_engine/metrics_engine_test.cpp
@@ -0,0 +1,32 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Unit tests for metrics_engine.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("metrics_engine_test");
+
+#include <vespa/searchcore/proton/metrics/metrics_engine.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+using namespace proton;
+
+namespace {
+
+TEST("require that the metric proton.diskusage is the sum of the documentDB "
+ "diskusage metrics.") {
+ MetricsEngine metrics_engine;
+
+ DocumentDBMetricsCollection metrics1("type1", 1);
+ DocumentDBMetricsCollection metrics2("type2", 1);
+ metrics1.getMetrics().index.diskUsage.addValue(100);
+ metrics2.getMetrics().index.diskUsage.addValue(1000);
+
+ metrics_engine.addDocumentDBMetrics(metrics1);
+ metrics_engine.addDocumentDBMetrics(metrics2);
+
+ EXPECT_EQUAL(1100, metrics_engine.legacyRoot().diskUsage.getLongValue("value"));
+}
+
+} // namespace
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/persistenceconformance/.gitignore b/searchcore/src/tests/proton/persistenceconformance/.gitignore
new file mode 100644
index 00000000000..9b6330d1531
--- /dev/null
+++ b/searchcore/src/tests/proton/persistenceconformance/.gitignore
@@ -0,0 +1 @@
+/vlog.txt
diff --git a/searchcore/src/tests/proton/persistenceconformance/CMakeLists.txt b/searchcore/src/tests/proton/persistenceconformance/CMakeLists.txt
new file mode 100644
index 00000000000..f71dbf3c1ba
--- /dev/null
+++ b/searchcore/src/tests/proton/persistenceconformance/CMakeLists.txt
@@ -0,0 +1,6 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_test(
+ NAME searchcore_persistenceconformance_test_app
+ COMMAND ../../../apps/tests/searchcore_persistenceconformance_test_app
+ ENVIRONMENT "VESPA_LOG_TARGET=file:vlog.txt"
+)
diff --git a/searchcore/src/tests/proton/persistenceconformance/DESC b/searchcore/src/tests/proton/persistenceconformance/DESC
new file mode 100644
index 00000000000..98392d5a316
--- /dev/null
+++ b/searchcore/src/tests/proton/persistenceconformance/DESC
@@ -0,0 +1 @@
+Persistence provider conformance test for proton integration. Take a look at persistenceconformance_test.cpp for details.
diff --git a/searchcore/src/tests/proton/persistenceconformance/FILES b/searchcore/src/tests/proton/persistenceconformance/FILES
new file mode 100644
index 00000000000..6912ecd1d9b
--- /dev/null
+++ b/searchcore/src/tests/proton/persistenceconformance/FILES
@@ -0,0 +1 @@
+persistenceconformance_test.cpp
diff --git a/searchcore/src/tests/proton/persistenceengine/.gitignore b/searchcore/src/tests/proton/persistenceengine/.gitignore
new file mode 100644
index 00000000000..93d1e27e9c6
--- /dev/null
+++ b/searchcore/src/tests/proton/persistenceengine/.gitignore
@@ -0,0 +1 @@
+searchcore_persistenceengine_test_app
diff --git a/searchcore/src/tests/proton/persistenceengine/CMakeLists.txt b/searchcore/src/tests/proton/persistenceengine/CMakeLists.txt
new file mode 100644
index 00000000000..b5e42ac5075
--- /dev/null
+++ b/searchcore/src/tests/proton/persistenceengine/CMakeLists.txt
@@ -0,0 +1,10 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_persistenceengine_test_app
+ SOURCES
+ persistenceengine_test.cpp
+ DEPENDS
+ searchcore_persistenceengine
+ searchcore_pcommon
+ searchcore_proton_metrics
+)
+vespa_add_test(NAME searchcore_persistenceengine_test_app COMMAND searchcore_persistenceengine_test_app)
diff --git a/searchcore/src/tests/proton/persistenceengine/DESC b/searchcore/src/tests/proton/persistenceengine/DESC
new file mode 100644
index 00000000000..ec363711b48
--- /dev/null
+++ b/searchcore/src/tests/proton/persistenceengine/DESC
@@ -0,0 +1 @@
+persistenceengine test. Take a look at persistenceengine_test.cpp for details.
diff --git a/searchcore/src/tests/proton/persistenceengine/FILES b/searchcore/src/tests/proton/persistenceengine/FILES
new file mode 100644
index 00000000000..12d47ca9632
--- /dev/null
+++ b/searchcore/src/tests/proton/persistenceengine/FILES
@@ -0,0 +1 @@
+persistenceengine_test.cpp
diff --git a/searchcore/src/tests/proton/persistenceengine/persistenceengine_test.cpp b/searchcore/src/tests/proton/persistenceengine/persistenceengine_test.cpp
new file mode 100644
index 00000000000..4346f7d43c1
--- /dev/null
+++ b/searchcore/src/tests/proton/persistenceengine/persistenceengine_test.cpp
@@ -0,0 +1,828 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("persistenceengine_test");
+
+#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/persistence/spi/documentselection.h>
+#include <vespa/searchcore/proton/persistenceengine/bucket_guard.h>
+#include <vespa/searchcore/proton/persistenceengine/ipersistenceengineowner.h>
+#include <vespa/searchcore/proton/persistenceengine/persistenceengine.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/document/fieldset/fieldsets.h>
+#include <set>
+
+using document::BucketId;
+using document::Document;
+using document::DocumentId;
+using document::DocumentType;
+using search::DocumentMetaData;
+using storage::spi::BucketChecksum;
+using storage::spi::BucketInfo;
+using storage::spi::ClusterState;
+using storage::spi::DocumentSelection;
+using storage::spi::GetResult;
+using namespace proton;
+using namespace vespalib;
+
+DocumentType
+createDocType(const vespalib::string &name, int32_t id)
+{
+ return DocumentType(name, id);
+}
+
+
+document::Document::SP
+createDoc(const DocumentType &docType, const DocumentId &docId)
+{
+ return document::Document::SP(new document::Document(docType, docId));
+}
+
+
+document::DocumentUpdate::SP
+createUpd(const DocumentType& docType, const DocumentId &docId)
+{
+ return document::DocumentUpdate::SP(new document::DocumentUpdate(docType, docId));
+}
+
+
+document::Document::UP
+clone(const document::Document::SP &doc)
+{
+ return document::Document::UP(doc->clone());
+}
+
+
+document::DocumentUpdate::UP
+clone(const document::DocumentUpdate::SP &upd)
+{
+ return document::DocumentUpdate::UP(upd->clone());
+}
+
+
+storage::spi::ClusterState
+createClusterState(const storage::lib::State& nodeState =
+ storage::lib::State::UP)
+{
+ using storage::lib::Distribution;
+ using storage::lib::Node;
+ using storage::lib::NodeState;
+ using storage::lib::NodeType;
+ using storage::lib::State;
+ using vespa::config::content::StorDistributionConfigBuilder;
+ typedef StorDistributionConfigBuilder::Group Group;
+ typedef Group::Nodes Nodes;
+ storage::lib::ClusterState cstate;
+ StorDistributionConfigBuilder dc;
+
+ cstate.setNodeState(Node(NodeType::STORAGE, 0),
+ NodeState(NodeType::STORAGE,
+ nodeState,
+ "dummy desc",
+ 1.0,
+ 1));
+ cstate.setClusterState(State::UP);
+ dc.redundancy = 1;
+ dc.readyCopies = 1;
+ dc.group.push_back(Group());
+ Group &g(dc.group[0]);
+ g.index = "invalid";
+ g.name = "invalid";
+ g.capacity = 1.0;
+ g.partitions = "";
+ g.nodes.push_back(Nodes());
+ Nodes &n(g.nodes[0]);
+ n.index = 0;
+ Distribution dist(dc);
+ return ClusterState(cstate, 0, dist);
+}
+
+
+struct MyDocumentRetriever : DocumentRetrieverBaseForTest {
+ document::DocumentTypeRepo repo;
+ const Document *document;
+ Timestamp timestamp;
+ DocumentId &last_doc_id;
+
+ MyDocumentRetriever(const Document *d, Timestamp ts, DocumentId &last_id)
+ : repo(), document(d), timestamp(ts), last_doc_id(last_id) {}
+ virtual const document::DocumentTypeRepo &getDocumentTypeRepo() const {
+ return repo;
+ }
+ virtual void getBucketMetaData(const storage::spi::Bucket &,
+ search::DocumentMetaData::Vector &v) const {
+ if (document != 0) {
+ v.push_back(getDocumentMetaData(document->getId()));
+ }
+ }
+ virtual DocumentMetaData getDocumentMetaData(const DocumentId &id) const {
+ last_doc_id = id;
+ if (document != 0) {
+ return DocumentMetaData(1, timestamp, document::BucketId(1),
+ document->getId().getGlobalId());
+ }
+ return DocumentMetaData();
+ }
+ virtual document::Document::UP getDocument(search::DocumentIdT) const {
+ if (document != 0) {
+ return Document::UP(document->clone());
+ }
+ return Document::UP();
+ }
+
+ virtual CachedSelect::SP
+ parseSelect(const vespalib::string &) const
+ {
+ return CachedSelect::SP();
+ }
+};
+
/**
 * Test double implementing both IPersistenceHandler and IBucketFreezer.
 * Records the last operation it saw (bucket / timestamp / document id) and
 * replies with canned results that each test configures up front.
 */
struct MyHandler : public IPersistenceHandler, IBucketFreezer {
    bool initialized;             // set once initialize() has been called
    Bucket lastBucket;            // bucket of the last handled feed operation
    Timestamp lastTimestamp;      // timestamp of the last handled feed operation
    DocumentId lastDocId;         // doc id of the last handled feed operation
    Timestamp existingTimestamp;  // timestamp reported for a pre-existing document
    const ClusterState* lastCalc; // last cluster state passed to handleSetClusterState()
    storage::spi::BucketInfo::ActiveState lastBucketState; // last state from handleSetActiveState()
    BucketIdListResult::List bucketList;    // canned reply for handleListBuckets()
    Result bucketStateResult;               // canned reply for handleSetActiveState()
    BucketInfo bucketInfo;                  // canned reply for handleGetBucketInfo()
    Result deleteBucketResult;              // canned reply for handleDeleteBucket()
    BucketIdListResult::List modBucketList; // canned reply for handleGetModifiedBuckets()
    Result _splitResult;                    // canned reply for handleSplit()
    Result _joinResult;                     // canned reply for handleJoin()
    Result _createBucketResult;             // canned reply for handleCreateBucket()
    const Document *document;               // document served by the retrievers
    std::multiset<uint64_t> frozen;         // bucket ids currently frozen
    std::multiset<uint64_t> was_frozen;     // bucket ids ever frozen (never erased)

    MyHandler()
        : initialized(false),
          lastBucket(),
          lastTimestamp(),
          lastDocId(),
          existingTimestamp(),
          lastCalc(NULL),
          lastBucketState(),
          bucketList(),
          bucketStateResult(),
          bucketInfo(),
          deleteBucketResult(),
          modBucketList(),
          _splitResult(),
          _joinResult(),
          _createBucketResult(),
          document(0),
          frozen(),
          was_frozen()
    {
    }

    void setExistingTimestamp(Timestamp ts) {
        existingTimestamp = ts;
    }
    // Serve the given document (with timestamp ts) from this handler.
    void setDocument(const Document &doc, Timestamp ts) {
        document = &doc;
        setExistingTimestamp(ts);
    }
    // Common bookkeeping for feed operations: record parameters and ack.
    void handle(FeedToken token, const Bucket &bucket, Timestamp timestamp, const DocumentId &docId) {
        lastBucket = bucket;
        lastTimestamp = timestamp;
        lastDocId = docId;
        token.ack();
    }

    virtual void initialize() { initialized = true; }

    virtual void handlePut(FeedToken token, const Bucket& bucket,
                           Timestamp timestamp, const document::Document::SP& doc) {
        token.setResult(ResultUP(new storage::spi::Result()), false);
        handle(token, bucket, timestamp, doc->getId());
    }

    virtual void handleUpdate(FeedToken token, const Bucket& bucket,
                              Timestamp timestamp, const document::DocumentUpdate::SP& upd) {
        // Document "exists" iff a non-zero existingTimestamp was configured.
        token.setResult(ResultUP(new storage::spi::UpdateResult(existingTimestamp)),
                        existingTimestamp > 0);
        handle(token, bucket, timestamp, upd->getId());
    }

    virtual void handleRemove(FeedToken token, const Bucket& bucket,
                              Timestamp timestamp, const DocumentId& id) {
        bool wasFound = existingTimestamp > 0;
        token.setResult(ResultUP(new storage::spi::RemoveResult(wasFound)), wasFound);
        handle(token, bucket, timestamp, id);
    }

    virtual void handleListBuckets(IBucketIdListResultHandler &resultHandler) {
        resultHandler.handle(BucketIdListResult(bucketList));
    }

    virtual void handleSetClusterState(const ClusterState &calc,
                                       IGenericResultHandler &resultHandler) {
        // Only the pointer is recorded; caller must keep the state alive.
        lastCalc = &calc;
        resultHandler.handle(Result());
    }

    virtual void handleSetActiveState(const Bucket &bucket,
                                      storage::spi::BucketInfo::ActiveState newState,
                                      IGenericResultHandler &resultHandler) {
        lastBucket = bucket;
        lastBucketState = newState;
        resultHandler.handle(bucketStateResult);
    }

    virtual void handleGetBucketInfo(const Bucket &,
                                     IBucketInfoResultHandler &resultHandler) {
        resultHandler.handle(BucketInfoResult(bucketInfo));
    }

    virtual void
    handleCreateBucket(FeedToken token,
                       const storage::spi::Bucket &)
    {
        token.setResult(ResultUP(new Result(_createBucketResult)), true);
        token.ack();
    }

    virtual void handleDeleteBucket(FeedToken token,
                                    const storage::spi::Bucket &) {
        token.setResult(ResultUP(new Result(deleteBucketResult)), true);
        token.ack();
    }

    virtual void handleGetModifiedBuckets(IBucketIdListResultHandler &resultHandler) {
        resultHandler.handle(BucketIdListResult(modBucketList));
    }

    virtual void
    handleSplit(FeedToken token,
                const storage::spi::Bucket &source,
                const storage::spi::Bucket &target1,
                const storage::spi::Bucket &target2)
    {
        (void) source;
        (void) target1;
        (void) target2;
        token.setResult(ResultUP(new Result(_splitResult)), true);
        token.ack();
    }

    virtual void
    handleJoin(FeedToken token,
               const storage::spi::Bucket &source1,
               const storage::spi::Bucket &source2,
               const storage::spi::Bucket &target)
    {
        (void) source1;
        (void) source2;
        (void) target;
        token.setResult(ResultUP(new Result(_joinResult)), true);
        token.ack();
    }

    // Two retrievers: an empty one first, then one serving 'document'.
    // Tests use this to verify the engine consults all retrievers.
    virtual RetrieversSP getDocumentRetrievers() {
        RetrieversSP ret(new std::vector<IDocumentRetriever::SP>);
        ret->push_back(IDocumentRetriever::SP(new MyDocumentRetriever(
                        0, Timestamp(), lastDocId)));
        ret->push_back(IDocumentRetriever::SP(new MyDocumentRetriever(
                        document, existingTimestamp, lastDocId)));
        return ret;
    }

    virtual BucketGuard::UP lockBucket(const storage::spi::Bucket &b) {
        return BucketGuard::UP(new BucketGuard(b.getBucketId(), *this));
    }

    virtual void
    handleListActiveBuckets(IBucketIdListResultHandler &resultHandler)
    {
        // Always reports an empty active-bucket list.
        BucketIdListResult::List list;
        resultHandler.handle(BucketIdListResult(list));
    }

    virtual void
    handlePopulateActiveBuckets(document::BucketId::List &buckets,
                                IGenericResultHandler &resultHandler)
    {
        (void) buckets;
        resultHandler.handle(Result());
    }

    // IBucketFreezer: track freeze/thaw pairs; multiset allows nesting.
    virtual void freezeBucket(BucketId bucket) {
        frozen.insert(bucket.getId());
        was_frozen.insert(bucket.getId());
    }
    virtual void thawBucket(BucketId bucket) {
        std::multiset<uint64_t>::iterator it = frozen.find(bucket.getId());
        ASSERT_TRUE(it != frozen.end());  // thaw without matching freeze is a test bug
        frozen.erase(it);
    }
    bool isFrozen(const Bucket &bucket) {
        return frozen.find(bucket.getBucketId().getId()) != frozen.end();
    }
    bool wasFrozen(const Bucket &bucket) {
        return was_frozen.find(bucket.getBucketId().getId())
            != was_frozen.end();
    }
};
+
+
/**
 * Owns two MyHandler instances behind the IPersistenceHandler interface,
 * plus typed references so tests can inspect the concrete handlers.
 */
struct HandlerSet {
    IPersistenceHandler::SP phandler1;
    IPersistenceHandler::SP phandler2;
    MyHandler &handler1;   // same object as *phandler1
    MyHandler &handler2;   // same object as *phandler2
    HandlerSet() :
        phandler1(new MyHandler()),
        phandler2(new MyHandler()),
        handler1(static_cast<MyHandler &>(*phandler1.get())),
        handler2(static_cast<MyHandler &>(*phandler2.get()))
    {}
};
+
+
// Shared fixtures: three document types, each with one document and one
// update. type1/type2 get handlers in SimpleFixture; type3 deliberately
// has none so "no handler" error paths can be exercised.
DocumentType type1(createDocType("type1", 1));
DocumentType type2(createDocType("type2", 2));
DocumentType type3(createDocType("type3", 3));
DocumentId docId0;   // default (empty) document id
DocumentId docId1("id:type1:type1::1");
DocumentId docId2("id:type2:type2::1");
DocumentId docId3("id:type3:type3::1");
Document::SP doc1(createDoc(type1, docId1));
Document::SP doc2(createDoc(type2, docId2));
Document::SP doc3(createDoc(type3, docId3));
// Uses the legacy "doc:" id scheme, which the engine rejects.
Document::SP old_doc(createDoc(type1, DocumentId("doc:old:id-scheme")));
document::DocumentUpdate::SP upd1(createUpd(type1, docId1));
document::DocumentUpdate::SP upd2(createUpd(type2, docId2));
document::DocumentUpdate::SP upd3(createUpd(type3, docId3));
PartitionId partId(0);
BucketId bckId1(1);
BucketId bckId2(2);
BucketId bckId3(3);
Bucket bucket0;   // default (invalid) bucket
Bucket bucket1(bckId1, partId);
Bucket bucket2(bckId2, partId);
BucketChecksum checksum1(1);
BucketChecksum checksum2(2);
BucketChecksum checksum3(1+2);  // expected merge of checksum1 and checksum2
BucketInfo bucketInfo1(checksum1, 1, 0, 1, 0);
BucketInfo bucketInfo2(checksum2, 2, 0, 2, 0);
BucketInfo bucketInfo3(checksum3, 3, 0, 3, 0);  // expected merged info
Timestamp tstamp0;  // default (zero) timestamp
Timestamp tstamp1(1);
Timestamp tstamp2(2);
Timestamp tstamp3(3);
DocumentSelection doc_sel("");  // empty selection: matches everything
Selection selection(doc_sel);
+
+
+class SimplePersistenceEngineOwner : public IPersistenceEngineOwner
+{
+ virtual void
+ setClusterState(const storage::spi::ClusterState &calc)
+ {
+ (void) calc;
+ }
+};
+
+struct SimpleResourceWriteFilter : public IResourceWriteFilter
+{
+ bool _acceptWriteOperation;
+ vespalib::string _message;
+ SimpleResourceWriteFilter()
+ : _acceptWriteOperation(true),
+ _message()
+ {}
+
+ virtual bool acceptWriteOperation() const override { return _acceptWriteOperation; }
+ virtual State getAcceptState() const override {
+ return IResourceWriteFilter::State(acceptWriteOperation(), _message);
+ }
+};
+
+
/**
 * Engine wired with two handlers: documents of type1 route to handler1 and
 * type2 to handler2; type3 intentionally has no handler registered.
 */
struct SimpleFixture {
    SimplePersistenceEngineOwner _owner;
    SimpleResourceWriteFilter _writeFilter;
    PersistenceEngine engine;
    HandlerSet hset;
    SimpleFixture()
        : _owner(),
          // NOTE(review): the meaning of the trailing (-1, false) arguments is
          // not visible here -- presumably defaults for serialized-size limit
          // and a feature toggle; confirm against the PersistenceEngine ctor.
          engine(_owner, _writeFilter, -1, false),
          hset()
    {
        engine.putHandler(DocTypeName(doc1->getType()), hset.phandler1);
        engine.putHandler(DocTypeName(doc2->getType()), hset.phandler2);
    }
};
+
+
/**
 * Assert that the handler recorded exactly this bucket, timestamp and
 * document id for its most recent feed operation.
 */
void
assertHandler(const Bucket &expBucket, Timestamp expTimestamp,
              const DocumentId &expDocId, const MyHandler &handler)
{
    EXPECT_EQUAL(expBucket, handler.lastBucket);
    EXPECT_EQUAL(expTimestamp, handler.lastTimestamp);
    EXPECT_EQUAL(expDocId, handler.lastDocId);
}
+
+
// initialize() must be forwarded to every registered handler.
TEST_F("require that getPartitionStates() prepares all handlers", SimpleFixture)
{
    EXPECT_FALSE(f.hset.handler1.initialized);
    EXPECT_FALSE(f.hset.handler2.initialized);
    f.engine.initialize();
    EXPECT_TRUE(f.hset.handler1.initialized);
    EXPECT_TRUE(f.hset.handler2.initialized);
}
+
+
// Puts are dispatched by document type: doc1 -> handler1, doc2 -> handler2,
// and a type without a handler yields a PERMANENT_ERROR.
TEST_F("require that puts are routed to handler", SimpleFixture)
{
    storage::spi::LoadType loadType(0, "default");
    Context context(loadType, storage::spi::Priority(0),
                    storage::spi::Trace::TraceLevel(0));
    f.engine.put(bucket1, tstamp1, doc1, context);
    assertHandler(bucket1, tstamp1, docId1, f.hset.handler1);
    assertHandler(bucket0, tstamp0, docId0, f.hset.handler2);  // untouched

    f.engine.put(bucket1, tstamp1, doc2, context);
    assertHandler(bucket1, tstamp1, docId1, f.hset.handler1);
    assertHandler(bucket1, tstamp1, docId2, f.hset.handler2);

    EXPECT_EQUAL(
            Result(Result::PERMANENT_ERROR, "No handler for document type 'type3'"),
            f.engine.put(bucket1, tstamp1, doc3, context));
}
+
+
// Legacy "doc:" ids are rejected before reaching any handler.
TEST_F("require that puts with old id scheme are rejected", SimpleFixture) {
    storage::spi::LoadType loadType(0, "default");
    Context context(loadType, storage::spi::Priority(0),
                    storage::spi::Trace::TraceLevel(0));
    EXPECT_EQUAL(
            Result(Result::PERMANENT_ERROR, "Old id scheme not supported in "
                   "elastic mode (doc:old:id-scheme)"),
            f.engine.put(bucket1, tstamp1, old_doc, context));
}
+
+
// When the write filter rejects, put fails with RESOURCE_EXHAUSTED and
// the filter's message is embedded in the error.
TEST_F("require that put is rejected if resource limit is reached", SimpleFixture)
{
    f._writeFilter._acceptWriteOperation = false;
    f._writeFilter._message = "Disk is full";

    storage::spi::LoadType loadType(0, "default");
    Context context(loadType, storage::spi::Priority(0),
                    storage::spi::Trace::TraceLevel(0));
    EXPECT_EQUAL(
            Result(Result::RESOURCE_EXHAUSTED,
                   "Put operation rejected for document 'doc:old:id-scheme': 'Disk is full'"),
            f.engine.put(bucket1, tstamp1, old_doc, context));
}
+
+
// Updates route by type like puts, and the UpdateResult carries the
// handler's configured existing timestamp.
TEST_F("require that updates are routed to handler", SimpleFixture)
{
    storage::spi::LoadType loadType(0, "default");
    Context context(loadType, storage::spi::Priority(0),
                    storage::spi::Trace::TraceLevel(0));
    f.hset.handler1.setExistingTimestamp(tstamp2);
    UpdateResult ur = f.engine.update(bucket1, tstamp1, upd1, context);
    assertHandler(bucket1, tstamp1, docId1, f.hset.handler1);
    assertHandler(bucket0, tstamp0, docId0, f.hset.handler2);  // untouched
    EXPECT_EQUAL(tstamp2, ur.getExistingTimestamp());

    f.hset.handler2.setExistingTimestamp(tstamp3);
    ur = f.engine.update(bucket1, tstamp1, upd2, context);
    assertHandler(bucket1, tstamp1, docId1, f.hset.handler1);
    assertHandler(bucket1, tstamp1, docId2, f.hset.handler2);
    EXPECT_EQUAL(tstamp3, ur.getExistingTimestamp());

    EXPECT_EQUAL(
            Result(Result::PERMANENT_ERROR, "No handler for document type 'type3'"),
            f.engine.update(bucket1, tstamp1, upd3, context));
}
+
+
// Updates are also blocked by the resource write filter.
TEST_F("require that update is rejected if resource limit is reached", SimpleFixture)
{
    f._writeFilter._acceptWriteOperation = false;
    f._writeFilter._message = "Disk is full";

    storage::spi::LoadType loadType(0, "default");
    Context context(loadType, storage::spi::Priority(0),
                    storage::spi::Trace::TraceLevel(0));

    EXPECT_EQUAL(
            Result(Result::RESOURCE_EXHAUSTED,
                   "Update operation rejected for document 'id:type1:type1::1': 'Disk is full'"),
            f.engine.update(bucket1, tstamp1, upd1, context));
}
+
+
// Removes route by type; wasFound() reflects whether the target handler
// reported a pre-existing document (non-zero existingTimestamp).
TEST_F("require that removes are routed to handlers", SimpleFixture)
{
    storage::spi::LoadType loadType(0, "default");
    Context context(loadType, storage::spi::Priority(0),
                    storage::spi::Trace::TraceLevel(0));
    // type3 has no handler: nothing recorded, nothing found.
    RemoveResult rr = f.engine.remove(bucket1, tstamp1, docId3, context);
    assertHandler(bucket0, tstamp0, docId0, f.hset.handler1);
    assertHandler(bucket0, tstamp0, docId0, f.hset.handler2);
    EXPECT_FALSE(rr.wasFound());

    f.hset.handler1.setExistingTimestamp(tstamp2);
    rr = f.engine.remove(bucket1, tstamp1, docId1, context);
    assertHandler(bucket1, tstamp1, docId1, f.hset.handler1);
    assertHandler(bucket0, tstamp0, docId0, f.hset.handler2);
    EXPECT_TRUE(rr.wasFound());

    f.hset.handler1.setExistingTimestamp(tstamp0);
    f.hset.handler2.setExistingTimestamp(tstamp3);
    rr = f.engine.remove(bucket1, tstamp1, docId2, context);
    assertHandler(bucket1, tstamp1, docId1, f.hset.handler1);
    assertHandler(bucket1, tstamp1, docId2, f.hset.handler2);
    EXPECT_TRUE(rr.wasFound());

    // Document no longer "exists" in handler2: remove reports not found.
    f.hset.handler2.setExistingTimestamp(tstamp0);
    rr = f.engine.remove(bucket1, tstamp1, docId2, context);
    assertHandler(bucket1, tstamp1, docId1, f.hset.handler1);
    assertHandler(bucket1, tstamp1, docId2, f.hset.handler2);
    EXPECT_FALSE(rr.wasFound());
}
+
+
// Removes free resources, so the write filter must NOT block them.
TEST_F("require that remove is NOT rejected if resource limit is reached", SimpleFixture)
{
    f._writeFilter._acceptWriteOperation = false;
    f._writeFilter._message = "Disk is full";

    storage::spi::LoadType loadType(0, "default");
    Context context(loadType, storage::spi::Priority(0),
                    storage::spi::Trace::TraceLevel(0));

    EXPECT_EQUAL(RemoveResult(false), f.engine.remove(bucket1, tstamp1, docId1, context));
}
+
+
// listBuckets() unions the handlers' lists and deduplicates (bckId2
// appears in both but only once in the result); wrong partition -> empty.
TEST_F("require that listBuckets() is routed to handlers and merged", SimpleFixture)
{
    f.hset.handler1.bucketList.push_back(bckId1);
    f.hset.handler1.bucketList.push_back(bckId2);
    f.hset.handler2.bucketList.push_back(bckId2);
    f.hset.handler2.bucketList.push_back(bckId3);

    EXPECT_TRUE(f.engine.listBuckets(PartitionId(1)).getList().empty());
    BucketIdListResult result = f.engine.listBuckets(partId);
    const BucketIdListResult::List &bucketList = result.getList();
    EXPECT_EQUAL(3u, bucketList.size());
    EXPECT_EQUAL(bckId1, bucketList[0]);
    EXPECT_EQUAL(bckId2, bucketList[1]);
    EXPECT_EQUAL(bckId3, bucketList[2]);
}
+
+
// The same ClusterState instance is forwarded to every handler.
TEST_F("require that setClusterState() is routed to handlers", SimpleFixture)
{
    ClusterState state(createClusterState());

    f.engine.setClusterState(state);
    EXPECT_EQUAL(&state, f.hset.handler1.lastCalc);
    EXPECT_EQUAL(&state, f.hset.handler2.lastCalc);
}
+
+
// Per-handler results are merged: the worst error code wins and the
// messages are concatenated.
TEST_F("require that setActiveState() is routed to handlers and merged", SimpleFixture)
{
    f.hset.handler1.bucketStateResult = Result(Result::TRANSIENT_ERROR, "err1");
    f.hset.handler2.bucketStateResult = Result(Result::PERMANENT_ERROR, "err2");

    Result result = f.engine.setActiveState(bucket1,
                                            storage::spi::BucketInfo::NOT_ACTIVE);
    EXPECT_EQUAL(Result::PERMANENT_ERROR, result.getErrorCode());
    EXPECT_EQUAL("err1, err2", result.getErrorMessage());
    EXPECT_EQUAL(storage::spi::BucketInfo::NOT_ACTIVE, f.hset.handler1.lastBucketState);
    EXPECT_EQUAL(storage::spi::BucketInfo::NOT_ACTIVE, f.hset.handler2.lastBucketState);

    f.engine.setActiveState(bucket1, storage::spi::BucketInfo::ACTIVE);
    EXPECT_EQUAL(storage::spi::BucketInfo::ACTIVE, f.hset.handler1.lastBucketState);
    EXPECT_EQUAL(storage::spi::BucketInfo::ACTIVE, f.hset.handler2.lastBucketState);
}
+
+
// Bucket infos from both handlers merge into bucketInfo3 (summed counts,
// combined checksum).
TEST_F("require that getBucketInfo() is routed to handlers and merged", SimpleFixture)
{
    f.hset.handler1.bucketInfo = bucketInfo1;
    f.hset.handler2.bucketInfo = bucketInfo2;

    BucketInfoResult result = f.engine.getBucketInfo(bucket1);
    EXPECT_EQUAL(bucketInfo3, result.getBucketInfo());
}
+
+
// createBucket() results merge like setActiveState(): worst code, joined
// messages.
TEST_F("require that createBucket() is routed to handlers and merged",
       SimpleFixture)
{
    storage::spi::LoadType loadType(0, "default");
    Context context(loadType, storage::spi::Priority(0),
                    storage::spi::Trace::TraceLevel(0));
    f.hset.handler1._createBucketResult =
        Result(Result::TRANSIENT_ERROR, "err1a");
    f.hset.handler2._createBucketResult =
        Result(Result::PERMANENT_ERROR, "err2a");

    Result result = f.engine.createBucket(bucket1, context);
    EXPECT_EQUAL(Result::PERMANENT_ERROR, result.getErrorCode());
    EXPECT_EQUAL("err1a, err2a", result.getErrorMessage());
}
+
+
// deleteBucket() results merge the same way.
TEST_F("require that deleteBucket() is routed to handlers and merged", SimpleFixture)
{
    storage::spi::LoadType loadType(0, "default");
    Context context(loadType, storage::spi::Priority(0),
                    storage::spi::Trace::TraceLevel(0));
    f.hset.handler1.deleteBucketResult = Result(Result::TRANSIENT_ERROR, "err1");
    f.hset.handler2.deleteBucketResult = Result(Result::PERMANENT_ERROR, "err2");

    Result result = f.engine.deleteBucket(bucket1, context);
    EXPECT_EQUAL(Result::PERMANENT_ERROR, result.getErrorCode());
    EXPECT_EQUAL("err1, err2", result.getErrorMessage());
}
+
+
// Modified-bucket lists are unioned and deduplicated across handlers.
TEST_F("require that getModifiedBuckets() is routed to handlers and merged", SimpleFixture)
{
    f.hset.handler1.modBucketList.push_back(bckId1);
    f.hset.handler1.modBucketList.push_back(bckId2);
    f.hset.handler2.modBucketList.push_back(bckId2);
    f.hset.handler2.modBucketList.push_back(bckId3);

    BucketIdListResult result = f.engine.getModifiedBuckets();
    const BucketIdListResult::List &bucketList = result.getList();
    EXPECT_EQUAL(3u, bucketList.size());
    EXPECT_EQUAL(bckId1, bucketList[0]);
    EXPECT_EQUAL(bckId2, bucketList[1]);
    EXPECT_EQUAL(bckId3, bucketList[2]);
}
+
+
// With no documents configured, get() still queries every handler's
// retrievers (both record the looked-up id).
TEST_F("require that get is sent to all handlers", SimpleFixture) {
    storage::spi::LoadType loadType(0, "default");
    Context context(loadType, storage::spi::Priority(0),
                    storage::spi::Trace::TraceLevel(0));
    GetResult result = f.engine.get(bucket1, document::AllFields(), docId1,
                                    context);

    EXPECT_EQUAL(docId1, f.hset.handler1.lastDocId);
    EXPECT_EQUAL(docId1, f.hset.handler2.lastDocId);
}
+
// get() freezes the bucket for its duration and thaws it before returning.
TEST_F("require that get freezes the bucket", SimpleFixture) {
    EXPECT_FALSE(f.hset.handler1.wasFrozen(bucket1));
    EXPECT_FALSE(f.hset.handler2.wasFrozen(bucket1));
    storage::spi::LoadType loadType(0, "default");
    Context context(loadType, storage::spi::Priority(0),
                    storage::spi::Trace::TraceLevel(0));
    f.engine.get(bucket1, document::AllFields(), docId1, context);
    EXPECT_TRUE(f.hset.handler1.wasFrozen(bucket1));
    EXPECT_TRUE(f.hset.handler2.wasFrozen(bucket1));
    EXPECT_FALSE(f.hset.handler1.isFrozen(bucket1));
    EXPECT_FALSE(f.hset.handler2.isFrozen(bucket1));
}
+
// get() stops at the first handler that has the document: handler2's
// retriever is never consulted for the id.
TEST_F("require that get returns the first document found", SimpleFixture) {
    f.hset.handler1.setDocument(*doc1, tstamp1);
    f.hset.handler2.setDocument(*doc2, tstamp2);
    storage::spi::LoadType loadType(0, "default");
    Context context(loadType, storage::spi::Priority(0),
                    storage::spi::Trace::TraceLevel(0));
    GetResult result = f.engine.get(bucket1, document::AllFields(), docId1,
                                    context);

    EXPECT_EQUAL(docId1, f.hset.handler1.lastDocId);
    EXPECT_EQUAL(DocumentId(), f.hset.handler2.lastDocId);

    EXPECT_EQUAL(tstamp1, result.getTimestamp());
    ASSERT_TRUE(result.hasDocument());
    EXPECT_EQUAL(*doc1, result.getDocument());
}
+
// Smoke test: createIterator() hands back a usable, non-zero iterator id.
TEST_F("require that createIterator does", SimpleFixture) {
    storage::spi::LoadType loadType(0, "default");
    Context context(loadType, storage::spi::Priority(0),
                    storage::spi::Trace::TraceLevel(0));
    CreateIteratorResult result =
        f.engine.createIterator(bucket1, document::AllFields(), selection,
                                storage::spi::NEWEST_DOCUMENT_ONLY, context);
    EXPECT_FALSE(result.hasError());
    EXPECT_TRUE(result.getIteratorId());

    uint64_t max_size = 1024;
    IterateResult it_result =
        f.engine.iterate(result.getIteratorId(), max_size, context);
    EXPECT_FALSE(it_result.hasError());
}
+
// Two iterators over the same bucket must get distinct ids.
TEST_F("require that iterator ids are unique", SimpleFixture) {
    storage::spi::LoadType loadType(0, "default");
    Context context(loadType, storage::spi::Priority(0),
                    storage::spi::Trace::TraceLevel(0));
    CreateIteratorResult result =
        f.engine.createIterator(bucket1, document::AllFields(), selection,
                                storage::spi::NEWEST_DOCUMENT_ONLY, context);
    CreateIteratorResult result2 =
        f.engine.createIterator(bucket1, document::AllFields(), selection,
                                storage::spi::NEWEST_DOCUMENT_ONLY, context);
    EXPECT_FALSE(result.hasError());
    EXPECT_FALSE(result2.hasError());
    EXPECT_NOT_EQUAL(result.getIteratorId(), result2.getIteratorId());
}
+
// Iterating an id that was never created is a PERMANENT_ERROR; a freshly
// created id works.
TEST_F("require that iterate requires valid iterator", SimpleFixture) {
    uint64_t max_size = 1024;
    storage::spi::LoadType loadType(0, "default");
    Context context(loadType, storage::spi::Priority(0),
                    storage::spi::Trace::TraceLevel(0));
    IterateResult it_result = f.engine.iterate(IteratorId(1), max_size,
                                               context);
    EXPECT_TRUE(it_result.hasError());
    EXPECT_EQUAL(Result::PERMANENT_ERROR, it_result.getErrorCode());
    EXPECT_EQUAL("Unknown iterator with id 1", it_result.getErrorMessage());

    CreateIteratorResult result =
        f.engine.createIterator(bucket1, document::AllFields(), selection,
                                storage::spi::NEWEST_DOCUMENT_ONLY, context);
    EXPECT_TRUE(result.getIteratorId());

    it_result = f.engine.iterate(result.getIteratorId(), max_size, context);
    EXPECT_FALSE(it_result.hasError());
}
+
// One document per handler -> two entries from a single iterate() pass.
TEST_F("require that iterate returns documents", SimpleFixture) {
    f.hset.handler1.setDocument(*doc1, tstamp1);
    f.hset.handler2.setDocument(*doc2, tstamp2);

    storage::spi::LoadType loadType(0, "default");
    Context context(loadType, storage::spi::Priority(0),
                    storage::spi::Trace::TraceLevel(0));
    uint64_t max_size = 1024;
    CreateIteratorResult result =
        f.engine.createIterator(bucket1, document::AllFields(), selection,
                                storage::spi::NEWEST_DOCUMENT_ONLY, context);
    EXPECT_TRUE(result.getIteratorId());

    IterateResult it_result =
        f.engine.iterate(result.getIteratorId(), max_size, context);
    EXPECT_FALSE(it_result.hasError());
    EXPECT_EQUAL(2u, it_result.getEntries().size());
}
+
// After destroyIterator() the id is unknown again; only the message prefix
// is checked since the numeric id is not predictable here.
TEST_F("require that destroyIterator prevents iteration", SimpleFixture) {
    f.hset.handler1.setDocument(*doc1, tstamp1);

    storage::spi::LoadType loadType(0, "default");
    Context context(loadType, storage::spi::Priority(0),
                    storage::spi::Trace::TraceLevel(0));
    CreateIteratorResult create_result =
        f.engine.createIterator(bucket1, document::AllFields(), selection,
                                storage::spi::NEWEST_DOCUMENT_ONLY, context);
    EXPECT_TRUE(create_result.getIteratorId());

    Result result = f.engine.destroyIterator(create_result.getIteratorId(),
                                             context);
    EXPECT_FALSE(result.hasError());

    uint64_t max_size = 1024;
    IterateResult it_result =
        f.engine.iterate(create_result.getIteratorId(), max_size, context);
    EXPECT_TRUE(it_result.hasError());
    EXPECT_EQUAL(Result::PERMANENT_ERROR, it_result.getErrorCode());
    string msg_prefix = "Unknown iterator with id";
    EXPECT_EQUAL(msg_prefix,
                 it_result.getErrorMessage().substr(0, msg_prefix.size()));
}
+
// The bucket stays frozen from createIterator() until destroyIterator().
TEST_F("require that buckets are frozen during iterator life", SimpleFixture) {
    EXPECT_FALSE(f.hset.handler1.isFrozen(bucket1));
    EXPECT_FALSE(f.hset.handler2.isFrozen(bucket1));
    storage::spi::LoadType loadType(0, "default");
    Context context(loadType, storage::spi::Priority(0),
                    storage::spi::Trace::TraceLevel(0));
    CreateIteratorResult create_result =
        f.engine.createIterator(bucket1, document::AllFields(), selection,
                                storage::spi::NEWEST_DOCUMENT_ONLY, context);
    EXPECT_TRUE(f.hset.handler1.isFrozen(bucket1));
    EXPECT_TRUE(f.hset.handler2.isFrozen(bucket1));
    f.engine.destroyIterator(create_result.getIteratorId(), context);
    EXPECT_FALSE(f.hset.handler1.isFrozen(bucket1));
    EXPECT_FALSE(f.hset.handler2.isFrozen(bucket1));
}
+
// Test entry point (vespalib testkit).
TEST_MAIN()
{
    TEST_RUN_ALL();
}
+
diff --git a/searchcore/src/tests/proton/proton/CMakeLists.txt b/searchcore/src/tests/proton/proton/CMakeLists.txt
new file mode 100644
index 00000000000..5c90dd5bfcc
--- /dev/null
+++ b/searchcore/src/tests/proton/proton/CMakeLists.txt
@@ -0,0 +1 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
diff --git a/searchcore/src/tests/proton/reprocessing/attribute_reprocessing_initializer/.gitignore b/searchcore/src/tests/proton/reprocessing/attribute_reprocessing_initializer/.gitignore
new file mode 100644
index 00000000000..5d662ccaf21
--- /dev/null
+++ b/searchcore/src/tests/proton/reprocessing/attribute_reprocessing_initializer/.gitignore
@@ -0,0 +1 @@
+searchcore_attribute_reprocessing_initializer_test_app
diff --git a/searchcore/src/tests/proton/reprocessing/attribute_reprocessing_initializer/CMakeLists.txt b/searchcore/src/tests/proton/reprocessing/attribute_reprocessing_initializer/CMakeLists.txt
new file mode 100644
index 00000000000..5e17a1d6606
--- /dev/null
+++ b/searchcore/src/tests/proton/reprocessing/attribute_reprocessing_initializer/CMakeLists.txt
@@ -0,0 +1,10 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_attribute_reprocessing_initializer_test_app
+ SOURCES
+ attribute_reprocessing_initializer_test.cpp
+ DEPENDS
+ searchcore_reprocessing
+ searchcore_attribute
+ searchcore_pcommon
+)
+vespa_add_test(NAME searchcore_attribute_reprocessing_initializer_test_app COMMAND searchcore_attribute_reprocessing_initializer_test_app)
diff --git a/searchcore/src/tests/proton/reprocessing/attribute_reprocessing_initializer/DESC b/searchcore/src/tests/proton/reprocessing/attribute_reprocessing_initializer/DESC
new file mode 100644
index 00000000000..2ae1d0c8164
--- /dev/null
+++ b/searchcore/src/tests/proton/reprocessing/attribute_reprocessing_initializer/DESC
@@ -0,0 +1,2 @@
+Test for attribute reprocessing initializer. Take a look at attribute_reprocessing_initializer_test.cpp for details.
+
diff --git a/searchcore/src/tests/proton/reprocessing/attribute_reprocessing_initializer/FILES b/searchcore/src/tests/proton/reprocessing/attribute_reprocessing_initializer/FILES
new file mode 100644
index 00000000000..6c9084f176d
--- /dev/null
+++ b/searchcore/src/tests/proton/reprocessing/attribute_reprocessing_initializer/FILES
@@ -0,0 +1 @@
+attribute_reprocessing_initializer_test.cpp
diff --git a/searchcore/src/tests/proton/reprocessing/attribute_reprocessing_initializer/attribute_reprocessing_initializer_test.cpp b/searchcore/src/tests/proton/reprocessing/attribute_reprocessing_initializer/attribute_reprocessing_initializer_test.cpp
new file mode 100644
index 00000000000..4670719897a
--- /dev/null
+++ b/searchcore/src/tests/proton/reprocessing/attribute_reprocessing_initializer/attribute_reprocessing_initializer_test.cpp
@@ -0,0 +1,247 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("attribute_reprocessing_initializer_test");
+
+#include <vespa/searchcore/proton/attribute/attribute_populator.h>
+#include <vespa/searchcore/proton/attribute/attributemanager.h>
+#include <vespa/searchcore/proton/attribute/document_field_populator.h>
+#include <vespa/searchcore/proton/reprocessing/attribute_reprocessing_initializer.h>
+#include <vespa/searchcore/proton/reprocessing/i_reprocessing_handler.h>
+#include <vespa/searchcore/proton/test/attribute_utils.h>
+#include <vespa/searchcore/proton/test/directory_handler.h>
+#include <vespa/searchlib/index/dummyfileheadercontext.h>
+#include <vespa/vespalib/test/insertion_operators.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/searchlib/common/foregroundtaskexecutor.h>
+
+using namespace proton;
+using namespace search;
+using namespace search::index;
+
+const vespalib::string TEST_DIR = "test_output";
+const SerialNum INIT_SERIAL_NUM = 10;
+typedef std::vector<vespalib::string> StringVector;
+typedef std::set<vespalib::string> StringSet;
+typedef AttributeReprocessingInitializer::Config ARIConfig;
+
+struct MyReprocessingHandler : public IReprocessingHandler
+{
+ IReprocessingReader::SP _reader;
+ std::vector<IReprocessingRewriter::SP> _rewriters;
+ MyReprocessingHandler() : _reader(), _rewriters() {}
+ virtual void addReader(const IReprocessingReader::SP &reader) {
+ _reader = reader;
+ }
+ virtual void addRewriter(const IReprocessingRewriter::SP &rewriter) {
+ _rewriters.push_back(rewriter);
+ }
+};
+
+struct MyDocTypeInspector : public IDocumentTypeInspector
+{
+ typedef std::shared_ptr<MyDocTypeInspector> SP;
+ std::set<vespalib::string> _fields;
+ MyDocTypeInspector() : _fields() {}
+ virtual bool hasField(const vespalib::string &name) const {
+ return _fields.count(name) > 0;
+ }
+};
+
+struct MyConfig
+{
+ DummyFileHeaderContext _fileHeaderContext;
+ ForegroundTaskExecutor _attributeFieldWriter;
+ AttributeManager::SP _mgr;
+ search::index::Schema _schema;
+ MyDocTypeInspector::SP _inspector;
+ MyConfig()
+ : _fileHeaderContext(),
+ _attributeFieldWriter(),
+ _mgr(new AttributeManager(TEST_DIR, "test.subdb", TuneFileAttributes(),
+ _fileHeaderContext,
+ _attributeFieldWriter)),
+ _schema(),
+ _inspector(new MyDocTypeInspector())
+ {
+ }
+ void addFields(const StringVector &fields) {
+ for (auto field : fields) {
+ _inspector->_fields.insert(field);
+ }
+ }
+ void addAttrs(const StringVector &attrs) {
+ for (auto attr : attrs) {
+ if (attr == "tensor") {
+ _mgr->addAttribute(attr, test::AttributeUtils::getTensorConfig(), 1);
+ _schema.addAttributeField(Schema::AttributeField(attr, Schema::TENSOR));
+ } else if (attr == "predicate") {
+ _mgr->addAttribute(attr, test::AttributeUtils::getPredicateConfig(), 1);
+ _schema.addAttributeField(Schema::AttributeField(attr, Schema::BOOLEANTREE));
+ } else {
+ _mgr->addAttribute(attr, test::AttributeUtils::getStringConfig(), 1);
+ _schema.addAttributeField(Schema::AttributeField(attr, Schema::STRING));
+ }
+ }
+ }
+ void addIndexField(const vespalib::string &name) {
+ _schema.addIndexField(Schema::IndexField(name, Schema::STRING));
+ }
+};
+
+struct Fixture
+{
+ test::DirectoryHandler _dirHandler;
+ DummyFileHeaderContext _fileHeaderContext;
+ ForegroundTaskExecutor _attributeFieldWriter;
+ AttributeManager::SP _mgr;
+ MyConfig _oldCfg;
+ MyConfig _newCfg;
+ AttributeReprocessingInitializer::UP _initializer;
+ MyReprocessingHandler _handler;
+ Fixture()
+ : _dirHandler(TEST_DIR),
+ _fileHeaderContext(),
+ _attributeFieldWriter(),
+ _mgr(new AttributeManager(TEST_DIR, "test.subdb", TuneFileAttributes(),
+ _fileHeaderContext,
+ _attributeFieldWriter)),
+ _initializer(),
+ _handler()
+ {
+ }
+ void init() {
+ _initializer.reset(new AttributeReprocessingInitializer
+ (ARIConfig(_newCfg._mgr, _newCfg._schema, _newCfg._inspector),
+ ARIConfig(_oldCfg._mgr, _oldCfg._schema, _oldCfg._inspector), "test"));
+ _initializer->initialize(_handler);
+ }
+ Fixture &addOldConfig(const StringVector &fields,
+ const StringVector &attrs) {
+ return addConfig(fields, attrs, _oldCfg);
+ }
+ Fixture &addNewConfig(const StringVector &fields,
+ const StringVector &attrs) {
+ return addConfig(fields, attrs, _newCfg);
+ }
+ Fixture &addConfig(const StringVector &fields,
+ const StringVector &attrs,
+ MyConfig &cfg) {
+ cfg.addFields(fields);
+ cfg.addAttrs(attrs);
+ return *this;
+ }
+ bool assertAttributes(const StringSet &expAttrs) {
+ if (expAttrs.empty()) {
+ if (!EXPECT_TRUE(_handler._reader.get() == nullptr)) return false;
+ } else {
+ const AttributePopulator &populator =
+ dynamic_cast<const AttributePopulator &>(*_handler._reader);
+ std::vector<search::AttributeVector *> attrList =
+ populator.getWriter().getWritableAttributes();
+ std::set<vespalib::string> actAttrs;
+ for (const auto attr : attrList) {
+ actAttrs.insert(attr->getName());
+ }
+ if (!EXPECT_EQUAL(expAttrs, actAttrs)) return false;
+ }
+ return true;
+ }
+ bool assertFields(const StringSet &expFields) {
+ if (expFields.empty()) {
+ if (!EXPECT_EQUAL(0u, _handler._rewriters.size())) return false;
+ } else {
+ StringSet actFields;
+ for (auto rewriter : _handler._rewriters) {
+ const DocumentFieldPopulator &populator =
+ dynamic_cast<const DocumentFieldPopulator &>(*rewriter);
+ actFields.insert(populator.getAttribute().getName());
+ }
+ if (!EXPECT_EQUAL(expFields, actFields)) return false;
+ }
+ return true;
+ }
+};
+
+TEST_F("require that new field does NOT require attribute populate", Fixture)
+{
+ f.addOldConfig({}, {}).addNewConfig({"a"}, {"a"}).init();
+ EXPECT_TRUE(f.assertAttributes({}));
+}
+
+TEST_F("require that added attribute aspect does require attribute populate", Fixture)
+{
+ f.addOldConfig({"a"}, {}).addNewConfig({"a"}, {"a"}).init();
+ EXPECT_TRUE(f.assertAttributes({"a"}));
+}
+
+TEST_F("require that initializer can setup populate of several attributes", Fixture)
+{
+ f.addOldConfig({"a", "b", "c", "d"}, {"a", "b"}).
+ addNewConfig({"a", "b", "c", "d"}, {"a", "b", "c", "d"}).init();
+ EXPECT_TRUE(f.assertAttributes({"c", "d"}));
+}
+
+TEST_F("require that new field does NOT require document field populate", Fixture)
+{
+ f.addOldConfig({}, {}).addNewConfig({"a"}, {"a"}).init();
+ EXPECT_TRUE(f.assertFields({}));
+}
+
+TEST_F("require that removed field does NOT require document field populate", Fixture)
+{
+ f.addOldConfig({"a"}, {"a"}).addNewConfig({}, {}).init();
+ EXPECT_TRUE(f.assertFields({}));
+}
+
+TEST_F("require that removed attribute aspect does require document field populate", Fixture)
+{
+ f.addOldConfig({"a"}, {"a"}).addNewConfig({"a"}, {}).init();
+ EXPECT_TRUE(f.assertFields({"a"}));
+}
+
+TEST_F("require that removed attribute aspect (when also index field) does NOT require document field populate",
+ Fixture)
+{
+ f.addOldConfig({"a"}, {"a"}).addNewConfig({"a"}, {});
+ f._oldCfg.addIndexField("a");
+ f._newCfg.addIndexField("a");
+ f.init();
+ EXPECT_TRUE(f.assertFields({}));
+}
+
+TEST_F("require that initializer can setup populate of several document fields", Fixture)
+{
+ f.addOldConfig({"a", "b", "c", "d"}, {"a", "b", "c", "d"}).
+ addNewConfig({"a", "b", "c", "d"}, {"a", "b"}).init();
+ EXPECT_TRUE(f.assertFields({"c", "d"}));
+}
+
+TEST_F("require that initializer can setup both attribute and document field populate", Fixture)
+{
+ f.addOldConfig({"a", "b"}, {"a"}).
+ addNewConfig({"a", "b"}, {"b"}).init();
+ EXPECT_TRUE(f.assertAttributes({"b"}));
+ EXPECT_TRUE(f.assertFields({"a"}));
+}
+
+TEST_F("require that tensor fields are not populated from attribute", Fixture)
+{
+ f.addOldConfig({"a", "b", "c", "d", "tensor"},
+ {"a", "b", "c", "d", "tensor"}).
+ addNewConfig({"a", "b", "c", "d", "tensor"}, {"a", "b"}).init();
+ EXPECT_TRUE(f.assertFields({"c", "d"}));
+}
+
+TEST_F("require that predicate fields are not populated from attribute", Fixture)
+{
+ f.addOldConfig({"a", "b", "c", "d", "predicate"},
+ {"a", "b", "c", "d", "predicate"}).
+ addNewConfig({"a", "b", "c", "d", "predicate"}, {"a", "b"}).init();
+ EXPECT_TRUE(f.assertFields({"c", "d"}));
+}
+
+TEST_MAIN()
+{
+ TEST_RUN_ALL();
+}
diff --git a/searchcore/src/tests/proton/reprocessing/document_reprocessing_handler/.gitignore b/searchcore/src/tests/proton/reprocessing/document_reprocessing_handler/.gitignore
new file mode 100644
index 00000000000..50e203b78e8
--- /dev/null
+++ b/searchcore/src/tests/proton/reprocessing/document_reprocessing_handler/.gitignore
@@ -0,0 +1 @@
+searchcore_document_reprocessing_handler_test_app
diff --git a/searchcore/src/tests/proton/reprocessing/document_reprocessing_handler/CMakeLists.txt b/searchcore/src/tests/proton/reprocessing/document_reprocessing_handler/CMakeLists.txt
new file mode 100644
index 00000000000..170e381c99c
--- /dev/null
+++ b/searchcore/src/tests/proton/reprocessing/document_reprocessing_handler/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_document_reprocessing_handler_test_app
+ SOURCES
+ document_reprocessing_handler_test.cpp
+ DEPENDS
+ searchcore_reprocessing
+)
+vespa_add_test(NAME searchcore_document_reprocessing_handler_test_app COMMAND searchcore_document_reprocessing_handler_test_app)
diff --git a/searchcore/src/tests/proton/reprocessing/document_reprocessing_handler/DESC b/searchcore/src/tests/proton/reprocessing/document_reprocessing_handler/DESC
new file mode 100644
index 00000000000..663b2304d37
--- /dev/null
+++ b/searchcore/src/tests/proton/reprocessing/document_reprocessing_handler/DESC
@@ -0,0 +1,2 @@
+Test for document reprocessing handler. Take a look at document_reprocessing_handler_test.cpp for details.
+
diff --git a/searchcore/src/tests/proton/reprocessing/document_reprocessing_handler/FILES b/searchcore/src/tests/proton/reprocessing/document_reprocessing_handler/FILES
new file mode 100644
index 00000000000..2301fc01844
--- /dev/null
+++ b/searchcore/src/tests/proton/reprocessing/document_reprocessing_handler/FILES
@@ -0,0 +1 @@
+document_reprocessing_handler_test.cpp
diff --git a/searchcore/src/tests/proton/reprocessing/document_reprocessing_handler/document_reprocessing_handler_test.cpp b/searchcore/src/tests/proton/reprocessing/document_reprocessing_handler/document_reprocessing_handler_test.cpp
new file mode 100644
index 00000000000..f22762a56bb
--- /dev/null
+++ b/searchcore/src/tests/proton/reprocessing/document_reprocessing_handler/document_reprocessing_handler_test.cpp
@@ -0,0 +1,124 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("document_reprocessing_handler_test");
+
+#include <vespa/searchcore/proton/reprocessing/document_reprocessing_handler.h>
+#include <vespa/searchlib/index/docbuilder.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+using namespace document;
+using namespace proton;
+using namespace search::index;
+
+template <typename ReprocessingType, typename DocumentType>
+struct MyProcessor : public ReprocessingType
+{
+ typedef std::shared_ptr<MyProcessor<ReprocessingType, DocumentType> > SP;
+ uint32_t _lid;
+ DocumentId _docId;
+
+ MyProcessor() : _lid(0), _docId() {}
+ virtual void handleExisting(uint32_t lid, DocumentType doc) {
+ _lid = lid;
+ _docId = doc.getId();
+ }
+};
+
+typedef MyProcessor<IReprocessingReader, const Document &> MyReader;
+typedef MyProcessor<IReprocessingRewriter, Document &> MyRewriter;
+
+const vespalib::string DOC_ID = "id:test:searchdocument::0";
+
+struct FixtureBase
+{
+ DocumentReprocessingHandler _handler;
+ DocBuilder _docBuilder;
+ FixtureBase(uint32_t docIdLimit)
+ : _handler(docIdLimit),
+ _docBuilder(Schema())
+ {
+ }
+ Document::UP createDoc() {
+ return _docBuilder.startDocument(DOC_ID).endDocument();
+ }
+};
+
+struct ReaderFixture : public FixtureBase
+{
+ MyReader::SP _reader1;
+ MyReader::SP _reader2;
+ ReaderFixture()
+ : ReaderFixture(std::numeric_limits<uint32_t>::max())
+ {
+ }
+ ReaderFixture(uint32_t docIdLimit)
+ : FixtureBase(docIdLimit),
+ _reader1(new MyReader()),
+ _reader2(new MyReader())
+ {
+ _handler.addReader(_reader1);
+ _handler.addReader(_reader2);
+ }
+};
+
+struct RewriterFixture : public FixtureBase
+{
+ MyRewriter::SP _rewriter1;
+ MyRewriter::SP _rewriter2;
+ RewriterFixture()
+ : RewriterFixture(std::numeric_limits<uint32_t>::max())
+ {
+ }
+ RewriterFixture(uint32_t docIdLimit)
+ : FixtureBase(docIdLimit),
+ _rewriter1(new MyRewriter()),
+ _rewriter2(new MyRewriter())
+ {
+ _handler.addRewriter(_rewriter1);
+ _handler.addRewriter(_rewriter2);
+ }
+};
+
+TEST_F("require that handler propagates visit of existing document to readers", ReaderFixture)
+{
+ f._handler.visit(23u, *f.createDoc());
+ EXPECT_EQUAL(23u, f._reader1->_lid);
+ EXPECT_EQUAL(DOC_ID, f._reader1->_docId.toString());
+ EXPECT_EQUAL(23u, f._reader2->_lid);
+ EXPECT_EQUAL(DOC_ID, f._reader2->_docId.toString());
+}
+
+TEST_F("require that handler propagates visit of existing document to rewriters", RewriterFixture)
+{
+ f._handler.getRewriteVisitor().visit(23u, *f.createDoc());
+ EXPECT_EQUAL(23u, f._rewriter1->_lid);
+ EXPECT_EQUAL(DOC_ID, f._rewriter1->_docId.toString());
+ EXPECT_EQUAL(23u, f._rewriter2->_lid);
+ EXPECT_EQUAL(DOC_ID, f._rewriter2->_docId.toString());
+}
+
+TEST_F("require that handler skips out of range visit to readers",
+ ReaderFixture(10))
+{
+ f._handler.visit(23u, *f.createDoc());
+ EXPECT_EQUAL(0u, f._reader1->_lid);
+ EXPECT_EQUAL(DocumentId().toString(), f._reader1->_docId.toString());
+ EXPECT_EQUAL(0u, f._reader2->_lid);
+ EXPECT_EQUAL(DocumentId().toString(), f._reader2->_docId.toString());
+}
+
+TEST_F("require that handler skips out of range visit to rewriters",
+ RewriterFixture(10))
+{
+ f._handler.getRewriteVisitor().visit(23u, *f.createDoc());
+ EXPECT_EQUAL(0u, f._rewriter1->_lid);
+ EXPECT_EQUAL(DocumentId().toString(), f._rewriter1->_docId.toString());
+ EXPECT_EQUAL(0u, f._rewriter2->_lid);
+ EXPECT_EQUAL(DocumentId().toString(), f._rewriter2->_docId.toString());
+}
+
+TEST_MAIN()
+{
+ TEST_RUN_ALL();
+}
diff --git a/searchcore/src/tests/proton/reprocessing/reprocessing_runner/.gitignore b/searchcore/src/tests/proton/reprocessing/reprocessing_runner/.gitignore
new file mode 100644
index 00000000000..ecb260d2c0e
--- /dev/null
+++ b/searchcore/src/tests/proton/reprocessing/reprocessing_runner/.gitignore
@@ -0,0 +1 @@
+searchcore_reprocessing_runner_test_app
diff --git a/searchcore/src/tests/proton/reprocessing/reprocessing_runner/CMakeLists.txt b/searchcore/src/tests/proton/reprocessing/reprocessing_runner/CMakeLists.txt
new file mode 100644
index 00000000000..f5eff73f9f0
--- /dev/null
+++ b/searchcore/src/tests/proton/reprocessing/reprocessing_runner/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_reprocessing_runner_test_app
+ SOURCES
+ reprocessing_runner_test.cpp
+ DEPENDS
+ searchcore_reprocessing
+)
+vespa_add_test(NAME searchcore_reprocessing_runner_test_app COMMAND searchcore_reprocessing_runner_test_app)
diff --git a/searchcore/src/tests/proton/reprocessing/reprocessing_runner/DESC b/searchcore/src/tests/proton/reprocessing/reprocessing_runner/DESC
new file mode 100644
index 00000000000..ffa0db7ae9e
--- /dev/null
+++ b/searchcore/src/tests/proton/reprocessing/reprocessing_runner/DESC
@@ -0,0 +1 @@
+Test for the reprocessing runner.
diff --git a/searchcore/src/tests/proton/reprocessing/reprocessing_runner/FILES b/searchcore/src/tests/proton/reprocessing/reprocessing_runner/FILES
new file mode 100644
index 00000000000..091769d58cb
--- /dev/null
+++ b/searchcore/src/tests/proton/reprocessing/reprocessing_runner/FILES
@@ -0,0 +1 @@
+reprocessing_runner_test.cpp
diff --git a/searchcore/src/tests/proton/reprocessing/reprocessing_runner/reprocessing_runner_test.cpp b/searchcore/src/tests/proton/reprocessing/reprocessing_runner/reprocessing_runner_test.cpp
new file mode 100644
index 00000000000..c4c462ecfa1
--- /dev/null
+++ b/searchcore/src/tests/proton/reprocessing/reprocessing_runner/reprocessing_runner_test.cpp
@@ -0,0 +1,141 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("reprocessing_runner_test");
+
+#include <vespa/searchcore/proton/reprocessing/i_reprocessing_task.h>
+#include <vespa/searchcore/proton/reprocessing/reprocessingrunner.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+using namespace proton;
+
+struct Fixture
+{
+ ReprocessingRunner _runner;
+ Fixture()
+ : _runner()
+ {
+ }
+};
+
+typedef ReprocessingRunner::ReprocessingTasks TaskList;
+
+struct MyTask : public IReprocessingTask
+{
+ ReprocessingRunner &_runner;
+ double _initProgress;
+ double _middleProgress;
+ double _finalProgress;
+ double _myProgress;
+ double _weight;
+
+ MyTask(ReprocessingRunner &runner,
+ double initProgress,
+ double middleProgress,
+ double finalProgress,
+ double weight)
+ : _runner(runner),
+ _initProgress(initProgress),
+ _middleProgress(middleProgress),
+ _finalProgress(finalProgress),
+ _myProgress(0.0),
+ _weight(weight)
+ {
+ }
+
+ virtual void
+ run()
+ {
+ ASSERT_EQUAL(_initProgress, _runner.getProgress());
+ _myProgress = 0.5;
+ ASSERT_EQUAL(_middleProgress, _runner.getProgress());
+ _myProgress = 1.0;
+ ASSERT_EQUAL(_finalProgress, _runner.getProgress());
+ }
+
+ virtual Progress
+ getProgress(void) const
+ {
+ return Progress(_myProgress, _weight);
+ }
+
+ static std::shared_ptr<MyTask>
+ create(ReprocessingRunner &runner,
+ double initProgress,
+ double middleProgress,
+ double finalProgress,
+ double weight)
+ {
+ return std::make_shared<MyTask>(runner,
+ initProgress,
+ middleProgress,
+ finalProgress,
+ weight);
+ }
+};
+
+TEST_F("require that progress is calculated when tasks are executed", Fixture)
+{
+ TaskList tasks;
+ EXPECT_EQUAL(0.0, f._runner.getProgress());
+ tasks.push_back(MyTask::create(f._runner,
+ 0.0,
+ 0.1,
+ 0.2,
+ 1.0));
+ tasks.push_back(MyTask::create(f._runner,
+ 0.2,
+ 0.6,
+ 1.0,
+ 4.0));
+ f._runner.addTasks(tasks);
+ tasks.clear();
+ EXPECT_EQUAL(0.0, f._runner.getProgress());
+ f._runner.run();
+ EXPECT_EQUAL(1.0, f._runner.getProgress());
+}
+
+
+TEST_F("require that runner can be reset", Fixture)
+{
+ TaskList tasks;
+ EXPECT_EQUAL(0.0, f._runner.getProgress());
+ tasks.push_back(MyTask::create(f._runner,
+ 0.0,
+ 0.5,
+ 1.0,
+ 1.0));
+ f._runner.addTasks(tasks);
+ tasks.clear();
+ EXPECT_EQUAL(0.0, f._runner.getProgress());
+ f._runner.run();
+ EXPECT_EQUAL(1.0, f._runner.getProgress());
+ f._runner.reset();
+ EXPECT_EQUAL(0.0, f._runner.getProgress());
+ tasks.push_back(MyTask::create(f._runner,
+ 0.0,
+ 0.5,
+ 1.0,
+ 1.0));
+ f._runner.addTasks(tasks);
+ tasks.clear();
+ EXPECT_EQUAL(0.0, f._runner.getProgress());
+ f._runner.reset();
+ EXPECT_EQUAL(0.0, f._runner.getProgress());
+ tasks.push_back(MyTask::create(f._runner,
+ 0.0,
+ 0.5,
+ 1.0,
+ 4.0));
+ f._runner.addTasks(tasks);
+ tasks.clear();
+ EXPECT_EQUAL(0.0, f._runner.getProgress());
+ f._runner.run();
+ EXPECT_EQUAL(1.0, f._runner.getProgress());
+}
+
+
+TEST_MAIN()
+{
+ TEST_RUN_ALL();
+}
diff --git a/searchcore/src/tests/proton/server/.gitignore b/searchcore/src/tests/proton/server/.gitignore
new file mode 100644
index 00000000000..dc96b15f5fe
--- /dev/null
+++ b/searchcore/src/tests/proton/server/.gitignore
@@ -0,0 +1,9 @@
+*_test
+.depend
+Makefile
+test_data
+searchcore_attribute_metrics_test_app
+searchcore_documentretriever_test_app
+searchcore_feeddebugger_test_app
+searchcore_feedstates_test_app
+searchcore_memoryconfigstore_test_app
diff --git a/searchcore/src/tests/proton/server/CMakeLists.txt b/searchcore/src/tests/proton/server/CMakeLists.txt
new file mode 100644
index 00000000000..3ae89e7393d
--- /dev/null
+++ b/searchcore/src/tests/proton/server/CMakeLists.txt
@@ -0,0 +1,52 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_attribute_metrics_test_app
+ SOURCES
+ attribute_metrics_test.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_proton_metrics
+)
+vespa_add_test(NAME searchcore_attribute_metrics_test_app COMMAND searchcore_attribute_metrics_test_app)
+vespa_add_executable(searchcore_documentretriever_test_app
+ SOURCES
+ documentretriever_test.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_fconfig
+ searchcore_attribute
+ searchcore_feedoperation
+ searchcore_documentmetastore
+ searchcore_bucketdb
+ searchcore_pcommon
+ searchcore_persistenceengine
+)
+vespa_add_test(NAME searchcore_documentretriever_test_app COMMAND searchcore_documentretriever_test_app)
+vespa_add_executable(searchcore_feeddebugger_test_app
+ SOURCES
+ feeddebugger_test.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_pcommon
+)
+vespa_add_test(NAME searchcore_feeddebugger_test_app COMMAND searchcore_feeddebugger_test_app)
+vespa_add_executable(searchcore_feedstates_test_app
+ SOURCES
+ feedstates_test.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_bucketdb
+ searchcore_persistenceengine
+ searchcore_feedoperation
+ searchcore_pcommon
+ searchcore_proton_metrics
+ searchcore_fconfig
+)
+vespa_add_test(NAME searchcore_feedstates_test_app COMMAND searchcore_feedstates_test_app)
+vespa_add_executable(searchcore_memoryconfigstore_test_app
+ SOURCES
+ memoryconfigstore_test.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_pcommon
+)
+vespa_add_test(NAME searchcore_memoryconfigstore_test_app COMMAND searchcore_memoryconfigstore_test_app)
diff --git a/searchcore/src/tests/proton/server/attribute_metrics_test.cpp b/searchcore/src/tests/proton/server/attribute_metrics_test.cpp
new file mode 100644
index 00000000000..18f35d9bf5e
--- /dev/null
+++ b/searchcore/src/tests/proton/server/attribute_metrics_test.cpp
@@ -0,0 +1,56 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("attribute_metrics_test");
+#include <vespa/vespalib/testkit/testapp.h>
+
+#include <vespa/searchcore/proton/metrics/attribute_metrics.h>
+
+using namespace proton;
+
+class Test : public vespalib::TestApp
+{
+public:
+ int Main();
+};
+
+int
+Test::Main()
+{
+ TEST_INIT("attribute_metrics_test");
+ {
+ AttributeMetrics attrMetrics(0);
+ EXPECT_EQUAL(0u, attrMetrics.list.release().size());
+ {
+ AttributeMetrics::List::Entry::LP e1 = attrMetrics.list.add("foo");
+ AttributeMetrics::List::Entry::LP e2 = attrMetrics.list.add("bar");
+ AttributeMetrics::List::Entry::LP e3 = attrMetrics.list.add("foo");
+ EXPECT_TRUE(e1.get() != 0);
+ EXPECT_TRUE(e2.get() != 0);
+ EXPECT_TRUE(e3.get() == 0);
+ }
+ {
+ const AttributeMetrics &constMetrics = attrMetrics;
+ AttributeMetrics::List::Entry::LP e1 = constMetrics.list.get("foo");
+ AttributeMetrics::List::Entry::LP e2 = constMetrics.list.get("bar");
+ AttributeMetrics::List::Entry::LP e3 = constMetrics.list.get("baz");
+ EXPECT_TRUE(e1.get() != 0);
+ EXPECT_TRUE(e2.get() != 0);
+ EXPECT_TRUE(e3.get() == 0);
+ }
+ EXPECT_EQUAL(2u, attrMetrics.list.release().size());
+ {
+ const AttributeMetrics &constMetrics = attrMetrics;
+ AttributeMetrics::List::Entry::LP e1 = constMetrics.list.get("foo");
+ AttributeMetrics::List::Entry::LP e2 = constMetrics.list.get("bar");
+ AttributeMetrics::List::Entry::LP e3 = constMetrics.list.get("baz");
+ EXPECT_TRUE(e1.get() == 0);
+ EXPECT_TRUE(e2.get() == 0);
+ EXPECT_TRUE(e3.get() == 0);
+ }
+ EXPECT_EQUAL(0u, attrMetrics.list.release().size());
+ }
+ TEST_DONE();
+}
+
+TEST_APPHOOK(Test);
diff --git a/searchcore/src/tests/proton/server/data_directory_upgrader/.gitignore b/searchcore/src/tests/proton/server/data_directory_upgrader/.gitignore
new file mode 100644
index 00000000000..b085eedc970
--- /dev/null
+++ b/searchcore/src/tests/proton/server/data_directory_upgrader/.gitignore
@@ -0,0 +1 @@
+searchcore_data_directory_upgrader_test_app
diff --git a/searchcore/src/tests/proton/server/data_directory_upgrader/CMakeLists.txt b/searchcore/src/tests/proton/server/data_directory_upgrader/CMakeLists.txt
new file mode 100644
index 00000000000..9c8048a0a69
--- /dev/null
+++ b/searchcore/src/tests/proton/server/data_directory_upgrader/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_data_directory_upgrader_test_app
+ SOURCES
+ data_directory_upgrader_test.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_flushengine
+)
+vespa_add_test(NAME searchcore_data_directory_upgrader_test_app COMMAND searchcore_data_directory_upgrader_test_app)
diff --git a/searchcore/src/tests/proton/server/data_directory_upgrader/DESC b/searchcore/src/tests/proton/server/data_directory_upgrader/DESC
new file mode 100644
index 00000000000..d0ca4c99210
--- /dev/null
+++ b/searchcore/src/tests/proton/server/data_directory_upgrader/DESC
@@ -0,0 +1 @@
+data_directory_upgrader test. Take a look at data_directory_upgrader_test.cpp for details.
diff --git a/searchcore/src/tests/proton/server/data_directory_upgrader/FILES b/searchcore/src/tests/proton/server/data_directory_upgrader/FILES
new file mode 100644
index 00000000000..d1aee9bddfa
--- /dev/null
+++ b/searchcore/src/tests/proton/server/data_directory_upgrader/FILES
@@ -0,0 +1 @@
+data_directory_upgrader_test.cpp
diff --git a/searchcore/src/tests/proton/server/data_directory_upgrader/data_directory_upgrader_test.cpp b/searchcore/src/tests/proton/server/data_directory_upgrader/data_directory_upgrader_test.cpp
new file mode 100644
index 00000000000..7b6cf9143ee
--- /dev/null
+++ b/searchcore/src/tests/proton/server/data_directory_upgrader/data_directory_upgrader_test.cpp
@@ -0,0 +1,200 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("data_directory_upgrader_test");
+#include <vespa/vespalib/testkit/testapp.h>
+
+#include <vespa/searchcore/proton/server/data_directory_upgrader.h>
+#include <vespa/vespalib/io/fileutil.h>
+#include <vespa/vespalib/stllike/asciistream.h>
+#include <iostream>
+
+using namespace proton;
+using namespace vespalib;
+
+typedef DataDirectoryUpgrader::RowColDir RowColDir;
+typedef DataDirectoryUpgrader::ScanResult ScanResult;
+typedef DataDirectoryUpgrader::UpgradeResult UpgradeResult;
+
+const string SCAN_DIR = "mytest";
+const string DEST_DIR = SCAN_DIR + "/n1";
+
+void
+assertDirs(const DirectoryList &exp, const DirectoryList &act)
+{
+ ASSERT_EQUAL(exp.size(), act.size());
+ for (size_t i = 0; i < exp.size(); ++i) {
+ EXPECT_EQUAL(exp[i], act[i]);
+ }
+}
+
+void
+assertDirs(const DirectoryList &rowColDirs, bool destDirExisting, const ScanResult &act)
+{
+ ASSERT_EQUAL(rowColDirs.size(), act.getRowColDirs().size());
+ for (size_t i = 0; i < rowColDirs.size(); ++i) {
+ EXPECT_EQUAL(rowColDirs[i], act.getRowColDirs()[i].dir());
+ }
+ EXPECT_EQUAL(destDirExisting, act.isDestDirExisting());
+}
+
+void
+assertDataFile(const vespalib::string &dir)
+{
+ FileInfo::UP file = stat(dir + "/data.txt");
+ ASSERT_TRUE(file.get() != NULL);
+ EXPECT_TRUE(file->_plainfile);
+}
+
+vespalib::string
+readFile(const vespalib::string &fileName)
+{
+ File file(fileName);
+ file.open(File::READONLY);
+ FileInfo info = file.stat();
+ char buf[512];
+ size_t bytesRead = file.read(&buf, info._size, 0);
+ return vespalib::string(buf, bytesRead);
+}
+
+void
+assertUpgradeFile(const vespalib::string &exp, const vespalib::string &dir)
+{
+ EXPECT_EQUAL(exp, readFile(dir + "/data-directory-upgrade-source.txt"));
+}
+
+void
+assertDowngradeScript(const vespalib::string &exp, const vespalib::string &dir)
+{
+ EXPECT_EQUAL(exp, readFile(dir + "/data-directory-downgrade.sh"));
+}
+
+struct BaseFixture
+{
+ DataDirectoryUpgrader _upg;
+ BaseFixture(const DirectoryList &dirs, bool createDestDir = false) : _upg(SCAN_DIR, DEST_DIR) {
+ mkdir(SCAN_DIR);
+ if (createDestDir) {
+ mkdir(DEST_DIR);
+ }
+ for (const string &dir : dirs) {
+ mkdir(SCAN_DIR + "/" + dir);
+ File f(SCAN_DIR + "/" + dir + "/data.txt");
+ f.open(File::CREATE);
+ f.close();
+ }
+ }
+ virtual ~BaseFixture() {
+ rmdir(SCAN_DIR, true);
+ }
+ DirectoryList getDirs(const vespalib::string &subDir = "") const {
+ DirectoryList l = listDirectory(SCAN_DIR + "/" + subDir);
+ std::sort(l.begin(), l.end());
+ return l;
+ }
+};
+
+struct EmptyFixture : public BaseFixture
+{
+ EmptyFixture() : BaseFixture({}) {}
+};
+
+struct SingleFixture : public BaseFixture
+{
+ SingleFixture() : BaseFixture({"r0/c0"}) {}
+};
+
+struct DoubleFixture : public BaseFixture
+{
+ DoubleFixture() : BaseFixture({"r0/c0", "r1/c1"}) {}
+};
+
+struct UnrelatedFixture : public BaseFixture
+{
+ UnrelatedFixture() : BaseFixture({"r0/cY", "rX/c1", "r0"}) {}
+};
+
+struct ExistingDestinationFixture : public BaseFixture
+{
+ ExistingDestinationFixture() : BaseFixture({"r0/c0"}, true) {}
+};
+
+TEST_F("require that single row/column directory is discovered", SingleFixture)
+{
+ ScanResult res = f._upg.scan();
+ assertDirs({"r0/c0"}, false, res);
+}
+
+TEST_F("require that multiple row/column directories are discovered", DoubleFixture)
+{
+ ScanResult res = f._upg.scan();
+ assertDirs({"r0/c0", "r1/c1"}, false, res);
+}
+
+TEST_F("require that unrelated directories are not discovered", UnrelatedFixture)
+{
+ ScanResult res = f._upg.scan();
+ assertDirs({}, false, res);
+}
+
+TEST_F("require that existing destination directory is discovered", ExistingDestinationFixture)
+{
+ ScanResult res = f._upg.scan();
+ assertDirs({"r0/c0"}, true, res);
+}
+
+TEST("require that non-existing scan directory is handled")
+{
+ DataDirectoryUpgrader upg(SCAN_DIR, DEST_DIR);
+ ScanResult res = upg.scan();
+ assertDirs({}, false, res);
+}
+
+TEST_F("require that empty directory is left untouched", EmptyFixture)
+{
+ UpgradeResult res = f._upg.upgrade(f._upg.scan());
+ EXPECT_EQUAL(DataDirectoryUpgrader::IGNORE, res.getStatus());
+ EXPECT_EQUAL("No directory to upgrade", res.getDesc());
+ DirectoryList dirs = f.getDirs();
+ assertDirs({}, dirs);
+}
+
+TEST_F("require that existing destination directory is left untouched", ExistingDestinationFixture)
+{
+ UpgradeResult res = f._upg.upgrade(f._upg.scan());
+ EXPECT_EQUAL(DataDirectoryUpgrader::IGNORE, res.getStatus());
+ EXPECT_EQUAL("Destination directory 'mytest/n1' is already existing", res.getDesc());
+ DirectoryList dirs = f.getDirs();
+ assertDirs({"n1", "r0"}, dirs);
+}
+
+TEST_F("require that single directory is upgraded", SingleFixture)
+{
+ UpgradeResult res = f._upg.upgrade(f._upg.scan());
+ EXPECT_EQUAL(DataDirectoryUpgrader::COMPLETE, res.getStatus());
+ EXPECT_EQUAL("Moved data from 'mytest/r0/c0' to 'mytest/n1'", res.getDesc());
+ DirectoryList dirs = f.getDirs();
+ std::sort(dirs.begin(), dirs.end());
+ assertDirs({"n1"}, dirs);
+ assertDataFile(DEST_DIR);
+ assertUpgradeFile("mytest/r0/c0", DEST_DIR);
+ assertDowngradeScript("#!/bin/sh\n\n"
+ "mkdir mytest/r0 || exit 1\n"
+ "chown yahoo mytest/r0\n"
+ "mv mytest/n1 mytest/r0/c0\n"
+ "rm mytest/r0/c0/data-directory-upgrade-source.txt\n"
+ "rm mytest/r0/c0/data-directory-downgrade.sh\n", DEST_DIR);
+}
+
+TEST_F("require that multiple directories are left untouched", DoubleFixture)
+{
+ UpgradeResult res = f._upg.upgrade(f._upg.scan());
+ EXPECT_EQUAL(DataDirectoryUpgrader::ERROR, res.getStatus());
+ EXPECT_EQUAL("Can only upgrade a single directory, was asked to upgrade 2 ('r0/c0', 'r1/c1')", res.getDesc());
+ DirectoryList dirs = f.getDirs();
+ std::sort(dirs.begin(), dirs.end());
+ assertDirs({"r0", "r1"}, dirs);
+ assertDataFile(SCAN_DIR + "/r0/c0");
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/server/disk_mem_usage_filter/.gitignore b/searchcore/src/tests/proton/server/disk_mem_usage_filter/.gitignore
new file mode 100644
index 00000000000..ec8610c93cf
--- /dev/null
+++ b/searchcore/src/tests/proton/server/disk_mem_usage_filter/.gitignore
@@ -0,0 +1 @@
+searchcore_disk_mem_usage_filter_test_app
diff --git a/searchcore/src/tests/proton/server/disk_mem_usage_filter/CMakeLists.txt b/searchcore/src/tests/proton/server/disk_mem_usage_filter/CMakeLists.txt
new file mode 100644
index 00000000000..1d9b0234d76
--- /dev/null
+++ b/searchcore/src/tests/proton/server/disk_mem_usage_filter/CMakeLists.txt
@@ -0,0 +1,10 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_disk_mem_usage_filter_test_app
+ SOURCES
+ disk_mem_usage_filter_test.cpp
+ DEPENDS
+ searchcore_server
+)
+vespa_add_target_system_dependency(searchcore_disk_mem_usage_filter_test_app boost boost_system-mt-d)
+vespa_add_target_system_dependency(searchcore_disk_mem_usage_filter_test_app boost boost_filesystem-mt-d)
+vespa_add_test(NAME searchcore_disk_mem_usage_filter_test_app COMMAND searchcore_disk_mem_usage_filter_test_app)
diff --git a/searchcore/src/tests/proton/server/disk_mem_usage_filter/DESC b/searchcore/src/tests/proton/server/disk_mem_usage_filter/DESC
new file mode 100644
index 00000000000..fca9e3b7656
--- /dev/null
+++ b/searchcore/src/tests/proton/server/disk_mem_usage_filter/DESC
@@ -0,0 +1 @@
+DiskMemUsageFilter test. Take a look at disk_mem_usage_filter_test.cpp for details.
diff --git a/searchcore/src/tests/proton/server/disk_mem_usage_filter/FILES b/searchcore/src/tests/proton/server/disk_mem_usage_filter/FILES
new file mode 100644
index 00000000000..b6cdfc4bffc
--- /dev/null
+++ b/searchcore/src/tests/proton/server/disk_mem_usage_filter/FILES
@@ -0,0 +1 @@
+disk_mem_usage_filter_test.cpp
diff --git a/searchcore/src/tests/proton/server/disk_mem_usage_filter/disk_mem_usage_filter_test.cpp b/searchcore/src/tests/proton/server/disk_mem_usage_filter/disk_mem_usage_filter_test.cpp
new file mode 100644
index 00000000000..70e559e2d23
--- /dev/null
+++ b/searchcore/src/tests/proton/server/disk_mem_usage_filter/disk_mem_usage_filter_test.cpp
@@ -0,0 +1,113 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("disk_mem_usage_filter_test");
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/searchcore/proton/server/disk_mem_usage_filter.h>
+
+using proton::DiskMemUsageFilter;
+
+namespace fs = boost::filesystem;
+
+namespace
+{
+
+struct Fixture
+{
+ DiskMemUsageFilter _filter;
+ using State = DiskMemUsageFilter::State;
+ using Config = DiskMemUsageFilter::Config;
+
+ Fixture()
+ : _filter(64 * 1024 * 1024)
+ {
+ _filter.setDiskStats({.capacity = 100, .free = 100, .available=100});
+ _filter.setMemoryStats(vespalib::ProcessMemoryStats(10000000,
+ 10000001,
+ 10000002,
+ 10000003));
+ }
+
+ void testWrite(const vespalib::string &exp) {
+ if (exp.empty()) {
+ EXPECT_TRUE(_filter.acceptWriteOperation());
+ State state = _filter.getAcceptState();
+ EXPECT_TRUE(state.acceptWriteOperation());
+ EXPECT_EQUAL(exp, state.message());
+ } else {
+ EXPECT_FALSE(_filter.acceptWriteOperation());
+ State state = _filter.getAcceptState();
+ EXPECT_FALSE(state.acceptWriteOperation());
+ EXPECT_EQUAL(exp, state.message());
+ }
+ }
+
+ void triggerDiskLimit() {
+ _filter.setDiskStats({.capacity = 100, .free = 20, .available=10});
+ }
+
+ void triggerMemoryLimit()
+ {
+ _filter.setMemoryStats(vespalib::ProcessMemoryStats(58720259,
+ 58720258,
+ 58720257,
+ 58720256));
+ }
+};
+
+}
+
+TEST_F("Check that default filter allows write", Fixture)
+{
+ f.testWrite("");
+}
+
+
+TEST_F("Check that disk limit can be reached", Fixture)
+{
+ f._filter.setConfig(Fixture::Config(1.0, 0.8));
+ f.triggerDiskLimit();
+ f.testWrite("diskLimitReached: { "
+ "action: \"add more content nodes\", "
+ "reason: \""
+ "disk used (0.9) > disk limit (0.8)"
+ "\", "
+ "capacity: 100, free: 20, available: 10, diskLimit: 0.8}");
+}
+
+TEST_F("Check that memory limit can be reached", Fixture)
+{
+ f._filter.setConfig(Fixture::Config(0.8, 1.0));
+ f.triggerMemoryLimit();
+ f.testWrite("memoryLimitReached: { "
+ "action: \"add more content nodes\", "
+ "reason: \""
+ "memory used (0.875) > memory limit (0.8)"
+ "\", "
+ "mapped: { virt: 58720259, rss: 58720258}, "
+ "anonymous: { virt: 58720257, rss: 58720256}, "
+ "physicalMemory: 67108864, memoryLimit : 0.8}");
+}
+
+TEST_F("Check that both disk limit and memory limit can be reached", Fixture)
+{
+ f._filter.setConfig(Fixture::Config(0.8, 0.8));
+ f.triggerMemoryLimit();
+ f.triggerDiskLimit();
+ f.testWrite("memoryLimitReached: { "
+ "action: \"add more content nodes\", "
+ "reason: \""
+ "memory used (0.875) > memory limit (0.8)"
+ "\", "
+ "mapped: { virt: 58720259, rss: 58720258}, "
+ "anonymous: { virt: 58720257, rss: 58720256}, "
+ "physicalMemory: 67108864, memoryLimit : 0.8}, "
+ "diskLimitReached: { "
+ "action: \"add more content nodes\", "
+ "reason: \""
+ "disk used (0.9) > disk limit (0.8)"
+ "\", "
+ "capacity: 100, free: 20, available: 10, diskLimit: 0.8}");
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/server/documentretriever_test.cpp b/searchcore/src/tests/proton/server/documentretriever_test.cpp
new file mode 100644
index 00000000000..99ef5879682
--- /dev/null
+++ b/searchcore/src/tests/proton/server/documentretriever_test.cpp
@@ -0,0 +1,455 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Unit tests for documentretriever.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("documentretriever_test");
+
+#include <vespa/document/base/documentid.h>
+#include <vespa/document/bucket/bucketid.h>
+#include <vespa/document/datatype/datatype.h>
+#include <vespa/document/datatype/positiondatatype.h>
+#include <vespa/document/fieldvalue/arrayfieldvalue.h>
+#include <vespa/document/fieldvalue/document.h>
+#include <vespa/document/fieldvalue/doublefieldvalue.h>
+#include <vespa/document/fieldvalue/intfieldvalue.h>
+#include <vespa/document/fieldvalue/longfieldvalue.h>
+#include <vespa/document/fieldvalue/predicatefieldvalue.h>
+#include <vespa/document/fieldvalue/stringfieldvalue.h>
+#include <vespa/document/fieldvalue/structfieldvalue.h>
+#include <vespa/document/fieldvalue/weightedsetfieldvalue.h>
+#include <vespa/document/repo/configbuilder.h>
+#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/persistence/spi/bucket.h>
+#include <vespa/persistence/spi/result.h>
+#include <vespa/searchcore/proton/documentmetastore/documentmetastorecontext.h>
+#include <vespa/searchcore/proton/server/documentretriever.h>
+#include <vespa/searchcore/proton/test/dummy_document_store.h>
+#include <vespa/searchlib/attribute/attributefactory.h>
+#include <vespa/searchlib/attribute/attributeguard.h>
+#include <vespa/searchlib/attribute/attributemanager.h>
+#include <vespa/searchlib/attribute/floatbase.h>
+#include <vespa/searchlib/attribute/integerbase.h>
+#include <vespa/searchlib/attribute/predicate_attribute.h>
+#include <vespa/searchlib/attribute/stringbase.h>
+#include <vespa/searchlib/docstore/cachestats.h>
+#include <vespa/searchlib/docstore/idocumentstore.h>
+#include <vespa/vespalib/stllike/string.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/util/stringfmt.h>
+
+using document::ArrayFieldValue;
+using document::BucketId;
+using document::DataType;
+using document::Document;
+using document::DocumentId;
+using document::DocumentType;
+using document::DocumentTypeRepo;
+using document::DoubleFieldValue;
+using document::GlobalId;
+using document::IntFieldValue;
+using document::LongFieldValue;
+using document::PositionDataType;
+using document::PredicateFieldValue;
+using document::StringFieldValue;
+using document::StructFieldValue;
+using document::WeightedSetFieldValue;
+using search::AttributeFactory;
+using search::AttributeGuard;
+using search::AttributeVector;
+using search::CacheStats;
+using search::DocumentIdT;
+using search::DocumentMetaData;
+using search::FloatingPointAttribute;
+using search::IDocumentStore;
+using search::IntegerAttribute;
+using search::PredicateAttribute;
+using search::StringAttribute;
+using search::attribute::BasicType;
+using search::attribute::CollectionType;
+using search::attribute::Config;
+using search::attribute::IAttributeVector;
+using search::index::Schema;
+using storage::spi::Bucket;
+using storage::spi::GetResult;
+using storage::spi::PartitionId;
+using storage::spi::Timestamp;
+using vespalib::make_string;
+using vespalib::string;
+using namespace document::config_builder;
+
+using namespace proton;
+
+namespace {
+
+const string doc_type_name = "type_name";
+const char static_field[] = "static field";
+const char dyn_field_i[] = "dynamic int field";
+const char dyn_field_d[] = "dynamic double field";
+const char dyn_field_s[] = "dynamic string field";
+const char dyn_field_n[] = "dynamic null field"; // not in document, not in attribute
+const char dyn_field_nai[] = "dynamic null attr int field"; // in document, not in attribute
+const char dyn_field_nas[] = "dynamic null attr string field"; // in document, not in attribute
+const char position_field[] = "position_field";
+const char zcurve_field[] = "position_field_zcurve";
+const char dyn_field_p[] = "dynamic predicate field";
+const char dyn_arr_field_i[] = "dynamic int array field";
+const char dyn_arr_field_d[] = "dynamic double array field";
+const char dyn_arr_field_s[] = "dynamic string array field";
+const char dyn_arr_field_n[] = "dynamic null array field";
+const char dyn_wset_field_i[] = "dynamic int wset field";
+const char dyn_wset_field_d[] = "dynamic double wset field";
+const char dyn_wset_field_s[] = "dynamic string wset field";
+const char dyn_wset_field_n[] = "dynamic null wset field";
+const DocumentId doc_id("doc:test:1");
+const int32_t static_value = 4;
+const int32_t dyn_value_i = 17;
+const double dyn_value_d = 42.42;
+const char dyn_value_s[] = "Batman & Robin";
+const char static_value_s[] = "Dynamic duo";
+const PredicateFieldValue static_value_p;
+const int32_t dyn_weight = 21;
+const int64_t static_zcurve_value = 1118035438880ll;
+const int64_t dynamic_zcurve_value = 6145423666930817152ll;
+
+struct MyDocumentStore : proton::test::DummyDocumentStore {
+ virtual Document::UP read(DocumentIdT lid,
+ const DocumentTypeRepo &r) const override {
+ if (lid == 0) {
+ return Document::UP();
+ }
+ const DocumentType *doc_type = r.getDocumentType(doc_type_name);
+ Document::UP doc(new Document(*doc_type, doc_id));
+ ASSERT_TRUE(doc.get());
+ doc->set(static_field, static_value);
+ doc->set(dyn_field_i, static_value);
+ doc->set(dyn_field_s, static_value_s);
+ doc->set(dyn_field_nai, static_value);
+ doc->set(dyn_field_nas, static_value_s);
+ doc->set(zcurve_field, static_zcurve_value);
+ doc->setValue(dyn_field_p, static_value_p);
+ FieldValue::UP fv = PositionDataType::getInstance().createFieldValue();
+ StructFieldValue &pos = static_cast<StructFieldValue &>(*fv);
+ pos.set(PositionDataType::FIELD_X, 42);
+ pos.set(PositionDataType::FIELD_Y, 21);
+ doc->setValue(doc->getField(position_field), *fv);
+
+ return doc;
+ }
+
+ virtual uint64_t
+ initFlush(uint64_t syncToken) override
+ {
+ return syncToken;
+ }
+};
+
+document::DocumenttypesConfig getRepoConfig() {
+ const int32_t doc_type_id = 787121340;
+
+ DocumenttypesConfigBuilderHelper builder;
+ builder.document(doc_type_id, doc_type_name,
+ Struct(doc_type_name + ".header"),
+ Struct(doc_type_name + ".body")
+ .addField(static_field, DataType::T_INT)
+ .addField(dyn_field_i, DataType::T_INT)
+ .addField(dyn_field_d, DataType::T_DOUBLE)
+ .addField(dyn_field_s, DataType::T_STRING)
+ .addField(dyn_field_n, DataType::T_FLOAT)
+ .addField(dyn_field_nai, DataType::T_INT)
+ .addField(dyn_field_nas, DataType::T_STRING)
+ .addField(dyn_field_p, DataType::T_PREDICATE)
+ .addField(dyn_arr_field_i, Array(DataType::T_INT))
+ .addField(dyn_arr_field_d, Array(DataType::T_DOUBLE))
+ .addField(dyn_arr_field_s, Array(DataType::T_STRING))
+ .addField(dyn_arr_field_n, Array(DataType::T_FLOAT))
+ .addField(dyn_wset_field_i, Wset(DataType::T_INT))
+ .addField(dyn_wset_field_d, Wset(DataType::T_DOUBLE))
+ .addField(dyn_wset_field_s, Wset(DataType::T_STRING))
+ .addField(dyn_wset_field_n, Wset(DataType::T_FLOAT))
+ .addField(position_field,
+ PositionDataType::getInstance().getId())
+ .addField(zcurve_field, DataType::T_LONG));
+ return builder.config();
+}
+
+BasicType
+convertDataType(Schema::DataType t)
+{
+ switch (t) {
+ case Schema::INT32:
+ return BasicType::INT32;
+ case Schema::INT64:
+ return BasicType::INT64;
+ case Schema::FLOAT:
+ return BasicType::FLOAT;
+ case Schema::DOUBLE:
+ return BasicType::DOUBLE;
+ case Schema::STRING:
+ return BasicType::STRING;
+ case Schema::BOOLEANTREE:
+ return BasicType::PREDICATE;
+ default:
+ throw std::runtime_error(make_string("Data type %u not handled", (uint32_t)t));
+ }
+}
+
+CollectionType
+convertCollectionType(Schema::CollectionType ct)
+{
+ switch (ct) {
+ case Schema::SINGLE:
+ return CollectionType::SINGLE;
+ case Schema::ARRAY:
+ return CollectionType::ARRAY;
+ case Schema::WEIGHTEDSET:
+ return CollectionType::WSET;
+ default:
+ throw std::runtime_error(make_string("Collection type %u not handled", (uint32_t)ct));
+ }
+}
+
+search::attribute::Config
+convertConfig(Schema::DataType t, Schema::CollectionType ct)
+{
+ return search::attribute::Config(convertDataType(t), convertCollectionType(ct));
+}
+
+struct Fixture {
+ DocumentTypeRepo repo;
+ DocumentMetaStoreContext meta_store;
+ const GlobalId &gid;
+ BucketId bucket_id;
+ Timestamp timestamp;
+ DocumentMetaStore::DocId lid;
+ MyDocumentStore doc_store;
+ search::AttributeManager attr_manager;
+ Schema schema;
+ DocTypeName _dtName;
+ DocumentRetriever retriever;
+
+ template <typename T>
+ T *addAttribute(const char *name,
+ Schema::DataType t, Schema::CollectionType ct) {
+ AttributeVector::SP attrPtr = AttributeFactory::createAttribute(name, convertConfig(t, ct));
+ T *attr = dynamic_cast<T *>(attrPtr.get());
+ AttributeVector::DocId id;
+ attr_manager.add(attrPtr);
+ attr->addReservedDoc();
+ attr->addDoc(id);
+ attr->clearDoc(id);
+ EXPECT_EQUAL(id, lid);
+ schema.addAttributeField(Schema::Field(name, t, ct));
+ attr->commit();
+ return attr;
+ }
+
+ template <typename T, typename U>
+ void addAttribute(const char *name, U val,
+ Schema::DataType t, Schema::CollectionType ct) {
+ T *attr = addAttribute<T>(name, t, ct);
+ if (ct == Schema::SINGLE) {
+ attr->update(lid, val);
+ } else {
+ attr->append(lid, val + 1, dyn_weight);
+ attr->append(lid, val, dyn_weight);
+ }
+ attr->commit();
+ }
+
+ Fixture()
+ : repo(getRepoConfig()),
+ meta_store(std::make_shared<BucketDBOwner>()),
+ gid(doc_id.getGlobalId()),
+ bucket_id(gid.convertToBucketId()),
+ timestamp(21),
+ lid(),
+ doc_store(),
+ attr_manager(),
+ schema(),
+ _dtName(doc_type_name),
+ retriever(_dtName,
+ repo, schema, meta_store, attr_manager, doc_store)
+ {
+ typedef DocumentMetaStore::Result Result;
+ meta_store.constructFreeList();
+ Result inspect = meta_store.get().inspect(gid);
+ Result putRes(meta_store.get().put(gid, bucket_id, timestamp, inspect.getLid()));
+ lid = putRes.getLid();
+ ASSERT_TRUE(putRes.ok());
+ Schema::CollectionType ct = Schema::SINGLE;
+ addAttribute<IntegerAttribute>(
+ dyn_field_i, dyn_value_i, Schema::INT32, ct);
+ addAttribute<FloatingPointAttribute>(
+ dyn_field_d, dyn_value_d, Schema::DOUBLE, ct);
+ addAttribute<StringAttribute>(
+ dyn_field_s, dyn_value_s, Schema::STRING, ct);
+ addAttribute<FloatingPointAttribute>(
+ dyn_field_n, Schema::FLOAT, ct);
+ addAttribute<IntegerAttribute>(
+ dyn_field_nai, Schema::INT32, ct);
+ addAttribute<StringAttribute>(
+ dyn_field_nas, Schema::STRING, ct);
+ addAttribute<IntegerAttribute>(
+ zcurve_field, dynamic_zcurve_value, Schema::INT64, ct);
+ PredicateAttribute *attr = addAttribute<PredicateAttribute>(
+ dyn_field_p, Schema::BOOLEANTREE, ct);
+ attr->getIndex().indexEmptyDocument(lid);
+ attr->commit();
+ ct = Schema::ARRAY;
+ addAttribute<IntegerAttribute>(
+ dyn_arr_field_i, dyn_value_i, Schema::INT32, ct);
+ addAttribute<FloatingPointAttribute>(
+ dyn_arr_field_d, dyn_value_d, Schema::DOUBLE, ct);
+ addAttribute<StringAttribute>(
+ dyn_arr_field_s, dyn_value_s, Schema::STRING, ct);
+ addAttribute<FloatingPointAttribute>(
+ dyn_arr_field_n, Schema::FLOAT, ct);
+ ct = Schema::WEIGHTEDSET;
+ addAttribute<IntegerAttribute>(
+ dyn_wset_field_i, dyn_value_i, Schema::INT32, ct);
+ addAttribute<FloatingPointAttribute>(
+ dyn_wset_field_d, dyn_value_d, Schema::DOUBLE, ct);
+ addAttribute<StringAttribute>(
+ dyn_wset_field_s, dyn_value_s, Schema::STRING, ct);
+ addAttribute<FloatingPointAttribute>(
+ dyn_wset_field_n, Schema::FLOAT, ct);
+ }
+};
+
+TEST_F("require that document retriever can retrieve document meta data",
+ Fixture) {
+ DocumentMetaData meta_data = f.retriever.getDocumentMetaData(doc_id);
+ EXPECT_EQUAL(f.lid, meta_data.lid);
+ EXPECT_EQUAL(f.timestamp, meta_data.timestamp);
+}
+
+TEST_F("require that document retriever can retrieve bucket meta data",
+ Fixture) {
+ DocumentMetaData::Vector result;
+ f.retriever.getBucketMetaData(Bucket(f.bucket_id, PartitionId(0)), result);
+ ASSERT_EQUAL(1u, result.size());
+ EXPECT_EQUAL(f.lid, result[0].lid);
+ EXPECT_EQUAL(f.timestamp, result[0].timestamp);
+ result.clear();
+ f.retriever.getBucketMetaData(Bucket(BucketId(f.bucket_id.getId() + 1),
+ PartitionId(0)), result);
+ EXPECT_EQUAL(0u, result.size());
+}
+
+TEST_F("require that document retriever can retrieve document", Fixture) {
+ DocumentMetaData meta_data = f.retriever.getDocumentMetaData(doc_id);
+ Document::UP doc = f.retriever.getDocument(meta_data.lid);
+ ASSERT_TRUE(doc.get());
+ EXPECT_EQUAL(doc_id, doc->getId());
+}
+
+template <typename T>
+bool checkFieldValue(FieldValue::UP field_value, typename T::value_type v) {
+ ASSERT_TRUE(field_value.get());
+ T *t_value = dynamic_cast<T *>(field_value.get());
+ ASSERT_TRUE(t_value);
+ return EXPECT_EQUAL(v, t_value->getValue());
+}
+
+template <typename T>
+void checkArray(FieldValue::UP array, typename T::value_type v) {
+ ASSERT_TRUE(array.get());
+ ArrayFieldValue *array_val = dynamic_cast<ArrayFieldValue *>(array.get());
+ ASSERT_TRUE(array_val);
+ ASSERT_EQUAL(2u, array_val->size());
+ T *t_value = dynamic_cast<T *>(&(*array_val)[0]);
+ ASSERT_TRUE(t_value);
+ t_value = dynamic_cast<T *>(&(*array_val)[1]);
+ ASSERT_TRUE(t_value);
+ EXPECT_EQUAL(v, t_value->getValue());
+}
+
+template <typename T>
+void checkWset(FieldValue::UP wset, T v) {
+ ASSERT_TRUE(wset.get());
+ WeightedSetFieldValue *wset_val =
+ dynamic_cast<WeightedSetFieldValue *>(wset.get());
+ ASSERT_TRUE(wset_val);
+ ASSERT_EQUAL(2u, wset_val->size());
+ EXPECT_EQUAL(dyn_weight, wset_val->get(v));
+ EXPECT_EQUAL(dyn_weight, wset_val->get(v + 1));
+}
+
+TEST_F("require that attributes are patched into stored document", Fixture) {
+ DocumentMetaData meta_data = f.retriever.getDocumentMetaData(doc_id);
+ Document::UP doc = f.retriever.getDocument(meta_data.lid);
+ ASSERT_TRUE(doc.get());
+
+ FieldValue::UP value = doc->getValue(static_field);
+ ASSERT_TRUE(value.get());
+ IntFieldValue *int_value = dynamic_cast<IntFieldValue *>(value.get());
+ ASSERT_TRUE(int_value);
+ EXPECT_EQUAL(static_value, int_value->getValue());
+
+ EXPECT_TRUE(checkFieldValue<IntFieldValue>(doc->getValue(static_field), static_value));
+ EXPECT_TRUE(checkFieldValue<IntFieldValue>(doc->getValue(dyn_field_i), dyn_value_i));
+ EXPECT_TRUE(checkFieldValue<DoubleFieldValue>(doc->getValue(dyn_field_d), dyn_value_d));
+ EXPECT_TRUE(checkFieldValue<StringFieldValue>(doc->getValue(dyn_field_s), dyn_value_s));
+ EXPECT_FALSE(doc->getValue(dyn_field_n));
+ EXPECT_FALSE(doc->getValue(dyn_field_nai));
+ EXPECT_FALSE(doc->getValue(dyn_field_nas));
+
+ checkArray<IntFieldValue>(doc->getValue(dyn_arr_field_i), dyn_value_i);
+ checkArray<DoubleFieldValue>(doc->getValue(dyn_arr_field_d), dyn_value_d);
+ checkArray<StringFieldValue>(doc->getValue(dyn_arr_field_s), dyn_value_s);
+ EXPECT_FALSE(doc->getValue(dyn_arr_field_n));
+
+ checkWset(doc->getValue(dyn_wset_field_i), dyn_value_i);
+ checkWset(doc->getValue(dyn_wset_field_d), dyn_value_d);
+ checkWset(doc->getValue(dyn_wset_field_s), dyn_value_s);
+ EXPECT_FALSE(doc->getValue(dyn_wset_field_n));
+}
+
+TEST_F("require that attributes are patched into stored document unless also index field", Fixture) {
+ f.schema.addIndexField(Schema::IndexField(dyn_field_s, Schema::STRING));
+ DocumentMetaData meta_data = f.retriever.getDocumentMetaData(doc_id);
+ Document::UP doc = f.retriever.getDocument(meta_data.lid);
+ ASSERT_TRUE(doc.get());
+ checkFieldValue<StringFieldValue>(doc->getValue(dyn_field_s), static_value_s);
+}
+
+TEST_F("require that position fields are regenerated from zcurves", Fixture) {
+ DocumentMetaData meta_data = f.retriever.getDocumentMetaData(doc_id);
+ Document::UP doc = f.retriever.getDocument(meta_data.lid);
+ ASSERT_TRUE(doc.get());
+
+ FieldValue::UP value = doc->getValue(position_field);
+ ASSERT_TRUE(value.get());
+ StructFieldValue *position = dynamic_cast<StructFieldValue *>(value.get());
+ ASSERT_TRUE(position);
+ FieldValue::UP x = position->getValue(PositionDataType::FIELD_X);
+ FieldValue::UP y = position->getValue(PositionDataType::FIELD_Y);
+ EXPECT_EQUAL(-123096000, static_cast<IntFieldValue&>(*x).getValue());
+ EXPECT_EQUAL(49401000, static_cast<IntFieldValue&>(*y).getValue());
+
+ checkFieldValue<LongFieldValue>(doc->getValue(zcurve_field),
+ dynamic_zcurve_value);
+}
+
+TEST_F("require that non-existing lid returns null pointer", Fixture) {
+ Document::UP doc = f.retriever.getDocument(0);
+ ASSERT_FALSE(doc.get());
+}
+
+TEST_F("require that predicate attributes can be retrieved", Fixture) {
+ DocumentMetaData meta_data = f.retriever.getDocumentMetaData(doc_id);
+ Document::UP doc = f.retriever.getDocument(meta_data.lid);
+ ASSERT_TRUE(doc.get());
+
+ FieldValue::UP value = doc->getValue(dyn_field_p);
+ ASSERT_TRUE(value.get());
+ PredicateFieldValue *predicate_value =
+ dynamic_cast<PredicateFieldValue *>(value.get());
+ ASSERT_TRUE(predicate_value);
+}
+
+
+} // namespace
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/server/feeddebugger_test.cpp b/searchcore/src/tests/proton/server/feeddebugger_test.cpp
new file mode 100644
index 00000000000..dfb1e7aa5ef
--- /dev/null
+++ b/searchcore/src/tests/proton/server/feeddebugger_test.cpp
@@ -0,0 +1,85 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Unit tests for feeddebugger.
+
+#include <vespa/log/log.h>
+LOG_SETUP("feeddebugger_test");
+#include <vespa/fastos/fastos.h>
+
+#include <vespa/document/base/documentid.h>
+#include <vespa/searchcore/proton/common/feeddebugger.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+using document::DocumentId;
+using std::string;
+using namespace proton;
+
+namespace {
+
+const char lid_env_name[] = "VESPA_PROTON_DEBUG_FEED_LID_LIST";
+const char docid_env_name[] = "VESPA_PROTON_DEBUG_FEED_DOCID_LIST";
+
+class EnvSaver {
+ const char *_name;
+ string _value;
+ bool _is_set;
+
+public:
+ EnvSaver(const char *name) : _name(name) {
+ char *val = getenv(_name);
+ _is_set = val;
+ if (val) {
+ _value = val;
+ }
+ }
+ ~EnvSaver() {
+ if (_is_set) {
+ setenv(_name, _value.c_str(), true);
+ } else {
+ unsetenv(_name);
+ }
+ }
+};
+
+TEST("require that when environment variable is not set, debugging is off") {
+ EnvSaver save_lid_env(lid_env_name);
+ EnvSaver save_docid_env(docid_env_name);
+ FeedDebugger debugger;
+ EXPECT_FALSE(debugger.isDebugging());
+}
+
+TEST("require that setting an environment variable turns on lid-specific"
+ " debugging.") {
+ EnvSaver save_lid_env(lid_env_name);
+ EnvSaver save_docid_env(docid_env_name);
+ setenv(lid_env_name, "1,3,5", true);
+
+ FeedDebugger debugger;
+ EXPECT_TRUE(debugger.isDebugging());
+ EXPECT_EQUAL(ns_log::Logger::info, debugger.getDebugLevel(1, 0));
+ EXPECT_EQUAL(ns_log::Logger::spam, debugger.getDebugLevel(2, 0));
+ EXPECT_EQUAL(ns_log::Logger::info, debugger.getDebugLevel(3, 0));
+ EXPECT_EQUAL(ns_log::Logger::spam, debugger.getDebugLevel(4, 0));
+ EXPECT_EQUAL(ns_log::Logger::info, debugger.getDebugLevel(5, 0));
+}
+
+TEST("require that setting an environment variable turns on docid-specific"
+ " debugging.") {
+ EnvSaver save_lid_env(lid_env_name);
+ EnvSaver save_docid_env(docid_env_name);
+ setenv(docid_env_name, "doc:test:foo,doc:test:bar,doc:test:baz", true);
+
+ FeedDebugger debugger;
+ EXPECT_TRUE(debugger.isDebugging());
+ EXPECT_EQUAL(ns_log::Logger::info,
+ debugger.getDebugLevel(1, DocumentId("doc:test:foo")));
+ EXPECT_EQUAL(ns_log::Logger::info,
+ debugger.getDebugLevel(1, DocumentId("doc:test:bar")));
+ EXPECT_EQUAL(ns_log::Logger::info,
+ debugger.getDebugLevel(1, DocumentId("doc:test:baz")));
+ EXPECT_EQUAL(ns_log::Logger::spam,
+ debugger.getDebugLevel(1, DocumentId("doc:test:qux")));
+}
+
+} // namespace
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/server/feedstates_test.cpp b/searchcore/src/tests/proton/server/feedstates_test.cpp
new file mode 100644
index 00000000000..1d38fe6806a
--- /dev/null
+++ b/searchcore/src/tests/proton/server/feedstates_test.cpp
@@ -0,0 +1,136 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Unit tests for feedstates.
+
+#include <vespa/log/log.h>
+LOG_SETUP("feedstates_test");
+#include <vespa/fastos/fastos.h>
+
+#include <vespa/document/base/documentid.h>
+#include <vespa/document/base/testdocrepo.h>
+#include <vespa/document/bucket/bucketid.h>
+#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/searchcore/proton/common/bucketfactory.h>
+#include <vespa/searchcore/proton/server/feedstates.h>
+#include <vespa/searchcore/proton/server/memoryconfigstore.h>
+#include <vespa/searchcore/proton/test/dummy_feed_view.h>
+#include <vespa/searchlib/common/serialnum.h>
+#include <vespa/vespalib/objects/nbostream.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/util/buffer.h>
+#include <vespa/searchcore/proton/bucketdb/bucketdbhandler.h>
+
+using document::BucketId;
+using document::DocumentId;
+using document::DocumentTypeRepo;
+using document::TestDocRepo;
+using search::transactionlog::Packet;
+using search::SerialNum;
+using storage::spi::Timestamp;
+using vespalib::ConstBufferRef;
+using vespalib::nbostream;
+using namespace proton;
+
+namespace {
+
+struct MyFeedView : public test::DummyFeedView {
+ TestDocRepo repo;
+ DocumentTypeRepo::SP repo_sp;
+ int remove_handled;
+
+ MyFeedView() : repo_sp(repo.getTypeRepoSp()), remove_handled(0) {}
+
+ virtual const DocumentTypeRepo::SP &getDocumentTypeRepo() const
+ { return repo_sp; }
+ virtual void handleRemove(FeedToken *, const RemoveOperation &)
+ { ++remove_handled; }
+};
+
+struct MyReplayConfig : IReplayConfig {
+ virtual void replayConfig(SerialNum) {}
+ virtual void replayWipeHistory(SerialNum, fastos::TimeStamp) {}
+};
+
+struct InstantExecutor : vespalib::Executor {
+ virtual Task::UP execute(Task::UP task) {
+ task->run();
+ return Task::UP();
+ }
+};
+
+struct Fixture
+{
+ MyFeedView feed_view1;
+ MyFeedView feed_view2;
+ IFeedView *feed_view_ptr;
+ MyReplayConfig replay_config;
+ MemoryConfigStore config_store;
+ BucketDBOwner _bucketDB;
+ bucketdb::BucketDBHandler _bucketDBHandler;
+ ReplayTransactionLogState state;
+
+ Fixture()
+ : feed_view1(),
+ feed_view2(),
+ feed_view_ptr(&feed_view1),
+ replay_config(),
+ config_store(),
+ _bucketDB(),
+ _bucketDBHandler(_bucketDB),
+ state("doctypename", feed_view_ptr, _bucketDBHandler, replay_config,
+ config_store)
+ {
+ }
+};
+
+struct RemoveOperationContext
+{
+ DocumentId doc_id;
+ RemoveOperation op;
+ nbostream str;
+ std::unique_ptr<Packet> packet;
+
+ RemoveOperationContext(search::SerialNum serial)
+ : doc_id("doc:foo:bar"),
+ op(BucketFactory::getBucketId(doc_id), Timestamp(10), doc_id),
+ str(),
+ packet()
+ {
+ op.serialize(str);
+ ConstBufferRef buf(str.c_str(), str.wp());
+ packet.reset(new Packet());
+ packet->add(Packet::Entry(serial, FeedOperation::REMOVE, buf));
+ }
+};
+
+TEST_F("require that active FeedView can change during replay", Fixture)
+{
+ RemoveOperationContext opCtx(10);
+ PacketWrapper::SP wrap(new PacketWrapper(*opCtx.packet, NULL));
+ InstantExecutor executor;
+
+ EXPECT_EQUAL(0, f.feed_view1.remove_handled);
+ EXPECT_EQUAL(0, f.feed_view2.remove_handled);
+ f.state.receive(wrap, executor);
+ EXPECT_EQUAL(1, f.feed_view1.remove_handled);
+ EXPECT_EQUAL(0, f.feed_view2.remove_handled);
+ f.feed_view_ptr = &f.feed_view2;
+ f.state.receive(wrap, executor);
+ EXPECT_EQUAL(1, f.feed_view1.remove_handled);
+ EXPECT_EQUAL(1, f.feed_view2.remove_handled);
+}
+
+TEST_F("require that replay progress is tracked", Fixture)
+{
+ RemoveOperationContext opCtx(10);
+ TlsReplayProgress progress("test", 5, 15);
+ PacketWrapper::SP wrap(new PacketWrapper(*opCtx.packet, &progress));
+ InstantExecutor executor;
+
+ f.state.receive(wrap, executor);
+ EXPECT_EQUAL(10u, progress.getCurrent());
+ EXPECT_EQUAL(0.5, progress.getProgress());
+}
+
+} // namespace
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/server/health_adapter/.gitignore b/searchcore/src/tests/proton/server/health_adapter/.gitignore
new file mode 100644
index 00000000000..c82499f49b7
--- /dev/null
+++ b/searchcore/src/tests/proton/server/health_adapter/.gitignore
@@ -0,0 +1 @@
+searchcore_health_adapter_test_app
diff --git a/searchcore/src/tests/proton/server/health_adapter/CMakeLists.txt b/searchcore/src/tests/proton/server/health_adapter/CMakeLists.txt
new file mode 100644
index 00000000000..2fee205f636
--- /dev/null
+++ b/searchcore/src/tests/proton/server/health_adapter/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_health_adapter_test_app
+ SOURCES
+ health_adapter_test.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_pcommon
+)
+vespa_add_test(NAME searchcore_health_adapter_test_app COMMAND searchcore_health_adapter_test_app)
diff --git a/searchcore/src/tests/proton/server/health_adapter/FILES b/searchcore/src/tests/proton/server/health_adapter/FILES
new file mode 100644
index 00000000000..6faa8f6155f
--- /dev/null
+++ b/searchcore/src/tests/proton/server/health_adapter/FILES
@@ -0,0 +1 @@
+health_adapter_test.cpp
diff --git a/searchcore/src/tests/proton/server/health_adapter/health_adapter_test.cpp b/searchcore/src/tests/proton/server/health_adapter/health_adapter_test.cpp
new file mode 100644
index 00000000000..1957d8fbf33
--- /dev/null
+++ b/searchcore/src/tests/proton/server/health_adapter/health_adapter_test.cpp
@@ -0,0 +1,59 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/searchcore/proton/server/health_adapter.h>
+#include <vespa/searchcore/proton/common/statusreport.h>
+
+using namespace proton;
+
+// Test double for StatusProducer: accumulates StatusReports via add() and
+// hands the collected list back verbatim from getStatusReports(), letting the
+// tests below drive HealthAdapter with arbitrary component states.
+struct MyStatusProducer : public StatusProducer {
+ StatusReport::List list;
+ // Appends a report for component 'comp' with the given state and message.
+ void add(const std::string &comp, StatusReport::State state,
+ const std::string &msg)
+ {
+ list.push_back(StatusReport::SP(new StatusReport(StatusReport::Params(comp).
+ state(state).message(msg))));
+ }
+ virtual StatusReport::List getStatusReports() const {
+ return list;
+ }
+};
+
+// In each TEST_FF below, f1 is the MyStatusProducer and f2 the HealthAdapter
+// wrapping it; tests inspect f2.getHealth().ok / .msg.
+
+// No status reports at all counts as healthy.
+TEST_FF("require that empty status list passes health check", MyStatusProducer(), HealthAdapter(f1)) {
+ EXPECT_TRUE(f2.getHealth().ok);
+ EXPECT_EQUAL(std::string("All OK"), f2.getHealth().msg);
+}
+
+// All components UPOK -> healthy, generic "All OK" message.
+TEST_FF("require that UP components passes health check", MyStatusProducer(), HealthAdapter(f1)) {
+ f1.add("c1", StatusReport::UPOK, "xxx");
+ f1.add("c2", StatusReport::UPOK, "yyy");
+ f1.add("c3", StatusReport::UPOK, "zzz");
+ EXPECT_TRUE(f2.getHealth().ok);
+ EXPECT_EQUAL(std::string("All OK"), f2.getHealth().msg);
+}
+
+// A single PARTIAL component fails the check and its message is surfaced
+// as "<component>: <msg>".
+TEST_FF("require that PARTIAL component fails health check", MyStatusProducer(), HealthAdapter(f1)) {
+ f1.add("c1", StatusReport::UPOK, "xxx");
+ f1.add("c2", StatusReport::PARTIAL, "yyy");
+ f1.add("c3", StatusReport::UPOK, "zzz");
+ EXPECT_FALSE(f2.getHealth().ok);
+ EXPECT_EQUAL(std::string("c2: yyy"), f2.getHealth().msg);
+}
+
+// A single DOWN component fails the check the same way as PARTIAL.
+TEST_FF("require that DOWN component fails health check", MyStatusProducer(), HealthAdapter(f1)) {
+ f1.add("c1", StatusReport::UPOK, "xxx");
+ f1.add("c2", StatusReport::DOWN, "yyy");
+ f1.add("c3", StatusReport::UPOK, "zzz");
+ EXPECT_FALSE(f2.getHealth().ok);
+ EXPECT_EQUAL(std::string("c2: yyy"), f2.getHealth().msg);
+}
+
+// Multiple failing components: messages are joined with ", " in report order,
+// healthy components omitted.
+TEST_FF("require that multiple failure messages are concatenated", MyStatusProducer(), HealthAdapter(f1)) {
+ f1.add("c1", StatusReport::PARTIAL, "xxx");
+ f1.add("c2", StatusReport::UPOK, "yyy");
+ f1.add("c3", StatusReport::DOWN, "zzz");
+ EXPECT_FALSE(f2.getHealth().ok);
+ EXPECT_EQUAL(std::string("c1: xxx, c3: zzz"), f2.getHealth().msg);
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/server/memoryconfigstore_test.cpp b/searchcore/src/tests/proton/server/memoryconfigstore_test.cpp
new file mode 100644
index 00000000000..301633404bc
--- /dev/null
+++ b/searchcore/src/tests/proton/server/memoryconfigstore_test.cpp
@@ -0,0 +1,211 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Unit tests for memoryconfigstore.
+
+#include <vespa/log/log.h>
+LOG_SETUP("memoryconfigstore_test");
+#include <vespa/fastos/fastos.h>
+
+#include <vespa/searchcommon/common/schema.h>
+#include <vespa/searchcore/proton/server/memoryconfigstore.h>
+#include <vespa/searchlib/common/serialnum.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/searchcore/proton/common/schemautil.h>
+
+using search::index::Schema;
+using search::SerialNum;
+using namespace proton;
+
+namespace {
+
+// Builds a minimal DocumentDBConfig for the given generation: all sub-config
+// shared pointers are left empty, only the generation, the supplied schema and
+// the fixed "client"/"test" identifiers are meaningful to these tests.
+DocumentDBConfig::SP
+getConfig(int64_t generation, const Schema::SP &schema)
+{
+ return DocumentDBConfig::SP(
+ new DocumentDBConfig(
+ generation,
+ DocumentDBConfig::RankProfilesConfigSP(),
+ DocumentDBConfig::IndexschemaConfigSP(),
+ DocumentDBConfig::AttributesConfigSP(),
+ DocumentDBConfig::SummaryConfigSP(),
+ DocumentDBConfig::SummarymapConfigSP(),
+ DocumentDBConfig::JuniperrcConfigSP(),
+ DocumentDBConfig::DocumenttypesConfigSP(),
+ document::DocumentTypeRepo::SP(),
+ search::TuneFileDocumentDB::SP(),
+ schema,
+ DocumentDBMaintenanceConfig::SP(),
+ "client",
+ "test"));
+}
+
+
+// Convenience overload: config with no schema attached.
+DocumentDBConfig::SP
+getConfig(int64_t generation)
+{
+ return getConfig(generation, Schema::SP());
+}
+
+
+// Builds a schema whose index-field count shrinks as 'step' grows:
+// step 0 -> foo1, foo2, foo3; step 1 -> foo1, foo2; step >= 2 -> foo1 only.
+// Used to simulate successive schema changes that drop fields.
+Schema::SP
+getSchema(int step)
+{
+ Schema::SP schema(new Schema);
+ schema->addIndexField(Schema::IndexField("foo1", Schema::STRING));
+ if (step < 2) {
+ schema->addIndexField(Schema::IndexField("foo2", Schema::STRING));
+ }
+ if (step < 1) {
+ schema->addIndexField(Schema::IndexField("foo3", Schema::STRING));
+ }
+ return schema;
+}
+
+// Round-trip: a config saved at serial 12 is loaded back (the config passed to
+// loadConfig is only a template) and keeps its original generation (10).
+TEST("require that configs can be stored and loaded") {
+ MemoryConfigStore config_store;
+ SerialNum serial(12);
+ config_store.saveConfig(*getConfig(10), Schema(), serial);
+ DocumentDBConfig::SP config;
+ Schema::SP history;
+ config_store.loadConfig(*getConfig(14), serial, config, history);
+ ASSERT_TRUE(config.get());
+ ASSERT_TRUE(history.get());
+ EXPECT_EQUAL(10, config->getGeneration());
+}
+
+// Best serial is the highest ever saved (saving a lower one does not lower it);
+// an empty store reports 0.
+TEST("require that best serial number is the most recent one") {
+ MemoryConfigStore config_store;
+ EXPECT_EQUAL(0u, config_store.getBestSerialNum());
+ config_store.saveConfig(*getConfig(10), Schema(), 5);
+ EXPECT_EQUAL(5u, config_store.getBestSerialNum());
+ config_store.saveConfig(*getConfig(10), Schema(), 2);
+ EXPECT_EQUAL(5u, config_store.getBestSerialNum());
+}
+
+// Oldest serial is the lowest ever saved; an empty store reports 0.
+TEST("require that oldest serial number is the first one or 0") {
+ MemoryConfigStore config_store;
+ EXPECT_EQUAL(0u, config_store.getOldestSerialNum());
+ config_store.saveConfig(*getConfig(10), Schema(), 5);
+ EXPECT_EQUAL(5u, config_store.getOldestSerialNum());
+ config_store.saveConfig(*getConfig(10), Schema(), 2);
+ EXPECT_EQUAL(2u, config_store.getOldestSerialNum());
+}
+
+// A serial number is valid exactly when a config was saved under it.
+TEST("require that existing serial numbers are valid") {
+ MemoryConfigStore config_store;
+ EXPECT_FALSE(config_store.hasValidSerial(5));
+ config_store.saveConfig(*getConfig(10), Schema(), 5);
+ EXPECT_TRUE(config_store.hasValidSerial(5));
+}
+
+// getPrevValidSerial(n) returns the largest saved serial strictly below n,
+// or 0 when none exists.
+TEST("require that prev valid serial number is the last one before the arg") {
+ MemoryConfigStore config_store;
+ EXPECT_EQUAL(0u, config_store.getPrevValidSerial(10));
+ config_store.saveConfig(*getConfig(10), Schema(), 5);
+ EXPECT_EQUAL(5u, config_store.getPrevValidSerial(10));
+ EXPECT_EQUAL(0u, config_store.getPrevValidSerial(5));
+ EXPECT_EQUAL(0u, config_store.getPrevValidSerial(4));
+ config_store.saveConfig(*getConfig(10), Schema(), 2);
+ EXPECT_EQUAL(0u, config_store.getPrevValidSerial(1));
+ EXPECT_EQUAL(0u, config_store.getPrevValidSerial(2));
+ EXPECT_EQUAL(2u, config_store.getPrevValidSerial(4));
+ EXPECT_EQUAL(2u, config_store.getPrevValidSerial(5));
+ EXPECT_EQUAL(5u, config_store.getPrevValidSerial(10));
+}
+
+// prune(n) removes configs with serial <= n and keeps the rest.
+TEST("require that prune removes old configs") {
+ MemoryConfigStore config_store;
+ config_store.saveConfig(*getConfig(10), Schema(), 5);
+ config_store.saveConfig(*getConfig(10), Schema(), 6);
+ EXPECT_TRUE(config_store.hasValidSerial(5));
+ config_store.prune(5);
+ EXPECT_FALSE(config_store.hasValidSerial(5));
+ EXPECT_TRUE(config_store.hasValidSerial(6));
+ config_store.prune(10);
+ EXPECT_FALSE(config_store.hasValidSerial(6));
+}
+
+// saveWipeHistoryConfig(6, 0) creates a new valid entry at serial 6 whose
+// history schema is empty, while the entry at serial 5 keeps its one-field
+// history untouched.
+TEST("require that wipe history clears previous history schema "
+ "and adds new, identical entry for current serial num") {
+ MemoryConfigStore config_store;
+ Schema::SP history(new Schema);
+ history->addIndexField(Schema::IndexField("foo", Schema::STRING));
+ config_store.saveConfig(*getConfig(10), *history, 5);
+ DocumentDBConfig::SP config;
+ config_store.loadConfig(*getConfig(14), 5, config, history);
+ EXPECT_EQUAL(1u, history->getNumIndexFields());
+ config_store.saveWipeHistoryConfig(6, 0);
+ EXPECT_TRUE(config_store.hasValidSerial(6));
+ config_store.loadConfig(*getConfig(14), 5, config, history);
+ EXPECT_EQUAL(1u, history->getNumIndexFields());
+ config_store.loadConfig(*getConfig(14), 6, config, history);
+ ASSERT_TRUE(config.get());
+ ASSERT_TRUE(history.get());
+ EXPECT_EQUAL(0u, history->getNumIndexFields());
+}
+
+
+// Builds a history with entries timestamped 100 (foo3 dropped) and 200 (foo2
+// dropped), then wipes at increasing time limits (50..250). Fields whose
+// wipe timestamp is <= the limit disappear from the loaded history schema:
+// limit 50/100 keeps both (2 fields), 150/200 keeps one, 250 keeps none.
+TEST("require that wipe history clears only portions of history")
+{
+ MemoryConfigStore config_store;
+ Schema::SP schema(getSchema(0));
+ Schema::SP history(new Schema);
+ DocumentDBConfig::SP config(getConfig(5, schema));
+ config_store.saveConfig(*config, *history, 5);
+ Schema::SP oldSchema(schema);
+ schema = getSchema(1);
+ history = SchemaUtil::makeHistorySchema(*schema, *oldSchema, *history,
+ 100);
+ config_store.saveConfig(*config, *history, 10);
+ oldSchema = schema;
+ schema = getSchema(2);
+ history = SchemaUtil::makeHistorySchema(*schema, *oldSchema, *history,
+ 200);
+ config_store.saveConfig(*config, *history, 15);
+ config_store.saveWipeHistoryConfig(20, 50);
+ config_store.saveWipeHistoryConfig(25, 100);
+ config_store.saveWipeHistoryConfig(30, 150);
+ config_store.saveWipeHistoryConfig(35, 200);
+ config_store.saveWipeHistoryConfig(40, 250);
+ DocumentDBConfig::SP oldconfig(config);
+ config_store.loadConfig(*oldconfig, 20, config, history);
+ EXPECT_EQUAL(2u, history->getNumIndexFields());
+ oldconfig = config;
+ config_store.loadConfig(*oldconfig, 25, config, history);
+ EXPECT_EQUAL(2u, history->getNumIndexFields());
+ oldconfig = config;
+ config_store.loadConfig(*oldconfig, 30, config, history);
+ EXPECT_EQUAL(1u, history->getNumIndexFields());
+ oldconfig = config;
+ config_store.loadConfig(*oldconfig, 35, config, history);
+ EXPECT_EQUAL(1u, history->getNumIndexFields());
+ oldconfig = config;
+ config_store.loadConfig(*oldconfig, 40, config, history);
+ EXPECT_EQUAL(0u, history->getNumIndexFields());
+}
+
+// Wiping at a serial that already has a saved config must not overwrite it:
+// the one-field history at serial 5 survives saveWipeHistoryConfig(5, 0).
+TEST("require that wipe history does nothing if serial num exists") {
+ MemoryConfigStore config_store;
+ Schema::SP history(new Schema);
+ history->addIndexField(Schema::IndexField("foo", Schema::STRING));
+ config_store.saveConfig(*getConfig(10), *history, 5);
+ DocumentDBConfig::SP config;
+ config_store.saveWipeHistoryConfig(5, 0);
+ config_store.loadConfig(*getConfig(14), 5, config, history);
+ EXPECT_EQUAL(1u, history->getNumIndexFields());
+}
+
+// Dropping the ConfigStore handle and re-fetching it by name from the same
+// MemoryConfigStores must yield a store that still has the saved serial.
+TEST("require that MemoryConfigStores preserves state of "
+ "MemoryConfigStore between instantiations") {
+ MemoryConfigStores config_stores;
+ const std::string name("foo");
+ ConfigStore::UP config_store = config_stores.getConfigStore(name);
+ config_store->saveConfig(*getConfig(10), Schema(), 5);
+ EXPECT_TRUE(config_store->hasValidSerial(5));
+ config_store.reset();
+ config_store = config_stores.getConfigStore(name);
+ EXPECT_TRUE(config_store->hasValidSerial(5));
+}
+
+} // namespace
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/server/memoryflush/.gitignore b/searchcore/src/tests/proton/server/memoryflush/.gitignore
new file mode 100644
index 00000000000..e7a2a22798a
--- /dev/null
+++ b/searchcore/src/tests/proton/server/memoryflush/.gitignore
@@ -0,0 +1 @@
+searchcore_memoryflush_test_app
diff --git a/searchcore/src/tests/proton/server/memoryflush/CMakeLists.txt b/searchcore/src/tests/proton/server/memoryflush/CMakeLists.txt
new file mode 100644
index 00000000000..51ea36dc077
--- /dev/null
+++ b/searchcore/src/tests/proton/server/memoryflush/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_memoryflush_test_app
+ SOURCES
+ memoryflush_test.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_flushengine
+)
+vespa_add_test(NAME searchcore_memoryflush_test_app COMMAND searchcore_memoryflush_test_app)
diff --git a/searchcore/src/tests/proton/server/memoryflush/DESC b/searchcore/src/tests/proton/server/memoryflush/DESC
new file mode 100644
index 00000000000..69bfba597c4
--- /dev/null
+++ b/searchcore/src/tests/proton/server/memoryflush/DESC
@@ -0,0 +1 @@
+memoryflush test. Take a look at memoryflush_test.cpp for details.
diff --git a/searchcore/src/tests/proton/server/memoryflush/FILES b/searchcore/src/tests/proton/server/memoryflush/FILES
new file mode 100644
index 00000000000..94ca0c97eb0
--- /dev/null
+++ b/searchcore/src/tests/proton/server/memoryflush/FILES
@@ -0,0 +1 @@
+memoryflush_test.cpp
diff --git a/searchcore/src/tests/proton/server/memoryflush/memoryflush_test.cpp b/searchcore/src/tests/proton/server/memoryflush/memoryflush_test.cpp
new file mode 100644
index 00000000000..2f4083228f9
--- /dev/null
+++ b/searchcore/src/tests/proton/server/memoryflush/memoryflush_test.cpp
@@ -0,0 +1,361 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("memoryflush_test");
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/searchcore/proton/flushengine/flushcontext.h>
+#include <vespa/searchcore/proton/flushengine/iflushhandler.h>
+#include <vespa/searchcore/proton/flushengine/tls_stats_map.h>
+#include <vespa/searchcore/proton/test/dummy_flush_target.h>
+#include <vespa/searchcorespi/flush/iflushtarget.h>
+#include <vespa/searchcore/proton/server/memoryflush.h>
+
+using fastos::TimeStamp;
+using search::SerialNum;
+using namespace proton;
+using namespace searchcorespi;
+
+namespace
+{
+
+static constexpr uint64_t gibi = UINT64_C(1024) * UINT64_C(1024) * UINT64_C(1024);
+
+}
+
+typedef IFlushTarget::MemoryGain MemoryGain;
+typedef IFlushTarget::DiskGain DiskGain;
+
+// Minimal no-op IFlushHandler: only its name matters to the tests (it keys
+// the TlsStatsMap entries built by ContextBuilder). All overrides are inert.
+class MyFlushHandler : public IFlushHandler {
+public:
+ MyFlushHandler(const vespalib::string &name) : IFlushHandler(name) {}
+ // Implements IFlushHandler
+ virtual std::vector<IFlushTarget::SP> getFlushTargets() {
+ return std::vector<IFlushTarget::SP>();
+ }
+ virtual SerialNum getCurrentSerialNumber() const { return 0; }
+ virtual void flushDone(SerialNum oldestSerial) { (void) oldestSerial; }
+
+ virtual void
+ syncTls(search::SerialNum syncTo)
+ {
+ (void) syncTo;
+ }
+};
+
+// Stub flush target returning fixed, constructor-supplied values for the five
+// properties MemoryFlush bases its ordering decisions on: memory gain, disk
+// gain, flushed serial, last flush time and the urgent-flush flag.
+class MyFlushTarget : public test::DummyFlushTarget {
+private:
+ MemoryGain _memoryGain;
+ DiskGain _diskGain;
+ SerialNum _flushedSerial;
+ TimeStamp _lastFlushTime;
+ bool _urgentFlush;
+public:
+ MyFlushTarget(const vespalib::string &name, MemoryGain memoryGain,
+ DiskGain diskGain, SerialNum flushedSerial,
+ TimeStamp lastFlushTime, bool urgentFlush) :
+ test::DummyFlushTarget(name),
+ _memoryGain(memoryGain),
+ _diskGain(diskGain),
+ _flushedSerial(flushedSerial),
+ _lastFlushTime(lastFlushTime),
+ _urgentFlush(urgentFlush)
+ {
+ }
+ // Implements IFlushTarget
+ virtual MemoryGain getApproxMemoryGain() const override { return _memoryGain; }
+ virtual DiskGain getApproxDiskGain() const override { return _diskGain; }
+ virtual SerialNum getFlushedSerialNum() const override { return _flushedSerial; }
+ virtual TimeStamp getLastFlushTime() const override { return _lastFlushTime; }
+ virtual bool needUrgentFlush() const override { return _urgentFlush; }
+};
+
+// vector<string> with a chainable add(), so expected orderings can be written
+// inline as StringList().add("t1").add("t2")...
+struct StringList : public std::vector<vespalib::string> {
+ StringList() : std::vector<vespalib::string>() {}
+ StringList &add(const vespalib::string &str) {
+ push_back(str);
+ return *this;
+ }
+};
+
+// Assembles the FlushContext list and matching TlsStatsMap handed to
+// MemoryFlush::getFlushTargets(). All contexts added via the target overload
+// share one MyFlushHandler ("myhandler").
+class ContextBuilder {
+private:
+ FlushContext::List _list;
+ IFlushHandler::SP _handler;
+ flushengine::TlsStatsMap::Map _map;
+ // Extends the handler's TlsStats so its last serial covers 'lastSerial'
+ // (only bumps, never lowers).
+ void
+ fixupMap(const vespalib::string &name, SerialNum lastSerial)
+ {
+ flushengine::TlsStats oldStats = _map[name];
+ if (oldStats.getLastSerial() < lastSerial) {
+ _map[name] =
+ flushengine::TlsStats(oldStats.getNumBytes(),
+ oldStats.getFirstSerial(),
+ lastSerial);
+ }
+ }
+public:
+ ContextBuilder() : _list(), _handler(new MyFlushHandler("myhandler")) {}
+ // Installs explicit TlsStats for a handler name (see the tls-size test).
+ void addTls(const vespalib::string &name,
+ const flushengine::TlsStats &tlsStats) {
+ _map[name] = tlsStats;
+ }
+ ContextBuilder &add(const FlushContext::SP &context) {
+ _list.push_back(context);
+ fixupMap(_handler->getName(), context->getLastSerial());
+ return *this;
+ }
+ // Wraps a target in a FlushContext owned by the shared handler.
+ ContextBuilder &add(const IFlushTarget::SP &target, SerialNum lastSerial = 0) {
+ FlushContext::SP ctx(new FlushContext(_handler, target, 0, lastSerial));
+ return add(ctx);
+ }
+ const FlushContext::List &list() const { return _list; }
+ flushengine::TlsStatsMap tlsStats() const {
+ flushengine::TlsStatsMap::Map map(_map);
+ return flushengine::TlsStatsMap(std::move(map));
+ }
+};
+
+// Factory helpers: each builds a MyFlushTarget where only the property named
+// by the suffix (M=memory gain, D=disk gain, S=serial, T=last flush time,
+// F=urgent flag) is interesting; everything else is default-constructed.
+
+MyFlushTarget::SP
+createTargetM(const vespalib::string &name, MemoryGain memoryGain)
+{
+ return MyFlushTarget::SP(new MyFlushTarget(name, memoryGain, DiskGain(),
+ SerialNum(), TimeStamp(), false));
+}
+
+MyFlushTarget::SP
+createTargetD(const vespalib::string &name, DiskGain diskGain, SerialNum serial = 0)
+{
+ return MyFlushTarget::SP(new MyFlushTarget(name, MemoryGain(), diskGain,
+ serial, TimeStamp(), false));
+}
+
+MyFlushTarget::SP
+createTargetS(const vespalib::string &name, SerialNum serial, TimeStamp timeStamp = TimeStamp())
+{
+ return MyFlushTarget::SP(new MyFlushTarget(name, MemoryGain(), DiskGain(),
+ serial, timeStamp, false));
+}
+
+MyFlushTarget::SP
+createTargetT(const vespalib::string &name, TimeStamp lastFlushTime, SerialNum serial = 0)
+{
+ return MyFlushTarget::SP(new MyFlushTarget(name, MemoryGain(), DiskGain(),
+ serial, lastFlushTime, false));
+}
+
+MyFlushTarget::SP
+createTargetF(const vespalib::string &name, bool urgentFlush)
+{
+ return MyFlushTarget::SP(new MyFlushTarget(name, MemoryGain(), DiskGain(),
+ SerialNum(), TimeStamp(), urgentFlush));
+}
+
+// Checks that 'act' contains exactly the targets named in 'exp', in order.
+// Returns false on the first mismatch (EXPECT_EQUAL records the failure).
+bool
+assertOrder(const StringList &exp, const FlushContext::List &act)
+{
+ if (!EXPECT_EQUAL(exp.size(), act.size())) return false;
+ for (size_t i = 0; i < exp.size(); ++i) {
+ if (!EXPECT_EQUAL(exp[i], act[i]->getTarget()->getName())) return false;
+ }
+ return true;
+}
+
+// Four targets with memory gains 5/10/15/20; whether triggered by a single
+// target exceeding maxMemoryGain (20) or the total exceeding the global
+// memory limit (50), the result is sorted by descending memory gain.
+void
+requireThatWeCanOrderByMemoryGain()
+{
+ ContextBuilder cb;
+ cb.add(createTargetM("t2", MemoryGain(10, 0)))
+ .add(createTargetM("t1", MemoryGain(5, 0)))
+ .add(createTargetM("t4", MemoryGain(20, 0)))
+ .add(createTargetM("t3", MemoryGain(15, 0)));
+ { // target t4 has memoryGain >= maxMemoryGain
+ MemoryFlush flush({1000, 20 * gibi, 1.0, 20, 1.0, 1000, TimeStamp(TimeStamp::MINUTE)});
+ EXPECT_TRUE(assertOrder(StringList().add("t4").add("t3").add("t2").add("t1"),
+ flush.getFlushTargets(cb.list(), cb.tlsStats())));
+ }
+ { // trigger totalMemoryGain >= globalMaxMemory
+ MemoryFlush flush({50, 20 * gibi, 1.0, 1000, 1.0, 1000, TimeStamp(TimeStamp::MINUTE)});
+ EXPECT_TRUE(assertOrder(StringList().add("t4").add("t3").add("t2").add("t1"),
+ flush.getFlushTargets(cb.list(), cb.tlsStats())));
+ }
+}
+
+// 1M in the DiskGain unit used by these tests (values are in bytes).
+int64_t milli = 1000000;
+
+// Disk-gain ordering with ~100MB targets: triggered either by one target's
+// bloat ratio exceeding the per-target bloat factor or by the summed ratio
+// exceeding the global bloat factor; order is by descending disk gain.
+void
+requireThatWeCanOrderByDiskGainWithLargeValues()
+{
+ ContextBuilder cb;
+ int64_t before = 100 * milli;
+ cb.add(createTargetD("t2", DiskGain(before, 70 * milli))) // gain 30M
+ .add(createTargetD("t1", DiskGain(before, 75 * milli))) // gain 25M
+ .add(createTargetD("t4", DiskGain(before, 45 * milli))) // gain 55M
+ .add(createTargetD("t3", DiskGain(before, 50 * milli))); // gain 50M
+ { // target t4 has diskGain > bloatValue
+ // t4 gain: 55M / 100M = 0.55 -> bloat factor 0.54 to trigger
+ MemoryFlush flush({1000, 20 * gibi, 10.0, 1000, 0.54, 1000, TimeStamp(TimeStamp::MINUTE)});
+ EXPECT_TRUE(assertOrder(StringList().add("t4").add("t3").add("t2").add("t1"),
+ flush.getFlushTargets(cb.list(), cb.tlsStats())));
+ }
+ { // trigger totalDiskGain > totalBloatValue
+ // total gain: 160M / 4 * 100M = 0.4 -> bloat factor 0.39 to trigger
+ MemoryFlush flush({1000, 20 * gibi, 0.39, 1000, 10.0, 1000, TimeStamp(TimeStamp::MINUTE)});
+ EXPECT_TRUE(assertOrder(StringList().add("t4").add("t3").add("t2").add("t1"),
+ flush.getFlushTargets(cb.list(), cb.tlsStats())));
+ }
+}
+
+// Same as above but with tiny (sub-100-byte) disk sizes, exercising the
+// minimum-disk-size floors noted in the comments below; bloat factors are
+// scaled down accordingly.
+void
+requireThatWeCanOrderByDiskGainWithSmallValues()
+{
+ ContextBuilder cb;
+ cb.add(createTargetD("t2", DiskGain(100, 70))) // gain 30
+ .add(createTargetD("t1", DiskGain(100, 75))) // gain 25
+ .add(createTargetD("t4", DiskGain(100, 45))) // gain 55
+ .add(createTargetD("t3", DiskGain(100, 50))); // gain 50
+ // total disk bloat value calculation uses min 100M disk size
+ // target bloat value calculation uses min 10M disk size
+ { // target t4 has diskGain > bloatValue
+ // t4 gain: 55 / 10M = 0.0000055 -> bloat factor 0.0000054 to trigger
+ MemoryFlush flush({1000, 20 * gibi, 10.0, 1000, 0.0000054, 1000, TimeStamp(TimeStamp::MINUTE)});
+ EXPECT_TRUE(assertOrder(StringList().add("t4").add("t3").add("t2").add("t1"),
+ flush.getFlushTargets(cb.list(), cb.tlsStats())));
+ }
+ { // trigger totalDiskGain > totalBloatValue
+ // total gain: 160 / 100M = 0.0000016 -> bloat factor 0.0000015 to trigger
+ MemoryFlush flush({1000, 20 * gibi, 0.0000015, 1000, 10.0, 1000, TimeStamp(TimeStamp::MINUTE)});
+ EXPECT_TRUE(assertOrder(StringList().add("t4").add("t3").add("t2").add("t1"),
+ flush.getFlushTargets(cb.list(), cb.tlsStats())));
+ }
+}
+
+// Targets flushed at serials 84..98 against last serial 99 (118 for t4):
+// t4's serial distance (20) reaches maxSerialGain (20) and triggers a flush
+// ordered by descending serial distance.
+void
+requireThatWeCanOrderBySerialNum()
+{
+ SerialNum lastSerial = 99;
+ ContextBuilder cb;
+ cb.add(createTargetS("t2", 89), lastSerial)
+ .add(createTargetS("t1", 94), lastSerial)
+ .add(createTargetS("t4", 98), lastSerial + 19)
+ .add(createTargetS("t3", 84), lastSerial);
+ { // target t4 has serialDiff >= maxSerialGain
+ MemoryFlush flush({1000, 20 * gibi, 1.0, 1000, 1.0, 20, TimeStamp(TimeStamp::MINUTE)});
+ EXPECT_TRUE(assertOrder(StringList().add("t4").add("t3").add("t2").add("t1"),
+ flush.getFlushTargets(cb.list(), cb.tlsStats())));
+ }
+}
+
+// Targets last flushed 0-15s ago (t4 never, i.e. epoch): with maxTimeGain 2s
+// all qualify and come out oldest-first; with maxTimeGain 30s none qualify
+// and the result is empty.
+void
+requireThatWeCanOrderByAge()
+{
+ TimeStamp now(fastos::ClockSystem::now());
+ TimeStamp start(now.val() - 20 * TimeStamp::SEC);
+ ContextBuilder cb;
+ cb.add(createTargetT("t2", TimeStamp(now.val() - 10 * TimeStamp::SEC)))
+ .add(createTargetT("t1", TimeStamp(now.val() - 5 * TimeStamp::SEC)))
+ .add(createTargetT("t4", TimeStamp()))
+ .add(createTargetT("t3", TimeStamp(now.val() - 15 * TimeStamp::SEC)));
+
+ { // all targets have timeDiff >= maxTimeGain
+ MemoryFlush flush({1000, 20 * gibi, 1.0, 1000, 1.0, 1000, TimeStamp(2 * TimeStamp::SEC)}, start);
+ EXPECT_TRUE(assertOrder(StringList().add("t4").add("t3").add("t2").add("t1"),
+ flush.getFlushTargets(cb.list(), cb.tlsStats())));
+ }
+ { // no targets have timeDiff >= maxTimeGain
+ MemoryFlush flush({1000, 20 * gibi, 1.0, 1000, 1.0, 1000, TimeStamp(30 * TimeStamp::SEC)}, start);
+ EXPECT_TRUE(assertOrder(StringList(), flush.getFlushTargets(cb.list(), cb.tlsStats())));
+ }
+}
+
+// Two handlers with 20GiB and 5GiB of TLS data. When the summed TLS size
+// exceeds the global TLS limit (3GiB case), targets are ordered by how much
+// TLS data a flush would free (serial distance within each handler's stats);
+// with a 30GiB limit nothing triggers and the result is empty.
+void
+requireThatWeCanOrderByTlsSize()
+{
+ TimeStamp now(fastos::ClockSystem::now());
+ TimeStamp start(now.val() - 20 * TimeStamp::SEC);
+ flushengine::TlsStatsMap::Map tlsMap;
+ ContextBuilder cb;
+ IFlushHandler::SP handler1(std::make_shared<MyFlushHandler>("handler1"));
+ IFlushHandler::SP handler2(std::make_shared<MyFlushHandler>("handler2"));
+ cb.addTls("handler1", {20 * gibi, 1001, 2000 });
+ cb.addTls("handler2", { 5 * gibi, 1001, 2000 });
+ cb.add(std::make_shared<FlushContext>
+ (handler1,
+ createTargetT("t2", TimeStamp(now.val() - 10 * TimeStamp::SEC),
+ 1900),
+ 2000, 2000)).
+ add(std::make_shared<FlushContext>
+ (handler2,
+ createTargetT("t1", TimeStamp(now.val() - 5 * TimeStamp::SEC),
+ 1000),
+ 2000, 2000)).
+ add(std::make_shared<FlushContext>
+ (handler1,
+ createTargetT("t4", TimeStamp(),
+ 1000),
+ 2000, 2000)).
+ add(std::make_shared<FlushContext>
+ (handler2,
+ createTargetT("t3", TimeStamp(now.val() - 15 * TimeStamp::SEC),
+ 1900),
+ 2000, 2000));
+ { // sum of tls sizes above limit, trigger sort order based on tls size
+ MemoryFlush flush({1000, 3 * gibi, 1.0, 1000, 1.0, 2000, TimeStamp(2 * TimeStamp::SEC)}, start);
+ EXPECT_TRUE(assertOrder(StringList().add("t4").add("t1").add("t2").add("t3"),
+ flush.getFlushTargets(cb.list(), cb.tlsStats())));
+ }
+ { // sum of tls sizes below limit
+ MemoryFlush flush({1000, 30 * gibi, 1.0, 1000, 1.0, 2000, TimeStamp(30 * TimeStamp::SEC)}, start);
+ EXPECT_TRUE(assertOrder(StringList(), flush.getFlushTargets(cb.list(), cb.tlsStats())));
+ }
+}
+
+// Pits pairs of targets that trigger for different reasons against each other
+// to pin the precedence of the ordering criteria: age beats serial, serial
+// beats disk bloat, disk bloat beats memory, and an urgent-flush target beats
+// everything.
+void
+requireThatOrderTypeIsPreserved()
+{
+ TimeStamp now(fastos::ClockSystem::now());
+ TimeStamp ts1(now.val() - 30 * TimeStamp::SEC);
+ TimeStamp ts2(now.val() - 20 * TimeStamp::SEC);
+ TimeStamp ts3(now.val() - 10 * TimeStamp::SEC);
+ TimeStamp maxTimeGain(15 * TimeStamp::SEC);
+ { // MAXAGE VS MAXSERIAL
+ ContextBuilder cb;
+ cb.add(createTargetT("t2", ts2, 5), 14)
+ .add(createTargetS("t1", 4, ts3), 14);
+ MemoryFlush flush({1000, 20 * gibi, 1.0, 1000, 1.0, 10, maxTimeGain}, ts1);
+ EXPECT_TRUE(assertOrder(StringList().add("t1").add("t2"), flush.getFlushTargets(cb.list(), cb.tlsStats())));
+ }
+ { // MAXSERIAL VS DISKBLOAT
+ ContextBuilder cb;
+ cb.add(createTargetS("t2", 4))
+ .add(createTargetD("t1", DiskGain(100 * milli, 80 * milli), 5));
+ MemoryFlush flush({1000, 20 * gibi, 1.0, 1000, 0.19, 10, TimeStamp(30 * TimeStamp::SEC)});
+ EXPECT_TRUE(assertOrder(StringList().add("t1").add("t2"), flush.getFlushTargets(cb.list(), cb.tlsStats())));
+ }
+ { // DISKBLOAT VS MEMORY
+ ContextBuilder cb;
+ cb.add(createTargetD("t2", DiskGain(100 * milli, 80 * milli)))
+ .add(createTargetM("t1", MemoryGain(100, 80)));
+ MemoryFlush flush({1000, 20 * gibi, 1.0, 20, 0.19, 1000, TimeStamp(30 * TimeStamp::SEC)});
+ EXPECT_TRUE(assertOrder(StringList().add("t1").add("t2"), flush.getFlushTargets(cb.list(), cb.tlsStats())));
+ }
+ { // urgent flush
+ ContextBuilder cb;
+ cb.add(createTargetF("t2", false))
+ .add(createTargetF("t1", true));
+ MemoryFlush flush({1000, 20 * gibi, 1.0, 1000, 1.0, 1000, TimeStamp(30 * TimeStamp::SEC)});
+ EXPECT_TRUE(assertOrder(StringList().add("t1").add("t2"), flush.getFlushTargets(cb.list(), cb.tlsStats())));
+ }
+}
+
+// Plain-function test style: run each scenario under TEST_DO so failures
+// report the originating call site.
+TEST_MAIN()
+{
+ TEST_DO(requireThatWeCanOrderByMemoryGain());
+ TEST_DO(requireThatWeCanOrderByDiskGainWithLargeValues());
+ TEST_DO(requireThatWeCanOrderByDiskGainWithSmallValues());
+ TEST_DO(requireThatWeCanOrderBySerialNum());
+ TEST_DO(requireThatWeCanOrderByAge());
+ TEST_DO(requireThatWeCanOrderByTlsSize());
+ TEST_DO(requireThatOrderTypeIsPreserved());
+}
+
+
diff --git a/searchcore/src/tests/proton/server/visibility_handler/.gitignore b/searchcore/src/tests/proton/server/visibility_handler/.gitignore
new file mode 100644
index 00000000000..3666e0c37c3
--- /dev/null
+++ b/searchcore/src/tests/proton/server/visibility_handler/.gitignore
@@ -0,0 +1 @@
+searchcore_visibility_handler_test_app
diff --git a/searchcore/src/tests/proton/server/visibility_handler/CMakeLists.txt b/searchcore/src/tests/proton/server/visibility_handler/CMakeLists.txt
new file mode 100644
index 00000000000..f86504c84dc
--- /dev/null
+++ b/searchcore/src/tests/proton/server/visibility_handler/CMakeLists.txt
@@ -0,0 +1,10 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_visibility_handler_test_app
+ SOURCES
+ visibility_handler_test.cpp
+ DEPENDS
+ searchcore_server
+)
+vespa_add_target_system_dependency(searchcore_visibility_handler_test_app boost boost_system-mt-d)
+vespa_add_target_system_dependency(searchcore_visibility_handler_test_app boost boost_filesystem-mt-d)
+vespa_add_test(NAME searchcore_visibility_handler_test_app COMMAND searchcore_visibility_handler_test_app)
diff --git a/searchcore/src/tests/proton/server/visibility_handler/DESC b/searchcore/src/tests/proton/server/visibility_handler/DESC
new file mode 100644
index 00000000000..588cd8b923e
--- /dev/null
+++ b/searchcore/src/tests/proton/server/visibility_handler/DESC
@@ -0,0 +1 @@
+visibility_handler test. Take a look at visibility_handler_test.cpp for details.
diff --git a/searchcore/src/tests/proton/server/visibility_handler/FILES b/searchcore/src/tests/proton/server/visibility_handler/FILES
new file mode 100644
index 00000000000..8dea2c9d408
--- /dev/null
+++ b/searchcore/src/tests/proton/server/visibility_handler/FILES
@@ -0,0 +1 @@
+visibility_handler_test.cpp
diff --git a/searchcore/src/tests/proton/server/visibility_handler/visibility_handler_test.cpp b/searchcore/src/tests/proton/server/visibility_handler/visibility_handler_test.cpp
new file mode 100644
index 00000000000..2b6b5bccee7
--- /dev/null
+++ b/searchcore/src/tests/proton/server/visibility_handler/visibility_handler_test.cpp
@@ -0,0 +1,188 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("visibility_handler_test");
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/searchcore/proton/server/visibilityhandler.h>
+#include <vespa/searchcore/proton/test/dummy_feed_view.h>
+#include <vespa/searchcore/proton/test/threading_service_observer.h>
+#include <vespa/searchcore/proton/server/executorthreadingservice.h>
+#include <vespa/searchlib/common/lambdatask.h>
+
+using search::SerialNum;
+using proton::IGetSerialNum;
+using proton::test::DummyFeedView;
+using proton::ExecutorThreadingService;
+using proton::test::ThreadingServiceObserver;
+using proton::IFeedView;
+using proton::VisibilityHandler;
+using search::makeLambdaTask;
+using fastos::TimeStamp;
+
+namespace {
+
+// Settable IGetSerialNum stub: returns whatever the test last stored via
+// setSerialNum() (initially 0).
+class MyGetSerialNum : public IGetSerialNum
+{
+ SerialNum _serialNum;
+public:
+ MyGetSerialNum()
+ : _serialNum(0u)
+ {
+ }
+ virtual SerialNum getSerialNum() const override { return _serialNum; }
+ void setSerialNum(SerialNum serialNum) { _serialNum = serialNum; }
+};
+
+
+
+// Feed view stub that records forceCommit() calls: counts invocations and
+// remembers the last committed serial, asserting serials never go backwards.
+class MyFeedView : public DummyFeedView
+{
+ uint32_t _forceCommitCount;
+ SerialNum _committedSerialNum;
+
+
+public:
+ MyFeedView()
+ : _forceCommitCount(0u),
+ _committedSerialNum(0u)
+ {
+ }
+
+ void forceCommit(SerialNum serialNum) override
+ {
+ EXPECT_TRUE(serialNum >= _committedSerialNum);
+ _committedSerialNum = serialNum;
+ ++_forceCommitCount;
+ }
+
+ uint32_t getForceCommitCount() const { return _forceCommitCount; }
+ SerialNum getCommittedSerialNum() const { return _committedSerialNum; }
+};
+
+
+// Test fixture wiring a VisibilityHandler to the stubs above: serial-number
+// source, an observed threading service (so executor/sync activity can be
+// counted) and MyFeedView behind a VarHolder. The two test drivers run a
+// commit either from the caller thread ("external") or from the master
+// executor thread ("internal") and verify the observable effects.
+class Fixture
+{
+public:
+ MyGetSerialNum _getSerialNum;
+ ExecutorThreadingService _writeServiceReal;
+ ThreadingServiceObserver _writeService;
+ std::shared_ptr<MyFeedView> _feedViewReal;
+ vespalib::VarHolder<IFeedView::SP> _feedView;
+ VisibilityHandler _visibilityHandler;
+
+
+ Fixture()
+ : _getSerialNum(),
+ _writeServiceReal(),
+ _writeService(_writeServiceReal),
+ _feedViewReal(std::make_shared<MyFeedView>()),
+ _feedView(_feedViewReal),
+ _visibilityHandler(_getSerialNum, _writeService, _feedView)
+ {
+ }
+
+ // Asserts the four observable counters after a commit attempt:
+ // forceCommit() calls on the feed view, the serial it committed, master
+ // executor execute() calls, and attribute-field-writer sync() calls.
+ void
+ checkCommitPostCondition(uint32_t expForceCommitCount,
+ SerialNum expCommittedSerialNum,
+ uint32_t expMasterExecuteCnt,
+ uint32_t expAttributeFieldWriterSyncCnt)
+ {
+ EXPECT_EQUAL(expForceCommitCount, _feedViewReal->getForceCommitCount());
+ EXPECT_EQUAL(expCommittedSerialNum,
+ _feedViewReal->getCommittedSerialNum());
+ EXPECT_EQUAL(expMasterExecuteCnt,
+ _writeService.masterObserver().getExecuteCnt());
+ EXPECT_EQUAL(expAttributeFieldWriterSyncCnt,
+ _writeService.attributeFieldWriterObserver().getSyncCnt());
+ }
+
+ // Drives commit() with serial 10 and the given visibility delay.
+ // internal=true runs the commit as a task on the master thread; otherwise
+ // it is called directly from this thread. Syncs the master thread before
+ // checking post-conditions.
+ void
+ testCommit(double visibilityDelay, bool internal,
+ uint32_t expForceCommitCount, SerialNum expCommittedSerialNum,
+ uint32_t expMasterExecuteCnt,
+ uint32_t expAttributeFieldWriterSyncCnt)
+ {
+ _getSerialNum.setSerialNum(10u);
+ _visibilityHandler.setVisibilityDelay(TimeStamp::Seconds(visibilityDelay));
+ if (internal) {
+ VisibilityHandler *visibilityHandler = &_visibilityHandler;
+ auto task = makeLambdaTask([=]() { visibilityHandler->commit(); });
+ _writeService.master().execute(std::move(task));
+ } else {
+ _visibilityHandler.commit();
+ }
+ _writeService.master().sync();
+ checkCommitPostCondition(expForceCommitCount,
+ expCommittedSerialNum,
+ expMasterExecuteCnt,
+ expAttributeFieldWriterSyncCnt);
+ }
+
+ // Same as testCommit() but exercises commitAndWait(); note the master
+ // sync happens only on the internal path here, since the external call
+ // blocks until the commit is visible.
+ void
+ testCommitAndWait(double visibilityDelay, bool internal,
+ uint32_t expForceCommitCount,
+ SerialNum expCommittedSerialNum,
+ uint32_t expMasterExecuteCnt,
+ uint32_t expAttributeFieldWriterSyncCnt)
+ {
+ _getSerialNum.setSerialNum(10u);
+ _visibilityHandler.setVisibilityDelay(TimeStamp::Seconds(visibilityDelay));
+ if (internal) {
+ VisibilityHandler *visibilityHandler = &_visibilityHandler;
+ auto task =
+ makeLambdaTask([=]() { visibilityHandler->commitAndWait(); });
+ _writeService.master().execute(std::move(task));
+ _writeService.master().sync();
+ } else {
+ _visibilityHandler.commitAndWait();
+ }
+ checkCommitPostCondition(expForceCommitCount,
+ expCommittedSerialNum,
+ expMasterExecuteCnt,
+ expAttributeFieldWriterSyncCnt);
+ }
+};
+
+}
+
+// Expected-count matrix: with zero visibility delay commit() is a no-op
+// (0 forceCommits); with a nonzero delay it forceCommits serial 10 once.
+// commitAndWait() additionally syncs the attribute field writer (last arg 1)
+// regardless of delay, and internal calls always cost one master execute.
+
+TEST_F("Check external commit with zero visibility delay", Fixture)
+{
+ f.testCommit(0.0, false, 0u, 0u, 0u, 0u);
+}
+
+TEST_F("Check external commit with nonzero visibility delay", Fixture)
+{
+ f.testCommit(1.0, false, 1u, 10u, 1u, 0u);
+}
+
+TEST_F("Check internal commit with zero visibility delay", Fixture)
+{
+ f.testCommit(0.0, true, 0u, 0u, 1u, 0u);
+}
+
+TEST_F("Check internal commit with nonzero visibility delay", Fixture)
+{
+ f.testCommit(1.0, true, 1u, 10u, 1u, 0u);
+}
+
+TEST_F("Check external commitAndWait with zero visibility delay", Fixture)
+{
+ f.testCommitAndWait(0.0, false, 0u, 0u, 0u, 1u);
+}
+
+TEST_F("Check external commitAndWait with nonzero visibility delay", Fixture)
+{
+ f.testCommitAndWait(1.0, false, 1u, 10u, 1u, 1u);
+}
+
+TEST_F("Check internal commitAndWait with zero visibility delay", Fixture)
+{
+ f.testCommitAndWait(0.0, true, 0u, 0u, 1u, 1u);
+}
+
+TEST_F("Check internal commitAndWait with nonzero visibility delay", Fixture)
+{
+ f.testCommitAndWait(1.0, true, 1u, 10u, 1u, 1u);
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/statusreport/.gitignore b/searchcore/src/tests/proton/statusreport/.gitignore
new file mode 100644
index 00000000000..68753df292a
--- /dev/null
+++ b/searchcore/src/tests/proton/statusreport/.gitignore
@@ -0,0 +1 @@
+searchcore_statusreport_test_app
diff --git a/searchcore/src/tests/proton/statusreport/CMakeLists.txt b/searchcore/src/tests/proton/statusreport/CMakeLists.txt
new file mode 100644
index 00000000000..fa11b343d0d
--- /dev/null
+++ b/searchcore/src/tests/proton/statusreport/CMakeLists.txt
@@ -0,0 +1,7 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_statusreport_test_app
+ SOURCES
+ statusreport.cpp
+ DEPENDS
+)
+vespa_add_test(NAME searchcore_statusreport_test_app COMMAND searchcore_statusreport_test_app)
diff --git a/searchcore/src/tests/proton/statusreport/DESC b/searchcore/src/tests/proton/statusreport/DESC
new file mode 100644
index 00000000000..36c1c49cc80
--- /dev/null
+++ b/searchcore/src/tests/proton/statusreport/DESC
@@ -0,0 +1 @@
+statusreport test. Take a look at statusreport.cpp for details.
diff --git a/searchcore/src/tests/proton/statusreport/FILES b/searchcore/src/tests/proton/statusreport/FILES
new file mode 100644
index 00000000000..fe27097df03
--- /dev/null
+++ b/searchcore/src/tests/proton/statusreport/FILES
@@ -0,0 +1 @@
+statusreport.cpp
diff --git a/searchcore/src/tests/proton/statusreport/statusreport.cpp b/searchcore/src/tests/proton/statusreport/statusreport.cpp
new file mode 100644
index 00000000000..81dfe05fa2c
--- /dev/null
+++ b/searchcore/src/tests/proton/statusreport/statusreport.cpp
@@ -0,0 +1,44 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("statusreport_test");
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/searchcore/proton/common/statusreport.h>
+
+namespace proton {
+
+// A report constructed from Params alone defaults to state DOWN with empty
+// internal state / config state / message and no progress.
+TEST("require that default status report works")
+{
+    StatusReport sr(StatusReport::Params("foo"));
+
+    EXPECT_EQUAL("foo", sr.getComponent());
+    EXPECT_EQUAL(StatusReport::DOWN, sr.getState());
+    EXPECT_EQUAL("", sr.getInternalState());
+    EXPECT_EQUAL("", sr.getInternalConfigState());
+    EXPECT_FALSE(sr.hasProgress());
+    EXPECT_EQUAL("", sr.getMessage());
+    EXPECT_EQUAL("state=", sr.getInternalStatesStr());
+}
+
+// Every Params builder setter is reflected by the corresponding getter, and
+// getInternalStatesStr() concatenates state and configstate.
+TEST("require that custom status report works")
+{
+    StatusReport sr(StatusReport::Params("foo").
+                    state(StatusReport::UPOK).
+                    internalState("mystate").
+                    internalConfigState("myconfigstate").
+                    progress(65).
+                    message("mymessage"));
+
+    EXPECT_EQUAL("foo", sr.getComponent());
+    EXPECT_EQUAL(StatusReport::UPOK, sr.getState());
+    EXPECT_EQUAL("mystate", sr.getInternalState());
+    EXPECT_EQUAL("myconfigstate", sr.getInternalConfigState());
+    EXPECT_TRUE(sr.hasProgress());
+    EXPECT_EQUAL(65, sr.getProgress());
+    EXPECT_EQUAL("mymessage", sr.getMessage());
+    EXPECT_EQUAL("state=mystate configstate=myconfigstate", sr.getInternalStatesStr());
+}
+
+} // namespace proton
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/summaryengine/.gitignore b/searchcore/src/tests/proton/summaryengine/.gitignore
new file mode 100644
index 00000000000..3a635a51def
--- /dev/null
+++ b/searchcore/src/tests/proton/summaryengine/.gitignore
@@ -0,0 +1,4 @@
+.depend
+Makefile
+summaryengine_test
+searchcore_summaryengine_test_app
diff --git a/searchcore/src/tests/proton/summaryengine/CMakeLists.txt b/searchcore/src/tests/proton/summaryengine/CMakeLists.txt
new file mode 100644
index 00000000000..af3b09db1de
--- /dev/null
+++ b/searchcore/src/tests/proton/summaryengine/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_summaryengine_test_app
+ SOURCES
+ summaryengine.cpp
+ DEPENDS
+ searchcore_summaryengine
+ searchcore_pcommon
+)
+vespa_add_test(NAME searchcore_summaryengine_test_app COMMAND searchcore_summaryengine_test_app)
diff --git a/searchcore/src/tests/proton/summaryengine/DESC b/searchcore/src/tests/proton/summaryengine/DESC
new file mode 100644
index 00000000000..be0687ecb07
--- /dev/null
+++ b/searchcore/src/tests/proton/summaryengine/DESC
@@ -0,0 +1 @@
+summaryengine test. Take a look at summaryengine.cpp for details.
diff --git a/searchcore/src/tests/proton/summaryengine/FILES b/searchcore/src/tests/proton/summaryengine/FILES
new file mode 100644
index 00000000000..cef9a6e88bf
--- /dev/null
+++ b/searchcore/src/tests/proton/summaryengine/FILES
@@ -0,0 +1 @@
+summaryengine.cpp
diff --git a/searchcore/src/tests/proton/summaryengine/summaryengine.cpp b/searchcore/src/tests/proton/summaryengine/summaryengine.cpp
new file mode 100644
index 00000000000..c0692ecd7ec
--- /dev/null
+++ b/searchcore/src/tests/proton/summaryengine/summaryengine.cpp
@@ -0,0 +1,434 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("summaryengine_test");
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/searchcore/proton/summaryengine/summaryengine.h>
+#include <vespa/searchcore/proton/summaryengine/docsum_by_slime.h>
+#include <vespa/searchlib/engine/docsumapi.h>
+#include <vespa/searchlib/util/rawbuf.h>
+#include <vespa/searchlib/util/slime_output_raw_buf_adapter.h>
+#include <vespa/vespalib/data/slime/slime.h>
+#include <vespa/vespalib/data/databuffer.h>
+#include <vespa/document/util/compressor.h>
+
+using namespace search::engine;
+using namespace document;
+using namespace vespalib::slime;
+using vespalib::stringref;
+using vespalib::ConstBufferRef;
+using vespalib::DataBuffer;
+
+namespace proton {
+
+namespace {
+stringref MYREPLY("myreply");
+Memory DOCSUMS("docsums");
+Memory DOCSUM("docsum");
+}
+
+/**
+ * ISearchHandler test double. getDocsums() answers with a slime-based reply
+ * when the request asks for root slime, otherwise with a classic
+ * DocsumReply whose entries all carry the canned _reply bytes.
+ */
+class MySearchHandler : public ISearchHandler {
+    std::string _name;
+    stringref _reply;
+public:
+    MySearchHandler(const std::string & name = "my",
+                    const stringref & reply = MYREPLY) :
+        _name(name), _reply(reply) {}
+    virtual DocsumReply::UP getDocsums(const DocsumRequest & request) {
+        return (request.useRootSlime())
+            ? std::make_unique<DocsumReply>(createSlimeReply(request.hits.size()))
+            : createOldDocSum(request);
+    }
+    // One { docsum: { long: 982 } } array entry per requested hit.
+    vespalib::Slime::UP createSlimeReply(size_t count) {
+        vespalib::Slime::UP response(std::make_unique<vespalib::Slime>());
+        Cursor & root = response->setObject();
+        Cursor & array = root.setArray(DOCSUMS);
+        const Symbol docsumSym = response->insert(DOCSUM);
+        for (size_t i=0; i < count; i++) {
+            Cursor & docSumC = array.addObject();
+            ObjectSymbolInserter inserter(docSumC, docsumSym);
+            inserter.insertObject().setLong("long", 982);
+        }
+        return response;
+    }
+    // Classic reply: docids start at 10, gids are echoed from the request,
+    // and each entry's data blob is the canned _reply.
+    DocsumReply::UP createOldDocSum(const DocsumRequest & request) {
+        DocsumReply::UP retval(new DocsumReply());
+        for (size_t i=0; i < request.hits.size(); i++) {
+            const DocsumRequest::Hit & h = request.hits[i];
+            DocsumReply::Docsum docsum;
+            docsum.docid = 10 + i;
+            docsum.gid = h.gid;
+            docsum.setData(_reply.c_str(), _reply.size());
+            retval->docsums.push_back(docsum);
+        }
+        return retval;
+    }
+
+    // Unused by these tests; returns an empty reply to satisfy the interface.
+    virtual search::engine::SearchReply::UP match(
+            const ISearchHandler::SP &,
+            const search::engine::SearchRequest &,
+            vespalib::ThreadBundle &) const {
+        return SearchReply::UP(new SearchReply);
+    }
+};
+
+/**
+ * DocsumClient test double: captures the asynchronous reply from the engine
+ * and lets the test thread wait for it with a bounded timeout.
+ */
+class MyDocsumClient : public DocsumClient {
+private:
+    vespalib::Monitor _monitor;  // guards _reply; broadcast on completion
+    DocsumReply::UP _reply;
+
+public:
+    // Completion callback, invoked by the engine (possibly from a worker
+    // thread) when the docsums are ready.
+    void getDocsumsDone(DocsumReply::UP reply) {
+        vespalib::MonitorGuard guard(_monitor);
+        _reply = std::move(reply);
+        guard.broadcast();
+    }
+
+    // Wait up to 'millis' ms for a reply; returns it (or nullptr on timeout),
+    // transferring ownership to the caller.
+    DocsumReply::UP getReply(uint32_t millis) {
+        vespalib::MonitorGuard guard(_monitor);
+        vespalib::TimedWaiter waiter(guard, millis);
+        while (_reply.get() == NULL && waiter.hasTime()) {
+            waiter.wait();
+        }
+        return std::move(_reply);
+    }
+};
+
+/**
+ * Summary engine test application; Main() lists the checks performed.
+ */
+class Test : public vespalib::TestApp {
+private:
+    // Sends a request routed to 'searchDocType' and checks that the first
+    // docsum blob equals 'expReply'.
+    bool assertDocsumReply(SummaryEngine & engine,
+                           const std::string & searchDocType,
+                           const stringref & expReply);
+
+    void requireThatGetDocsumsExecute();
+    void requireThatHandlersAreStored();
+    void requireThatCorrectHandlerIsUsed();
+    void requireThatSlimeRequestIsConvertedCorrectly();
+    void requireThatSlimeInterfaceWorksFine();
+    void requireThatRPCInterfaceWorks();
+public:
+    int Main();
+};
+
+/**
+ * Build a docsum request with 'num' hits. Gid i is eleven 'a's followed by
+ * ('a' + i % 26), so the single-hit default yields the well-known
+ * "aaaaaaaaaaaa" gid the tests assert on. (The original special-cased
+ * num == 1, but the loop produces the identical gid for i == 0, so the
+ * branch was redundant; emplace_back is now used consistently.)
+ */
+DocsumRequest::UP
+createRequest(size_t num=1)
+{
+    DocsumRequest::UP r(new DocsumRequest());
+    for (size_t i=0; i < num; i++) {
+        vespalib::string s = vespalib::make_string("aaaaaaaaaaa%c", char('a' + i%26));
+        r->hits.emplace_back(GlobalId(s.c_str()));
+    }
+    return r;
+}
+
+void
+Test::requireThatGetDocsumsExecute()
+{
+    // While the engine is running, getDocsums() is asynchronous: it returns
+    // no reply directly and delivers one through the client instead. After
+    // close() the same call degenerates to a synchronous reply.
+    int numSummaryThreads = 2;
+    SummaryEngine engine(numSummaryThreads);
+    ISearchHandler::SP handler(new MySearchHandler);
+    DocTypeName dtnvfoo("foo");
+    engine.putSearchHandler(dtnvfoo, handler);
+
+    MyDocsumClient client;
+    { // async call when engine running
+        DocsumRequest::Source request(createRequest());
+        DocsumReply::UP reply = engine.getDocsums(std::move(request), client);
+        EXPECT_TRUE(reply.get() == NULL);
+        reply = client.getReply(10000);
+        EXPECT_TRUE(reply.get() != NULL);
+        // Expected values follow from MySearchHandler::createOldDocSum().
+        EXPECT_EQUAL(1u, reply->docsums.size());
+        EXPECT_EQUAL(10u, reply->docsums[0].docid);
+        EXPECT_EQUAL(GlobalId("aaaaaaaaaaaa"), reply->docsums[0].gid);
+        EXPECT_EQUAL("myreply", std::string(reply->docsums[0].data.c_str(), reply->docsums[0].data.size()));
+    }
+    engine.close();
+    { // sync call when engine closed
+        DocsumRequest::Source request(createRequest());
+        DocsumReply::UP reply = engine.getDocsums(std::move(request), client);
+        EXPECT_TRUE(reply.get() != NULL);
+    }
+}
+
+void
+Test::requireThatHandlersAreStored()
+{
+    // Exercises the handler registry keyed by document type name:
+    // putSearchHandler() returns the handler it replaced (or null).
+    DocTypeName dtnvfoo("foo");
+    DocTypeName dtnvbar("bar");
+    int numSummaryThreads = 2;
+    SummaryEngine engine(numSummaryThreads);
+    ISearchHandler::SP h1(new MySearchHandler("foo"));
+    ISearchHandler::SP h2(new MySearchHandler("bar"));
+    ISearchHandler::SP h3(new MySearchHandler("baz"));
+    // not found
+    EXPECT_TRUE(engine.getSearchHandler(dtnvfoo).get() == NULL);
+    EXPECT_TRUE(engine.removeSearchHandler(dtnvfoo).get() == NULL);
+    // put & get
+    EXPECT_TRUE(engine.putSearchHandler(dtnvfoo, h1).get() == NULL);
+    EXPECT_EQUAL(engine.getSearchHandler(dtnvfoo).get(), h1.get());
+    EXPECT_TRUE(engine.putSearchHandler(dtnvbar, h2).get() == NULL);
+    EXPECT_EQUAL(engine.getSearchHandler(dtnvbar).get(), h2.get());
+    // replace
+    EXPECT_TRUE(engine.putSearchHandler(dtnvfoo, h3).get() == h1.get());
+    EXPECT_EQUAL(engine.getSearchHandler(dtnvfoo).get(), h3.get());
+    // remove
+    EXPECT_EQUAL(engine.removeSearchHandler(dtnvfoo).get(), h3.get());
+    EXPECT_TRUE(engine.getSearchHandler(dtnvfoo).get() == NULL);
+}
+
+// Route a single-hit request to a handler via the "documentdb.searchdoctype"
+// match property and compare the first docsum blob against 'expReply'.
+bool
+Test::assertDocsumReply(SummaryEngine & engine, const std::string & searchDocType, const stringref & expReply)
+{
+    DocsumRequest::UP request(createRequest());
+    request->propertiesMap.lookupCreate(search::MapNames::MATCH).add("documentdb.searchdoctype", searchDocType);
+    MyDocsumClient client;
+    engine.getDocsums(DocsumRequest::Source(std::move(request)), client);
+    DocsumReply::UP reply = client.getReply(10000);
+    return EXPECT_EQUAL(vespalib::stringref(expReply), vespalib::stringref(reply->docsums[0].data.c_str(), reply->docsums[0].data.size()));
+}
+
+void
+Test::requireThatCorrectHandlerIsUsed()
+{
+    // Each registered doctype answers with its own canned reply; an unknown
+    // doctype falls back to the first handler in name order ("bar").
+    DocTypeName dtnvfoo("foo");
+    DocTypeName dtnvbar("bar");
+    DocTypeName dtnvbaz("baz");
+    SummaryEngine engine(1);
+    ISearchHandler::SP h1(new MySearchHandler("foo", "foo reply"));
+    ISearchHandler::SP h2(new MySearchHandler("bar", "bar reply"));
+    ISearchHandler::SP h3(new MySearchHandler("baz", "baz reply"));
+    engine.putSearchHandler(dtnvfoo, h1);
+    engine.putSearchHandler(dtnvbar, h2);
+    engine.putSearchHandler(dtnvbaz, h3);
+
+    EXPECT_TRUE(assertDocsumReply(engine, "foo", "foo reply"));
+    EXPECT_TRUE(assertDocsumReply(engine, "bar", "bar reply"));
+    EXPECT_TRUE(assertDocsumReply(engine, "baz", "baz reply"));
+    EXPECT_TRUE(assertDocsumReply(engine, "not", "bar reply")); // uses the first (sorted on name)
+}
+
+using vespalib::Slime;
+
+const char *GID1 = "abcdefghijkl";
+const char *GID2 = "bcdefghijklm";
+
+// Check that 'slime' is semantically equal to the expected JSON in 'exp':
+// the expectation is parsed, the actual slime is re-encoded and re-decoded
+// as JSON, and the two slime trees are compared (whitespace-insensitive).
+void
+verify(vespalib::stringref exp, const Slime & slime)
+{
+    Memory expMemory(exp);
+    vespalib::Slime expSlime;
+    size_t used = vespalib::slime::JsonFormat::decode(expMemory, expSlime);
+    EXPECT_EQUAL(used, expMemory.size);  // whole expectation must parse
+    SimpleBuffer output;
+    vespalib::slime::JsonFormat::encode(slime, output, true);
+    Slime reSlimed;
+    used = vespalib::slime::JsonFormat::decode(output.get(), reSlimed);
+    EXPECT_EQUAL(used, output.get().size);
+    EXPECT_EQUAL(expSlime, reSlimed);
+}
+
+/**
+ * Build a slime request { class: "your-summary", gids: [...] } containing
+ * 2 * num gids (alternating GID1/GID2, 12 bytes each).
+ */
+Slime
+createSlimeRequestLarger(size_t num)
+{
+    Slime r;
+    Cursor & root = r.setObject();
+    root.setString("class", "your-summary");
+    Cursor & array = root.setArray("gids");
+    for (size_t i(0); i < num; i++) {
+        array.addData(Memory(GID1, 12));
+        array.addData(Memory(GID2, 12));
+    }
+    // Plain return of the local: enables copy elision / implicit move.
+    // The original's 'return std::move(r);' was a pessimizing move.
+    return r;
+}
+
+// Convenience: a request with a single GID1/GID2 pair.
+Slime
+createSlimeRequest()
+{
+    return createSlimeRequestLarger(1);
+}
+
+void
+Test::requireThatSlimeRequestIsConvertedCorrectly()
+{
+    vespalib::Slime slimeRequest = createSlimeRequest();
+    // Binary gids render as 0x-prefixed hex in the JSON view.
+    TEST_DO(verify("{"
+                   "    class: 'your-summary',"
+                   "    gids: ["
+                   "        '0x6162636465666768696A6B6C',"
+                   "        '0x62636465666768696A6B6C6D'"
+                   "    ]"
+                   "}", slimeRequest));
+    // Converting to a DocsumRequest must preserve class name and both gids.
+    DocsumRequest::UP r = DocsumBySlime::slimeToRequest(slimeRequest.get());
+    EXPECT_EQUAL("your-summary", r->resultClassName);
+    EXPECT_EQUAL(2u, r->hits.size());
+    EXPECT_EQUAL(GlobalId(GID1), r->hits[0].gid);
+    EXPECT_EQUAL(GlobalId(GID2), r->hits[1].gid);
+}
+
+// Write a docsum blob into 'buf': the SLIME_MAGIC_ID marker followed by a
+// binary-encoded slime object { long: 982 }.
+void
+createSummary(search::RawBuf & buf)
+{
+    vespalib::Slime summary;
+    summary.setObject().setLong("long", 982);
+    uint32_t magic = search::fs4transport::SLIME_MAGIC_ID;
+    buf.append(&magic, sizeof(magic));
+    search::SlimeOutputRawBufAdapter adapter(buf);
+    BinaryFormat::encode(summary, adapter);
+}
+
+/**
+ * Owns the raw docsum blob shared by the server fixture below; the buffer
+ * must be initialized before Server's handler references it.
+ */
+class BaseServer
+{
+protected:
+    BaseServer() :
+        buf(100)
+    {
+        createSummary(buf);
+    }
+protected:
+    search::RawBuf buf;
+};
+/**
+ * Test server: a SummaryEngine with a single slime-answering
+ * MySearchHandler (registered for doctype "foo", replying with the blob
+ * built by BaseServer) behind both the slime and the RPC docsum frontends.
+ */
+class Server : public BaseServer {
+public:
+    Server() :
+        BaseServer(),
+        engine(2),
+        handler(new MySearchHandler("slime", stringref(buf.GetDrainPos(), buf.GetUsedLen()))),
+        docsumBySlime(engine),
+        docsumByRPC(docsumBySlime)
+    {
+        DocTypeName dtnvfoo("foo");
+        engine.putSearchHandler(dtnvfoo, handler);
+    }  // (dropped the stray ';' after the constructor body)
+private:
+    SummaryEngine engine;
+    ISearchHandler::SP handler;
+public:
+    DocsumBySlime docsumBySlime;
+    DocsumByRPC docsumByRPC;
+};
+
+/**
+ * Build the expected JSON answer for a request created with
+ * createSlimeRequestLarger(num): an object holding 2 * num identical
+ * "{ docsum: { long: 982 } }" entries, comma separated.
+ */
+vespalib::string
+getAnswer(size_t num)
+{
+    const size_t entries = num * 2;
+    vespalib::string result =
+        "{"
+        "  docsums: [";
+    for (size_t i = 0; i < entries; ++i) {
+        result +=
+            "  {"
+            "    docsum: {"
+            "      long: 982"
+            "    }"
+            "  }";
+        if (i + 1 < entries) {
+            result += ",";
+        }
+    }
+    result += "  ]";
+    result += "}";
+    return result;
+}
+
+void
+Test::requireThatSlimeInterfaceWorksFine()
+{
+    // A two-gid slime request yields two canned docsum entries through the
+    // slime frontend (compare with verify(), whitespace-insensitive).
+    Server server;
+    vespalib::Slime slimeRequest = createSlimeRequest();
+    vespalib::Slime::UP response = server.docsumBySlime.getDocsums(slimeRequest.get());
+    TEST_DO(verify("{"
+                   "    docsums: ["
+                   "        {"
+                   "            docsum: {"
+                   "                long: 982"
+                   "            }"
+                   "        },"
+                   "        {"
+                   "            docsum: {"
+                   "                long: 982"
+                   "            }"
+                   "        }"
+                   "    ]"
+                   "}", *response));
+}
+
+// Check the RPC return triple (compression type, uncompressed size, blob):
+// the blob is decompressed and the contained binary slime is compared
+// against getAnswer(count).
+void
+verifyReply(size_t count, document::CompressionConfig::Type encoding, size_t orgSize, size_t compressedSize, FRT_RPCRequest * request)
+{
+    FRT_Values &ret = *request->GetReturn();
+    EXPECT_EQUAL(encoding, ret[0]._intval8);
+    EXPECT_EQUAL(orgSize, ret[1]._intval32);
+    EXPECT_EQUAL(compressedSize, ret[2]._data._len);
+
+    DataBuffer uncompressed;
+    ConstBufferRef blob(ret[2]._data._buf, ret[2]._data._len);
+    document::decompress(CompressionConfig::toType(ret[0]._intval8), ret[1]._intval32, blob, uncompressed, false);
+    EXPECT_EQUAL(orgSize, uncompressed.getDataLen());
+
+    vespalib::Slime summaries;
+    BinaryFormat::decode(Memory(uncompressed.getData(), uncompressed.getDataLen()), summaries);
+    TEST_DO(verify(getAnswer(count), summaries));
+}
+
+/**
+ * Round-trip a docsum request through the RPC frontend.
+ *
+ * The slime request (createSlimeRequestLarger(count)) is binary-encoded,
+ * compressed with 'requestCompression' (level 9, 100-byte minimum), packed
+ * into an FRT request as (type, uncompressed-size, blob), dispatched, and
+ * the reply is checked by verifyReply(). The expected sizes pin down the
+ * wire format on both directions.
+ */
+void
+verifyRPC(size_t count,
+          document::CompressionConfig::Type requestCompression, size_t requestSize, size_t requestBlobSize,
+          document::CompressionConfig::Type replyCompression, size_t replySize, size_t replyBlobSize)
+{
+    Server server;
+    vespalib::Slime slimeRequest = createSlimeRequestLarger(count);
+    SimpleBuffer buf;
+    BinaryFormat::encode(slimeRequest, buf);
+    EXPECT_EQUAL(requestSize, buf.get().size);
+
+    CompressionConfig config(requestCompression, 9, 100);
+    DataBuffer compressed(const_cast<char *>(buf.get().data), buf.get().size);
+    CompressionConfig::Type type = document::compress(config, ConstBufferRef(buf.get().data, buf.get().size), compressed, true);
+    EXPECT_EQUAL(type, requestCompression);
+
+    FRT_RPCRequest * request = new FRT_RPCRequest();
+    FRT_Values &arg = *request->GetParams();
+    arg.AddInt8(type);
+    arg.AddInt32(buf.get().size);
+    arg.AddData(compressed.getData(), compressed.getDataLen());
+    EXPECT_EQUAL(requestBlobSize, compressed.getDataLen());
+
+    server.docsumByRPC.getDocsums(*request);
+    verifyReply(count, replyCompression, replySize, replyBlobSize, request);
+
+    // FRT requests are ref-counted; release our reference.
+    request->SubRef();
+}
+
+void
+Test::requireThatRPCInterfaceWorks()
+{
+    // Small payloads stay uncompressed while large ones come back LZ4
+    // compressed — presumably the reply side uses a similar size threshold
+    // to the 100-byte request config above (confirm in DocsumByRPC).
+    verifyRPC(1, document::CompressionConfig::NONE, 55, 55, document::CompressionConfig::NONE, 38, 38);
+    verifyRPC(100, document::CompressionConfig::NONE, 2631, 2631, document::CompressionConfig::LZ4, 1426, 46);
+    verifyRPC(100, document::CompressionConfig::LZ4, 2631, 69, document::CompressionConfig::LZ4, 1426, 46);
+}
+
+// Test entry point: runs every summary engine check in order.
+int
+Test::Main()
+{
+    TEST_INIT("summaryengine_test");
+
+    requireThatGetDocsumsExecute();
+    requireThatHandlersAreStored();
+    requireThatCorrectHandlerIsUsed();
+    requireThatSlimeRequestIsConvertedCorrectly();
+    requireThatSlimeInterfaceWorksFine();
+    requireThatRPCInterfaceWorks();
+
+    TEST_DONE();
+}
+
+}
+
+TEST_APPHOOK(proton::Test);
+
diff --git a/searchcore/src/tests/proton/verify_ranksetup/.cvsignore b/searchcore/src/tests/proton/verify_ranksetup/.cvsignore
new file mode 100644
index 00000000000..a4848db32f8
--- /dev/null
+++ b/searchcore/src/tests/proton/verify_ranksetup/.cvsignore
@@ -0,0 +1,3 @@
+.depend
+Makefile
+verify_ranksetup_test
diff --git a/searchcore/src/tests/proton/verify_ranksetup/.gitignore b/searchcore/src/tests/proton/verify_ranksetup/.gitignore
new file mode 100644
index 00000000000..1142087d03d
--- /dev/null
+++ b/searchcore/src/tests/proton/verify_ranksetup/.gitignore
@@ -0,0 +1,5 @@
+*_test
+.depend
+Makefile
+generated
+searchcore_verify_ranksetup_test_app
diff --git a/searchcore/src/tests/proton/verify_ranksetup/CMakeLists.txt b/searchcore/src/tests/proton/verify_ranksetup/CMakeLists.txt
new file mode 100644
index 00000000000..2d74c323c1a
--- /dev/null
+++ b/searchcore/src/tests/proton/verify_ranksetup/CMakeLists.txt
@@ -0,0 +1,7 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_verify_ranksetup_test_app
+ SOURCES
+ verify_ranksetup_test.cpp
+ DEPENDS
+)
+vespa_add_test(NAME searchcore_verify_ranksetup_test_app COMMAND sh verify_ranksetup_test.sh)
diff --git a/searchcore/src/tests/proton/verify_ranksetup/DESC b/searchcore/src/tests/proton/verify_ranksetup/DESC
new file mode 100644
index 00000000000..700e2b1c2f9
--- /dev/null
+++ b/searchcore/src/tests/proton/verify_ranksetup/DESC
@@ -0,0 +1 @@
+verify_ranksetup test. Take a look at verify_ranksetup_test.cpp for details.
diff --git a/searchcore/src/tests/proton/verify_ranksetup/FILES b/searchcore/src/tests/proton/verify_ranksetup/FILES
new file mode 100644
index 00000000000..9c4a2ef3776
--- /dev/null
+++ b/searchcore/src/tests/proton/verify_ranksetup/FILES
@@ -0,0 +1 @@
+verify_ranksetup_test.cpp
diff --git a/searchcore/src/tests/proton/verify_ranksetup/invalid_attr_name/.gitignore b/searchcore/src/tests/proton/verify_ranksetup/invalid_attr_name/.gitignore
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/verify_ranksetup/invalid_attr_name/.gitignore
diff --git a/searchcore/src/tests/proton/verify_ranksetup/invalid_feature_name/.gitignore b/searchcore/src/tests/proton/verify_ranksetup/invalid_feature_name/.gitignore
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/verify_ranksetup/invalid_feature_name/.gitignore
diff --git a/searchcore/src/tests/proton/verify_ranksetup/unsupported_collection_type/.gitignore b/searchcore/src/tests/proton/verify_ranksetup/unsupported_collection_type/.gitignore
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/verify_ranksetup/unsupported_collection_type/.gitignore
diff --git a/searchcore/src/tests/proton/verify_ranksetup/valid/.gitignore b/searchcore/src/tests/proton/verify_ranksetup/valid/.gitignore
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/proton/verify_ranksetup/valid/.gitignore
diff --git a/searchcore/src/tests/proton/verify_ranksetup/verify_ranksetup_test.cpp b/searchcore/src/tests/proton/verify_ranksetup/verify_ranksetup_test.cpp
new file mode 100644
index 00000000000..3db23380675
--- /dev/null
+++ b/searchcore/src/tests/proton/verify_ranksetup/verify_ranksetup_test.cpp
@@ -0,0 +1,250 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/vespalib/util/slaveproc.h>
+#include <vespa/vespalib/util/stringfmt.h>
+#include <vespa/searchcommon/common/schema.h>
+#include <vespa/searchlib/fef/indexproperties.h>
+#include <string>
+#include <vector>
+#include <map>
+#include <initializer_list>
+
+const char *prog = "../../../apps/verify_ranksetup/verify_ranksetup-bin";
+const std::string gen_dir("generated");
+
+const char *valid_feature = "value(0)";
+const char *invalid_feature = "invalid_feature_name and format";
+
+using search::index::Schema;
+using namespace search::fef::indexproperties;
+
+/**
+ * Tiny RAII config writer: opens the file in the constructor (asserting
+ * success), exposes a printf-style fmt(), and closes the file on
+ * destruction.
+ */
+struct Writer {
+    FILE *file;
+    Writer(const std::string &file_name) {
+        file = fopen(file_name.c_str(), "w");
+        ASSERT_TRUE(file != 0);
+    }
+    // printf-style append; format string checked by GCC/Clang (indices 2,3
+    // account for the implicit 'this' parameter).
+    void fmt(const char *format, ...) const
+#ifdef __GNUC__
+        __attribute__ ((format (printf,2,3)))
+#endif
+    {
+        va_list ap;
+        va_start(ap, format);
+        vfprintf(file, format, ap);
+        va_end(ap);
+    }
+    ~Writer() { fclose(file); }
+};
+
+// Guard against running from the wrong place: $PWD must point into the
+// test's source directory (it is exported by verify_ranksetup_test.sh).
+void verify_dir() {
+    std::string pwd(getenv("PWD"));
+    ASSERT_NOT_EQUAL(pwd.find("searchcore/src/tests/proton/verify_ranksetup"), pwd.npos);
+}
+
+//-----------------------------------------------------------------------------
+
+/**
+ * Builder for a minimal rank-setup config set (attributes.cfg,
+ * indexschema.cfg, rank-profiles.cfg) written into the generated/
+ * directory, plus verify(), which runs the verify_ranksetup binary on it.
+ */
+struct Model {
+    std::map<std::string,std::pair<std::string,std::string> > indexes;     // name -> (datatype, collectiontype)
+    std::map<std::string,std::pair<std::string,std::string> > attributes;  // name -> (datatype, collectiontype)
+    std::map<std::string,std::string> properties;                          // fef properties of the "default" profile
+    std::vector<bool> extra_profiles;                                      // entry i: extra profile i+1 uses a valid (true) or invalid (false) first phase
+    Model() : indexes(), attributes(), properties(), extra_profiles() {
+        verify_dir();
+    }
+    void index(const std::string &name, Schema::DataType data_type,
+               Schema::CollectionType collection_type)
+    {
+        indexes[name].first = Schema::getTypeName(data_type);
+        indexes[name].second = Schema::getTypeName(collection_type);
+    }
+    void attribute(const std::string &name, Schema::DataType data_type,
+                   Schema::CollectionType collection_type)
+    {
+        attributes[name].first = Schema::getTypeName(data_type);
+        attributes[name].second = Schema::getTypeName(collection_type);
+    }
+    void property(const std::string &name, const std::string &val) {
+        properties[name] = val;
+    }
+    void first_phase(const std::string &feature) {
+        property(rank::FirstPhase::NAME, feature);
+    }
+    void second_phase(const std::string &feature) {
+        property(rank::SecondPhase::NAME, feature);
+    }
+    void summary_feature(const std::string &feature) {
+        property(summary::Feature::NAME, feature);
+    }
+    void dump_feature(const std::string &feature) {
+        property(dump::Feature::NAME, feature);
+    }
+    void good_profile() {
+        extra_profiles.push_back(true);
+    }
+    void bad_profile() {
+        extra_profiles.push_back(false);
+    }
+    void write_attributes(const Writer &out) {
+        out.fmt("attribute[%zu]\n", attributes.size());
+        std::map<std::string,std::pair<std::string,std::string> >::const_iterator pos = attributes.begin();
+        for (size_t i = 0; pos != attributes.end(); ++pos, ++i) {
+            out.fmt("attribute[%zu].name \"%s\"\n", i, pos->first.c_str());
+            out.fmt("attribute[%zu].datatype %s\n", i, pos->second.first.c_str());
+            out.fmt("attribute[%zu].collectiontype %s\n", i, pos->second.second.c_str());
+        }
+    }
+    void write_indexschema(const Writer &out) {
+        out.fmt("indexfield[%zu]\n", indexes.size());
+        std::map<std::string,std::pair<std::string,std::string> >::const_iterator pos = indexes.begin();
+        for (size_t i = 0; pos != indexes.end(); ++pos, ++i) {
+            out.fmt("indexfield[%zu].name \"%s\"\n", i, pos->first.c_str());
+            out.fmt("indexfield[%zu].datatype %s\n", i, pos->second.first.c_str());
+            out.fmt("indexfield[%zu].collectiontype %s\n", i, pos->second.second.c_str());
+        }
+    }
+    void write_rank_profiles(const Writer &out) {
+        out.fmt("rankprofile[%zu]\n", extra_profiles.size() + 1);
+        out.fmt("rankprofile[0].name \"default\"\n");
+        // BUGFIX: declare the property array length once, before the entries
+        // (matching write_attributes/write_indexschema above). The original
+        // emitted this length line once per property, inside the loop below.
+        out.fmt("rankprofile[0].fef.property[%zu]\n", properties.size());
+        std::map<std::string,std::string>::const_iterator pos = properties.begin();
+        for (size_t i = 0; pos != properties.end(); ++pos, ++i) {
+            out.fmt("rankprofile[0].fef.property[%zu].name \"%s\"\n", i, pos->first.c_str());
+            out.fmt("rankprofile[0].fef.property[%zu].value \"%s\"\n", i, pos->second.c_str());
+        }
+        for (size_t i = 1; i < (extra_profiles.size() + 1); ++i) {
+            out.fmt("rankprofile[%zu].name \"extra_%zu\"\n", i, i);
+            // NOTE(review): the property index reuses the profile index i even
+            // though each extra profile has exactly one property; [0] looks
+            // intended — confirm config-parser leniency before changing.
+            out.fmt("rankprofile[%zu].fef.property[%zu].name \"%s\"\n", i, i, rank::FirstPhase::NAME.c_str());
+            out.fmt("rankprofile[%zu].fef.property[%zu].value \"%s\"\n", i, i, extra_profiles[i-1]?valid_feature:invalid_feature);
+        }
+    }
+    void generate() {
+        write_attributes(Writer(gen_dir + "/attributes.cfg"));
+        write_indexschema(Writer(gen_dir + "/indexschema.cfg"));
+        write_rank_profiles(Writer(gen_dir + "/rank-profiles.cfg"));
+    }
+    // Regenerate the config and run the verifier binary; true on exit 0.
+    bool verify() {
+        generate();
+        return vespalib::SlaveProc::run(vespalib::make_string("%s dir:%s", prog, gen_dir.c_str()).c_str());
+    }
+    void verify_valid(std::initializer_list<std::string> features) {
+        for (const std::string &f: features) {
+            first_phase(f);
+            if (!EXPECT_TRUE(verify())) {
+                fprintf(stderr, "--> feature '%s' was invalid (should be valid)\n", f.c_str());
+            }
+        }
+    }
+    void verify_invalid(std::initializer_list<std::string> features) {
+        for (const std::string &f: features) {
+            first_phase(f);
+            if (!EXPECT_FALSE(verify())) {
+                fprintf(stderr, "--> feature '%s' was valid (should be invalid)\n", f.c_str());
+            }
+        }
+    }
+};
+
+//-----------------------------------------------------------------------------
+
+// Model with no fields or properties at all.
+struct EmptyModel : Model {};
+
+// Model with a few typical index fields and a single integer attribute.
+struct SimpleModel : Model {
+    SimpleModel() : Model() {
+        index("title", Schema::STRING, Schema::SINGLE);
+        index("list", Schema::STRING, Schema::ARRAY);
+        index("keywords", Schema::STRING, Schema::WEIGHTEDSET);
+        attribute("date", Schema::INT32, Schema::SINGLE);
+    }
+};
+
+// Model where the same field name exists both as index and as attribute.
+struct ShadowModel : Model {
+    ShadowModel() : Model() {
+        index("both", Schema::STRING, Schema::SINGLE);
+        attribute("both", Schema::STRING, Schema::SINGLE);
+    }
+};
+
+// NOTE(review): these cases appear order dependent — "setup output
+// directory" must run before any verification (generate() writes into
+// gen_dir) and "cleanup files" removes it at the end; confirm that
+// TEST_RUN_ALL executes in declaration order.
+TEST_F("print usage", Model()) {
+    // Running the verifier without arguments must fail (usage error).
+    EXPECT_FALSE(vespalib::SlaveProc::run(vespalib::make_string("%s", prog).c_str()));
+}
+
+TEST_F("setup output directory", Model()) {
+    ASSERT_TRUE(vespalib::SlaveProc::run(vespalib::make_string("rm -rf %s", gen_dir.c_str()).c_str()));
+    ASSERT_TRUE(vespalib::SlaveProc::run(vespalib::make_string("mkdir %s", gen_dir.c_str()).c_str()));
+}
+
+//-----------------------------------------------------------------------------
+
+TEST_F("require that empty setup passes validation", EmptyModel()) {
+    EXPECT_TRUE(f.verify());
+}
+
+TEST_F("require that we can verify multiple rank profiles", SimpleModel()) {
+    f.first_phase(valid_feature);
+    f.good_profile();
+    EXPECT_TRUE(f.verify());
+    f.bad_profile();
+    EXPECT_FALSE(f.verify());
+}
+
+TEST_F("require that first phase can break validation", SimpleModel()) {
+    f.first_phase(invalid_feature);
+    EXPECT_FALSE(f.verify());
+}
+
+TEST_F("require that second phase can break validation", SimpleModel()) {
+    f.second_phase(invalid_feature);
+    EXPECT_FALSE(f.verify());
+}
+
+TEST_F("require that summary features can break validation", SimpleModel()) {
+    f.summary_feature(invalid_feature);
+    EXPECT_FALSE(f.verify());
+}
+
+TEST_F("require that dump features can break validation", SimpleModel()) {
+    f.dump_feature(invalid_feature);
+    EXPECT_FALSE(f.verify());
+}
+
+TEST_F("require that fieldMatch feature requires single value field", SimpleModel()) {
+    f.first_phase("fieldMatch(keywords)");
+    EXPECT_FALSE(f.verify());
+    f.first_phase("fieldMatch(list)");
+    EXPECT_FALSE(f.verify());
+    f.first_phase("fieldMatch(title)");
+    EXPECT_TRUE(f.verify());
+}
+
+TEST_F("require that age feature requires attribute parameter", SimpleModel()) {
+    f.first_phase("age(unknown)");
+    EXPECT_FALSE(f.verify());
+    f.first_phase("age(title)");
+    EXPECT_FALSE(f.verify());
+    f.first_phase("age(date)");
+    EXPECT_TRUE(f.verify());
+}
+
+TEST_F("require that nativeRank can be used on any valid field", SimpleModel()) {
+    f.verify_invalid({"nativeRank(unknown)"});
+    f.verify_valid({"nativeRank", "nativeRank(title)", "nativeRank(date)", "nativeRank(title,date)"});
+}
+
+TEST_F("require that nativeAttributeMatch requires attribute parameter", SimpleModel()) {
+    f.verify_invalid({"nativeAttributeMatch(unknown)", "nativeAttributeMatch(title)", "nativeAttributeMatch(title,date)"});
+    f.verify_valid({"nativeAttributeMatch", "nativeAttributeMatch(date)"});
+}
+
+TEST_F("require that shadowed attributes can be used", ShadowModel()) {
+    f.first_phase("attribute(both)");
+    EXPECT_TRUE(f.verify());
+}
+
+//-----------------------------------------------------------------------------
+
+TEST_F("cleanup files", Model()) {
+    ASSERT_TRUE(vespalib::SlaveProc::run(vespalib::make_string("rm -rf %s", gen_dir.c_str()).c_str()));
+}
+
+TEST_MAIN_WITH_PROCESS_PROXY() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/verify_ranksetup/verify_ranksetup_test.sh b/searchcore/src/tests/proton/verify_ranksetup/verify_ranksetup_test.sh
new file mode 100755
index 00000000000..d03b6309ec9
--- /dev/null
+++ b/searchcore/src/tests/proton/verify_ranksetup/verify_ranksetup_test.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+export PWD=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)
+$VALGRIND ./searchcore_verify_ranksetup_test_app
diff --git a/searchcore/src/tests/slime/convert_document_to_slime/.gitignore b/searchcore/src/tests/slime/convert_document_to_slime/.gitignore
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/searchcore/src/tests/slime/convert_document_to_slime/.gitignore