author    Jon Bratseth <bratseth@yahoo-inc.com>  2016-06-15 23:09:44 +0200
committer Jon Bratseth <bratseth@yahoo-inc.com>  2016-06-15 23:09:44 +0200
commit    72231250ed81e10d66bfe70701e64fa5fe50f712 (patch)
tree      2728bba1131a6f6e5bdf95afec7d7ff9358dac50 /persistence
Publish
Diffstat (limited to 'persistence')
-rw-r--r--  persistence/.gitignore  4
-rw-r--r--  persistence/CMakeLists.txt  31
-rw-r--r--  persistence/OWNERS  2
-rw-r--r--  persistence/README  1
-rw-r--r--  persistence/pom.xml  106
-rw-r--r--  persistence/src/.gitignore  4
-rw-r--r--  persistence/src/Doxyfile  994
-rw-r--r--  persistence/src/main/java/com/yahoo/persistence/rpc/BucketProviderMethod.java  22
-rw-r--r--  persistence/src/main/java/com/yahoo/persistence/rpc/PersistenceProviderHandler.java  401
-rw-r--r--  persistence/src/main/java/com/yahoo/persistence/rpc/PersistenceProviderMethod.java  39
-rw-r--r--  persistence/src/main/java/com/yahoo/persistence/rpc/RPCHandler.java  39
-rw-r--r--  persistence/src/main/java/com/yahoo/persistence/rpc/TimestampedProviderMethod.java  20
-rw-r--r--  persistence/src/main/java/com/yahoo/persistence/rpc/package-info.java  7
-rw-r--r--  persistence/src/main/java/com/yahoo/persistence/spi/AbstractPersistenceProvider.java  82
-rw-r--r--  persistence/src/main/java/com/yahoo/persistence/spi/Bucket.java  30
-rw-r--r--  persistence/src/main/java/com/yahoo/persistence/spi/BucketInfo.java  155
-rw-r--r--  persistence/src/main/java/com/yahoo/persistence/spi/ClusterState.java  32
-rw-r--r--  persistence/src/main/java/com/yahoo/persistence/spi/ClusterStateImpl.java  50
-rw-r--r--  persistence/src/main/java/com/yahoo/persistence/spi/DocEntry.java  56
-rw-r--r--  persistence/src/main/java/com/yahoo/persistence/spi/PartitionState.java  27
-rw-r--r--  persistence/src/main/java/com/yahoo/persistence/spi/PersistenceProvider.java  382
-rw-r--r--  persistence/src/main/java/com/yahoo/persistence/spi/Selection.java  70
-rw-r--r--  persistence/src/main/java/com/yahoo/persistence/spi/conformance/ConformanceTest.java  1605
-rw-r--r--  persistence/src/main/java/com/yahoo/persistence/spi/conformance/TestDocMan.java  37
-rw-r--r--  persistence/src/main/java/com/yahoo/persistence/spi/conformance/package-info.java  7
-rw-r--r--  persistence/src/main/java/com/yahoo/persistence/spi/package-info.java  7
-rw-r--r--  persistence/src/main/java/com/yahoo/persistence/spi/result/BucketIdListResult.java  36
-rw-r--r--  persistence/src/main/java/com/yahoo/persistence/spi/result/BucketInfoResult.java  37
-rw-r--r--  persistence/src/main/java/com/yahoo/persistence/spi/result/CreateIteratorResult.java  33
-rw-r--r--  persistence/src/main/java/com/yahoo/persistence/spi/result/GetResult.java  60
-rw-r--r--  persistence/src/main/java/com/yahoo/persistence/spi/result/IterateResult.java  43
-rw-r--r--  persistence/src/main/java/com/yahoo/persistence/spi/result/PartitionStateListResult.java  37
-rw-r--r--  persistence/src/main/java/com/yahoo/persistence/spi/result/RemoveResult.java  47
-rw-r--r--  persistence/src/main/java/com/yahoo/persistence/spi/result/Result.java  83
-rw-r--r--  persistence/src/main/java/com/yahoo/persistence/spi/result/UpdateResult.java  39
-rw-r--r--  persistence/src/main/java/com/yahoo/persistence/spi/result/package-info.java  7
-rw-r--r--  persistence/src/main/resources/configdefinitions/persistence-rpc.def  5
-rw-r--r--  persistence/src/testlist.txt  3
-rw-r--r--  persistence/src/tests/.gitignore  6
-rw-r--r--  persistence/src/tests/CMakeLists.txt  11
-rw-r--r--  persistence/src/tests/dummyimpl/.gitignore  4
-rw-r--r--  persistence/src/tests/dummyimpl/CMakeLists.txt  14
-rw-r--r--  persistence/src/tests/dummyimpl/dummyimpltest.cpp  46
-rw-r--r--  persistence/src/tests/dummyimpl/dummypersistence_test.cpp  88
-rw-r--r--  persistence/src/tests/proxy/.gitignore  10
-rw-r--r--  persistence/src/tests/proxy/CMakeLists.txt  28
-rw-r--r--  persistence/src/tests/proxy/dummy_provider_factory.h  35
-rw-r--r--  persistence/src/tests/proxy/external_providerproxy_conformancetest.cpp  43
-rw-r--r--  persistence/src/tests/proxy/mockprovider.h  172
-rw-r--r--  persistence/src/tests/proxy/providerproxy_conformancetest.cpp  64
-rw-r--r--  persistence/src/tests/proxy/providerproxy_test.cpp  402
-rw-r--r--  persistence/src/tests/proxy/providerstub_test.cpp  538
-rw-r--r--  persistence/src/tests/proxy/proxy_factory_wrapper.h  59
-rw-r--r--  persistence/src/tests/proxy/proxy_test.sh  4
-rw-r--r--  persistence/src/tests/proxy/proxyfactory.h  42
-rw-r--r--  persistence/src/tests/spi/CMakeLists.txt  6
-rw-r--r--  persistence/src/tests/spi/clusterstatetest.cpp  229
-rw-r--r--  persistence/src/tests/testrunner.cpp  15
-rw-r--r--  persistence/src/vespa/persistence/.gitignore  5
-rw-r--r--  persistence/src/vespa/persistence/CMakeLists.txt  15
-rw-r--r--  persistence/src/vespa/persistence/conformancetest/.gitignore  2
-rw-r--r--  persistence/src/vespa/persistence/conformancetest/CMakeLists.txt  7
-rw-r--r--  persistence/src/vespa/persistence/conformancetest/conformancetest.cpp  2314
-rw-r--r--  persistence/src/vespa/persistence/conformancetest/conformancetest.h  274
-rw-r--r--  persistence/src/vespa/persistence/dummyimpl/.gitignore  2
-rw-r--r--  persistence/src/vespa/persistence/dummyimpl/CMakeLists.txt  6
-rw-r--r--  persistence/src/vespa/persistence/dummyimpl/dummypersistence.cpp  943
-rw-r--r--  persistence/src/vespa/persistence/dummyimpl/dummypersistence.h  255
-rw-r--r--  persistence/src/vespa/persistence/proxy/.gitignore  2
-rw-r--r--  persistence/src/vespa/persistence/proxy/CMakeLists.txt  8
-rw-r--r--  persistence/src/vespa/persistence/proxy/buildid.cpp  8
-rw-r--r--  persistence/src/vespa/persistence/proxy/buildid.h  12
-rw-r--r--  persistence/src/vespa/persistence/proxy/providerproxy.cpp  493
-rw-r--r--  persistence/src/vespa/persistence/proxy/providerproxy.h  90
-rw-r--r--  persistence/src/vespa/persistence/proxy/providerstub.cpp  931
-rw-r--r--  persistence/src/vespa/persistence/proxy/providerstub.h  94
-rw-r--r--  persistence/src/vespa/persistence/spi/.gitignore  2
-rw-r--r--  persistence/src/vespa/persistence/spi/CMakeLists.txt  15
-rw-r--r--  persistence/src/vespa/persistence/spi/abstractpersistenceprovider.cpp  77
-rw-r--r--  persistence/src/vespa/persistence/spi/abstractpersistenceprovider.h  90
-rw-r--r--  persistence/src/vespa/persistence/spi/bucket.cpp  28
-rw-r--r--  persistence/src/vespa/persistence/spi/bucket.h  53
-rw-r--r--  persistence/src/vespa/persistence/spi/bucketinfo.cpp  75
-rw-r--r--  persistence/src/vespa/persistence/spi/bucketinfo.h  110
-rw-r--r--  persistence/src/vespa/persistence/spi/clusterstate.cpp  108
-rw-r--r--  persistence/src/vespa/persistence/spi/clusterstate.h  72
-rw-r--r--  persistence/src/vespa/persistence/spi/clusterstateimpl.h  66
-rw-r--r--  persistence/src/vespa/persistence/spi/context.cpp  9
-rw-r--r--  persistence/src/vespa/persistence/spi/context.h  98
-rw-r--r--  persistence/src/vespa/persistence/spi/docentry.h  229
-rw-r--r--  persistence/src/vespa/persistence/spi/documentselection.h  33
-rw-r--r--  persistence/src/vespa/persistence/spi/exceptions.cpp  12
-rw-r--r--  persistence/src/vespa/persistence/spi/exceptions.h  21
-rw-r--r--  persistence/src/vespa/persistence/spi/matcher.h  42
-rw-r--r--  persistence/src/vespa/persistence/spi/metricpersistenceprovider.cpp  300
-rw-r--r--  persistence/src/vespa/persistence/spi/metricpersistenceprovider.h  73
-rw-r--r--  persistence/src/vespa/persistence/spi/partitionstate.cpp  39
-rw-r--r--  persistence/src/vespa/persistence/spi/partitionstate.h  53
-rw-r--r--  persistence/src/vespa/persistence/spi/persistenceprovider.cpp  15
-rw-r--r--  persistence/src/vespa/persistence/spi/persistenceprovider.h  436
-rw-r--r--  persistence/src/vespa/persistence/spi/providerfactory.h  30
-rw-r--r--  persistence/src/vespa/persistence/spi/read_consistency.cpp  27
-rw-r--r--  persistence/src/vespa/persistence/spi/read_consistency.h  36
-rw-r--r--  persistence/src/vespa/persistence/spi/result.h  307
-rw-r--r--  persistence/src/vespa/persistence/spi/selection.h  93
-rw-r--r--  persistence/testrun/.gitignore  6
106 files changed, 14472 insertions, 0 deletions
diff --git a/persistence/.gitignore b/persistence/.gitignore
new file mode 100644
index 00000000000..be0452bed21
--- /dev/null
+++ b/persistence/.gitignore
@@ -0,0 +1,4 @@
+/target
+/pom.xml.build
+Makefile
+Testing
diff --git a/persistence/CMakeLists.txt b/persistence/CMakeLists.txt
new file mode 100644
index 00000000000..1b7ee4d5e9c
--- /dev/null
+++ b/persistence/CMakeLists.txt
@@ -0,0 +1,31 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_define_module(
+ DEPENDS
+ fastos
+ vespalog
+ vespalib
+ staging_vespalib
+ fnet
+ document
+ persistencetypes
+ config_cloudconfig
+ vdslib
+ metrics
+ configdefinitions
+
+ LIBS
+ src/vespa/persistence
+ src/vespa/persistence/conformancetest
+ src/vespa/persistence/dummyimpl
+ src/vespa/persistence/proxy
+ src/vespa/persistence/spi
+
+ TEST_DEPENDS
+ vdstestlib
+
+ TESTS
+ src/tests
+ src/tests/dummyimpl
+ src/tests/proxy
+ src/tests/spi
+)
diff --git a/persistence/OWNERS b/persistence/OWNERS
new file mode 100644
index 00000000000..97c35339850
--- /dev/null
+++ b/persistence/OWNERS
@@ -0,0 +1,2 @@
+vekterli
+dybdahl
diff --git a/persistence/README b/persistence/README
new file mode 100644
index 00000000000..f81492162c5
--- /dev/null
+++ b/persistence/README
@@ -0,0 +1 @@
+This module contains the persistence SPI and a dummy implementation.
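
The SPI the README refers to is the Java package com.yahoo.persistence.spi added by this commit, with AbstractPersistenceProvider as a convenience base class. As a rough, hypothetical sketch of a custom provider built on it (the overridden signatures and the no-arg Result constructor are assumptions for illustration, not the verbatim API; see PersistenceProvider.java and result/Result.java below):

    // Hypothetical sketch only: signatures and the Result constructor are
    // assumed; consult PersistenceProvider.java and result/Result.java below.
    package com.example.persistence;

    import com.yahoo.persistence.spi.AbstractPersistenceProvider;
    import com.yahoo.persistence.spi.Bucket;
    import com.yahoo.persistence.spi.result.Result;

    public class InMemoryProvider extends AbstractPersistenceProvider {
        @Override
        public Result initialize() {
            // Prepare whatever backing store the provider needs before traffic arrives.
            return new Result(); // assumed: a default Result signals success
        }

        @Override
        public Result createBucket(Bucket bucket) {
            // Allocate per-bucket state.
            return new Result();
        }
    }
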
diff --git a/persistence/pom.xml b/persistence/pom.xml
new file mode 100644
index 00000000000..3d887b6bf31
--- /dev/null
+++ b/persistence/pom.xml
@@ -0,0 +1,106 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>parent</artifactId>
+ <version>6-SNAPSHOT</version>
+ <relativePath>../parent/pom.xml</relativePath>
+ </parent>
+ <artifactId>persistence</artifactId>
+ <packaging>container-plugin</packaging>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>bundle-plugin</artifactId>
+ <extensions>true</extensions>
+ </plugin>
+
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <configuration>
+ <compilerArgs>
+ <arg>-Xlint:rawtypes</arg>
+ <arg>-Xlint:unchecked</arg>
+ <arg>-Xlint:deprecation</arg>
+ <arg>-Werror</arg>
+ </compilerArgs>
+ </configuration>
+ </plugin>
+
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-javadoc-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>attach-javadocs</id>
+ <goals>
+ <goal>jar</goal>
+ </goals>
+ <configuration>
+ <finalName>${project.artifactId}</finalName>
+ <additionalparam>-Xdoclint:${doclint} -Xdoclint:-missing</additionalparam>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <artifactId>maven-source-plugin</artifactId>
+ <version>2.1.2</version>
+ <executions>
+ <execution>
+ <id>attach-sources</id>
+ <goals>
+ <goal>jar-no-fork</goal>
+ </goals>
+ <configuration>
+ <finalName>${project.artifactId}</finalName>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+
+ <dependencies>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ </dependency>
+
+ <dependency>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>container-dev</artifactId>
+ <version>${project.version}</version>
+ <scope>provided</scope>
+ <exclusions>
+ <exclusion>
+ <groupId>org.antlr</groupId>
+ <artifactId>antlr4-runtime</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+
+ <dependency>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>document</artifactId>
+ <version>${project.version}</version>
+ <scope>provided</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>jrt</artifactId>
+ <version>${project.version}</version>
+ <scope>provided</scope>
+ </dependency>
+ </dependencies>
+
+ <properties>
+ <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+ </properties>
+</project>
diff --git a/persistence/src/.gitignore b/persistence/src/.gitignore
new file mode 100644
index 00000000000..ea5bfd4b499
--- /dev/null
+++ b/persistence/src/.gitignore
@@ -0,0 +1,4 @@
+/Makefile.ini
+/config_command.sh
+/project.dsw
+/persistence.mak
diff --git a/persistence/src/Doxyfile b/persistence/src/Doxyfile
new file mode 100644
index 00000000000..d40aff6f46c
--- /dev/null
+++ b/persistence/src/Doxyfile
@@ -0,0 +1,994 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+# Doxyfile 1.2.18
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+# TAG = value [value, ...]
+# For lists, items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# General configuration options
+#---------------------------------------------------------------------------
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
+# by quotes) that should identify the project.
+
+PROJECT_NAME = Storage
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+PROJECT_NUMBER =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY = ../doc
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Brazilian, Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, Dutch,
+# Finnish, French, German, Greek, Hungarian, Italian, Japanese, Japanese-en
+# (Japanese with english messages), Korean, Norwegian, Polish, Portuguese,
+# Romanian, Russian, Serbian, Slovak, Slovene, Spanish, Swedish and Ukrainian.
+
+OUTPUT_LANGUAGE = English
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL = NO
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+
+EXTRACT_PRIVATE = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC = NO
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these class will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF = YES
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all inherited
+# members of a class in the documentation of that class as if those members were
+# ordinary class members. Constructors, destructors and assignment operators of
+# the base classes will not be shown.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before each file name in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. It is allowed to use relative paths in the argument list.
+
+STRIP_FROM_PATH =
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower case letters. If set to YES upper case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# users are advised to set this option to NO.
+
+CASE_SENSE_NAMES = YES
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful if your file system
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES = NO
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = YES
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put list of the files that are included by a file in the documentation
+# of that file.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like the Qt-style comments (thus requiring an
+# explicit @brief command for a brief description).
+
+JAVADOC_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the DETAILS_AT_TOP tag is set to YES then Doxygen
+# will output the detailed description near the top, like JavaDoc.
+# If set to NO, the detailed description appears after the member
+# documentation.
+
+DETAILS_AT_TOP = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# reimplements.
+
+INHERIT_DOCS = YES
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+
+SORT_MEMBER_DOCS = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE = 4
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# This tag can be used to specify a number of aliases that act
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES =
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or define can consist of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and defines in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+# only. Doxygen will then generate output that is more tailored for C.
+# For instance some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C = YES
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java sources
+# only. Doxygen will then generate output that is more tailored for Java.
+# For instance namespaces will be presented as packages, qualified scopes
+# will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+
+WARNINGS = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text.
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT = storage
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx *.hpp
+# *.h++ *.idl *.odl
+
+FILE_PATTERNS = *.h *.cpp
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# be excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or directories
+# that are symbolic links (a Unix filesystem feature) are excluded from the input.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories.
+
+EXCLUDE_PATTERNS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain images that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output.
+
+INPUT_FILTER =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = NO
+
+# If the REFERENCED_BY_RELATION tag is set to YES (the default)
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = YES
+
+# If the REFERENCES_RELATION tag is set to YES (the default)
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = NO
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If the tag is left blank doxygen
+# will generate a default style sheet.
+
+HTML_STYLESHEET = ../cpp/vespa_link.css
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
+# files or namespaces will be aligned in HTML using tables. If set to
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS = YES
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compressed HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output dir.
+
+CHM_FILE =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non empty doxygen will try to run
+# the html help compiler on the generated index.hhp.
+
+HHC_LOCATION =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls whether a separate .chi index file is generated (YES) or whether
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the Html help documentation and to the tree view.
+
+TOC_EXPAND = NO
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
+# top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it.
+
+DISABLE_INDEX = NO
+
+# This tag can be used to set the number of enum values (range [1..20])
+# that doxygen will group on one line in the generated HTML documentation.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be
+# generated containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript and frames is required (for instance Mozilla, Netscape 4.0+,
+# or Internet explorer 4.0+). Note that for large projects the tree generation
+# can take a very long time. In such cases it is better to disable this feature.
+# Windows users are probably better off using the HTML help feature.
+
+GENERATE_TREEVIEW = NO
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH = 250
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX = YES
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be invoked. If left blank `latex' will be used as the default command name.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, a4wide, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = a4wide
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = NO
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
+# The RTF output is optimised for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
+# generate man pages
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_XML = NO
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD =
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_PREDEFINED tags.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted, =1 is assumed.
+
+PREDEFINED =
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all function-like macros that are alone
+# on a line, have an all uppercase name, and do not end with a semicolon. Such
+# function macros are typically used for boiler-plate code, and will confuse the
+# parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES tag can be used to specify one or more tagfiles.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base or
+# super classes. Setting the tag to NO turns the diagrams off. Note that this
+# option is superseded by the HAVE_DOT option below. This is only a fallback. It is
+# recommended to install and use dot, since it yields more powerful graphs.
+
+CLASS_DIAGRAMS = YES
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT = NO
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force
+# the CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = YES
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will generate a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are png, jpg, or gif.
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found on the path.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than
+# this value, doxygen will try to truncate the graph, so that it fits within
+# the specified constraint. Beware that most browsers cannot cope with very
+# large images.
+
+MAX_DOT_GRAPH_WIDTH = 1024
+
+# The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allowed height
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than
+# this value, doxygen will try to truncate the graph, so that it fits within
+# the specified constraint. Beware that most browsers cannot cope with very
+# large images.
+
+MAX_DOT_GRAPH_HEIGHT = 1024
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to the search engine
+#---------------------------------------------------------------------------
+
+# The SEARCHENGINE tag specifies whether or not a search engine should be
+# used. If set to NO the values of all tags below this one will be ignored.
+
+SEARCHENGINE = NO
+
+# The CGI_NAME tag should be the name of the CGI script that
+# starts the search engine (doxysearch) with the correct parameters.
+# A script with this name will be generated by doxygen.
+
+CGI_NAME = search.cgi
+
+# The CGI_URL tag should be the absolute URL to the directory where the
+# cgi binaries are located. See the documentation of your http daemon for
+# details.
+
+CGI_URL =
+
+# The DOC_URL tag should be the absolute URL to the directory where the
+# documentation is located. If left blank the absolute path to the
+# documentation, with file:// prepended to it, will be used.
+
+DOC_URL =
+
+# The DOC_ABSPATH tag should be the absolute path to the directory where the
+# documentation is located. If left blank the directory on the local machine
+# will be used.
+
+DOC_ABSPATH =
+
+# The BIN_ABSPATH tag must point to the directory where the doxysearch binary
+# is installed.
+
+BIN_ABSPATH = /usr/local/bin/
+
+# The EXT_DOC_PATHS tag can be used to specify one or more paths to
+# documentation generated for other projects. This allows doxysearch to search
+# the documentation for these projects as well.
+
+EXT_DOC_PATHS =
diff --git a/persistence/src/main/java/com/yahoo/persistence/rpc/BucketProviderMethod.java b/persistence/src/main/java/com/yahoo/persistence/rpc/BucketProviderMethod.java
new file mode 100644
index 00000000000..3be746d6063
--- /dev/null
+++ b/persistence/src/main/java/com/yahoo/persistence/rpc/BucketProviderMethod.java
@@ -0,0 +1,22 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.persistence.rpc;
+
+/**
+ * Class to represent a persistence provider method that has a bucket
+ * as its first parameter.
+ */
+public class BucketProviderMethod extends PersistenceProviderMethod {
+ public BucketProviderMethod(String name, PersistenceProviderHandler owner) {
+ this(name, owner, "", "");
+ }
+
+ public BucketProviderMethod(String name, PersistenceProviderHandler owner, String paramTypes) {
+ this(name, owner, paramTypes, "");
+ }
+
+ public BucketProviderMethod(String name, PersistenceProviderHandler owner, String paramTypes, String returnTypes) {
+ super(name, owner, "ll" + paramTypes, returnTypes);
+ paramDesc("bucketId", "The bucket id to perform operation on");
+ paramDesc("partitionId", "The partition to perform operation on");
+ }
+}
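
The "ll" prefix above means every bucket-scoped RPC carries the bucket id and the partition id as its first two 64-bit integers, ahead of any method-specific parameters. A hypothetical JRT client call against such a method might look as follows; the "vespa.persistence." wire-name prefix and the port value are assumptions (only the connect method's full name is visible in this commit, and the port comes from persistence-rpc.def), while the parameter layout and the leading error-code/error-message return values follow from the handler code below.

    // Hypothetical JRT client for a bucket-scoped provider method.
    // Assumed: the "vespa.persistence." name prefix and the port number.
    import com.yahoo.jrt.*;

    public class BucketInfoClient {
        public static void main(String[] args) {
            Supervisor supervisor = new Supervisor(new Transport());
            Target target = supervisor.connect(new Spec("localhost", 12345)); // port from persistence-rpc.def
            Request req = new Request("vespa.persistence.getBucketInfo");     // assumed wire name
            req.parameters().add(new Int64Value(1L)); // bucketId: first "l" added by BucketProviderMethod
            req.parameters().add(new Int64Value(0L)); // partitionId: second "l"
            target.invokeSync(req, 10.0);
            // Every reply starts with an error code and message (see addResult below),
            // followed by the method's own return values.
            byte errorCode = req.returnValues().get(0).asInt8();
            String errorMessage = req.returnValues().get(1).asString();
            System.out.println("result: " + errorCode + " " + errorMessage);
            target.close();
            supervisor.transport().shutdown().join();
        }
    }
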
diff --git a/persistence/src/main/java/com/yahoo/persistence/rpc/PersistenceProviderHandler.java b/persistence/src/main/java/com/yahoo/persistence/rpc/PersistenceProviderHandler.java
new file mode 100644
index 00000000000..7f9fb17ce88
--- /dev/null
+++ b/persistence/src/main/java/com/yahoo/persistence/rpc/PersistenceProviderHandler.java
@@ -0,0 +1,401 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.persistence.rpc;
+
+import com.yahoo.document.*;
+import com.yahoo.document.fieldset.AllFields;
+import com.yahoo.document.fieldset.FieldSet;
+import com.yahoo.document.select.parser.ParseException;
+import com.yahoo.document.serialization.*;
+import com.yahoo.io.GrowableByteBuffer;
+import com.yahoo.jrt.*;
+import com.yahoo.persistence.PersistenceRpcConfig;
+import com.yahoo.persistence.spi.*;
+import com.yahoo.persistence.spi.result.*;
+
+import java.nio.ByteBuffer;
+import java.util.TreeSet;
+
+/**
+ * @author thomasg
+ */
+public class PersistenceProviderHandler extends RPCHandler {
+ DocumentTypeManager docTypeManager;
+ PersistenceProvider provider = null;
+ boolean started = false;
+
+ int magic_number = 0xf00ba2;
+
+ public PersistenceProviderHandler(PersistenceRpcConfig config) {
+ super(config.port());
+ }
+
+ public void initialize(PersistenceProvider provider, DocumentTypeManager manager) {
+ this.provider = provider;
+ this.docTypeManager = manager;
+
+ if (!started) {
+ addMethod(new Method("vespa.persistence.connect", "s", "", this, "RPC_connect")
+ .paramDesc(0, "buildId", "Id to make sure client and server come from the same build"));
+ addMethod(new PersistenceProviderMethod("initialize", this));
+ addMethod(new PersistenceProviderMethod("getPartitionStates", this, "", "IS"));
+ addMethod(new PersistenceProviderMethod("listBuckets", this, "l", "L")
+ .paramDesc("partitionId", "The partition to list buckets for")
+ .returnDesc("bucketIds", "An array of bucketids"));
+ addMethod(new PersistenceProviderMethod("getModifiedBuckets", this, "", "L")
+ .returnDesc("bucketIds", "An array of bucketids"));
+ addMethod(new PersistenceProviderMethod("setClusterState", this, "x")
+ .paramDesc("clusterState", "The updated cluster state"));
+ addMethod(new BucketProviderMethod("setActiveState", this, "b")
+ .paramDesc("bucketState", "The new state (active/not active)"));
+ addMethod(new BucketProviderMethod("getBucketInfo", this, "", "iiiiibb")
+ .returnDesc("checksum", "The bucket checksum")
+ .returnDesc("documentCount", "The number of unique documents stored in the bucket")
+ .returnDesc("documentSize", "The size of the unique documents")
+ .returnDesc("entryCount", "The number of entries (inserts/removes) in the bucket")
+ .returnDesc("usedSize", "The number of bytes used by the bucket in total")
+ .returnDesc("ready", "Whether the bucket is \"ready\" for external reads or not")
+ .returnDesc("active", "Whether the bucket has been activated for external reads or not"));
+ addMethod(new TimestampedProviderMethod("put", this, "x")
+ .paramDesc("document", "The serialized document"));
+ addMethod(new TimestampedProviderMethod("removeById", this, "s", "b")
+ .paramDesc("documentId", "The ID of the document to remove")
+ .returnDesc("existed", "Whether or not the document existed"));
+ addMethod(new TimestampedProviderMethod("removeIfFound", this, "s", "b")
+ .paramDesc("documentId", "The ID of the document to remove")
+ .returnDesc("existed", "Whether or not the document existed"));
+ addMethod(new TimestampedProviderMethod("update", this, "x", "l")
+ .paramDesc("update", "The document update to apply")
+ .returnDesc("existingTimestamp", "The timestamp of the document that the update was applied to, or 0 if it didn't exist"));
+ addMethod(new BucketProviderMethod("flush", this));
+ addMethod(new BucketProviderMethod("get", this, "ss", "lx")
+ .paramDesc("fieldSet", "A set of fields to return")
+ .paramDesc("documentId", "The document ID to fetch")
+ .returnDesc("timestamp", "The timestamp of the document fetched")
+ .returnDesc("document", "A serialized document"));
+ addMethod(new BucketProviderMethod("createIterator", this, "ssllLb", "l")
+ .paramDesc("fieldSet", "A set of fields to return")
+ .paramDesc("documentSelectionString", "Document selection to match with")
+ .paramDesc("timestampFrom", "lowest timestamp to include")
+ .paramDesc("timestampTo", "Highest timestamp to include")
+ .paramDesc("timestampSubset", "Array of timestamps to include")
+ .paramDesc("includedVersions", "Document versions to include")
+ .returnDesc("iteratorId", "An iterator id to use for further calls to iterate and destroyIterator"));
+ addMethod(new PersistenceProviderMethod("iterate", this, "ll", "LISXb")
+ .paramDesc("iteratorId", "An iterator id previously returned by createIterator")
+ .paramDesc("maxByteSize", "The maximum number of bytes to return in this call (approximate)")
+ .returnDesc("timestampArray", "Array of timestamps for DocEntries")
+ .returnDesc("flagArray", "Array of flags for DocEntries")
+ .returnDesc("docIdArray", "Array of document ids for DocEntries")
+ .returnDesc("docArray", "Array of documents for DocEntries")
+ .returnDesc("completed", "Whether or not iteration completed"));
+ addMethod(new PersistenceProviderMethod("destroyIterator", this, "l")
+ .paramDesc("iteratorId", "An iterator id previously returned by createIterator"));
+ addMethod(new BucketProviderMethod("createBucket", this));
+ addMethod(new BucketProviderMethod("deleteBucket", this));
+ addMethod(new BucketProviderMethod("split", this, "llll")
+ .paramDesc("target1Bucket", "Bucket id of first split target")
+ .paramDesc("target1Partition", "Partition id of first split target")
+ .paramDesc("target2Bucket", "Bucket id of second split target")
+ .paramDesc("target2Partition", "Partition id of second split target"));
+ addMethod(new PersistenceProviderMethod("join", this, "llllll")
+ .paramDesc("source1Bucket", "Bucket id of first source bucket")
+ .paramDesc("source1Partition", "Partition id of first source bucket")
+ .paramDesc("source1Bucket", "Bucket id of second source bucket")
+ .paramDesc("source1Partition", "Partition id of second source bucket")
+ .paramDesc("source1Bucket", "Bucket id of target bucket")
+ .paramDesc("source1Partition", "Partition id of target bucket"));
+ addMethod(new BucketProviderMethod("move", this, "l")
+ .paramDesc("partitionId", "The partition to move the bucket to"));
+ addMethod(new BucketProviderMethod("maintain", this, "b")
+ .paramDesc("maintenanceLevel", "LOW or HIGH maintenance"));
+ addMethod(new TimestampedProviderMethod("removeEntry", this));
+
+ start();
+ started = true;
+ }
+ }
+
+ public void RPC_connect(Request req) {
+ // Build-id verification is not implemented; all clients are accepted.
+ }
+
+ public void addResult(Result result, Request req) {
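+ // Every reply is prefixed with an error code (int8) and an error message (string),
+ // matching the "bs" prefix PersistenceProviderMethod prepends to all return-type strings.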
+ req.returnValues().add(new Int8Value((byte) result.getErrorType().ordinal()));
+ req.returnValues().add(new StringValue(result.getErrorMessage()));
+ }
+
+ public void RPC_initialize(Request req) {
+ addResult(provider.initialize(), req);
+ }
+
+ public void RPC_getPartitionStates(Request req) {
+ PartitionStateListResult result = provider.getPartitionStates();
+ addResult(result, req);
+
+ int[] states = new int[result.getPartitionStates().size()];
+ String[] reasons = new String[result.getPartitionStates().size()];
+
+ for (int i = 0; i < states.length; ++i) {
+ states[i] = result.getPartitionStates().get(i).getState().ordinal();
+ reasons[i] = result.getPartitionStates().get(i).getReason();
+ }
+
+ req.returnValues().add(new Int32Array(states));
+ req.returnValues().add(new StringArray(reasons));
+ }
+
+ void addBucketIdListResult(BucketIdListResult result, Request req) {
+ addResult(result, req);
+
+ long[] retVal = new long[result.getBuckets().size()];
+ for (int i = 0; i < retVal.length; ++i) {
+ retVal[i] = result.getBuckets().get(i).getRawId();
+ }
+
+ req.returnValues().add(new Int64Array(retVal));
+ }
+
+ public void RPC_listBuckets(Request req) {
+ addBucketIdListResult(provider.listBuckets((short) req.parameters().get(0).asInt64()), req);
+ }
+
+ public void RPC_setClusterState(Request req) throws java.text.ParseException {
+ ClusterStateImpl state = new ClusterStateImpl(req.parameters().get(0).asData());
+ addResult(provider.setClusterState(state), req);
+ }
+
+ Bucket getBucket(Request req, int index) {
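+ // Bucket arguments are passed as two consecutive int64 parameters: [bucketId, partitionId].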
+ return new Bucket((short)req.parameters().get(index + 1).asInt64(),
+ new BucketId(req.parameters().get(index).asInt64()));
+ }
+
+ public void RPC_setActiveState(Request req) {
+ try {
+ addResult(provider.setActiveState(getBucket(req, 0),
+ BucketInfo.ActiveState.values()[req.parameters().get(2).asInt8()]), req);
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw new RuntimeException(e);
+ }
+ }
+
+ public void RPC_getBucketInfo(Request req) {
+ BucketInfoResult result = provider.getBucketInfo(getBucket(req, 0));
+
+ addResult(result, req);
+ req.returnValues().add(new Int32Value(result.getBucketInfo().getChecksum()));
+ req.returnValues().add(new Int32Value(result.getBucketInfo().getDocumentCount()));
+ req.returnValues().add(new Int32Value(result.getBucketInfo().getDocumentSize()));
+ req.returnValues().add(new Int32Value(result.getBucketInfo().getEntryCount()));
+ req.returnValues().add(new Int32Value(result.getBucketInfo().getUsedSize()));
+ req.returnValues().add(new Int8Value(result.getBucketInfo().isReady() ? (byte)1 : (byte)0));
+ req.returnValues().add(new Int8Value(result.getBucketInfo().isActive() ? (byte)1 : (byte)0));
+ }
+
+ public void RPC_put(Request req) {
+ try {
+ GrowableByteBuffer buffer = new GrowableByteBuffer(ByteBuffer.wrap(req.parameters().get(3).asData()));
+ Document doc = new Document(DocumentDeserializerFactory.create42(docTypeManager, buffer));
+ addResult(provider.put(getBucket(req, 0), req.parameters().get(2).asInt64(), doc), req);
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw new RuntimeException(e);
+ }
+ }
+
+ public void addRemoveResult(RemoveResult result, Request req) {
+ addResult(result, req);
+ req.returnValues().add(new Int8Value(result.wasFound() ? (byte)1 : (byte)0));
+ }
+
+ public void RPC_removeById(Request req) {
+ addRemoveResult(
+ provider.remove(
+ getBucket(req, 0),
+ req.parameters().get(2).asInt64(),
+ new DocumentId(req.parameters().get(3).asString())), req);
+ }
+
+ public void RPC_removeIfFound(Request req) {
+ addRemoveResult(
+ provider.removeIfFound(
+ getBucket(req, 0),
+ req.parameters().get(2).asInt64(),
+ new DocumentId(req.parameters().get(3).asString())), req);
+ }
+
+ public void RPC_removeEntry(Request req) {
+ addResult(
+ provider.removeEntry(
+ getBucket(req, 0),
+ req.parameters().get(2).asInt64()), req);
+ }
+
+ public void RPC_update(Request req) {
+ try {
+ GrowableByteBuffer buffer = new GrowableByteBuffer(ByteBuffer.wrap(req.parameters().get(3).asData()));
+ DocumentUpdate update = new DocumentUpdate(DocumentDeserializerFactory.createHead(docTypeManager, buffer));
+ UpdateResult result = provider.update(getBucket(req, 0), req.parameters().get(2).asInt64(), update);
+ addResult(result, req);
+
+ req.returnValues().add(new Int64Value(result.getExistingTimestamp()));
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw new RuntimeException(e);
+ }
+ }
+
+ public void RPC_flush(Request req) {
+ addResult(provider.flush(getBucket(req, 0)), req);
+ }
+
+ FieldSet getFieldSet(Request req, int index) {
+ // Field set parsing is not wired up yet; return all fields for now.
+ // TODO: return new FieldSetRepo().parse(docTypeManager, req.parameters().get(index).asString());
+ return new AllFields();
+ }
+
+ byte[] serializeDocument(Document doc) {
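+ // Serializes with the 4.2 document wire format, mirroring the deserialization in RPC_put.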
+ if (doc != null) {
+ GrowableByteBuffer buf = new GrowableByteBuffer();
+ DocumentSerializer serializer = DocumentSerializerFactory.create42(buf);
+ doc.serialize(serializer);
+ buf.flip();
+ return buf.array();
+ } else {
+ return new byte[0];
+ }
+ }
+
+ public void RPC_get(Request req) {
+ GetResult result = provider.get(getBucket(req, 0),
+ getFieldSet(req, 2),
+ new DocumentId(req.parameters().get(3).asString()));
+ addResult(result, req);
+ req.returnValues().add(new Int64Value(result.getLastModifiedTimestamp()));
+ req.returnValues().add(new DataValue(serializeDocument(result.getDocument())));
+ }
+
+ public void RPC_createIterator(Request req) {
+ try {
+ TreeSet<Long> timestampSet = new TreeSet<Long>();
+ long[] timestamps = req.parameters().get(6).asInt64Array();
+ for (long l : timestamps) {
+ timestampSet.add(l);
+ }
+
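+ // A non-empty timestamp subset takes precedence over the selection string and time range.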
+ Selection selection;
+ if (timestamps.length > 0) {
+ selection = new Selection(timestampSet);
+ } else {
+ selection = new Selection(
+ req.parameters().get(3).asString(),
+ req.parameters().get(4).asInt64(),
+ req.parameters().get(5).asInt64());
+ }
+
+ CreateIteratorResult result = provider.createIterator(
+ getBucket(req, 0),
+ getFieldSet(req, 2),
+ selection,
+ PersistenceProvider.IncludedVersions.values()[req.parameters().get(7).asInt8()]);
+
+ addResult(result, req);
+ req.returnValues().add(new Int64Value(result.getIteratorId()));
+ } catch (ParseException e) {
+ addResult(new Result(Result.ErrorType.PERMANENT_ERROR, "Unparseable document selection expression"), req);
+ req.returnValues().add(new Int64Value(0));
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw new RuntimeException(e);
+ }
+ }
+
+ public void RPC_iterate(Request req) {
+ try {
+ long iteratorId = req.parameters().get(0).asInt64();
+ long maxByteSize = req.parameters().get(1).asInt64();
+
+ IterateResult result = provider.iterate(iteratorId, maxByteSize);
+
+ addResult(result, req);
+
+ int count = result.getEntries() != null ? result.getEntries().size() : 0;
+ long[] timestamps = new long[count];
+ int[] flags = new int[count];
+ String[] docIds = new String[count];
+ byte[][] documents = new byte[count][];
+
+ for (int i = 0; i < count; ++i) {
+ DocEntry entry = result.getEntries().get(i);
+ timestamps[i] = entry.getTimestamp();
+ flags[i] = entry.getType().ordinal();
+
+ if (entry.getDocumentId() != null) {
+ docIds[i] = entry.getDocumentId().toString();
+ } else {
+ docIds[i] = "";
+ }
+
+ if (entry.getDocument() != null) {
+ documents[i] = serializeDocument(entry.getDocument());
+ } else {
+ documents[i] = new byte[0];
+ }
+ }
+
+ req.returnValues().add(new Int64Array(timestamps));
+ req.returnValues().add(new Int32Array(flags));
+ req.returnValues().add(new StringArray(docIds));
+ req.returnValues().add(new DataArray(documents));
+ req.returnValues().add(new Int8Value(result.isCompleted() ? (byte)1 : (byte)0));
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw new RuntimeException(e);
+ }
+ }
+
+ public void RPC_destroyIterator(Request req) {
+ try {
+ addResult(provider.destroyIterator(req.parameters().get(0).asInt64()), req);
+ } catch (Exception e) {
+ // Unlike the other handlers, failures here are logged and swallowed; destroy is best-effort.
+ e.printStackTrace();
+ }
+ }
+
+ public void RPC_createBucket(Request req) {
+ addResult(provider.createBucket(getBucket(req, 0)), req);
+ }
+
+ public void RPC_deleteBucket(Request req) {
+ addResult(provider.deleteBucket(getBucket(req, 0)), req);
+ }
+
+ public void RPC_getModifiedBuckets(Request req) {
+ addBucketIdListResult(provider.getModifiedBuckets(), req);
+ }
+
+ public void RPC_maintain(Request req) {
+ addResult(provider.maintain(getBucket(req, 0),
+ PersistenceProvider.MaintenanceLevel.values()[req.parameters().get(2).asInt8()]), req);
+ }
+
+ public void RPC_split(Request req) {
+ addResult(provider.split(
+ getBucket(req, 0),
+ getBucket(req, 2),
+ getBucket(req, 4)), req);
+ }
+
+ public void RPC_join(Request req) {
+ addResult(provider.join(
+ getBucket(req, 0),
+ getBucket(req, 2),
+ getBucket(req, 4)), req);
+ }
+
+ public void RPC_move(Request req) {
+ addResult(provider.move(
+ getBucket(req, 0),
+ (short)req.parameters().get(2).asInt64()), req);
+ }
+}
diff --git a/persistence/src/main/java/com/yahoo/persistence/rpc/PersistenceProviderMethod.java b/persistence/src/main/java/com/yahoo/persistence/rpc/PersistenceProviderMethod.java
new file mode 100644
index 00000000000..b8510e015ba
--- /dev/null
+++ b/persistence/src/main/java/com/yahoo/persistence/rpc/PersistenceProviderMethod.java
@@ -0,0 +1,39 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.persistence.rpc;
+
+import com.yahoo.jrt.*;
+
+/**
+ * Represents a JRT method exposed by PersistenceProviderHandler. Every provider method
+ * returns an error code and an error message ("bs") ahead of any method-specific return
+ * values; paramDesc() and returnDesc() assign description indexes automatically.
+ */
+public class PersistenceProviderMethod extends Method {
+ int nextReturnDesc = 0;
+ int nextParamDesc = 0;
+
+ PersistenceProviderMethod returnDesc(String code, String text) {
+ returnDesc(nextReturnDesc, code, text);
+ ++nextReturnDesc;
+ return this;
+ }
+
+ PersistenceProviderMethod paramDesc(String code, String text) {
+ paramDesc(nextParamDesc, code, text);
+ ++nextParamDesc;
+ return this;
+ }
+
+ public PersistenceProviderMethod(String name, PersistenceProviderHandler owner, String paramTypes) {
+ this(name, owner, paramTypes, "");
+ }
+
+ public PersistenceProviderMethod(String name, PersistenceProviderHandler owner) {
+ this(name, owner, "", "");
+ }
+
+ public PersistenceProviderMethod(String name, PersistenceProviderHandler owner, String paramTypes, String returnTypes) {
+ super("vespa.persistence." + name, paramTypes, "bs" + returnTypes, owner, "RPC_" + name);
+ returnDesc("code", "Error code, or 0 if successful");
+ returnDesc("message", "Error message");
+ }
+
+}
diff --git a/persistence/src/main/java/com/yahoo/persistence/rpc/RPCHandler.java b/persistence/src/main/java/com/yahoo/persistence/rpc/RPCHandler.java
new file mode 100644
index 00000000000..e7fb8d3aa30
--- /dev/null
+++ b/persistence/src/main/java/com/yahoo/persistence/rpc/RPCHandler.java
@@ -0,0 +1,39 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.persistence.rpc;
+
+import com.yahoo.jrt.*;
+
+import java.util.logging.Logger;
+
+
+/**
+ * A handler that can be used to register RPC function calls,
+ * using Vespa JRT. To enable an RPC server, first call addMethod() any number of times,
+ * then start().
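+ *
+ * <p>A minimal usage sketch ({@code someHandler} and the method are illustrative only):</p>
+ * <pre>{@code
+ * RPCHandler handler = new RPCHandler(7100);
+ * handler.addMethod(new Method("my.echo", "s", "s", someHandler, "RPC_echo"));
+ * handler.start(); // listens for JRT requests on the configured port
+ * }</pre>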
+ */
+public class RPCHandler {
+ private static final Logger log = Logger.getLogger(RPCHandler.class.getName());
+
+ private final int port;
+ private final Supervisor supervisor;
+ private Acceptor acceptor;
+
+ public RPCHandler(int port) {
+ supervisor = new Supervisor(new Transport());
+ this.port = port;
+ }
+
+ public void start() {
+ try {
+ acceptor = supervisor.listen(new Spec(port));
+ log.info("Listening for RPC requests on port " + port);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ public void addMethod(Method method) {
+ supervisor.addMethod(method);
+ }
+
+}
diff --git a/persistence/src/main/java/com/yahoo/persistence/rpc/TimestampedProviderMethod.java b/persistence/src/main/java/com/yahoo/persistence/rpc/TimestampedProviderMethod.java
new file mode 100644
index 00000000000..e438628b43d
--- /dev/null
+++ b/persistence/src/main/java/com/yahoo/persistence/rpc/TimestampedProviderMethod.java
@@ -0,0 +1,20 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.persistence.rpc;
+
+/**
+ * Represents a JRT persistence provider method that includes a timestamp in its request
+ */
+public class TimestampedProviderMethod extends BucketProviderMethod {
+ public TimestampedProviderMethod(String name, PersistenceProviderHandler owner) {
+ this(name, owner, "", "");
+ }
+
+ public TimestampedProviderMethod(String name, PersistenceProviderHandler owner, String paramTypes) {
+ this(name, owner, paramTypes, "");
+ }
+
+ public TimestampedProviderMethod(String name, PersistenceProviderHandler owner, String paramTypes, String returnTypes) {
+ super(name, owner, "l" + paramTypes, returnTypes);
+ paramDesc("timestamp", "The timestamp of the operation");
+ }
+}
diff --git a/persistence/src/main/java/com/yahoo/persistence/rpc/package-info.java b/persistence/src/main/java/com/yahoo/persistence/rpc/package-info.java
new file mode 100644
index 00000000000..6d3fc4781a3
--- /dev/null
+++ b/persistence/src/main/java/com/yahoo/persistence/rpc/package-info.java
@@ -0,0 +1,7 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+@ExportPackage
+@PublicApi
+package com.yahoo.persistence.rpc;
+
+import com.yahoo.api.annotations.PublicApi;
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/AbstractPersistenceProvider.java b/persistence/src/main/java/com/yahoo/persistence/spi/AbstractPersistenceProvider.java
new file mode 100644
index 00000000000..89c98a873b0
--- /dev/null
+++ b/persistence/src/main/java/com/yahoo/persistence/spi/AbstractPersistenceProvider.java
@@ -0,0 +1,82 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.persistence.spi;
+
+import com.yahoo.document.*;
+import com.yahoo.document.fieldset.AllFields;
+import com.yahoo.persistence.spi.result.*;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * An abstract class that implements persistence provider functionality that some providers
+ * may not have use for.
+ */
+public abstract class AbstractPersistenceProvider implements PersistenceProvider {
+ @Override
+ public Result initialize() {
+ return new Result();
+ }
+
+ @Override
+ public PartitionStateListResult getPartitionStates() {
+ List<PartitionState> partitionStates = new ArrayList<PartitionState>();
+ partitionStates.add(new PartitionState(PartitionState.State.UP, ""));
+ return new PartitionStateListResult(partitionStates);
+ }
+
+ @Override
+ public Result setClusterState(ClusterState state) {
+ return new Result();
+ }
+
+ @Override
+ public Result setActiveState(Bucket bucket, BucketInfo.ActiveState active) {
+ return new Result();
+ }
+
+
+ @Override
+ public RemoveResult removeIfFound(Bucket bucket, long timestamp, DocumentId id) {
+ return remove(bucket, timestamp, id);
+ }
+
+ @Override
+ public Result removeEntry(Bucket bucket, long timestampToRemove) {
+ return new Result();
+ }
+
+ @Override
+ public Result flush(Bucket bucket) {
+ return new Result();
+ }
+
+ @Override
+ public BucketIdListResult getModifiedBuckets() {
+ return new BucketIdListResult(new ArrayList<BucketId>());
+ }
+
+ @Override
+ public Result maintain(Bucket bucket, MaintenanceLevel level) {
+ return new Result();
+ }
+
+ @Override
+ public Result move(Bucket bucket, short partitionId) {
+ return new Result();
+ }
+
+ @Override
+ public UpdateResult update(Bucket bucket, long timestamp, DocumentUpdate update) {
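+ // Default implementation: read-modify-write. Fetch the stored document,
+ // apply the update to a copy, and re-put it at the new timestamp.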
+ GetResult result = get(bucket, new AllFields(), update.getId());
+ if (result.wasFound()) {
+ Document doc = result.getDocument().clone();
+ update.applyTo(doc);
+ put(bucket, timestamp, doc);
+ return new UpdateResult(result.getLastModifiedTimestamp());
+ } else {
+ return new UpdateResult();
+ }
+ }
+}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/Bucket.java b/persistence/src/main/java/com/yahoo/persistence/spi/Bucket.java
new file mode 100644
index 00000000000..0c9a8d16476
--- /dev/null
+++ b/persistence/src/main/java/com/yahoo/persistence/spi/Bucket.java
@@ -0,0 +1,30 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.persistence.spi;
+
+import com.yahoo.document.BucketId;
+
+/**
+ * @author thomasg
+ */
+public class Bucket {
+ BucketId bucketId;
+ short partitionId;
+
+ /**
+ * @param partition The partition (i.e. disk) where the bucket is located
+ * @param bucketId The bucket id of the bucket
+ */
+ public Bucket(short partition, BucketId bucketId) {
+ this.partitionId = partition;
+ this.bucketId = bucketId;
+ }
+
+ public BucketId getBucketId() { return bucketId; }
+
+ public short getPartitionId() { return partitionId; }
+
+ @Override
+ public String toString() {
+ return partitionId + "/" + bucketId;
+ }
+}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/BucketInfo.java b/persistence/src/main/java/com/yahoo/persistence/spi/BucketInfo.java
new file mode 100644
index 00000000000..78d66f7d701
--- /dev/null
+++ b/persistence/src/main/java/com/yahoo/persistence/spi/BucketInfo.java
@@ -0,0 +1,155 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.persistence.spi;
+
+/**
+ * Class representing information about a bucket stored by the persistence provider.
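+ *
+ * <p>Example (illustrative values): info for a flushed bucket holding two unique documents:</p>
+ * <pre>{@code
+ * BucketInfo info = new BucketInfo(0x1234, 2, 1024, 3, 2048,
+ *                                  ReadyState.READY, ActiveState.NOT_ACTIVE);
+ * }</pre>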
+ */
+public class BucketInfo {
+ public enum ReadyState {
+ NOT_READY,
+ READY
+ }
+
+ public enum ActiveState {
+ NOT_ACTIVE,
+ ACTIVE
+ }
+
+ /** Create an empty bucket info object. */
+ public BucketInfo() {
+ }
+
+ /**
+ * @param checksum The checksum of the bucket contents.
+ * @param docCount The number of documents stored
+ * @param docSize The total size of the documents stored
+ * @param metaEntryCount The number of different versions of documents that are stored (including document removes)
+ * @param size The total size of entries in this bucket.
+ * @param ready Whether the bucket is ready or not
+ * @param active Whether the bucket is active or not
+ */
+ public BucketInfo(int checksum,
+ int docCount,
+ int docSize,
+ int metaEntryCount,
+ int size,
+ ReadyState ready,
+ ActiveState active) {
+ this.checksum = checksum;
+ this.documentCount = docCount;
+ this.documentSize = docSize;
+ this.entryCount = metaEntryCount;
+ this.size = size;
+ this.ready = ready;
+ this.active = active;
+ }
+
+ /**
+ * Constructor for providers that don't care about the READY/ACTIVE paradigm.
+ *
+ * @param checksum The checksum of the bucket contents.
+ * @param docCount The number of documents stored
+ * @param docSize The total size of the documents stored
+ * @param metaEntryCount The number of different versions of documents that are stored (including document removes)
+ * @param size The total size of entries in this bucket.
+ */
+ public BucketInfo(int checksum,
+ int docCount,
+ int docSize,
+ int metaEntryCount,
+ int size) {
+ this(checksum, docCount, docSize, metaEntryCount, size, ReadyState.NOT_READY, ActiveState.NOT_ACTIVE);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) return true;
+ if (!(obj instanceof BucketInfo)) return false;
+ BucketInfo other = (BucketInfo) obj;
+ return checksum == other.checksum &&
+ documentCount == other.documentCount &&
+ documentSize == other.documentSize &&
+ entryCount == other.entryCount &&
+ size == other.size &&
+ ready == other.ready &&
+ active == other.active;
+ }
+
+ @Override
+ public int hashCode() {
+ return java.util.Objects.hash(checksum, documentCount, documentSize, entryCount, size, ready, active);
+ }
+
+ @Override
+ public String toString() {
+ String retVal = "BucketInfo(";
+ if (valid()) {
+ retVal += "crc " + checksum + ", uniqueCount " + documentCount +
+ ", uniqueSize " + documentSize + ", entry count " + entryCount +
+ ", usedSize " + size + ", ready " + isReady() +
+ ", active " + isActive();
+ } else {
+ retVal += "invalid";
+ }
+ retVal += ")";
+ return retVal;
+ }
+
+ /**
+ * @return the checksum of the bucket. An empty bucket should have a checksum of
+ * zero. The checksum should only include data from the latest versions of
+ * non-removed documents. Beyond that, the checksum implementation is up to
+ * the persistence implementation, unless multiple persistence implementations
+ * are run in the same cluster, in which case they have to match.
+ */
+ public int getChecksum() { return checksum; }
+
+ /**
+ * The number of unique documents that have not been removed from the
+ * bucket. A unique document count above the splitting threshold will cause
+ * the bucket to be split.
+ */
+ public int getDocumentCount() { return documentCount; }
+
+ /**
+ * The total size of all the unique documents in this bucket. A size above
+ * the splitting threshold will cause the bucket to be split. Knowing size
+ * is optional, but a bucket with more than zero unique documents should
+ * always return a non-zero value for size. If splitting on size is not
+ * required or desired, a simple solution here is to just set the number
+ * of unique documents as the size.
+ */
+ public int getDocumentSize() { return documentSize; }
+
+ /**
+ * The number of meta entries in the bucket. For a persistence layer
+ * keeping history of data (multiple versions of a document or remove
+ * entries), it may use more meta entries in the bucket than it has unique
+ * documents. If the sum of meta entries from a pair of joinable buckets goes
+ * below the join threshold, the buckets will be joined.
+ */
+ public int getEntryCount() { return entryCount; }
+
+ /**
+ * The total size used by the persistence layer to store all the documents
+ * for a given bucket. Possibly excluding pre-allocated space not currently
+ * in use. Knowing size is optional, but if the bucket contains more than
+ * zero meta entries, it should return a non-zero value for used size.
+ */
+ public int getUsedSize() { return size; }
+
+ /**
+ * @return Returns true if this bucket is considered "ready". Ready buckets
+ * are prioritized before non-ready buckets to be set active.
+ */
+ public boolean isReady() { return ready == ReadyState.READY; }
+
+ /**
+ * @return Returns true if this bucket is "active". If it is, the bucket should
+ * be included in read operations outside of the persistence provider API.
+ */
+ public boolean isActive() { return active == ActiveState.ACTIVE; }
+
+ public boolean valid() { return documentCount > 0 || documentSize == 0; }
+
+ int checksum = 0;
+ int documentCount = 0;
+ int documentSize = 0;
+ int entryCount = 0;
+ int size = 0;
+ ReadyState ready = ReadyState.READY;
+ ActiveState active = ActiveState.NOT_ACTIVE;
+}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/ClusterState.java b/persistence/src/main/java/com/yahoo/persistence/spi/ClusterState.java
new file mode 100644
index 00000000000..1199c9bb255
--- /dev/null
+++ b/persistence/src/main/java/com/yahoo/persistence/spi/ClusterState.java
@@ -0,0 +1,32 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.persistence.spi;
+
+/**
+ * Interface that allows a provider to determine whether the node is currently up,
+ * whether the cluster is up, and/or whether a given bucket should be "ready" given the state.
+ */
+public interface ClusterState {
+ /**
+ * Returns true if the system has been set up to have
+ * "ready" nodes, and the given bucket is in the ideal state
+ * for readiness.
+ *
+ * @param bucket The bucket to check.
+ * @return Returns true if the bucket should be set to "ready".
+ */
+ public boolean shouldBeReady(Bucket bucket);
+
+ /**
+ * @return Returns false if the cluster has been deemed down. This can happen
+ * if the fleet controller has detected that too many nodes are down
+ * compared to the complete list of nodes, and deems the system to be
+ * unusable.
+ */
+ public boolean clusterUp();
+
+ /**
+ * @return Returns false if this node has been set in a state where it should not
+ * receive external load.
+ */
+ public boolean nodeUp();
+}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/ClusterStateImpl.java b/persistence/src/main/java/com/yahoo/persistence/spi/ClusterStateImpl.java
new file mode 100644
index 00000000000..da1711f6587
--- /dev/null
+++ b/persistence/src/main/java/com/yahoo/persistence/spi/ClusterStateImpl.java
@@ -0,0 +1,50 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.persistence.spi;
+
+import com.yahoo.vdslib.distribution.Distribution;
+import com.yahoo.vdslib.state.Node;
+import com.yahoo.vdslib.state.NodeType;
+
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.text.ParseException;
+
+/**
+ * Implementation of the cluster state interface for deserialization from RPC.
+ */
+public class ClusterStateImpl implements com.yahoo.persistence.spi.ClusterState {
+ com.yahoo.vdslib.state.ClusterState clusterState;
+ short clusterIndex;
+ Distribution distribution;
+
+ public ClusterStateImpl(byte[] serialized) throws ParseException {
+ ByteBuffer buf = ByteBuffer.wrap(serialized);
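+ // Wire format: [int32 length][cluster state string]
+ //              [int16 node index]
+ //              [int32 length][distribution config string]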
+
+ int clusterStateLength = buf.getInt();
+ byte[] clusterState = new byte[clusterStateLength];
+ buf.get(clusterState);
+
+ clusterIndex = buf.getShort();
+
+ int distributionLength = buf.getInt();
+ byte[] distribution = new byte[distributionLength];
+ buf.get(distribution);
+
+ this.clusterState = new com.yahoo.vdslib.state.ClusterState(new String(clusterState, StandardCharsets.UTF_8));
+ this.distribution = new Distribution("raw:" + new String(distribution, StandardCharsets.UTF_8));
+ }
+
+ @Override
+ public boolean shouldBeReady(Bucket bucket) {
+ return true;
+ }
+
+ @Override
+ public boolean clusterUp() {
+ return clusterState != null && clusterState.getClusterState().oneOf("u");
+ }
+
+ @Override
+ public boolean nodeUp() {
+ return clusterState.getNodeState(new Node(NodeType.STORAGE, clusterIndex)).getState().oneOf("uir");
+ }
+}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/DocEntry.java b/persistence/src/main/java/com/yahoo/persistence/spi/DocEntry.java
new file mode 100644
index 00000000000..cf7ef2cc50c
--- /dev/null
+++ b/persistence/src/main/java/com/yahoo/persistence/spi/DocEntry.java
@@ -0,0 +1,56 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.persistence.spi;
+
+import com.yahoo.document.Document;
+import com.yahoo.document.DocumentId;
+
+/**
+ * Class that represents an entry retrieved by iterating.
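+ *
+ * <p>Example (illustrative): a put entry and a remove entry for the same document:</p>
+ * <pre>{@code
+ * DocEntry putEntry = new DocEntry(1000L, doc);            // PUT_ENTRY; id taken from doc
+ * DocEntry removeEntry = new DocEntry(2000L, doc.getId()); // REMOVE_ENTRY
+ * }</pre>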
+ */
+public class DocEntry implements Comparable<DocEntry> {
+
+
+ @Override
+ public int compareTo(DocEntry docEntry) {
+ return Long.compare(timestamp, docEntry.getTimestamp());
+ }
+
+ public enum Type {
+ PUT_ENTRY,
+ REMOVE_ENTRY
+ }
+
+ long timestamp;
+ Type type;
+
+ DocumentId docId;
+ Document document;
+
+ public DocEntry(long timestamp, Document doc, Type type, DocumentId docId) {
+ this.timestamp = timestamp;
+ this.type = type;
+ this.docId = docId;
+ document = doc;
+ }
+
+
+ public DocEntry(long timestamp, Document doc) {
+ this(timestamp, doc, Type.PUT_ENTRY, doc.getId());
+ }
+
+ public DocEntry(long timestamp, DocumentId docId) {
+ this(timestamp, null, Type.REMOVE_ENTRY, docId);
+ }
+
+ public DocEntry(long timestamp, Type type) {
+ this(timestamp, null, type, null);
+ }
+
+ public Type getType() { return type; }
+
+ public long getTimestamp() { return timestamp; }
+
+ public DocumentId getDocumentId() { return docId; }
+
+ public Document getDocument() { return document; }
+}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/PartitionState.java b/persistence/src/main/java/com/yahoo/persistence/spi/PartitionState.java
new file mode 100644
index 00000000000..10d3b2e73e4
--- /dev/null
+++ b/persistence/src/main/java/com/yahoo/persistence/spi/PartitionState.java
@@ -0,0 +1,27 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.persistence.spi;
+
+/**
+* @author thomasg
+*/
+public class PartitionState {
+ public PartitionState(State state, String reason) {
+ this.state = state;
+ this.reason = reason;
+
+ if (reason == null || state == null) {
+ throw new IllegalArgumentException("State and reason must be non-null");
+ }
+ }
+
+ public State getState() { return state; }
+ public String getReason() { return reason; }
+
+ State state;
+ String reason;
+
+ public enum State {
+ UP,
+ DOWN
+ }
+}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/PersistenceProvider.java b/persistence/src/main/java/com/yahoo/persistence/spi/PersistenceProvider.java
new file mode 100644
index 00000000000..68e9f1b5e24
--- /dev/null
+++ b/persistence/src/main/java/com/yahoo/persistence/spi/PersistenceProvider.java
@@ -0,0 +1,382 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.persistence.spi;
+
+import com.yahoo.document.Document;
+import com.yahoo.document.DocumentId;
+import com.yahoo.document.DocumentUpdate;
+import com.yahoo.document.fieldset.FieldSet;
+import com.yahoo.persistence.spi.result.*;
+
+/**
+ * <p>
+ * This interface is the basis for a persistence provider in Vespa.
+ * A persistence provider is used by Vespa Storage to provide an elastic stateful system.
+ * </p>
+ * <p>
+ * The Vespa distribution mechanisms are based on distributing "buckets"
+ * between the nodes in the system. A bucket is an abstract concept that
+ * groups a set of documents. The persistence provider can choose freely
+ * how to implement a bucket, but it needs to be able to access a bucket as
+ * a unit. The placement of these units is controlled by the distributors.
+ * </p>
+ * <p>
+ * A persistence provider may support multiple "partitions". One example of
+ * a partition is a physical disk, but the exact meaning of "partitions"
+ * is left to the provider; it must, however, be able to report its partition states to the service layer.
+ * </p>
+ * <p>
+ * All operations return a Result object. The base Result class only encapsulates
+ * potential errors, which can be <i>transient</i>, <i>permanent</i> or <i>fatal</i>.
+ * Transient errors are errors where it's conceivable that retrying the operation
+ * would lead to success, either on this data copy or on others. Permanent errors
+ * are errors where the request itself is faulty. Fatal errors are transient errors
+ * that have uncovered a problem with this instance of the provider (such as a failing disk),
+ * and where the provider wants the process to be shut down.
+ * </p>
+ * <p>
+ * All write operations have a timestamp. This timestamp is generated
+ * by the distributor, and is guaranteed to be unique for the bucket we're
+ * writing to. A persistence provider is required to store "entries" for each of
+ * these operations, and associate the timestamp with that entry.
+ * Iteration code can retrieve these entries, including entries
+ * for remove operations. The provider is not required to keep any history beyond
+ * the last operation that was performed on a given document.
+ * </p>
+ * <p>
+ * The contract for all write operations is that after returning from the function,
+ * provider read methods (get, iterate) should reflect the modified state.
+ * </p>
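+ * <p>
+ * Callers are expected to check results before using their payload. A typical
+ * pattern (sketch, using methods defined by this API and its result classes):
+ * </p>
+ * <pre>{@code
+ * GetResult result = provider.get(bucket, fieldSet, id);
+ * if (result.hasError()) {
+ *     // inspect result.getErrorType() / result.getErrorMessage()
+ * } else if (result.wasFound()) {
+ *     Document doc = result.getDocument();
+ * }
+ * }</pre>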
+ */
+public interface PersistenceProvider
+{
+ /**
+ * The different types of entries that can be returned
+ * from an iterator.
+ */
+ public enum IncludedVersions {
+ NEWEST_DOCUMENT_ONLY,
+ NEWEST_DOCUMENT_OR_REMOVE,
+ ALL_VERSIONS
+ }
+
+ /**
+ * The different kinds of maintenance we can do.
+ * LOW maintenance may be run more often than HIGH.
+ */
+ public enum MaintenanceLevel {
+ LOW,
+ HIGH
+ }
+
+ /**
+ * Initializes the persistence provider. This function is called exactly once when
+ * the persistence provider starts. If any error is returned here, the service layer
+ * will shut down.
+ */
+ Result initialize();
+
+ /**
+ * Returns a list of the partitions available,
+ * and which are up and down.
+ */
+ PartitionStateListResult getPartitionStates();
+
+ /**
+ * Return list of buckets that provider has stored on the given partition.
+ */
+ BucketIdListResult listBuckets(short partition);
+
+ /**
+ * Updates the persistence provider with the last cluster state.
+ * Only cluster states that are relevant for the provider are supplied (changes
+ * that relate to the distributor will not cause an update here).
+ */
+ Result setClusterState(ClusterState state);
+
+ /**
+ * Sets the bucket state to active or inactive. After this returns,
+ * other buckets may be deactivated, so the node must be able to serve
+ * the data from its secondary index or get reduced coverage.
+ */
+ Result setActiveState(Bucket bucket, BucketInfo.ActiveState active);
+
+ /**
+ * If the bucket doesn't exist, return empty bucket info.
+ */
+ BucketInfoResult getBucketInfo(Bucket bucket);
+
+ /**
+ * Stores the given document.
+ *
+ * @param timestamp The timestamp for the new bucket entry.
+ */
+ Result put(Bucket bucket, long timestamp, Document doc);
+
+ /**
+ * <p>
+ * Removes the document referenced by the document id.
+ * It is strongly recommended to keep entries for the removes for
+ * some period of time. For recovery to work properly, a node that
+ * has been down for a longer period of time than that should be totally
+ * erased. If not, documents that have been removed but have documents
+ * on nodes that have been down will be reinserted.
+ * </p>
+ * <p>
+ * Postconditions:
+ * A successful invocation of this function must add the remove to the
+ * bucket regardless of whether the document existed. More specifically,
+ * iterating over the bucket while including removes after this call
+ * shall yield a remove-entry at the given timestamp for the given
+ * document identifier as part of its result set. The remove entry
+ * shall be added even if there exist removes for the same document id
+ * at other timestamps in the bucket.
+ * </p>
+ * <p>
+ * Also, if the given timestamp is higher than or equal to that of any
+ * existing put entry, those entries should not be returned in subsequent
+ * get calls. If the timestamp is lower than an existing put entry,
+ * those entries should still be available.
+ * </p>
+ * @param timestamp The timestamp for the new bucket entry.
+ * @param id The ID to remove
+ */
+ RemoveResult remove(Bucket bucket, long timestamp, DocumentId id);
+ /**
+ * <p>
+ * See remove()
+ * </p>
+ * <p>
+ * Used for external remove operations. removeIfFound() has no additional
+ * postconditions beyond remove(), but it may choose to <i>not</i> include
+ * a remove entry if no put entry already existed for the given
+ * document. It is recommended, but not required, to not insert entries in this
+ * case, though if remove entries are considered critical it might be better
+ * to insert them in both cases.
+ * </p>
+ * @param timestamp The timestamp for the new bucket entry.
+ * @param id The ID to remove
+ */
+ RemoveResult removeIfFound(Bucket bucket, long timestamp, DocumentId id);
+
+ /**
+ * Removes the entry with the given timestamp. This is usually used to revert
+ * previously performed operations. This operation should be
+ * successful even if there doesn't exist such an entry.
+ */
+ Result removeEntry(Bucket bucket, long timestampToRemove);
+
+ /**
+ * Partially modifies a document referenced by the document update.
+ *
+ * @param timestamp The timestamp to use for the new update entry.
+ * @param update The document update to apply to the stored document.
+ */
+ UpdateResult update(Bucket bucket, long timestamp, DocumentUpdate update);
+
+ /**
+ * <p>
+ * For providers that store data persistently on disk, the contract of
+ * flush is that data has been stored persistently so that if the node should
+ * restart, the data will be available.
+ * </p>
+ * <p>
+ * The service layer may choose to batch certain commands. This means
+ * that the service layer will lock the bucket only once, then perform several
+ * commands, and finally get the bucket info from the bucket, and then flush it.
+ * This can be used to improve performance by caching the modifications, and
+ * persisting them to disk only when flush is called. The service layer guarantees
+ * that after one of these operations, flush() is called, regardless of whether
+ * the operation succeeded or not, before another bucket is processed in the same
+ * worker thread. The following operations can be batched and have the guarantees
+ * above:
+ * - put
+ * - get
+ * - remove
+ * - removeIfFound
+ * - update
+ * - removeEntry
+ * </p>
+ */
+ Result flush(Bucket bucket);
+
+ /**
+ * Retrieves the latest version of the document specified by the
+ * document id. If no versions were found, or the document was removed,
+ * the result should be successful, but contain no document (see GetResult).
+ *
+ * @param fieldSet A set of fields that should be retrieved.
+ * @param id The document id to retrieve.
+ */
+ GetResult get(Bucket bucket, FieldSet fieldSet, DocumentId id);
+
+ /**
+ * Create an iterator for a given bucket and selection criteria, returning
+ * a unique, non-zero iterator identifier that can be used by the caller as
+ * an argument to iterate and destroyIterator.
+ *
+ * Each successful invocation of createIterator shall be paired with
+ * a later invocation of destroyIterator by the caller to ensure
+ * resources are freed up. NOTE: this may not apply in a shutdown
+ * situation due to service layer communication channels closing down.
+ *
+ * It is assumed that a successful invocation of this function will result
+ * in some state being established in the persistence provider, holding
+ * the information required to match iterator ids up to their current
+ * iteration progress and selection criteria. destroyIterator will NOT
+ * be called when createIterator returns an error.
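+ *
+ * <p>Typical caller pattern (sketch; {@code process} stands in for caller logic):</p>
+ * <pre>{@code
+ * CreateIteratorResult created = provider.createIterator(bucket, fieldSet, selection, versions);
+ * IterateResult chunk;
+ * do {
+ *     chunk = provider.iterate(created.getIteratorId(), 1024 * 1024);
+ *     process(chunk.getEntries());
+ * } while (!chunk.isCompleted());
+ * provider.destroyIterator(created.getIteratorId());
+ * }</pre>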
+ *
+ * @param selection Selection criteria used to limit the subset of
+ * the bucket's documents that will be returned by the iterator. The
+ * provider implementation may use these criteria to optimize its
+ * operation as it sees fit, as long as doing so does not violate
+ * selection correctness.
+ * @return A process-globally unique iterator identifier iff the result
+ * is successful and internal state has been created, otherwise an
+ * error. Identifier must be non-zero, as zero is used internally to
+ * signify an invalid iterator ID.
+ */
+ CreateIteratorResult createIterator(Bucket bucket,
+ FieldSet fieldSet,
+ Selection selection,
+ IncludedVersions versions);
+
+ /**
+ * Iterate over a bucket's document space using a valid iterator id
+ * received from createIterator. Each invocation of iterate upon an
+ * iterator that has not yet fully exhausted its document space shall
+ * return a minimum of 1 document entry per IterateResult to ensure progress.
+ * An implementation shall limit the result set per invocation to document
+ * entries whose combined in-memory/serialized size is a "soft" maximum of
+ * maxByteSize. More specifically, the sum of getSize() over all returned
+ * DocEntry instances should be &lt;= (maxByteSize + the size of the last
+ * document in the result set). This special case allows for limiting the
+ * result set both by observing "before the fact" that the next potential
+ * document to include would exceed the max size and by observing "after
+ * the fact" that the document that was just added caused the max size to
+ * be exceeded.
+ * However, if a document exceeds maxByteSize and not including it implies
+ * the result set would be empty, it must be included in the result anyway
+ * in order to not violate the progress requirement.
+ *
+ * The caller shall not make any assumptions on whether or not documents
+ * that arrive to--or are removed from--the bucket in the time between
+ * separate invocations of iterate for the same iterator id will show up
+ * in the results, assuming that these documents do not violate the
+ * selection criteria. This means that there is no requirement for
+ * maintaining a "snapshot" view of the bucket's state as it existed upon
+ * the initial createIterator call. Neither shall the caller make any
+ * assumptions on the ordering of the returned documents.
+ *
+ * The IterateResult shall--for each document entry that matches the
+ * selection criteria and falls within the maxByteSize limit mentioned
+ * above--return the following information in its result:
+ *
+ * -- For non-removed entries: A DocEntry where getDocumentOperation() will
+ * return a valid DocumentPut instance and getSize() will return the
+ * serialized size of the document.
+ * -- For removed entries: A DocEntry where getDocumentId() will
+ * return a valid document identifier. Remove entries shall not
+ * contain document instances.
+ * -- For meta entries: A DocEntry that shall not contain a document
+ * instance nor should it include a document id instance (if
+ * included, would be ignored by the service layer in any context
+ * where metadata-only is requested).
+ *
+ * The service layer shall guarantee that no two invocations of iterate
+ * will happen simultaneously/concurrently for the same iterator id.
+ *
+ * Upon a successful invocation of iterate, the persistence provider shall
+ * update its internal state to account for the progress made so that new
+ * invocations will cover a new subset of the document space. When an
+ * IterateResult contains the final documents for the iteration, i.e. the
+ * iterator has reached its end, setCompleted() must be set on the result
+ * to indicate this to the caller. Calling iterate on an already completed
+ * iterator must only set this flag on the result and return without any
+ * documents.
+ *
+ * @param iteratorId An iterator ID returned by a previous call to createIterator
+ * @param maxByteSize An indication of the maximum number of bytes that should be returned.
+ */
+ IterateResult iterate(long iteratorId, long maxByteSize);
+
+ /**
+ * <p>
+ * Destroys the iterator specified by the given id.
+ * </p>
+ * <p>
+ * IMPORTANT: this method has different invocation semantics than
+ * the other provider methods! It may be called from the context of
+ * ANY service layer thread, NOT just from the thread in which
+ * createIterator was invoked! The reason for this is because internal
+ * iterator destroy messages aren't mapped to partition threads in the
+ * way other messages are due to their need for guaranteed execution.
+ * </p>
+ * <p>
+ * This in turn implies that iterator states must be shared between
+ * partitions (and thus protected against cross-partition concurrent
+ * access).
+ * </p>
+ * @param iteratorId The iterator id previously returned by createIterator.
+ */
+ Result destroyIterator(long iteratorId);
+
+ /**
+ * Tells the provider that the given bucket has been created in the
+ * service layer. There is no requirement to do anything here.
+ */
+ Result createBucket(Bucket bucket);
+
+ /**
+ * Deletes the given bucket and all entries contained in that bucket.
+ * After this operation has succeeded, a restart of the provider should
+ * not yield the bucket in getBucketList().
+ */
+ Result deleteBucket(Bucket bucket);
+
+ /**
+ * This function is called continuously by the service layer. It allows
+ * the provider to signify whether it has done any out-of-band changes to
+ * buckets that need to be recognized by the rest of the system. The service
+ * layer will proceed to call getBucketInfo() on each of the returned buckets.
+ * After a call to getModifiedBuckets(), the provider should clear its list
+ * of modified buckets, so that the next call does not return the same buckets.
+ */
+ BucketIdListResult getModifiedBuckets();
+
+ /**
+ * Allows the provider to do periodic maintenance and verification.
+ *
+ * @param level The level of maintenance to do. LOW maintenance is scheduled more
+ * often than HIGH maintenance, so should be cheaper.
+ */
+ Result maintain(Bucket bucket, MaintenanceLevel level);
+
+ /**
+ * <p>
+ * Splits the source bucket into the two target buckets.
+ * After the split, all documents belonging to target1 should be
+ * in that bucket, and all documents belonging to target2 should be
+ * in target2. The returned result should reflect this.
+ * </p>
+ * <p>
+ * Before calling this function, the service layer will iterate the bucket
+ * to figure out which buckets the source should be split into. This may
+ * result in splitting more than one bucket bit at a time.
+ * </p>
+ */
+ Result split(Bucket source, Bucket target1, Bucket target2);
+
+ /**
+ * Joins two buckets into one. After the join, all documents from
+ * source1 and source2 should be stored in the target bucket.
+ */
+ Result join(Bucket source1, Bucket source2, Bucket target);
+
+ /**
+ * Moves a bucket from one partition to another.
+ *
+ * @param partitionId The partition to move to.
+ */
+ Result move(Bucket bucket, short partitionId);
+}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/Selection.java b/persistence/src/main/java/com/yahoo/persistence/spi/Selection.java
new file mode 100644
index 00000000000..bf82c0f148e
--- /dev/null
+++ b/persistence/src/main/java/com/yahoo/persistence/spi/Selection.java
@@ -0,0 +1,70 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.persistence.spi;
+
+import com.yahoo.document.Document;
+import com.yahoo.document.DocumentPut;
+import com.yahoo.document.select.DocumentSelector;
+import com.yahoo.document.select.Result;
+import com.yahoo.document.select.parser.ParseException;
+
+import java.util.Set;
+
+/**
+ * Class used when iterating to represent a selection of entries to be returned.
+ *
+ * This class is likely to be replaced by a more generic selection AST in the near future.
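+ *
+ * <p>Example (illustrative selection string): match documents for user 1234 put
+ * between timestamps 1000 and 2000 (inclusive):</p>
+ * <pre>{@code
+ * Selection selection = new Selection("id.user == 1234", 1000, 2000);
+ * boolean matches = selection.match(doc, 1500); // evaluates both the range and the document selection
+ * }</pre>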
+ */
+public class Selection {
+ DocumentSelector documentSelection = null;
+ long fromTimestamp = 0;
+ long toTimestamp = Long.MAX_VALUE;
+ Set<Long> timestampSubset = null;
+
+ public Selection(String documentSelection, long fromTimestamp, long toTimestamp) throws ParseException {
+ this.documentSelection = new DocumentSelector(documentSelection);
+ this.fromTimestamp = fromTimestamp;
+ this.toTimestamp = toTimestamp;
+ }
+
+ public Selection(Set<Long> timestampSubset) {
+ this.timestampSubset = timestampSubset;
+ }
+
+ public boolean requiresFields() {
+ return documentSelection != null;
+ }
+
+ public Set<Long> getTimestampSubset() {
+ return timestampSubset;
+ }
+
+ /**
+ * Returns true if the entry matches the selection criteria given.
+ */
+ public boolean match(Document doc, long timestamp) {
+ if (timestamp < fromTimestamp) {
+ return false;
+ }
+
+ if (timestamp > toTimestamp) {
+ return false;
+ }
+
+ if (timestampSubset != null && !timestampSubset.contains(timestamp)) {
+ return false;
+ }
+
+ if (documentSelection != null && doc != null && !documentSelection.accepts(new DocumentPut(doc)).equals(Result.TRUE)) {
+ return false;
+ }
+
+ return true;
+ }
+
+ /**
+ * Returns true if the entry matches the timestamp ranges/subsets specified in the selection.
+ */
+ public boolean match(long timestamp) {
+ return match(null, timestamp);
+ }
+}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/conformance/ConformanceTest.java b/persistence/src/main/java/com/yahoo/persistence/spi/conformance/ConformanceTest.java
new file mode 100644
index 00000000000..80aedc3263d
--- /dev/null
+++ b/persistence/src/main/java/com/yahoo/persistence/spi/conformance/ConformanceTest.java
@@ -0,0 +1,1605 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.persistence.spi.conformance;
+
+import com.yahoo.document.*;
+import com.yahoo.document.datatypes.IntegerFieldValue;
+import com.yahoo.document.datatypes.StringFieldValue;
+import com.yahoo.document.fieldset.AllFields;
+import com.yahoo.document.fieldset.FieldSet;
+import com.yahoo.document.update.AssignValueUpdate;
+import com.yahoo.document.update.FieldUpdate;
+import com.yahoo.persistence.spi.*;
+import com.yahoo.persistence.spi.result.*;
+import junit.framework.TestCase;
+
+import java.util.*;
+
+public abstract class ConformanceTest extends TestCase {
+ TestDocMan testDocMan = new TestDocMan();
+
+ public interface PersistenceProviderFactory {
+ public PersistenceProvider createProvider(DocumentTypeManager manager);
+ public boolean supportsActiveState();
+ }
+
+ PersistenceProvider init(PersistenceProviderFactory factory) {
+ return factory.createProvider(testDocMan);
+ }
+
+ // TODO: should invoke some form of destruction method on the provider after a test
+ protected void doConformanceTest(PersistenceProviderFactory factory) throws Exception {
+ testBasics(init(factory));
+ testPut(init(factory));
+ testRemove(init(factory));
+ testGet(init(factory));
+ testUpdate(init(factory));
+
+ testListBuckets(init(factory));
+ testBucketInfo(init(factory));
+ testOrderIndependentBucketInfo(init(factory));
+ testPutNewDocumentVersion(init(factory));
+ testPutOlderDocumentVersion(init(factory));
+ testPutDuplicate(init(factory));
+ testDeleteBucket(init(factory));
+ testSplitNormalCase(init(factory));
+ testSplitTargetExists(init(factory));
+ testJoinNormalCase(init(factory));
+ testJoinTargetExists(init(factory));
+ testJoinOneBucket(init(factory));
+
+ testMaintain(init(factory));
+ testGetModifiedBuckets(init(factory));
+
+ if (factory.supportsActiveState()) {
+ testBucketActivation(init(factory));
+ testBucketActivationSplitAndJoin(init(factory));
+ }
+
+ testIterateAllDocs(init(factory));
+ testIterateAllDocsNewestVersionOnly(init(factory));
+ testIterateCreateIterator(init(factory));
+ testIterateDestroyIterator(init(factory));
+ testIterateWithUnknownId(init(factory));
+ testIterateChunked(init(factory));
+ testIterateMatchTimestampRange(init(factory));
+ testIterateMaxByteSize(init(factory));
+ testIterateExplicitTimestampSubset(init(factory));
+ testIterateMatchSelection(init(factory));
+ testIterateRemoves(init(factory));
+ testIterationRequiringDocumentIdOnlyMatching(init(factory));
+ testIterateAlreadyCompleted(init(factory));
+ testIterateEmptyBucket(init(factory));
+
+ testRemoveMerge(init(factory));
+ }
+
+ List<DocEntry> iterateBucket(PersistenceProvider spi, Bucket bucket, PersistenceProvider.IncludedVersions versions) throws Exception {
+ List<DocEntry> ret = new ArrayList<DocEntry>();
+
+ CreateIteratorResult iter = spi.createIterator(
+ bucket,
+ new AllFields(),
+ new Selection("", 0, Long.MAX_VALUE),
+ versions);
+
+ assertFalse(iter.hasError());
+
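+ // Drain the iterator in chunks until the provider signals completion.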
+ while (true) {
+ IterateResult result = spi.iterate(iter.getIteratorId(), Long.MAX_VALUE);
+ assertFalse(result.hasError());
+
+ ret.addAll(result.getEntries());
+
+ if (result.isCompleted()) {
+ break;
+ }
+ }
+
+ Collections.sort(ret);
+
+ return ret;
+ }
+
+ void testBasicsIteration(PersistenceProvider provider, Bucket bucket, Document doc1, Document doc2, boolean includeRemoves) throws Exception {
+ Selection selection = new Selection("true", 0, Long.MAX_VALUE);
+
+ CreateIteratorResult iter = provider.createIterator(
+ bucket,
+ new AllFields(),
+ selection,
+ includeRemoves ? PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_OR_REMOVE : PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_ONLY);
+
+ assertFalse(iter.hasError());
+
+ IterateResult result = provider.iterate(iter.getIteratorId(), Long.MAX_VALUE);
+ assertFalse(result.hasError());
+ assertTrue(result.isCompleted());
+ assertEquals(new Result(), provider.destroyIterator(iter.getIteratorId()));
+
+ long timeRemoveDoc1 = 0;
+ long timeDoc1 = 0;
+ long timeDoc2 = 0;
+
+ for (DocEntry entry : result.getEntries()) {
+ assertNotNull(entry.getDocumentId());
+
+ if (entry.getDocumentId().equals(doc1.getId())) {
+ assertTrue("Got removed document 1 when iterating without removes", includeRemoves);
+
+ if (entry.getType() == DocEntry.Type.REMOVE_ENTRY) {
+ timeRemoveDoc1 = entry.getTimestamp();
+ } else {
+ timeDoc1 = entry.getTimestamp();
+ }
+ } else if (entry.getDocumentId().equals(doc2.getId())) {
+ assertEquals(DocEntry.Type.PUT_ENTRY, entry.getType());
+ timeDoc2 = entry.getTimestamp();
+ } else {
+ assertFalse("Unknown document " + entry.getDocumentId(), false);
+ }
+ }
+
+ assertEquals(2, timeDoc2);
+ assertTrue(timeDoc1 == 0 || timeRemoveDoc1 != 0);
+ }
+
+ void testBasics(PersistenceProvider provider) throws Exception {
+ Bucket bucket = new Bucket((short)0, new BucketId(8, 0x01));
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+ Document doc2 = testDocMan.createRandomDocumentAtLocation(0x01, 2);
+
+ assertEquals(new Result(), provider.createBucket(bucket));
+ assertEquals(new Result(), provider.put(bucket, 1, doc1));
+ assertEquals(new Result(), provider.put(bucket, 2, doc2));
+
+ assertEquals(new RemoveResult(true), provider.remove(bucket, 3, doc1.getId()));
+ assertEquals(new Result(), provider.flush(bucket));
+
+ testBasicsIteration(provider, bucket, doc1, doc2, false);
+ testBasicsIteration(provider, bucket, doc1, doc2, true);
+ }
+
+ void testListBuckets(PersistenceProvider provider) {
+ BucketId bucketId1 = new BucketId(8, 0x01);
+ BucketId bucketId2 = new BucketId(8, 0x02);
+ BucketId bucketId3 = new BucketId(8, 0x03);
+
+ Bucket bucket1 = new Bucket((short)0, bucketId1);
+ Bucket bucket2 = new Bucket((short)0, bucketId2);
+ Bucket bucket3 = new Bucket((short)0, bucketId3);
+
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+ Document doc2 = testDocMan.createRandomDocumentAtLocation(0x02, 2);
+ Document doc3 = testDocMan.createRandomDocumentAtLocation(0x03, 3);
+
+ provider.createBucket(bucket1);
+ provider.createBucket(bucket2);
+ provider.createBucket(bucket3);
+
+ provider.put(bucket1, 1, doc1);
+ provider.flush(bucket1);
+
+ provider.put(bucket2, 2, doc2);
+ provider.flush(bucket2);
+
+ provider.put(bucket3, 3, doc3);
+ provider.flush(bucket3);
+
+ BucketIdListResult result = provider.listBuckets((short)0);
+ assertEquals(3, result.getBuckets().size());
+ assertTrue(result.getBuckets().contains(bucketId1));
+ assertTrue(result.getBuckets().contains(bucketId2));
+ assertTrue(result.getBuckets().contains(bucketId3));
+ }
+
+ void testBucketInfo(PersistenceProvider provider) {
+ Bucket bucket = new Bucket((short)0, new BucketId(8, 0x01));
+
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+ Document doc2 = testDocMan.createRandomDocumentAtLocation(0x01, 2);
+
+ provider.createBucket(bucket);
+ provider.put(bucket, 2, doc2);
+
+ BucketInfo info = provider.getBucketInfo(bucket).getBucketInfo();
+ provider.flush(bucket);
+
+ assertEquals(1, info.getDocumentCount());
+ assertTrue(info.getChecksum() != 0);
+
+ provider.put(bucket, 3, doc1);
+ BucketInfo info2 = provider.getBucketInfo(bucket).getBucketInfo();
+ provider.flush(bucket);
+
+ assertEquals(2, info2.getDocumentCount());
+ assertTrue(info2.getChecksum() != 0);
+ assertTrue(info.getChecksum() != info2.getChecksum());
+
+ provider.put(bucket, 4, doc1);
+ BucketInfo info3 = provider.getBucketInfo(bucket).getBucketInfo();
+ provider.flush(bucket);
+
+ assertEquals(2, info3.getDocumentCount());
+ assertTrue(info3.getChecksum() != 0);
+ assertTrue(info2.getChecksum() != info3.getChecksum());
+
+ provider.remove(bucket, 5, doc1.getId());
+ BucketInfo info4 = provider.getBucketInfo(bucket).getBucketInfo();
+ provider.flush(bucket);
+
+ assertEquals(1, info4.getDocumentCount());
+ assertTrue(info4.getChecksum() != 0);
+ }
+
+
+ void testOrderIndependentBucketInfo(PersistenceProvider spi)
+ {
+ Bucket bucket = new Bucket((short)0, new BucketId(8, 0x01));
+
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+ Document doc2 = testDocMan.createRandomDocumentAtLocation(0x01, 2);
+ spi.createBucket(bucket);
+
+ int checksumOrdered = 0;
+
+ {
+ spi.put(bucket, 2, doc1);
+ spi.put(bucket, 3, doc2);
+ spi.flush(bucket);
+ BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
+
+ checksumOrdered = info.getChecksum();
+ assertTrue(checksumOrdered != 0);
+ }
+
+ spi.deleteBucket(bucket);
+ spi.createBucket(bucket);
+ assertEquals(0, spi.getBucketInfo(bucket).getBucketInfo().getChecksum());
+
+ int checksumUnordered = 0;
+
+ {
+ // Swap order of puts
+ spi.put(bucket, 3, doc2);
+ spi.put(bucket, 2, doc1);
+ spi.flush(bucket);
+ BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
+
+ checksumUnordered = info.getChecksum();
+ assertTrue(checksumUnordered != 0);
+ }
+
+ assertEquals(checksumOrdered, checksumUnordered);
+ }
+
+ void testPut(PersistenceProvider spi) {
+ Bucket bucket = new Bucket((short)0, new BucketId(8, 0x01));
+
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+ spi.createBucket(bucket);
+
+ assertEquals(new Result(), spi.put(bucket, 3, doc1));
+ BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
+ spi.flush(bucket);
+
+ assertEquals(1, (int)info.getDocumentCount());
+ assertTrue(info.getEntryCount() >= info.getDocumentCount());
+ assertTrue(info.getChecksum() != 0);
+ assertTrue(info.getDocumentSize() > 0);
+ assertTrue(info.getUsedSize() >= info.getDocumentSize());
+ }
+
+ void testPutNewDocumentVersion(PersistenceProvider spi) {
+ Bucket bucket = new Bucket((short)0, new BucketId(8, 0x01));
+
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+ Document doc2 = doc1.clone();
+
+ doc2.setFieldValue("content", new StringFieldValue("hiho silver"));
+ spi.createBucket(bucket);
+
+ Result result = spi.put(bucket, 3, doc1);
+ {
+ BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
+ spi.flush(bucket);
+
+ assertEquals(1, (int)info.getDocumentCount());
+ assertTrue(info.getEntryCount() >= info.getDocumentCount());
+ assertTrue(info.getChecksum() != 0);
+ assertTrue(info.getDocumentSize() > 0);
+ assertTrue(info.getUsedSize() >= info.getDocumentSize());
+ }
+
+ result = spi.put(bucket, 4, doc2);
+ {
+ BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
+ spi.flush(bucket);
+
+ assertEquals(1, (int)info.getDocumentCount());
+ assertTrue(info.getEntryCount() >= info.getDocumentCount());
+ assertTrue(info.getChecksum() != 0);
+ assertTrue(info.getDocumentSize() > 0);
+ assertTrue(info.getUsedSize() >= info.getDocumentSize());
+ }
+
+ GetResult gr = spi.get(bucket, new AllFields(), doc1.getId());
+
+ assertEquals(Result.ErrorType.NONE, gr.getErrorType());
+ assertEquals(4, gr.getLastModifiedTimestamp());
+ assertEquals(doc2, gr.getDocument());
+ }
+
+ void testPutOlderDocumentVersion(PersistenceProvider spi) {
+ Bucket bucket = new Bucket((short)0, new BucketId(8, 0x01));
+
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+ Document doc2 = doc1.clone();
+ doc2.setFieldValue("content", new StringFieldValue("hiho silver"));
+ spi.createBucket(bucket);
+
+ Result result = spi.put(bucket, 5, doc1);
+ BucketInfo info1 = spi.getBucketInfo(bucket).getBucketInfo();
+ spi.flush(bucket);
+ {
+ assertEquals(1, info1.getDocumentCount());
+ assertTrue(info1.getEntryCount() >= info1.getDocumentCount());
+ assertTrue(info1.getChecksum() != 0);
+ assertTrue(info1.getDocumentSize() > 0);
+ assertTrue(info1.getUsedSize() >= info1.getDocumentSize());
+ }
+
+ result = spi.put(bucket, 4, doc2);
+ {
+ BucketInfo info2 = spi.getBucketInfo(bucket).getBucketInfo();
+ spi.flush(bucket);
+
+ assertEquals(1, info2.getDocumentCount());
+ assertTrue(info2.getEntryCount() >= info1.getDocumentCount());
+ assertEquals(info1.getChecksum(), info2.getChecksum());
+ assertEquals(info1.getDocumentSize(), info2.getDocumentSize());
+ assertTrue(info2.getUsedSize() >= info1.getDocumentSize());
+ }
+
+ GetResult gr = spi.get(bucket, new AllFields(), doc1.getId());
+
+ assertEquals(Result.ErrorType.NONE, gr.getErrorType());
+ assertEquals(5, gr.getLastModifiedTimestamp());
+ assertEquals(doc1, gr.getDocument());
+ }
+
+ void testPutDuplicate(PersistenceProvider spi) throws Exception {
+ Bucket bucket = new Bucket((short)0, new BucketId(8, 0x01));
+
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+ spi.createBucket(bucket);
+ assertEquals(new Result(), spi.put(bucket, 3, doc1));
+
+ int checksum;
+ {
+ BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
+ spi.flush(bucket);
+ assertEquals(1, (int)info.getDocumentCount());
+ checksum = info.getChecksum();
+ }
+ assertEquals(new Result(), spi.put(bucket, 3, doc1));
+
+ {
+ BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
+ spi.flush(bucket);
+ assertEquals(1, (int)info.getDocumentCount());
+ assertEquals(checksum, info.getChecksum());
+ }
+
+ List<DocEntry> entries = iterateBucket(spi, bucket, PersistenceProvider.IncludedVersions.ALL_VERSIONS);
+ assertEquals(1, entries.size());
+ }
+
+
+ void testRemove(PersistenceProvider spi) throws Exception {
+ Bucket bucket = new Bucket((short)0, new BucketId(8, 0x01));
+
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+ spi.createBucket(bucket);
+
+ Result result = spi.put(bucket, 3, doc1);
+
+ {
+ BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
+ spi.flush(bucket);
+
+ assertEquals(1, (int)info.getDocumentCount());
+ assertTrue(info.getChecksum() != 0);
+
+ List<DocEntry> entries = iterateBucket(spi, bucket, PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_ONLY);
+ assertEquals(1, entries.size());
+ }
+
+ RemoveResult result2 = spi.removeIfFound(bucket, 5, doc1.getId());
+
+ {
+ BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
+ spi.flush(bucket);
+
+ assertEquals(0, info.getDocumentCount());
+ assertEquals(0, info.getChecksum());
+            assertTrue(result2.wasFound());
+ }
+
+ assertEquals(0, iterateBucket(spi, bucket, PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_ONLY).size());
+ assertEquals(1, iterateBucket(spi, bucket, PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_OR_REMOVE).size());
+
+ RemoveResult result3 = spi.remove(bucket, 7, doc1.getId());
+
+ {
+ BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
+ spi.flush(bucket);
+
+ assertEquals(0, (int)info.getDocumentCount());
+ assertEquals(0, (int)info.getChecksum());
+            assertFalse(result3.wasFound());
+ }
+
+ Result result4 = spi.put(bucket, 9, doc1);
+ spi.flush(bucket);
+
+ assertTrue(!result4.hasError());
+
+ RemoveResult result5 = spi.remove(bucket, 9, doc1.getId());
+
+ {
+ BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
+ spi.flush(bucket);
+
+ assertEquals(0, (int)info.getDocumentCount());
+ assertEquals(0, (int)info.getChecksum());
+            assertTrue(result5.wasFound());
+ assertTrue(!result5.hasError());
+ }
+
+ GetResult getResult = spi.get(bucket, new AllFields(), doc1.getId());
+ assertEquals(Result.ErrorType.NONE, getResult.getErrorType());
+ assertEquals(0, getResult.getLastModifiedTimestamp());
+ assertNull(getResult.getDocument());
+ }
+
+ void testRemoveMerge(PersistenceProvider spi) throws Exception {
+ Bucket bucket = new Bucket((short)0, new BucketId(8, 0x01));
+
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+ DocumentId removeId = new DocumentId("userdoc:fraggle:1:rock");
+ spi.createBucket(bucket);
+
+ Result result = spi.put(bucket, 3, doc1);
+
+ // Remove a document that does not exist
+ {
+ RemoveResult removeResult = spi.remove(bucket, 10, removeId);
+ spi.flush(bucket);
+ assertEquals(Result.ErrorType.NONE, removeResult.getErrorType());
+            assertFalse(removeResult.wasFound());
+ }
+        // In a merge case, there might be multiple removes for the same document
+        // if resending or the like has taken place. These must all be added.
+ {
+ RemoveResult removeResult = spi.remove(bucket,
+ 5,
+ removeId);
+ spi.flush(bucket);
+ assertEquals(Result.ErrorType.NONE, removeResult.getErrorType());
+            assertFalse(removeResult.wasFound());
+ }
+ {
+ BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
+
+ assertEquals(1, info.getDocumentCount());
+ assertEquals(3, info.getEntryCount());
+ assertTrue(info.getChecksum() != 0);
+ }
+ assertFalse(spi.flush(bucket).hasError());
+
+ List<DocEntry> entries = iterateBucket(spi, bucket, PersistenceProvider.IncludedVersions.ALL_VERSIONS);
+ // Remove entries should exist afterwards
+ assertEquals(3, entries.size());
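+        // Entries come back sorted by timestamp: the put at 3, then the removes at 5 and 10.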
+ for (int i = 2; i > 0; --i) {
+ assertEquals((i == 2) ? 10 : 5, entries.get(i).getTimestamp());
+            assertEquals(DocEntry.Type.REMOVE_ENTRY, entries.get(i).getType());
+ assertNotNull(entries.get(i).getDocumentId());
+ assertEquals(removeId, entries.get(i).getDocumentId());
+ }
+
+ // Result tagged as document not found if CONVERT_PUT_TO_REMOVE flag is given and
+ // timestamp does not exist, and PERSIST_NONEXISTING is not set
+
+ // CONVERTED_REMOVE flag should be set if CONVERT_PUT_TO_REMOVE is set.
+
+ // Trying to turn a remove without CONVERTED_REMOVE flag set into
+ // unrevertable remove should work. (Should likely log warning, but we want
+ // to do it anyways, in order to get bucket copies in sync if it happens)
+
+ // Timestamps should not have been changed.
+
+ // Verify that a valid and altered bucket info is returned on success
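+        //
+        // A hypothetical sketch of those checks (the CONVERT_PUT_TO_REMOVE and
+        // PERSIST_NONEXISTING flags mentioned above are not exposed by this Java
+        // SPI yet, so this is illustrative only and does not compile as-is):
+        //
+        //   RemoveResult converted = spi.remove(bucket, 3, doc1.getId() /*, CONVERT_PUT_TO_REMOVE */);
+        //   assertTrue(converted.wasFound());      // the put at timestamp 3 gets converted
+        //   BucketInfo after = spi.getBucketInfo(bucket).getBucketInfo();
+        //   assertTrue(after.getChecksum() != 0);  // valid, altered bucket info on success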
+ }
+
+ void testUpdate(PersistenceProvider spi) {
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+ Bucket bucket = new Bucket((short)0, new BucketId(8, 0x01));
+ spi.createBucket(bucket);
+
+ DocumentType docType = testDocMan.getDocumentType("testdoctype1");
+
+ DocumentUpdate update = new DocumentUpdate(docType, doc1.getId());
+ FieldUpdate fieldUpdate = FieldUpdate.create(docType.getField("headerval"));
+ fieldUpdate.addValueUpdate(AssignValueUpdate.createAssign(new IntegerFieldValue(42)));
+ update.addFieldUpdate(fieldUpdate);
+
+ {
+ UpdateResult result = spi.update(bucket, 3, update);
+ spi.flush(bucket);
+ assertEquals(Result.ErrorType.NONE, result.getErrorType());
+ assertEquals(0, result.getExistingTimestamp());
+ }
+
+ spi.put(bucket, 3, doc1);
+ {
+ UpdateResult result = spi.update(bucket, 4, update);
+ spi.flush(bucket);
+
+ assertEquals(Result.ErrorType.NONE, result.getErrorType());
+ assertEquals(3, result.getExistingTimestamp());
+ }
+
+ {
+ GetResult result = spi.get(bucket, new AllFields(), doc1.getId());
+
+ assertEquals(Result.ErrorType.NONE, result.getErrorType());
+ assertEquals(4, result.getLastModifiedTimestamp());
+ assertEquals(new IntegerFieldValue(42), result.getDocument().getFieldValue("headerval"));
+ }
+
+ spi.remove(bucket, 5, doc1.getId());
+ spi.flush(bucket);
+
+ {
+ GetResult result = spi.get(bucket, new AllFields(), doc1.getId());
+
+ assertEquals(Result.ErrorType.NONE, result.getErrorType());
+ assertEquals(0, result.getLastModifiedTimestamp());
+ assertNull(result.getDocument());
+ }
+
+
+ {
+ UpdateResult result = spi.update(bucket, 6, update);
+ spi.flush(bucket);
+
+ assertEquals(Result.ErrorType.NONE, result.getErrorType());
+ assertEquals(0, result.getExistingTimestamp());
+ }
+ }
+
+ void testGet(PersistenceProvider spi) {
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+ Bucket bucket = new Bucket((short)0, new BucketId(8, 0x01));
+
+ spi.createBucket(bucket);
+
+ {
+ GetResult result = spi.get(bucket, new AllFields(), doc1.getId());
+
+ assertEquals(Result.ErrorType.NONE, result.getErrorType());
+ assertEquals(0, result.getLastModifiedTimestamp());
+ }
+
+ spi.put(bucket, 3, doc1);
+ spi.flush(bucket);
+
+ {
+ GetResult result = spi.get(bucket, new AllFields(), doc1.getId());
+ assertEquals(doc1, result.getDocument());
+ assertEquals(3, result.getLastModifiedTimestamp());
+ }
+
+ spi.remove(bucket,
+ 4,
+ doc1.getId());
+ spi.flush(bucket);
+
+ {
+ GetResult result = spi.get(bucket, new AllFields(), doc1.getId());
+
+ assertEquals(Result.ErrorType.NONE, result.getErrorType());
+ assertEquals(0, result.getLastModifiedTimestamp());
+ }
+ }
+
+    void testIterateCreateIterator(PersistenceProvider spi) throws Exception
+    {
+ Bucket bucket = new Bucket((short)0, new BucketId(8, 0x01));
+
+ spi.createBucket(bucket);
+
+ CreateIteratorResult result = spi.createIterator(bucket, new AllFields(), new Selection("", 0, Long.MAX_VALUE), PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_ONLY);
+ assertEquals(Result.ErrorType.NONE, result.getErrorType());
+ // Iterator ID 0 means invalid iterator, so cannot be returned
+ // from a successful createIterator call.
+ assertTrue(result.getIteratorId() != 0);
+
+ spi.destroyIterator(result.getIteratorId());
+ }
+
+    void testIterateDestroyIterator(PersistenceProvider spi) throws Exception
+ {
+ Bucket b = new Bucket((short)0, new BucketId(8, 0x1));
+ spi.createBucket(b);
+
+ CreateIteratorResult iter = spi.createIterator(b, new AllFields(), new Selection("", 0, Long.MAX_VALUE), PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_ONLY);
+
+ {
+ IterateResult result = spi.iterate(iter.getIteratorId(), 1024);
+ assertEquals(Result.ErrorType.NONE, result.getErrorType());
+ }
+
+ {
+ Result destroyResult = spi.destroyIterator(iter.getIteratorId());
+ assertTrue(!destroyResult.hasError());
+ }
+ // Iteration should now fail
+ {
+ IterateResult result = spi.iterate(iter.getIteratorId(), 1024);
+ assertEquals(Result.ErrorType.PERMANENT_ERROR, result.getErrorType());
+ }
+ {
+ Result destroyResult = spi.destroyIterator(iter.getIteratorId());
+ assertTrue(!destroyResult.hasError());
+ }
+ }
+
+ List<DocEntry> feedDocs(PersistenceProvider spi, Bucket bucket,
+ int numDocs,
+ int minSize,
+ int maxSize)
+ {
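+        // Feed numDocs random documents (sized between minSize and maxSize) at
+        // timestamps 1000, 1001, ..., then flush the bucket once at the end.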
+ List<DocEntry> docs = new ArrayList<DocEntry>();
+
+ for (int i = 0; i < numDocs; ++i) {
+ Document doc = testDocMan.createRandomDocumentAtLocation(
+ bucket.getBucketId().getId(),
+ i,
+ minSize,
+ maxSize);
+ Result result = spi.put(bucket, 1000 + i, doc);
+ assertTrue(!result.hasError());
+ docs.add(new DocEntry(1000 + i, doc));
+ }
+ assertEquals(new Result(), spi.flush(bucket));
+ return docs;
+ }
+
+    void testIterateWithUnknownId(PersistenceProvider spi)
+ {
+ Bucket b = new Bucket((short)0, new BucketId(8, 0x1));
+ spi.createBucket(b);
+
+ IterateResult result = spi.iterate(123, 1024);
+ assertEquals(Result.ErrorType.PERMANENT_ERROR, result.getErrorType());
+ }
+
+ /**
+     * Do a full bucket iteration, returning the retrieved chunks as a list of
+     * IterateResult objects. Stops early once maxChunks (if non-zero) is reached.
+ */
+ List<IterateResult> doIterate(PersistenceProvider spi,
+ long id,
+ long maxByteSize,
+ int maxChunks)
+ {
+ List<IterateResult> chunks = new ArrayList<IterateResult>();
+
+ while (true) {
+ IterateResult result = spi.iterate(id, maxByteSize);
+ assertFalse(result.hasError());
+
+ assertTrue(result.getEntries().size() > 0);
+ chunks.add(result);
+
+ if (result.isCompleted()
+ || (maxChunks != 0 && chunks.size() >= maxChunks))
+ {
+ break;
+ }
+ }
+ return chunks;
+ }
+
+ boolean containsDocument(List<IterateResult> chunks, Document doc) {
+ for (IterateResult i : chunks) {
+ for (DocEntry e : i.getEntries()) {
+ if (e.getType() == DocEntry.Type.PUT_ENTRY && e.getDocument() != null && e.getDocument().equals(doc)) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+ }
+
+ boolean containsRemove(List<IterateResult> chunks, String docId) {
+ for (IterateResult i : chunks) {
+ for (DocEntry e : i.getEntries()) {
+ if (e.getType() == DocEntry.Type.REMOVE_ENTRY && e.getDocumentId() != null && e.getDocumentId().toString().equals(docId)) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+ }
+
+ void verifyDocs(List<DocEntry> docs, List<IterateResult> chunks, List<String> removes) {
+ int docCount = 0;
+ int removeCount = 0;
+ for (IterateResult result : chunks) {
+ for (DocEntry e : result.getEntries()) {
+ if (e.getType() == DocEntry.Type.PUT_ENTRY) {
+ ++docCount;
+ } else {
+ ++removeCount;
+ }
+ }
+ }
+
+ assertEquals(docs.size(), docCount);
+
+ for (DocEntry e : docs) {
+ assertTrue(e.getDocument().toString(), containsDocument(chunks, e.getDocument()));
+ }
+
+ if (removes != null) {
+ assertEquals(removes.size(), removeCount);
+
+ for (String docId : removes) {
+ assertTrue(docId, containsRemove(chunks, docId));
+ }
+ }
+ }
+
+ void verifyDocs(List<DocEntry> docs, List<IterateResult> chunks) {
+ verifyDocs(docs, chunks, null);
+ }
+
+
+ void testIterateAllDocs(PersistenceProvider spi) throws Exception {
+ Bucket b = new Bucket((short)0, new BucketId(8, 0x1));
+ spi.createBucket(b);
+
+ List<DocEntry> docs = feedDocs(spi, b, 100, 110, 110);
+
+ CreateIteratorResult iter = spi.createIterator(b, new AllFields(), new Selection("", 0, Long.MAX_VALUE), PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_ONLY);
+
+ List<IterateResult> chunks = doIterate(spi, iter.getIteratorId(), 4096, 0);
+ verifyDocs(docs, chunks);
+
+ spi.destroyIterator(iter.getIteratorId());
+ }
+
+ void testIterateAllDocsNewestVersionOnly(PersistenceProvider spi) throws Exception {
+ Bucket b = new Bucket((short)0, new BucketId(8, 0x1));
+ spi.createBucket(b);
+
+ List<DocEntry> docs = feedDocs(spi, b, 100, 110, 110);
+ List<DocEntry> newDocs = new ArrayList<DocEntry>();
+
+ for (DocEntry e : docs) {
+ Document newDoc = e.getDocument().clone();
+ newDoc.setFieldValue("headerval", new IntegerFieldValue(5678 + (int)e.getTimestamp()));
+ spi.put(b, 1000 + e.getTimestamp(), newDoc);
+ newDocs.add(new DocEntry(1000 + e.getTimestamp(), newDoc));
+ }
+
+ spi.flush(b);
+
+ CreateIteratorResult iter = spi.createIterator(b, new AllFields(), new Selection("", 0, Long.MAX_VALUE), PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_ONLY);
+
+ List<IterateResult> chunks = doIterate(spi, iter.getIteratorId(), 4096, 0);
+
+ verifyDocs(newDocs, chunks);
+
+ spi.destroyIterator(iter.getIteratorId());
+ }
+
+
+ void testIterateChunked(PersistenceProvider spi) throws Exception
+ {
+ Bucket b = new Bucket((short)0, new BucketId(8, 0x1));
+ spi.createBucket(b);
+
+ List<DocEntry> docs = feedDocs(spi, b, 100, 110, 110);
+ CreateIteratorResult iter = spi.createIterator(b, new AllFields(), new Selection("", 0, Long.MAX_VALUE), PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_ONLY);
+
+ List<IterateResult> chunks = doIterate(spi, iter.getIteratorId(), 1, 0);
+ assertEquals(100, chunks.size());
+ verifyDocs(docs, chunks);
+
+ spi.destroyIterator(iter.getIteratorId());
+ }
+
+    void testIterateMaxByteSize(PersistenceProvider spi) throws Exception
+ {
+ Bucket b = new Bucket((short)0, new BucketId(8, 0x1));
+ spi.createBucket(b);
+
+ List<DocEntry> docs = feedDocs(spi, b, 100, 4096, 4096);
+
+ CreateIteratorResult iter = spi.createIterator(b, new AllFields(), new Selection("", 0, Long.MAX_VALUE), PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_ONLY);
+
+        // Docs are 4k each and we iterate with a max combined size of 10k,
+        // so we should receive no more than 3 docs in each chunk.
+ List<IterateResult> chunks = doIterate(spi, iter.getIteratorId(), 10000, 0);
+ assertTrue("Expected >= 33 chunks, got " + chunks.size(), chunks.size() >= 33);
+ verifyDocs(docs, chunks);
+
+ spi.destroyIterator(iter.getIteratorId());
+ }
+
+    void testIterateMatchTimestampRange(PersistenceProvider spi) throws Exception
+ {
+ Bucket b = new Bucket((short)0, new BucketId(8, 0x1));
+ spi.createBucket(b);
+
+ List<DocEntry> docsToVisit = new ArrayList<DocEntry>();
+
+ long fromTimestamp = 1010;
+ long toTimestamp = 1060;
+
+ for (int i = 0; i < 99; i++) {
+ long timestamp = 1000 + i;
+
+ Document doc = testDocMan.createRandomDocumentAtLocation(1, timestamp);
+
+ spi.put(b, timestamp, doc);
+ if (timestamp >= fromTimestamp && timestamp <= toTimestamp) {
+ docsToVisit.add(new DocEntry(timestamp, doc));
+ }
+ }
+ spi.flush(b);
+
+ CreateIteratorResult iter = spi.createIterator(b, new AllFields(), new Selection("", fromTimestamp, toTimestamp),
+ PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_ONLY);
+
+ List<IterateResult> chunks = doIterate(spi, iter.getIteratorId(), 2048, 0);
+ verifyDocs(docsToVisit, chunks);
+
+ spi.destroyIterator(iter.getIteratorId());
+ }
+
+ void testIterateExplicitTimestampSubset(PersistenceProvider spi) throws Exception {
+ Bucket b = new Bucket((short)0, new BucketId(8, 0x1));
+ spi.createBucket(b);
+
+ List<DocEntry> docsToVisit = new ArrayList<DocEntry>();
+ Set<Long> timestampsToVisit = new TreeSet<Long>();
+ List<String> removes = new ArrayList<String>();
+
+
+ for (int i = 0; i < 99; i++) {
+ long timestamp = 1000 + i;
+ Document doc = testDocMan.createRandomDocumentAtLocation(1, timestamp, 110, 110);
+
+ spi.put(b, timestamp, doc);
+ if (timestamp % 3 == 0) {
+ docsToVisit.add(new DocEntry(timestamp, doc));
+ timestampsToVisit.add(timestamp);
+ }
+ }
+
+ assertTrue(spi.remove(b, 2000, docsToVisit.get(0).getDocument().getId()).wasFound());
+ spi.flush(b);
+
+        timestampsToVisit.add(2000L);
+ removes.add(docsToVisit.get(0).getDocument().getId().toString());
+ timestampsToVisit.remove(docsToVisit.get(0).getTimestamp());
+ docsToVisit.remove(docsToVisit.get(0));
+
+ // When selecting a timestamp subset, we should ignore IncludedVersions, and return all matches regardless.
+ CreateIteratorResult iter = spi.createIterator(b, new AllFields(), new Selection(timestampsToVisit),
+ PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_ONLY);
+
+ List<IterateResult> chunks = doIterate(spi, iter.getIteratorId(), 2048, 0);
+
+ verifyDocs(docsToVisit, chunks, removes);
+
+ spi.destroyIterator(iter.getIteratorId());
+ }
+
+ void testIterateRemoves(PersistenceProvider spi) throws Exception {
+ Bucket b = new Bucket((short)0, new BucketId(8, 0x1));
+ spi.createBucket(b);
+
+ int docCount = 10;
+ List<DocEntry> docs = feedDocs(spi, b, docCount, 100, 100);
+ List<String> removedDocs = new ArrayList<String>();
+ List<DocEntry> nonRemovedDocs = new ArrayList<DocEntry>();
+
+ for (int i = 0; i < docCount; ++i) {
+ if (i % 3 == 0) {
+ removedDocs.add(docs.get(i).getDocument().getId().toString());
+ assertTrue(spi.remove(b, 2000 + i, docs.get(i).getDocument().getId()).wasFound());
+ } else {
+ nonRemovedDocs.add(docs.get(i));
+ }
+ }
+ spi.flush(b);
+
+ // First, test iteration without removes
+ {
+ CreateIteratorResult iter = spi.createIterator(b, new AllFields(), new Selection("", 0, Long.MAX_VALUE),
+ PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_ONLY);
+
+ List<IterateResult> chunks = doIterate(spi, iter.getIteratorId(), 4096, 0);
+ verifyDocs(nonRemovedDocs, chunks);
+ spi.destroyIterator(iter.getIteratorId());
+ }
+
+ {
+ CreateIteratorResult iter = spi.createIterator(b, new AllFields(), new Selection("", 0, Long.MAX_VALUE),
+ PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_OR_REMOVE);
+
+ List<IterateResult> chunks = doIterate(spi, iter.getIteratorId(), 4096, 0);
+ verifyDocs(nonRemovedDocs, chunks, removedDocs);
+ spi.destroyIterator(iter.getIteratorId());
+ }
+ }
+
+ void testIterateMatchSelection(PersistenceProvider spi) throws Exception
+ {
+ Bucket b = new Bucket((short)0, new BucketId(8, 0x1));
+ spi.createBucket(b);
+
+ List<DocEntry> docsToVisit = new ArrayList<DocEntry>();
+
+ for (int i = 0; i < 99; i++) {
+ Document doc = testDocMan.createRandomDocumentAtLocation(1, 1000 + i, 110, 110);
+ doc.setFieldValue("headerval", new IntegerFieldValue(i));
+
+ spi.put(b, 1000 + i, doc);
+ if ((i % 3) == 0) {
+ docsToVisit.add(new DocEntry(1000 + i, doc));
+ }
+ }
+ spi.flush(b);
+
+ CreateIteratorResult iter = spi.createIterator(b, new AllFields(), new Selection("testdoctype1.headerval % 3 == 0", 0, Long.MAX_VALUE),
+ PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_ONLY);
+
+ List<IterateResult> chunks = doIterate(spi, iter.getIteratorId(), 2048, 0);
+ verifyDocs(docsToVisit, chunks);
+
+ spi.destroyIterator(iter.getIteratorId());
+ }
+
+ void testIterationRequiringDocumentIdOnlyMatching(PersistenceProvider spi) throws Exception
+ {
+ Bucket b = new Bucket((short)0, new BucketId(8, 0x1));
+ spi.createBucket(b);
+
+ feedDocs(spi, b, 100, 100, 100);
+ DocumentId removedId = new DocumentId("userdoc:blarg:1:unknowndoc");
+
+        // The document does not exist yet; the remove should still create a
+        // remove entry for it.
+ assertFalse(spi.remove(b, 2000, removedId).wasFound());
+ spi.flush(b);
+
+ CreateIteratorResult iter = spi.createIterator(b, new AllFields(), new Selection("id == '" + removedId.toString() + "'", 0, Long.MAX_VALUE),
+ PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_OR_REMOVE);
+
+ List<IterateResult> chunks = doIterate(spi, iter.getIteratorId(), 4096, 0);
+ List<String> removes = new ArrayList<String>();
+ List<DocEntry> docs = new ArrayList<DocEntry>();
+
+ removes.add(removedId.toString());
+ verifyDocs(docs, chunks, removes);
+
+ spi.destroyIterator(iter.getIteratorId());
+ }
+
+ void testIterateAlreadyCompleted(PersistenceProvider spi) throws Exception
+ {
+ Bucket b = new Bucket((short)0, new BucketId(8, 0x1));
+ spi.createBucket(b);
+
+ List<DocEntry> docs = feedDocs(spi, b, 10, 100, 100);
+ CreateIteratorResult iter = spi.createIterator(b, new AllFields(), new Selection("", 0, Long.MAX_VALUE),
+ PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_OR_REMOVE);
+
+ List<IterateResult> chunks = doIterate(spi, iter.getIteratorId(), 4096, 0);
+ verifyDocs(docs, chunks);
+
+ IterateResult result = spi.iterate(iter.getIteratorId(), 4096);
+ assertEquals(Result.ErrorType.NONE, result.getErrorType());
+ assertEquals(0, result.getEntries().size());
+ assertTrue(result.isCompleted());
+
+ spi.destroyIterator(iter.getIteratorId());
+ }
+
+ void testIterateEmptyBucket(PersistenceProvider spi) throws Exception
+ {
+ Bucket b = new Bucket((short)0, new BucketId(8, 0x1));
+ spi.createBucket(b);
+
+ CreateIteratorResult iter = spi.createIterator(b, new AllFields(), new Selection("", 0, Long.MAX_VALUE),
+ PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_OR_REMOVE);
+
+ IterateResult result = spi.iterate(iter.getIteratorId(), 4096);
+ assertEquals(Result.ErrorType.NONE, result.getErrorType());
+ assertEquals(0, result.getEntries().size());
+ assertTrue(result.isCompleted());
+
+ spi.destroyIterator(iter.getIteratorId());
+ }
+
+ void testDeleteBucket(PersistenceProvider spi) throws Exception
+ {
+ Bucket b = new Bucket((short)0, new BucketId(8, 0x1));
+ spi.createBucket(b);
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+
+ spi.put(b, 3, doc1);
+ spi.flush(b);
+
+ spi.deleteBucket(b);
+ GetResult result = spi.get(b, new AllFields(), doc1.getId());
+
+ assertEquals(Result.ErrorType.NONE, result.getErrorType());
+ assertEquals(0, result.getLastModifiedTimestamp());
+ }
+
+
+ void testSplitNormalCase(PersistenceProvider spi)
+ {
+ Bucket bucketA = new Bucket((short)0, new BucketId(3, 0x2));
+ Bucket bucketB = new Bucket((short)0, new BucketId(3, 0x6));
+
+ Bucket bucketC = new Bucket((short)0, new BucketId(2, 0x2));
+ spi.createBucket(bucketC);
+
+ for (int i = 0; i < 10; ++i) {
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
+ spi.put(bucketC, i + 1, doc1);
+ }
+
+ for (int i = 10; i < 20; ++i) {
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
+ spi.put(bucketC, i + 1, doc1);
+ }
+
+ spi.flush(bucketC);
+
+ spi.split(bucketC, bucketA, bucketB);
+ testSplitNormalCasePostCondition(spi, bucketA, bucketB, bucketC);
+ /*if (_factory->hasPersistence()) {
+ spi.reset();
+ document::TestDocMan testDocMan2;
+ spi = getSpi(*_factory, testDocMan2);
+ testSplitNormalCasePostCondition(spi, bucketA, bucketB, bucketC,
+ testDocMan2);
+ }*/
+ }
+
+
+ void testSplitNormalCasePostCondition(PersistenceProvider spi, Bucket bucketA,
+ Bucket bucketB, Bucket bucketC)
+ {
+ assertEquals(10, spi.getBucketInfo(bucketA).getBucketInfo().
+ getDocumentCount());
+ assertEquals(10, spi.getBucketInfo(bucketB).getBucketInfo().
+ getDocumentCount());
+
+ FieldSet fs = new AllFields();
+ for (int i = 0; i < 10; ++i) {
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
+ assertTrue(spi.get(bucketA, fs, doc1.getId()).hasDocument());
+ assertTrue(!spi.get(bucketC, fs, doc1.getId()).hasDocument());
+ assertTrue(!spi.get(bucketB, fs, doc1.getId()).hasDocument());
+ }
+
+ for (int i = 10; i < 20; ++i) {
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
+ assertTrue(spi.get(bucketB, fs, doc1.getId()).hasDocument());
+ assertTrue(!spi.get(bucketA, fs, doc1.getId()).hasDocument());
+ assertTrue(!spi.get(bucketC, fs, doc1.getId()).hasDocument());
+ }
+ }
+
+ void testSplitTargetExists(PersistenceProvider spi) throws Exception
+ {
+ Bucket bucketA = new Bucket((short)0, new BucketId(3, 0x2));
+ Bucket bucketB = new Bucket((short)0, new BucketId(3, 0x6));
+ spi.createBucket(bucketB);
+
+ Bucket bucketC = new Bucket((short)0, new BucketId(2, 0x2));
+ spi.createBucket(bucketC);
+
+ for (int i = 0; i < 10; ++i) {
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
+ spi.put(bucketC, i + 1, doc1);
+ }
+
+ spi.flush(bucketC);
+
+ for (int i = 10; i < 20; ++i) {
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
+ spi.put(bucketB, i + 1, doc1);
+ }
+ spi.flush(bucketB);
+ assertTrue(!spi.getBucketInfo(bucketB).getBucketInfo().isActive());
+
+ for (int i = 10; i < 20; ++i) {
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
+ spi.put(bucketC, i + 1, doc1);
+ }
+ spi.flush(bucketC);
+
+ for (int i = 20; i < 25; ++i) {
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
+ spi.put(bucketB, i + 1, doc1);
+ }
+
+ spi.flush(bucketB);
+
+ spi.split(bucketC, bucketA, bucketB);
+ testSplitTargetExistsPostCondition(spi, bucketA, bucketB, bucketC);
+ /*if (_factory->hasPersistence()) {
+ spi.reset();
+ document::TestDocMan testDocMan2;
+ spi = getSpi(*_factory, testDocMan2);
+ testSplitTargetExistsPostCondition(spi, bucketA, bucketB, bucketC,
+ testDocMan2);
+ }*/
+ }
+
+
+ void testSplitTargetExistsPostCondition(PersistenceProvider spi, Bucket bucketA,
+ Bucket bucketB, Bucket bucketC)
+ {
+ assertEquals(10, spi.getBucketInfo(bucketA).getBucketInfo().
+ getDocumentCount());
+ assertEquals(15, spi.getBucketInfo(bucketB).getBucketInfo().
+ getDocumentCount());
+
+ FieldSet fs = new AllFields();
+ for (int i = 0; i < 10; ++i) {
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
+ assertTrue(spi.get(bucketA, fs, doc1.getId()).hasDocument());
+ assertTrue(!spi.get(bucketC, fs, doc1.getId()).hasDocument());
+ assertTrue(!spi.get(bucketB, fs, doc1.getId()).hasDocument());
+ }
+
+ for (int i = 10; i < 25; ++i) {
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
+ assertTrue(spi.get(bucketB, fs, doc1.getId()).hasDocument());
+ assertTrue(!spi.get(bucketA, fs, doc1.getId()).hasDocument());
+ assertTrue(!spi.get(bucketC, fs, doc1.getId()).hasDocument());
+ }
+ }
+
+ void testJoinNormalCase(PersistenceProvider spi) throws Exception
+ {
+ Bucket bucketA = new Bucket((short)0, new BucketId(3, 0x02));
+ spi.createBucket(bucketA);
+
+ Bucket bucketB = new Bucket((short)0, new BucketId(3, 0x06));
+ spi.createBucket(bucketB);
+
+ Bucket bucketC = new Bucket((short)0, new BucketId(2, 0x02));
+
+ for (int i = 0; i < 10; ++i) {
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
+ spi.put(bucketA, i + 1, doc1);
+ }
+
+ spi.flush(bucketA);
+
+ for (int i = 10; i < 20; ++i) {
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
+ spi.put(bucketB, i + 1, doc1);
+ }
+
+ spi.flush(bucketB);
+
+ spi.join(bucketA, bucketB, bucketC);
+ testJoinNormalCasePostCondition(spi, bucketA, bucketB, bucketC);
+ /*if (_factory->hasPersistence()) {
+ spi.reset();
+ document::TestDocMan testDocMan2;
+ spi = getSpi(*_factory, testDocMan2);
+ testJoinNormalCasePostCondition(spi, bucketA, bucketB, bucketC,
+ testDocMan2);
+ }*/
+ }
+
+ void testJoinNormalCasePostCondition(PersistenceProvider spi, Bucket bucketA,
+ Bucket bucketB, Bucket bucketC)
+ {
+ assertEquals(20, spi.getBucketInfo(bucketC).
+ getBucketInfo().getDocumentCount());
+
+ FieldSet fs = new AllFields();
+ for (int i = 0; i < 10; ++i) {
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
+ assertTrue(spi.get(bucketC, fs, doc1.getId()).hasDocument());
+ assertTrue(!spi.get(bucketA, fs, doc1.getId()).hasDocument());
+ }
+
+ for (int i = 10; i < 20; ++i) {
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
+ assertTrue(spi.get(bucketC, fs, doc1.getId()).hasDocument());
+ assertTrue(!spi.get(bucketB, fs, doc1.getId()).hasDocument());
+ }
+ }
+
+ void testJoinTargetExists(PersistenceProvider spi) throws Exception
+ {
+ Bucket bucketA = new Bucket((short)0, new BucketId(3, 0x02));
+ spi.createBucket(bucketA);
+
+ Bucket bucketB = new Bucket((short)0, new BucketId(3, 0x06));
+ spi.createBucket(bucketB);
+
+ Bucket bucketC = new Bucket((short)0, new BucketId(2, 0x02));
+ spi.createBucket(bucketC);
+
+ for (int i = 0; i < 10; ++i) {
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
+ spi.put(bucketA, i + 1, doc1);
+ }
+
+ spi.flush(bucketA);
+
+ for (int i = 10; i < 20; ++i) {
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
+ spi.put(bucketB, i + 1, doc1);
+ }
+ spi.flush(bucketB);
+
+ for (int i = 20; i < 30; ++i) {
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
+ spi.put(bucketC, i + 1, doc1);
+ }
+ spi.flush(bucketC);
+
+ spi.join(bucketA, bucketB, bucketC);
+ testJoinTargetExistsPostCondition(spi, bucketA, bucketB, bucketC);
+ /*if (_factory->hasPersistence()) {
+ spi.reset();
+ document::TestDocMan testDocMan2;
+ spi = getSpi(*_factory, testDocMan2);
+ testJoinTargetExistsPostCondition(spi, bucketA, bucketB, bucketC,
+ testDocMan2);
+ }*/
+ }
+
+ void testJoinTargetExistsPostCondition(PersistenceProvider spi, Bucket bucketA,
+ Bucket bucketB, Bucket bucketC)
+ {
+ assertEquals(30, spi.getBucketInfo(bucketC).getBucketInfo().
+ getDocumentCount());
+
+ FieldSet fs = new AllFields();
+ for (int i = 0; i < 10; ++i) {
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
+ assertTrue(spi.get(bucketC, fs, doc1.getId()).hasDocument());
+ assertTrue(!spi.get(bucketA, fs, doc1.getId()).hasDocument());
+ }
+
+ for (int i = 10; i < 20; ++i) {
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
+ assertTrue(spi.get(bucketC, fs, doc1.getId()).hasDocument());
+ assertTrue(!spi.get(bucketB, fs, doc1.getId()).hasDocument());
+ }
+
+ for (int i = 20; i < 30; ++i) {
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
+ assertTrue(spi.get(bucketC, fs, doc1.getId()).hasDocument());
+ }
+ }
+
+ void testJoinOneBucket(PersistenceProvider spi) throws Exception
+ {
+ Bucket bucketA = new Bucket((short)0, new BucketId(3, 0x02));
+ spi.createBucket(bucketA);
+
+ Bucket bucketB = new Bucket((short)0, new BucketId(3, 0x06));
+ Bucket bucketC = new Bucket((short)0, new BucketId(2, 0x02));
+
+ for (int i = 0; i < 10; ++i) {
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
+ spi.put(bucketA, i + 1, doc1);
+ }
+ spi.flush(bucketA);
+
+ spi.join(bucketA, bucketB, bucketC);
+ testJoinOneBucketPostCondition(spi, bucketA, bucketC);
+ /*if (_factory->hasPersistence()) {
+ spi.reset();
+ document::TestDocMan testDocMan2;
+ spi = getSpi(*_factory, testDocMan2);
+ testJoinOneBucketPostCondition(spi, bucketA, bucketC, testDocMan2);
+ }*/
+ }
+
+ void testJoinOneBucketPostCondition(PersistenceProvider spi, Bucket bucketA, Bucket bucketC)
+ {
+ assertEquals(10, spi.getBucketInfo(bucketC).getBucketInfo().
+ getDocumentCount());
+
+ FieldSet fs = new AllFields();
+ for (int i = 0; i < 10; ++i) {
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
+ assertTrue(spi.get(bucketC, fs, doc1.getId()).hasDocument());
+ assertTrue(!spi.get(bucketA, fs, doc1.getId()).hasDocument());
+ }
+ }
+
+
+ void testMaintain(PersistenceProvider spi) throws Exception {
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+
+ Bucket bucket = new Bucket((short)0, new BucketId(8, 0x01));
+ spi.createBucket(bucket);
+
+ spi.put(bucket, 3, doc1);
+ spi.flush(bucket);
+
+ assertEquals(Result.ErrorType.NONE,
+ spi.maintain(bucket, PersistenceProvider.MaintenanceLevel.LOW).getErrorType());
+ }
+
+ void testGetModifiedBuckets(PersistenceProvider spi) throws Exception {
+ assertEquals(0, spi.getModifiedBuckets().getBuckets().size());
+ }
+
+ void testBucketActivation(PersistenceProvider spi) throws Exception {
+ Bucket bucket = new Bucket((short)0, new BucketId(8, 0x01));
+
+ spi.createBucket(bucket);
+ {
+ BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
+ assertTrue(!info.isActive());
+ }
+
+ spi.setActiveState(bucket, BucketInfo.ActiveState.ACTIVE);
+ {
+ BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
+ assertTrue(info.isActive());
+ }
+
+ spi.setActiveState(bucket, BucketInfo.ActiveState.NOT_ACTIVE);
+ {
+ BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
+ assertTrue(!info.isActive());
+ }
+ }
+
+ void testBucketActivationSplitAndJoin(PersistenceProvider spi) throws Exception
+ {
+ Bucket bucketA = new Bucket((short)0, new BucketId(3, 0x02));
+ Bucket bucketB = new Bucket((short)0, new BucketId(3, 0x06));
+ Bucket bucketC = new Bucket((short)0, new BucketId(2, 0x02));
+ Document doc1 = testDocMan.createRandomDocumentAtLocation(0x02, 1);
+ Document doc2 = testDocMan.createRandomDocumentAtLocation(0x06, 2);
+
+ spi.createBucket(bucketC);
+ spi.put(bucketC, 1, doc1);
+ spi.put(bucketC, 2, doc2);
+ spi.flush(bucketC);
+
+ spi.setActiveState(bucketC, BucketInfo.ActiveState.ACTIVE);
+ assertTrue(spi.getBucketInfo(bucketC).getBucketInfo().isActive());
+ spi.split(bucketC, bucketA, bucketB);
+ assertTrue(spi.getBucketInfo(bucketA).getBucketInfo().isActive());
+ assertTrue(spi.getBucketInfo(bucketB).getBucketInfo().isActive());
+ assertTrue(!spi.getBucketInfo(bucketC).getBucketInfo().isActive());
+
+ spi.setActiveState(bucketA, BucketInfo.ActiveState.NOT_ACTIVE);
+ spi.setActiveState(bucketB, BucketInfo.ActiveState.NOT_ACTIVE);
+ spi.join(bucketA, bucketB, bucketC);
+ assertTrue(!spi.getBucketInfo(bucketA).getBucketInfo().isActive());
+ assertTrue(!spi.getBucketInfo(bucketB).getBucketInfo().isActive());
+ assertTrue(!spi.getBucketInfo(bucketC).getBucketInfo().isActive());
+
+ spi.split(bucketC, bucketA, bucketB);
+ assertTrue(!spi.getBucketInfo(bucketA).getBucketInfo().isActive());
+ assertTrue(!spi.getBucketInfo(bucketB).getBucketInfo().isActive());
+ assertTrue(!spi.getBucketInfo(bucketC).getBucketInfo().isActive());
+
+ spi.setActiveState(bucketA, BucketInfo.ActiveState.ACTIVE);
+ spi.join(bucketA, bucketB, bucketC);
+ assertTrue(!spi.getBucketInfo(bucketA).getBucketInfo().isActive());
+ assertTrue(!spi.getBucketInfo(bucketB).getBucketInfo().isActive());
+ assertTrue(spi.getBucketInfo(bucketC).getBucketInfo().isActive());
+ }
+//
+// void testRemoveEntry()
+// {
+// if (!_factory->supportsRemoveEntry()) {
+// return;
+// }
+// document::TestDocMan testDocMan;
+// _factory->clear();
+// PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+//
+// Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+// Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+// Document doc2 = testDocMan.createRandomDocumentAtLocation(0x01, 2);
+// spi.createBucket(bucket);
+//
+// spi.put(bucket, 3, doc1);
+// spi.flush(bucket);
+// BucketInfo info1 = spi.getBucketInfo(bucket).getBucketInfo();
+//
+// {
+// spi.put(bucket, 4, doc2);
+// spi.flush(bucket);
+// spi.removeEntry(bucket, 4);
+// spi.flush(bucket);
+// BucketInfo info2 = spi.getBucketInfo(bucket).getBucketInfo();
+// assertEquals(info1, info2);
+// }
+//
+// // Test case where there exists a previous version of the document.
+// {
+// spi.put(bucket, 5, doc1);
+// spi.flush(bucket);
+// spi.removeEntry(bucket, 5);
+// spi.flush(bucket);
+// BucketInfo info2 = spi.getBucketInfo(bucket).getBucketInfo();
+// assertEquals(info1, info2);
+// }
+//
+// // Test case where the newest document version after removeEntrying is a remove.
+// {
+// spi.remove(bucket, 6, doc1.getId());
+// spi.flush(bucket);
+// BucketInfo info2 = spi.getBucketInfo(bucket).getBucketInfo();
+// assertEquals(0, info2.getDocumentCount());
+//
+// spi.put(bucket, 7, doc1);
+// spi.flush(bucket);
+// spi.removeEntry(bucket, 7);
+// spi.flush(bucket);
+// BucketInfo info3 = spi.getBucketInfo(bucket).getBucketInfo();
+// assertEquals(info2, info3);
+// }
+// }
+//
+}
+
+//
+//// Get number of puts and removes across all chunks (i.e. all entries)
+// size_t
+// getDocCount(const std::vector<Chunk>& chunks)
+// {
+// size_t count = 0;
+// for (size_t i=0; i<chunks.size(); ++i) {
+// count += chunks[i]._entries.size();
+// }
+// return count;
+// }
+//
+// size_t
+// getRemoveEntryCount(const std::vector<spi::DocEntry::LP>& entries)
+// {
+// size_t ret = 0;
+// for (size_t i = 0; i < entries.size(); ++i) {
+// if (entries[i]->isRemove()) {
+// ++ret;
+// }
+// }
+// return ret;
+// }
+//
+// List<DocEntry>
+// getEntriesFromChunks(const std::vector<Chunk>& chunks)
+// {
+// std::vector<spi::DocEntry::LP> ret;
+// for (size_t chunk = 0; chunk < chunks.size(); ++chunk) {
+// for (size_t i = 0; i < chunks[chunk]._entries.size(); ++i) {
+// ret.push_back(chunks[chunk]._entries[i]);
+// }
+// }
+// std::sort(ret.begin(),
+// ret.end(),
+// DocEntryIndirectTimestampComparator());
+// return ret;
+// }
+//
+//
+// void
+// verifyDocs(const std::vector<DocAndTimestamp>& wanted,
+// const std::vector<Chunk>& chunks,
+// const std::set<string>& removes = std::set<string>())
+// {
+// List<DocEntry> retrieved(
+// getEntriesFromChunks(chunks));
+// size_t removeCount = getRemoveEntryCount(retrieved);
+// // Ensure that we've got the correct number of puts and removes
+// assertEquals(removes.size(), removeCount);
+// assertEquals(wanted.size(), retrieved.size() - removeCount);
+//
+// size_t wantedIdx = 0;
+// for (size_t i = 0; i < retrieved.size(); ++i) {
+// DocEntry& entry(*retrieved[i]);
+// if (entry.getDocumentOperation() != 0) {
+// if (!(*wanted[wantedIdx].doc == *entry.getDocumentOperation())) {
+// std::ostringstream ss;
+// ss << "Documents differ! Wanted:\n"
+// << wanted[wantedIdx].doc->toString(true)
+// << "\n\nGot:\n"
+// << entry.getDocumentOperation()->toString(true);
+// CPPUNIT_FAIL(ss.str());
+// }
+// assertEquals(wanted[wantedIdx].timestamp, entry.getTimestamp());
+// size_t serSize = wanted[wantedIdx].doc->serialize()->getLength();
+// assertEquals(serSize + sizeof(DocEntry), size_t(entry.getSize()));
+// assertEquals(serSize, size_t(entry.getDocumentSize()));
+// ++wantedIdx;
+// } else {
+// // Remove-entry
+// assertTrue(entry.getDocumentId() != 0);
+// size_t serSize = entry.getDocumentId()->getSerializedSize();
+// assertEquals(serSize + sizeof(DocEntry), size_t(entry.getSize()));
+// assertEquals(serSize, size_t(entry.getDocumentSize()));
+// if (removes.find(entry.getDocumentId()->toString()) == removes.end()) {
+// std::ostringstream ss;
+// ss << "Got unexpected remove entry for document id "
+// << *entry.getDocumentId();
+// CPPUNIT_FAIL(ss.str());
+// }
+// }
+// }
+// }
+//
+
+// void detectAndTestOptionalBehavior() {
+// // Report if implementation supports setting bucket size info.
+//
+// // Report if joining same bucket on multiple partitions work.
+// // (Where target equals one of the sources). (If not supported service
+// // layer must die if a bucket is found during init on multiple partitions)
+// // Test functionality if it works.
+// }
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/conformance/TestDocMan.java b/persistence/src/main/java/com/yahoo/persistence/spi/conformance/TestDocMan.java
new file mode 100644
index 00000000000..d447ae85c16
--- /dev/null
+++ b/persistence/src/main/java/com/yahoo/persistence/spi/conformance/TestDocMan.java
@@ -0,0 +1,37 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.persistence.spi.conformance;
+
+import com.yahoo.document.*;
+import com.yahoo.document.datatypes.IntegerFieldValue;
+import com.yahoo.document.datatypes.StringFieldValue;
+
+public class TestDocMan extends DocumentTypeManager {
+
+ public TestDocMan() {
+ DocumentType docType = new DocumentType("testdoctype1");
+ docType.addHeaderField("headerval", DataType.INT);
+ docType.addField("content", DataType.STRING);
+
+ registerDocumentType(docType);
+ }
+
+ public Document createRandomDocumentAtLocation(long location, long timestamp) {
+ return createRandomDocumentAtLocation(location, timestamp, 100, 100);
+ }
+
+ public Document createRandomDocumentAtLocation(long location, long timestamp, int minSize, int maxSize) {
+ Document document = new Document(getDocumentType("testdoctype1"),
+ new DocumentId("userdoc:footype:" + location + ":" + timestamp));
+
+ document.setFieldValue("headerval", new IntegerFieldValue((int)timestamp));
+
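+        // Pick a content length in [minSize, maxSize) (exactly minSize when the
+        // bounds are equal) and fill the content field with that many 'A's.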
+        StringBuilder value = new StringBuilder();
+ int length = (int)(Math.random() * (maxSize - minSize)) + minSize;
+ for (int i = 0; i < length; ++i) {
+ value.append("A");
+ }
+
+ document.setFieldValue("content", new StringFieldValue(value.toString()));
+ return document;
+ }
+}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/conformance/package-info.java b/persistence/src/main/java/com/yahoo/persistence/spi/conformance/package-info.java
new file mode 100644
index 00000000000..acb3963d066
--- /dev/null
+++ b/persistence/src/main/java/com/yahoo/persistence/spi/conformance/package-info.java
@@ -0,0 +1,7 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+@ExportPackage
+@PublicApi
+package com.yahoo.persistence.spi.conformance;
+
+import com.yahoo.api.annotations.PublicApi;
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/package-info.java b/persistence/src/main/java/com/yahoo/persistence/spi/package-info.java
new file mode 100644
index 00000000000..0530b8a0f3a
--- /dev/null
+++ b/persistence/src/main/java/com/yahoo/persistence/spi/package-info.java
@@ -0,0 +1,7 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+@ExportPackage
+@PublicApi
+package com.yahoo.persistence.spi;
+
+import com.yahoo.api.annotations.PublicApi;
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/result/BucketIdListResult.java b/persistence/src/main/java/com/yahoo/persistence/spi/result/BucketIdListResult.java
new file mode 100644
index 00000000000..a32c4355113
--- /dev/null
+++ b/persistence/src/main/java/com/yahoo/persistence/spi/result/BucketIdListResult.java
@@ -0,0 +1,36 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.persistence.spi.result;
+
+import com.yahoo.document.BucketId;
+
+import java.util.List;
+
+/**
+ * Result class used for bucket id list requests.
+ */
+public class BucketIdListResult extends Result {
+ List<BucketId> buckets;
+
+ /**
+ * Creates a result with an error.
+ *
+ * @param type The type of error
+ * @param message A human-readable error message to further detail the error.
+ */
+ public BucketIdListResult(ErrorType type, String message) {
+ super(type, message);
+ }
+
+ /**
+ * Creates a result containing a list of all the buckets the requested partition has.
+ *
+ * @param buckets The list of buckets.
+ */
+ public BucketIdListResult(List<BucketId> buckets) {
+ this.buckets = buckets;
+ }
+
+ public List<BucketId> getBuckets() {
+ return buckets;
+ }
+}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/result/BucketInfoResult.java b/persistence/src/main/java/com/yahoo/persistence/spi/result/BucketInfoResult.java
new file mode 100644
index 00000000000..a840e9e2075
--- /dev/null
+++ b/persistence/src/main/java/com/yahoo/persistence/spi/result/BucketInfoResult.java
@@ -0,0 +1,37 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.persistence.spi.result;
+
+import com.yahoo.persistence.spi.BucketInfo;
+
+/**
+ * Result class for the getBucketInfo() function.
+ */
+public class BucketInfoResult extends Result {
+ BucketInfo bucketInfo = null;
+
+ /**
+ * Constructor to use for a result where an error has been detected.
+ * The service layer will not update the bucket information in this case,
+ * so it should not be returned either.
+ *
+ * @param type The type of error.
+ * @param message A human readable message further detailing the error.
+ */
+ public BucketInfoResult(ErrorType type, String message) {
+ super(type, message);
+ }
+
+ /**
+     * Constructor to use when the bucket info was successfully retrieved.
+     *
+     * @param info The information about the bucket.
+ */
+ public BucketInfoResult(BucketInfo info) {
+ this.bucketInfo = info;
+ }
+
+ public BucketInfo getBucketInfo() {
+ return bucketInfo;
+ }
+}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/result/CreateIteratorResult.java b/persistence/src/main/java/com/yahoo/persistence/spi/result/CreateIteratorResult.java
new file mode 100644
index 00000000000..2eb9d105ec9
--- /dev/null
+++ b/persistence/src/main/java/com/yahoo/persistence/spi/result/CreateIteratorResult.java
@@ -0,0 +1,33 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.persistence.spi.result;
+
+/**
+ * Result class for CreateIterator requests.
+ */
+public class CreateIteratorResult extends Result {
+ long iteratorId = 0;
+
+ /**
+ * Creates a result with an error.
+ *
+ * @param type The type of error
+ * @param message A human-readable error message to further detail the error.
+ */
+ public CreateIteratorResult(Result.ErrorType type, String message) {
+ super(type, message);
+ }
+
+ /**
+ * Creates a successful result, containing a unique identifier for this iterator
+ * (must be created and maintained by the provider).
+ *
+ * @param iteratorId The iterator ID to use for this iterator.
+ */
+ public CreateIteratorResult(long iteratorId) {
+ this.iteratorId = iteratorId;
+ }
+
+ public long getIteratorId() {
+ return iteratorId;
+ }
+}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/result/GetResult.java b/persistence/src/main/java/com/yahoo/persistence/spi/result/GetResult.java
new file mode 100644
index 00000000000..d7528681958
--- /dev/null
+++ b/persistence/src/main/java/com/yahoo/persistence/spi/result/GetResult.java
@@ -0,0 +1,60 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.persistence.spi.result;
+
+import com.yahoo.document.Document;
+
+/**
+ * Result class for Get operations
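+ * <p>
+ * Illustrative use from a call site (the surrounding get() call is an example,
+ * not part of this class):
+ * <pre>
+ *   GetResult result = provider.get(bucket, fieldSet, documentId);
+ *   if (result.hasDocument()) {
+ *       Document doc = result.getDocument();
+ *   }
+ * </pre>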
+ */
+public class GetResult extends Result {
+ Document doc;
+ long lastModifiedTimestamp = 0;
+
+ /**
+ * Constructor to use when there was an error retrieving the document.
+ * Not finding the document is not an error in this context.
+ *
+ * @param type The type of error.
+ * @param message A human readable message further detailing the error.
+ */
+    public GetResult(ErrorType type, String message) {
+ super(type, message);
+ }
+
+ /**
+ * Constructor to use when we didn't find the document in question.
+ */
+ public GetResult() {}
+
+ /**
+ * Constructor to use when we found the document asked for.
+ *
+ * @param doc The document we found
+ * @param lastModifiedTimestamp The timestamp with which the document was stored.
+ */
+ public GetResult(Document doc, long lastModifiedTimestamp) {
+ this.doc = doc;
+ this.lastModifiedTimestamp = lastModifiedTimestamp;
+ }
+
+ /**
+     * @return the timestamp at which the document was last modified,
+     *         or 0 if no document was found.
+     */
+    public long getLastModifiedTimestamp() { return lastModifiedTimestamp; }
+
+    /**
+     * @return true if the document was found.
+ */
+ public boolean wasFound() {
+ return doc != null;
+ }
+
+ public boolean hasDocument() {
+ return doc != null;
+ }
+
+ public Document getDocument() {
+ return doc;
+ }
+}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/result/IterateResult.java b/persistence/src/main/java/com/yahoo/persistence/spi/result/IterateResult.java
new file mode 100644
index 00000000000..1228e27e325
--- /dev/null
+++ b/persistence/src/main/java/com/yahoo/persistence/spi/result/IterateResult.java
@@ -0,0 +1,43 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.persistence.spi.result;
+
+import com.yahoo.persistence.spi.DocEntry;
+
+import java.util.List;
+
+/**
+ * Result class for iterate requests
+ */
+public class IterateResult extends Result {
+ List<DocEntry> entries = null;
+ boolean isCompleted = false;
+
+ /**
+ * Creates a result with an error.
+ *
+ * @param type The type of error
+ * @param message A human-readable error message to further detail the error.
+ */
+ public IterateResult(Result.ErrorType type, String message) {
+ super(type, message);
+ }
+
+ /**
+ * Creates a successful result.
+ *
+ * @param entries The next chunk of entries that were found during iteration.
+ * @param isCompleted Set to true if there are no more entries to iterate through.
+ */
+ public IterateResult(List<DocEntry> entries, boolean isCompleted) {
+ this.entries = entries;
+ this.isCompleted = isCompleted;
+ }
+
+ public List<DocEntry> getEntries() {
+ return entries;
+ }
+
+ public boolean isCompleted() {
+ return isCompleted;
+ }
+}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/result/PartitionStateListResult.java b/persistence/src/main/java/com/yahoo/persistence/spi/result/PartitionStateListResult.java
new file mode 100644
index 00000000000..d687c021dd7
--- /dev/null
+++ b/persistence/src/main/java/com/yahoo/persistence/spi/result/PartitionStateListResult.java
@@ -0,0 +1,37 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.persistence.spi.result;
+
+import com.yahoo.persistence.spi.PartitionState;
+
+import java.util.List;
+
+/**
+ * A result class for getPartitionState() requests.
+ */
+public class PartitionStateListResult extends Result {
+ List<PartitionState> partitionStates = null;
+
+ /**
+ * Creates a result with an error.
+ *
+ * @param type The type of error
+ * @param message A human-readable error message to further detail the error.
+ */
+ public PartitionStateListResult(Result.ErrorType type, String message) {
+ super(type, message);
+ }
+
+ /**
+ * Creates a result containing a list of all the partitions this provider has,
+ * and their states.
+ *
+     * @param partitions A list containing the state of each partition.
+ */
+ public PartitionStateListResult(List<PartitionState> partitions) {
+ this.partitionStates = partitions;
+ }
+
+ public List<PartitionState> getPartitionStates() {
+ return partitionStates;
+ }
+}
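
A small illustrative helper for PartitionStateListResult, assuming nothing beyond the accessors above:

    import com.yahoo.persistence.spi.PartitionState;
    import com.yahoo.persistence.spi.result.PartitionStateListResult;
    import java.util.List;

    class PartitionStateListResultUsage {
        /** Returns the number of partitions reported, or 0 if the result carries an error. */
        static int partitionCount(PartitionStateListResult result) {
            if (result.hasError()) {
                return 0;
            }
            List<PartitionState> states = result.getPartitionStates();
            return states == null ? 0 : states.size();
        }
    }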
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/result/RemoveResult.java b/persistence/src/main/java/com/yahoo/persistence/spi/result/RemoveResult.java
new file mode 100644
index 00000000000..c712da41286
--- /dev/null
+++ b/persistence/src/main/java/com/yahoo/persistence/spi/result/RemoveResult.java
@@ -0,0 +1,47 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.persistence.spi.result;
+
+import com.yahoo.persistence.spi.BucketInfo;
+
+/**
+ * Result class for remove operations.
+ */
+public class RemoveResult extends Result {
+ boolean wasFound = false;
+
+ /**
+ * Constructor to use when an error occurred during the remove.
+ *
+ * @param error The type of error that occurred
+ * @param message A human-readable message further detailing the error.
+ */
+ public RemoveResult(Result.ErrorType error, String message) {
+ super(error, message);
+ }
+
+ /**
+ * Constructor to use when there was no document to remove.
+ */
+ public RemoveResult() {}
+
+ /**
+ * Constructor to use when the update was successful.
+ *
+ * @param wasFound True if the document to remove was found.
+ */
+ public RemoveResult(boolean wasFound) {
+ this.wasFound = wasFound;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other instanceof RemoveResult) {
+ return super.equals((Result)other) &&
+ wasFound == ((RemoveResult)other).wasFound;
+ }
+
+ return false;
+ }
+
+ public boolean wasFound() { return wasFound; }
+}
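
A sketch of how the three RemoveResult constructor cases surface to a caller (the describe helper is invented for illustration):

    import com.yahoo.persistence.spi.result.RemoveResult;

    class RemoveResultUsage {
        /** Classifies a RemoveResult into one of three outcomes. */
        static String describe(RemoveResult result) {
            if (result.hasError()) {
                return "error: " + result.getErrorMessage();
            }
            // wasFound() is false both for new RemoveResult() and new RemoveResult(false).
            return result.wasFound() ? "document removed" : "no document to remove";
        }
    }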
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/result/Result.java b/persistence/src/main/java/com/yahoo/persistence/spi/result/Result.java
new file mode 100644
index 00000000000..ba2a00af1ff
--- /dev/null
+++ b/persistence/src/main/java/com/yahoo/persistence/spi/result/Result.java
@@ -0,0 +1,83 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.persistence.spi.result;
+
+/**
+ * Represents a result from an SPI method invocation.
+ */
+public class Result {
+
+ /**
+ * Enumeration of the various categories of errors that can be returned
+ * in a result.
+ *
+ * The categories are:
+ *
+ * NONE: No error occurred.
+ * TRANSIENT_ERROR: The operation failed, but may succeed if attempted again or on other data copies.
+ * PERMANENT_ERROR: The operation failed because it was malformed or its parameters were wrong. It won't succeed
+ * on other data copies either.
+ * FATAL_ERROR: The operation failed in such a way that this node should be stopped (for instance, on a disk
+ * failure). The operation will be retried on other data copies.
+ */
+ public enum ErrorType {
+ NONE,
+ TRANSIENT_ERROR,
+ PERMANENT_ERROR,
+ UNUSED_ID,
+ FATAL_ERROR
+ }
+
+ /**
+ * Constructor to use for a result where there is no error.
+ */
+ public Result() {
+ }
+
+ /**
+ * Creates a result with an error.
+ *
+ * @param type The type of error
+ * @param message A human-readable error message to further detail the error.
+ */
+ public Result(ErrorType type, String message) {
+ this.type = type;
+ this.message = message;
+ }
+
+ public boolean equals(Result other) {
+ return type.equals(other.type) &&
+ message.equals(other.message);
+ }
+
+ @Override
+ public boolean equals(Object otherResult) {
+ if (otherResult instanceof Result) {
+ return equals((Result)otherResult);
+ }
+
+ return false;
+ }
+
+ public boolean hasError() {
+ return type != ErrorType.NONE;
+ }
+
+ public ErrorType getErrorType() {
+ return type;
+ }
+
+ public String getErrorMessage() {
+ return message;
+ }
+
+ @Override
+ public String toString() {
+ if (type == ErrorType.NONE) {
+ return "Result(OK)";
+ }
+
+ return "Result(" + type.toString() + ", " + message + ")";
+ }
+
+ ErrorType type = ErrorType.NONE;
+ String message = "";
+}
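
To make the calling convention concrete, a self-contained sketch using only the Result API above; the validate method and its rule are invented for the example:

    import com.yahoo.persistence.spi.result.Result;

    class ResultUsage {
        static Result validate(String documentId) {
            if (documentId == null || documentId.isEmpty()) {
                // A malformed request will not succeed elsewhere, hence PERMANENT_ERROR.
                return new Result(Result.ErrorType.PERMANENT_ERROR, "Empty document id");
            }
            return new Result();  // no-error result; hasError() returns false
        }

        public static void main(String[] args) {
            Result r = validate("");
            if (r.hasError()) {
                System.err.println(r.getErrorType() + ": " + r.getErrorMessage());
            }
        }
    }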
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/result/UpdateResult.java b/persistence/src/main/java/com/yahoo/persistence/spi/result/UpdateResult.java
new file mode 100644
index 00000000000..5906411d594
--- /dev/null
+++ b/persistence/src/main/java/com/yahoo/persistence/spi/result/UpdateResult.java
@@ -0,0 +1,39 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.persistence.spi.result;
+
+import com.yahoo.persistence.spi.BucketInfo;
+
+/**
+ * Result class for update operations.
+ */
+public class UpdateResult extends Result {
+ long existingTimestamp = 0;
+
+ /**
+ * Constructor to use when an error occurred during the update
+ *
+ * @param error The type of error that occurred
+ * @param message A human-readable message further detailing the error.
+ */
+ public UpdateResult(ErrorType error, String message) {
+ super(error, message);
+ }
+
+ /**
+ * Constructor to use when the document to update was not found.
+ */
+ public UpdateResult() {
+ super();
+ }
+
+ /**
+ * Constructor to use when the update was successful.
+ *
+ * @param existingTimestamp The timestamp of the document that was updated.
+ */
+ public UpdateResult(long existingTimestamp) {
+ this.existingTimestamp = existingTimestamp;
+ }
+
+ public long getExistingTimestamp() { return existingTimestamp; }
+}
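
An illustrative helper showing how the zero timestamp doubles as a not-found marker (the helper is not part of this commit):

    import com.yahoo.persistence.spi.result.UpdateResult;

    class UpdateResultUsage {
        /** Returns the timestamp of the updated document, or 0 if no document was found. */
        static long updatedTimestampOrZero(UpdateResult result) {
            if (result.hasError()) {
                throw new RuntimeException(result.getErrorMessage());
            }
            // The no-argument constructor leaves existingTimestamp at 0 ("not found").
            return result.getExistingTimestamp();
        }
    }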
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/result/package-info.java b/persistence/src/main/java/com/yahoo/persistence/spi/result/package-info.java
new file mode 100644
index 00000000000..35767c7d8db
--- /dev/null
+++ b/persistence/src/main/java/com/yahoo/persistence/spi/result/package-info.java
@@ -0,0 +1,7 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+@ExportPackage
+@PublicApi
+package com.yahoo.persistence.spi.result;
+
+import com.yahoo.api.annotations.PublicApi;
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/persistence/src/main/resources/configdefinitions/persistence-rpc.def b/persistence/src/main/resources/configdefinitions/persistence-rpc.def
new file mode 100644
index 00000000000..baad78fba81
--- /dev/null
+++ b/persistence/src/main/resources/configdefinitions/persistence-rpc.def
@@ -0,0 +1,5 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+version=1
+namespace=persistence
+
+port int default=3456 restart
diff --git a/persistence/src/testlist.txt b/persistence/src/testlist.txt
new file mode 100644
index 00000000000..efcb85cb31d
--- /dev/null
+++ b/persistence/src/testlist.txt
@@ -0,0 +1,3 @@
+tests
+tests/dummyimpl
+tests/proxy
diff --git a/persistence/src/tests/.gitignore b/persistence/src/tests/.gitignore
new file mode 100644
index 00000000000..80120fe6e00
--- /dev/null
+++ b/persistence/src/tests/.gitignore
@@ -0,0 +1,6 @@
+.depend
+Makefile
+/testrunner
+/test.vlog
+*_test
+persistence_testrunner_app
diff --git a/persistence/src/tests/CMakeLists.txt b/persistence/src/tests/CMakeLists.txt
new file mode 100644
index 00000000000..946e479d0c3
--- /dev/null
+++ b/persistence/src/tests/CMakeLists.txt
@@ -0,0 +1,11 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(persistence_testrunner_app
+ SOURCES
+ testrunner.cpp
+ DEPENDS
+ persistence_testdummyimpl
+ persistence_testspi
+ persistence
+ persistence_persistence_conformancetest
+)
+vespa_add_test(NAME persistence_testrunner_app COMMAND persistence_testrunner_app)
diff --git a/persistence/src/tests/dummyimpl/.gitignore b/persistence/src/tests/dummyimpl/.gitignore
new file mode 100644
index 00000000000..f277484fee1
--- /dev/null
+++ b/persistence/src/tests/dummyimpl/.gitignore
@@ -0,0 +1,4 @@
+/.depend
+/Makefile
+/dummypersistence_test
+persistence_dummypersistence_test_app
diff --git a/persistence/src/tests/dummyimpl/CMakeLists.txt b/persistence/src/tests/dummyimpl/CMakeLists.txt
new file mode 100644
index 00000000000..5a257eb56f3
--- /dev/null
+++ b/persistence/src/tests/dummyimpl/CMakeLists.txt
@@ -0,0 +1,14 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_library(persistence_testdummyimpl
+ SOURCES
+ dummyimpltest.cpp
+ DEPENDS
+)
+vespa_add_executable(persistence_dummypersistence_test_app
+ SOURCES
+ dummypersistence_test.cpp
+ DEPENDS
+ persistence
+ persistence_persistence_conformancetest
+)
+vespa_add_test(NAME persistence_dummypersistence_test_app COMMAND persistence_dummypersistence_test_app)
diff --git a/persistence/src/tests/dummyimpl/dummyimpltest.cpp b/persistence/src/tests/dummyimpl/dummyimpltest.cpp
new file mode 100644
index 00000000000..1215303f9e6
--- /dev/null
+++ b/persistence/src/tests/dummyimpl/dummyimpltest.cpp
@@ -0,0 +1,46 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/persistence/conformancetest/conformancetest.h>
+#include <vespa/persistence/dummyimpl/dummypersistence.h>
+
+LOG_SETUP(".test.dummyimpl");
+
+namespace storage {
+namespace spi {
+
+struct DummyImplTest : public ConformanceTest {
+ struct Factory : public PersistenceFactory {
+
+ PersistenceProvider::UP
+ getPersistenceImplementation(const document::DocumentTypeRepo::SP& repo,
+ const document::DocumenttypesConfig&) {
+ return PersistenceProvider::UP(new dummy::DummyPersistence(repo, 4));
+ }
+
+ bool
+ supportsActiveState() const
+ {
+ return true;
+ }
+ bool
+ supportsRevert() const
+ {
+ return true;
+ }
+ };
+
+ DummyImplTest()
+ : ConformanceTest(PersistenceFactory::UP(new Factory)) {}
+
+ CPPUNIT_TEST_SUITE(DummyImplTest);
+ DEFINE_CONFORMANCE_TESTS();
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(DummyImplTest);
+
+} // spi
+} // storage
diff --git a/persistence/src/tests/dummyimpl/dummypersistence_test.cpp b/persistence/src/tests/dummyimpl/dummypersistence_test.cpp
new file mode 100644
index 00000000000..eef0c771177
--- /dev/null
+++ b/persistence/src/tests/dummyimpl/dummypersistence_test.cpp
@@ -0,0 +1,88 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Unit tests for dummypersistence.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("dummypersistence_test");
+
+#include <vespa/persistence/dummyimpl/dummypersistence.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+using namespace storage::spi;
+using namespace storage;
+using dummy::BucketContent;
+
+namespace {
+
+struct Fixture {
+ BucketContent content;
+
+ void insert(DocumentId id, Timestamp timestamp, int meta_flags) {
+ content.insert(DocEntry::LP(new DocEntry(timestamp, meta_flags, id)));
+ }
+
+ Fixture() {
+ insert(DocumentId("doc:test:3"), Timestamp(3), NONE);
+ insert(DocumentId("doc:test:1"), Timestamp(1), NONE);
+ insert(DocumentId("doc:test:2"), Timestamp(2), NONE);
+ }
+};
+
+TEST("require that empty BucketContent behaves") {
+ BucketContent content;
+ EXPECT_FALSE(content.hasTimestamp(Timestamp(1)));
+ EXPECT_FALSE(content.getEntry(Timestamp(1)).get());
+ EXPECT_FALSE(content.getEntry(DocumentId("doc:test:1")).get());
+}
+
+TEST_F("require that BucketContent can retrieve by timestamp", Fixture) {
+ DocEntry::LP entry = f.content.getEntry(Timestamp(1));
+ ASSERT_TRUE(entry.get());
+ ASSERT_TRUE(entry->getDocumentId());
+ ASSERT_EQUAL("doc:test:1", entry->getDocumentId()->toString());
+}
+
+TEST_F("require that BucketContent can retrieve by doc id", Fixture) {
+ DocEntry::LP entry = f.content.getEntry(DocumentId("doc:test:2"));
+ ASSERT_TRUE(entry.get());
+ ASSERT_TRUE(entry->getDocumentId());
+ ASSERT_EQUAL("doc:test:2", entry->getDocumentId()->toString());
+}
+
+TEST_F("require that BucketContent can check a timestamp", Fixture) {
+ EXPECT_FALSE(f.content.hasTimestamp(Timestamp(0)));
+ EXPECT_TRUE(f.content.hasTimestamp(Timestamp(1)));
+ EXPECT_TRUE(f.content.hasTimestamp(Timestamp(2)));
+ EXPECT_TRUE(f.content.hasTimestamp(Timestamp(3)));
+ EXPECT_FALSE(f.content.hasTimestamp(Timestamp(4)));
+}
+
+TEST_F("require that BucketContent can provide bucket info", Fixture) {
+ uint32_t lastChecksum = 0;
+ EXPECT_NOT_EQUAL(lastChecksum, f.content.getBucketInfo().getChecksum());
+ lastChecksum = f.content.getBucketInfo().getChecksum();
+ f.insert(DocumentId("doc:test:3"), Timestamp(4), NONE);
+ EXPECT_NOT_EQUAL(lastChecksum, f.content.getBucketInfo().getChecksum());
+ lastChecksum = f.content.getBucketInfo().getChecksum();
+ f.insert(DocumentId("doc:test:2"), Timestamp(5), REMOVE_ENTRY);
+ EXPECT_NOT_EQUAL(lastChecksum, f.content.getBucketInfo().getChecksum());
+ f.insert(DocumentId("doc:test:1"), Timestamp(6), REMOVE_ENTRY);
+ f.insert(DocumentId("doc:test:3"), Timestamp(7), REMOVE_ENTRY);
+ EXPECT_EQUAL(0u, f.content.getBucketInfo().getChecksum());
+}
+
+TEST_F("require that setClusterState sets the cluster state", Fixture) {
+ lib::ClusterState s("version:1 storage:3 .1.s:d distributor:3");
+ lib::Distribution d(lib::Distribution::getDefaultDistributionConfig(3, 3));
+ ClusterState state(s, 1, d);
+
+ document::DocumentTypeRepo::SP repo;
+ dummy::DummyPersistence provider(repo);
+ provider.setClusterState(state);
+
+ EXPECT_EQUAL(false, provider.getClusterState().nodeUp());
+}
+
+} // namespace
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/persistence/src/tests/proxy/.gitignore b/persistence/src/tests/proxy/.gitignore
new file mode 100644
index 00000000000..9bd2934723e
--- /dev/null
+++ b/persistence/src/tests/proxy/.gitignore
@@ -0,0 +1,10 @@
+/.depend
+/Makefile
+/providerstub_test
+/providerproxy_test
+/providerproxy_conformancetest
+/external_providerproxy_conformancetest
+persistence_providerproxy_conformance_test_app
+persistence_providerproxy_test_app
+persistence_providerstub_test_app
+persistence_external_providerproxy_conformancetest_app
diff --git a/persistence/src/tests/proxy/CMakeLists.txt b/persistence/src/tests/proxy/CMakeLists.txt
new file mode 100644
index 00000000000..c12eaf217a4
--- /dev/null
+++ b/persistence/src/tests/proxy/CMakeLists.txt
@@ -0,0 +1,28 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(persistence_providerstub_test_app
+ SOURCES
+ providerstub_test.cpp
+ DEPENDS
+ persistence
+)
+vespa_add_executable(persistence_providerproxy_test_app
+ SOURCES
+ providerproxy_test.cpp
+ DEPENDS
+ persistence
+)
+vespa_add_executable(persistence_providerproxy_conformance_test_app
+ SOURCES
+ providerproxy_conformancetest.cpp
+ DEPENDS
+ persistence
+ persistence_persistence_conformancetest
+)
+vespa_add_executable(persistence_external_providerproxy_conformancetest_app
+ SOURCES
+ external_providerproxy_conformancetest.cpp
+ DEPENDS
+ persistence
+ persistence_persistence_conformancetest
+)
+vespa_add_test(NAME persistence_providerproxy_conformance_test_app COMMAND sh proxy_test.sh)
diff --git a/persistence/src/tests/proxy/dummy_provider_factory.h b/persistence/src/tests/proxy/dummy_provider_factory.h
new file mode 100644
index 00000000000..8330b4a917b
--- /dev/null
+++ b/persistence/src/tests/proxy/dummy_provider_factory.h
@@ -0,0 +1,35 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/persistence/proxy/providerstub.h>
+#include <memory>
+
+namespace storage {
+namespace spi {
+
+/**
+ * A simple rpc server persistence provider factory that will only
+ * work once, by returning a precreated persistence provider instance.
+ **/
+struct DummyProviderFactory : ProviderStub::PersistenceProviderFactory
+{
+ typedef std::unique_ptr<DummyProviderFactory> UP;
+ typedef storage::spi::PersistenceProvider Provider;
+
+ mutable std::unique_ptr<Provider> provider;
+
+ DummyProviderFactory(std::unique_ptr<Provider> p) : provider(std::move(p)) {}
+
+ std::unique_ptr<Provider> create() const {
+ ASSERT_TRUE(provider.get() != 0);
+ std::unique_ptr<Provider> ret = std::move(provider);
+ ASSERT_TRUE(provider.get() == 0);
+ return ret;
+ }
+};
+
+} // namespace spi
+} // namespace storage
+
diff --git a/persistence/src/tests/proxy/external_providerproxy_conformancetest.cpp b/persistence/src/tests/proxy/external_providerproxy_conformancetest.cpp
new file mode 100644
index 00000000000..f24d81532b7
--- /dev/null
+++ b/persistence/src/tests/proxy/external_providerproxy_conformancetest.cpp
@@ -0,0 +1,43 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/persistence/conformancetest/conformancetest.h>
+#include <vespa/persistence/proxy/providerproxy.h>
+#include <vespa/persistence/proxy/providerstub.h>
+#include "proxyfactory.h"
+
+using namespace storage::spi;
+typedef document::DocumentTypeRepo Repo;
+typedef ConformanceTest::PersistenceFactory Factory;
+
+namespace {
+
+struct ConformanceFixture : public ConformanceTest {
+ ConformanceFixture(Factory::UP f) : ConformanceTest(std::move(f)) { setUp(); }
+ ~ConformanceFixture() { tearDown(); }
+};
+
+Factory::UP getFactory() {
+ return Factory::UP(new ProxyFactory());
+}
+
+#define CONVERT_TEST(testFunction, makeFactory) \
+namespace ns_ ## testFunction { \
+TEST_F(TEST_STR(testFunction) " " TEST_STR(makeFactory), ConformanceFixture(makeFactory)) { \
+ f.testFunction(); \
+} \
+} // namespace testFunction
+
+#undef CPPUNIT_TEST
+#define CPPUNIT_TEST(testFunction) CONVERT_TEST(testFunction, MAKE_FACTORY)
+
+#define MAKE_FACTORY getFactory()
+DEFINE_CONFORMANCE_TESTS();
+
+} // namespace
+
+TEST_MAIN() {
+ TEST_RUN_ALL();
+}
diff --git a/persistence/src/tests/proxy/mockprovider.h b/persistence/src/tests/proxy/mockprovider.h
new file mode 100644
index 00000000000..c2fd844a010
--- /dev/null
+++ b/persistence/src/tests/proxy/mockprovider.h
@@ -0,0 +1,172 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <vespa/persistence/spi/persistenceprovider.h>
+
+namespace storage {
+namespace spi {
+
+struct MockProvider : PersistenceProvider {
+ enum Function { NONE, INITIALIZE, GET_PARTITION_STATES, LIST_BUCKETS,
+ SET_CLUSTER_STATE,
+ SET_ACTIVE_STATE, GET_BUCKET_INFO, PUT, REMOVE_BY_ID,
+ REMOVE_IF_FOUND, REPLACE_WITH_REMOVE, UPDATE, FLUSH, GET,
+ CREATE_ITERATOR, ITERATE, DESTROY_ITERATOR, CREATE_BUCKET,
+ DELETE_BUCKET, GET_MODIFIED_BUCKETS, SPLIT, JOIN, MOVE, MAINTAIN,
+ REMOVE_ENTRY };
+
+ mutable Function last_called;
+
+ MockProvider() : last_called(NONE) {}
+
+ virtual Result initialize() {
+ last_called = INITIALIZE;
+ return Result();
+ }
+
+ virtual PartitionStateListResult getPartitionStates() const {
+ last_called = GET_PARTITION_STATES;
+ return PartitionStateListResult(PartitionStateList(1u));
+ }
+
+ virtual BucketIdListResult listBuckets(PartitionId id) const {
+ last_called = LIST_BUCKETS;
+ BucketIdListResult::List result;
+ result.push_back(document::BucketId(id));
+ return BucketIdListResult(result);
+ }
+
+ virtual Result setClusterState(const ClusterState &) {
+ last_called = SET_CLUSTER_STATE;
+ return Result();
+ }
+
+ virtual Result setActiveState(const Bucket &,
+ BucketInfo::ActiveState) {
+ last_called = SET_ACTIVE_STATE;
+ return Result();
+ }
+
+ virtual BucketInfoResult getBucketInfo(const Bucket &bucket) const {
+ last_called = GET_BUCKET_INFO;
+ return BucketInfoResult(BucketInfo(BucketChecksum(1), 2, 3,
+ bucket.getBucketId().getRawId(),
+ bucket.getPartition(),
+ BucketInfo::READY,
+ BucketInfo::ACTIVE));
+ }
+
+ virtual Result put(const Bucket &, Timestamp, const Document::SP&, Context&) {
+ last_called = PUT;
+ return Result();
+ }
+
+ virtual RemoveResult remove(const Bucket &, Timestamp,
+ const DocumentId &, Context&) {
+ last_called = REMOVE_BY_ID;
+ return RemoveResult(true);
+ }
+
+ virtual RemoveResult removeIfFound(const Bucket &, Timestamp,
+ const DocumentId &, Context&) {
+ last_called = REMOVE_IF_FOUND;
+ return RemoveResult(true);
+ }
+
+ virtual RemoveResult replaceWithRemove(const Bucket &, Timestamp,
+ const DocumentId &, Context&) {
+ last_called = REPLACE_WITH_REMOVE;
+ return RemoveResult(true);
+ }
+
+ virtual UpdateResult update(const Bucket &, Timestamp timestamp,
+ const DocumentUpdate::SP&, Context&) {
+ last_called = UPDATE;
+ return UpdateResult(Timestamp(timestamp - 10));
+ }
+
+ virtual Result flush(const Bucket&, Context&) {
+ last_called = FLUSH;
+ return Result();
+ }
+
+ virtual GetResult get(const Bucket &, const document::FieldSet&,
+ const DocumentId&, Context&) const {
+ last_called = GET;
+ return GetResult(Document::UP(new Document),
+ Timestamp(6u));
+ }
+
+ virtual CreateIteratorResult createIterator(const Bucket& bucket,
+ const document::FieldSet&,
+ const Selection&,
+ IncludedVersions,
+ Context&)
+ {
+ last_called = CREATE_ITERATOR;
+ return CreateIteratorResult(IteratorId(bucket.getPartition()));
+ }
+
+ virtual IterateResult iterate(IteratorId, uint64_t, Context&) const {
+ last_called = ITERATE;
+ IterateResult::List result;
+ result.push_back(DocEntry::LP(new DocEntry(Timestamp(1), 0)));
+ return IterateResult(result, true);
+ }
+
+ virtual Result destroyIterator(IteratorId, Context&) {
+ last_called = DESTROY_ITERATOR;
+ return Result();
+ }
+
+ virtual Result createBucket(const Bucket&, Context&) {
+ last_called = CREATE_BUCKET;
+ return Result();
+ }
+ virtual Result deleteBucket(const Bucket&, Context&) {
+ last_called = DELETE_BUCKET;
+ return Result();
+ }
+
+ virtual BucketIdListResult getModifiedBuckets() const {
+ last_called = GET_MODIFIED_BUCKETS;
+ BucketIdListResult::List list;
+ list.push_back(document::BucketId(2));
+ list.push_back(document::BucketId(3));
+ return BucketIdListResult(list);
+ }
+
+ virtual Result split(const Bucket &, const Bucket &, const Bucket &,
+ Context&)
+ {
+ last_called = SPLIT;
+ return Result();
+ }
+
+ virtual Result join(const Bucket &, const Bucket &, const Bucket &,
+ Context&)
+ {
+ last_called = JOIN;
+ return Result();
+ }
+
+ virtual Result move(const Bucket &, PartitionId, Context&) {
+ last_called = MOVE;
+ return Result();
+ }
+
+
+ virtual Result maintain(const Bucket &, MaintenanceLevel) {
+ last_called = MAINTAIN;
+ return Result();
+ }
+
+ virtual Result removeEntry(const Bucket &, Timestamp, Context&) {
+ last_called = REMOVE_ENTRY;
+ return Result();
+ }
+};
+
+} // namespace spi
+} // namespace storage
+
diff --git a/persistence/src/tests/proxy/providerproxy_conformancetest.cpp b/persistence/src/tests/proxy/providerproxy_conformancetest.cpp
new file mode 100644
index 00000000000..cd2f711ffd5
--- /dev/null
+++ b/persistence/src/tests/proxy/providerproxy_conformancetest.cpp
@@ -0,0 +1,64 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/persistence/conformancetest/conformancetest.h>
+#include <vespa/persistence/dummyimpl/dummypersistence.h>
+#include <vespa/persistence/proxy/providerproxy.h>
+#include <vespa/persistence/proxy/providerstub.h>
+#include "proxy_factory_wrapper.h"
+
+using namespace storage::spi;
+typedef document::DocumentTypeRepo Repo;
+typedef ConformanceTest::PersistenceFactory Factory;
+
+namespace {
+
+struct DummyFactory : Factory {
+ PersistenceProvider::UP getPersistenceImplementation(const Repo::SP& repo,
+ const document::DocumenttypesConfig &) {
+ return PersistenceProvider::UP(new dummy::DummyPersistence(repo, 4));
+ }
+
+ virtual bool
+ supportsActiveState() const
+ {
+ return true;
+ }
+};
+
+struct ConformanceFixture : public ConformanceTest {
+ ConformanceFixture(Factory::UP f) : ConformanceTest(std::move(f)) { setUp(); }
+ ~ConformanceFixture() { tearDown(); }
+};
+
+Factory::UP dummyViaProxy(size_t n) {
+ if (n == 0) {
+ return Factory::UP(new DummyFactory());
+ }
+ return Factory::UP(new ProxyFactoryWrapper(dummyViaProxy(n - 1)));
+}
+
+#define CONVERT_TEST(testFunction, makeFactory) \
+namespace ns_ ## testFunction { \
+TEST_F(TEST_STR(testFunction) " " TEST_STR(makeFactory), ConformanceFixture(makeFactory)) { \
+ f.testFunction(); \
+} \
+} // namespace testFunction
+
+#undef CPPUNIT_TEST
+#define CPPUNIT_TEST(testFunction) CONVERT_TEST(testFunction, MAKE_FACTORY)
+
+#define MAKE_FACTORY dummyViaProxy(1)
+DEFINE_CONFORMANCE_TESTS();
+
+#undef MAKE_FACTORY
+#define MAKE_FACTORY dummyViaProxy(7)
+DEFINE_CONFORMANCE_TESTS();
+
+} // namespace
+
+TEST_MAIN() {
+ TEST_RUN_ALL();
+}
diff --git a/persistence/src/tests/proxy/providerproxy_test.cpp b/persistence/src/tests/proxy/providerproxy_test.cpp
new file mode 100644
index 00000000000..34537b170e6
--- /dev/null
+++ b/persistence/src/tests/proxy/providerproxy_test.cpp
@@ -0,0 +1,402 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Unit tests for providerproxy.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("providerproxy_test");
+
+#include "dummy_provider_factory.h"
+#include "mockprovider.h"
+#include <vespa/document/bucket/bucketid.h>
+#include <vespa/document/datatype/datatype.h>
+#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/persistence/proxy/providerproxy.h>
+#include <vespa/persistence/proxy/providerstub.h>
+#include <vespa/persistence/spi/abstractpersistenceprovider.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/util/closure.h>
+#include <vespa/vespalib/util/closuretask.h>
+#include <vespa/vespalib/util/sync.h>
+#include <vespa/vespalib/util/threadstackexecutor.h>
+#include <vespa/document/fieldset/fieldsets.h>
+
+using document::BucketId;
+using document::DataType;
+using document::DocumentTypeRepo;
+using std::ostringstream;
+using vespalib::Gate;
+using vespalib::ThreadStackExecutor;
+using vespalib::makeClosure;
+using vespalib::makeTask;
+using namespace storage::spi;
+using namespace storage;
+
+namespace {
+
+const int port = 14863;
+const string connect_spec = "tcp/localhost:14863";
+LoadType defaultLoadType(0, "default");
+
+void startServer(const DocumentTypeRepo *repo, Gate *gate) {
+ DummyProviderFactory factory(MockProvider::UP(new MockProvider));
+ ProviderStub stub(port, 8, *repo, factory);
+ gate->await();
+ EXPECT_TRUE(stub.hasClient());
+}
+
+TEST("require that client can start connecting before server is up") {
+ const DocumentTypeRepo repo;
+ Gate gate;
+ ThreadStackExecutor executor(1, 65536);
+ executor.execute(makeTask(makeClosure(startServer, &repo, &gate)));
+ ProviderProxy proxy(connect_spec, repo);
+ gate.countDown();
+ executor.sync();
+}
+
+TEST("require that when the server goes down it causes permanent failure.") {
+ const DocumentTypeRepo repo;
+ DummyProviderFactory factory(MockProvider::UP(new MockProvider));
+ ProviderStub::UP server(new ProviderStub(port, 8, repo, factory));
+ ProviderProxy proxy(connect_spec, repo);
+ server.reset(0);
+
+ const uint64_t bucket_id = 21;
+ const PartitionId partition_id(42);
+ const Bucket bucket(BucketId(bucket_id), partition_id);
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ Result result = proxy.flush(bucket, context);
+ EXPECT_EQUAL(Result::FATAL_ERROR, result.getErrorCode());
+}
+
+struct Fixture {
+ MockProvider &mock_spi;
+ DummyProviderFactory factory;
+ DocumentTypeRepo repo;
+ ProviderStub stub;
+ ProviderProxy proxy;
+
+ Fixture()
+ : mock_spi(*(new MockProvider)),
+ factory(PersistenceProvider::UP(&mock_spi)),
+ repo(),
+ stub(port, 8, repo, factory),
+ proxy(connect_spec, repo) {}
+};
+
+TEST_F("require that client handles initialize", Fixture) {
+ Result result = f.proxy.initialize();
+ EXPECT_EQUAL(MockProvider::INITIALIZE, f.mock_spi.last_called);
+}
+
+TEST_F("require that client handles getPartitionStates", Fixture) {
+ PartitionStateListResult result = f.proxy.getPartitionStates();
+ EXPECT_EQUAL(MockProvider::GET_PARTITION_STATES, f.mock_spi.last_called);
+ EXPECT_EQUAL(1u, result.getList().size());
+}
+
+TEST_F("require that client handles listBuckets", Fixture) {
+ const PartitionId partition_id(42);
+
+ BucketIdListResult result = f.proxy.listBuckets(partition_id);
+ EXPECT_EQUAL(MockProvider::LIST_BUCKETS, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0, result.getErrorCode());
+ EXPECT_EQUAL("", result.getErrorMessage());
+ ASSERT_EQUAL(1u, result.getList().size());
+}
+
+TEST_F("require that client handles setClusterState", Fixture) {
+ lib::ClusterState s("version:1 storage:3 distributor:3");
+ lib::Distribution d(lib::Distribution::getDefaultDistributionConfig(3, 3));
+ ClusterState state(s, 0, d);
+
+ Result result = f.proxy.setClusterState(state);
+ EXPECT_EQUAL(MockProvider::SET_CLUSTER_STATE, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0, result.getErrorCode());
+ EXPECT_EQUAL("", result.getErrorMessage());
+}
+
+TEST_F("require that client handles setActiveState", Fixture) {
+ const uint64_t bucket_id = 21;
+ const PartitionId partition_id(42);
+ const Bucket bucket(BucketId(bucket_id), partition_id);
+ const BucketInfo::ActiveState bucket_state = BucketInfo::NOT_ACTIVE;
+
+ Result result = f.proxy.setActiveState(bucket, bucket_state);
+ EXPECT_EQUAL(MockProvider::SET_ACTIVE_STATE, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0, result.getErrorCode());
+ EXPECT_EQUAL("", result.getErrorMessage());
+}
+
+TEST_F("require that client handles getBucketInfo", Fixture) {
+ const uint64_t bucket_id = 21;
+ const PartitionId partition_id(42);
+ const Bucket bucket(BucketId(bucket_id), partition_id);
+
+ BucketInfoResult result = f.proxy.getBucketInfo(bucket);
+ EXPECT_EQUAL(MockProvider::GET_BUCKET_INFO, f.mock_spi.last_called);
+
+ const BucketInfo& info(result.getBucketInfo());
+ EXPECT_EQUAL(0, result.getErrorCode());
+ EXPECT_EQUAL("", result.getErrorMessage());
+ EXPECT_EQUAL(1u, info.getChecksum());
+ EXPECT_EQUAL(2u, info.getDocumentCount());
+ EXPECT_EQUAL(3u, info.getDocumentSize());
+ EXPECT_EQUAL(bucket_id, info.getEntryCount());
+ EXPECT_EQUAL(partition_id, info.getUsedSize());
+ EXPECT_EQUAL(true, info.isReady());
+ EXPECT_EQUAL(true, info.isActive());
+}
+
+TEST_F("require that client handles put", Fixture) {
+ const uint64_t bucket_id = 21;
+ const PartitionId partition_id(42);
+ const Bucket bucket(BucketId(bucket_id), partition_id);
+ const Timestamp timestamp(84);
+ Document::SP doc(new Document());
+
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ Result result = f.proxy.put(bucket, timestamp, doc, context);
+ EXPECT_EQUAL(MockProvider::PUT, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0, result.getErrorCode());
+ EXPECT_EQUAL("", result.getErrorMessage());
+}
+
+TEST_F("require that client handles remove by id", Fixture) {
+ const uint64_t bucket_id = 21;
+ const PartitionId partition_id(42);
+ const Bucket bucket(BucketId(bucket_id), partition_id);
+ const Timestamp timestamp(84);
+ const DocumentId id("doc:test:1");
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ RemoveResult result = f.proxy.remove(bucket, timestamp, id, context);
+ EXPECT_EQUAL(MockProvider::REMOVE_BY_ID, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0, result.getErrorCode());
+ EXPECT_EQUAL("", result.getErrorMessage());
+ EXPECT_EQUAL(true, result.wasFound());
+}
+
+TEST_F("require that client handles removeIfFound", Fixture) {
+ const uint64_t bucket_id = 21;
+ const PartitionId partition_id(42);
+ const Bucket bucket(BucketId(bucket_id), partition_id);
+ const Timestamp timestamp(84);
+ const DocumentId id("doc:test:1");
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ RemoveResult result = f.proxy.removeIfFound(bucket, timestamp, id, context);
+ EXPECT_EQUAL(MockProvider::REMOVE_IF_FOUND, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0, result.getErrorCode());
+ EXPECT_EQUAL("", result.getErrorMessage());
+ EXPECT_EQUAL(true, result.wasFound());
+}
+
+TEST_F("require that client handles update", Fixture) {
+ const uint64_t bucket_id = 21;
+ const PartitionId partition_id(42);
+ const Bucket bucket(BucketId(bucket_id), partition_id);
+ const Timestamp timestamp(84);
+ DocumentUpdate::SP update(new DocumentUpdate(*DataType::DOCUMENT, DocumentId("doc:test:1")));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ UpdateResult result = f.proxy.update(bucket, timestamp, update, context);
+ EXPECT_EQUAL(MockProvider::UPDATE, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0, result.getErrorCode());
+ EXPECT_EQUAL("", result.getErrorMessage());
+ EXPECT_EQUAL(timestamp - 10, result.getExistingTimestamp());
+}
+
+TEST_F("require that client handles flush", Fixture) {
+ const uint64_t bucket_id = 21;
+ const PartitionId partition_id(42);
+ const Bucket bucket(BucketId(bucket_id), partition_id);
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ Result result = f.proxy.flush(bucket, context);
+ EXPECT_EQUAL(MockProvider::FLUSH, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0, result.getErrorCode());
+ EXPECT_EQUAL("", result.getErrorMessage());
+}
+
+TEST_F("require that client handles get", Fixture) {
+ const uint64_t bucket_id = 21;
+ const PartitionId partition_id(42);
+ const Bucket bucket(BucketId(bucket_id), partition_id);
+
+ document::AllFields field_set;
+ const DocumentId id("doc:test:1");
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ GetResult result = f.proxy.get(bucket, field_set, id, context);
+ EXPECT_EQUAL(MockProvider::GET, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0, result.getErrorCode());
+ EXPECT_EQUAL("", result.getErrorMessage());
+ EXPECT_EQUAL(6u, result.getTimestamp());
+ ASSERT_TRUE(result.hasDocument());
+ EXPECT_EQUAL(Document(), result.getDocument());
+}
+
+TEST_F("require that client handles createIterator", Fixture) {
+ const uint64_t bucket_id = 21;
+ const PartitionId partition_id(42);
+ const Bucket bucket(BucketId(bucket_id), partition_id);
+ const DocumentSelection doc_sel("docsel");
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ document::AllFields field_set;
+
+ Selection selection(doc_sel);
+ selection.setFromTimestamp(Timestamp(84));
+ selection.setToTimestamp(Timestamp(126));
+
+ CreateIteratorResult result =
+ f.proxy.createIterator(bucket, field_set, selection,
+ NEWEST_DOCUMENT_ONLY, context);
+
+ EXPECT_EQUAL(MockProvider::CREATE_ITERATOR, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0, result.getErrorCode());
+ EXPECT_EQUAL("", result.getErrorMessage());
+ EXPECT_EQUAL(partition_id, result.getIteratorId());
+}
+
+TEST_F("require that client handles iterate", Fixture) {
+ const IteratorId iterator_id(42);
+ const uint64_t max_byte_size = 21;
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ IterateResult result = f.proxy.iterate(iterator_id, max_byte_size, context);
+ EXPECT_EQUAL(MockProvider::ITERATE, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0, result.getErrorCode());
+ EXPECT_EQUAL("", result.getErrorMessage());
+ EXPECT_EQUAL(1u, result.getEntries().size());
+ EXPECT_TRUE(result.isCompleted());
+}
+
+TEST_F("require that client handles destroyIterator", Fixture) {
+ const IteratorId iterator_id(42);
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ f.proxy.destroyIterator(iterator_id, context);
+ EXPECT_EQUAL(MockProvider::DESTROY_ITERATOR, f.mock_spi.last_called);
+}
+
+TEST_F("require that client handles createBucket", Fixture) {
+ const uint64_t bucket_id = 21;
+ const PartitionId partition_id(42);
+ const Bucket bucket(BucketId(bucket_id), partition_id);
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ f.proxy.createBucket(bucket, context);
+ EXPECT_EQUAL(MockProvider::CREATE_BUCKET, f.mock_spi.last_called);
+}
+
+TEST_F("require that server accepts deleteBucket", Fixture) {
+ const uint64_t bucket_id = 21;
+ const PartitionId partition_id(42);
+ const Bucket bucket(BucketId(bucket_id), partition_id);
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ f.proxy.deleteBucket(bucket, context);
+ EXPECT_EQUAL(MockProvider::DELETE_BUCKET, f.mock_spi.last_called);
+}
+
+TEST_F("require that client handles getModifiedBuckets", Fixture) {
+ BucketIdListResult modifiedBuckets = f.proxy.getModifiedBuckets();
+ EXPECT_EQUAL(MockProvider::GET_MODIFIED_BUCKETS, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(2u, modifiedBuckets.getList().size());
+}
+
+TEST_F("require that client handles split", Fixture) {
+ const uint64_t bucket_id_1 = 21;
+ const PartitionId partition_id_1(42);
+ const Bucket bucket_1(BucketId(bucket_id_1), partition_id_1);
+ const uint64_t bucket_id_2 = 210;
+ const PartitionId partition_id_2(420);
+ const Bucket bucket_2(BucketId(bucket_id_2), partition_id_2);
+ const uint64_t bucket_id_3 = 2100;
+ const PartitionId partition_id_3(4200);
+ const Bucket bucket_3(BucketId(bucket_id_3), partition_id_3);
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ Result result = f.proxy.split(bucket_1, bucket_2, bucket_3, context);
+ EXPECT_EQUAL(MockProvider::SPLIT, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0, result.getErrorCode());
+ EXPECT_EQUAL("", result.getErrorMessage());
+}
+
+TEST_F("require that client handles join", Fixture) {
+ const uint64_t bucket_id_1 = 21;
+ const PartitionId partition_id_1(42);
+ const Bucket bucket_1(BucketId(bucket_id_1), partition_id_1);
+ const uint64_t bucket_id_2 = 210;
+ const PartitionId partition_id_2(420);
+ const Bucket bucket_2(BucketId(bucket_id_2), partition_id_2);
+ const uint64_t bucket_id_3 = 2100;
+ const PartitionId partition_id_3(4200);
+ const Bucket bucket_3(BucketId(bucket_id_3), partition_id_3);
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ Result result = f.proxy.join(bucket_1, bucket_2, bucket_3, context);
+ EXPECT_EQUAL(MockProvider::JOIN, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0, result.getErrorCode());
+ EXPECT_EQUAL("", result.getErrorMessage());
+}
+
+TEST_F("require that client handles move", Fixture) {
+ const uint64_t bucket_id = 21;
+ const PartitionId from_partition_id(42);
+ const PartitionId to_partition_id(43);
+ const Bucket bucket(BucketId(bucket_id), from_partition_id);
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ Result result = f.proxy.move(bucket, to_partition_id, context);
+ EXPECT_EQUAL(MockProvider::MOVE, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0, result.getErrorCode());
+ EXPECT_EQUAL("", result.getErrorMessage());
+}
+
+TEST_F("require that client handles maintain", Fixture) {
+ const uint64_t bucket_id = 21;
+ const PartitionId partition_id(42);
+ const Bucket bucket(BucketId(bucket_id), partition_id);
+
+ Result result = f.proxy.maintain(bucket, HIGH);
+ EXPECT_EQUAL(MockProvider::MAINTAIN, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0, result.getErrorCode());
+ EXPECT_EQUAL("", result.getErrorMessage());
+}
+
+TEST_F("require that client handles remove entry", Fixture) {
+ const uint64_t bucket_id = 21;
+ const PartitionId partition_id(42);
+ const Bucket bucket(BucketId(bucket_id), partition_id);
+ const Timestamp timestamp(345);
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ Result result = f.proxy.removeEntry(bucket, timestamp, context);
+ EXPECT_EQUAL(MockProvider::REMOVE_ENTRY, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0, result.getErrorCode());
+ EXPECT_EQUAL("", result.getErrorMessage());
+}
+
+} // namespace
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/persistence/src/tests/proxy/providerstub_test.cpp b/persistence/src/tests/proxy/providerstub_test.cpp
new file mode 100644
index 00000000000..07eed26db19
--- /dev/null
+++ b/persistence/src/tests/proxy/providerstub_test.cpp
@@ -0,0 +1,538 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Unit tests for providerstub.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP("providerstub_test");
+
+#include <vespa/document/datatype/datatype.h>
+#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/document/serialization/vespadocumentserializer.h>
+#include <vespa/document/util/bytebuffer.h>
+#include <vespa/persistence/proxy/buildid.h>
+#include <vespa/persistence/proxy/providerstub.h>
+#include <vespa/persistence/spi/abstractpersistenceprovider.h>
+#include <vespa/vespalib/objects/nbostream.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+using document::BucketId;
+using document::ByteBuffer;
+using document::DataType;
+using document::DocumentTypeRepo;
+using document::VespaDocumentSerializer;
+using vespalib::nbostream;
+using namespace storage::spi;
+using namespace storage;
+
+#include <tests/proxy/mockprovider.h>
+#include "dummy_provider_factory.h"
+
+namespace {
+
+const int port = 14863;
+const char connect_spec[] = "tcp/localhost:14863";
+const string build_id = getBuildId();
+
+struct Fixture {
+ MockProvider &mock_spi;
+ DummyProviderFactory factory;
+ DocumentTypeRepo repo;
+ ProviderStub stub;
+ FRT_Supervisor supervisor;
+ FRT_RPCRequest *current_request;
+ FRT_Target *target;
+
+ Fixture()
+ : mock_spi(*(new MockProvider())),
+ factory(PersistenceProvider::UP(&mock_spi)),
+ repo(),
+ stub(port, 8, repo, factory),
+ supervisor(),
+ current_request(0),
+ target(supervisor.GetTarget(connect_spec))
+ {
+ supervisor.Start();
+ ASSERT_TRUE(target);
+ }
+ ~Fixture() {
+ if (current_request) {
+ current_request->SubRef();
+ }
+ target->SubRef();
+ supervisor.ShutDown(true);
+ }
+ FRT_RPCRequest *getRequest(const string &name) {
+ FRT_RPCRequest *req = supervisor.AllocRPCRequest(current_request);
+ current_request = req;
+ req->SetMethodName(name.c_str());
+ return req;
+ }
+ void callRpc(FRT_RPCRequest *req, const string &return_spec) {
+ target->InvokeSync(req, 5.0);
+ req->CheckReturnTypes(return_spec.c_str());
+ if (!EXPECT_EQUAL(uint32_t(FRTE_NO_ERROR), req->GetErrorCode())) {
+ TEST_FATAL(req->GetErrorMessage());
+ }
+ }
+ void failRpc(FRT_RPCRequest *req, uint32_t error_code) {
+ target->InvokeSync(req, 5.0);
+ EXPECT_EQUAL(error_code, req->GetErrorCode());
+ }
+};
+
+struct ConnectedFixture : Fixture {
+ ConnectedFixture() {
+ FRT_RPCRequest *req = getRequest("vespa.persistence.connect");
+ req->GetParams()->AddString(build_id.data(), build_id.size());
+ callRpc(req, "");
+ }
+};
+
+TEST("print build id") { fprintf(stderr, "build id: '%s'\n", getBuildId()); }
+
+TEST_F("require that server accepts connect", Fixture) {
+ FRT_RPCRequest *req = f.getRequest("vespa.persistence.connect");
+ req->GetParams()->AddString(build_id.data(), build_id.size());
+ f.callRpc(req, "");
+ EXPECT_TRUE(f.stub.hasClient());
+}
+
+TEST_F("require that connect can be called twice", ConnectedFixture) {
+ EXPECT_TRUE(f.stub.hasClient());
+ FRT_RPCRequest *req = f.getRequest("vespa.persistence.connect");
+ req->GetParams()->AddString(build_id.data(), build_id.size());
+ f.callRpc(req, "");
+ EXPECT_TRUE(f.stub.hasClient());
+}
+
+TEST_F("require that connect fails with wrong build id", Fixture) {
+ FRT_RPCRequest *req = f.getRequest("vespa.persistence.connect");
+ const string wrong_id = "wrong build id";
+ req->GetParams()->AddString(wrong_id.data(), wrong_id.size());
+ f.failRpc(req, FRTE_RPC_METHOD_FAILED);
+ string prefix("Wrong build id. Got 'wrong build id', required ");
+ EXPECT_EQUAL(prefix,
+ string(req->GetErrorMessage()).substr(0, prefix.size()));
+ EXPECT_FALSE(f.stub.hasClient());
+}
+
+TEST_F("require that only one client can connect", ConnectedFixture) {
+ EXPECT_TRUE(f.stub.hasClient());
+ FRT_RPCRequest *req = f.getRequest("vespa.persistence.connect");
+ req->GetParams()->AddString(build_id.data(), build_id.size());
+ FRT_Target *target = f.supervisor.GetTarget(connect_spec);
+ target->InvokeSync(req, 5.0);
+ target->SubRef();
+ EXPECT_EQUAL(uint32_t(FRTE_RPC_METHOD_FAILED), req->GetErrorCode());
+ EXPECT_EQUAL("Server is already connected",
+ string(req->GetErrorMessage()));
+}
+
+TEST_F("require that server accepts getPartitionStates", ConnectedFixture) {
+ FRT_RPCRequest *req = f.getRequest("vespa.persistence.getPartitionStates");
+ f.callRpc(req, "bsIS");
+ EXPECT_EQUAL(MockProvider::GET_PARTITION_STATES, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
+ EXPECT_EQUAL(1u, req->GetReturn()->GetValue(2)._int32_array._len);
+ EXPECT_EQUAL(1u, req->GetReturn()->GetValue(3)._string_array._len);
+}
+
+TEST_F("require that server accepts listBuckets", ConnectedFixture) {
+ const uint64_t partition_id = 42;
+
+ FRT_RPCRequest *req = f.getRequest("vespa.persistence.listBuckets");
+ req->GetParams()->AddInt64(partition_id);
+ f.callRpc(req, "bsL");
+ EXPECT_EQUAL(MockProvider::LIST_BUCKETS, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
+ EXPECT_EQUAL(1u, req->GetReturn()->GetValue(2)._int64_array._len);
+ EXPECT_EQUAL(partition_id,
+ req->GetReturn()->GetValue(2)._int64_array._pt[0]);
+}
+
+TEST_F("require that server accepts setClusterState", ConnectedFixture) {
+ FRT_RPCRequest *req = f.getRequest("vespa.persistence.setClusterState");
+
+ lib::ClusterState s("version:1 storage:3 distributor:3");
+ lib::Distribution d(lib::Distribution::getDefaultDistributionConfig(3, 3));
+ ClusterState state(s, 0, d);
+ vespalib::nbostream o;
+ state.serialize(o);
+ req->GetParams()->AddData(o.c_str(), o.size());
+ f.callRpc(req, "bs");
+ EXPECT_EQUAL(MockProvider::SET_CLUSTER_STATE, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
+}
+
+TEST_F("require that server accepts setActiveState", ConnectedFixture) {
+ const uint64_t bucket_id = 21;
+ const uint64_t partition_id = 42;
+ const BucketInfo::ActiveState bucket_state = BucketInfo::NOT_ACTIVE;
+
+ FRT_RPCRequest *req = f.getRequest("vespa.persistence.setActiveState");
+ req->GetParams()->AddInt64(bucket_id);
+ req->GetParams()->AddInt64(partition_id);
+ req->GetParams()->AddInt8(bucket_state);
+ f.callRpc(req, "bs");
+ EXPECT_EQUAL(MockProvider::SET_ACTIVE_STATE, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
+}
+
+TEST_F("require that server accepts getBucketInfo", ConnectedFixture) {
+ const uint64_t bucket_id = 21;
+ const uint64_t partition_id = 42;
+
+ FRT_RPCRequest *req = f.getRequest("vespa.persistence.getBucketInfo");
+ req->GetParams()->AddInt64(bucket_id);
+ req->GetParams()->AddInt64(partition_id);
+ f.callRpc(req, "bsiiiiibb");
+ EXPECT_EQUAL(MockProvider::GET_BUCKET_INFO, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
+ EXPECT_EQUAL(1u, req->GetReturn()->GetValue(2)._intval32);
+ EXPECT_EQUAL(2u, req->GetReturn()->GetValue(3)._intval32);
+ EXPECT_EQUAL(3u, req->GetReturn()->GetValue(4)._intval32);
+ EXPECT_EQUAL(bucket_id, req->GetReturn()->GetValue(5)._intval32);
+ EXPECT_EQUAL(partition_id, req->GetReturn()->GetValue(6)._intval32);
+ EXPECT_EQUAL(static_cast<uint8_t>(BucketInfo::READY),
+ req->GetReturn()->GetValue(7)._intval8);
+ EXPECT_EQUAL(static_cast<uint8_t>(BucketInfo::ACTIVE),
+ req->GetReturn()->GetValue(8)._intval8);
+}
+
+TEST_F("require that server accepts put", ConnectedFixture) {
+ const uint64_t bucket_id = 21;
+ const uint64_t partition_id = 42;
+ const Timestamp timestamp(84);
+ Document::UP doc(new Document);
+ nbostream stream;
+ VespaDocumentSerializer serializer(stream);
+ serializer.write(*doc, document::COMPLETE);
+
+ FRT_RPCRequest *req = f.getRequest("vespa.persistence.put");
+ req->GetParams()->AddInt64(bucket_id);
+ req->GetParams()->AddInt64(partition_id);
+ req->GetParams()->AddInt64(timestamp);
+ req->GetParams()->AddData(stream.c_str(), stream.size());
+ f.callRpc(req, "bs");
+ EXPECT_EQUAL(MockProvider::PUT, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
+}
+
+void testRemove(ConnectedFixture &f, const string &rpc_name,
+ MockProvider::Function func) {
+ const uint64_t bucket_id = 21;
+ const uint64_t partition_id = 42;
+ const Timestamp timestamp(84);
+ const DocumentId id("doc:test:1");
+
+ FRT_RPCRequest *req = f.getRequest(rpc_name);
+ req->GetParams()->AddInt64(bucket_id);
+ req->GetParams()->AddInt64(partition_id);
+ req->GetParams()->AddInt64(timestamp);
+ req->GetParams()->AddString(id.toString().data(), id.toString().size());
+ f.callRpc(req, "bsb");
+ EXPECT_EQUAL(func, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
+ EXPECT_TRUE(req->GetReturn()->GetValue(2)._intval8);
+}
+
+TEST_F("require that server accepts remove by id", ConnectedFixture) {
+ testRemove(f, "vespa.persistence.removeById", MockProvider::REMOVE_BY_ID);
+}
+
+TEST_F("require that server accepts removeIfFound", ConnectedFixture) {
+ testRemove(f, "vespa.persistence.removeIfFound",
+ MockProvider::REMOVE_IF_FOUND);
+}
+
+TEST_F("require that server accepts update", ConnectedFixture) {
+ const uint64_t bucket_id = 21;
+ const uint64_t partition_id = 42;
+ const Timestamp timestamp(84);
+ DocumentUpdate update(*DataType::DOCUMENT, DocumentId("doc:test:1"));
+ vespalib::nbostream stream;
+ update.serializeHEAD(stream);
+
+ FRT_RPCRequest *req = f.getRequest("vespa.persistence.update");
+ req->GetParams()->AddInt64(bucket_id);
+ req->GetParams()->AddInt64(partition_id);
+ req->GetParams()->AddInt64(timestamp);
+ req->GetParams()->AddData(stream.c_str(), stream.size());
+ f.callRpc(req, "bsl");
+ EXPECT_EQUAL(MockProvider::UPDATE, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
+ EXPECT_EQUAL(timestamp - 10, req->GetReturn()->GetValue(2)._intval64);
+}
+
+TEST_F("require that server accepts flush", ConnectedFixture) {
+ const uint64_t bucket_id = 21;
+ const uint64_t partition_id = 42;
+
+ FRT_RPCRequest *req = f.getRequest("vespa.persistence.flush");
+ req->GetParams()->AddInt64(bucket_id);
+ req->GetParams()->AddInt64(partition_id);
+ f.callRpc(req, "bs");
+ EXPECT_EQUAL(MockProvider::FLUSH, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
+}
+
+TEST_F("require that server accepts get", ConnectedFixture) {
+ const uint64_t bucket_id = 21;
+ const uint64_t partition_id = 42;
+ const string field_set_1 = "[all]";
+ const DocumentId id("doc:test:1");
+
+ FRT_RPCRequest *req = f.getRequest("vespa.persistence.get");
+ req->GetParams()->AddInt64(bucket_id);
+ req->GetParams()->AddInt64(partition_id);
+ req->GetParams()->AddString(field_set_1.data(), field_set_1.size());
+ req->GetParams()->AddString(id.toString().data(), id.toString().size());
+ f.callRpc(req, "bslx");
+ EXPECT_EQUAL(MockProvider::GET, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
+ EXPECT_EQUAL(6u, req->GetReturn()->GetValue(2)._intval64);
+ EXPECT_EQUAL(25u, req->GetReturn()->GetValue(3)._data._len);
+}
+
+TEST_F("require that server accepts createIterator", ConnectedFixture) {
+ const uint64_t bucket_id = 21;
+ const uint64_t partition_id = 42;
+ const string doc_sel = "docsel";
+ const Timestamp timestamp_from(84);
+ const Timestamp timestamp_to(126);
+ const Timestamp timestamp_subset(168);
+ const string field_set_1 = "[all]";
+ const bool include_removes = false;
+
+ FRT_RPCRequest *req = f.getRequest("vespa.persistence.createIterator");
+ req->GetParams()->AddInt64(bucket_id);
+ req->GetParams()->AddInt64(partition_id);
+ req->GetParams()->AddString(field_set_1.data(), field_set_1.size());
+ req->GetParams()->AddString(doc_sel.data(), doc_sel.size());
+ req->GetParams()->AddInt64(timestamp_from);
+ req->GetParams()->AddInt64(timestamp_to);
+ req->GetParams()->AddInt64Array(1)[0] = timestamp_subset;
+ req->GetParams()->AddInt8(include_removes);
+
+ f.callRpc(req, "bsl");
+ EXPECT_EQUAL(MockProvider::CREATE_ITERATOR, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
+ EXPECT_EQUAL(partition_id, req->GetReturn()->GetValue(2)._intval64);
+}
+
+TEST_F("require that server accepts iterate", ConnectedFixture) {
+ const uint64_t iterator_id = 42;
+ const uint64_t max_byte_size = 21;
+
+ FRT_RPCRequest *req = f.getRequest("vespa.persistence.iterate");
+ req->GetParams()->AddInt64(iterator_id);
+ req->GetParams()->AddInt64(max_byte_size);
+ f.callRpc(req, "bsLISXb");
+ EXPECT_EQUAL(MockProvider::ITERATE, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
+ EXPECT_EQUAL(1u, req->GetReturn()->GetValue(2)._int64_array._len);
+ EXPECT_EQUAL(1u, req->GetReturn()->GetValue(3)._int32_array._len);
+ EXPECT_EQUAL(1u, req->GetReturn()->GetValue(4)._string_array._len);
+ EXPECT_EQUAL(1u, req->GetReturn()->GetValue(5)._data_array._len);
+ EXPECT_TRUE(req->GetReturn()->GetValue(6)._intval8);
+}
+
+TEST_F("require that server accepts destroyIterator", ConnectedFixture) {
+ const uint64_t iterator_id = 42;
+
+ FRT_RPCRequest *req = f.getRequest("vespa.persistence.destroyIterator");
+ req->GetParams()->AddInt64(iterator_id);
+ f.callRpc(req, "bs");
+ EXPECT_EQUAL(MockProvider::DESTROY_ITERATOR, f.mock_spi.last_called);
+}
+
+TEST_F("require that server accepts createBucket", ConnectedFixture) {
+ const uint64_t bucket_id = 21;
+ const uint64_t partition_id = 42;
+
+ FRT_RPCRequest *req = f.getRequest("vespa.persistence.createBucket");
+ req->GetParams()->AddInt64(bucket_id);
+ req->GetParams()->AddInt64(partition_id);
+ f.callRpc(req, "bs");
+ EXPECT_EQUAL(MockProvider::CREATE_BUCKET, f.mock_spi.last_called);
+}
+
+TEST_F("require that server accepts deleteBucket", ConnectedFixture) {
+ const uint64_t bucket_id = 21;
+ const uint64_t partition_id = 42;
+
+ FRT_RPCRequest *req = f.getRequest("vespa.persistence.deleteBucket");
+ req->GetParams()->AddInt64(bucket_id);
+ req->GetParams()->AddInt64(partition_id);
+ f.callRpc(req, "bs");
+ EXPECT_EQUAL(MockProvider::DELETE_BUCKET, f.mock_spi.last_called);
+}
+
+TEST_F("require that server accepts getModifiedBuckets", ConnectedFixture) {
+ FRT_RPCRequest *req = f.getRequest("vespa.persistence.getModifiedBuckets");
+ f.callRpc(req, "bsL");
+ EXPECT_EQUAL(MockProvider::GET_MODIFIED_BUCKETS, f.mock_spi.last_called);
+ EXPECT_EQUAL(2u, req->GetReturn()->GetValue(2)._int64_array._len);
+}
+
+TEST_F("require that server accepts split", ConnectedFixture) {
+ const uint64_t bucket_id_1 = 21;
+ const uint64_t partition_id_1 = 42;
+ const uint64_t bucket_id_2 = 210;
+ const uint64_t partition_id_2 = 420;
+ const uint64_t bucket_id_3 = 2100;
+ const uint64_t partition_id_3 = 4200;
+
+ FRT_RPCRequest *req = f.getRequest("vespa.persistence.split");
+ req->GetParams()->AddInt64(bucket_id_1);
+ req->GetParams()->AddInt64(partition_id_1);
+ req->GetParams()->AddInt64(bucket_id_2);
+ req->GetParams()->AddInt64(partition_id_2);
+ req->GetParams()->AddInt64(bucket_id_3);
+ req->GetParams()->AddInt64(partition_id_3);
+ f.callRpc(req, "bs");
+ EXPECT_EQUAL(MockProvider::SPLIT, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
+}
+
+TEST_F("require that server accepts join", ConnectedFixture) {
+ const uint64_t bucket_id_1 = 21;
+ const uint64_t partition_id_1 = 42;
+ const uint64_t bucket_id_2 = 210;
+ const uint64_t partition_id_2 = 420;
+ const uint64_t bucket_id_3 = 2100;
+ const uint64_t partition_id_3 = 4200;
+
+ FRT_RPCRequest *req = f.getRequest("vespa.persistence.join");
+ req->GetParams()->AddInt64(bucket_id_1);
+ req->GetParams()->AddInt64(partition_id_1);
+ req->GetParams()->AddInt64(bucket_id_2);
+ req->GetParams()->AddInt64(partition_id_2);
+ req->GetParams()->AddInt64(bucket_id_3);
+ req->GetParams()->AddInt64(partition_id_3);
+ f.callRpc(req, "bs");
+ EXPECT_EQUAL(MockProvider::JOIN, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
+}
+
+TEST_F("require that server accepts move", ConnectedFixture) {
+ const uint64_t bucket_id = 21;
+ const uint64_t from_partition_id = 42;
+ const uint64_t to_partition_id = 43;
+
+ FRT_RPCRequest *req = f.getRequest("vespa.persistence.move");
+ req->GetParams()->AddInt64(bucket_id);
+ req->GetParams()->AddInt64(from_partition_id);
+ req->GetParams()->AddInt64(to_partition_id);
+ f.callRpc(req, "bs");
+ EXPECT_EQUAL(MockProvider::MOVE, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
+}
+
+TEST_F("require that server accepts maintain", ConnectedFixture) {
+ const uint64_t bucket_id = 21;
+ const uint64_t partition_id = 42;
+ const MaintenanceLevel verification_level = HIGH;
+
+ FRT_RPCRequest *req = f.getRequest("vespa.persistence.maintain");
+ req->GetParams()->AddInt64(bucket_id);
+ req->GetParams()->AddInt64(partition_id);
+ req->GetParams()->AddInt8(verification_level);
+ f.callRpc(req, "bs");
+ EXPECT_EQUAL(MockProvider::MAINTAIN, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
+}
+
+TEST_F("require that server accepts remove_entry", ConnectedFixture) {
+ const uint64_t bucket_id = 21;
+ const uint64_t partition_id = 42;
+ const Timestamp timestamp(345);
+
+ FRT_RPCRequest *req = f.getRequest("vespa.persistence.removeEntry");
+ req->GetParams()->AddInt64(bucket_id);
+ req->GetParams()->AddInt64(partition_id);
+ req->GetParams()->AddInt64(timestamp);
+ f.callRpc(req, "bs");
+ EXPECT_EQUAL(MockProvider::REMOVE_ENTRY, f.mock_spi.last_called);
+
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
+ EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
+}
+
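+// Builds a request with zeroed dummy parameters according to param_spec
+// and expects the call to fail. The spec letters map to FRT parameter
+// types as in the switch below: 'b' int8, 'l' int64, 'L' int64 array,
+// 's' string, 'S' string array, 'x' data.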
+void checkRpcFails(const string &name, const string &param_spec, Fixture &f) {
+ TEST_STATE(name.c_str());
+ FRT_RPCRequest *req = f.getRequest("vespa.persistence." + name);
+ for (size_t i = 0; i < param_spec.size(); ++i) {
+ switch(param_spec[i]) {
+ case 'b': req->GetParams()->AddInt8(0); break;
+ case 'l': req->GetParams()->AddInt64(0); break;
+ case 'L': req->GetParams()->AddInt64Array(0); break;
+ case 's': req->GetParams()->AddString(0, 0); break;
+ case 'S': req->GetParams()->AddStringArray(0); break;
+ case 'x': req->GetParams()->AddData(0, 0); break;
+ }
+ }
+ f.failRpc(req, FRTE_RPC_METHOD_FAILED);
+}
+
+TEST_F("require that unconnected server fails all SPI calls.", Fixture)
+{
+ checkRpcFails("initialize", "", f);
+ checkRpcFails("getPartitionStates", "", f);
+ checkRpcFails("listBuckets", "l", f);
+ checkRpcFails("setClusterState", "x", f);
+ checkRpcFails("setActiveState", "llb", f);
+ checkRpcFails("getBucketInfo", "ll", f);
+ checkRpcFails("put", "lllx", f);
+ checkRpcFails("removeById", "llls", f);
+ checkRpcFails("removeIfFound", "llls", f);
+ checkRpcFails("update", "lllx", f);
+ checkRpcFails("flush", "ll", f);
+ checkRpcFails("get", "llss", f);
+ checkRpcFails("createIterator", "llssllLb", f);
+ checkRpcFails("iterate", "ll", f);
+ checkRpcFails("destroyIterator", "l", f);
+ checkRpcFails("createBucket", "ll", f);
+ checkRpcFails("deleteBucket", "ll", f);
+ checkRpcFails("getModifiedBuckets", "", f);
+ checkRpcFails("split", "llllll", f);
+ checkRpcFails("join", "llllll", f);
+ checkRpcFails("maintain", "llb", f);
+ checkRpcFails("removeEntry", "lll", f);
+}
+
+} // namespace
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/persistence/src/tests/proxy/proxy_factory_wrapper.h b/persistence/src/tests/proxy/proxy_factory_wrapper.h
new file mode 100644
index 00000000000..10f251f2beb
--- /dev/null
+++ b/persistence/src/tests/proxy/proxy_factory_wrapper.h
@@ -0,0 +1,59 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/vespalib/util/vstringfmt.h>
+#include <vespa/persistence/conformancetest/conformancetest.h>
+#include <vespa/persistence/proxy/providerstub.h>
+#include <vespa/persistence/proxy/providerproxy.h>
+#include "dummy_provider_factory.h"
+
+namespace storage {
+namespace spi {
+
+/**
+ * Generic wrapper for persistence conformance test factories. This
+ * wrapper will take any other factory and expose a factory interface
+ * that will create persistence instances that communicate with
+ * persistence instances created by the wrapped factory using the RPC
+ * persistence Proxy.
+ **/
+struct ProxyFactoryWrapper : ConformanceTest::PersistenceFactory
+{
+ typedef storage::spi::ConformanceTest::PersistenceFactory Factory;
+ typedef storage::spi::PersistenceProvider Provider;
+ typedef storage::spi::ProviderStub Server;
+ typedef storage::spi::ProviderProxy Client;
+ typedef document::DocumentTypeRepo Repo;
+
+ Factory::UP factory;
+ ProxyFactoryWrapper(Factory::UP f) : factory(std::move(f)) {}
+
+ struct Wrapper : Client {
+ DummyProviderFactory::UP provider;
+ Server::UP server;
+ Wrapper(DummyProviderFactory::UP p, Server::UP s, const Repo &repo)
+ : Client(vespalib::make_vespa_string("tcp/localhost:%u", s->getPort()), repo),
+ provider(std::move(p)),
+ server(std::move(s))
+ {}
+ };
+
+ virtual Provider::UP
+ getPersistenceImplementation(const document::DocumentTypeRepo::SP &repo,
+ const document::DocumenttypesConfig &typesCfg) {
+ DummyProviderFactory::UP provider(new DummyProviderFactory(factory->getPersistenceImplementation(repo,
+ typesCfg)));
+ Server::UP server(new Server(0, 8, *repo, *provider));
+ return Provider::UP(new Wrapper(std::move(provider), std::move(server), *repo));
+ }
+
+ virtual bool
+ supportsActiveState() const
+ {
+ return factory->supportsActiveState();
+ }
+};
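+
+// Usage sketch (illustrative only; DummyFactory is a hypothetical
+// stand-in for any concrete conformance test factory, not part of this
+// patch):
+//   Factory::UP base(new DummyFactory());
+//   ProxyFactoryWrapper wrapped(std::move(base));
+//   Provider::UP spi(wrapped.getPersistenceImplementation(repo, cfg));
+//   // spi now talks over RPC to a ProviderStub backed by `base`.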
+} // namespace spi
+} // namespace storage
+
diff --git a/persistence/src/tests/proxy/proxy_test.sh b/persistence/src/tests/proxy/proxy_test.sh
new file mode 100644
index 00000000000..a78487831d6
--- /dev/null
+++ b/persistence/src/tests/proxy/proxy_test.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
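+# Run each proxy test binary, optionally under valgrind; $VALGRIND may be
+# empty or unset, in which case the binaries run directly.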
+$VALGRIND ./persistence_providerstub_test_app
+$VALGRIND ./persistence_providerproxy_test_app
+$VALGRIND ./persistence_providerproxy_conformance_test_app
diff --git a/persistence/src/tests/proxy/proxyfactory.h b/persistence/src/tests/proxy/proxyfactory.h
new file mode 100644
index 00000000000..9de9a39e873
--- /dev/null
+++ b/persistence/src/tests/proxy/proxyfactory.h
@@ -0,0 +1,42 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/vespalib/util/vstringfmt.h>
+#include <vespa/persistence/conformancetest/conformancetest.h>
+#include <vespa/persistence/proxy/providerstub.h>
+#include <vespa/persistence/proxy/providerproxy.h>
+
+namespace storage {
+namespace spi {
+
+/**
+ * Conformance test factory creating persistence proxy clients that
+ * connect to an already running RPC persistence server on
+ * tcp/localhost:3456. Unlike ProxyFactoryWrapper, it does not wrap
+ * another factory; the server must be started separately before the
+ * test runs.
+ **/
+struct ProxyFactory : ConformanceTest::PersistenceFactory
+{
+ typedef storage::spi::PersistenceProvider Provider;
+ typedef storage::spi::ProviderProxy Client;
+ typedef document::DocumentTypeRepo Repo;
+
+ ProxyFactory() {}
+
+ virtual Provider::UP
+ getPersistenceImplementation(const document::DocumentTypeRepo::SP &repo,
+ const document::DocumenttypesConfig &) {
+ return Provider::UP(new Client("tcp/localhost:3456", *repo));
+ }
+
+ virtual bool
+ supportsActiveState() const
+ {
+ return false;
+ }
+};
+} // namespace spi
+} // namespace storage
+
diff --git a/persistence/src/tests/spi/CMakeLists.txt b/persistence/src/tests/spi/CMakeLists.txt
new file mode 100644
index 00000000000..d23c9c44209
--- /dev/null
+++ b/persistence/src/tests/spi/CMakeLists.txt
@@ -0,0 +1,6 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_library(persistence_testspi
+ SOURCES
+ clusterstatetest.cpp
+ DEPENDS
+)
diff --git a/persistence/src/tests/spi/clusterstatetest.cpp b/persistence/src/tests/spi/clusterstatetest.cpp
new file mode 100644
index 00000000000..d89c99b912d
--- /dev/null
+++ b/persistence/src/tests/spi/clusterstatetest.cpp
@@ -0,0 +1,229 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/persistence/conformancetest/conformancetest.h>
+#include <vespa/persistence/dummyimpl/dummypersistence.h>
+
+LOG_SETUP(".test.dummyimpl");
+
+namespace storage {
+namespace spi {
+
+struct ClusterStateTest : public CppUnit::TestFixture {
+ ClusterStateTest() {}
+
+ CPPUNIT_TEST_SUITE(ClusterStateTest);
+ CPPUNIT_TEST(testClusterUp);
+ CPPUNIT_TEST(testNodeUp);
+ CPPUNIT_TEST(testNodeInitializing);
+ CPPUNIT_TEST(testReady);
+ CPPUNIT_TEST_SUITE_END();
+
+ void testClusterUp();
+ void testNodeUp();
+ void testNodeInitializing();
+ void testReady();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(ClusterStateTest);
+
+void
+ClusterStateTest::testClusterUp()
+{
+ lib::Distribution d(lib::Distribution::getDefaultDistributionConfig(3, 3));
+
+ {
+ lib::ClusterState s("version:1 storage:3 distributor:3");
+ ClusterState state(s, 0, d);
+ CPPUNIT_ASSERT_EQUAL(true, state.clusterUp());
+ }
+
+ {
+ lib::ClusterState s("version:1 storage:3 .0.s:d distributor:3");
+ ClusterState state(s, 0, d);
+ CPPUNIT_ASSERT_EQUAL(true, state.clusterUp());
+ }
+
+ {
+ lib::ClusterState s("version:1 cluster:d storage:3 .0.s:d distributor:3");
+ ClusterState state(s, 0, d);
+ CPPUNIT_ASSERT_EQUAL(false, state.clusterUp());
+ }
+
+ {
+ lib::ClusterState s("version:1 cluster:d storage:3 distributor:3");
+ ClusterState state(s, 0, d);
+ CPPUNIT_ASSERT_EQUAL(false, state.clusterUp());
+ }
+}
+
+void
+ClusterStateTest::testNodeUp()
+{
+ lib::Distribution d(lib::Distribution::getDefaultDistributionConfig(3, 3));
+
+ {
+ lib::ClusterState s("version:1 storage:3 distributor:3");
+ ClusterState state(s, 0, d);
+ CPPUNIT_ASSERT_EQUAL(true, state.nodeUp());
+ }
+
+ {
+ lib::ClusterState s("version:1 storage:3 .0.s:d distributor:3");
+ ClusterState state(s, 0, d);
+ CPPUNIT_ASSERT_EQUAL(false, state.nodeUp());
+ }
+
+ {
+ lib::ClusterState s("version:1 storage:3 .0.s:d distributor:3");
+ ClusterState state(s, 1, d);
+ CPPUNIT_ASSERT_EQUAL(true, state.nodeUp());
+ }
+
+ {
+ lib::ClusterState s("version:1 cluster:d storage:3 distributor:3");
+ ClusterState state(s, 0, d);
+ CPPUNIT_ASSERT_EQUAL(true, state.nodeUp());
+ }
+
+ {
+ lib::ClusterState s("version:1 cluster:d storage:3 distributor:3 .0.s:d");
+ ClusterState state(s, 0, d);
+ CPPUNIT_ASSERT_EQUAL(true, state.nodeUp());
+ }
+
+ {
+ lib::ClusterState s("version:1 cluster:d storage:3 .0.s:d distributor:3");
+ ClusterState state(s, 0, d);
+ CPPUNIT_ASSERT_EQUAL(false, state.nodeUp());
+ }
+
+ {
+ lib::ClusterState s("version:1 cluster:d storage:3 .0.s:r distributor:3");
+ ClusterState state(s, 0, d);
+ CPPUNIT_ASSERT_EQUAL(true, state.nodeUp());
+ }
+
+ {
+ lib::ClusterState s("version:1 cluster:d storage:3 .0.s:i distributor:3");
+ ClusterState state(s, 0, d);
+ CPPUNIT_ASSERT_EQUAL(true, state.nodeUp());
+ }
+}
+
+namespace {
+
+bool
+nodeMarkedAsInitializingInState(const std::string& stateStr,
+ const lib::Distribution& d,
+ uint16_t node)
+{
+ lib::ClusterState s(stateStr);
+ ClusterState state(s, node, d);
+ return state.nodeInitializing();
+}
+
+} // anon ns
+
+void
+ClusterStateTest::testNodeInitializing()
+{
+ lib::Distribution d(lib::Distribution::getDefaultDistributionConfig(3, 3));
+
+ CPPUNIT_ASSERT(!nodeMarkedAsInitializingInState(
+ "version:1 storage:3 distributor:3", d, 0));
+ CPPUNIT_ASSERT(nodeMarkedAsInitializingInState(
+ "version:1 storage:3 .0.s:i distributor:3", d, 0));
+ CPPUNIT_ASSERT(!nodeMarkedAsInitializingInState(
+ "version:1 storage:3 .0.s:i distributor:3", d, 1));
+ // To mirror nodeUp functionality, we ignore cluster state.
+ CPPUNIT_ASSERT(nodeMarkedAsInitializingInState(
+ "version:1 cluster:d storage:3 .0.s:i distributor:3", d, 0));
+ // Distributors don't technically have init state, but just go with it.
+ CPPUNIT_ASSERT(!nodeMarkedAsInitializingInState(
+ "version:1 storage:3 distributor:3 .0.s:i", d, 0));
+ CPPUNIT_ASSERT(!nodeMarkedAsInitializingInState(
+ "version:1 storage:3 .0.s:d distributor:3", d, 0));
+ CPPUNIT_ASSERT(!nodeMarkedAsInitializingInState(
+ "version:1 storage:3 .0.s:r distributor:3", d, 0));
+ CPPUNIT_ASSERT(!nodeMarkedAsInitializingInState(
+ "version:1 storage:3 .0.s:m distributor:3", d, 0));
+}
+
+namespace {
+
+lib::Distribution::DistributionConfig getCfg(uint16_t redundancy,
+ uint16_t readyCopies)
+{
+ lib::Distribution::DistributionConfig config(
+ lib::Distribution::getDefaultDistributionConfig(redundancy, 100));
+ config.readyCopies = readyCopies;
+ return config;
+}
+
+}
+
+void
+ClusterStateTest::testReady()
+{
+ lib::ClusterState s("version:1 storage:3 distributor:3");
+
+ Bucket b(document::BucketId(16, 1), PartitionId(0));
+
+ // With 3 copies, this bucket has ideal state 0, 2, 1
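+    // With readyCopies = N, the first N nodes in that ideal order should
+    // report shouldBeReady() == true and the remaining nodes should not.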
+
+ // Nothing ready with 0 ready copies.
+ {
+ lib::Distribution d(getCfg(3, 0));
+ ClusterState state(s, 0, d);
+ CPPUNIT_ASSERT_EQUAL(false, state.shouldBeReady(b));
+ }
+
+ // Only node 0 with 1 ready copy.
+ for (uint32_t i = 0; i < 3; ++i) {
+ lib::Distribution d(getCfg(3, 1));
+ ClusterState state(s, i, d);
+ CPPUNIT_ASSERT_EQUAL(i == 0, state.shouldBeReady(b));
+ }
+
+ // All of them with 3 ready copies
+ for (uint32_t i = 0; i < 3; ++i) {
+ lib::Distribution d(getCfg(3, 3));
+ ClusterState state(s, i, d);
+ CPPUNIT_ASSERT_EQUAL(true, state.shouldBeReady(b));
+ }
+
+    // Node 0 and node 2 with 2 ready copies.
+ for (uint32_t i = 0; i < 3; ++i) {
+ lib::Distribution d(getCfg(3, 2));
+ ClusterState state(s, i, d);
+ CPPUNIT_ASSERT_EQUAL(i == 0 || i == 2, state.shouldBeReady(b));
+ }
+
+ lib::ClusterState s2("version:1 storage:3 .0.s:d distributor:3");
+
+ // The two others should be ready now
+ for (uint32_t i = 0; i < 3; ++i) {
+ lib::Distribution d(getCfg(3, 2));
+ ClusterState state(s2, i, d);
+ CPPUNIT_ASSERT_EQUAL(i == 1 || i == 2, state.shouldBeReady(b));
+ }
+
+ for (uint32_t i = 0; i < 3; ++i) {
+ lib::Distribution d(getCfg(3, 1));
+ ClusterState state(s2, i, d);
+ CPPUNIT_ASSERT_EQUAL(i == 2, state.shouldBeReady(b));
+ }
+}
+
+} // spi
+} // storage
diff --git a/persistence/src/tests/testrunner.cpp b/persistence/src/tests/testrunner.cpp
new file mode 100644
index 00000000000..16027870c47
--- /dev/null
+++ b/persistence/src/tests/testrunner.cpp
@@ -0,0 +1,15 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <iostream>
+#include <vespa/log/log.h>
+#include <vespa/vdstestlib/cppunit/cppunittestrunner.h>
+
+LOG_SETUP("persistencecppunittests");
+
+int
+main(int argc, char **argv)
+{
+ vdstestlib::CppUnitTestRunner testRunner;
+ return testRunner.run(argc, argv);
+}
diff --git a/persistence/src/vespa/persistence/.gitignore b/persistence/src/vespa/persistence/.gitignore
new file mode 100644
index 00000000000..db7f6d217bd
--- /dev/null
+++ b/persistence/src/vespa/persistence/.gitignore
@@ -0,0 +1,5 @@
+/.depend
+/Makefile
+/features.h
+/libpersistence.so.5.1
+/libpersistence_conformancetest.so.5.1
diff --git a/persistence/src/vespa/persistence/CMakeLists.txt b/persistence/src/vespa/persistence/CMakeLists.txt
new file mode 100644
index 00000000000..a45ca2e25f4
--- /dev/null
+++ b/persistence/src/vespa/persistence/CMakeLists.txt
@@ -0,0 +1,15 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_library(persistence
+ SOURCES
+ $<TARGET_OBJECTS:persistence_dummyimpl>
+ $<TARGET_OBJECTS:persistence_spi>
+ $<TARGET_OBJECTS:persistence_proxy>
+ INSTALL lib64
+ DEPENDS
+)
+vespa_add_library(persistence_persistence_conformancetest
+ SOURCES
+ $<TARGET_OBJECTS:persistence_conformancetest_lib>
+ INSTALL lib64
+ DEPENDS
+)
diff --git a/persistence/src/vespa/persistence/conformancetest/.gitignore b/persistence/src/vespa/persistence/conformancetest/.gitignore
new file mode 100644
index 00000000000..7e7c0fe7fae
--- /dev/null
+++ b/persistence/src/vespa/persistence/conformancetest/.gitignore
@@ -0,0 +1,2 @@
+/.depend
+/Makefile
diff --git a/persistence/src/vespa/persistence/conformancetest/CMakeLists.txt b/persistence/src/vespa/persistence/conformancetest/CMakeLists.txt
new file mode 100644
index 00000000000..40271cf8cdc
--- /dev/null
+++ b/persistence/src/vespa/persistence/conformancetest/CMakeLists.txt
@@ -0,0 +1,7 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_library(persistence_conformancetest_lib OBJECT
+ SOURCES
+ conformancetest.cpp
+ DEPENDS
+ vdstestlib
+)
diff --git a/persistence/src/vespa/persistence/conformancetest/conformancetest.cpp b/persistence/src/vespa/persistence/conformancetest/conformancetest.cpp
new file mode 100644
index 00000000000..4de66da599f
--- /dev/null
+++ b/persistence/src/vespa/persistence/conformancetest/conformancetest.cpp
@@ -0,0 +1,2314 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+
+#include <vespa/document/base/testdocman.h>
+#include <vespa/log/log.h>
+#include <vespa/persistence/conformancetest/conformancetest.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/document/fieldset/fieldsets.h>
+
+LOG_SETUP(".test.conformance");
+
+using document::BucketId;
+
+namespace storage {
+namespace spi {
+
+namespace {
+
+LoadType defaultLoadType(0, "default");
+
+PersistenceProvider::UP getSpi(ConformanceTest::PersistenceFactory &factory,
+ const document::TestDocMan &testDocMan) {
+ PersistenceProvider::UP result(factory.getPersistenceImplementation(
+ testDocMan.getTypeRepoSP(), *testDocMan.getTypeConfig()));
+ CPPUNIT_ASSERT(!result->initialize().hasError());
+ CPPUNIT_ASSERT(!result->getPartitionStates().hasError());
+ return result;
+}
+
+enum SELECTION_FIELDS
+{
+ METADATA_ONLY = 0,
+ FIELDS_HEADER = 1,
+ FIELDS_BODY = 2
+};
+
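+// Wraps spi.createIterator with a field set derived from the FIELDS_*
+// bits above: body fields imply AllFields, header-only implies
+// HeaderFields, and metadata-only falls back to DocIdOnly.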
+CreateIteratorResult
+createIterator(PersistenceProvider& spi,
+ const Bucket& b,
+ const Selection& sel,
+ IncludedVersions versions = NEWEST_DOCUMENT_ONLY,
+ int fields = FIELDS_HEADER | FIELDS_BODY)
+{
+ document::FieldSet::UP fieldSet;
+ if (fields & FIELDS_BODY) {
+ fieldSet.reset(new document::AllFields());
+ } else if (fields & FIELDS_HEADER) {
+ fieldSet.reset(new document::HeaderFields());
+ } else {
+ fieldSet.reset(new document::DocIdOnly());
+ }
+
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ return spi.createIterator(b, *fieldSet, sel, versions, context);
+}
+
+Selection
+createSelection(const string& docSel)
+{
+ return Selection(DocumentSelection(docSel));
+}
+
+
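+// Builds a ClusterState for a one-node cluster where storage node 0 has
+// the given lib::State; the distribution uses redundancy 1 and
+// readyCopies 1.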
+ClusterState
+createClusterState(const lib::State& nodeState = lib::State::UP)
+{
+ using storage::lib::Distribution;
+ using storage::lib::Node;
+ using storage::lib::NodeState;
+ using storage::lib::NodeType;
+ using storage::lib::State;
+ using vespa::config::content::StorDistributionConfigBuilder;
+ typedef StorDistributionConfigBuilder::Group Group;
+ typedef Group::Nodes Nodes;
+ storage::lib::ClusterState cstate;
+ StorDistributionConfigBuilder dc;
+
+ cstate.setNodeState(Node(NodeType::STORAGE, 0),
+ NodeState(NodeType::STORAGE,
+ nodeState,
+ "dummy desc",
+ 1.0,
+ 1));
+ cstate.setClusterState(State::UP);
+ dc.redundancy = 1;
+ dc.readyCopies = 1;
+ dc.group.push_back(Group());
+ Group &g(dc.group[0]);
+ g.index = "invalid";
+ g.name = "invalid";
+ g.capacity = 1.0;
+ g.partitions = "";
+ g.nodes.push_back(Nodes());
+ Nodes &n(g.nodes[0]);
+ n.index = 0;
+ Distribution dist(dc);
+ return ClusterState(cstate, 0, dist);
+}
+
+struct DocAndTimestamp
+{
+ Document::SP doc;
+ spi::Timestamp timestamp;
+
+ DocAndTimestamp(const Document::SP& docptr, spi::Timestamp ts)
+ : doc(docptr), timestamp(ts)
+ {
+ }
+};
+
+/**
+ * A chunk represents the set of data received by the caller for any
+ * single invocation of iterate().
+ */
+struct Chunk
+{
+ std::vector<DocEntry::LP> _entries;
+};
+
+struct DocEntryIndirectTimestampComparator
+{
+ bool operator()(const DocEntry::LP& e1,
+ const DocEntry::LP& e2) const
+ {
+ return e1->getTimestamp() < e2->getTimestamp();
+ }
+};
+
+/**
+ * Do a full bucket iteration, returning a vector of DocEntry chunks.
+ */
+std::vector<Chunk>
+doIterate(PersistenceProvider& spi,
+ IteratorId id,
+ uint64_t maxByteSize,
+ size_t maxChunks = 0,
+ bool allowEmptyResult = false)
+{
+ (void)allowEmptyResult;
+
+ std::vector<Chunk> chunks;
+
+ while (true) {
+ std::vector<DocEntry::LP> entries;
+
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ IterateResult result(spi.iterate(id, maxByteSize, context));
+
+ CPPUNIT_ASSERT_EQUAL(Result::NONE, result.getErrorCode());
+
+ for (size_t i = 0; i < result.getEntries().size(); ++i) {
+ entries.push_back(result.getEntries()[i]);
+ }
+ chunks.push_back(Chunk());
+ chunks.back()._entries.swap(entries);
+ if (result.isCompleted()
+ || (maxChunks != 0 && chunks.size() >= maxChunks))
+ {
+ break;
+ }
+ }
+ return chunks;
+}
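+
+// Example use (mirrors the tests below):
+//   std::vector<Chunk> chunks(doIterate(*spi, iter.getIteratorId(), 4096));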
+
+size_t
+getRemoveEntryCount(const std::vector<spi::DocEntry::LP>& entries)
+{
+ size_t ret = 0;
+ for (size_t i = 0; i < entries.size(); ++i) {
+ if (entries[i]->isRemove()) {
+ ++ret;
+ }
+ }
+ return ret;
+}
+
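+// Flattens the chunk list into a single vector of entries sorted by
+// ascending timestamp.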
+std::vector<DocEntry::LP>
+getEntriesFromChunks(const std::vector<Chunk>& chunks)
+{
+ std::vector<spi::DocEntry::LP> ret;
+ for (size_t chunk = 0; chunk < chunks.size(); ++chunk) {
+ for (size_t i = 0; i < chunks[chunk]._entries.size(); ++i) {
+ ret.push_back(chunks[chunk]._entries[i]);
+ }
+ }
+ std::sort(ret.begin(),
+ ret.end(),
+ DocEntryIndirectTimestampComparator());
+ return ret;
+}
+
+
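+// Iterates an entire bucket in one pass (unbounded byte budget) and
+// returns the entries sorted by timestamp; returns an empty vector if
+// any iterate() call fails.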
+std::vector<DocEntry::LP>
+iterateBucket(PersistenceProvider& spi,
+ const Bucket& bucket,
+ IncludedVersions versions)
+{
+ std::vector<DocEntry::LP> ret;
+ DocumentSelection docSel("");
+ Selection sel(docSel);
+
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ CreateIteratorResult iter = spi.createIterator(
+ bucket,
+ document::AllFields(),
+ sel,
+ versions,
+ context);
+
+ CPPUNIT_ASSERT_EQUAL(Result::NONE, iter.getErrorCode());
+
+ while (true) {
+ IterateResult result =
+ spi.iterate(iter.getIteratorId(),
+ std::numeric_limits<int64_t>().max(), context);
+ if (result.getErrorCode() != Result::NONE) {
+ return std::vector<DocEntry::LP>();
+ }
+ for (size_t i = 0; i < result.getEntries().size(); ++i) {
+ ret.push_back(result.getEntries()[i]);
+ }
+ if (result.isCompleted()) {
+ break;
+ }
+ }
+
+ spi.destroyIterator(iter.getIteratorId(), context);
+ std::sort(ret.begin(),
+ ret.end(),
+ DocEntryIndirectTimestampComparator());
+ return ret;
+}
+
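+// Verifies that the iterated chunks contain exactly the wanted documents
+// (with matching timestamps and serialized sizes) plus remove entries for
+// exactly the ids in `removes`.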
+void
+verifyDocs(const std::vector<DocAndTimestamp>& wanted,
+ const std::vector<Chunk>& chunks,
+ const std::set<string>& removes = std::set<string>())
+{
+ std::vector<DocEntry::LP> retrieved(
+ getEntriesFromChunks(chunks));
+ size_t removeCount = getRemoveEntryCount(retrieved);
+ // Ensure that we've got the correct number of puts and removes
+ CPPUNIT_ASSERT_EQUAL(removes.size(), removeCount);
+ CPPUNIT_ASSERT_EQUAL(wanted.size(), retrieved.size() - removeCount);
+
+ size_t wantedIdx = 0;
+ for (size_t i = 0; i < retrieved.size(); ++i) {
+ DocEntry& entry(*retrieved[i]);
+ if (entry.getDocument() != 0) {
+ if (!(*wanted[wantedIdx].doc == *entry.getDocument())) {
+ std::ostringstream ss;
+ ss << "Documents differ! Wanted:\n"
+ << wanted[wantedIdx].doc->toString(true)
+ << "\n\nGot:\n"
+ << entry.getDocument()->toString(true);
+ CPPUNIT_FAIL(ss.str());
+ }
+ CPPUNIT_ASSERT_EQUAL(wanted[wantedIdx].timestamp, entry.getTimestamp());
+ size_t serSize = wanted[wantedIdx].doc->serialize()->getLength();
+ CPPUNIT_ASSERT_EQUAL(serSize + sizeof(DocEntry), size_t(entry.getSize()));
+ CPPUNIT_ASSERT_EQUAL(serSize, size_t(entry.getDocumentSize()));
+ ++wantedIdx;
+ } else {
+ // Remove-entry
+ CPPUNIT_ASSERT(entry.getDocumentId() != 0);
+ size_t serSize = entry.getDocumentId()->getSerializedSize();
+ CPPUNIT_ASSERT_EQUAL(serSize + sizeof(DocEntry), size_t(entry.getSize()));
+ CPPUNIT_ASSERT_EQUAL(serSize, size_t(entry.getDocumentSize()));
+ if (removes.find(entry.getDocumentId()->toString()) == removes.end()) {
+ std::ostringstream ss;
+ ss << "Got unexpected remove entry for document id "
+ << *entry.getDocumentId();
+ CPPUNIT_FAIL(ss.str());
+ }
+ }
+ }
+}
+
+// Feed numDocs documents, starting from timestamp 1000
+std::vector<DocAndTimestamp>
+feedDocs(PersistenceProvider& spi,
+ document::TestDocMan& testDocMan,
+ Bucket& bucket,
+ size_t numDocs,
+ uint32_t minSize = 110,
+ uint32_t maxSize = 110)
+{
+ std::vector<DocAndTimestamp> docs;
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ for (uint32_t i = 0; i < numDocs; ++i) {
+ Document::SP doc(
+ testDocMan.createRandomDocumentAtLocation(
+ bucket.getBucketId().getId() & 0xffffffff,
+ i,
+ minSize,
+ maxSize));
+ Result result = spi.put(bucket, Timestamp(1000 + i), doc, context);
+ CPPUNIT_ASSERT(!result.hasError());
+ docs.push_back(DocAndTimestamp(doc, Timestamp(1000 + i)));
+ }
+    CPPUNIT_ASSERT_EQUAL(Result(), Result(spi.flush(bucket, context)));
+ return docs;
+}
+
+} // namespace
+
+void ConformanceTest::testBasics() {
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+ Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+ Document::SP doc2 = testDocMan.createRandomDocumentAtLocation(0x01, 2);
+ spi->createBucket(bucket, context);
+ CPPUNIT_ASSERT_EQUAL(
+ Result(),
+ Result(spi->put(bucket, Timestamp(1), doc1, context)));
+
+ CPPUNIT_ASSERT_EQUAL(
+ Result(),
+ Result(spi->put(bucket, Timestamp(2), doc2, context)));
+
+ CPPUNIT_ASSERT_EQUAL(
+ Result(),
+ Result(spi->remove(bucket, Timestamp(3), doc1->getId(), context)));
+
+ CPPUNIT_ASSERT_EQUAL(Result(), Result(spi->flush(bucket, context)));
+
+ // Iterate first without removes, then with.
+ for (int iterPass = 0; iterPass < 2; ++iterPass) {
+ bool includeRemoves = (iterPass == 1);
+
+ DocumentSelection docSel("true");
+ Selection sel(docSel);
+
+ CreateIteratorResult iter = spi->createIterator(
+ bucket,
+ document::AllFields(),
+ sel,
+ includeRemoves
+ ? NEWEST_DOCUMENT_OR_REMOVE : NEWEST_DOCUMENT_ONLY,
+ context);
+
+ CPPUNIT_ASSERT_EQUAL(Result(), Result(iter));
+
+ IterateResult result =
+ spi->iterate(iter.getIteratorId(),
+ std::numeric_limits<int64_t>().max(), context);
+
+ CPPUNIT_ASSERT_EQUAL(Result(), Result(result));
+ CPPUNIT_ASSERT(result.isCompleted());
+ spi->destroyIterator(iter.getIteratorId(), context);
+
+ Timestamp timeDoc1(0);
+ Timestamp timeDoc2(0);
+ Timestamp timeRemoveDoc1(0);
+
+ for (uint32_t i=0; i<result.getEntries().size(); ++i) {
+ const DocumentId* did = result.getEntries()[i]->getDocumentId();
+ CPPUNIT_ASSERT_MSG("Supplied FieldSet requires id", did != 0);
+
+ if (*did == doc1->getId()) {
+ if (!includeRemoves) {
+ CPPUNIT_FAIL("Got removed document 1 when iterating without removes");
+ }
+ if (result.getEntries()[i]->isRemove()) {
+ timeRemoveDoc1 = result.getEntries()[i]->getTimestamp();
+ } else {
+ timeDoc1 = result.getEntries()[i]->getTimestamp();
+ }
+ } else if (*did == doc2->getId()) {
+ if (result.getEntries()[i]->isRemove()) {
+ CPPUNIT_FAIL("Document 2 should not be removed");
+ } else {
+ timeDoc2 = result.getEntries()[i]->getTimestamp();
+ }
+ } else {
+ CPPUNIT_FAIL("Unknown document " + did->toString());
+ }
+ }
+
+ CPPUNIT_ASSERT_EQUAL(Timestamp(2), timeDoc2);
+ CPPUNIT_ASSERT(timeDoc1 == Timestamp(0) || timeRemoveDoc1 != Timestamp(0));
+ }
+}
+
+void ConformanceTest::testListBuckets() {
+    // TODO: Enable CPPUNIT_TEST(testListBuckets) once the provider in storage supports it.
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+
+ PartitionId partId(0);
+ BucketId bucketId1(8, 0x01);
+ BucketId bucketId2(8, 0x02);
+ BucketId bucketId3(8, 0x03);
+ Bucket bucket1(bucketId1, partId);
+ Bucket bucket2(bucketId2, partId);
+ Bucket bucket3(bucketId3, partId);
+
+ Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+ Document::SP doc2 = testDocMan.createRandomDocumentAtLocation(0x02, 2);
+ Document::SP doc3 = testDocMan.createRandomDocumentAtLocation(0x03, 3);
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ spi->createBucket(bucket1, context);
+ spi->createBucket(bucket2, context);
+ spi->createBucket(bucket3, context);
+
+ spi->put(bucket1, Timestamp(1), doc1, context);
+ spi->flush(bucket1, context);
+ spi->put(bucket2, Timestamp(2), doc2, context);
+ spi->flush(bucket2, context);
+ spi->put(bucket3, Timestamp(3), doc3, context);
+ spi->flush(bucket3, context);
+
+ {
+ BucketIdListResult result = spi->listBuckets(PartitionId(1));
+ CPPUNIT_ASSERT(result.getList().empty());
+ }
+
+ {
+ BucketIdListResult result = spi->listBuckets(partId);
+ const BucketIdListResult::List &bucketList = result.getList();
+ CPPUNIT_ASSERT_EQUAL(3u, (uint32_t)bucketList.size());
+ CPPUNIT_ASSERT(std::find(bucketList.begin(), bucketList.end(), bucketId1) != bucketList.end());
+ CPPUNIT_ASSERT(std::find(bucketList.begin(), bucketList.end(), bucketId2) != bucketList.end());
+ CPPUNIT_ASSERT(std::find(bucketList.begin(), bucketList.end(), bucketId3) != bucketList.end());
+ }
+}
+
+
+void ConformanceTest::testBucketInfo() {
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+
+ Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+
+ Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+ Document::SP doc2 = testDocMan.createRandomDocumentAtLocation(0x01, 2);
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ spi->createBucket(bucket, context);
+
+ spi->put(bucket, Timestamp(2), doc2, context);
+
+ const BucketInfo info1 = spi->getBucketInfo(bucket).getBucketInfo();
+ spi->flush(bucket, context);
+
+ {
+ CPPUNIT_ASSERT_EQUAL(1, (int)info1.getDocumentCount());
+ CPPUNIT_ASSERT(info1.getChecksum() != 0);
+ }
+
+ spi->put(bucket, Timestamp(3), doc1, context);
+
+ const BucketInfo info2 = spi->getBucketInfo(bucket).getBucketInfo();
+ spi->flush(bucket, context);
+
+ {
+ CPPUNIT_ASSERT_EQUAL(2, (int)info2.getDocumentCount());
+ CPPUNIT_ASSERT(info2.getChecksum() != 0);
+ CPPUNIT_ASSERT(info2.getChecksum() != info1.getChecksum());
+ }
+
+ spi->put(bucket, Timestamp(4), doc1, context);
+
+ const BucketInfo info3 = spi->getBucketInfo(bucket).getBucketInfo();
+ spi->flush(bucket, context);
+
+ {
+ CPPUNIT_ASSERT_EQUAL(2, (int)info3.getDocumentCount());
+ CPPUNIT_ASSERT(info3.getChecksum() != 0);
+ CPPUNIT_ASSERT(info3.getChecksum() != info2.getChecksum());
+ }
+
+ spi->remove(bucket, Timestamp(5), doc1->getId(), context);
+
+ const BucketInfo info4 = spi->getBucketInfo(bucket).getBucketInfo();
+ spi->flush(bucket, context);
+
+ {
+ CPPUNIT_ASSERT_EQUAL(1, (int)info4.getDocumentCount());
+ CPPUNIT_ASSERT(info4.getChecksum() != 0);
+        // Only doc2 remains, as when info1 was sampled, so checksums match.
+        CPPUNIT_ASSERT_EQUAL(info1.getChecksum(), info4.getChecksum());
+ }
+}
+
+void
+ConformanceTest::testOrderIndependentBucketInfo()
+{
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+
+ Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+
+ Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+ Document::SP doc2 = testDocMan.createRandomDocumentAtLocation(0x01, 2);
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ spi->createBucket(bucket, context);
+
+ BucketChecksum checksumOrdered(0);
+ {
+ spi->put(bucket, Timestamp(2), doc1, context);
+ spi->put(bucket, Timestamp(3), doc2, context);
+ spi->flush(bucket, context);
+ const BucketInfo info(spi->getBucketInfo(bucket).getBucketInfo());
+
+ checksumOrdered = info.getChecksum();
+ CPPUNIT_ASSERT(checksumOrdered != 0);
+ }
+
+ spi->deleteBucket(bucket, context);
+ spi->createBucket(bucket, context);
+ {
+ const BucketInfo info(spi->getBucketInfo(bucket).getBucketInfo());
+ CPPUNIT_ASSERT_EQUAL(BucketChecksum(0), info.getChecksum());
+ }
+
+ BucketChecksum checksumUnordered(0);
+ {
+ // Swap order of puts
+ spi->put(bucket, Timestamp(3), doc2, context);
+ spi->put(bucket, Timestamp(2), doc1, context);
+ spi->flush(bucket, context);
+ const BucketInfo info(spi->getBucketInfo(bucket).getBucketInfo());
+
+ checksumUnordered = info.getChecksum();
+ CPPUNIT_ASSERT(checksumUnordered != 0);
+ }
+ CPPUNIT_ASSERT_EQUAL(checksumOrdered, checksumUnordered);
+}
+
+void ConformanceTest::testPut() {
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+ Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+ Document::SP doc2 = testDocMan.createRandomDocumentAtLocation(0x01, 2);
+ spi->createBucket(bucket, context);
+
+ Result result = spi->put(bucket, Timestamp(3), doc1, context);
+
+ {
+ const BucketInfo info = spi->getBucketInfo(bucket).getBucketInfo();
+ spi->flush(bucket, context);
+
+ CPPUNIT_ASSERT_EQUAL(1, (int)info.getDocumentCount());
+ CPPUNIT_ASSERT(info.getEntryCount() >= info.getDocumentCount());
+ CPPUNIT_ASSERT(info.getChecksum() != 0);
+ CPPUNIT_ASSERT(info.getDocumentSize() > 0);
+ CPPUNIT_ASSERT(info.getUsedSize() >= info.getDocumentSize());
+ }
+}
+
+void ConformanceTest::testPutNewDocumentVersion() {
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+ Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+ Document::SP doc2(doc1->clone());
+ doc2->setValue("content", document::StringFieldValue("hiho silver"));
+ spi->createBucket(bucket, context);
+
+ Result result = spi->put(bucket, Timestamp(3), doc1, context);
+ {
+ const BucketInfo info = spi->getBucketInfo(bucket).getBucketInfo();
+ spi->flush(bucket, context);
+
+ CPPUNIT_ASSERT_EQUAL(1, (int)info.getDocumentCount());
+ CPPUNIT_ASSERT(info.getEntryCount() >= info.getDocumentCount());
+ CPPUNIT_ASSERT(info.getChecksum() != 0);
+ CPPUNIT_ASSERT(info.getDocumentSize() > 0);
+ CPPUNIT_ASSERT(info.getUsedSize() >= info.getDocumentSize());
+ }
+
+ result = spi->put(bucket, Timestamp(4), doc2, context);
+ {
+ const BucketInfo info = spi->getBucketInfo(bucket).getBucketInfo();
+ spi->flush(bucket, context);
+
+ CPPUNIT_ASSERT_EQUAL(1, (int)info.getDocumentCount());
+ CPPUNIT_ASSERT(info.getEntryCount() >= info.getDocumentCount());
+ CPPUNIT_ASSERT(info.getChecksum() != 0);
+ CPPUNIT_ASSERT(info.getDocumentSize() > 0);
+ CPPUNIT_ASSERT(info.getUsedSize() >= info.getDocumentSize());
+ }
+
+ GetResult gr = spi->get(bucket, document::AllFields(), doc1->getId(),
+ context);
+
+ CPPUNIT_ASSERT_EQUAL(Result::NONE, gr.getErrorCode());
+ CPPUNIT_ASSERT_EQUAL(Timestamp(4), gr.getTimestamp());
+
+ if (!((*doc2)==gr.getDocument())) {
+ std::cerr << "Document returned is not the expected one: \n"
+ << "Expected: " << doc2->toString(true) << "\n"
+ << "Got: " << gr.getDocument().toString(true) << "\n";
+
+ CPPUNIT_ASSERT(false);
+ }
+}
+
+void ConformanceTest::testPutOlderDocumentVersion() {
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+ Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+ Document::SP doc2(doc1->clone());
+ doc2->setValue("content", document::StringFieldValue("hiho silver"));
+ spi->createBucket(bucket, context);
+
+ Result result = spi->put(bucket, Timestamp(5), doc1, context);
+ const BucketInfo info1 = spi->getBucketInfo(bucket).getBucketInfo();
+ spi->flush(bucket, context);
+ {
+ CPPUNIT_ASSERT_EQUAL(1, (int)info1.getDocumentCount());
+ CPPUNIT_ASSERT(info1.getEntryCount() >= info1.getDocumentCount());
+ CPPUNIT_ASSERT(info1.getChecksum() != 0);
+ CPPUNIT_ASSERT(info1.getDocumentSize() > 0);
+ CPPUNIT_ASSERT(info1.getUsedSize() >= info1.getDocumentSize());
+ }
+
+ result = spi->put(bucket, Timestamp(4), doc2, context);
+ {
+ const BucketInfo info2 = spi->getBucketInfo(bucket).getBucketInfo();
+ spi->flush(bucket, context);
+
+ CPPUNIT_ASSERT_EQUAL(1, (int)info2.getDocumentCount());
+ CPPUNIT_ASSERT(info2.getEntryCount() >= info1.getDocumentCount());
+ CPPUNIT_ASSERT_EQUAL(info1.getChecksum(), info2.getChecksum());
+ CPPUNIT_ASSERT_EQUAL(info1.getDocumentSize(),
+ info2.getDocumentSize());
+ CPPUNIT_ASSERT(info2.getUsedSize() >= info1.getDocumentSize());
+ }
+
+ GetResult gr = spi->get(bucket, document::AllFields(), doc1->getId(),
+ context);
+
+ CPPUNIT_ASSERT_EQUAL(Result::NONE, gr.getErrorCode());
+ CPPUNIT_ASSERT_EQUAL(Timestamp(5), gr.getTimestamp());
+ CPPUNIT_ASSERT_EQUAL(*doc1, gr.getDocument());
+}
+
+void ConformanceTest::testPutDuplicate() {
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+ Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+ spi->createBucket(bucket, context);
+ CPPUNIT_ASSERT_EQUAL(Result(),
+ spi->put(bucket, Timestamp(3), doc1, context));
+
+ BucketChecksum checksum;
+ {
+ const BucketInfo info = spi->getBucketInfo(bucket).getBucketInfo();
+ spi->flush(bucket, context);
+ CPPUNIT_ASSERT_EQUAL(1, (int)info.getDocumentCount());
+ checksum = info.getChecksum();
+ }
+ CPPUNIT_ASSERT_EQUAL(Result(),
+ spi->put(bucket, Timestamp(3), doc1, context));
+
+ {
+ const BucketInfo info = spi->getBucketInfo(bucket).getBucketInfo();
+ spi->flush(bucket, context);
+ CPPUNIT_ASSERT_EQUAL(1, (int)info.getDocumentCount());
+ CPPUNIT_ASSERT_EQUAL(checksum, info.getChecksum());
+ }
+ std::vector<DocEntry::LP> entries(
+ iterateBucket(*spi, bucket, ALL_VERSIONS));
+ CPPUNIT_ASSERT_EQUAL(size_t(1), entries.size());
+}
+
+void ConformanceTest::testRemove() {
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+ Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+ Document::SP doc2 = testDocMan.createRandomDocumentAtLocation(0x01, 2);
+ spi->createBucket(bucket, context);
+
+ Result result = spi->put(bucket, Timestamp(3), doc1, context);
+
+ {
+ const BucketInfo info = spi->getBucketInfo(bucket).getBucketInfo();
+ spi->flush(bucket, context);
+
+ CPPUNIT_ASSERT_EQUAL(1, (int)info.getDocumentCount());
+ CPPUNIT_ASSERT(info.getChecksum() != 0);
+
+ std::vector<DocEntry::LP> entries(
+ iterateBucket(*spi, bucket, NEWEST_DOCUMENT_ONLY));
+ CPPUNIT_ASSERT_EQUAL(size_t(1), entries.size());
+ }
+
+ // Add a remove entry
+ RemoveResult result2 = spi->remove(bucket,
+ Timestamp(5),
+ doc1->getId(),
+ context);
+
+ {
+ const BucketInfo info = spi->getBucketInfo(bucket).getBucketInfo();
+ spi->flush(bucket, context);
+
+ CPPUNIT_ASSERT_EQUAL(0, (int)info.getDocumentCount());
+ CPPUNIT_ASSERT_EQUAL(0, (int)info.getChecksum());
+ CPPUNIT_ASSERT_EQUAL(true, result2.wasFound());
+ }
+ {
+ std::vector<DocEntry::LP> entries(iterateBucket(*spi,
+ bucket,
+ NEWEST_DOCUMENT_ONLY));
+ CPPUNIT_ASSERT_EQUAL(size_t(0), entries.size());
+ }
+ {
+ std::vector<DocEntry::LP> entries(iterateBucket(*spi,
+ bucket,
+ NEWEST_DOCUMENT_OR_REMOVE));
+
+ CPPUNIT_ASSERT_EQUAL(size_t(1), entries.size());
+ }
+
+ // Result tagged as document not found
+ RemoveResult result3 = spi->remove(bucket,
+ Timestamp(7),
+ doc1->getId(),
+ context);
+ {
+ const BucketInfo info = spi->getBucketInfo(bucket).getBucketInfo();
+ spi->flush(bucket, context);
+
+ CPPUNIT_ASSERT_EQUAL(0, (int)info.getDocumentCount());
+ CPPUNIT_ASSERT_EQUAL(0, (int)info.getChecksum());
+ CPPUNIT_ASSERT_EQUAL(false, result3.wasFound());
+ }
+
+ Result result4 = spi->put(bucket, Timestamp(9), doc1, context);
+ spi->flush(bucket, context);
+
+ CPPUNIT_ASSERT(!result4.hasError());
+
+ RemoveResult result5 = spi->remove(bucket,
+ Timestamp(9),
+ doc1->getId(),
+ context);
+ {
+ const BucketInfo info = spi->getBucketInfo(bucket).getBucketInfo();
+ spi->flush(bucket, context);
+
+ CPPUNIT_ASSERT_EQUAL(0, (int)info.getDocumentCount());
+ CPPUNIT_ASSERT_EQUAL(0, (int)info.getChecksum());
+ CPPUNIT_ASSERT_EQUAL(true, result5.wasFound());
+ CPPUNIT_ASSERT(!result5.hasError());
+ }
+
+ GetResult getResult = spi->get(bucket,
+ document::AllFields(),
+ doc1->getId(),
+ context);
+
+ CPPUNIT_ASSERT_EQUAL(Result::NONE, getResult.getErrorCode());
+ CPPUNIT_ASSERT_EQUAL(Timestamp(0), getResult.getTimestamp());
+ CPPUNIT_ASSERT(!getResult.hasDocument());
+}
+
+void ConformanceTest::testRemoveMerge() {
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+ Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+ DocumentId removeId("id:fraggle:testdoctype1:n=1:rock");
+ spi->createBucket(bucket, context);
+
+ Result result = spi->put(bucket, Timestamp(3), doc1, context);
+
+ // Remove a document that does not exist
+ {
+ RemoveResult removeResult = spi->remove(bucket,
+ Timestamp(10),
+ removeId,
+ context);
+ spi->flush(bucket, context);
+ CPPUNIT_ASSERT_EQUAL(Result::NONE, removeResult.getErrorCode());
+ CPPUNIT_ASSERT_EQUAL(false, removeResult.wasFound());
+ }
+ {
+ const BucketInfo info = spi->getBucketInfo(bucket).getBucketInfo();
+
+ CPPUNIT_ASSERT_EQUAL(uint32_t(1), info.getDocumentCount());
+ CPPUNIT_ASSERT_EQUAL(uint32_t(2), info.getEntryCount());
+ CPPUNIT_ASSERT(info.getChecksum() != 0);
+ }
+
+ // Remove entry should exist afterwards
+ {
+ std::vector<DocEntry::LP> entries(iterateBucket(
+ *spi, bucket, ALL_VERSIONS));
+ CPPUNIT_ASSERT_EQUAL(size_t(2), entries.size());
+ // Timestamp-sorted by iterateBucket
+ CPPUNIT_ASSERT_EQUAL(removeId, *entries.back()->getDocumentId());
+ CPPUNIT_ASSERT_EQUAL(Timestamp(10), entries.back()->getTimestamp());
+ CPPUNIT_ASSERT(entries.back()->isRemove());
+ }
+ // Add a _newer_ remove for the same document ID we already removed
+ {
+ RemoveResult removeResult = spi->remove(bucket,
+ Timestamp(11),
+ removeId,
+ context);
+ spi->flush(bucket, context);
+ CPPUNIT_ASSERT_EQUAL(Result::NONE, removeResult.getErrorCode());
+ CPPUNIT_ASSERT_EQUAL(false, removeResult.wasFound());
+ }
+ // Old entry may or may not be present, depending on the provider.
+ {
+ const BucketInfo info = spi->getBucketInfo(bucket).getBucketInfo();
+
+ CPPUNIT_ASSERT_EQUAL(uint32_t(1), info.getDocumentCount());
+ CPPUNIT_ASSERT(info.getEntryCount() >= 2);
+ CPPUNIT_ASSERT(info.getChecksum() != 0);
+ }
+ // Must have new remove. We don't check for the presence of the old remove.
+ {
+ std::vector<DocEntry::LP> entries(iterateBucket(*spi, bucket, ALL_VERSIONS));
+ CPPUNIT_ASSERT(entries.size() >= 2);
+ CPPUNIT_ASSERT_EQUAL(removeId, *entries.back()->getDocumentId());
+ CPPUNIT_ASSERT_EQUAL(Timestamp(11), entries.back()->getTimestamp());
+ CPPUNIT_ASSERT(entries.back()->isRemove());
+ }
+ // Add an _older_ remove for the same document ID we already removed.
+ // It may or may not be present in a subsequent iteration, but the
+ // newest timestamp must still be present.
+ {
+ RemoveResult removeResult = spi->remove(bucket,
+ Timestamp(7),
+ removeId,
+ context);
+ spi->flush(bucket, context);
+ CPPUNIT_ASSERT_EQUAL(Result::NONE, removeResult.getErrorCode());
+ CPPUNIT_ASSERT_EQUAL(false, removeResult.wasFound());
+ }
+ {
+ const BucketInfo info = spi->getBucketInfo(bucket).getBucketInfo();
+
+ CPPUNIT_ASSERT_EQUAL(uint32_t(1), info.getDocumentCount());
+ CPPUNIT_ASSERT(info.getEntryCount() >= 2);
+ CPPUNIT_ASSERT(info.getChecksum() != 0);
+ }
+ // Must have newest remove. We don't check for the presence of the old remove.
+ {
+ std::vector<DocEntry::LP> entries(iterateBucket(*spi, bucket, ALL_VERSIONS));
+ CPPUNIT_ASSERT(entries.size() >= 2);
+ CPPUNIT_ASSERT_EQUAL(removeId, *entries.back()->getDocumentId());
+ CPPUNIT_ASSERT_EQUAL(Timestamp(11), entries.back()->getTimestamp());
+ CPPUNIT_ASSERT(entries.back()->isRemove());
+ }
+}
+
+void ConformanceTest::testUpdate() {
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+ spi->createBucket(bucket, context);
+
+ const document::DocumentType *docType(
+ testDocMan.getTypeRepo().getDocumentType("testdoctype1"));
+ document::DocumentUpdate::SP
+ update(new DocumentUpdate(*docType, doc1->getId()));
+ std::shared_ptr<document::AssignValueUpdate> assignUpdate(
+ new document::AssignValueUpdate(document::IntFieldValue(42)));
+ document::FieldUpdate fieldUpdate(docType->getField("headerval"));
+ fieldUpdate.addUpdate(*assignUpdate);
+ update->addUpdate(fieldUpdate);
+
+ {
+ UpdateResult result = spi->update(bucket, Timestamp(3), update,
+ context);
+ spi->flush(bucket, context);
+ CPPUNIT_ASSERT_EQUAL(Result(), Result(result));
+ CPPUNIT_ASSERT_EQUAL(Timestamp(0), result.getExistingTimestamp());
+ }
+
+ spi->put(bucket, Timestamp(3), doc1, context);
+ {
+ UpdateResult result = spi->update(bucket, Timestamp(4), update,
+ context);
+ spi->flush(bucket, context);
+
+ CPPUNIT_ASSERT_EQUAL(Result::NONE, result.getErrorCode());
+ CPPUNIT_ASSERT_EQUAL(Timestamp(3), result.getExistingTimestamp());
+ }
+
+ {
+ GetResult result = spi->get(bucket,
+ document::AllFields(),
+ doc1->getId(),
+ context);
+
+ CPPUNIT_ASSERT_EQUAL(Result::NONE, result.getErrorCode());
+ CPPUNIT_ASSERT_EQUAL(Timestamp(4), result.getTimestamp());
+ CPPUNIT_ASSERT_EQUAL(document::IntFieldValue(42),
+ static_cast<document::IntFieldValue&>(
+ *result.getDocument().getValue("headerval")));
+ }
+
+ spi->remove(bucket, Timestamp(5), doc1->getId(), context);
+ spi->flush(bucket, context);
+
+ {
+ GetResult result = spi->get(bucket,
+ document::AllFields(),
+ doc1->getId(),
+ context);
+
+ CPPUNIT_ASSERT_EQUAL(Result::NONE, result.getErrorCode());
+ CPPUNIT_ASSERT_EQUAL(Timestamp(0), result.getTimestamp());
+ CPPUNIT_ASSERT(!result.hasDocument());
+ }
+
+
+ {
+ UpdateResult result = spi->update(bucket, Timestamp(6), update,
+ context);
+ spi->flush(bucket, context);
+
+ CPPUNIT_ASSERT_EQUAL(Result::NONE, result.getErrorCode());
+ CPPUNIT_ASSERT_EQUAL(Timestamp(0), result.getExistingTimestamp());
+ }
+}
+
+void ConformanceTest::testGet() {
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+ spi->createBucket(bucket, context);
+
+ {
+ GetResult result = spi->get(bucket, document::AllFields(),
+ doc1->getId(), context);
+
+ CPPUNIT_ASSERT_EQUAL(Result::NONE, result.getErrorCode());
+ CPPUNIT_ASSERT_EQUAL(Timestamp(0), result.getTimestamp());
+ }
+
+ spi->put(bucket, Timestamp(3), doc1, context);
+ spi->flush(bucket, context);
+
+ {
+ GetResult result = spi->get(bucket, document::AllFields(),
+ doc1->getId(), context);
+ CPPUNIT_ASSERT_EQUAL(*doc1, result.getDocument());
+ CPPUNIT_ASSERT_EQUAL(Timestamp(3), result.getTimestamp());
+ }
+
+ spi->remove(bucket, Timestamp(4), doc1->getId(), context);
+ spi->flush(bucket, context);
+
+ {
+ GetResult result = spi->get(bucket, document::AllFields(),
+ doc1->getId(), context);
+
+ CPPUNIT_ASSERT_EQUAL(Result::NONE, result.getErrorCode());
+ CPPUNIT_ASSERT_EQUAL(Timestamp(0), result.getTimestamp());
+ }
+}
+
+void
+ConformanceTest::testIterateCreateIterator()
+{
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ spi->createBucket(b, context);
+
+ spi::CreateIteratorResult result(
+ createIterator(*spi, b, createSelection("")));
+ CPPUNIT_ASSERT_EQUAL(Result::NONE, result.getErrorCode());
+ // Iterator ID 0 means invalid iterator, so cannot be returned
+ // from a successful createIterator call.
+ CPPUNIT_ASSERT(result.getIteratorId() != IteratorId(0));
+
+ spi->destroyIterator(result.getIteratorId(), context);
+}
+
+void
+ConformanceTest::testIterateWithUnknownId()
+{
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ spi->createBucket(b, context);
+
+ IteratorId unknownId(123);
+ IterateResult result(spi->iterate(unknownId, 1024, context));
+ CPPUNIT_ASSERT_EQUAL(Result::PERMANENT_ERROR, result.getErrorCode());
+}
+
+void
+ConformanceTest::testIterateDestroyIterator()
+{
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ spi->createBucket(b, context);
+
+ CreateIteratorResult iter(createIterator(*spi, b, createSelection("")));
+ {
+ IterateResult result(spi->iterate(iter.getIteratorId(), 1024, context));
+ CPPUNIT_ASSERT_EQUAL(Result::NONE, result.getErrorCode());
+ }
+
+ {
+ Result destroyResult(
+ spi->destroyIterator(iter.getIteratorId(), context));
+ CPPUNIT_ASSERT(!destroyResult.hasError());
+ }
+ // Iteration should now fail
+ {
+ IterateResult result(spi->iterate(iter.getIteratorId(), 1024, context));
+ CPPUNIT_ASSERT_EQUAL(Result::PERMANENT_ERROR, result.getErrorCode());
+ }
+ {
+ Result destroyResult(
+ spi->destroyIterator(iter.getIteratorId(), context));
+ CPPUNIT_ASSERT(!destroyResult.hasError());
+ }
+}
+
+void
+ConformanceTest::testIterateAllDocs()
+{
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ spi->createBucket(b, context);
+
+ std::vector<DocAndTimestamp> docs(feedDocs(*spi, testDocMan, b, 100));
+ CreateIteratorResult iter(createIterator(*spi, b, createSelection("")));
+
+ std::vector<Chunk> chunks = doIterate(*spi, iter.getIteratorId(), 4096);
+ verifyDocs(docs, chunks);
+
+ spi->destroyIterator(iter.getIteratorId(), context);
+}
+
+void
+ConformanceTest::testIterateAllDocsNewestVersionOnly()
+{
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ spi->createBucket(b, context);
+
+ std::vector<DocAndTimestamp> docs(feedDocs(*spi, testDocMan, b, 100));
+ std::vector<DocAndTimestamp> newDocs;
+
+ for (size_t i = 0; i < docs.size(); ++i) {
+ Document::SP newDoc(docs[i].doc->clone());
+ Timestamp newTimestamp(2000 + i);
+ newDoc->setValue("headerval", document::IntFieldValue(5678 + i));
+ spi->put(b, newTimestamp, newDoc, context);
+ newDocs.push_back(DocAndTimestamp(newDoc, newTimestamp));
+ }
+ spi->flush(b, context);
+
+ CreateIteratorResult iter(createIterator(*spi, b, createSelection("")));
+
+ std::vector<Chunk> chunks = doIterate(*spi, iter.getIteratorId(), 4096);
+ verifyDocs(newDocs, chunks);
+
+ spi->destroyIterator(iter.getIteratorId(), context);
+}
+
+void
+ConformanceTest::testIterateChunked()
+{
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ spi->createBucket(b, context);
+
+ std::vector<DocAndTimestamp> docs(feedDocs(*spi, testDocMan, b, 100));
+ CreateIteratorResult iter(createIterator(*spi, b, createSelection("")));
+
+ // Max byte size is 1, so only 1 document should be included in each chunk.
+ std::vector<Chunk> chunks = doIterate(*spi, iter.getIteratorId(), 1);
+ CPPUNIT_ASSERT_EQUAL(size_t(100), chunks.size());
+ verifyDocs(docs, chunks);
+
+ spi->destroyIterator(iter.getIteratorId(), context);
+}
+
+void
+ConformanceTest::testMaxByteSize()
+{
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ spi->createBucket(b, context);
+
+ std::vector<DocAndTimestamp> docs(
+ feedDocs(*spi, testDocMan, b, 100, 4096, 4096));
+
+ Selection sel(createSelection(""));
+ CreateIteratorResult iter(createIterator(*spi, b, sel));
+
+    // Docs are 4k each and we iterate with a max combined byte size of 10k,
+    // so each chunk should contain at most 3 docs.
+ std::vector<Chunk> chunks = doIterate(*spi, iter.getIteratorId(), 10000);
+ if (chunks.size() < 33) {
+ std::ostringstream ss;
+ ss << "Expected >= 33 chunks, but got "<< chunks.size();
+ CPPUNIT_FAIL(ss.str());
+ }
+ verifyDocs(docs, chunks);
+
+ spi->destroyIterator(iter.getIteratorId(), context);
+}
+
+void
+ConformanceTest::testIterateMatchTimestampRange()
+{
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ spi->createBucket(b, context);
+
+ std::vector<DocAndTimestamp> docsToVisit;
+ Timestamp fromTimestamp(1010);
+ Timestamp toTimestamp(1060);
+
+ for (uint32_t i = 0; i < 99; i++) {
+ Timestamp timestamp(1000 + i);
+ document::Document::SP doc(
+ testDocMan.createRandomDocumentAtLocation(
+ 1, timestamp, 110, 110));
+
+ spi->put(b, timestamp, doc, context);
+ if (timestamp >= fromTimestamp && timestamp <= toTimestamp) {
+ docsToVisit.push_back(
+ DocAndTimestamp(doc, Timestamp(1000 + i)));
+ }
+ }
+ spi->flush(b, context);
+
+ Selection sel = Selection(DocumentSelection(""));
+ sel.setFromTimestamp(fromTimestamp);
+ sel.setToTimestamp(toTimestamp);
+
+ CreateIteratorResult iter(createIterator(*spi, b, sel));
+
+ std::vector<Chunk> chunks = doIterate(*spi, iter.getIteratorId(), 2048);
+ verifyDocs(docsToVisit, chunks);
+
+ spi->destroyIterator(iter.getIteratorId(), context);
+}
+
+void
+ConformanceTest::testIterateExplicitTimestampSubset()
+{
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ spi->createBucket(b, context);
+
+ std::vector<DocAndTimestamp> docsToVisit;
+ std::vector<Timestamp> timestampsToVisit;
+ std::set<vespalib::string> removes;
+
+ for (uint32_t i = 0; i < 99; i++) {
+ Timestamp timestamp(1000 + i);
+ document::Document::SP doc(
+ testDocMan.createRandomDocumentAtLocation(
+ 1, timestamp, 110, 110));
+
+ spi->put(b, timestamp, doc, context);
+ if (timestamp % 3 == 0) {
+ docsToVisit.push_back(
+ DocAndTimestamp(doc, Timestamp(1000 + i)));
+ timestampsToVisit.push_back(Timestamp(timestamp));
+ }
+ }
+ // Timestamp subset should include removes without
+ // having to explicitly specify it
+ CPPUNIT_ASSERT(spi->remove(b,
+ Timestamp(2000),
+ docsToVisit.front().doc->getId(), context)
+ .wasFound());
+ spi->flush(b, context);
+
+ timestampsToVisit.push_back(Timestamp(2000));
+ removes.insert(docsToVisit.front().doc->getId().toString());
+ docsToVisit.erase(docsToVisit.begin());
+ timestampsToVisit.erase(timestampsToVisit.begin());
+
+ Selection sel(createSelection(""));
+ sel.setTimestampSubset(timestampsToVisit);
+
+ CreateIteratorResult iter(createIterator(*spi, b, sel));
+
+ std::vector<Chunk> chunks = doIterate(*spi, iter.getIteratorId(), 2048);
+ verifyDocs(docsToVisit, chunks, removes);
+
+ spi->destroyIterator(iter.getIteratorId(), context);
+}
+
+void
+ConformanceTest::testIterateRemoves()
+{
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ spi->createBucket(b, context);
+
+ int docCount = 10;
+ std::vector<DocAndTimestamp> docs(feedDocs(*spi, testDocMan, b, docCount));
+ std::set<vespalib::string> removedDocs;
+ std::vector<DocAndTimestamp> nonRemovedDocs;
+
+ for (int i = 0; i < docCount; ++i) {
+ if (i % 3 == 0) {
+ removedDocs.insert(docs[i].doc->getId().toString());
+ CPPUNIT_ASSERT(spi->remove(b,
+ Timestamp(2000 + i),
+ docs[i].doc->getId(),
+ context)
+ .wasFound());
+ } else {
+ nonRemovedDocs.push_back(docs[i]);
+ }
+ }
+ spi->flush(b, context);
+
+ // First, test iteration without removes
+ {
+ Selection sel(createSelection(""));
+ CreateIteratorResult iter(createIterator(*spi, b, sel));
+
+ std::vector<Chunk> chunks = doIterate(*spi, iter.getIteratorId(), 4096);
+ verifyDocs(nonRemovedDocs, chunks);
+ spi->destroyIterator(iter.getIteratorId(), context);
+ }
+
+ {
+ Selection sel(createSelection(""));
+ CreateIteratorResult iter(
+ createIterator(*spi, b, sel, NEWEST_DOCUMENT_OR_REMOVE));
+
+ std::vector<Chunk> chunks = doIterate(*spi, iter.getIteratorId(), 4096);
+ std::vector<DocEntry::LP> entries = getEntriesFromChunks(chunks);
+ CPPUNIT_ASSERT_EQUAL(docs.size(), entries.size());
+ verifyDocs(nonRemovedDocs, chunks, removedDocs);
+
+ spi->destroyIterator(iter.getIteratorId(), context);
+ }
+}
+
+void
+ConformanceTest::testIterateMatchSelection()
+{
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ spi->createBucket(b, context);
+
+ std::vector<DocAndTimestamp> docsToVisit;
+
+ for (uint32_t i = 0; i < 99; i++) {
+ document::Document::SP doc(testDocMan.createRandomDocumentAtLocation(
+ 1, 1000 + i, 110, 110));
+ doc->setValue("headerval", document::IntFieldValue(i));
+
+ spi->put(b, Timestamp(1000 + i), doc, context);
+ if ((i % 3) == 0) {
+ docsToVisit.push_back(
+ DocAndTimestamp(doc, Timestamp(1000 + i)));
+ }
+ }
+ spi->flush(b, context);
+
+ CreateIteratorResult iter(
+ createIterator(*spi,
+ b,
+ createSelection("testdoctype1.headerval % 3 == 0")));
+
+ std::vector<Chunk> chunks = doIterate(*spi, iter.getIteratorId(), 2048 * 1024);
+ verifyDocs(docsToVisit, chunks);
+
+ spi->destroyIterator(iter.getIteratorId(), context);
+}
+
+void
+ConformanceTest::testIterationRequiringDocumentIdOnlyMatching()
+{
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ spi->createBucket(b, context);
+
+ feedDocs(*spi, testDocMan, b, 100);
+ DocumentId removedId("id:blarg:testdoctype1:n=1:unknowndoc");
+
+    // The document does not already exist; the remove should nonetheless
+    // create a remove entry for it.
+ CPPUNIT_ASSERT(
+ !spi->remove(b, Timestamp(2000), removedId, context).wasFound());
+ spi->flush(b, context);
+
+ Selection sel(createSelection("id == '" + removedId.toString() + "'"));
+
+ CreateIteratorResult iter(
+ createIterator(*spi, b, sel, NEWEST_DOCUMENT_OR_REMOVE));
+ CPPUNIT_ASSERT(iter.getErrorCode() == Result::NONE);
+
+ std::vector<Chunk> chunks = doIterate(*spi, iter.getIteratorId(), 4096);
+ std::vector<DocAndTimestamp> docs;
+ std::set<vespalib::string> removes;
+ removes.insert(removedId.toString());
+ verifyDocs(docs, chunks, removes);
+
+ spi->destroyIterator(iter.getIteratorId(), context);
+}
+
+void
+ConformanceTest::testIterateBadDocumentSelection()
+{
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ spi->createBucket(b, context);
+ {
+ CreateIteratorResult iter(
+ createIterator(*spi, b, createSelection("the muppet show")));
+ if (iter.getErrorCode() == Result::NONE) {
+ IterateResult result(
+ spi->iterate(iter.getIteratorId(), 4096, context));
+ CPPUNIT_ASSERT_EQUAL(Result::NONE, result.getErrorCode());
+ CPPUNIT_ASSERT_EQUAL(size_t(0), result.getEntries().size());
+ CPPUNIT_ASSERT_EQUAL(true, result.isCompleted());
+ } else {
+ CPPUNIT_ASSERT_EQUAL(Result::PERMANENT_ERROR, iter.getErrorCode());
+ CPPUNIT_ASSERT_EQUAL(IteratorId(0), iter.getIteratorId());
+ }
+ }
+ {
+ CreateIteratorResult iter(
+ createIterator(*spi,
+ b,
+ createSelection(
+ "unknownddoctype.something=thatthing")));
+ if (iter.getErrorCode() == Result::NONE) {
+ IterateResult result(spi->iterate(
+ iter.getIteratorId(), 4096, context));
+ CPPUNIT_ASSERT_EQUAL(Result::NONE, result.getErrorCode());
+ CPPUNIT_ASSERT_EQUAL(size_t(0), result.getEntries().size());
+ CPPUNIT_ASSERT_EQUAL(true, result.isCompleted());
+ } else {
+ CPPUNIT_ASSERT_EQUAL(Result::PERMANENT_ERROR, iter.getErrorCode());
+ CPPUNIT_ASSERT_EQUAL(IteratorId(0), iter.getIteratorId());
+ }
+ }
+}
+
+void
+ConformanceTest::testIterateAlreadyCompleted()
+{
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ spi->createBucket(b, context);
+
+ std::vector<DocAndTimestamp> docs = feedDocs(*spi, testDocMan, b, 10);
+ Selection sel(createSelection(""));
+ CreateIteratorResult iter(createIterator(*spi, b, sel));
+
+ std::vector<Chunk> chunks = doIterate(*spi, iter.getIteratorId(), 4096);
+ verifyDocs(docs, chunks);
+
+ IterateResult result(spi->iterate(iter.getIteratorId(), 4096, context));
+ CPPUNIT_ASSERT_EQUAL(Result::NONE, result.getErrorCode());
+ CPPUNIT_ASSERT_EQUAL(size_t(0), result.getEntries().size());
+ CPPUNIT_ASSERT(result.isCompleted());
+
+ spi->destroyIterator(iter.getIteratorId(), context);
+}
+
+void
+ConformanceTest::testIterateEmptyBucket()
+{
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ spi->createBucket(b, context);
+ Selection sel(createSelection(""));
+
+ CreateIteratorResult iter(createIterator(*spi, b, sel));
+
+ IterateResult result(spi->iterate(iter.getIteratorId(), 4096, context));
+ CPPUNIT_ASSERT_EQUAL(Result::NONE, result.getErrorCode());
+ CPPUNIT_ASSERT_EQUAL(size_t(0), result.getEntries().size());
+ CPPUNIT_ASSERT(result.isCompleted());
+
+ spi->destroyIterator(iter.getIteratorId(), context);
+}
+
+void
+ConformanceTest::testDeleteBucket()
+{
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+
+ Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+ spi->createBucket(bucket, context);
+
+ spi->put(bucket, Timestamp(3), doc1, context);
+ spi->flush(bucket, context);
+
+ spi->deleteBucket(bucket, context);
+ testDeleteBucketPostCondition(spi, bucket, *doc1);
+ if (_factory->hasPersistence()) {
+ spi.reset();
+ document::TestDocMan testDocMan2;
+ spi = getSpi(*_factory, testDocMan2);
+ testDeleteBucketPostCondition(spi, bucket, *doc1);
+ }
+}
+
+
+void
+ConformanceTest::
+testDeleteBucketPostCondition(const PersistenceProvider::UP &spi,
+ const Bucket &bucket,
+ const Document &doc1)
+{
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ {
+ GetResult result = spi->get(bucket,
+ document::AllFields(),
+ doc1.getId(),
+ context);
+
+ CPPUNIT_ASSERT_EQUAL(Result::NONE, result.getErrorCode());
+ CPPUNIT_ASSERT_EQUAL(Timestamp(0), result.getTimestamp());
+ }
+}
+
+
+void
+ConformanceTest::testSplitNormalCase()
+{
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ Bucket bucketA(document::BucketId(3, 0x02), PartitionId(0));
+ Bucket bucketB(document::BucketId(3, 0x06), PartitionId(0));
+
+ Bucket bucketC(document::BucketId(2, 0x02), PartitionId(0));
+ spi->createBucket(bucketC, context);
+
+ TimestampList tsList;
+ for (uint32_t i = 0; i < 10; ++i) {
+ Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
+ spi->put(bucketC, Timestamp(i + 1), doc1, context);
+ }
+
+ for (uint32_t i = 10; i < 20; ++i) {
+ Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
+ spi->put(bucketC, Timestamp(i + 1), doc1, context);
+ }
+
+ spi->flush(bucketC, context);
+
+ spi->split(bucketC, bucketA, bucketB, context);
+ testSplitNormalCasePostCondition(spi, bucketA, bucketB, bucketC,
+ testDocMan);
+ if (_factory->hasPersistence()) {
+ spi.reset();
+ document::TestDocMan testDocMan2;
+ spi = getSpi(*_factory, testDocMan2);
+ testSplitNormalCasePostCondition(spi, bucketA, bucketB, bucketC,
+ testDocMan2);
+ }
+}
+
+
+void
+ConformanceTest::
+testSplitNormalCasePostCondition(const PersistenceProvider::UP &spi,
+ const Bucket &bucketA,
+ const Bucket &bucketB,
+ const Bucket &bucketC,
+ document::TestDocMan &testDocMan)
+{
+ CPPUNIT_ASSERT_EQUAL(10, (int)spi->getBucketInfo(bucketA).getBucketInfo().
+ getDocumentCount());
+ CPPUNIT_ASSERT_EQUAL(10, (int)spi->getBucketInfo(bucketB).getBucketInfo().
+ getDocumentCount());
+
+ document::AllFields fs;
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ for (uint32_t i = 0; i < 10; ++i) {
+ Document::UP doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
+ CPPUNIT_ASSERT(
+ spi->get(bucketA, fs, doc1->getId(), context).hasDocument());
+ CPPUNIT_ASSERT(
+ !spi->get(bucketC, fs, doc1->getId(), context).hasDocument());
+ CPPUNIT_ASSERT(
+ !spi->get(bucketB, fs, doc1->getId(), context).hasDocument());
+ }
+
+ for (uint32_t i = 10; i < 20; ++i) {
+ Document::UP doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
+ CPPUNIT_ASSERT(
+ spi->get(bucketB, fs, doc1->getId(), context).hasDocument());
+ CPPUNIT_ASSERT(
+ !spi->get(bucketA, fs, doc1->getId(), context).hasDocument());
+ CPPUNIT_ASSERT(
+ !spi->get(bucketC, fs, doc1->getId(), context).hasDocument());
+ }
+}
+
+void
+ConformanceTest::testSplitTargetExists()
+{
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ Bucket bucketA(document::BucketId(3, 0x02), PartitionId(0));
+ Bucket bucketB(document::BucketId(3, 0x06), PartitionId(0));
+ spi->createBucket(bucketB, context);
+
+ Bucket bucketC(document::BucketId(2, 0x02), PartitionId(0));
+ spi->createBucket(bucketC, context);
+
+ TimestampList tsList;
+ for (uint32_t i = 0; i < 10; ++i) {
+ Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
+ spi->put(bucketC, Timestamp(i + 1), doc1, context);
+ }
+
+ spi->flush(bucketC, context);
+
+ for (uint32_t i = 10; i < 20; ++i) {
+ Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
+ spi->put(bucketB, Timestamp(i + 1), doc1, context);
+ }
+ spi->flush(bucketB, context);
+ CPPUNIT_ASSERT(!spi->getBucketInfo(bucketB).getBucketInfo().isActive());
+
+ for (uint32_t i = 10; i < 20; ++i) {
+ Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
+ spi->put(bucketC, Timestamp(i + 1), doc1, context);
+ }
+ spi->flush(bucketC, context);
+
+ for (uint32_t i = 20; i < 25; ++i) {
+ Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
+ spi->put(bucketB, Timestamp(i + 1), doc1, context);
+ }
+
+ spi->flush(bucketB, context);
+
+ spi->split(bucketC, bucketA, bucketB, context);
+ testSplitTargetExistsPostCondition(spi, bucketA, bucketB, bucketC,
+ testDocMan);
+ if (_factory->hasPersistence()) {
+ spi.reset();
+ document::TestDocMan testDocMan2;
+ spi = getSpi(*_factory, testDocMan2);
+ testSplitTargetExistsPostCondition(spi, bucketA, bucketB, bucketC,
+ testDocMan2);
+ }
+}
+
+
+void
+ConformanceTest::
+testSplitTargetExistsPostCondition(const PersistenceProvider::UP &spi,
+ const Bucket &bucketA,
+ const Bucket &bucketB,
+ const Bucket &bucketC,
+ document::TestDocMan &testDocMan)
+{
+ CPPUNIT_ASSERT_EQUAL(10, (int)spi->getBucketInfo(bucketA).getBucketInfo().
+ getDocumentCount());
+ CPPUNIT_ASSERT_EQUAL(15, (int)spi->getBucketInfo(bucketB).getBucketInfo().
+ getDocumentCount());
+
+ document::AllFields fs;
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ for (uint32_t i = 0; i < 10; ++i) {
+ Document::UP doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
+ CPPUNIT_ASSERT(
+ spi->get(bucketA, fs, doc1->getId(), context).hasDocument());
+ CPPUNIT_ASSERT(
+ !spi->get(bucketC, fs, doc1->getId(), context).hasDocument());
+ CPPUNIT_ASSERT(
+ !spi->get(bucketB, fs, doc1->getId(), context).hasDocument());
+ }
+
+ for (uint32_t i = 10; i < 25; ++i) {
+ Document::UP doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
+ CPPUNIT_ASSERT(
+ spi->get(bucketB, fs, doc1->getId(), context).hasDocument());
+ CPPUNIT_ASSERT(
+ !spi->get(bucketA, fs, doc1->getId(), context).hasDocument());
+ CPPUNIT_ASSERT(
+ !spi->get(bucketC, fs, doc1->getId(), context).hasDocument());
+ }
+}
+
+void
+ConformanceTest::testSplitSingleDocumentInSource()
+{
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ Bucket target1(document::BucketId(3, 0x02), PartitionId(0));
+ Bucket target2(document::BucketId(3, 0x06), PartitionId(0));
+
+ Bucket source(document::BucketId(2, 0x02), PartitionId(0));
+ spi->createBucket(source, context);
+
+ // Create doc belonging in target2 after split.
+ Document::SP doc = testDocMan.createRandomDocumentAtLocation(0x06, 0);
+ spi->put(source, Timestamp(1), doc, context);
+
+ spi->flush(source, context);
+
+ spi->split(source, target1, target2, context);
+ testSplitSingleDocumentInSourcePostCondition(
+ spi, source, target1, target2, testDocMan);
+
+ if (_factory->hasPersistence()) {
+ spi.reset();
+ document::TestDocMan testDocMan2;
+ spi = getSpi(*_factory, testDocMan2);
+ testSplitSingleDocumentInSourcePostCondition(
+ spi, source, target1, target2, testDocMan2);
+ }
+}
+
+void
+ConformanceTest::testSplitSingleDocumentInSourcePostCondition(
+ const PersistenceProvider::UP& spi,
+ const Bucket& source,
+ const Bucket& target1,
+ const Bucket& target2,
+ document::TestDocMan& testDocMan)
+{
+ CPPUNIT_ASSERT_EQUAL(uint32_t(0),
+ spi->getBucketInfo(source).getBucketInfo().
+ getDocumentCount());
+ CPPUNIT_ASSERT_EQUAL(uint32_t(0),
+ spi->getBucketInfo(target1).getBucketInfo().
+ getDocumentCount());
+ CPPUNIT_ASSERT_EQUAL(uint32_t(1),
+ spi->getBucketInfo(target2).getBucketInfo().
+ getDocumentCount());
+
+ document::AllFields fs;
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ Document::UP doc = testDocMan.createRandomDocumentAtLocation(0x06, 0);
+ CPPUNIT_ASSERT(spi->get(target2, fs, doc->getId(), context).hasDocument());
+ CPPUNIT_ASSERT(!spi->get(target1, fs, doc->getId(), context).hasDocument());
+ CPPUNIT_ASSERT(!spi->get(source, fs, doc->getId(), context).hasDocument());
+}
+
+void
+ConformanceTest::createAndPopulateJoinSourceBuckets(
+ PersistenceProvider& spi,
+ const Bucket& source1,
+ const Bucket& source2,
+ document::TestDocMan& testDocMan)
+{
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ spi.createBucket(source1, context);
+ spi.createBucket(source2, context);
+
+ for (uint32_t i = 0; i < 10; ++i) {
+ Document::SP doc(
+ testDocMan.createRandomDocumentAtLocation(
+ source1.getBucketId().getId(), i));
+ spi.put(source1, Timestamp(i + 1), doc, context);
+ }
+ spi.flush(source1, context);
+
+ for (uint32_t i = 10; i < 20; ++i) {
+ Document::SP doc(
+ testDocMan.createRandomDocumentAtLocation(
+ source2.getBucketId().getId(), i));
+ spi.put(source2, Timestamp(i + 1), doc, context);
+ }
+ spi.flush(source2, context);
+}
+
+void
+ConformanceTest::doTestJoinNormalCase(const Bucket& source1,
+ const Bucket& source2,
+ const Bucket& target)
+{
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+
+ createAndPopulateJoinSourceBuckets(*spi, source1, source2, testDocMan);
+
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ spi->join(source1, source2, target, context);
+
+ testJoinNormalCasePostCondition(spi, source1, source2, target,
+ testDocMan);
+ if (_factory->hasPersistence()) {
+ spi.reset();
+ document::TestDocMan testDocMan2;
+ spi = getSpi(*_factory, testDocMan2);
+ testJoinNormalCasePostCondition(spi, source1, source2, target,
+ testDocMan2);
+ }
+}
+
+void
+ConformanceTest::testJoinNormalCase()
+{
+ Bucket source1(document::BucketId(3, 0x02), PartitionId(0));
+ Bucket source2(document::BucketId(3, 0x06), PartitionId(0));
+ Bucket target(document::BucketId(2, 0x02), PartitionId(0));
+ doTestJoinNormalCase(source1, source2, target);
+}
+
+void
+ConformanceTest::testJoinNormalCaseWithMultipleBitsDecreased()
+{
+ Bucket source1(document::BucketId(3, 0x02), PartitionId(0));
+ Bucket source2(document::BucketId(3, 0x06), PartitionId(0));
+ Bucket target(document::BucketId(1, 0x00), PartitionId(0));
+ doTestJoinNormalCase(source1, source2, target);
+}
+
+void
+ConformanceTest::
+testJoinNormalCasePostCondition(const PersistenceProvider::UP &spi,
+ const Bucket &bucketA,
+ const Bucket &bucketB,
+ const Bucket &bucketC,
+ document::TestDocMan &testDocMan)
+{
+ CPPUNIT_ASSERT_EQUAL(20, (int)spi->getBucketInfo(bucketC).
+ getBucketInfo().getDocumentCount());
+
+ document::AllFields fs;
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ for (uint32_t i = 0; i < 10; ++i) {
+ Document::UP doc(
+ testDocMan.createRandomDocumentAtLocation(
+ bucketA.getBucketId().getId(), i));
+ CPPUNIT_ASSERT(
+ spi->get(bucketC, fs, doc->getId(), context).hasDocument());
+ CPPUNIT_ASSERT(
+ !spi->get(bucketA, fs, doc->getId(), context).hasDocument());
+ }
+
+ for (uint32_t i = 10; i < 20; ++i) {
+ Document::UP doc(
+ testDocMan.createRandomDocumentAtLocation(
+ bucketB.getBucketId().getId(), i));
+ CPPUNIT_ASSERT(
+ spi->get(bucketC, fs, doc->getId(), context).hasDocument());
+ CPPUNIT_ASSERT(
+ !spi->get(bucketB, fs, doc->getId(), context).hasDocument());
+ }
+}
+
+
+void
+ConformanceTest::testJoinTargetExists()
+{
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ Bucket bucketA(document::BucketId(3, 0x02), PartitionId(0));
+ spi->createBucket(bucketA, context);
+
+ Bucket bucketB(document::BucketId(3, 0x06), PartitionId(0));
+ spi->createBucket(bucketB, context);
+
+ Bucket bucketC(document::BucketId(2, 0x02), PartitionId(0));
+ spi->createBucket(bucketC, context);
+
+ for (uint32_t i = 0; i < 10; ++i) {
+ Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
+ spi->put(bucketA, Timestamp(i + 1), doc1, context);
+ }
+
+ spi->flush(bucketA, context);
+
+ for (uint32_t i = 10; i < 20; ++i) {
+ Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
+ spi->put(bucketB, Timestamp(i + 1), doc1, context);
+ }
+ spi->flush(bucketB, context);
+
+ for (uint32_t i = 20; i < 30; ++i) {
+ Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
+ spi->put(bucketC, Timestamp(i + 1), doc1, context);
+ }
+
+ spi->flush(bucketC, context);
+
+ spi->join(bucketA, bucketB, bucketC, context);
+ testJoinTargetExistsPostCondition(spi, bucketA, bucketB, bucketC,
+ testDocMan);
+ if (_factory->hasPersistence()) {
+ spi.reset();
+ document::TestDocMan testDocMan2;
+ spi = getSpi(*_factory, testDocMan2);
+ testJoinTargetExistsPostCondition(spi, bucketA, bucketB, bucketC,
+ testDocMan2);
+ }
+}
+
+
+void
+ConformanceTest::
+testJoinTargetExistsPostCondition(const PersistenceProvider::UP &spi,
+ const Bucket &bucketA,
+ const Bucket &bucketB,
+ const Bucket &bucketC,
+ document::TestDocMan &testDocMan)
+{
+ CPPUNIT_ASSERT_EQUAL(30, (int)spi->getBucketInfo(bucketC).getBucketInfo().
+ getDocumentCount());
+
+ document::AllFields fs;
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ for (uint32_t i = 0; i < 10; ++i) {
+ Document::UP doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
+ CPPUNIT_ASSERT(
+ spi->get(bucketC, fs, doc1->getId(), context).hasDocument());
+ CPPUNIT_ASSERT(
+ !spi->get(bucketA, fs, doc1->getId(), context).hasDocument());
+ }
+
+ for (uint32_t i = 10; i < 20; ++i) {
+ Document::UP doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
+ CPPUNIT_ASSERT(
+ spi->get(bucketC, fs, doc1->getId(), context).hasDocument());
+ CPPUNIT_ASSERT(
+ !spi->get(bucketB, fs, doc1->getId(), context).hasDocument());
+ }
+
+ for (uint32_t i = 20; i < 30; ++i) {
+ Document::UP doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
+ CPPUNIT_ASSERT(
+ spi->get(bucketC, fs, doc1->getId(), context).hasDocument());
+ }
+}
+
+void
+ConformanceTest::populateBucket(const Bucket& b,
+ PersistenceProvider& spi,
+ Context& context,
+ uint32_t from,
+ uint32_t to,
+ document::TestDocMan& testDocMan)
+{
+ assert(from <= to);
+ for (uint32_t i = from; i < to; ++i) {
+ const uint32_t location = b.getBucketId().getId();
+ Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(
+ location, i);
+ spi.put(b, Timestamp(i + 1), doc1, context);
+ }
+ spi.flush(b, context);
+}
+
+void
+ConformanceTest::testJoinOneBucket()
+{
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ Bucket bucketA(document::BucketId(3, 0x02), PartitionId(0));
+ spi->createBucket(bucketA, context);
+
+ Bucket bucketB(document::BucketId(3, 0x06), PartitionId(0));
+ Bucket bucketC(document::BucketId(2, 0x02), PartitionId(0));
+
+ populateBucket(bucketA, *spi, context, 0, 10, testDocMan);
+
+ spi->join(bucketA, bucketB, bucketC, context);
+ testJoinOneBucketPostCondition(spi, bucketA, bucketC, testDocMan);
+ if (_factory->hasPersistence()) {
+ spi.reset();
+ document::TestDocMan testDocMan2;
+ spi = getSpi(*_factory, testDocMan2);
+ testJoinOneBucketPostCondition(spi, bucketA, bucketC, testDocMan2);
+ }
+}
+
+void
+ConformanceTest::
+testJoinOneBucketPostCondition(const PersistenceProvider::UP &spi,
+ const Bucket &bucketA,
+ const Bucket &bucketC,
+ document::TestDocMan &testDocMan)
+{
+ CPPUNIT_ASSERT_EQUAL(10, (int)spi->getBucketInfo(bucketC).getBucketInfo().
+ getDocumentCount());
+
+ document::AllFields fs;
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ for (uint32_t i = 0; i < 10; ++i) {
+ Document::UP doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
+ CPPUNIT_ASSERT(
+ spi->get(bucketC, fs, doc1->getId(), context).hasDocument());
+ CPPUNIT_ASSERT(
+ !spi->get(bucketA, fs, doc1->getId(), context).hasDocument());
+ }
+}
+
+void
+ConformanceTest::
+testJoinSameSourceBucketsPostCondition(
+ const PersistenceProvider::UP& spi,
+ const Bucket& source,
+ const Bucket& target,
+ document::TestDocMan& testDocMan)
+{
+ // Same post conditions as joinOneBucket case
+ testJoinOneBucketPostCondition(spi, source, target, testDocMan);
+}
+
+void
+ConformanceTest::doTestJoinSameSourceBuckets(const Bucket& source,
+ const Bucket& target)
+{
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ spi->createBucket(source, context);
+ populateBucket(source, *spi, context, 0, 10, testDocMan);
+
+ spi->join(source, source, target, context);
+ testJoinSameSourceBucketsPostCondition(spi, source, target, testDocMan);
+ if (_factory->hasPersistence()) {
+ spi.reset();
+ document::TestDocMan testDocMan2;
+ spi = getSpi(*_factory, testDocMan2);
+ testJoinSameSourceBucketsPostCondition(
+ spi, source, target, testDocMan2);
+ }
+}
+
+void
+ConformanceTest::testJoinSameSourceBuckets()
+{
+ Bucket source(document::BucketId(3, 0x02), PartitionId(0));
+ Bucket target(document::BucketId(2, 0x02), PartitionId(0));
+ doTestJoinSameSourceBuckets(source, target);
+}
+
+void
+ConformanceTest::testJoinSameSourceBucketsWithMultipleBitsDecreased()
+{
+ Bucket source(document::BucketId(3, 0x02), PartitionId(0));
+ Bucket target(document::BucketId(1, 0x00), PartitionId(0));
+ doTestJoinSameSourceBuckets(source, target);
+}
+
+void
+ConformanceTest::testJoinSameSourceBucketsTargetExistsPostCondition(
+ const PersistenceProvider& spi,
+ const Bucket& source,
+ const Bucket& target,
+ document::TestDocMan& testDocMan)
+{
+ CPPUNIT_ASSERT_EQUAL(20, (int)spi.getBucketInfo(target).getBucketInfo().
+ getDocumentCount());
+
+ document::AllFields fs;
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ for (uint32_t i = 0; i < 20; ++i) {
+ Document::UP doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
+ CPPUNIT_ASSERT(
+ spi.get(target, fs, doc1->getId(), context).hasDocument());
+ CPPUNIT_ASSERT(
+ !spi.get(source, fs, doc1->getId(), context).hasDocument());
+ }
+}
+
+void
+ConformanceTest::testJoinSameSourceBucketsTargetExists()
+{
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ Bucket source(document::BucketId(3, 0x02), PartitionId(0));
+ spi->createBucket(source, context);
+
+ Bucket target(document::BucketId(2, 0x02), PartitionId(0));
+ spi->createBucket(target, context);
+
+ populateBucket(source, *spi, context, 0, 10, testDocMan);
+ populateBucket(target, *spi, context, 10, 20, testDocMan);
+
+ spi->join(source, source, target, context);
+ testJoinSameSourceBucketsTargetExistsPostCondition(
+ *spi, source, target, testDocMan);
+ if (_factory->hasPersistence()) {
+ spi.reset();
+ document::TestDocMan testDocMan2;
+ spi = getSpi(*_factory, testDocMan2);
+ testJoinSameSourceBucketsTargetExistsPostCondition(
+ *spi, source, target, testDocMan2);
+ }
+}
+
+void ConformanceTest::testMaintain()
+{
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+
+ Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+ spi->createBucket(bucket, context);
+
+ spi->put(bucket, Timestamp(3), doc1, context);
+ spi->flush(bucket, context);
+
+ CPPUNIT_ASSERT_EQUAL(Result::NONE,
+ spi->maintain(bucket, LOW).getErrorCode());
+}
+
+void ConformanceTest::testGetModifiedBuckets()
+{
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ CPPUNIT_ASSERT_EQUAL(0,
+ (int)spi->getModifiedBuckets().getList().size());
+}
+
+void ConformanceTest::testBucketActivation()
+{
+ if (!_factory->supportsActiveState()) {
+ return;
+ }
+
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+
+ spi->setClusterState(createClusterState());
+ spi->createBucket(bucket, context);
+ CPPUNIT_ASSERT(!spi->getBucketInfo(bucket).getBucketInfo().isActive());
+
+ spi->setActiveState(bucket, BucketInfo::ACTIVE);
+ CPPUNIT_ASSERT(spi->getBucketInfo(bucket).getBucketInfo().isActive());
+
+    // Add and remove a document so the document count drops to zero, to
+    // check that the active state is not cleared in that case.
+ Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+ CPPUNIT_ASSERT_EQUAL(
+ Result(),
+ Result(spi->put(bucket, Timestamp(1), doc1, context)));
+ CPPUNIT_ASSERT_EQUAL(
+ Result(),
+ Result(spi->remove(bucket, Timestamp(5), doc1->getId(), context)));
+ CPPUNIT_ASSERT(spi->getBucketInfo(bucket).getBucketInfo().isActive());
+
+ // Setting node down should clear active flag.
+ spi->setClusterState(createClusterState(lib::State::DOWN));
+ CPPUNIT_ASSERT(!spi->getBucketInfo(bucket).getBucketInfo().isActive());
+ spi->setClusterState(createClusterState(lib::State::UP));
+ CPPUNIT_ASSERT(!spi->getBucketInfo(bucket).getBucketInfo().isActive());
+
+    // Explicitly clearing the active state should of course also clear it.
+ spi->setActiveState(bucket, BucketInfo::ACTIVE);
+ CPPUNIT_ASSERT(spi->getBucketInfo(bucket).getBucketInfo().isActive());
+ spi->setActiveState(bucket, BucketInfo::NOT_ACTIVE);
+ CPPUNIT_ASSERT(!spi->getBucketInfo(bucket).getBucketInfo().isActive());
+}
+
+void ConformanceTest::testBucketActivationSplitAndJoin()
+{
+ if (!_factory->supportsActiveState()) {
+ return;
+ }
+
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ Bucket bucketA(document::BucketId(3, 0x02), PartitionId(0));
+ Bucket bucketB(document::BucketId(3, 0x06), PartitionId(0));
+ Bucket bucketC(document::BucketId(2, 0x02), PartitionId(0));
+ Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x02, 1);
+ Document::SP doc2 = testDocMan.createRandomDocumentAtLocation(0x06, 2);
+
+ spi->setClusterState(createClusterState());
+ spi->createBucket(bucketC, context);
+ spi->put(bucketC, Timestamp(1), doc1, context);
+ spi->put(bucketC, Timestamp(2), doc2, context);
+ spi->flush(bucketC, context);
+
+ spi->setActiveState(bucketC, BucketInfo::ACTIVE);
+ CPPUNIT_ASSERT(spi->getBucketInfo(bucketC).getBucketInfo().isActive());
+ spi->split(bucketC, bucketA, bucketB, context);
+ CPPUNIT_ASSERT(spi->getBucketInfo(bucketA).getBucketInfo().isActive());
+ CPPUNIT_ASSERT(spi->getBucketInfo(bucketB).getBucketInfo().isActive());
+ CPPUNIT_ASSERT(!spi->getBucketInfo(bucketC).getBucketInfo().isActive());
+
+ spi->setActiveState(bucketA, BucketInfo::NOT_ACTIVE);
+ spi->setActiveState(bucketB, BucketInfo::NOT_ACTIVE);
+ spi->join(bucketA, bucketB, bucketC, context);
+ CPPUNIT_ASSERT(!spi->getBucketInfo(bucketA).getBucketInfo().isActive());
+ CPPUNIT_ASSERT(!spi->getBucketInfo(bucketB).getBucketInfo().isActive());
+ CPPUNIT_ASSERT(!spi->getBucketInfo(bucketC).getBucketInfo().isActive());
+
+ spi->split(bucketC, bucketA, bucketB, context);
+ CPPUNIT_ASSERT(!spi->getBucketInfo(bucketA).getBucketInfo().isActive());
+ CPPUNIT_ASSERT(!spi->getBucketInfo(bucketB).getBucketInfo().isActive());
+ CPPUNIT_ASSERT(!spi->getBucketInfo(bucketC).getBucketInfo().isActive());
+
+ spi->setActiveState(bucketA, BucketInfo::ACTIVE);
+ spi->join(bucketA, bucketB, bucketC, context);
+ CPPUNIT_ASSERT(!spi->getBucketInfo(bucketA).getBucketInfo().isActive());
+ CPPUNIT_ASSERT(!spi->getBucketInfo(bucketB).getBucketInfo().isActive());
+ CPPUNIT_ASSERT(spi->getBucketInfo(bucketC).getBucketInfo().isActive());
+
+    // Redo the test with an empty bucket, to ensure new buckets are
+    // generated even if empty.
+ spi->deleteBucket(bucketA, context);
+ spi->deleteBucket(bucketB, context);
+ spi->deleteBucket(bucketC, context);
+
+ spi->createBucket(bucketC, context);
+ spi->setActiveState(bucketC, BucketInfo::NOT_ACTIVE);
+ spi->split(bucketC, bucketA, bucketB, context);
+ CPPUNIT_ASSERT(!spi->getBucketInfo(bucketA).getBucketInfo().isActive());
+ CPPUNIT_ASSERT(!spi->getBucketInfo(bucketB).getBucketInfo().isActive());
+ spi->join(bucketA, bucketB, bucketC, context);
+ CPPUNIT_ASSERT(!spi->getBucketInfo(bucketA).getBucketInfo().isActive());
+ CPPUNIT_ASSERT(!spi->getBucketInfo(bucketB).getBucketInfo().isActive());
+ CPPUNIT_ASSERT(!spi->getBucketInfo(bucketC).getBucketInfo().isActive());
+
+ spi->deleteBucket(bucketA, context);
+ spi->deleteBucket(bucketB, context);
+ spi->deleteBucket(bucketC, context);
+
+ spi->createBucket(bucketC, context);
+ spi->setActiveState(bucketC, BucketInfo::ACTIVE);
+ spi->split(bucketC, bucketA, bucketB, context);
+ CPPUNIT_ASSERT(spi->getBucketInfo(bucketA).getBucketInfo().isActive());
+ CPPUNIT_ASSERT(spi->getBucketInfo(bucketB).getBucketInfo().isActive());
+ CPPUNIT_ASSERT(!spi->getBucketInfo(bucketC).getBucketInfo().isActive());
+ spi->join(bucketA, bucketB, bucketC, context);
+ CPPUNIT_ASSERT(!spi->getBucketInfo(bucketA).getBucketInfo().isActive());
+ CPPUNIT_ASSERT(!spi->getBucketInfo(bucketB).getBucketInfo().isActive());
+ CPPUNIT_ASSERT(spi->getBucketInfo(bucketC).getBucketInfo().isActive());
+}
+
+void ConformanceTest::testRemoveEntry()
+{
+ if (!_factory->supportsRemoveEntry()) {
+ return;
+ }
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+
+ Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+ Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
+ Document::SP doc2 = testDocMan.createRandomDocumentAtLocation(0x01, 2);
+ spi->createBucket(bucket, context);
+
+ spi->put(bucket, Timestamp(3), doc1, context);
+ spi->flush(bucket, context);
+ BucketInfo info1 = spi->getBucketInfo(bucket).getBucketInfo();
+
+ {
+ spi->put(bucket, Timestamp(4), doc2, context);
+ spi->flush(bucket, context);
+ spi->removeEntry(bucket, Timestamp(4), context);
+ spi->flush(bucket, context);
+ BucketInfo info2 = spi->getBucketInfo(bucket).getBucketInfo();
+ CPPUNIT_ASSERT_EQUAL(info1, info2);
+ }
+
+ // Test case where there exists a previous version of the document.
+ {
+ spi->put(bucket, Timestamp(5), doc1, context);
+ spi->flush(bucket, context);
+ spi->removeEntry(bucket, Timestamp(5), context);
+ spi->flush(bucket, context);
+ BucketInfo info2 = spi->getBucketInfo(bucket).getBucketInfo();
+ CPPUNIT_ASSERT_EQUAL(info1, info2);
+ }
+
+    // Test case where the newest entry for the document after removeEntry
+    // is a remove.
+ {
+ spi->remove(bucket, Timestamp(6), doc1->getId(), context);
+ spi->flush(bucket, context);
+ BucketInfo info2 = spi->getBucketInfo(bucket).getBucketInfo();
+ CPPUNIT_ASSERT_EQUAL(uint32_t(0), info2.getDocumentCount());
+
+ spi->put(bucket, Timestamp(7), doc1, context);
+ spi->flush(bucket, context);
+ spi->removeEntry(bucket, Timestamp(7), context);
+ spi->flush(bucket, context);
+ BucketInfo info3 = spi->getBucketInfo(bucket).getBucketInfo();
+ CPPUNIT_ASSERT_EQUAL(info2, info3);
+ }
+}
+
+void ConformanceTest::detectAndTestOptionalBehavior() {
+    // Report whether the implementation supports setting bucket size info.
+
+    // Report whether joining the same bucket on multiple partitions works
+    // (where the target equals one of the sources). If not supported, the
+    // service layer must die if a bucket is found on multiple partitions
+    // during init. Test the functionality if it works.
+}
+
+
+} // spi
+} // storage
+
diff --git a/persistence/src/vespa/persistence/conformancetest/conformancetest.h b/persistence/src/vespa/persistence/conformancetest/conformancetest.h
new file mode 100644
index 00000000000..ff1127079d7
--- /dev/null
+++ b/persistence/src/vespa/persistence/conformancetest/conformancetest.h
@@ -0,0 +1,274 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * This conformance test class exists so that the same tests can be run on
+ * multiple implementations of the persistence SPI.
+ *
+ * To run the conformance tests on a given implementation, add a small wrapper
+ * like the one used by the dummy persistence implementation (see
+ * dummyimpltest.cpp).
+ */
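+// A minimal wrapper might look like the following sketch (the factory name
+// and the use of DummyPersistence here are illustrative assumptions; see
+// dummyimpltest.cpp for the real wrapper):
+//
+//   struct DummyFactory : public ConformanceTest::PersistenceFactory {
+//       PersistenceProvider::UP getPersistenceImplementation(
+//               const document::DocumentTypeRepo::SP& repo,
+//               const document::DocumenttypesConfig&)
+//       {
+//           return PersistenceProvider::UP(
+//                   new dummy::DummyPersistence(repo, 1));
+//       }
+//   };
+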
+#pragma once
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/persistence/spi/persistenceprovider.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+
+// The tests are ordered so that the most basic functionality is tested before
+// the more advanced, so a catastrophic failure that crashes the tests fails
+// on simple operations rather than complex ones, which eases debugging.
+#define DEFINE_CONFORMANCE_TESTS() \
+ CPPUNIT_TEST(testBasics); \
+ CPPUNIT_TEST(testPut); \
+ CPPUNIT_TEST(testPutNewDocumentVersion); \
+ CPPUNIT_TEST(testPutOlderDocumentVersion); \
+ CPPUNIT_TEST(testPutDuplicate); \
+ CPPUNIT_TEST(testRemove); \
+ CPPUNIT_TEST(testRemoveMerge); \
+ CPPUNIT_TEST(testUpdate); \
+ CPPUNIT_TEST(testGet); \
+ CPPUNIT_TEST(testIterateCreateIterator); \
+ CPPUNIT_TEST(testIterateWithUnknownId); \
+ CPPUNIT_TEST(testIterateDestroyIterator); \
+ CPPUNIT_TEST(testIterateAllDocs); \
+ CPPUNIT_TEST(testIterateAllDocsNewestVersionOnly); \
+ CPPUNIT_TEST(testIterateChunked); \
+ CPPUNIT_TEST(testMaxByteSize); \
+ CPPUNIT_TEST(testIterateMatchTimestampRange); \
+ CPPUNIT_TEST(testIterateExplicitTimestampSubset); \
+ CPPUNIT_TEST(testIterateRemoves); \
+ CPPUNIT_TEST(testIterateMatchSelection); \
+ CPPUNIT_TEST(testIterationRequiringDocumentIdOnlyMatching); \
+ CPPUNIT_TEST(testIterateBadDocumentSelection); \
+ CPPUNIT_TEST(testIterateAlreadyCompleted); \
+ CPPUNIT_TEST(testIterateEmptyBucket); \
+ CPPUNIT_TEST(testBucketInfo); \
+ CPPUNIT_TEST(testOrderIndependentBucketInfo); \
+ CPPUNIT_TEST(testDeleteBucket); \
+ CPPUNIT_TEST(testSplitNormalCase); \
+ CPPUNIT_TEST(testSplitTargetExists); \
+ CPPUNIT_TEST(testSplitSingleDocumentInSource); \
+ CPPUNIT_TEST(testJoinNormalCase); \
+ CPPUNIT_TEST(testJoinNormalCaseWithMultipleBitsDecreased); \
+ CPPUNIT_TEST(testJoinOneBucket); \
+ CPPUNIT_TEST(testJoinTargetExists); \
+ CPPUNIT_TEST(testJoinSameSourceBuckets); \
+ CPPUNIT_TEST(testJoinSameSourceBucketsWithMultipleBitsDecreased); \
+ CPPUNIT_TEST(testJoinSameSourceBucketsTargetExists); \
+ CPPUNIT_TEST(testMaintain); \
+ CPPUNIT_TEST(testGetModifiedBuckets); \
+ CPPUNIT_TEST(testBucketActivation); \
+ CPPUNIT_TEST(testBucketActivationSplitAndJoin); \
+ CPPUNIT_TEST(testRemoveEntry); \
+ CPPUNIT_TEST(detectAndTestOptionalBehavior);
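+
+// The macro above is meant to be expanded inside a CppUnit suite declaration.
+// A sketch of typical usage (the suite name is an assumption; cf.
+// dummyimpltest.cpp):
+//
+//   CPPUNIT_TEST_SUITE(DummyImplConformanceTest);
+//   DEFINE_CONFORMANCE_TESTS();
+//   CPPUNIT_TEST_SUITE_END();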
+
+namespace document
+{
+
+class TestDocMan;
+
+}
+
+namespace storage {
+namespace spi {
+
+struct ConformanceTest : public CppUnit::TestFixture {
+ struct PersistenceFactory {
+ typedef std::unique_ptr<PersistenceFactory> UP;
+
+ virtual ~PersistenceFactory() {}
+ virtual PersistenceProvider::UP getPersistenceImplementation(
+ const document::DocumentTypeRepo::SP &repo,
+ const document::DocumenttypesConfig &typesCfg) = 0;
+
+ virtual void
+ clear(void)
+ {
+ // clear persistent state, i.e. remove files/directories
+ }
+
+ virtual bool
+ hasPersistence(void) const
+ {
+ return false;
+ }
+ virtual bool
+ supportsActiveState() const
+ {
+ return false;
+ }
+ virtual bool
+ supportsRemoveEntry() const
+ {
+ return false;
+ }
+ };
+ PersistenceFactory::UP _factory;
+
+private:
+ void populateBucket(const Bucket& b,
+ PersistenceProvider& spi,
+ Context& context,
+ uint32_t from,
+ uint32_t to,
+ document::TestDocMan& testDocMan);
+
+ void
+ testDeleteBucketPostCondition(const PersistenceProvider::UP &spi,
+ const Bucket &bucket,
+ const Document &doc1);
+
+ void
+ testSplitNormalCasePostCondition(const PersistenceProvider::UP &spi,
+ const Bucket &bucketA,
+ const Bucket &bucketB,
+ const Bucket &bucketC,
+ document::TestDocMan &testDocMan);
+
+ void
+ testSplitTargetExistsPostCondition(const PersistenceProvider::UP &spi,
+ const Bucket &bucketA,
+ const Bucket &bucketB,
+ const Bucket &bucketC,
+ document::TestDocMan &testDocMan);
+
+ void
+ testSplitSingleDocumentInSourcePostCondition(
+ const PersistenceProvider::UP& spi,
+ const Bucket& source,
+ const Bucket& target1,
+ const Bucket& target2,
+ document::TestDocMan& testDocMan);
+
+ void
+ createAndPopulateJoinSourceBuckets(
+ PersistenceProvider& spi,
+ const Bucket& source1,
+ const Bucket& source2,
+ document::TestDocMan& testDocMan);
+
+ void
+ doTestJoinNormalCase(const Bucket& source1,
+ const Bucket& source2,
+ const Bucket& target);
+
+ void
+ testJoinNormalCasePostCondition(const PersistenceProvider::UP &spi,
+ const Bucket &bucketA,
+ const Bucket &bucketB,
+ const Bucket &bucketC,
+ document::TestDocMan &testDocMan);
+
+ void
+ testJoinTargetExistsPostCondition(const PersistenceProvider::UP &spi,
+ const Bucket &bucketA,
+ const Bucket &bucketB,
+ const Bucket &bucketC,
+ document::TestDocMan &testDocMan);
+
+ void
+ testJoinOneBucketPostCondition(const PersistenceProvider::UP &spi,
+ const Bucket &bucketA,
+ const Bucket &bucketC,
+ document::TestDocMan &testDocMan);
+
+ void
+ doTestJoinSameSourceBuckets(const Bucket& source,
+ const Bucket& target);
+
+ void
+ testJoinSameSourceBucketsPostCondition(
+ const PersistenceProvider::UP& spi,
+ const Bucket& source,
+ const Bucket& target,
+ document::TestDocMan& testDocMan);
+
+ void
+ testJoinSameSourceBucketsTargetExistsPostCondition(
+ const PersistenceProvider& spi,
+ const Bucket& source,
+ const Bucket& target,
+ document::TestDocMan& testDocMan);
+public:
+ ConformanceTest(PersistenceFactory::UP f) : _factory(std::move(f)) {}
+
+ /**
+     * Tests that one can put and remove entries in the persistence
+     * implementation, and iterate over the content. This functionality is
+     * needed by most other tests in order to verify correct behavior, so it
+     * must work for the other tests to be meaningful.
+ */
+ void testBasics();
+
+ /**
+ * Test that listing of buckets works as intended.
+ */
+ void testListBuckets();
+
+ /**
+     * Test that bucket info is generated in a legal fashion (such that
+     * split/join/merge can work as intended).
+ */
+ void testBucketInfo();
+ /**
+ * Test that given a set of operations with certain timestamps, the bucket
+     * info is the same no matter in which order we feed them.
+ */
+ void testOrderIndependentBucketInfo();
+
+ /** Test that the various document operations work as intended. */
+ void testPut();
+ void testPutNewDocumentVersion();
+ void testPutOlderDocumentVersion();
+ void testPutDuplicate();
+ void testRemove();
+ void testRemoveMerge();
+ void testUpdate();
+ void testGet();
+
+    /** Test that the special cases of iteration work as intended. */
+ void testIterateCreateIterator();
+ void testIterateWithUnknownId();
+ void testIterateDestroyIterator();
+ void testIterateAllDocs();
+ void testIterateAllDocsNewestVersionOnly();
+ void testIterateChunked();
+ void testMaxByteSize();
+ void testIterateMatchTimestampRange();
+ void testIterateExplicitTimestampSubset();
+ void testIterateRemoves();
+ void testIterateMatchSelection();
+ void testIterationRequiringDocumentIdOnlyMatching();
+ void testIterateBadDocumentSelection();
+ void testIterateAlreadyCompleted();
+ void testIterateEmptyBucket();
+
+ /** Test that the various bucket operations work as intended. */
+ void testCreateBucket();
+ void testDeleteBucket();
+ void testSplitTargetExists();
+ void testSplitNormalCase();
+ void testSplitSingleDocumentInSource();
+ void testJoinNormalCase();
+ void testJoinNormalCaseWithMultipleBitsDecreased();
+ void testJoinTargetExists();
+ void testJoinOneBucket();
+ void testJoinSameSourceBuckets();
+ void testJoinSameSourceBucketsWithMultipleBitsDecreased();
+ void testJoinSameSourceBucketsTargetExists();
+ void testMaintain();
+ void testGetModifiedBuckets();
+ void testBucketActivation();
+ void testBucketActivationSplitAndJoin();
+
+ void testRemoveEntry();
+
+ /**
+     * Reports which optional behaviors the implementation supports, and
+     * tests the corresponding functionality where it does.
+ */
+ void detectAndTestOptionalBehavior();
+};
+
+} // spi
+} // storage
+
diff --git a/persistence/src/vespa/persistence/dummyimpl/.gitignore b/persistence/src/vespa/persistence/dummyimpl/.gitignore
new file mode 100644
index 00000000000..7e7c0fe7fae
--- /dev/null
+++ b/persistence/src/vespa/persistence/dummyimpl/.gitignore
@@ -0,0 +1,2 @@
+/.depend
+/Makefile
diff --git a/persistence/src/vespa/persistence/dummyimpl/CMakeLists.txt b/persistence/src/vespa/persistence/dummyimpl/CMakeLists.txt
new file mode 100644
index 00000000000..568e4b0f246
--- /dev/null
+++ b/persistence/src/vespa/persistence/dummyimpl/CMakeLists.txt
@@ -0,0 +1,6 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_library(persistence_dummyimpl OBJECT
+ SOURCES
+ dummypersistence.cpp
+ DEPENDS
+)
diff --git a/persistence/src/vespa/persistence/dummyimpl/dummypersistence.cpp b/persistence/src/vespa/persistence/dummyimpl/dummypersistence.cpp
new file mode 100644
index 00000000000..1bfd92d2b42
--- /dev/null
+++ b/persistence/src/vespa/persistence/dummyimpl/dummypersistence.cpp
@@ -0,0 +1,943 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+#include <algorithm>
+#include <vespa/persistence/dummyimpl/dummypersistence.h>
+#include <vespa/document/select/parser.h>
+#include <vespa/vespalib/util/crc.h>
+#include <vespa/vespalib/util/atomic.h>
+#include <vespa/vespalib/util/vstringfmt.h>
+#include <vespa/document/fieldset/fieldsetrepo.h>
+#include <vespa/vespalib/stllike/hash_set.h>
+
+using std::binary_search;
+using std::lower_bound;
+
+LOG_SETUP(".dummypersistence");
+
+namespace storage {
+namespace spi {
+namespace dummy {
+
+uint32_t
+BucketContent::computeEntryChecksum(const BucketEntry& e) const
+{
+ vespalib::crc_32_type checksummer;
+
+ uint64_t ts(e.entry->getTimestamp());
+ checksummer.process_bytes(&e.gid, sizeof(GlobalId));
+ checksummer.process_bytes(&ts, sizeof(uint64_t));
+ return checksummer.checksum();
+}
+
+BucketChecksum
+BucketContent::updateRollingChecksum(uint32_t entryChecksum)
+{
+ uint32_t checksum = _info.getChecksum();
+ checksum ^= entryChecksum;
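+    // A zero checksum denotes an empty bucket (see getBucketInfo()), so a
+    // computed value of 0 is remapped to 1 to keep the two cases distinct.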
+ if (checksum == 0) {
+ checksum = 1;
+ }
+ return BucketChecksum(checksum);
+}
+
+const BucketInfo&
+BucketContent::getBucketInfo() const
+{
+ if (!_outdatedInfo) {
+ return _info;
+ }
+
+ // Checksum should only depend on the newest entry for each document that
+ // has not been removed.
+ uint32_t unique = 0;
+ uint32_t uniqueSize = 0;
+ uint32_t totalSize = 0;
+ uint32_t checksum = 0;
+
+ for (std::vector<BucketEntry>::const_iterator
+ it = _entries.begin(); it != _entries.end(); ++it)
+ {
+ const DocEntry& entry(*it->entry);
+ const GlobalId& gid(it->gid);
+
+ GidMapType::const_iterator gidIt(_gidMap.find(gid));
+ assert(gidIt != _gidMap.end());
+
+ totalSize += entry.getSize();
+ if (entry.isRemove()) {
+ continue;
+ }
+        // Only include the entry if it is the newest one for this GID
+ if (gidIt->second.get() != &entry) {
+ continue;
+ }
+ ++unique;
+ uniqueSize += entry.getSize();
+
+ checksum ^= computeEntryChecksum(*it);
+ }
+ if (!unique) {
+ checksum = 0;
+ } else if (checksum == 0) {
+ checksum = 1;
+ }
+
+ _info = BucketInfo(BucketChecksum(checksum),
+ unique,
+ uniqueSize,
+ _entries.size(),
+ totalSize,
+ BucketInfo::READY,
+ _active ? BucketInfo::ACTIVE : BucketInfo::NOT_ACTIVE);
+
+ _outdatedInfo = false;
+ return _info;
+}
+
+namespace {
+struct HasDocId {
+ const DocumentId &_did;
+ HasDocId(const DocumentId &did) : _did(did) {}
+ bool operator()(const DocEntry::LP &entry)
+ { return *entry->getDocumentId() == _did; }
+};
+
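+// Strict weak ordering comparator that lets lower_bound/binary_search probe
+// the timestamp-sorted entry vector using a bare Timestamp as the key.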
+struct TimestampLess {
+ bool operator()(const BucketEntry &bucketEntry, Timestamp t)
+ { return bucketEntry.entry->getTimestamp() < t; }
+ bool operator()(Timestamp t, const BucketEntry &bucketEntry)
+ { return t < bucketEntry.entry->getTimestamp(); }
+};
+
+template <typename Iter>
+typename std::iterator_traits<Iter>::value_type
+dereferenceOrDefaultIfAtEnd(Iter it, Iter end) {
+ if (it == end) {
+ return typename std::iterator_traits<Iter>::value_type();
+ }
+ return *it;
+}
+
+} // namespace
+
+bool
+BucketContent::hasTimestamp(Timestamp t) const
+{
+ if (!_entries.empty() && _entries.back().entry->getTimestamp() < t) {
+ return false;
+ }
+ return binary_search(_entries.begin(), _entries.end(), t, TimestampLess());
+}
+
+/**
+ * GID map semantics:
+ * The GID map always points to the newest entry for any given GID, no matter
+ * its state (that is to say, the GID map will point at both puts and removes).
+ *
+ * When inserting any valid entry (i.e. not a duplicate), we check the map to
+ * see if a mapping exists for this GID already. If it does not, we insert one
+ * pointing to the newly inserted entry. If it does exist, we change the mapping
+ * to point to the new entry if and only if the new entry has a newer timestamp.
+ *
+ * When reverting an entry, we must walk through the entries vector and look for
+ * the newest entry that will be logically reverted to, then point the GID map
+ * to this entry. If no such entry exists (i.e. reverting the only put for a
+ * document), we can remove the mapping entirely.
+ */
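+
+// Example timeline for a single document (illustration only): puts at t=10
+// and t=20 leave the GID map pointing at the t=20 entry. Reverting t=20
+// walks the entries back so the map points at t=10 again; reverting t=10 as
+// well (the only remaining entry) erases the mapping entirely.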
+
+void
+BucketContent::insert(DocEntry::LP e)
+{
+ LOG(spam, "insert(%s)", e->toString().c_str());
+ const DocumentId* docId(e->getDocumentId());
+ assert(docId != 0);
+ GlobalId gid(docId->getGlobalId());
+ GidMapType::iterator gidIt(_gidMap.find(gid));
+
+ if (!_entries.empty() &&
+ _entries.back().entry->getTimestamp() < e->getTimestamp()) {
+ _entries.push_back(BucketEntry(e, gid));
+ } else {
+ std::vector<BucketEntry>::iterator it =
+ lower_bound(_entries.begin(),
+ _entries.end(),
+ e->getTimestamp(),
+ TimestampLess());
+ if (it != _entries.end()) {
+ if (it->entry->getTimestamp() == e->getTimestamp()) {
+ if (*it->entry.get() == *e) {
+ LOG(debug, "Ignoring duplicate put entry %s",
+ e->toString().c_str());
+ return;
+ } else {
+ LOG(error, "Entry %s was already present."
+ "Was trying to insert %s.",
+ it->entry->toString().c_str(),
+ e->toString().c_str());
+ assert(false);
+ }
+ }
+ }
+ _entries.insert(it, BucketEntry(e, gid));
+ }
+
+ // GID map points to newest entry for that particular GID
+ if (gidIt != _gidMap.end()) {
+ if (gidIt->second->getTimestamp() < e->getTimestamp()) {
+ // TODO(vekterli): add support for cheap info updates for putting
+ // newer versions of a document etc. by XORing away old checksum.
+ gidIt->second = e;
+ } else {
+ LOG(spam,
+ "Newly inserted entry %s was older than existing entry %s; "
+ "not updating GID mapping",
+ e->toString().c_str(),
+ gidIt->second->toString().c_str());
+ }
+ _outdatedInfo = true;
+ } else {
+ _gidMap.insert(GidMapType::value_type(gid, e));
+        // Since the GID didn't exist before, we can do a running update of
+        // the bucket info: the bucket checksum is the XOR of all entry
+        // checksums, which is commutative.
+        // Only bother to update if we don't have to redo it all afterwards
+        // anyway.
+        // The bucket info is updated before the entries since we assume the
+        // rest of the function is nothrow.
+ if (!_outdatedInfo) {
+ if (!e->isRemove()) {
+ _info = BucketInfo(updateRollingChecksum(
+ computeEntryChecksum(BucketEntry(e, gid))),
+ _info.getDocumentCount() + 1,
+ _info.getDocumentSize() + e->getSize(),
+ _info.getEntryCount() + 1,
+ _info.getUsedSize() + e->getSize(),
+ _info.getReady(),
+ _info.getActive());
+ } else {
+ _info = BucketInfo(_info.getChecksum(),
+ _info.getDocumentCount(),
+ _info.getDocumentSize(),
+ _info.getEntryCount() + 1,
+ _info.getUsedSize() + e->getSize(),
+ _info.getReady(),
+ _info.getActive());
+ }
+
+ LOG(spam,
+ "After cheap bucketinfo update, state is %s (inserted %s)",
+ _info.toString().c_str(),
+ e->toString().c_str());
+ }
+ }
+
+ assert(_outdatedInfo || _info.getEntryCount() == _entries.size());
+}
+
+DocEntry::LP
+BucketContent::getEntry(const DocumentId& did) const
+{
+ GidMapType::const_iterator it(_gidMap.find(did.getGlobalId()));
+ if (it != _gidMap.end()) {
+ return it->second;
+ }
+ return DocEntry::LP();
+}
+
+DocEntry::LP
+BucketContent::getEntry(Timestamp t) const
+{
+ std::vector<BucketEntry>::const_iterator iter =
+ lower_bound(_entries.begin(), _entries.end(), t, TimestampLess());
+
+ if (iter == _entries.end() || iter->entry->getTimestamp() != t) {
+ return DocEntry::LP();
+ } else {
+ return iter->entry;
+ }
+}
+
+void
+BucketContent::eraseEntry(Timestamp t)
+{
+ std::vector<BucketEntry>::iterator iter =
+ lower_bound(_entries.begin(), _entries.end(), t, TimestampLess());
+
+ if (iter != _entries.end() && iter->entry->getTimestamp() == t) {
+ assert(iter->entry->getDocumentId() != 0);
+ GidMapType::iterator gidIt(
+ _gidMap.find(iter->entry->getDocumentId()->getGlobalId()));
+ assert(gidIt != _gidMap.end());
+ _entries.erase(iter);
+ if (gidIt->second->getTimestamp() == t) {
+ LOG(debug, "erasing timestamp %zu from GID map", t.getValue());
+ // TODO(vekterli): O(1) bucket info update for this case
+ // FIXME: is this correct? seems like it could cause wrong behavior!
+ _gidMap.erase(gidIt);
+ } // else: not erasing newest entry, cannot erase from GID map
+ _outdatedInfo = true;
+ }
+}
+
+DummyPersistence::DummyPersistence(
+ const document::DocumentTypeRepo::SP& repo,
+ uint16_t partitionCount)
+ : _initialized(false),
+ _repo(repo),
+ _partitions(partitionCount),
+ _content(partitionCount),
+ _nextIterator(1),
+ _iterators(),
+ _monitor(),
+ _clusterState(),
+ _simulateMaintainFailure(false)
+{
+}
+
+document::select::Node::UP
+DummyPersistence::parseDocumentSelection(const string& documentSelection,
+ bool allowLeaf)
+{
+ document::select::Node::UP ret;
+ try {
+ document::select::Parser parser(
+ *_repo, document::BucketIdFactory());
+ ret = parser.parse(documentSelection);
+    } catch (document::select::ParsingFailedException&) {
+ return document::select::Node::UP();
+ }
+ if (ret->isLeafNode() && !allowLeaf) {
+ return document::select::Node::UP();
+ }
+ return ret;
+}
+
+PartitionStateListResult
+DummyPersistence::getPartitionStates() const
+{
+ _initialized = true;
+ LOG(debug, "getPartitionStates()");
+ vespalib::MonitorGuard lock(_monitor);
+ return PartitionStateListResult(_partitions);
+}
+
+#define DUMMYPERSISTENCE_VERIFY_INITIALIZED \
+ if (!_initialized) throw vespalib::IllegalStateException( \
+ "getPartitionStates() must always be called first in order to " \
+ "trigger lazy initialization.", VESPA_STRLOC)
+
+
+BucketIdListResult
+DummyPersistence::listBuckets(PartitionId id) const
+{
+ DUMMYPERSISTENCE_VERIFY_INITIALIZED;
+ LOG(debug, "listBuckets(%u)", uint16_t(id));
+ vespalib::MonitorGuard lock(_monitor);
+ BucketIdListResult::List list;
+ for (PartitionContent::const_iterator it = _content[id].begin();
+ it != _content[id].end(); ++it)
+ {
+ list.push_back(it->first);
+ }
+ return BucketIdListResult(list);
+}
+
+void
+DummyPersistence::setModifiedBuckets(const BucketIdListResult::List& buckets)
+{
+ vespalib::MonitorGuard lock(_monitor);
+ _modifiedBuckets = buckets;
+}
+
+BucketIdListResult
+DummyPersistence::getModifiedBuckets() const
+{
+ vespalib::MonitorGuard lock(_monitor);
+ return BucketIdListResult(_modifiedBuckets);
+}
+
+Result
+DummyPersistence::setClusterState(const ClusterState& c)
+{
+ vespalib::MonitorGuard lock(_monitor);
+ _clusterState.reset(new ClusterState(c));
+ if (!_clusterState->nodeUp()) {
+ for (uint32_t i=0, n=_content.size(); i<n; ++i) {
+ for (PartitionContent::iterator it = _content[i].begin();
+ it != _content[i].end(); ++it)
+ {
+ it->second->setActive(false);
+ }
+ }
+ }
+ return Result();
+}
+
+Result
+DummyPersistence::setActiveState(const Bucket& b,
+ BucketInfo::ActiveState newState)
+{
+ DUMMYPERSISTENCE_VERIFY_INITIALIZED;
+ LOG(debug, "setCurrentState(%s, %s)",
+ b.toString().c_str(),
+ newState == BucketInfo::ACTIVE ? "ACTIVE" : "INACTIVE");
+
+ BucketContentGuard::UP bc(acquireBucketWithLock(b));
+ if (!bc.get()) {
+ return BucketInfoResult(Result::TRANSIENT_ERROR, "Bucket not found");
+ }
+ (*bc)->setActive(newState == BucketInfo::ACTIVE);
+ return Result();
+}
+
+BucketInfoResult
+DummyPersistence::getBucketInfo(const Bucket& b) const
+{
+ DUMMYPERSISTENCE_VERIFY_INITIALIZED;
+ BucketContentGuard::UP bc(acquireBucketWithLock(b));
+ if (!bc.get()) {
+ LOG(debug, "getBucketInfo(%s) : (bucket not found)",
+ b.toString().c_str());
+ return BucketInfoResult(Result::TRANSIENT_ERROR, "Bucket not found");
+ }
+
+ BucketInfo info((*bc)->getBucketInfo());
+ LOG(debug, "getBucketInfo(%s) -> %s",
+ b.toString().c_str(),
+ info.toString().c_str());
+ return BucketInfoResult(info);
+}
+
+Result
+DummyPersistence::put(const Bucket& b, Timestamp t, const Document::SP& doc,
+ Context&)
+{
+ DUMMYPERSISTENCE_VERIFY_INITIALIZED;
+ LOG(debug, "put(%s, %zu, %s)",
+ b.toString().c_str(),
+ uint64_t(t),
+ doc->getId().toString().c_str());
+ BucketContentGuard::UP bc(acquireBucketWithLock(b));
+ if (!bc.get()) {
+ return BucketInfoResult(Result::TRANSIENT_ERROR, "Bucket not found");
+ }
+
+ DocEntry::LP existing = (*bc)->getEntry(t);
+ if (existing.get()) {
+ if (doc->getId() == *existing->getDocumentId()) {
+ return Result();
+ } else {
+ return Result(Result::TIMESTAMP_EXISTS,
+ "Timestamp already existed");
+ }
+ }
+
+ LOG(spam, "Inserting document %s", doc->toString(true).c_str());
+
+ DocEntry::LP entry(new DocEntry(t, NONE, Document::UP(doc->clone())));
+ (*bc)->insert(entry);
+ return Result();
+}
+
+Result
+DummyPersistence::maintain(const Bucket& b,
+ MaintenanceLevel)
+{
+ if (_simulateMaintainFailure) {
+ BucketContentGuard::UP bc(acquireBucketWithLock(b));
+ if (!bc.get()) {
+ return BucketInfoResult(Result::TRANSIENT_ERROR, "Bucket not found");
+ }
+
+ if (!(*bc)->_entries.empty()) {
+ // Simulate a corruption in a document, remove it.
+ (*bc)->_entries.pop_back();
+ }
+ (*bc)->setOutdatedInfo(true);
+ _simulateMaintainFailure = false;
+ }
+
+ return Result();
+}
+
+RemoveResult
+DummyPersistence::remove(const Bucket& b,
+ Timestamp t,
+ const DocumentId& did,
+ Context&)
+{
+ DUMMYPERSISTENCE_VERIFY_INITIALIZED;
+ LOG(debug, "remove(%s, %zu, %s)",
+ b.toString().c_str(),
+ uint64_t(t),
+ did.toString().c_str());
+
+ BucketContentGuard::UP bc(acquireBucketWithLock(b));
+ if (!bc.get()) {
+ return RemoveResult(Result::TRANSIENT_ERROR, "Bucket not found");
+ }
+
+ DocEntry::LP entry((*bc)->getEntry(did));
+ bool foundPut(entry.get() && !entry->isRemove());
+ DocEntry::LP remEntry(new DocEntry(t, REMOVE_ENTRY, did));
+
+ if ((*bc)->hasTimestamp(t)) {
+ (*bc)->eraseEntry(t);
+ }
+ (*bc)->insert(remEntry);
+ return RemoveResult(foundPut);
+}
+
+GetResult
+DummyPersistence::get(const Bucket& b,
+ const document::FieldSet& fieldSet,
+ const DocumentId& did,
+ Context&) const
+{
+ DUMMYPERSISTENCE_VERIFY_INITIALIZED;
+ LOG(debug, "get(%s, %s)",
+ b.toString().c_str(),
+ did.toString().c_str());
+ BucketContentGuard::UP bc(acquireBucketWithLock(b));
+ if (bc.get()) {
+ DocEntry::LP entry((*bc)->getEntry(did));
+ if (entry.get() != 0 && !entry->isRemove()) {
+ Document::UP doc(entry->getDocument()->clone());
+ if (fieldSet.getType() != document::FieldSet::ALL) {
+ document::FieldSet::stripFields(*doc, fieldSet);
+ }
+ return GetResult(std::move(doc), entry->getTimestamp());
+ }
+ }
+
+ return GetResult();
+}
+
+CreateIteratorResult
+DummyPersistence::createIterator(
+ const Bucket& b,
+ const document::FieldSet& fs,
+ const Selection& s,
+ IncludedVersions v,
+ Context&)
+{
+ DUMMYPERSISTENCE_VERIFY_INITIALIZED;
+ LOG(debug, "createIterator(%s)", b.toString().c_str());
+ vespalib::LinkedPtr<document::select::Node> docSelection;
+ if (!s.getDocumentSelection().getDocumentSelection().empty()) {
+ docSelection.reset(
+ parseDocumentSelection(
+ s.getDocumentSelection().getDocumentSelection(),
+ true).release());
+ if (!docSelection.get()) {
+ return CreateIteratorResult(
+ Result::PERMANENT_ERROR,
+ "Got invalid/unparseable document selection string");
+ }
+ }
+ BucketContentGuard::UP bc(acquireBucketWithLock(b));
+ if (!bc.get()) {
+ return CreateIteratorResult(Result::TRANSIENT_ERROR, "Bucket not found");
+ }
+
+ Iterator* it;
+ IteratorId id;
+ {
+ vespalib::MonitorGuard lock(_monitor);
+ id = _nextIterator;
+ ++_nextIterator;
+ assert(_iterators.find(id) == _iterators.end());
+ it = new Iterator;
+ _iterators[id] = Iterator::LP(it);
+ assert(it->_bucket.getBucketId().getRawId() == 0); // Wrap detection
+ it->_bucket = b;
+ }
+ // Memory pointed to by 'it' should now be valid from here on out
+
+ it->_fieldSet = vespalib::LinkedPtr<document::FieldSet>(fs.clone());
+ const BucketContent::GidMapType& gidMap((*bc)->_gidMap);
+
+ if (s.getTimestampSubset().empty()) {
+ typedef std::vector<BucketEntry>::const_reverse_iterator reverse_iterator;
+ for (reverse_iterator entryIter((*bc)->_entries.rbegin()),
+ entryEnd((*bc)->_entries.rend());
+ entryIter != entryEnd; ++entryIter)
+ {
+ const BucketEntry& bucketEntry(*entryIter);
+ const DocEntry& entry(*bucketEntry.entry);
+ if (entry.getTimestamp() < s.getFromTimestamp() ||
+ entry.getTimestamp() > s.getToTimestamp()) {
+ continue;
+ }
+ BucketContent::GidMapType::const_iterator gidIt(
+ gidMap.find(bucketEntry.gid));
+ assert(gidIt != gidMap.end());
+
+ if (entry.isRemove()) {
+ if (v == NEWEST_DOCUMENT_ONLY) {
+ continue;
+ }
+ if (docSelection.get()
+ && (docSelection->contains(*entry.getDocumentId())
+ != document::select::Result::True))
+ {
+ continue;
+ }
+ it->_leftToIterate.push_back(entry.getTimestamp());
+ } else {
+ if (v != ALL_VERSIONS && gidIt->second.get() != &entry) {
+ // Not newest version of document; skip it. Commonly, the
+ // document may have been removed, meaning the GID map entry
+ // points to a remove instead.
+ continue;
+ }
+ if (docSelection.get()
+ && (docSelection->contains(*entry.getDocument())
+ != document::select::Result::True))
+ {
+ continue;
+ }
+ it->_leftToIterate.push_back(entry.getTimestamp());
+ }
+ }
+ } else {
+ it->_leftToIterate = s.getTimestampSubset();
+ }
+ return CreateIteratorResult(id);
+}
+
+IterateResult
+DummyPersistence::iterate(IteratorId id, uint64_t maxByteSize, Context& ctx) const
+{
+ DUMMYPERSISTENCE_VERIFY_INITIALIZED;
+ LOG(debug, "iterate(%zu, %zu)", uint64_t(id), maxByteSize);
+ ctx.trace(9, "started iterate()");
+ Iterator* it;
+ {
+ vespalib::MonitorGuard lock(_monitor);
+ std::map<IteratorId, Iterator::LP>::iterator iter(_iterators.find(id));
+ if (iter == _iterators.end()) {
+ return IterateResult(Result::PERMANENT_ERROR,
+ "Bug! Used iterate without sending createIterator first");
+ }
+ it = iter->second.get();
+ }
+
+ BucketContentGuard::UP bc(acquireBucketWithLock(it->_bucket));
+ if (!bc.get()) {
+ ctx.trace(9, "finished iterate(); bucket not found");
+ return IterateResult(Result::TRANSIENT_ERROR, "Bucket not found");
+ }
+ LOG(debug, "Iterator %zu acquired bucket lock", uint64_t(id));
+
+ std::vector<DocEntry::LP> entries;
+ uint32_t currentSize = 0;
+ uint32_t fastPath = 0;
+ while (!it->_leftToIterate.empty()) {
+ Timestamp next(it->_leftToIterate.back());
+ DocEntry::LP entry((*bc)->getEntry(next));
+ if (entry.get() != 0) {
+ uint32_t size = entry->getSize();
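+ // Always include at least one entry, even if it alone exceeds
+ // maxByteSize; otherwise stop before overshooting the byte budget.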
+ if (currentSize != 0 && currentSize + size > maxByteSize) break;
+ currentSize += size;
+ if (!entry->isRemove()
+ && it->_fieldSet->getType() != document::FieldSet::ALL)
+ {
+ assert(entry->getDocument());
+ // Create new document with only wanted fields.
+ Document::UP filtered(
+ document::FieldSet::createDocumentSubsetCopy(
+ *entry->getDocument(),
+ *it->_fieldSet));
+ DocEntry::LP ret(new DocEntry(entry->getTimestamp(),
+ entry->getFlags(),
+ std::move(filtered),
+ entry->getPersistedDocumentSize()));
+ entries.push_back(ret);
+ } else {
+ // Use entry as-is.
+ entries.push_back(DocEntry::LP(entry->clone()));
+ ++fastPath;
+ }
+ }
+ it->_leftToIterate.pop_back();
+ }
+ if (ctx.shouldTrace(9)) {
+ ctx.trace(9, vespalib::make_string("finished iterate(), returning %zu "
+ "documents with %u bytes of data",
+ entries.size(),
+ currentSize));
+ }
+ LOG(debug, "finished iterate(%zu, %zu), returning %zu documents "
+ "with %u bytes of data. %u docs cloned in fast path",
+ uint64_t(id),
+ maxByteSize,
+ entries.size(),
+ currentSize,
+ fastPath);
+ return IterateResult(entries, it->_leftToIterate.empty());
+}
+
+Result
+DummyPersistence::destroyIterator(IteratorId id, Context&)
+{
+ DUMMYPERSISTENCE_VERIFY_INITIALIZED;
+ LOG(debug, "destroyIterator(%zu)", uint64_t(id));
+ vespalib::MonitorGuard lock(_monitor);
+ if (_iterators.find(id) != _iterators.end()) {
+ _iterators.erase(id);
+ }
+ return Result();
+}
+
+Result
+DummyPersistence::createBucket(const Bucket& b, Context&)
+{
+ DUMMYPERSISTENCE_VERIFY_INITIALIZED;
+ LOG(debug, "createBucket(%s)", b.toString().c_str());
+ vespalib::MonitorGuard lock(_monitor);
+ if (_content[b.getPartition()].find(b) == _content[b.getPartition()].end()) {
+ _content[b.getPartition()][b] = BucketContent::LP(new BucketContent);
+ } else {
+ assert(!_content[b.getPartition()][b]->_inUse);
+ LOG(debug, "%s already existed", b.toString().c_str());
+ }
+ return Result();
+}
+
+Result
+DummyPersistence::deleteBucket(const Bucket& b, Context&)
+{
+ DUMMYPERSISTENCE_VERIFY_INITIALIZED;
+ LOG(debug, "deleteBucket(%s)", b.toString().c_str());
+ vespalib::MonitorGuard lock(_monitor);
+ if (_content[b.getPartition()][b].get()) {
+ assert(!_content[b.getPartition()][b]->_inUse);
+ }
+ _content[b.getPartition()].erase(b);
+ return Result();
+}
+
+Result
+DummyPersistence::split(const Bucket& source,
+ const Bucket& target1,
+ const Bucket& target2,
+ Context& context)
+{
+ DUMMYPERSISTENCE_VERIFY_INITIALIZED;
+ LOG(debug, "split(%s -> %s, %s)",
+ source.toString().c_str(),
+ target1.toString().c_str(),
+ target2.toString().c_str());
+ createBucket(source, context);
+ createBucket(target1, context);
+ createBucket(target2, context);
+
+ BucketContentGuard::UP sourceGuard(acquireBucketWithLock(source));
+ if (!sourceGuard.get()) {
+ LOG(debug, "%s not found", source.toString().c_str());
+ return Result(Result::TRANSIENT_ERROR, "Bucket not found");
+ }
+ BucketContentGuard::UP target1Guard(acquireBucketWithLock(target1));
+ BucketContentGuard::UP target2Guard(acquireBucketWithLock(target2));
+ assert(target1Guard.get());
+ assert(target2Guard.get());
+
+ BucketContent& sour(**sourceGuard);
+ BucketContent& targ1(**target1Guard);
+ BucketContent& targ2(**target2Guard);
+
+ document::BucketIdFactory idFactory;
+
+ // Add entries
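+ // A document belongs in target1 if its bucket id, truncated to
+ // target1's used-bit count, equals target1's id; otherwise it goes
+ // to target2.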
+ for (uint32_t i=0; i<sour._entries.size(); ++i) {
+ DocEntry::LP entry(sour._entries[i].entry);
+
+ document::BucketId bId(
+ target1.getBucketId().getUsedBits(),
+ idFactory.getBucketId(*entry->getDocumentId()).getRawId());
+
+ if (bId == target1.getBucketId()) {
+ targ1.insert(entry);
+ } else {
+ targ2.insert(entry);
+ }
+ }
+ targ1.setActive(sour.isActive());
+ targ2.setActive(sour.isActive());
+ sourceGuard.reset(0);
+ LOG(debug, "erasing split source %s",
+ source.toString().c_str());
+ deleteBucket(source, context);
+
+ return Result();
+}
+
+Result
+DummyPersistence::join(const Bucket& source1, const Bucket& source2,
+ const Bucket& target, Context& context)
+{
+ DUMMYPERSISTENCE_VERIFY_INITIALIZED;
+ LOG(debug, "join(%s, %s -> %s)",
+ source1.toString().c_str(),
+ source2.toString().c_str(),
+ target.toString().c_str());
+ createBucket(target, context);
+ BucketContentGuard::UP targetGuard(acquireBucketWithLock(target));
+ assert(targetGuard.get());
+
+ bool active = false;
+ for (uint32_t j=0; j<2; ++j) {
+ Bucket source(j == 0 ? source1 : source2);
+ BucketContentGuard::UP sourceGuard(acquireBucketWithLock(source));
+
+ if (!sourceGuard.get()) {
+ continue;
+ }
+ BucketContent& sour(**sourceGuard);
+ active |= sour.isActive();
+
+ for (uint32_t i=0; i<sour._entries.size(); ++i) {
+ DocEntry::LP entry(sour._entries[i].entry);
+ (*targetGuard)->insert(entry);
+ }
+ sourceGuard.reset(0);
+ deleteBucket(source, context);
+ }
+ (*targetGuard)->setActive(active);
+
+ return Result();
+}
+
+Result
+DummyPersistence::revert(const Bucket& b, Timestamp t, Context&)
+{
+ DUMMYPERSISTENCE_VERIFY_INITIALIZED;
+ LOG(debug, "revert(%s, %zu)",
+ b.toString().c_str(),
+ uint64_t(t));
+
+ BucketContentGuard::UP bc(acquireBucketWithLock(b));
+ if (!bc.get()) {
+ return BucketInfoResult(Result::TRANSIENT_ERROR, "Bucket not found");
+ }
+
+ BucketContent& content(**bc);
+ DocEntry::LP docEntry(content.getEntry(t));
+ if (!docEntry.get()) {
+ return Result();
+ }
+
+ GlobalId gid(docEntry->getDocumentId()->getGlobalId());
+ BucketContent::GidMapType::iterator gidIt(content._gidMap.find(gid));
+ assert(gidIt != content._gidMap.end());
+
+ std::vector<BucketEntry> newEntries;
+ newEntries.reserve(content._entries.size() - 1);
+ Timestamp timestampToRestore(0);
+ for (uint32_t i=0; i<content._entries.size(); ++i) {
+ BucketEntry e(content._entries[i]);
+ if (e.entry->getTimestamp() == t) continue;
+ if (e.gid == gid
+ && e.entry->getTimestamp() > timestampToRestore)
+ {
+ // Set GID map entry to newest non-reverted doc entry
+ assert(e.entry.get() != gidIt->second.get());
+ LOG(spam, "Remapping GID to point to %s",
+ e.entry->toString().c_str());
+ gidIt->second = e.entry;
+ timestampToRestore = e.entry->getTimestamp();
+ }
+ newEntries.push_back(e);
+ }
+ if (timestampToRestore == 0) {
+ LOG(spam, "Found no entry to revert to for %s; erasing from GID map",
+ docEntry->toString().c_str());
+ content._gidMap.erase(gidIt);
+ }
+ newEntries.swap(content._entries);
+ content.setOutdatedInfo(true);
+
+ return Result();
+}
+
+std::string
+DummyPersistence::dumpBucket(const Bucket& b) const
+{
+ DUMMYPERSISTENCE_VERIFY_INITIALIZED;
+ LOG(spam, "dumpBucket(%s)", b.toString().c_str());
+ vespalib::MonitorGuard lock(_monitor);
+ PartitionContent::const_iterator it(_content[b.getPartition()].find(b));
+ if (it == _content[b.getPartition()].end()) {
+ return "DOESN'T EXIST";
+ } else {
+ std::ostringstream ost;
+ for (uint32_t i=0; i<it->second->_entries.size(); ++i) {
+ const DocEntry& entry(*it->second->_entries[i].entry);
+ ost << entry << "\n";
+ }
+
+ return ost.str();
+ }
+}
+
+bool
+DummyPersistence::isActive(const Bucket& b) const
+{
+ DUMMYPERSISTENCE_VERIFY_INITIALIZED;
+ vespalib::MonitorGuard lock(_monitor);
+ LOG(spam, "isActive(%s)", b.toString().c_str());
+ PartitionContent::const_iterator it(_content[b.getPartition()].find(b));
+ if (it == _content[b.getPartition()].end()) {
+ return false;
+ }
+ return it->second->isActive();
+}
+
+BucketContentGuard::~BucketContentGuard()
+{
+ _persistence.releaseBucketNoLock(_content);
+}
+
+BucketContentGuard::UP
+DummyPersistence::acquireBucketWithLock(const Bucket& b) const
+{
+ vespalib::MonitorGuard lock(_monitor);
+ DummyPersistence& ncp(const_cast<DummyPersistence&>(*this));
+ PartitionContent::iterator it(ncp._content[b.getPartition()].find(b));
+ if (it == ncp._content[b.getPartition()].end()) {
+ return BucketContentGuard::UP();
+ }
+ // Sanity check that SPI-level locking is doing its job correctly.
+ // Atomic CAS might be a bit overkill, but since we "release" the bucket
+ // outside of the mutex, we want to ensure the write is visible across all
+ // threads.
+ bool bucketNotInUse(vespalib::Atomic::cmpSwap(&it->second->_inUse, 1, 0));
+ if (!bucketNotInUse) {
+ LOG(error, "Attempted to acquire %s, but it was already marked as being in use!",
+ b.toString().c_str());
+ assert(false);
+ }
+
+ return BucketContentGuard::UP(new BucketContentGuard(ncp, *it->second));
+}
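+
+ // Typical usage of the returned guard (a sketch; mirrors put() above).
+ // The in-use flag is cleared in the guard's destructor, so early returns
+ // need no explicit cleanup:
+ //
+ //   BucketContentGuard::UP bc(acquireBucketWithLock(b));
+ //   if (!bc.get()) {
+ //       return Result(Result::TRANSIENT_ERROR, "Bucket not found");
+ //   }
+ //   (*bc)->insert(entry);  // _inUse is cleared when bc goes out of scope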
+
+void
+DummyPersistence::releaseBucketNoLock(const BucketContent& bc) const
+{
+ bool bucketInUse(vespalib::Atomic::cmpSwap(&bc._inUse, 0, 1));
+ assert(bucketInUse);
+ (void) bucketInUse;
+}
+
+} // dummy
+} // spi
+} // storage
diff --git a/persistence/src/vespa/persistence/dummyimpl/dummypersistence.h b/persistence/src/vespa/persistence/dummyimpl/dummypersistence.h
new file mode 100644
index 00000000000..eca3f2d48cc
--- /dev/null
+++ b/persistence/src/vespa/persistence/dummyimpl/dummypersistence.h
@@ -0,0 +1,255 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * \class storage::spi::dummy::DummyPersistence
+ * \ingroup dummy
+ *
+ * \brief Simple implementation of the persistence SPI.
+ */
+
+#pragma once
+
+#include <vespa/persistence/spi/abstractpersistenceprovider.h>
+#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/document/fieldset/fieldsets.h>
+#include <vespa/vespalib/util/sync.h>
+#include <vespa/vespalib/stllike/hash_map.h>
+
+namespace document {
+
+class FieldSet;
+
+namespace select {
+
+class Node;
+
+}
+}
+
+namespace storage {
+namespace spi {
+namespace dummy {
+
+struct BucketEntry
+{
+ DocEntry::LP entry;
+ GlobalId gid;
+
+ BucketEntry(const DocEntry::LP& e, const GlobalId& g)
+ : entry(e),
+ gid(g)
+ {
+ }
+};
+
+struct BucketContent {
+ typedef vespalib::hash_map<
+ document::GlobalId,
+ DocEntry::LP,
+ document::GlobalId::hash
+ > GidMapType;
+
+ typedef vespalib::LinkedPtr<BucketContent> LP;
+
+ std::vector<BucketEntry> _entries;
+ GidMapType _gidMap;
+ mutable BucketInfo _info;
+ mutable uint32_t _inUse;
+ mutable bool _outdatedInfo;
+ bool _active;
+
+ BucketContent()
+ : _entries(),
+ _gidMap(),
+ _info(),
+ _inUse(0),
+ _outdatedInfo(true),
+ _active(false)
+ {
+ }
+
+ uint32_t computeEntryChecksum(const BucketEntry&) const;
+ BucketChecksum updateRollingChecksum(uint32_t entryChecksum);
+
+ /**
+ * Get bucket info, potentially recomputing it if it's outdated. In the
+ * latter case, the cached bucket info will be updated.
+ */
+ const BucketInfo& getBucketInfo() const;
+ BucketInfo& getMutableBucketInfo() { return _info; }
+ bool hasTimestamp(Timestamp) const;
+ void insert(DocEntry::LP);
+ DocEntry::LP getEntry(const DocumentId&) const;
+ DocEntry::LP getEntry(Timestamp) const;
+ void eraseEntry(Timestamp t);
+ void setActive(bool active = true) {
+ _active = active;
+ _info = BucketInfo(_info.getChecksum(),
+ _info.getDocumentCount(),
+ _info.getDocumentSize(),
+ _info.getEntryCount(),
+ _info.getUsedSize(),
+ _info.getReady(),
+ active ? BucketInfo::ACTIVE : BucketInfo::NOT_ACTIVE);
+ }
+ bool isActive() const { return _active; }
+ void setOutdatedInfo(bool outdated) { _outdatedInfo = outdated; }
+ bool hasOutdatedInfo() const { return _outdatedInfo; }
+};
+
+struct Iterator {
+ typedef vespalib::LinkedPtr<Iterator> LP;
+ Bucket _bucket;
+ std::vector<Timestamp> _leftToIterate;
+ vespalib::LinkedPtr<document::FieldSet> _fieldSet;
+};
+
+class DummyPersistence;
+
+class BucketContentGuard
+{
+ BucketContentGuard(const BucketContentGuard&);
+ BucketContentGuard& operator=(const BucketContentGuard&);
+public:
+ typedef std::unique_ptr<BucketContentGuard> UP;
+
+ BucketContentGuard(DummyPersistence& persistence,
+ BucketContent& content)
+ : _persistence(persistence),
+ _content(content)
+ {
+ }
+ ~BucketContentGuard();
+
+ BucketContent& getContent() {
+ return _content;
+ }
+
+ BucketContent* operator->() {
+ return &_content;
+ }
+
+ BucketContent& operator*() {
+ return _content;
+ }
+private:
+ DummyPersistence& _persistence;
+ BucketContent& _content;
+};
+
+class DummyPersistence : public AbstractPersistenceProvider
+{
+public:
+ DummyPersistence(const document::DocumentTypeRepo::SP& repo,
+ uint16_t partitionCount = 1);
+
+ PartitionStateListResult getPartitionStates() const;
+ BucketIdListResult listBuckets(PartitionId) const;
+
+ void setModifiedBuckets(const BucketIdListResult::List& result);
+
+ /**
+ * Returns the list set by setModifiedBuckets(), then clears
+ * the list.
+ */
+ BucketIdListResult getModifiedBuckets() const;
+
+ Result setClusterState(const ClusterState& newState);
+
+ Result setActiveState(const Bucket& bucket,
+ BucketInfo::ActiveState newState);
+
+ BucketInfoResult getBucketInfo(const Bucket&) const;
+
+ Result put(const Bucket&, Timestamp, const Document::SP&, Context&);
+ GetResult get(const Bucket&,
+ const document::FieldSet& fieldSet,
+ const DocumentId&,
+ Context&) const;
+
+ RemoveResult remove(const Bucket& b,
+ Timestamp t,
+ const DocumentId& did,
+ Context&);
+
+ CreateIteratorResult createIterator(const Bucket&,
+ const document::FieldSet& fs,
+ const Selection&,
+ IncludedVersions,
+ Context&);
+
+ IterateResult iterate(IteratorId, uint64_t maxByteSize, Context&) const;
+ Result destroyIterator(IteratorId, Context&);
+
+ Result createBucket(const Bucket&, Context&);
+ Result deleteBucket(const Bucket&, Context&);
+
+ Result split(const Bucket& source,
+ const Bucket& target1,
+ const Bucket& target2,
+ Context&);
+
+ Result join(const Bucket& source1,
+ const Bucket& source2,
+ const Bucket& target,
+ Context&);
+
+ Result revert(const Bucket&, Timestamp, Context&);
+
+ Result maintain(const Bucket& bucket,
+ MaintenanceLevel level);
+
+ /**
+ * The following methods are used only for unit testing.
+ * DummyPersistence is used in many places to test the framework around it.
+ */
+
+ /**
+ * Dumps the contents of a bucket to a string and returns it.
+ */
+ std::string dumpBucket(const Bucket&) const;
+
+ /**
+ * Returns true if the given bucket has been tagged as active.
+ */
+ bool isActive(const Bucket&) const;
+
+ const ClusterState& getClusterState() const {
+ return *_clusterState;
+ }
+
+ void simulateMaintenanceFailure() {
+ _simulateMaintainFailure = true;
+ }
+
+private:
+ friend class BucketContentGuard;
+ // Const since these functions only alter mutable fields in BucketContent
+ BucketContentGuard::UP acquireBucketWithLock(const Bucket& b) const;
+ void releaseBucketNoLock(const BucketContent& bc) const;
+
+ mutable bool _initialized;
+ document::DocumentTypeRepo::SP _repo;
+ PartitionStateList _partitions;
+ typedef vespalib::hash_map<Bucket, BucketContent::LP, document::BucketId::hash>
+ PartitionContent;
+
+ std::vector<PartitionContent> _content;
+ IteratorId _nextIterator;
+ mutable std::map<IteratorId, Iterator::LP> _iterators;
+ vespalib::Monitor _monitor;
+
+ std::unique_ptr<ClusterState> _clusterState;
+
+ bool _simulateMaintainFailure;
+
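+ // Returns a null pointer if parsing fails, or if the parsed node is a
+ // leaf and allowLeaf is false.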
+ document::select::Node::UP parseDocumentSelection(
+ const string& documentSelection,
+ bool allowLeaf);
+
+ mutable BucketIdListResult::List _modifiedBuckets;
+};
+
+} // dummy
+} // spi
+} // storage
+
diff --git a/persistence/src/vespa/persistence/proxy/.gitignore b/persistence/src/vespa/persistence/proxy/.gitignore
new file mode 100644
index 00000000000..7e7c0fe7fae
--- /dev/null
+++ b/persistence/src/vespa/persistence/proxy/.gitignore
@@ -0,0 +1,2 @@
+/.depend
+/Makefile
diff --git a/persistence/src/vespa/persistence/proxy/CMakeLists.txt b/persistence/src/vespa/persistence/proxy/CMakeLists.txt
new file mode 100644
index 00000000000..279bc779ed0
--- /dev/null
+++ b/persistence/src/vespa/persistence/proxy/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_library(persistence_proxy OBJECT
+ SOURCES
+ buildid.cpp
+ providerproxy.cpp
+ providerstub.cpp
+ DEPENDS
+)
diff --git a/persistence/src/vespa/persistence/proxy/buildid.cpp b/persistence/src/vespa/persistence/proxy/buildid.cpp
new file mode 100644
index 00000000000..e102288610c
--- /dev/null
+++ b/persistence/src/vespa/persistence/proxy/buildid.cpp
@@ -0,0 +1,8 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include "buildid.h"
+
+const char *storage::spi::getBuildId() {
+ return V_TAG_COMPONENT;
+}
diff --git a/persistence/src/vespa/persistence/proxy/buildid.h b/persistence/src/vespa/persistence/proxy/buildid.h
new file mode 100644
index 00000000000..e911141ae9b
--- /dev/null
+++ b/persistence/src/vespa/persistence/proxy/buildid.h
@@ -0,0 +1,12 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+namespace storage {
+namespace spi {
+
+const char *getBuildId();
+
+} // namespace spi
+} // namespace storage
+
diff --git a/persistence/src/vespa/persistence/proxy/providerproxy.cpp b/persistence/src/vespa/persistence/proxy/providerproxy.cpp
new file mode 100644
index 00000000000..8c37a4da5ca
--- /dev/null
+++ b/persistence/src/vespa/persistence/proxy/providerproxy.cpp
@@ -0,0 +1,493 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP(".providerproxy");
+
+#include "buildid.h"
+#include "providerproxy.h"
+#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/document/fieldset/fieldsetrepo.h>
+#include <vespa/document/serialization/vespadocumentdeserializer.h>
+#include <vespa/document/serialization/vespadocumentserializer.h>
+#include <vespa/document/util/bytebuffer.h>
+#include <vespa/vespalib/objects/nbostream.h>
+#include <vespa/vespalib/util/noncopyable.hpp>
+
+using document::BucketId;
+using document::ByteBuffer;
+using document::DocumentTypeRepo;
+using document::VespaDocumentDeserializer;
+using document::VespaDocumentSerializer;
+using vespalib::nbostream;
+
+namespace storage {
+namespace spi {
+namespace {
+void addBucket(FRT_Values &values, const Bucket &bucket) {
+ values.AddInt64(bucket.getBucketId().getId());
+ values.AddInt64(bucket.getPartition());
+}
+
+void addDocument(FRT_Values &values, const Document &doc) {
+ nbostream stream;
+ VespaDocumentSerializer serializer(stream);
+ serializer.write(doc, document::COMPLETE);
+ values.AddData(stream.c_str(), stream.size());
+}
+
+void addString(FRT_Values &values, const string &s) {
+ values.AddString(s.data(), s.size());
+}
+
+void addSelection(FRT_Values &values, const Selection &selection) {
+ addString(values, selection.getDocumentSelection().getDocumentSelection());
+ values.AddInt64(selection.getFromTimestamp());
+ values.AddInt64(selection.getToTimestamp());
+ std::copy(selection.getTimestampSubset().begin(),
+ selection.getTimestampSubset().end(),
+ values.AddInt64Array(selection.getTimestampSubset().size()));
+}
+
+void addDocumentUpdate(FRT_Values &values, const DocumentUpdate &update) {
+ nbostream stream;
+ update.serializeHEAD(stream);
+ values.AddData(stream.c_str(), stream.size());
+}
+
+Document::UP readDocument(nbostream &stream, const DocumentTypeRepo &repo) {
+ const uint16_t version = 8;
+ VespaDocumentDeserializer deserializer(repo, stream, version);
+ Document::UP doc(new Document);
+ deserializer.read(*doc);
+ return doc;
+}
+
+string getString(const FRT_StringValue &str) {
+ return string(str._str, str._len);
+}
+
+string getString(const FRT_Value &value) {
+ return getString(value._string);
+}
+
+template <typename ResultType>
+ResultType readError(const FRT_Values &values) {
+ uint8_t error_code = values[0]._intval8;
+ string error_msg = getString(values[1]);
+ return ResultType(Result::ErrorType(error_code), error_msg);
+}
+
+bool invokeRpc(FRT_Target *target, FRT_RPCRequest &req, const char *res_spec) {
+ target->InvokeSync(&req, 0.0); // no timeout
+ req.CheckReturnTypes(res_spec);
+ return req.GetErrorCode() == FRTE_NO_ERROR;
+}
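+
+ // The res_spec strings below use FRT type codes (as implied by the
+ // encoders/decoders in this file): 'b' = int8, 'i' = int32, 'l' = int64,
+ // 's' = string, 'x' = data; upper-case variants ('I', 'L', 'S', 'X')
+ // denote arrays. For example, "bsL" decodes as error code, error message
+ // and an int64 bucket id array.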
+
+struct RequestScopedPtr : vespalib::noncopyable {
+ FRT_RPCRequest *req;
+ RequestScopedPtr(FRT_RPCRequest *r) : req(r) { assert(req); }
+ ~RequestScopedPtr() { req->SubRef(); }
+ FRT_RPCRequest *operator->() { return req; }
+ FRT_RPCRequest &operator*() { return *req; }
+};
+} // namespace
+
+template <typename ResultType>
+ResultType ProviderProxy::invokeRpc_Return(FRT_RPCRequest &req,
+ const char *res_spec) const
+{
+ if (!invokeRpc(_target, req, res_spec)) {
+ return ResultType(Result::FATAL_ERROR,
+ vespalib::make_string("Error %s when running RPC request %s",
+ req.GetErrorMessage(),
+ req.GetMethodName()));
+ }
+ return readResult<ResultType>(*req.GetReturn());
+}
+
+template <typename ResultType>
+ResultType ProviderProxy::readResult(const FRT_Values &values) const {
+ if (values[0]._intval8 != Result::NONE) {
+ return readError<ResultType>(values);
+ }
+ return readNoError<ResultType>(values);
+}
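+
+ // readNoError<T> is specialized per result type below; each specialization
+ // decodes values[2..] according to the res_spec used for that RPC method.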
+
+template <>
+Result ProviderProxy::readNoError(const FRT_Values &) const {
+ return Result();
+}
+
+template <>
+PartitionStateListResult
+ProviderProxy::readNoError(const FRT_Values &values) const {
+ FRT_LPT(uint32_t) state_array = values[2]._int32_array;
+ FRT_LPT(FRT_StringValue) reason_array = values[3]._string_array;
+ PartitionStateList states(state_array._len);
+ for (size_t i = 0; i < state_array._len; ++i) {
+ PartitionState::State state =
+ static_cast<PartitionState::State>(state_array._pt[i]);
+ string reason = getString(reason_array._pt[i]);
+ states[i] = PartitionState(state, reason);
+ }
+ return PartitionStateListResult(states);
+}
+
+template <>
+BucketIdListResult ProviderProxy::readNoError(const FRT_Values &values) const {
+ BucketIdListResult::List list;
+ for (uint32_t i = 0; i < values[2]._int64_array._len; ++i) {
+ list.push_back(BucketId(values[2]._int64_array._pt[i]));
+ }
+ return BucketIdListResult(list);
+}
+
+template <>
+BucketInfoResult ProviderProxy::readNoError(const FRT_Values &values) const {
+ BucketInfo info(BucketChecksum(values[2]._intval32),
+ values[3]._intval32,
+ values[4]._intval32,
+ values[5]._intval32,
+ values[6]._intval32,
+ static_cast<BucketInfo::ReadyState>(
+ values[7]._intval8),
+ static_cast<BucketInfo::ActiveState>(
+ values[8]._intval8));
+ return BucketInfoResult(info);
+}
+
+template <>
+RemoveResult ProviderProxy::readNoError(const FRT_Values &values) const {
+ return RemoveResult(values[2]._intval8);
+}
+
+template <>
+UpdateResult ProviderProxy::readNoError(const FRT_Values &values) const {
+ return UpdateResult(Timestamp(values[2]._intval64));
+}
+
+template <>
+GetResult ProviderProxy::readNoError(const FRT_Values &values) const {
+ nbostream stream(values[3]._data._buf, values[3]._data._len);
+ if (stream.empty()) {
+ return GetResult();
+ }
+ return GetResult(readDocument(stream, *_repo),
+ Timestamp(values[2]._intval64));
+}
+
+template <>
+CreateIteratorResult ProviderProxy::readNoError(const FRT_Values &values) const
+{
+ return CreateIteratorResult(IteratorId(values[2]._intval64));
+}
+
+template <>
+IterateResult ProviderProxy::readNoError(const FRT_Values &values) const {
+ IterateResult::List result;
+ assert(values[2]._int64_array._len == values[3]._int32_array._len &&
+ values[2]._int64_array._len == values[4]._string_array._len &&
+ values[2]._int64_array._len == values[5]._data_array._len);
+ for (uint32_t i = 0; i < values[2]._int64_array._len; ++i) {
+ Timestamp timestamp(values[2]._int64_array._pt[i]);
+ uint32_t meta_flags = values[3]._int32_array._pt[i];
+ string doc_id(getString(values[4]._string_array._pt[i]));
+ nbostream stream(values[5]._data_array._pt[i]._buf,
+ values[5]._data_array._pt[i]._len);
+ DocEntry::LP entry;
+ if (!stream.empty()) {
+ Document::UP doc = readDocument(stream, *_repo);
+ entry.reset(new DocEntry(timestamp, meta_flags, std::move(doc)));
+ } else if (!doc_id.empty()) {
+ entry.reset(
+ new DocEntry(timestamp, meta_flags, DocumentId(doc_id)));
+ } else {
+ entry.reset(new DocEntry(timestamp, meta_flags));
+ }
+ result.push_back(entry);
+ }
+
+ return IterateResult(result, values[6]._intval8);
+}
+
+namespace {
+bool shouldFailFast(uint32_t error_code) {
+ return error_code != FRTE_RPC_TIMEOUT
+ && error_code != FRTE_RPC_CONNECTION
+ && error_code != FRTE_RPC_OVERLOAD
+ && error_code != FRTE_NO_ERROR;
+}
+} // namespace
+
+ProviderProxy::ProviderProxy(const vespalib::string &connect_spec,
+ const DocumentTypeRepo &repo)
+ : _supervisor(),
+ _target(0),
+ _repo(&repo)
+{
+ _supervisor.Start();
+ bool connected = false;
+ _target = _supervisor.GetTarget(connect_spec.c_str());
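+ // Handshake with up to 400 attempts: 100 at 100ms (~10s), then 300 at
+ // 1s (~5m), before giving up.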
+ for (size_t i = 0; !connected && (i < (100 + 300)); ++i) {
+ FRT_RPCRequest *req = new FRT_RPCRequest();
+ req->SetMethodName("vespa.persistence.connect");
+ const string build_id = getBuildId();
+ req->GetParams()->AddString(build_id.data(), build_id.size());
+ _target->InvokeSync(req, 5.0);
+ connected = req->CheckReturnTypes("");
+ uint32_t error_code = req->GetErrorCode();
+ req->SubRef();
+ if (!connected) {
+ if (shouldFailFast(error_code)) {
+ break;
+ }
+ _target->SubRef();
+ if (i < 100) {
+ FastOS_Thread::Sleep(100); // retry each 100ms for 10s
+ } else {
+ FastOS_Thread::Sleep(1000); // retry each 1s for 5m
+ }
+ _target = _supervisor.GetTarget(connect_spec.c_str());
+ }
+ }
+ if (!connected) {
+ LOG(error, "could not connect to peer");
+ }
+}
+
+ProviderProxy::~ProviderProxy() {
+ _target->SubRef();
+ _supervisor.ShutDown(true);
+}
+
+Result ProviderProxy::initialize() {
+ RequestScopedPtr req(_supervisor.AllocRPCRequest());
+ req->SetMethodName("vespa.persistence.initialize");
+ return invokeRpc_Return<Result>(*req, "bs");
+}
+
+PartitionStateListResult ProviderProxy::getPartitionStates() const {
+ RequestScopedPtr req(_supervisor.AllocRPCRequest());
+ req->SetMethodName("vespa.persistence.getPartitionStates");
+ return invokeRpc_Return<PartitionStateListResult>(*req, "bsIS");
+}
+
+BucketIdListResult ProviderProxy::listBuckets(PartitionId partition) const {
+ RequestScopedPtr req(_supervisor.AllocRPCRequest());
+ req->SetMethodName("vespa.persistence.listBuckets");
+ req->GetParams()->AddInt64(partition);
+
+ return invokeRpc_Return<BucketIdListResult>(*req, "bsL");
+}
+
+Result ProviderProxy::setClusterState(const ClusterState& clusterState) {
+ RequestScopedPtr req(_supervisor.AllocRPCRequest());
+ req->SetMethodName("vespa.persistence.setClusterState");
+
+ vespalib::nbostream o;
+ clusterState.serialize(o);
+ req->GetParams()->AddData(o.c_str(), o.size());
+ return invokeRpc_Return<Result>(*req, "bs");
+}
+
+Result ProviderProxy::setActiveState(const Bucket &bucket,
+ BucketInfo::ActiveState newState) {
+ RequestScopedPtr req(_supervisor.AllocRPCRequest());
+ req->SetMethodName("vespa.persistence.setActiveState");
+ addBucket(*req->GetParams(), bucket);
+ req->GetParams()->AddInt8(newState);
+ return invokeRpc_Return<Result>(*req, "bs");
+}
+
+BucketInfoResult ProviderProxy::getBucketInfo(const Bucket &bucket) const {
+ RequestScopedPtr req(_supervisor.AllocRPCRequest());
+ req->SetMethodName("vespa.persistence.getBucketInfo");
+ addBucket(*req->GetParams(), bucket);
+ return invokeRpc_Return<BucketInfoResult>(*req, "bsiiiiibb");
+}
+
+Result ProviderProxy::put(const Bucket &bucket, Timestamp timestamp,
+ const Document::SP& doc, Context&)
+{
+ RequestScopedPtr req(_supervisor.AllocRPCRequest());
+ req->SetMethodName("vespa.persistence.put");
+ addBucket(*req->GetParams(), bucket);
+ req->GetParams()->AddInt64(timestamp);
+ addDocument(*req->GetParams(), *doc);
+ return invokeRpc_Return<Result>(*req, "bs");
+}
+
+RemoveResult ProviderProxy::remove(const Bucket &bucket,
+ Timestamp timestamp,
+ const DocumentId &id,
+ Context&)
+{
+ RequestScopedPtr req(_supervisor.AllocRPCRequest());
+ req->SetMethodName("vespa.persistence.removeById");
+ addBucket(*req->GetParams(), bucket);
+ req->GetParams()->AddInt64(timestamp);
+ addString(*req->GetParams(), id.toString());
+ return invokeRpc_Return<RemoveResult>(*req, "bsb");
+}
+
+RemoveResult ProviderProxy::removeIfFound(const Bucket &bucket,
+ Timestamp timestamp,
+ const DocumentId &id,
+ Context&)
+{
+ RequestScopedPtr req(_supervisor.AllocRPCRequest());
+ req->SetMethodName("vespa.persistence.removeIfFound");
+ addBucket(*req->GetParams(), bucket);
+ req->GetParams()->AddInt64(timestamp);
+ addString(*req->GetParams(), id.toString());
+ return invokeRpc_Return<RemoveResult>(*req, "bsb");
+}
+
+UpdateResult ProviderProxy::update(const Bucket &bucket, Timestamp timestamp,
+ const DocumentUpdate::SP& doc_update,
+ Context&)
+{
+ RequestScopedPtr req(_supervisor.AllocRPCRequest());
+ req->SetMethodName("vespa.persistence.update");
+ addBucket(*req->GetParams(), bucket);
+ req->GetParams()->AddInt64(timestamp);
+ addDocumentUpdate(*req->GetParams(), *doc_update);
+ return invokeRpc_Return<UpdateResult>(*req, "bsl");
+}
+
+Result ProviderProxy::flush(const Bucket &bucket, Context&) {
+ RequestScopedPtr req(_supervisor.AllocRPCRequest());
+ req->SetMethodName("vespa.persistence.flush");
+ addBucket(*req->GetParams(), bucket);
+ return invokeRpc_Return<Result>(*req, "bs");
+}
+
+GetResult ProviderProxy::get(const Bucket &bucket,
+ const document::FieldSet& fieldSet,
+ const DocumentId &doc_id,
+ Context&) const
+{
+ RequestScopedPtr req(_supervisor.AllocRPCRequest());
+ req->SetMethodName("vespa.persistence.get");
+ document::FieldSetRepo repo;
+ addBucket(*req->GetParams(), bucket);
+ addString(*req->GetParams(), repo.serialize(fieldSet));
+ addString(*req->GetParams(), doc_id.toString());
+ return invokeRpc_Return<GetResult>(*req, "bslx");
+}
+
+CreateIteratorResult ProviderProxy::createIterator(const Bucket &bucket,
+ const document::FieldSet& fieldSet,
+ const Selection &select,
+ IncludedVersions versions,
+ Context&)
+{
+ RequestScopedPtr req(_supervisor.AllocRPCRequest());
+ req->SetMethodName("vespa.persistence.createIterator");
+ addBucket(*req->GetParams(), bucket);
+
+ document::FieldSetRepo repo;
+ addString(*req->GetParams(), repo.serialize(fieldSet));
+ addSelection(*req->GetParams(), select);
+ req->GetParams()->AddInt8(versions);
+ return invokeRpc_Return<CreateIteratorResult>(*req, "bsl");
+}
+
+IterateResult ProviderProxy::iterate(IteratorId id,
+ uint64_t max_byte_size,
+ Context&) const
+{
+ RequestScopedPtr req(_supervisor.AllocRPCRequest());
+ req->SetMethodName("vespa.persistence.iterate");
+ req->GetParams()->AddInt64(id);
+ req->GetParams()->AddInt64(max_byte_size);
+ return invokeRpc_Return<IterateResult>(*req, "bsLISXb");
+}
+
+Result ProviderProxy::destroyIterator(IteratorId id, Context&) {
+ RequestScopedPtr req(_supervisor.AllocRPCRequest());
+ req->SetMethodName("vespa.persistence.destroyIterator");
+ req->GetParams()->AddInt64(id);
+ return invokeRpc_Return<Result>(*req, "bs");
+}
+
+Result ProviderProxy::createBucket(const Bucket &bucket, Context&) {
+ RequestScopedPtr req(_supervisor.AllocRPCRequest());
+ req->SetMethodName("vespa.persistence.createBucket");
+ addBucket(*req->GetParams(), bucket);
+ return invokeRpc_Return<Result>(*req, "bs");
+}
+
+Result ProviderProxy::deleteBucket(const Bucket &bucket, Context&) {
+ RequestScopedPtr req(_supervisor.AllocRPCRequest());
+ req->SetMethodName("vespa.persistence.deleteBucket");
+ addBucket(*req->GetParams(), bucket);
+ return invokeRpc_Return<Result>(*req, "bs");
+}
+
+BucketIdListResult ProviderProxy::getModifiedBuckets() const {
+ RequestScopedPtr req(_supervisor.AllocRPCRequest());
+ req->SetMethodName("vespa.persistence.getModifiedBuckets");
+ return invokeRpc_Return<BucketIdListResult>(*req, "bsL");
+}
+
+Result ProviderProxy::split(const Bucket &source,
+ const Bucket &target1,
+ const Bucket &target2,
+ Context&)
+{
+ RequestScopedPtr req(_supervisor.AllocRPCRequest());
+ req->SetMethodName("vespa.persistence.split");
+ addBucket(*req->GetParams(), source);
+ addBucket(*req->GetParams(), target1);
+ addBucket(*req->GetParams(), target2);
+ return invokeRpc_Return<Result>(*req, "bs");
+}
+
+Result ProviderProxy::join(const Bucket &source1,
+ const Bucket &source2,
+ const Bucket &target,
+ Context&)
+{
+ RequestScopedPtr req(_supervisor.AllocRPCRequest());
+ req->SetMethodName("vespa.persistence.join");
+ addBucket(*req->GetParams(), source1);
+ addBucket(*req->GetParams(), source2);
+ addBucket(*req->GetParams(), target);
+ return invokeRpc_Return<Result>(*req, "bs");
+}
+
+Result ProviderProxy::move(const Bucket &source,
+ PartitionId target,
+ Context&)
+{
+ RequestScopedPtr req(_supervisor.AllocRPCRequest());
+ req->SetMethodName("vespa.persistence.move");
+ addBucket(*req->GetParams(), source);
+ req->GetParams()->AddInt64(target);
+ return invokeRpc_Return<Result>(*req, "bs");
+}
+
+Result ProviderProxy::maintain(const Bucket &bucket, MaintenanceLevel level) {
+ RequestScopedPtr req(_supervisor.AllocRPCRequest());
+ req->SetMethodName("vespa.persistence.maintain");
+ addBucket(*req->GetParams(), bucket);
+ req->GetParams()->AddInt8(level);
+ return invokeRpc_Return<Result>(*req, "bs");
+}
+
+Result ProviderProxy::removeEntry(const Bucket &bucket, Timestamp timestamp,
+ Context&)
+{
+ RequestScopedPtr req(_supervisor.AllocRPCRequest());
+ req->SetMethodName("vespa.persistence.removeEntry");
+ addBucket(*req->GetParams(), bucket);
+ req->GetParams()->AddInt64(timestamp);
+ return invokeRpc_Return<Result>(*req, "bs");
+}
+
+} // namespace spi
+} // namespace storage
diff --git a/persistence/src/vespa/persistence/proxy/providerproxy.h b/persistence/src/vespa/persistence/proxy/providerproxy.h
new file mode 100644
index 00000000000..389fb0a3857
--- /dev/null
+++ b/persistence/src/vespa/persistence/proxy/providerproxy.h
@@ -0,0 +1,90 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/fnet/frt/frt.h>
+#include <vespa/persistence/spi/persistenceprovider.h>
+
+namespace storage {
+namespace spi {
+
+class ProviderProxy : public PersistenceProvider {
+ mutable FRT_Supervisor _supervisor;
+ FRT_Target *_target;
+ const document::DocumentTypeRepo *_repo;
+
+ template <typename ResultType>
+ ResultType invokeRpc_Return(FRT_RPCRequest &req,
+ const char *res_spec) const;
+ template <typename ResultType>
+ ResultType readResult(const FRT_Values &values) const;
+ template <typename ResultType>
+ ResultType readNoError(const FRT_Values &values) const;
+
+public:
+ typedef std::unique_ptr<ProviderProxy> UP;
+
+ ProviderProxy(const vespalib::string &connect_spec,
+ const document::DocumentTypeRepo &repo);
+ ~ProviderProxy();
+
+ void setRepo(const document::DocumentTypeRepo &repo) {
+ _repo = &repo;
+ }
+
+ virtual Result initialize();
+ virtual PartitionStateListResult getPartitionStates() const;
+ virtual BucketIdListResult listBuckets(PartitionId) const;
+ virtual Result setClusterState(const ClusterState&);
+ virtual Result setActiveState(const Bucket&, BucketInfo::ActiveState);
+ virtual BucketInfoResult getBucketInfo(const Bucket &) const;
+
+ virtual Result put(const Bucket &, Timestamp, const Document::SP&, Context&);
+ virtual RemoveResult remove(const Bucket &, Timestamp, const DocumentId &,
+ Context&);
+ virtual RemoveResult removeIfFound(const Bucket &, Timestamp,
+ const DocumentId &, Context&);
+ virtual UpdateResult update(const Bucket &, Timestamp,
+ const DocumentUpdate::SP&, Context&);
+
+ virtual Result flush(const Bucket &, Context&);
+
+ virtual GetResult get(const Bucket &,
+ const document::FieldSet&,
+ const DocumentId &,
+ Context&) const;
+
+ virtual CreateIteratorResult createIterator(const Bucket &,
+ const document::FieldSet&,
+ const Selection&,
+ IncludedVersions versions,
+ Context&);
+
+ virtual IterateResult iterate(IteratorId, uint64_t max_byte_size,
+ Context&) const;
+ virtual Result destroyIterator(IteratorId, Context&);
+
+ virtual Result createBucket(const Bucket &, Context&);
+ virtual Result deleteBucket(const Bucket &, Context&);
+ virtual BucketIdListResult getModifiedBuckets() const;
+ virtual Result split(const Bucket &source,
+ const Bucket &target1,
+ const Bucket &target2,
+ Context&);
+
+ virtual Result join(const Bucket &source1,
+ const Bucket &source2,
+ const Bucket &target,
+ Context&);
+
+ virtual Result move(const Bucket &source,
+ PartitionId partition,
+ Context&);
+
+ virtual Result maintain(const Bucket &, MaintenanceLevel);
+ virtual Result removeEntry(const Bucket &, Timestamp, Context&);
+};
+
+} // namespace spi
+} // namespace storage
+
diff --git a/persistence/src/vespa/persistence/proxy/providerstub.cpp b/persistence/src/vespa/persistence/proxy/providerstub.cpp
new file mode 100644
index 00000000000..6f2d565a5f2
--- /dev/null
+++ b/persistence/src/vespa/persistence/proxy/providerstub.cpp
@@ -0,0 +1,931 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP(".providerstub");
+
+#include "buildid.h"
+#include "providerstub.h"
+#include <vespa/document/serialization/vespadocumentdeserializer.h>
+#include <vespa/document/serialization/vespadocumentserializer.h>
+#include <vespa/document/util/bytebuffer.h>
+#include <vespa/persistence/spi/persistenceprovider.h>
+#include <persistence/spi/types.h>
+#include <vespa/vespalib/objects/nbostream.h>
+#include <vespa/vespalib/util/closuretask.h>
+#include <sstream>
+#include <vespa/document/fieldset/fieldsetrepo.h>
+
+using document::BucketId;
+using document::ByteBuffer;
+using document::DocumentTypeRepo;
+using document::VespaDocumentDeserializer;
+using document::VespaDocumentSerializer;
+using std::map;
+using std::ostringstream;
+using std::vector;
+using vespalib::Closure;
+using vespalib::makeClosure;
+using vespalib::makeTask;
+using vespalib::nbostream;
+
+namespace storage {
+namespace spi {
+namespace {
+
+LoadType defaultLoadType(0, "default");
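+
+// The RPC protocol carries no Context, so every handler below invokes the
+// provider with this default load type, priority 0x80 and tracing disabled.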
+
+// Serialize return values
+void addResult(FRT_Values &ret, const Result &result) {
+ ret.AddInt8(result.getErrorCode());
+ ret.AddString(result.getErrorMessage().data(),
+ result.getErrorMessage().size());
+}
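+
+// Every RPC return starts with the (error_code, error_message) pair added
+// here, which is why all res_spec strings on the proxy side begin with "bs".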
+
+void addPartitionStateListResult(FRT_Values &ret,
+ const PartitionStateListResult &result) {
+ addResult(ret, result);
+ PartitionStateList states = result.getList();
+ uint32_t *stateValues = ret.AddInt32Array(states.size());
+ FRT_StringValue *reasons = ret.AddStringArray(states.size());
+ for (size_t i = 0; i < states.size(); ++i) {
+ stateValues[i] = states[i].getState();
+ string reason(states[i].getReason());
+ ret.SetString(&reasons[i], reason.data(), reason.size());
+ }
+}
+
+void addBucketInfoResult(FRT_Values &ret, const BucketInfoResult &result) {
+ addResult(ret, result);
+ const BucketInfo& info = result.getBucketInfo();
+ ret.AddInt32(info.getChecksum());
+ ret.AddInt32(info.getDocumentCount());
+ ret.AddInt32(info.getDocumentSize());
+ ret.AddInt32(info.getEntryCount());
+ ret.AddInt32(info.getUsedSize());
+ ret.AddInt8(static_cast<uint8_t>(info.isReady()));
+ ret.AddInt8(static_cast<uint8_t>(info.isActive()));
+}
+
+void addRemoveResult(FRT_Values &ret, const RemoveResult &result) {
+ addResult(ret, result);
+ ret.AddInt8(result.wasFound());
+}
+
+void addUpdateResult(FRT_Values &ret, const UpdateResult &result) {
+ addResult(ret, result);
+ ret.AddInt64(result.getExistingTimestamp());
+}
+
+void addGetResult(FRT_Values &ret, const GetResult &result) {
+ addResult(ret, result);
+ ret.AddInt64(result.getTimestamp());
+ if (result.hasDocument()) {
+ nbostream stream;
+ VespaDocumentSerializer serializer(stream);
+ serializer.write(result.getDocument(), document::COMPLETE);
+ ret.AddData(stream.c_str(), stream.size());
+ } else {
+ ret.AddData(0, 0);
+ }
+}
+
+void addCreateIteratorResult(FRT_Values &ret,
+ const CreateIteratorResult &result)
+{
+ addResult(ret, result);
+ ret.AddInt64(result.getIteratorId());
+}
+
+void addIterateResult(FRT_Values &ret, const IterateResult &result)
+{
+ addResult(ret, result);
+
+ const vector<DocEntry::LP> &entries = result.getEntries();
+ uint64_t *timestamps = ret.AddInt64Array(entries.size());
+ uint32_t *flags = ret.AddInt32Array(entries.size());
+ assert(sizeof(DocEntry::SizeType) == sizeof(uint32_t));
+ FRT_StringValue *doc_id_array = ret.AddStringArray(entries.size());
+ FRT_DataValue *doc_array = ret.AddDataArray(entries.size());
+
+ for (size_t i = 0; i < entries.size(); ++i) {
+ string doc_id_str;
+ nbostream stream;
+ const DocumentId *doc_id = entries[i]->getDocumentId();
+ if (doc_id) {
+ doc_id_str = doc_id->toString();
+ }
+ const Document *doc = entries[i]->getDocument();
+ if (doc) {
+ VespaDocumentSerializer serializer(stream);
+ serializer.write(*doc, document::COMPLETE);
+ }
+
+ timestamps[i] = entries[i]->getTimestamp();
+ flags[i] = entries[i]->getFlags();
+ ret.SetString(&doc_id_array[i], doc_id_str.data(), doc_id_str.size());
+ ret.SetData(&doc_array[i], stream.c_str(), stream.size());
+ }
+
+ ret.AddInt8(result.isCompleted());
+}
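+
+// Entries are encoded as four parallel arrays (timestamps, flags, doc ids,
+// serialized docs). On decode, non-empty document data means a put entry,
+// an empty document with a non-empty id means a remove entry, and both
+// empty means a metadata-only entry.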
+
+void addBucketIdListResult(FRT_Values &ret, const BucketIdListResult& result) {
+ addResult(ret, result);
+
+ size_t modified_bucket_size = result.getList().size();
+ uint64_t *bucket_id = ret.AddInt64Array(modified_bucket_size);
+ for (size_t i = 0; i < modified_bucket_size; ++i) {
+ bucket_id[i] = result.getList()[i].getRawId();
+ }
+}
+
+string getString(const FRT_StringValue &str) {
+ return string(str._str, str._len);
+}
+
+string getString(const FRT_Value &value) {
+ return getString(value._string);
+}
+
+Bucket getBucket(const FRT_Value &bucket_val, const FRT_Value &partition_val) {
+ BucketId bucket_id(bucket_val._intval64);
+ PartitionId partition_id(partition_val._intval64);
+ return Bucket(bucket_id, partition_id);
+}
+
+Document::UP getDocument(const FRT_Value &val, const DocumentTypeRepo &repo) {
+ nbostream stream(val._data._buf, val._data._len);
+ const uint16_t version = 8;
+ VespaDocumentDeserializer deserializer(repo, stream, version);
+ Document::UP doc(new Document);
+ deserializer.read(*doc);
+ return doc;
+}
+
+Selection getSelection(const FRT_Values &params, int i) {
+ DocumentSelection doc_sel(getString(params[i]));
+ Timestamp timestamp_from(params[i + 1]._intval64);
+ Timestamp timestamp_to(params[i + 2]._intval64);
+ FRT_Array<uint64_t> array = params[i + 3]._int64_array;
+ TimestampList timestamp_subset(array._pt, array._pt + array._len);
+
+ Selection selection(doc_sel);
+ selection.setFromTimestamp(timestamp_from);
+ selection.setToTimestamp(timestamp_to);
+ selection.setTimestampSubset(timestamp_subset);
+ return selection;
+}
+
+void addConnect(
+ FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
+ rb.DefineMethod("vespa.persistence.connect",
+ "s", "", true, func, obj);
+ rb.MethodDesc("Set up connection to proxy.");
+ rb.ParamDesc("build_id", "Id to make sure client and server come from the "
+ "same build.");
+}
+
+void addGetPartitionStates(
+ FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
+ rb.DefineMethod("vespa.persistence.getPartitionStates",
+ "", "bsIS", true, func, obj);
+ rb.MethodDesc("???");
+ rb.ReturnDesc("ret", "An array of serialized PartitionStates.");
+}
+
+void doGetPartitionStates(FRT_RPCRequest *req, PersistenceProvider *provider) {
+ FRT_Values &ret = *req->GetReturn();
+ addPartitionStateListResult(ret, provider->getPartitionStates());
+ req->Return();
+}
+
+void addInitialize(
+ FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
+ rb.DefineMethod("vespa.persistence.initialize",
+ "", "bs", true, func, obj);
+ rb.MethodDesc("???");
+ rb.ReturnDesc("error_code", "");
+ rb.ReturnDesc("error_message", "");
+}
+
+void doInitialize(FRT_RPCRequest *req, PersistenceProvider *provider) {
+ FRT_Values &ret = *req->GetReturn();
+ addResult(ret, provider->initialize());
+ req->Return();
+}
+
+void addListBuckets(
+ FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
+ rb.DefineMethod("vespa.persistence.listBuckets",
+ "l", "bsL", true, func, obj);
+ rb.MethodDesc("???");
+ rb.ParamDesc("partition_id", "");
+ rb.ReturnDesc("error_code", "");
+ rb.ReturnDesc("error_message", "");
+ rb.ReturnDesc("bucket_ids", "An array of BucketIds.");
+}
+
+void doListBuckets(FRT_RPCRequest *req, PersistenceProvider *provider) {
+ FRT_Values &params = *req->GetParams();
+ PartitionId partition_id(params[0]._intval64);
+
+ FRT_Values &ret = *req->GetReturn();
+ addBucketIdListResult(ret, provider->listBuckets(partition_id));
+ req->Return();
+}
+
+void addSetClusterState(
+ FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
+ rb.DefineMethod("vespa.persistence.setClusterState",
+ "x", "bs", true, func, obj);
+ rb.MethodDesc("???");
+ rb.ParamDesc("cluster_state", "");
+ rb.ReturnDesc("error_code", "");
+ rb.ReturnDesc("error_message", "");
+}
+
+void doSetClusterState(FRT_RPCRequest *req, PersistenceProvider *provider) {
+ FRT_Values &params = *req->GetParams();
+ vespalib::nbostream stream(params[0]._data._buf, params[0]._data._len);
+
+ ClusterState state(stream);
+ FRT_Values &ret = *req->GetReturn();
+ addResult(ret, provider->setClusterState(state));
+ req->Return();
+}
+
+void addSetActiveState(
+ FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
+ rb.DefineMethod("vespa.persistence.setActiveState",
+ "llb", "bs", true, func, obj);
+ rb.MethodDesc("???");
+ rb.ParamDesc("bucket_id", "");
+ rb.ParamDesc("partition_id", "");
+ rb.ParamDesc("bucket_state", "");
+ rb.ReturnDesc("error_code", "");
+ rb.ReturnDesc("error_message", "");
+}
+
+void doSetActiveState(FRT_RPCRequest *req, PersistenceProvider *provider) {
+ FRT_Values &params = *req->GetParams();
+ Bucket bucket = getBucket(params[0], params[1]);
+ BucketInfo::ActiveState state = BucketInfo::ActiveState(params[2]._intval8);
+
+ FRT_Values &ret = *req->GetReturn();
+ addResult(ret, provider->setActiveState(bucket, state));
+ req->Return();
+}
+
+void addGetBucketInfo(
+ FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
+ rb.DefineMethod("vespa.persistence.getBucketInfo",
+ "ll", "bsiiiiibb", true, func, obj);
+ rb.MethodDesc("???");
+ rb.ParamDesc("bucket_id", "");
+ rb.ParamDesc("partition_id", "");
+ rb.ReturnDesc("error_code", "");
+ rb.ReturnDesc("error_message", "");
+ rb.ReturnDesc("checksum", "");
+ rb.ReturnDesc("document_count", "");
+ rb.ReturnDesc("document_size", "");
+ rb.ReturnDesc("entry_count", "");
+ rb.ReturnDesc("used_size", "");
+ rb.ReturnDesc("ready", "");
+ rb.ReturnDesc("active", "");
+}
+
+void doGetBucketInfo(FRT_RPCRequest *req, PersistenceProvider *provider) {
+ FRT_Values &params = *req->GetParams();
+ Bucket bucket = getBucket(params[0], params[1]);
+
+ FRT_Values &ret = *req->GetReturn();
+ addBucketInfoResult(ret, provider->getBucketInfo(bucket));
+ req->Return();
+}
+
+void addPut(
+ FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
+ rb.DefineMethod("vespa.persistence.put",
+ "lllx", "bs", true, func, obj);
+ rb.MethodDesc("???");
+ rb.ParamDesc("bucket_id", "");
+ rb.ParamDesc("partition_id", "");
+ rb.ParamDesc("timestamp", "");
+ rb.ParamDesc("document", "A serialized document");
+ rb.ReturnDesc("error_code", "");
+ rb.ReturnDesc("error_message", "");
+}
+
+void doPut(FRT_RPCRequest *req, PersistenceProvider *provider,
+ const DocumentTypeRepo *repo)
+{
+ FRT_Values &params = *req->GetParams();
+ Bucket bucket = getBucket(params[0], params[1]);
+ Timestamp timestamp(params[2]._intval64);
+ Document::SP doc(getDocument(params[3], *repo).release());
+
+ FRT_Values &ret = *req->GetReturn();
+ Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
+ addResult(ret, provider->put(bucket, timestamp, doc, context));
+ req->Return();
+}
+
+void addRemoveById(
+ FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
+ rb.DefineMethod("vespa.persistence.removeById",
+ "llls", "bsb", true, func, obj);
+ rb.MethodDesc("???");
+ rb.ParamDesc("bucket_id", "");
+ rb.ParamDesc("partition_id", "");
+ rb.ParamDesc("timestamp", "");
+ rb.ParamDesc("document_id", "");
+ rb.ReturnDesc("error_code", "");
+ rb.ReturnDesc("error_message", "");
+ rb.ReturnDesc("existed", "");
+}
+
+void doRemoveById(FRT_RPCRequest *req, PersistenceProvider *provider) {
+ FRT_Values &params = *req->GetParams();
+ Bucket bucket = getBucket(params[0], params[1]);
+ Timestamp timestamp(params[2]._intval64);
+ DocumentId id(getString(params[3]));
+
+ FRT_Values &ret = *req->GetReturn();
+ Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
+ addRemoveResult(ret, provider->remove(bucket, timestamp, id, context));
+ req->Return();
+}
+
+void addRemoveIfFound(
+ FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
+ rb.DefineMethod("vespa.persistence.removeIfFound",
+ "llls", "bsb", true, func, obj);
+ rb.MethodDesc("???");
+ rb.ParamDesc("bucket_id", "");
+ rb.ParamDesc("partition_id", "");
+ rb.ParamDesc("timestamp", "");
+ rb.ParamDesc("document_id", "");
+ rb.ReturnDesc("error_code", "");
+ rb.ReturnDesc("error_message", "");
+ rb.ReturnDesc("existed", "");
+}
+
+void doRemoveIfFound(FRT_RPCRequest *req, PersistenceProvider *provider) {
+ FRT_Values &params = *req->GetParams();
+ Bucket bucket = getBucket(params[0], params[1]);
+ Timestamp timestamp(params[2]._intval64);
+ DocumentId id(getString(params[3]));
+
+ FRT_Values &ret = *req->GetReturn();
+ Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
+ addRemoveResult(ret,
+ provider->removeIfFound(bucket, timestamp, id, context));
+ req->Return();
+}
+
+void addUpdate(
+ FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
+ rb.DefineMethod("vespa.persistence.update",
+ "lllx", "bsl", true, func, obj);
+ rb.MethodDesc("???");
+ rb.ParamDesc("bucket_id", "");
+ rb.ParamDesc("partition_id", "");
+ rb.ParamDesc("timestamp", "");
+ rb.ParamDesc("document_update", "A serialized DocumentUpdate");
+ rb.ReturnDesc("error_code", "");
+ rb.ReturnDesc("error_message", "");
+ rb.ReturnDesc("existing timestamp", "");
+}
+
+void doUpdate(FRT_RPCRequest *req, PersistenceProvider *provider,
+ const DocumentTypeRepo *repo)
+{
+ FRT_Values &params = *req->GetParams();
+ Bucket bucket = getBucket(params[0], params[1]);
+ Timestamp timestamp(params[2]._intval64);
+ ByteBuffer buffer(params[3]._data._buf, params[3]._data._len);
+    DocumentUpdate::SP update(new DocumentUpdate(
+            *repo, buffer, DocumentUpdate::SerializeVersion::SERIALIZE_HEAD));
+
+ FRT_Values &ret = *req->GetReturn();
+ Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
+ addUpdateResult(ret, provider->update(bucket, timestamp, update, context));
+ req->Return();
+}
+
+void addFlush(
+ FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
+ rb.DefineMethod("vespa.persistence.flush", "ll", "bs", true, func, obj);
+    rb.MethodDesc("Flush pending operations for a bucket.");
+ rb.ParamDesc("bucket_id", "");
+ rb.ParamDesc("partition_id", "");
+ rb.ReturnDesc("error_code", "");
+ rb.ReturnDesc("error_message", "");
+}
+
+void doFlush(FRT_RPCRequest *req, PersistenceProvider *provider) {
+ FRT_Values &params = *req->GetParams();
+ Bucket bucket = getBucket(params[0], params[1]);
+
+ FRT_Values &ret = *req->GetReturn();
+ Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
+ addResult(ret, provider->flush(bucket, context));
+ req->Return();
+}
+
+void addGet(
+ FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
+ rb.DefineMethod("vespa.persistence.get",
+ "llss", "bslx", true, func, obj);
+    rb.MethodDesc("Retrieve a document, restricted to the given field set.");
+ rb.ParamDesc("bucket_id", "");
+ rb.ParamDesc("partition_id", "");
+ rb.ParamDesc("field_set", "Array of fields in the set");
+ rb.ParamDesc("document_id", "");
+ rb.ReturnDesc("error_code", "");
+ rb.ReturnDesc("error_message", "");
+ rb.ReturnDesc("timestamp", "");
+ rb.ReturnDesc("document", "A serialized document");
+}
+
+void doGet(FRT_RPCRequest *req,
+ PersistenceProvider *provider,
+ const DocumentTypeRepo* repo)
+{
+ FRT_Values &params = *req->GetParams();
+ Bucket bucket = getBucket(params[0], params[1]);
+
+ document::FieldSetRepo fsr;
+ document::FieldSet::UP fieldSet = fsr.parse(*repo, getString(params[2]));
+ DocumentId id(getString(params[3]));
+
+ FRT_Values &ret = *req->GetReturn();
+ Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
+ addGetResult(ret, provider->get(bucket, *fieldSet, id, context));
+ req->Return();
+}
+
+void addCreateIterator(
+ FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
+ rb.DefineMethod("vespa.persistence.createIterator",
+ "llssllLb", "bsl", true, func, obj);
+    rb.MethodDesc("Create an iterator over the matching content of a bucket.");
+ rb.ParamDesc("bucket_id", "");
+ rb.ParamDesc("partition_id", "");
+ rb.ParamDesc("field_set", "Field set string (comma-separated list of strings)");
+ rb.ParamDesc("document_selection_string", "");
+ rb.ParamDesc("timestamp_from", "");
+ rb.ParamDesc("timestamp_to", "");
+ rb.ParamDesc("timestamp_subset", "");
+ rb.ParamDesc("includedversions", "");
+
+ rb.ReturnDesc("error_code", "");
+ rb.ReturnDesc("error_message", "");
+ rb.ReturnDesc("iterator_id", "");
+}
+
+void doCreateIterator(FRT_RPCRequest *req, PersistenceProvider *provider,
+ const DocumentTypeRepo* repo)
+{
+ FRT_Values &params = *req->GetParams();
+ Bucket bucket = getBucket(params[0], params[1]);
+
+ document::FieldSetRepo fsr;
+ document::FieldSet::UP fieldSet = fsr.parse(*repo, getString(params[2]));
+ Selection selection = getSelection(params, 3);
+ IncludedVersions versions =
+ static_cast<IncludedVersions>(params[7]._intval8);
+
+ FRT_Values &ret = *req->GetReturn();
+ Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
+ addCreateIteratorResult(ret, provider->createIterator(
+ bucket, *fieldSet, selection, versions, context));
+ req->Return();
+}
+
+void addIterate(
+ FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
+ rb.DefineMethod("vespa.persistence.iterate",
+ "ll", "bsLISXb", true, func, obj);
+    rb.MethodDesc("Fetch the next batch of entries from an iterator.");
+ rb.ParamDesc("iterator_id", "");
+ rb.ParamDesc("max_byte_size", "");
+ rb.ReturnDesc("error_code", "");
+ rb.ReturnDesc("error_message", "");
+ rb.ReturnDesc("doc_entry_timestamp", "Array of timestamps for DocEntries");
+ rb.ReturnDesc("doc_entry_flags", "Array of flags for DocEntries");
+ rb.ReturnDesc("doc_entry_doc_id", "Array of DocumentIds for DocEntries");
+ rb.ReturnDesc("doc_entry_doc", "Array of Documents for DocEntries");
+ rb.ReturnDesc("completed", "bool");
+}
+
+void doIterate(FRT_RPCRequest *req, PersistenceProvider *provider) {
+ FRT_Values &params = *req->GetParams();
+ IteratorId id(params[0]._intval64);
+ uint64_t max_byte_size = params[1]._intval64;
+
+ FRT_Values &ret = *req->GetReturn();
+ Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
+ addIterateResult(ret, provider->iterate(id, max_byte_size, context));
+ req->Return();
+}
+
+void addDestroyIterator(
+ FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
+ rb.DefineMethod("vespa.persistence.destroyIterator",
+ "l", "bs", true, func, obj);
+    rb.MethodDesc("Destroy an iterator and release its resources.");
+    rb.ParamDesc("iterator_id", "");
+    rb.ReturnDesc("error_code", "");
+    rb.ReturnDesc("error_message", "");
+}
+
+void doDestroyIterator(FRT_RPCRequest *req, PersistenceProvider *provider) {
+ FRT_Values &params = *req->GetParams();
+ IteratorId id(params[0]._intval64);
+
+ FRT_Values &ret = *req->GetReturn();
+ Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
+ addResult(ret, provider->destroyIterator(id, context));
+ req->Return();
+}
+
+void addCreateBucket(
+ FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
+ rb.DefineMethod("vespa.persistence.createBucket",
+ "ll", "bs", true, func, obj);
+    rb.MethodDesc("Create a new bucket.");
+ rb.ParamDesc("bucket_id", "");
+ rb.ParamDesc("partition_id", "");
+ rb.ReturnDesc("error_code", "");
+ rb.ReturnDesc("error_message", "");
+}
+
+void doCreateBucket(FRT_RPCRequest *req, PersistenceProvider *provider) {
+ FRT_Values &params = *req->GetParams();
+ Bucket bucket = getBucket(params[0], params[1]);
+
+ FRT_Values &ret = *req->GetReturn();
+ Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
+ addResult(ret, provider->createBucket(bucket, context));
+ req->Return();
+}
+
+void addDeleteBucket(
+ FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
+ rb.DefineMethod("vespa.persistence.deleteBucket",
+ "ll", "bs", true, func, obj);
+    rb.MethodDesc("Delete a bucket and all documents in it.");
+ rb.ParamDesc("bucket_id", "");
+ rb.ParamDesc("partition_id", "");
+ rb.ReturnDesc("error_code", "");
+ rb.ReturnDesc("error_message", "");
+}
+
+void doDeleteBucket(FRT_RPCRequest *req, PersistenceProvider *provider) {
+ FRT_Values &params = *req->GetParams();
+ Bucket bucket = getBucket(params[0], params[1]);
+
+ FRT_Values &ret = *req->GetReturn();
+ Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
+ addResult(ret, provider->deleteBucket(bucket, context));
+ req->Return();
+}
+
+void addGetModifiedBuckets(
+ FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
+ rb.DefineMethod("vespa.persistence.getModifiedBuckets",
+ "", "bsL", true, func, obj);
+ rb.ReturnDesc("error_code", "");
+ rb.ReturnDesc("error_message", "");
+ rb.ReturnDesc("modified_buckets_bucket_ids", "Array of bucket ids");
+}
+
+void doGetModifiedBuckets(FRT_RPCRequest *req, PersistenceProvider *provider) {
+ FRT_Values &ret = *req->GetReturn();
+ addBucketIdListResult(ret, provider->getModifiedBuckets());
+ req->Return();
+}
+
+void addSplit(
+ FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
+ rb.DefineMethod("vespa.persistence.split",
+ "llllll", "bs", true, func, obj);
+    rb.MethodDesc("Split a source bucket into two target buckets.");
+ rb.ParamDesc("source_bucket_id", "");
+ rb.ParamDesc("source_partition_id", "");
+ rb.ParamDesc("target1_bucket_id", "");
+ rb.ParamDesc("target1_partition_id", "");
+ rb.ParamDesc("target2_bucket_id", "");
+ rb.ParamDesc("target2_partition_id", "");
+ rb.ReturnDesc("error_code", "");
+ rb.ReturnDesc("error_message", "");
+}
+
+void doSplit(FRT_RPCRequest *req, PersistenceProvider *provider) {
+ FRT_Values &params = *req->GetParams();
+ Bucket source = getBucket(params[0], params[1]);
+ Bucket target1 = getBucket(params[2], params[3]);
+ Bucket target2 = getBucket(params[4], params[5]);
+
+ FRT_Values &ret = *req->GetReturn();
+ Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
+ addResult(ret, provider->split(source, target1, target2, context));
+ req->Return();
+}
+
+void addJoin(
+ FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
+ rb.DefineMethod("vespa.persistence.join",
+ "llllll", "bs", true, func, obj);
+    rb.MethodDesc("Join two source buckets into a single target bucket.");
+ rb.ParamDesc("source1_bucket_id", "");
+ rb.ParamDesc("source1_partition_id", "");
+ rb.ParamDesc("source2_bucket_id", "");
+ rb.ParamDesc("source2_partition_id", "");
+ rb.ParamDesc("target_bucket_id", "");
+ rb.ParamDesc("target_partition_id", "");
+ rb.ReturnDesc("error_code", "");
+ rb.ReturnDesc("error_message", "");
+}
+
+void doJoin(FRT_RPCRequest *req, PersistenceProvider *provider) {
+ FRT_Values &params = *req->GetParams();
+ Bucket source1 = getBucket(params[0], params[1]);
+ Bucket source2 = getBucket(params[2], params[3]);
+ Bucket target = getBucket(params[4], params[5]);
+
+ FRT_Values &ret = *req->GetReturn();
+ Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
+ addResult(ret, provider->join(source1, source2, target, context));
+ req->Return();
+}
+
+void addMove(
+ FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
+ rb.DefineMethod("vespa.persistence.move",
+ "lll", "bs", true, func, obj);
+    rb.MethodDesc("Move a bucket from one partition to another.");
+ rb.ParamDesc("source_bucket_id", "");
+ rb.ParamDesc("source_partition_id", "");
+ rb.ParamDesc("target_partition_id", "");
+ rb.ReturnDesc("error_code", "");
+ rb.ReturnDesc("error_message", "");
+}
+
+void doMove(FRT_RPCRequest *req, PersistenceProvider *provider) {
+ FRT_Values &params = *req->GetParams();
+ Bucket source = getBucket(params[0], params[1]);
+ PartitionId partition_id(params[2]._intval64);
+
+ FRT_Values &ret = *req->GetReturn();
+ Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
+ addResult(ret, provider->move(source, partition_id, context));
+ req->Return();
+}
+
+void addMaintain(
+ FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
+ rb.DefineMethod("vespa.persistence.maintain",
+ "llb", "bs", true, func, obj);
+    rb.MethodDesc("Perform maintenance on a bucket at the given verification level.");
+ rb.ParamDesc("bucket_id", "");
+ rb.ParamDesc("partition_id", "");
+ rb.ParamDesc("verification_level", "");
+ rb.ReturnDesc("error_code", "");
+ rb.ReturnDesc("error_message", "");
+}
+
+void doMaintain(FRT_RPCRequest *req, PersistenceProvider *provider) {
+ FRT_Values &params = *req->GetParams();
+ Bucket bucket = getBucket(params[0], params[1]);
+ MaintenanceLevel level =
+ static_cast<MaintenanceLevel>(params[2]._intval8);
+
+ FRT_Values &ret = *req->GetReturn();
+ addResult(ret, provider->maintain(bucket, level));
+ req->Return();
+}
+
+void addRemoveEntry(
+ FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
+ rb.DefineMethod("vespa.persistence.removeEntry",
+ "lll", "bs", true, func, obj);
+    rb.MethodDesc("Remove a single entry identified by bucket and timestamp.");
+ rb.ParamDesc("bucket_id", "");
+ rb.ParamDesc("partition_id", "");
+ rb.ParamDesc("timestamp", "");
+ rb.ReturnDesc("error_code", "");
+ rb.ReturnDesc("error_message", "");
+}
+
+void doRemoveEntry(FRT_RPCRequest *req, PersistenceProvider *provider) {
+ FRT_Values &params = *req->GetParams();
+ Bucket bucket = getBucket(params[0], params[1]);
+ Timestamp timestamp(params[2]._intval64);
+
+ FRT_Values &ret = *req->GetReturn();
+ Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
+ addResult(ret, provider->removeEntry(bucket, timestamp, context));
+ req->Return();
+}
+
+const uint32_t magic_number = 0xf00ba2;
+
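+// Only connections that have completed the RPC_connect handshake (and thus
+// been tagged with the magic number) may invoke provider methods.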
+bool checkConnection(FNET_Connection *connection) {
+ return connection && connection->GetContext()._value.INT == magic_number;
+}
+} //namespace
+
+void ProviderStub::HOOK_fini(FRT_RPCRequest *req) {
+ FNET_Connection *connection = req->GetConnection();
+ if (checkConnection(connection)) {
+ assert(_provider.get() != 0);
+ _providerCleanupTask.ScheduleNow();
+ }
+}
+
+void ProviderStub::RPC_connect(FRT_RPCRequest *req) {
+ FRT_Values &params = *req->GetParams();
+ FNET_Connection *connection = req->GetConnection();
+ if (checkConnection(connection)) {
+ return;
+ }
+ string build_id = getString(params[0]);
+ if (build_id != getBuildId()) {
+ req->SetError(FRTE_RPC_METHOD_FAILED,
+ ("Wrong build id. Got '" + build_id +
+ "', required '" + getBuildId() + "'").c_str());
+ return;
+ } else if (_provider.get()) {
+ req->SetError(FRTE_RPC_METHOD_FAILED, "Server is already connected");
+ return;
+ }
+ if (!connection) {
+ req->SetError(FRTE_RPC_METHOD_FAILED);
+ return;
+ }
+ connection->SetContext(FNET_Context(magic_number));
+ _provider = _factory.create();
+}
+
+void ProviderStub::detachAndRun(FRT_RPCRequest *req, Closure::UP closure) {
+ if (!checkConnection(req->GetConnection())) {
+ req->SetError(FRTE_RPC_METHOD_FAILED);
+ return;
+ }
+ assert(_provider.get() != 0);
+ req->Detach();
+ _executor.execute(makeTask(std::move(closure)));
+}
+
+void ProviderStub::RPC_getPartitionStates(FRT_RPCRequest *req) {
+ detachAndRun(req, makeClosure(doGetPartitionStates, req, _provider.get()));
+}
+
+void ProviderStub::RPC_initialize(FRT_RPCRequest *req) {
+ detachAndRun(req, makeClosure(doInitialize, req, _provider.get()));
+}
+
+void ProviderStub::RPC_listBuckets(FRT_RPCRequest *req) {
+ detachAndRun(req, makeClosure(doListBuckets, req, _provider.get()));
+}
+
+void ProviderStub::RPC_setClusterState(FRT_RPCRequest *req) {
+ detachAndRun(req, makeClosure(doSetClusterState, req, _provider.get()));
+}
+
+void ProviderStub::RPC_setActiveState(FRT_RPCRequest *req) {
+ detachAndRun(req, makeClosure(doSetActiveState, req, _provider.get()));
+}
+
+void ProviderStub::RPC_getBucketInfo(FRT_RPCRequest *req) {
+ detachAndRun(req, makeClosure(doGetBucketInfo, req, _provider.get()));
+}
+
+void ProviderStub::RPC_put(FRT_RPCRequest *req) {
+ detachAndRun(req, makeClosure(doPut, req, _provider.get(), _repo));
+}
+
+void ProviderStub::RPC_removeById(FRT_RPCRequest *req) {
+ detachAndRun(req, makeClosure(doRemoveById, req, _provider.get()));
+}
+
+void ProviderStub::RPC_removeIfFound(FRT_RPCRequest *req) {
+ detachAndRun(req, makeClosure(doRemoveIfFound, req, _provider.get()));
+}
+
+void ProviderStub::RPC_update(FRT_RPCRequest *req) {
+ detachAndRun(req, makeClosure(doUpdate, req, _provider.get(), _repo));
+}
+
+void ProviderStub::RPC_flush(FRT_RPCRequest *req) {
+ detachAndRun(req, makeClosure(doFlush, req, _provider.get()));
+}
+
+void ProviderStub::RPC_get(FRT_RPCRequest *req) {
+ detachAndRun(req, makeClosure(doGet, req, _provider.get(), _repo));
+}
+
+void ProviderStub::RPC_createIterator(FRT_RPCRequest *req) {
+ detachAndRun(req, makeClosure(doCreateIterator, req, _provider.get(), _repo));
+}
+
+void ProviderStub::RPC_iterate(FRT_RPCRequest *req) {
+ detachAndRun(req, makeClosure(doIterate, req, _provider.get()));
+}
+
+void ProviderStub::RPC_destroyIterator(FRT_RPCRequest *req) {
+ detachAndRun(req, makeClosure(doDestroyIterator, req, _provider.get()));
+}
+
+void ProviderStub::RPC_createBucket(FRT_RPCRequest *req) {
+ detachAndRun(req, makeClosure(doCreateBucket, req, _provider.get()));
+}
+
+void ProviderStub::RPC_deleteBucket(FRT_RPCRequest *req) {
+ detachAndRun(req, makeClosure(doDeleteBucket, req, _provider.get()));
+}
+
+void ProviderStub::RPC_getModifiedBuckets(FRT_RPCRequest *req) {
+ detachAndRun(req, makeClosure(doGetModifiedBuckets, req, _provider.get()));
+}
+
+void ProviderStub::RPC_split(FRT_RPCRequest *req) {
+ detachAndRun(req, makeClosure(doSplit, req, _provider.get()));
+}
+
+void ProviderStub::RPC_join(FRT_RPCRequest *req) {
+ detachAndRun(req, makeClosure(doJoin, req, _provider.get()));
+}
+
+void ProviderStub::RPC_move(FRT_RPCRequest *req) {
+ detachAndRun(req, makeClosure(doMove, req, _provider.get()));
+}
+
+void ProviderStub::RPC_maintain(FRT_RPCRequest *req) {
+ detachAndRun(req, makeClosure(doMaintain, req, _provider.get()));
+}
+
+void ProviderStub::RPC_removeEntry(FRT_RPCRequest *req) {
+ detachAndRun(req, makeClosure(doRemoveEntry, req, _provider.get()));
+}
+
+void ProviderStub::SetupRpcCalls() {
+ FRT_ReflectionBuilder rb(&_supervisor);
+ addConnect(rb, FRT_METHOD(ProviderStub::RPC_connect), this);
+ addInitialize(
+ rb, FRT_METHOD(ProviderStub::RPC_initialize), this);
+ addGetPartitionStates(
+ rb, FRT_METHOD(ProviderStub::RPC_getPartitionStates), this);
+ addListBuckets(rb, FRT_METHOD(ProviderStub::RPC_listBuckets), this);
+ addSetClusterState(rb, FRT_METHOD(ProviderStub::RPC_setClusterState), this);
+ addSetActiveState(
+ rb, FRT_METHOD(ProviderStub::RPC_setActiveState), this);
+ addGetBucketInfo(rb, FRT_METHOD(ProviderStub::RPC_getBucketInfo), this);
+ addPut(rb, FRT_METHOD(ProviderStub::RPC_put), this);
+ addRemoveById(rb, FRT_METHOD(ProviderStub::RPC_removeById), this);
+ addRemoveIfFound(rb, FRT_METHOD(ProviderStub::RPC_removeIfFound), this);
+ addUpdate(rb, FRT_METHOD(ProviderStub::RPC_update), this);
+ addFlush(rb, FRT_METHOD(ProviderStub::RPC_flush), this);
+ addGet(rb, FRT_METHOD(ProviderStub::RPC_get), this);
+ addCreateIterator(rb, FRT_METHOD(ProviderStub::RPC_createIterator), this);
+ addIterate(rb, FRT_METHOD(ProviderStub::RPC_iterate), this);
+ addDestroyIterator(
+ rb, FRT_METHOD(ProviderStub::RPC_destroyIterator), this);
+ addCreateBucket(rb, FRT_METHOD(ProviderStub::RPC_createBucket), this);
+ addDeleteBucket(rb, FRT_METHOD(ProviderStub::RPC_deleteBucket), this);
+ addGetModifiedBuckets(
+ rb, FRT_METHOD(ProviderStub::RPC_getModifiedBuckets), this);
+ addSplit(rb, FRT_METHOD(ProviderStub::RPC_split), this);
+ addJoin(rb, FRT_METHOD(ProviderStub::RPC_join), this);
+ addMove(rb, FRT_METHOD(ProviderStub::RPC_move), this);
+ addMaintain(rb, FRT_METHOD(ProviderStub::RPC_maintain), this);
+ addRemoveEntry(rb, FRT_METHOD(ProviderStub::RPC_removeEntry), this);
+}
+
+ProviderStub::ProviderStub(int port, uint32_t threads,
+ const document::DocumentTypeRepo &repo,
+ PersistenceProviderFactory &factory)
+ : _supervisor(),
+ _executor(threads, 256*1024),
+ _repo(&repo),
+ _factory(factory),
+ _provider(),
+ _providerCleanupTask(_supervisor.GetScheduler(), _executor, _provider)
+{
+ SetupRpcCalls();
+ _supervisor.SetSessionFiniHook(FRT_METHOD(ProviderStub::HOOK_fini), this);
+ _supervisor.Start();
+ _supervisor.Listen(port);
+}
+
+ProviderStub::~ProviderStub() {
+ _supervisor.ShutDown(true);
+ sync();
+}
+
+} // namespace spi
+} // namespace storage
diff --git a/persistence/src/vespa/persistence/proxy/providerstub.h b/persistence/src/vespa/persistence/proxy/providerstub.h
new file mode 100644
index 00000000000..3d946845fa5
--- /dev/null
+++ b/persistence/src/vespa/persistence/proxy/providerstub.h
@@ -0,0 +1,94 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/fnet/frt/frt.h>
+#include <vespa/vespalib/util/closure.h>
+#include <vespa/vespalib/util/threadstackexecutor.h>
+#include <memory>
+
+namespace document { class DocumentTypeRepo; }
+
+namespace storage {
+namespace spi {
+class PersistenceProvider;
+
+class ProviderStub : private FRT_Invokable
+{
+public:
+ struct PersistenceProviderFactory {
+ virtual std::unique_ptr<PersistenceProvider> create() const = 0;
+ virtual ~PersistenceProviderFactory() {}
+ };
+
+private:
+ struct ProviderCleanupTask : FNET_Task {
+ vespalib::ThreadStackExecutor &executor;
+ std::unique_ptr<PersistenceProvider> &provider;
+ ProviderCleanupTask(FNET_Scheduler *s,
+ vespalib::ThreadStackExecutor &e,
+ std::unique_ptr<PersistenceProvider> &p)
+ : FNET_Task(s), executor(e), provider(p) {}
+ virtual void PerformTask() {
+ executor.sync();
+ assert(provider.get() != 0);
+ provider.reset();
+ }
+ };
+
+ FRT_Supervisor _supervisor;
+ vespalib::ThreadStackExecutor _executor;
+ const document::DocumentTypeRepo *_repo;
+ PersistenceProviderFactory &_factory;
+ std::unique_ptr<PersistenceProvider> _provider;
+ ProviderCleanupTask _providerCleanupTask;
+
+ void HOOK_fini(FRT_RPCRequest *req);
+
+ void detachAndRun(FRT_RPCRequest *req, vespalib::Closure::UP closure);
+ void RPC_connect(FRT_RPCRequest *req);
+ void RPC_initialize(FRT_RPCRequest *req);
+ void RPC_getPartitionStates(FRT_RPCRequest *req);
+ void RPC_listBuckets(FRT_RPCRequest *req);
+ void RPC_setClusterState(FRT_RPCRequest *req);
+ void RPC_setActiveState(FRT_RPCRequest *req);
+ void RPC_getBucketInfo(FRT_RPCRequest *req);
+ void RPC_put(FRT_RPCRequest *req);
+ void RPC_removeById(FRT_RPCRequest *req);
+ void RPC_removeIfFound(FRT_RPCRequest *req);
+ void RPC_update(FRT_RPCRequest *req);
+ void RPC_flush(FRT_RPCRequest *req);
+ void RPC_get(FRT_RPCRequest *req);
+ void RPC_createIterator(FRT_RPCRequest *req);
+ void RPC_iterate(FRT_RPCRequest *req);
+ void RPC_destroyIterator(FRT_RPCRequest *req);
+ void RPC_createBucket(FRT_RPCRequest *req);
+ void RPC_deleteBucket(FRT_RPCRequest *req);
+ void RPC_getModifiedBuckets(FRT_RPCRequest *req);
+ void RPC_split(FRT_RPCRequest *req);
+ void RPC_join(FRT_RPCRequest *req);
+ void RPC_move(FRT_RPCRequest *req);
+ void RPC_maintain(FRT_RPCRequest *req);
+ void RPC_removeEntry(FRT_RPCRequest *req);
+
+ void SetupRpcCalls();
+
+public:
+ typedef std::unique_ptr<ProviderStub> UP;
+
+ ProviderStub(int port, uint32_t threads,
+ const document::DocumentTypeRepo &repo,
+ PersistenceProviderFactory &factory);
+ ~ProviderStub();
+
+ int getPort() const { return _supervisor.GetListenPort(); }
+ bool hasClient() const { return (_provider.get() != 0); }
+ void setRepo(const document::DocumentTypeRepo &repo) {
+ _repo = &repo;
+ }
+ void sync() { _executor.sync(); }
+};
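+
+// A minimal embedding sketch (MyProvider and the port are hypothetical):
+//
+//   struct MyFactory : ProviderStub::PersistenceProviderFactory {
+//       virtual std::unique_ptr<PersistenceProvider> create() const {
+//           return std::unique_ptr<PersistenceProvider>(new MyProvider());
+//       }
+//   };
+//
+//   MyFactory factory;
+//   ProviderStub stub(17777, 4, repo, factory);  // port, worker threads
+//   // The stub now serves a single connected client on stub.getPort().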
+
+} // namespace spi
+} // namespace storage
+
diff --git a/persistence/src/vespa/persistence/spi/.gitignore b/persistence/src/vespa/persistence/spi/.gitignore
new file mode 100644
index 00000000000..7e7c0fe7fae
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/.gitignore
@@ -0,0 +1,2 @@
+/.depend
+/Makefile
diff --git a/persistence/src/vespa/persistence/spi/CMakeLists.txt b/persistence/src/vespa/persistence/spi/CMakeLists.txt
new file mode 100644
index 00000000000..17cf823279b
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/CMakeLists.txt
@@ -0,0 +1,15 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_library(persistence_spi OBJECT
+ SOURCES
+ bucket.cpp
+ bucketinfo.cpp
+ exceptions.cpp
+ persistenceprovider.cpp
+ partitionstate.cpp
+ abstractpersistenceprovider.cpp
+ clusterstate.cpp
+ context.cpp
+ metricpersistenceprovider.cpp
+ read_consistency.cpp
+ DEPENDS
+)
diff --git a/persistence/src/vespa/persistence/spi/abstractpersistenceprovider.cpp b/persistence/src/vespa/persistence/spi/abstractpersistenceprovider.cpp
new file mode 100644
index 00000000000..31e7975a040
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/abstractpersistenceprovider.cpp
@@ -0,0 +1,77 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/persistence/spi/abstractpersistenceprovider.h>
+#include <vespa/document/update/documentupdate.h>
+#include <vespa/document/fieldset/fieldsets.h>
+
+
+namespace storage {
+
+namespace spi {
+
+UpdateResult
+AbstractPersistenceProvider::update(const Bucket& bucket,
+ Timestamp ts,
+ const DocumentUpdate::SP& upd,
+ Context& context)
+{
+ GetResult getResult = get(bucket,
+ document::AllFields(),
+ upd->getId(), context);
+
+ if (getResult.hasError()) {
+ return UpdateResult(getResult.getErrorCode(),
+ getResult.getErrorMessage());
+ }
+
+ if (!getResult.hasDocument()) {
+ return UpdateResult();
+ }
+
+ upd->applyTo(getResult.getDocument());
+
+ Result putResult = put(bucket,
+ ts,
+ getResult.getDocumentPtr(),
+ context);
+
+ if (putResult.hasError()) {
+ return UpdateResult(putResult.getErrorCode(),
+ putResult.getErrorMessage());
+ }
+
+ return UpdateResult(getResult.getTimestamp());
+}
+
+RemoveResult
+AbstractPersistenceProvider::removeIfFound(const Bucket& b,
+ Timestamp timestamp,
+ const DocumentId& id,
+ Context& context)
+{
+ return remove(b, timestamp, id, context);
+}
+
+BucketIdListResult
+AbstractPersistenceProvider::getModifiedBuckets() const
+{
+ BucketIdListResult::List list;
+ return BucketIdListResult(list);
+}
+
+Result
+AbstractPersistenceProvider::move(const Bucket& source,
+ PartitionId target,
+ Context& context)
+{
+ spi::Bucket to(source.getBucketId(), spi::PartitionId(target));
+
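+    // Joining a bucket with itself into a bucket on the target partition
+    // is equivalent to moving it there.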
+ return join(source, source, to, context);
+}
+
+} // spi
+
+} // storage
diff --git a/persistence/src/vespa/persistence/spi/abstractpersistenceprovider.h b/persistence/src/vespa/persistence/spi/abstractpersistenceprovider.h
new file mode 100644
index 00000000000..f06d20860ad
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/abstractpersistenceprovider.h
@@ -0,0 +1,90 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <vespa/persistence/spi/persistenceprovider.h>
+
+namespace storage {
+
+namespace spi {
+
+/**
+ * Simplified abstract persistence provider class. Implements
+ * some of the less frequently used functions. Implementors are still encouraged
+ * to review the full PersistenceProvider class to verify that
+ * their desired behaviour is implemented.
+ */
+class AbstractPersistenceProvider : public PersistenceProvider
+{
+public:
+ /**
+ * Default impl is empty.
+ */
+    virtual Result initialize() { return Result(); }
+
+ /**
+ * Updates the document by calling get(), updating the document,
+ * then calling put() on the result.
+ */
+ virtual UpdateResult update(const Bucket&,
+ Timestamp,
+ const DocumentUpdate::SP&,
+ Context&);
+
+ /**
+     * Default impl is empty.
+ */
+ virtual Result createBucket(const Bucket&, Context&) { return Result(); }
+
+ /**
+ * Default impl is empty.
+ */
+ virtual Result maintain(const Bucket&,
+ MaintenanceLevel) { return Result(); }
+
+ /**
+ * Default impl is empty.
+ */
+ virtual Result removeEntry(const Bucket&,
+ Timestamp, Context&) { return Result(); }
+
+ /**
+     * Default impl is a no-op that returns success.
+ */
+ virtual Result flush(const Bucket&, Context&) { return Result(); }
+
+ /**
+ * Default impl is remove().
+ */
+ virtual RemoveResult removeIfFound(const Bucket&,
+ Timestamp,
+ const DocumentId&,
+ Context&);
+
+ /**
+     * Default impl is empty.
+ */
+ virtual Result setClusterState(const ClusterState&)
+ { return Result(); }
+
+ /**
+     * Default impl is empty.
+ */
+ virtual Result setActiveState(const Bucket&,
+ BucketInfo::ActiveState)
+ { return Result(); }
+
+ /**
+     * Default impl is empty.
+ */
+ virtual BucketIdListResult getModifiedBuckets() const;
+
+ /**
+ * Uses join by default.
+ */
+ virtual Result move(const Bucket& source, PartitionId id, Context&);
+};
+
+}
+
+}
+
diff --git a/persistence/src/vespa/persistence/spi/bucket.cpp b/persistence/src/vespa/persistence/spi/bucket.cpp
new file mode 100644
index 00000000000..423ffdd496c
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/bucket.cpp
@@ -0,0 +1,28 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/persistence/spi/bucket.h>
+#include <sstream>
+#include <iomanip>
+
+namespace storage {
+namespace spi {
+
+std::string Bucket::toString() const {
+ std::ostringstream ost;
+ print(ost);
+ return ost.str();
+}
+
+void
+Bucket::print(std::ostream& out) const
+{
+ out << "Bucket(0x"
+ << std::hex << std::setw(sizeof(document::BucketId::Type) * 2)
+ << std::setfill('0') << _bucket.getId()
+ << std::dec
+ << ", partition " << _partition
+ << ")";
+}
+
+} // spi
+} // storage
diff --git a/persistence/src/vespa/persistence/spi/bucket.h b/persistence/src/vespa/persistence/spi/bucket.h
new file mode 100644
index 00000000000..dc9419bbdae
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/bucket.h
@@ -0,0 +1,53 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * \class storage::spi::Bucket
+ * \ingroup spi
+ *
+ * \brief Wrapper class for a bucket identifier.
+ *
+ * We don't want the persistence implementation to have to know how to map
+ * buckets to partitions. Thus we want the service layer to always provide a
+ * partition identifier together with bucket identifiers. This wrapper class
+ * exists to ensure we always have a partition, and to make interfaces look
+ * simpler.
+ */
+
+#pragma once
+
+#include <persistence/spi/types.h>
+
+namespace storage {
+namespace spi {
+
+class Bucket {
+ document::BucketId _bucket;
+ PartitionId _partition;
+
+public:
+ Bucket() : _bucket(0), _partition(0) {}
+ Bucket(const document::BucketId& b, PartitionId p)
+ : _bucket(b), _partition(p) {}
+
+ const document::BucketId& getBucketId() const { return _bucket; }
+ PartitionId getPartition() const { return _partition; }
+
+ /** Convert easily to a document bucket id to make class easy to use. */
+ operator const document::BucketId&() const { return _bucket; }
+
+ bool operator==(const Bucket& o) const {
+ return (_bucket == o._bucket && _partition == o._partition);
+ }
+
+ void print(std::ostream& out) const;
+
+ std::string toString() const;
+};
+
+inline std::ostream& operator<<(std::ostream& out, const Bucket& bucket) {
+ bucket.print(out);
+ return out;
+}
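+
+// Usage sketch (the bucket id value is hypothetical):
+//
+//   Bucket b(document::BucketId(0x4000000000000001), PartitionId(0));
+//   std::cout << b << "\n";  // Bucket(0x4000000000000001, partition 0)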
+
+} // spi
+} // storage
+
diff --git a/persistence/src/vespa/persistence/spi/bucketinfo.cpp b/persistence/src/vespa/persistence/spi/bucketinfo.cpp
new file mode 100644
index 00000000000..4637b5b598b
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/bucketinfo.cpp
@@ -0,0 +1,75 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/persistence/spi/bucketinfo.h>
+#include <sstream>
+
+namespace storage {
+namespace spi {
+
+BucketInfo::BucketInfo()
+ : _checksum(0),
+ _documentCount(0),
+ _documentSize(0),
+ _entryCount(0),
+ _size(0),
+ _ready(NOT_READY),
+ _active(NOT_ACTIVE)
+{
+}
+
+BucketInfo::BucketInfo(BucketChecksum checksum,
+ uint32_t docCount,
+ uint32_t docSize,
+ uint32_t metaEntryCount,
+ uint32_t size,
+ ReadyState ready,
+ ActiveState active)
+ : _checksum(checksum),
+ _documentCount(docCount),
+ _documentSize(docSize),
+ _entryCount(metaEntryCount),
+ _size(size),
+ _ready(ready),
+ _active(active)
+{
+}
+
+bool
+BucketInfo::operator==(const BucketInfo& o) const
+{
+ return (_checksum == o._checksum
+ && _documentCount == o._documentCount
+ && _documentSize == o._documentSize
+ && _entryCount == o._entryCount
+ && _size == o._size
+ && _ready == o._ready
+ && _active == o._active);
+}
+
+void
+BucketInfo::print(std::ostream& out) const
+{
+ out << "BucketInfo(";
+ out << "crc 0x" << std::hex << _checksum << std::dec
+ << ", documentCount " << _documentCount;
+ if (_documentSize != 0) {
+ out << ", documentSize " << _documentSize;
+ }
+ out << ", entryCount " << _entryCount;
+ if (_size != 0) {
+ out << ", usedSize " << _size;
+ }
+ out << ", ready " << (_ready ? "true" : "false")
+ << ", active " << (_active ? "true" : "false");
+ out << ")";
+}
+
+std::string
+BucketInfo::toString() const {
+ std::ostringstream ost;
+ print(ost);
+ return ost.str();
+}
+
+} // spi
+} // storage
diff --git a/persistence/src/vespa/persistence/spi/bucketinfo.h b/persistence/src/vespa/persistence/spi/bucketinfo.h
new file mode 100644
index 00000000000..2bc74f8204f
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/bucketinfo.h
@@ -0,0 +1,110 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * \class storage::spi::BucketInfo
+ * \ingroup spi
+ */
+
+#pragma once
+
+#include <boost/utility.hpp>
+#include <persistence/spi/types.h>
+
+namespace storage {
+namespace spi {
+
+class BucketInfo {
+public:
+ enum ReadyState {
+ NOT_READY,
+ READY
+ };
+
+ enum ActiveState {
+ NOT_ACTIVE,
+ ACTIVE
+ };
+
+ /** Create an invalid bucket info object. */
+ BucketInfo();
+
+ BucketInfo(BucketChecksum checksum,
+ uint32_t docCount,
+ uint32_t docSize,
+ uint32_t entryCount,
+ uint32_t size,
+ ReadyState ready = READY,
+ ActiveState active = NOT_ACTIVE);
+
+ bool operator==(const BucketInfo& o) const;
+ void print(std::ostream& out) const;
+
+ std::string toString() const;
+
+ /**
+     * Get the checksum of the bucket. An empty bucket should have a checksum of
+ * zero. The checksum should only include data from the latest versions of
+ * non-removed documents. Otherwise, the checksum implementation is up to
+ * the persistence implementation. (Unless one wants to run multiple
+ * persistence implementations in the same cluster, in which case they have
+ * to match).
+ */
+ BucketChecksum getChecksum() const { return _checksum; }
+
+ /**
+ * The number of unique documents that have not been removed from the
+ * bucket. A unique document count above the splitting threshold will cause
+ * the bucket to be split.
+ */
+ uint32_t getDocumentCount() const { return _documentCount; }
+
+ /**
+ * The total size of all the unique documents in this bucket. A size above
+ * the splitting threshold will cause the bucket to be split. Knowing size
+ * is optional, but a bucket with more than zero unique documents should
+ * always return a non-zero value for size. If splitting on size is not
+ * required or desired, a simple solution here is to just set the number
+ * of unique documents as the size.
+ */
+ uint32_t getDocumentSize() const { return _documentSize; }
+
+ /**
+ * The number of entries in the bucket. For a persistence layer
+ * keeping history of data (multiple versions of a document or remove
+ * entries), it may use more meta entries in the bucket than it has unique
+     * documents. If the sum of meta entries from a pair of joinable buckets goes
+ * below the join threshold, the buckets will be joined.
+ */
+ uint32_t getEntryCount() const { return _entryCount; }
+
+ /**
+ * The total size used by the persistence layer to store all the documents
+ * for a given bucket. Possibly excluding pre-allocated space not currently
+ * in use. Knowing size is optional, but if the bucket contains more than
+ * zero entries, it should return a non-zero value for used size.
+ */
+ uint32_t getUsedSize() const { return _size; }
+
+ ReadyState getReady() const { return _ready; }
+ ActiveState getActive() const { return _active; }
+
+ bool isReady() const { return _ready == READY; }
+ bool isActive() const { return _active == ACTIVE; }
+
+private:
+ BucketChecksum _checksum;
+ uint32_t _documentCount;
+ uint32_t _documentSize;
+ uint32_t _entryCount;
+ uint32_t _size;
+ ReadyState _ready;
+ ActiveState _active;
+};
+
+inline std::ostream& operator<<(std::ostream& out, const BucketInfo& info) {
+ info.print(out);
+ return out;
+}
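+
+// Construction sketch (all values are hypothetical):
+//
+//   BucketInfo info(BucketChecksum(0x12345678),
+//                   10,    // document count
+//                   2048,  // total document size
+//                   12,    // meta entry count
+//                   4096,  // used size
+//                   BucketInfo::READY,
+//                   BucketInfo::ACTIVE);
+//   assert(info.isReady() && info.isActive());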
+
+} // spi
+} // storage
+
diff --git a/persistence/src/vespa/persistence/spi/clusterstate.cpp b/persistence/src/vespa/persistence/spi/clusterstate.cpp
new file mode 100644
index 00000000000..26897a79bc1
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/clusterstate.cpp
@@ -0,0 +1,108 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/persistence/spi/clusterstate.h>
+
+namespace storage {
+namespace spi {
+
+ClusterState::ClusterState(const lib::ClusterState& state,
+ uint16_t nodeIndex,
+ const lib::Distribution& distribution)
+ : _state(new lib::ClusterState(state)),
+ _nodeIndex(nodeIndex),
+ _distribution(new lib::Distribution(distribution.serialize()))
+{
+}
+
+void
+ClusterState::deserialize(vespalib::nbostream& i)
+{
+ vespalib::string clusterState;
+ vespalib::string distribution;
+
+ i >> clusterState;
+ i >> _nodeIndex;
+ i >> distribution;
+
+ _state.reset(new lib::ClusterState(clusterState));
+ _distribution.reset(new lib::Distribution(distribution));
+}
+
+ClusterState::ClusterState(vespalib::nbostream& i)
+{
+ deserialize(i);
+}
+
+ClusterState::ClusterState(const ClusterState& other)
+{
+ vespalib::nbostream o;
+ other.serialize(o);
+ deserialize(o);
+}
+
+ClusterState&
+ClusterState::operator=(const ClusterState& other)
+{
+ ClusterState copy(other);
+ _state = std::move(copy._state);
+ _nodeIndex = copy._nodeIndex;
+ _distribution = std::move(copy._distribution);
+ return *this;
+}
+
+bool
+ClusterState::shouldBeReady(const Bucket& b) const
+{
+ assert(_distribution.get());
+ assert(_state.get());
+
+ if (_distribution->getReadyCopies() >= _distribution->getRedundancy()) {
+ return true; // all copies should be ready
+ }
+
+ std::vector<uint16_t> idealNodes;
+ _distribution->getIdealNodes(lib::NodeType::STORAGE, *_state,
+ b.getBucketId(), idealNodes,
+ "uim", _distribution->getReadyCopies());
+ for (uint32_t i=0, n=idealNodes.size(); i<n; ++i) {
+ if (idealNodes[i] == _nodeIndex) return true;
+ }
+ return false;
+}
+
+bool
+ClusterState::clusterUp() const
+{
+ return _state.get() && _state->getClusterState() == lib::State::UP;
+}
+
+bool
+ClusterState::nodeUp() const
+{
+ return _state.get() &&
+ _state->getNodeState(lib::Node(lib::NodeType::STORAGE, _nodeIndex)).
+ getState().oneOf("uir");
+}
+
+bool
+ClusterState::nodeInitializing() const
+{
+ return _state.get() &&
+ _state->getNodeState(lib::Node(lib::NodeType::STORAGE, _nodeIndex)).
+ getState().oneOf("i");
+}
+
+void
+ClusterState::serialize(vespalib::nbostream& o) const
+{
+ assert(_distribution.get());
+ assert(_state.get());
+ vespalib::asciistream tmp;
+ _state->serialize(tmp, false);
+ o << tmp.str() << _nodeIndex;
+ o << _distribution->serialize();
+}
+
+} // spi
+} // storage
diff --git a/persistence/src/vespa/persistence/spi/clusterstate.h b/persistence/src/vespa/persistence/spi/clusterstate.h
new file mode 100644
index 00000000000..5d29c7ce122
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/clusterstate.h
@@ -0,0 +1,72 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <vespa/persistence/spi/bucket.h>
+#include <vespa/vdslib/state/clusterstate.h>
+#include <vespa/vdslib/distribution/distribution.h>
+
+namespace storage {
+
+namespace spi {
+
+/**
+ * Used to determine the state of the current node and its buckets.
+ */
+class ClusterState {
+public:
+ typedef std::shared_ptr<ClusterState> SP;
+
+ ClusterState(const lib::ClusterState& state,
+ uint16_t nodeIndex,
+ const lib::Distribution& distribution);
+
+ ClusterState(vespalib::nbostream& i);
+
+ ClusterState(const ClusterState& other);
+ ClusterState& operator=(const ClusterState& other);
+
+ /**
+ * Returns true if the system has been set up to have
+ * "ready" nodes, and the given bucket is in the ideal state
+ * for readiness.
+ *
+ * @param b The bucket to check.
+ */
+ bool shouldBeReady(const Bucket& b) const;
+
+ /**
+ * Returns false if the cluster has been deemed down. This can happen
+ * if the fleet controller has detected that too many nodes are down
+     * compared to the complete list of nodes, and deems the system to be
+ * unusable.
+ */
+ bool clusterUp() const;
+
+ /**
+ * Returns false if this node has been set in a state where it should not
+ * receive external load.
+ */
+ bool nodeUp() const;
+
+ /**
+ * Returns true if this node is marked as Initializing in the cluster state.
+ */
+ bool nodeInitializing() const;
+
+ /**
+ * Returns a serialized form of this object.
+ */
+ void serialize(vespalib::nbostream& o) const;
+
+private:
+ std::unique_ptr<lib::ClusterState> _state;
+ uint16_t _nodeIndex;
+ std::unique_ptr<lib::Distribution> _distribution;
+
+ void deserialize(vespalib::nbostream&);
+};
+
+}
+
+}
+
diff --git a/persistence/src/vespa/persistence/spi/clusterstateimpl.h b/persistence/src/vespa/persistence/spi/clusterstateimpl.h
new file mode 100644
index 00000000000..62cd7dce285
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/clusterstateimpl.h
@@ -0,0 +1,66 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <vespa/persistence/spi/bucket.h>
+#include <vespa/persistence/spi/clusterstate.h>
+
+namespace storage {
+
+namespace spi {
+
+/**
+ * Used to determine the state of the current node and its buckets.
+ */
+class ClusterStateImpl : public ClusterState{
+public:
+ ClusterStateImpl();
+
+ ClusterStateImpl(const lib::ClusterState& state,
+ uint16_t nodeIndex,
+ const lib::Distribution& distribution);
+
+ ClusterStateImpl(vespalib::nbostream& i);
+
+ ClusterStateImpl(const ClusterStateImpl& other);
+
+ ClusterStateImpl& operator=(const ClusterStateImpl& other);
+
+ /**
+ * Returns true if the given bucket is in the ideal state
+ * for readiness.
+ *
+ * @param b The bucket to check.
+ */
+ bool shouldBeReady(const Bucket& b) const;
+
+ /**
+ * Returns false if the cluster has been deemed down. This can happen
+ * if the fleet controller has detected that too many nodes are down
+     * compared to the complete list of nodes, and deems the system to be
+ * unusable.
+ */
+ bool clusterUp() const;
+
+ /**
+ * Returns false if this node has been set in a state where it should not
+ * receive external load.
+ */
+ bool nodeUp() const;
+
+ /**
+ * Returns a serialized form of this object.
+ */
+ void serialize(vespalib::nbostream& o) const;
+
+private:
+ std::unique_ptr<lib::ClusterState> _state;
+ uint16_t _nodeIndex;
+ std::unique_ptr<lib::Distribution> _distribution;
+
+ void deserialize(vespalib::nbostream&);
+};
+
+}
+
+}
+
diff --git a/persistence/src/vespa/persistence/spi/context.cpp b/persistence/src/vespa/persistence/spi/context.cpp
new file mode 100644
index 00000000000..dc84aaf0e34
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/context.cpp
@@ -0,0 +1,9 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/persistence/spi/context.h>
+
+namespace storage {
+namespace spi {
+
+} // spi
+} // storage
diff --git a/persistence/src/vespa/persistence/spi/context.h b/persistence/src/vespa/persistence/spi/context.h
new file mode 100644
index 00000000000..e25338552e2
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/context.h
@@ -0,0 +1,98 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * The context object is used to pass optional per operation data down to the
+ * persistence layer. It contains the following:
+ *
+ * The load type of the operation. Users can tag their load with load types,
+ * such that the backend can be configured to handle them differently. This can
+ * for instance be used to:
+ * - Control what should be cached.
+ * - Keep different metrics per load type, such that users can see metrics of
+ * what they are interested in without getting them polluted with data from
+ * other types of load.
+ *
+ * The priority used by the service layer is given. The service layer keeps a
+ * priority queue so the highest priority operations pending should be issued
+ * first, but priority can also be useful in the provider, for instance for the
+ * following:
+ * - Prioritize load through SPI against other load in provider.
+ * - Pause low priority load when we have high priority load running at the
+ * same time using the same resources.
+ *
+ * Our messagebus protocol allows tracing, which simplifies debugging. For
+ * instance, if some operation is slow, one can add tracing and see where it
+ * uses time, whether it has hit caches etc. As the persistence provider itself
+ * can become complex, we want it to be able to add to the trace as well. Thus
+ * we give it a way to attach its own entries to the mbus trace.
+ */
+
+#pragma once
+
+#include <vespa/metrics/loadmetric.h>
+#include <persistence/spi/types.h>
+#include <vespa/persistence/spi/read_consistency.h>
+#include <vector>
+#include <vespa/vespalib/stllike/string.h>
+#include <vespa/vespalib/trace/trace.h>
+#include <vespa/vespalib/util/linkedptr.h>
+
+namespace storage {
+namespace spi {
+
+typedef metrics::LoadType LoadType;
+
+typedef uint16_t Priority; // 0 - max pri, 255 - min pri
+
+// Define this type just because a ton of tests currently use it.
+struct Trace {
+ typedef int TraceLevel;
+};
+
+class Context {
+ const LoadType* _loadType;
+ Priority _priority;
+ vespalib::Trace _trace;
+ ReadConsistency _readConsistency;
+
+public:
+ Context(const LoadType& loadType, Priority pri, int maxTraceLevel)
+ : _loadType(&loadType),
+ _priority(pri),
+ _trace(maxTraceLevel),
+ _readConsistency(ReadConsistency::STRONG)
+ {}
+
+ const LoadType& getLoadType() const { return *_loadType; }
+ Priority getPriority() const { return _priority; }
+ int getMaxTraceLevel() const { return _trace.getLevel(); }
+ void addTrace(const vespalib::TraceNode& traceNode)
+ { _trace.getRoot().addChild(traceNode); }
+
+ /**
+ * A read operation might choose to relax its consistency requirements,
+ * allowing the persistence provider to perform optimizations on the
+ * operation as a result.
+ *
+ * A persistence provider is not required to support relaxed consistency
+ * and it might only support this on a subset of read operations, so this
+ * should only be considered a hint.
+ */
+ void setReadConsistency(ReadConsistency consistency) noexcept {
+ _readConsistency = consistency;
+ }
+ ReadConsistency getReadConsistency() const noexcept {
+ return _readConsistency;
+ }
+
+ vespalib::Trace& getTrace() { return _trace; }
+ const vespalib::Trace& getTrace() const { return _trace; }
+
+ bool shouldTrace(int level) { return _trace.shouldTrace(level); }
+ void trace(int level, vespalib::stringref msg, bool addTime = true)
+ { _trace.trace(level, msg, addTime); }
+};
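+
+// Construction sketch (the load type id and name are hypothetical):
+//
+//   LoadType defaultLoadType(0, "default");
+//   Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(5));
+//   context.setReadConsistency(ReadConsistency::WEAK);
+//   if (context.shouldTrace(5)) {
+//       context.trace(5, "cache hit");
+//   }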
+
+} // spi
+} // storage
+
diff --git a/persistence/src/vespa/persistence/spi/docentry.h b/persistence/src/vespa/persistence/spi/docentry.h
new file mode 100644
index 00000000000..78db866d70e
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/docentry.h
@@ -0,0 +1,229 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * \class storage::spi::DocEntry
+ * \ingroup spi
+ *
+ * \brief Represents a document with metadata stored.
+ *
+ * To support merge, all SPI implementations need some common metadata. To
+ * iterate efficiently, we also want the option of returning only metadata or
+ * similar. Thus
+ * we need a class to contain all generic parts stored by all SPI
+ * implementations.
+ */
+
+#pragma once
+
+#include <persistence/spi/types.h>
+
+namespace storage {
+namespace spi {
+
+enum DocumentMetaFlags {
+ NONE = 0x0,
+ REMOVE_ENTRY = 0x1
+};
+
+class DocEntry : public document::Printable {
+public:
+ typedef uint32_t SizeType;
+private:
+ Timestamp _timestamp;
+ int _metaFlags;
+ SizeType _persistedDocumentSize;
+ SizeType _size;
+ DocumentId::UP _documentId;
+ Document::UP _document;
+public:
+ typedef vespalib::LinkedPtr<DocEntry> LP;
+ typedef std::unique_ptr<DocEntry> UP;
+
+ DocEntry(Timestamp t, int metaFlags, Document::UP doc)
+ : _timestamp(t),
+ _metaFlags(metaFlags),
+ _persistedDocumentSize(doc->getSerializedSize()),
+ _size(_persistedDocumentSize + sizeof(DocEntry)),
+ _documentId(),
+ _document(std::move(doc))
+ {
+ }
+
+ /**
+ * Constructor that can be used by providers that already know
+ * the serialized size of the document, so the potentially expensive
+ * call to getSerializedSize can be avoided.
+ */
+ DocEntry(Timestamp t,
+ int metaFlags,
+ Document::UP doc,
+ size_t serializedDocumentSize)
+ : _timestamp(t),
+ _metaFlags(metaFlags),
+ _persistedDocumentSize(serializedDocumentSize),
+ _size(_persistedDocumentSize + sizeof(DocEntry)),
+ _documentId(),
+ _document(std::move(doc))
+ {
+ }
+
+ DocEntry(Timestamp t, int metaFlags, const DocumentId& docId)
+ : _timestamp(t),
+ _metaFlags(metaFlags),
+ _persistedDocumentSize(docId.getSerializedSize()),
+ _size(_persistedDocumentSize + sizeof(DocEntry)),
+ _documentId(new DocumentId(docId)),
+ _document()
+ {
+ }
+
+ DocEntry(Timestamp t, int metaFlags)
+ : _timestamp(t),
+ _metaFlags(metaFlags),
+ _persistedDocumentSize(0),
+ _size(sizeof(DocEntry)),
+ _documentId(),
+ _document()
+ {
+ }
+
+ DocEntry* clone() const {
+ DocEntry* ret;
+ if (_documentId.get() != 0) {
+ ret = new DocEntry(_timestamp, _metaFlags, *_documentId);
+ ret->setPersistedDocumentSize(_persistedDocumentSize);
+ } else if (_document.get()) {
+ ret = new DocEntry(_timestamp, _metaFlags,
+ Document::UP(new Document(*_document)),
+ _persistedDocumentSize);
+ } else {
+ ret = new DocEntry(_timestamp, _metaFlags);
+ ret->setPersistedDocumentSize(_persistedDocumentSize);
+ }
+ return ret;
+ }
+
+ const Document* getDocument() const { return _document.get(); }
+ const DocumentId* getDocumentId() const {
+ return (_document.get() != 0 ? &_document->getId()
+ : _documentId.get());
+ }
+ Document::UP releaseDocument() { return std::move(_document); }
+ bool isRemove() const { return (_metaFlags & REMOVE_ENTRY); }
+ Timestamp getTimestamp() const { return _timestamp; }
+
+ int getFlags() const { return _metaFlags; }
+ void setFlags(int flags) { _metaFlags = flags; }
+ /**
+ * @return In-memory size of this doc entry, including document instance.
+ * In essence: serialized size of document + sizeof(DocEntry).
+ */
+ SizeType getSize() const { return _size; }
+ /**
+ * If entry contains a document, returns its serialized size.
+ * If entry contains a document id, returns the serialized size of
+ * the id alone.
+ * Otherwise (i.e. metadata only), returns zero.
+ */
+ SizeType getDocumentSize() const
+ {
+ assert(_size >= sizeof(DocEntry));
+ return _size - sizeof(DocEntry);
+ }
+ /**
+ * Return size of document as it exists in persisted form. By default
+ * this will return the serialized size of the entry's document instance,
+ * but for persistence providers that are able to provide this information
+ * efficiently, this value can be set explicitly to provide better statistical
+ * tracking for e.g. visiting operations in the service layer.
+ * If explicitly set, this value shall be the size of the document _before_
+ * any field filtering is performed.
+ */
+ SizeType getPersistedDocumentSize() const { return _persistedDocumentSize; }
+ /**
+ * Set persisted size of document. Optional.
+ * @see getPersistedDocumentSize
+ */
+ void setPersistedDocumentSize(SizeType persistedDocumentSize) {
+ _persistedDocumentSize = persistedDocumentSize;
+ }
+
+ void print(std::ostream& out, bool, const std::string&) const
+ {
+ out << "DocEntry(" << _timestamp << ", "
+ << _metaFlags << ", ";
+ if (_documentId.get() != 0) {
+ out << *_documentId;
+ } else if (_document.get()) {
+ out << "Doc(" << _document->getId() << ")";
+ } else {
+ out << "metadata only";
+ }
+ out << ")";
+ }
+
+ void prettyPrint(std::ostream& out) const
+ {
+ std::string flags;
+ if (_metaFlags == REMOVE_ENTRY) {
+ flags = " (remove)";
+ }
+
+ out << "DocEntry(Timestamp: " << _timestamp
+ << ", size " << getPersistedDocumentSize() << ", ";
+ if (_documentId.get() != 0) {
+ out << *_documentId;
+ } else if (_document.get()) {
+ out << "Doc(" << _document->getId() << ")";
+ } else {
+ out << "metadata only";
+ }
+ out << flags << ")";
+ }
+
+ bool operator==(const DocEntry& entry) const {
+ if (_timestamp != entry._timestamp) {
+ return false;
+ }
+
+ if (_metaFlags != entry._metaFlags) {
+ return false;
+ }
+
+ if (_documentId.get()) {
+ if (!entry._documentId.get()) {
+ return false;
+ }
+
+ if (*_documentId != *entry._documentId) {
+ return false;
+ }
+ } else {
+ if (entry._documentId.get()) {
+ return false;
+ }
+ }
+
+ if (_document.get()) {
+ if (!entry._document.get()) {
+ return false;
+ }
+
+ if (*_document != *entry._document) {
+ return false;
+ }
+ } else {
+ if (entry._document.get()) {
+ return false;
+ }
+ }
+ if (_persistedDocumentSize != entry._persistedDocumentSize) {
+ return false;
+ }
+
+ return true;
+ }
+};
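+
+// Construction sketch mirroring the three variants above ('doc' and 'docId'
+// are assumed to exist):
+//
+//   DocEntry putEntry(Timestamp(1000), NONE, std::move(doc));   // document
+//   DocEntry removeEntry(Timestamp(1001), REMOVE_ENTRY, docId); // remove
+//   DocEntry metaEntry(Timestamp(1002), NONE);                  // metadata only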
+
+} // spi
+} // storage
+
+
diff --git a/persistence/src/vespa/persistence/spi/documentselection.h b/persistence/src/vespa/persistence/spi/documentselection.h
new file mode 100644
index 00000000000..c3d4f050198
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/documentselection.h
@@ -0,0 +1,33 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * \class storage::spi::DocumentSelection
+ * \ingroup spi
+ *
+ * \brief
+ */
+
+#pragma once
+
+#include <string>
+#include <vespa/document/fieldvalue/document.h>
+
+namespace storage {
+namespace spi {
+
+class DocumentSelection
+{
+ std::string _documentSelection;
+ public:
+ explicit DocumentSelection(const std::string& docSel)
+ : _documentSelection(docSel) {}
+
+ bool match(const document::Document&) const { return true; }
+
+ const std::string& getDocumentSelection() const {
+ return _documentSelection;
+ }
+};
+
+}
+}
+
diff --git a/persistence/src/vespa/persistence/spi/exceptions.cpp b/persistence/src/vespa/persistence/spi/exceptions.cpp
new file mode 100644
index 00000000000..be0cf5dd2b2
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/exceptions.cpp
@@ -0,0 +1,12 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "exceptions.h"
+
+namespace storage {
+namespace spi {
+
+VESPA_IMPLEMENT_EXCEPTION(HandledException, vespalib::Exception);
+
+} // spi
+} // storage
+
diff --git a/persistence/src/vespa/persistence/spi/exceptions.h b/persistence/src/vespa/persistence/spi/exceptions.h
new file mode 100644
index 00000000000..240fddab908
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/exceptions.h
@@ -0,0 +1,21 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <vespa/vespalib/util/exceptions.h>
+
+namespace storage {
+namespace spi {
+
+/**
+ * Exception used where the cause has already been reported to the user, so
+ * the caller only needs to unwind, without logging the error or printing a
+ * backtrace.
+ *
+ * Used to keep log output precise, avoiding duplicate or nonspecific error
+ * reports from callers.
+ */
+VESPA_DEFINE_EXCEPTION(HandledException, vespalib::Exception);
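+
+// Typical use, as a sketch (the message is hypothetical): log the real error
+// where it occurs, then throw so callers simply unwind:
+//
+//   LOG(error, "bucket database corrupted; details logged above");
+//   throw HandledException("bucket database corrupted", VESPA_STRLOC);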
+
+} // spi
+} // storage
+
diff --git a/persistence/src/vespa/persistence/spi/matcher.h b/persistence/src/vespa/persistence/spi/matcher.h
new file mode 100644
index 00000000000..81e06627674
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/matcher.h
@@ -0,0 +1,42 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * \class storage::spi::Matcher
+ * \ingroup spi
+ *
+ * \brief Use a matcher to find what documents one is interested in.
+ */
+
+#pragma once
+
+#include <vespa/persistence/spi/docentry.h>
+#include <persistence/spi/documentsubset.h>
+#include <persistence/spi/types.h>
+
+namespace storage {
+namespace spi {
+
+class Matcher {
+ DocumentSubset _subset;
+
+public:
+ Matcher(const DocumentSubset& subset) : _subset(subset) {}
+ virtual ~Matcher() {}
+
+ virtual bool match(const DocEntry&) const = 0;
+
+ /**
+ * Get the document subset that this matcher needs in order to decide
+ * whether a document entry should be matched or not. When match is called,
+     * the specified information is guaranteed to be set.
+ */
+ const DocumentSubset& getNeededParts() const { return _subset; }
+};
+
+struct AllMatcher : public Matcher {
+ AllMatcher() : Matcher(DocumentSubset(0)) {}
+ bool match(const DocEntry&) const { return true; }
+};
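+
+// Sketch of a concrete matcher that selects only remove entries:
+//
+//   struct RemoveEntryMatcher : public Matcher {
+//       RemoveEntryMatcher() : Matcher(DocumentSubset(0)) {}
+//       bool match(const DocEntry& entry) const { return entry.isRemove(); }
+//   };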
+
+} // spi
+} // storage
+
diff --git a/persistence/src/vespa/persistence/spi/metricpersistenceprovider.cpp b/persistence/src/vespa/persistence/spi/metricpersistenceprovider.cpp
new file mode 100644
index 00000000000..e30ad182e95
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/metricpersistenceprovider.cpp
@@ -0,0 +1,300 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/persistence/spi/metricpersistenceprovider.h>
+
+#include <vespa/log/log.h>
+
+LOG_SETUP(".persistence.spi.metrics");
+
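+// Every SPI call below is wrapped in PRE_PROCESS/POST_PROCESS: the elapsed
+// time is recorded on the metric matching the function (opIndex) and the
+// result's error code, and failed operations are logged at debug level.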
+#define PRE_PROCESS(opIndex) \
+ metrics::MetricTimer metricTimer;
+
+#define POST_PROCESS(opIndex, result) \
+ metricTimer.stop( \
+ *_functionMetrics[opIndex]->_metric[result.getErrorCode()]); \
+ if (result.hasError()) { \
+ LOG(debug, "SPI::%s failed: %s", \
+ _functionMetrics[opIndex]->getName().c_str(), \
+ result.toString().c_str()); \
+ }
+
+namespace storage {
+namespace spi {
+
+namespace {
+ typedef MetricPersistenceProvider Impl;
+}
+
+Impl::ResultMetrics::ResultMetrics(const char* opName)
+ : metrics::MetricSet(opName, "", ""),
+ _metric(Result::ERROR_COUNT)
+{
+ typedef vespalib::LinkedPtr<metrics::LongAverageMetric> ptr;
+ _metric[Result::NONE] = ptr(
+ new metrics::LongAverageMetric("success", "", "", this));
+ _metric[Result::TRANSIENT_ERROR] = ptr(
+ new metrics::LongAverageMetric("transient_error", "", "", this));
+ _metric[Result::PERMANENT_ERROR] = ptr(
+ new metrics::LongAverageMetric("permanent_error", "", "", this));
+ _metric[Result::TIMESTAMP_EXISTS] = ptr(
+ new metrics::LongAverageMetric("timestamp_exists", "", "", this));
+ _metric[Result::FATAL_ERROR] = ptr(
+ new metrics::LongAverageMetric("fatal_error", "", "", this));
+ _metric[Result::RESOURCE_EXHAUSTED] = ptr(
+ new metrics::LongAverageMetric("resource_exhausted", "", "", this));
+ // Assert that the above initialized all entries in vector
+ for (size_t i=0; i<_metric.size(); ++i) assert(_metric[i].get());
+}
+
+Impl::MetricPersistenceProvider(PersistenceProvider& next)
+ : metrics::MetricSet("spi", "", ""),
+ _next(&next),
+ _functionMetrics(23)
+{
+ defineResultMetrics(0, "initialize");
+ defineResultMetrics(1, "getPartitionStates");
+ defineResultMetrics(2, "listBuckets");
+ defineResultMetrics(3, "setClusterState");
+ defineResultMetrics(4, "setActiveState");
+ defineResultMetrics(5, "getBucketInfo");
+ defineResultMetrics(6, "put");
+ defineResultMetrics(7, "remove");
+ defineResultMetrics(8, "removeIfFound");
+ defineResultMetrics(9, "removeEntry");
+ defineResultMetrics(10, "update");
+ defineResultMetrics(11, "flush");
+ defineResultMetrics(12, "get");
+ defineResultMetrics(13, "createIterator");
+ defineResultMetrics(14, "iterate");
+ defineResultMetrics(15, "destroyIterator");
+ defineResultMetrics(16, "createBucket");
+ defineResultMetrics(17, "deleteBucket");
+ defineResultMetrics(18, "getModifiedBuckets");
+ defineResultMetrics(19, "maintain");
+ defineResultMetrics(20, "split");
+ defineResultMetrics(21, "join");
+ defineResultMetrics(22, "move");
+}
+
+void
+Impl::defineResultMetrics(int index, const char* name)
+{
+ _functionMetrics[index] = ResultMetrics::LP(new ResultMetrics(name));
+ registerMetric(*_functionMetrics[index]);
+}
+
+// Implementation of SPI functions
+
+Result
+Impl::initialize()
+{
+ PRE_PROCESS(0);
+ Result r(_next->initialize());
+ POST_PROCESS(0, r);
+ return r;
+}
+
+PartitionStateListResult
+Impl::getPartitionStates() const
+{
+ PRE_PROCESS(1);
+ PartitionStateListResult r(_next->getPartitionStates());
+ POST_PROCESS(1, r);
+ return r;
+}
+
+BucketIdListResult
+Impl::listBuckets(PartitionId v1) const
+{
+ PRE_PROCESS(2);
+ BucketIdListResult r(_next->listBuckets(v1));
+ POST_PROCESS(2, r);
+ return r;
+}
+
+Result
+Impl::setClusterState(const ClusterState& v1)
+{
+ PRE_PROCESS(3);
+ Result r(_next->setClusterState(v1));
+ POST_PROCESS(3, r);
+ return r;
+}
+
+Result
+Impl::setActiveState(const Bucket& v1, BucketInfo::ActiveState v2)
+{
+ PRE_PROCESS(4);
+ Result r(_next->setActiveState(v1, v2));
+ POST_PROCESS(4, r);
+ return r;
+}
+
+BucketInfoResult
+Impl::getBucketInfo(const Bucket& v1) const
+{
+ PRE_PROCESS(5);
+ BucketInfoResult r(_next->getBucketInfo(v1));
+ POST_PROCESS(5, r);
+ return r;
+}
+
+Result
+Impl::put(const Bucket& v1, Timestamp v2, const Document::SP& v3, Context& v4)
+{
+ PRE_PROCESS(6);
+ Result r(_next->put(v1, v2, v3, v4));
+ POST_PROCESS(6, r);
+ return r;
+}
+
+RemoveResult
+Impl::remove(const Bucket& v1, Timestamp v2, const DocumentId& v3, Context& v4)
+{
+ PRE_PROCESS(7);
+ RemoveResult r(_next->remove(v1, v2, v3, v4));
+ POST_PROCESS(7, r);
+ return r;
+}
+
+RemoveResult
+Impl::removeIfFound(const Bucket& v1, Timestamp v2, const DocumentId& v3,
+ Context& v4)
+{
+ PRE_PROCESS(8);
+ RemoveResult r(_next->removeIfFound(v1, v2, v3, v4));
+ POST_PROCESS(8, r);
+ return r;
+}
+
+Result
+Impl::removeEntry(const Bucket& v1, Timestamp v2, Context& v3)
+{
+ PRE_PROCESS(9);
+ Result r(_next->removeEntry(v1, v2, v3));
+ POST_PROCESS(9, r);
+ return r;
+}
+
+UpdateResult
+Impl::update(const Bucket& v1, Timestamp v2, const DocumentUpdate::SP& v3,
+ Context& v4)
+{
+ PRE_PROCESS(10);
+ UpdateResult r(_next->update(v1, v2, v3, v4));
+ POST_PROCESS(10, r);
+ return r;
+}
+
+Result
+Impl::flush(const Bucket& v1, Context& v2)
+{
+ PRE_PROCESS(11);
+ Result r(_next->flush(v1, v2));
+ POST_PROCESS(11, r);
+ return r;
+}
+
+GetResult
+Impl::get(const Bucket& v1, const document::FieldSet& v2, const DocumentId& v3,
+ Context& v4) const
+{
+ PRE_PROCESS(12);
+ GetResult r(_next->get(v1, v2, v3, v4));
+ POST_PROCESS(12, r);
+ return r;
+}
+
+CreateIteratorResult
+Impl::createIterator(const Bucket& v1, const document::FieldSet& v2,
+ const Selection& v3, IncludedVersions v4, Context& v5)
+{
+ PRE_PROCESS(13);
+ CreateIteratorResult r(_next->createIterator(v1, v2, v3, v4, v5));
+ POST_PROCESS(13, r);
+ return r;
+}
+
+IterateResult
+Impl::iterate(IteratorId v1, uint64_t v2, Context& v3) const
+{
+ PRE_PROCESS(14);
+ IterateResult r(_next->iterate(v1, v2, v3));
+ POST_PROCESS(14, r);
+ return r;
+}
+
+Result
+Impl::destroyIterator(IteratorId v1, Context& v2)
+{
+ PRE_PROCESS(15);
+ Result r(_next->destroyIterator(v1, v2));
+ POST_PROCESS(15, r);
+ return r;
+}
+
+Result
+Impl::createBucket(const Bucket& v1, Context& v2)
+{
+ PRE_PROCESS(16);
+ Result r(_next->createBucket(v1, v2));
+ POST_PROCESS(16, r);
+ return r;
+}
+
+Result
+Impl::deleteBucket(const Bucket& v1, Context& v2)
+{
+ PRE_PROCESS(17);
+ Result r(_next->deleteBucket(v1, v2));
+ POST_PROCESS(17, r);
+ return r;
+}
+
+BucketIdListResult
+Impl::getModifiedBuckets() const
+{
+ PRE_PROCESS(18);
+ BucketIdListResult r(_next->getModifiedBuckets());
+ POST_PROCESS(18, r);
+ return r;
+}
+
+Result
+Impl::maintain(const Bucket& v1, MaintenanceLevel v2)
+{
+ PRE_PROCESS(19);
+ Result r(_next->maintain(v1, v2));
+ POST_PROCESS(19, r);
+ return r;
+}
+
+Result
+Impl::split(const Bucket& v1, const Bucket& v2, const Bucket& v3, Context& v4)
+{
+ PRE_PROCESS(20);
+ Result r(_next->split(v1, v2, v3, v4));
+ POST_PROCESS(20, r);
+ return r;
+}
+
+Result
+Impl::join(const Bucket& v1, const Bucket& v2, const Bucket& v3, Context& v4)
+{
+ PRE_PROCESS(21);
+ Result r(_next->join(v1, v2, v3, v4));
+ POST_PROCESS(21, r);
+ return r;
+}
+
+Result
+Impl::move(const Bucket& v1, PartitionId v2, Context& v3)
+{
+ PRE_PROCESS(22);
+ Result r(_next->move(v1, v2, v3));
+ POST_PROCESS(22, r);
+ return r;
+}
+
+} // spi
+} // storage
diff --git a/persistence/src/vespa/persistence/spi/metricpersistenceprovider.h b/persistence/src/vespa/persistence/spi/metricpersistenceprovider.h
new file mode 100644
index 00000000000..168f332614c
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/metricpersistenceprovider.h
@@ -0,0 +1,73 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * SPI implementation wrapper to add metrics.
+ */
+
+#pragma once
+
+#include <vespa/persistence/spi/persistenceprovider.h>
+#include <vespa/metrics/metrics.h>
+
+namespace storage {
+namespace spi {
+
+class MetricPersistenceProvider : public PersistenceProvider,
+ public metrics::MetricSet
+{
+ struct ResultMetrics : public metrics::MetricSet {
+ typedef vespalib::LinkedPtr<ResultMetrics> LP;
+ std::vector<vespalib::LinkedPtr<metrics::LongAverageMetric> > _metric;
+
+ ResultMetrics(const char* opName);
+ };
+ PersistenceProvider* _next;
+ std::vector<ResultMetrics::LP> _functionMetrics;
+
+public:
+ typedef std::unique_ptr<MetricPersistenceProvider> UP;
+
+ MetricPersistenceProvider(PersistenceProvider&);
+
+ void setNextProvider(PersistenceProvider& p) { _next = &p; }
+
+ // Implementation of the PersistenceProvider API
+ virtual Result initialize();
+ virtual PartitionStateListResult getPartitionStates() const;
+ virtual BucketIdListResult listBuckets(PartitionId) const;
+ virtual Result setClusterState(const ClusterState&);
+ virtual Result setActiveState(const Bucket&, BucketInfo::ActiveState);
+ virtual BucketInfoResult getBucketInfo(const Bucket&) const;
+ virtual Result put(const Bucket&, Timestamp, const Document::SP&, Context&);
+ virtual RemoveResult remove(const Bucket&, Timestamp,
+ const DocumentId&, Context&);
+ virtual RemoveResult removeIfFound(const Bucket&, Timestamp,
+ const DocumentId&, Context&);
+ virtual Result removeEntry(const Bucket&, Timestamp, Context&);
+ virtual UpdateResult update(const Bucket&, Timestamp,
+ const DocumentUpdate::SP&, Context&);
+ virtual Result flush(const Bucket&, Context&);
+ virtual GetResult get(const Bucket&, const document::FieldSet&,
+ const DocumentId&, Context&) const;
+ virtual CreateIteratorResult createIterator(
+ const Bucket&, const document::FieldSet&, const Selection&,
+ IncludedVersions, Context&);
+ virtual IterateResult iterate(IteratorId, uint64_t maxByteSize,
+ Context&) const;
+ virtual Result destroyIterator(IteratorId, Context&);
+ virtual Result createBucket(const Bucket&, Context&);
+ virtual Result deleteBucket(const Bucket&, Context&);
+ virtual BucketIdListResult getModifiedBuckets() const;
+ virtual Result maintain(const Bucket&,
+ MaintenanceLevel level);
+ virtual Result split(const Bucket& source, const Bucket& target1,
+ const Bucket& target2, Context&);
+ virtual Result join(const Bucket& source1, const Bucket& source2,
+ const Bucket& target, Context&);
+ virtual Result move(const Bucket&, PartitionId target, Context&);
+
+private:
+ void defineResultMetrics(int index, const char* name);
+};
+
+} // spi
+} // storage
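A usage sketch for the wrapper ("backend" stands in for a hypothetical concrete provider): every SPI call routed through the wrapper is forwarded to the backend and timed into the per-operation, per-result-code metrics defined in the .cpp above.

    #include <vespa/persistence/spi/metricpersistenceprovider.h>

    // "backend" is assumed to be some concrete PersistenceProvider implementation.
    void timedInitialize(storage::spi::PersistenceProvider& backend) {
        storage::spi::MetricPersistenceProvider wrapped(backend);
        // Calls through "wrapped" are forwarded to "backend" and recorded in
        // per-operation, per-result-code metrics (e.g. spi.initialize.success).
        storage::spi::Result r = wrapped.initialize();
        (void) r;
    }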
diff --git a/persistence/src/vespa/persistence/spi/partitionstate.cpp b/persistence/src/vespa/persistence/spi/partitionstate.cpp
new file mode 100644
index 00000000000..72f0ed863e1
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/partitionstate.cpp
@@ -0,0 +1,39 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/persistence/spi/partitionstate.h>
+
+namespace storage {
+namespace spi {
+
+PartitionState::PartitionState()
+ : _state(UP),
+ _reason()
+{
+}
+
+PartitionState::PartitionState(State s, vespalib::stringref reason)
+ : _state(s),
+ _reason(reason)
+{
+}
+
+
+PartitionStateList::PartitionStateList(PartitionId::Type partitionCount)
+ : _states(partitionCount)
+{
+}
+
+PartitionState&
+PartitionStateList::operator[](PartitionId::Type index)
+{
+ if (index >= _states.size()) {
+ std::ostringstream ost;
+ ost << "Cannot return disk " << index << " of " << _states.size();
+ throw vespalib::IllegalArgumentException(ost.str(), VESPA_STRLOC);
+ }
+ return _states[index];
+}
+
+} // spi
+} // storage
diff --git a/persistence/src/vespa/persistence/spi/partitionstate.h b/persistence/src/vespa/persistence/spi/partitionstate.h
new file mode 100644
index 00000000000..296945b4444
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/partitionstate.h
@@ -0,0 +1,53 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * \class storage::spi::PartitionState
+ * \ingroup spi
+ *
+ * \brief Information the service layer needs about the provider's partitions.
+ *
+ * In order to be able to utilize all hardware in parallel, the service layer
+ * is aware of partitions, and what buckets exist in various partitions.
+ *
+ * The service layer needs information about how many partitions exist, and if
+ * any of them are currently unavailable. This object describes what the
+ * service layer needs to know about disks.
+ */
+#pragma once
+
+#include <persistence/spi/types.h>
+
+namespace storage {
+namespace spi {
+
+struct PartitionState {
+ enum State { UP, DOWN };
+
+ PartitionState();
+ PartitionState(State s, vespalib::stringref reason);
+
+ State getState() const { return _state; }
+ const string& getReason() const { return _reason; }
+
+ bool isUp() const { return (_state == UP); }
+
+private:
+ State _state;
+ string _reason; // If not up, there should be a reason
+};
+
+class PartitionStateList {
+ std::vector<PartitionState> _states;
+
+public:
+ PartitionStateList(PartitionId::Type partitionCount);
+
+ PartitionState& operator[](PartitionId::Type index);
+ const PartitionState& operator[](PartitionId::Type index) const
+ { return const_cast<PartitionStateList&>(*this)[index]; }
+
+ PartitionId size() const { return PartitionId(_states.size()); }
+};
+
+} // spi
+} // storage
+
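A small sketch of how a provider might populate the list it reports to the service layer, with one disk up and one down (the reason string is hypothetical); this matches the bounds-checked operator[] in partitionstate.cpp above.

    #include <vespa/persistence/spi/partitionstate.h>

    storage::spi::PartitionStateList makeStates() {
        storage::spi::PartitionStateList states(2);
        // states[0] is default-constructed, i.e. UP with no reason.
        states[1] = storage::spi::PartitionState(
                storage::spi::PartitionState::DOWN, "I/O errors on disk 1");
        return states;
    }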
diff --git a/persistence/src/vespa/persistence/spi/persistenceprovider.cpp b/persistence/src/vespa/persistence/spi/persistenceprovider.cpp
new file mode 100644
index 00000000000..e6befc36c7e
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/persistenceprovider.cpp
@@ -0,0 +1,15 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/persistence/spi/persistenceprovider.h>
+
+namespace storage {
+namespace spi {
+
+PersistenceProvider::~PersistenceProvider()
+{
+}
+
+} // spi
+} // storage
+
diff --git a/persistence/src/vespa/persistence/spi/persistenceprovider.h b/persistence/src/vespa/persistence/spi/persistenceprovider.h
new file mode 100644
index 00000000000..9e0088214b8
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/persistenceprovider.h
@@ -0,0 +1,436 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <vespa/persistence/spi/bucket.h>
+#include <vespa/persistence/spi/bucketinfo.h>
+#include <vespa/persistence/spi/context.h>
+#include <vespa/persistence/spi/docentry.h>
+#include <vespa/persistence/spi/documentselection.h>
+#include <vespa/persistence/spi/partitionstate.h>
+#include <vespa/persistence/spi/result.h>
+#include <vespa/persistence/spi/selection.h>
+#include <persistence/spi/types.h>
+#include <vespa/persistence/spi/clusterstate.h>
+#include <vespa/document/fieldset/fieldset.h>
+
+namespace storage {
+namespace spi {
+
+/**
+ * This interface is the basis for a persistence provider in Vespa. A
+ * persistence provider is used by Vespa Storage to provide an elastic stateful
+ * system.
+ * <p/>
+ * The Vespa distribution mechanisms are based on distributing "buckets"
+ * between the nodes in the system. A bucket is an abstract concept that
+ * groups a set of documents. The persistence provider can choose freely
+ * how to implement a bucket, but it needs to be able to access a bucket as
+ * a unit. The placement of these units is controlled by the distributors.
+ * <p/>
+ * A persistence provider may support multiple "partitions". One example of
+ * a partition is a physical disk, but the exact meaning of "partitions"
+ * is left to the provider. It must, however, be able to report its
+ * partitions to the service layer.
+ * <p/>
+ * All operations return a Result object. The base Result class only
+ * encapsulates potential errors, which can be <i>transient</i>,
+ * <i>permanent</i> or <i>fatal</i>. Transient errors are errors where it's
+ * conceivable that retrying the operation would lead to success, either on
+ * this data copy or on others. Permanent errors are errors where the request
+ * itself is faulty. Fatal errors are transient errors that have uncovered a
+ * problem with this instance of the provider (such as a failing disk), and
+ * where the provider wants the process to be shut down.
+ * <p/>
+ * All write operations have a timestamp. This timestamp is generated
+ * by the distributor, and is guaranteed to be unique for the bucket we're
+ * writing to. A persistence provider is required to store "entries" for each of
+ * these operations, and associate the timestamp with that entry.
+ * Iteration code can retrieve these entries, including entries for remove
+ * operations. The provider is not required to keep any history beyond the last
+ * operation that was performed on a given document.
+ * <p/>
+ * The contract for all write operations is that after returning from the
+ * function, provider read methods (get, iterate) should reflect the modified
+ * state.
+ * <p/>
+ */
+struct PersistenceProvider
+{
+ typedef std::unique_ptr<PersistenceProvider> UP;
+
+ virtual ~PersistenceProvider();
+
+ /**
+ * Initializes the persistence provider. This function is called exactly
+ * once when the persistence provider starts. If any error is returned
+ * here, the service layer will shut down.
+ *
+ * Also note that this function is called in the application main thread,
+ * and any time spent in initialize will be while service layer node is
+ * considered down and unavailable.
+ */
+ virtual Result initialize() = 0;
+
+ /**
+ * Returns a list of the partitions available, and which are up and down.
+ * Currently called once on startup. Partitions are not allowed to change
+     * at runtime.
+ */
+ virtual PartitionStateListResult getPartitionStates() const = 0;
+
+ /**
+     * Return the list of buckets the provider has stored on the given partition.
+ * Typically called once per partition on startup.
+ */
+ virtual BucketIdListResult listBuckets(PartitionId) const = 0;
+
+ /**
+ * Updates the persistence provider with the last cluster state.
+ * Only cluster states that are assumed relevant for the provider are
+ * supplied (changes that relate to the distributor will not cause an
+ * update here).
+ */
+ virtual Result setClusterState(const ClusterState&) = 0;
+
+ /**
+ * Sets the bucket state to active or inactive. After this returns,
+ * other buckets may be deactivated, so the node must be able to serve
+ * the data from its secondary index or get reduced coverage.
+ */
+ virtual Result setActiveState(const Bucket&,
+ BucketInfo::ActiveState) = 0;
+
+ /**
+ * Retrieve metadata for a bucket, previously returned in listBuckets(),
+ * or created through SPI explicitly (createBucket) or implicitly
+ * (split, join).
+ */
+ virtual BucketInfoResult getBucketInfo(const Bucket&) const = 0;
+
+ /**
+ * Store the given document at the given microsecond time.
+ */
+ virtual Result put(const Bucket&, Timestamp, const Document::SP&, Context&) = 0;
+
+ /**
+     * This remove function assumes that there exists something to be removed.
+     * The data to be removed may not exist on this node though, so all remove
+     * entries inserted with this function should be kept for some time in
+     * order for data not to be reintroduced from other nodes that may be
+     * temporarily down. To avoid reintroduction of removed documents, nodes
+     * that have been down longer than removes are kept should have their data
+     * cleared before being reintroduced into the cluster.
+ * <p/>
+     * You may choose to ignore the remove if the document already exists (or has
+ * a remove entry) at a newer timestamp than the given one.
+ * <p/>
+     * In the special case where the document exists at the same timestamp
+     * as the one given, that entry should be turned into a remove entry. This is
+ * functionality needed in order for the cluster to be able to remove a
+ * subset of data not known ahead of the remove request.
+ *
+ * Postconditions:
+ * A successful invocation of this function shall cause a remove entry
+ * for the given timestamp and document ID pair to be present in a
+ * subsequent full iteration over the bucket if:
+ * - there did not already exist any entries for the document
+ * - OR: any existing entries are older than the remove's timestamp.
+ * A provider capable of preserving historical
+ * document entry information MAY choose to persist the remove even if
+ * these conditions are not met, but this is not mandatory. All instances of
+ * the provider in the cluster must operate deterministically in the same
+ * manner to ensure that applying a set of timestamped operations will end
+ * up with a consistent result across all the replica nodes.
+ * <p/>
+ * NOTE: "subsequent full iteration" in this context means an iteration
+ * operation that happens within the period in which removes are to be kept
+ * by the persistence provider and which is tagged to include removes and/or
+ * all versions.
+ * <p/>
+     * NOTE: if the given timestamp is higher than or equal to any
+ * existing put entry, those entries should not be returned in subsequent
+ * get calls. If the timestamp is lower than an existing put entry,
+ * those entries should still be available.
+ * <p/>
+ * EXAMPLE: A provider not supporting historical document entries is
+ * still fully conformant if it maintains the following invariants:
+ * - a remove for a document that does not have any existing entries is
+ * always persisted.
+ * - a remove with an older timestamp than any existing entries for the
+ * given document identifier (puts and/or removes) is not persisted, but
+ * ignored.
+ * - a put or remove with a newer timestamp than all existing entries
+ * for the given document identifier is persisted, causing older
+ * entries to be effectively discarded.
+ * For such a provider, iterating with removes and all versions should
+ * semantically be the same thing and yield the same results.
+ *
+ * @param timestamp The timestamp for the new bucket entry.
+ * @param id The ID to remove
+ */
+ virtual RemoveResult remove(const Bucket&,
+ Timestamp timestamp,
+ const DocumentId& id,
+ Context&) = 0;
+ /**
+ * @see remove()
+ * <p/>
+ * Used for external remove operations. removeIfFound() works as remove(),
+     * but you are not required to insert a remove entry if the document does
+     * not exist locally. This difference exists so that users can't fill the
+     * cluster up with remove entries by misspelling identifiers or repeatedly
+     * resending removes. It is still legal to store a remove entry, but note
+     * that the user patterns mentioned above may then fill up your buckets.
+ * <p/>
+ * @param timestamp The timestamp for the new bucket entry.
+ * @param id The ID to remove
+ */
+ virtual RemoveResult removeIfFound(const Bucket&,
+ Timestamp timestamp,
+ const DocumentId& id,
+ Context&) = 0;
+
+ /**
+     * Remove any trace of the entry with the given timestamp, be it a document
+     * or a remove entry. This is usually used to revert previously performed
+     * operations, as a best effort not to keep data we have reported a failure
+     * to insert. This operation should succeed even if no such entry exists.
+ */
+ virtual Result removeEntry(const Bucket&, Timestamp, Context&) = 0;
+
+ /**
+ * Partially modifies a document referenced by the document update.
+ *
+ * @param timestamp The timestamp to use for the new update entry.
+ * @param update The document update to apply to the stored document.
+ */
+ virtual UpdateResult update(const Bucket&,
+ Timestamp timestamp,
+ const DocumentUpdate::SP& update,
+ Context&) = 0;
+
+ /**
+ * The service layer may choose to batch certain commands. This means that
+ * the service layer will lock the bucket only once, then perform several
+ * commands, and finally get the bucket info from the bucket, and then
+ * flush it. This can be used to improve performance by caching the
+ * modifications, and persisting them to disk only when flush is called.
+ * The service layer guarantees that after one of these operations, flush()
+ * is called, regardless of whether the operation succeeded or not, before
+     * another bucket is processed in the same worker thread. The following
+ * operations can be batched and have the guarantees
+ * above:
+ * - put
+ * - get
+ * - remove (all versions)
+ * - update
+ * - revert
+ * - join
+ * <p/>
+ * A provider may of course choose to not sync to disk at flush time either,
+ * but then data may be more prone to being lost on node issues, and the
+ * provider must figure out when to flush its cache itself.
+ */
+ virtual Result flush(const Bucket&, Context&) = 0;
+
+ /**
+ * Retrieves the latest version of the document specified by the
+ * document id. If no versions were found, or the document was removed,
+ * the result should be successful, but contain no document (see GetResult).
+ *
+ * @param fieldSet A set of fields that should be retrieved.
+ * @param id The document id to retrieve.
+ */
+ virtual GetResult get(const Bucket&,
+ const document::FieldSet& fieldSet,
+ const DocumentId& id,
+ Context&) const = 0;
+
+ /**
+ * Create an iterator for a given bucket and selection criteria, returning
+ * a unique, non-zero iterator identifier that can be used by the caller as
+ * an argument to iterate and destroyIterator.
+ *
+ * Each successful invocation of createIterator shall be paired with
+ * a later invocation of destroyIterator by the caller to ensure
+ * resources are freed up. NOTE: this may not apply in a shutdown
+ * situation due to service layer communication channels closing down.
+ *
+ * It is assumed that a successful invocation of this function will result
+ * in some state being established in the persistence provider, holding
+ * the information required to match iterator ids up to their current
+ * iteration progress and selection criteria. destroyIterator will NOT
+ * be called when createIterator returns an error.
+ *
+ * @param selection Selection criteria used to limit the subset of
+ * the bucket's documents that will be returned by the iterator. The
+ * provider implementation may use these criteria to optimize its
+ * operation as it sees fit, as long as doing so does not violate
+ * selection correctness.
+ * @return A process-globally unique iterator identifier iff the result
+ * is successful and internal state has been created, otherwise an
+ * error. Identifier must be non-zero, as zero is used internally to
+ * signify an invalid iterator ID.
+ */
+ virtual CreateIteratorResult createIterator(
+ const Bucket&,
+ const document::FieldSet& fieldSet,
+ const Selection& selection, //TODO: Make AST
+ IncludedVersions versions,
+ Context&) = 0;
+
+ /**
+ * Iterate over a bucket's document space using a valid iterator id
+ * received from createIterator. Each invocation of iterate upon an
+ * iterator that has not yet fully exhausted its document space shall
+ * return a minimum of 1 document entry per IterateResult to ensure
+ * progress. An implementation shall limit the result set per invocation
+ * to document entries whose combined in-memory/serialized size is a "soft"
+ * maximum of maxByteSize. More specifically, the sum of getSize() over all
+ * returned DocEntry instances should be <= (maxByteSize + the size of the
+ * last document in the result set). This special case allows for limiting
+ * the result set both by observing "before the fact" that the next
+ * potential document to include would exceed the max size and by observing
+ * "after the fact" that the document that was just added caused the max
+ * size to be exceeded. However, if a document exceeds maxByteSize and not
+ * including it implies the result set would be empty, it must be included
+ * in the result anyway in order to not violate the progress requirement.
+ *
+ * The caller shall not make any assumptions on whether or not documents
+     * that arrive in--or are removed from--the bucket in the time between
+ * separate invocations of iterate for the same iterator id will show up
+ * in the results, assuming that these documents do not violate the
+ * selection criteria. This means that there is no requirement for
+ * maintaining a "snapshot" view of the bucket's state as it existed upon
+ * the initial createIterator call. Neither shall the caller make any
+ * assumptions on the ordering of the returned documents.
+ *
+ * The IterateResult shall--for each document entry that matches the
+ * selection criteria and falls within the maxByteSize limit mentioned
+ * above--return the following information in its result:
+ *
+ * -- For non-removed entries: A DocEntry where getDocument() will
+ * return a valid document instance and getSize() will return the
+ * serialized size of the document.
+ * -- For removed entries: A DocEntry where getDocumentId() will
+ * return a valid document identifier. Remove entries shall not
+ * contain document instances.
+ * -- For meta entries: A DocEntry that shall not contain a document
+ * instance nor should it include a document id instance (if
+ * included, would be ignored by the service layer in any context
+ * where metadata-only is requested).
+ *
+ * The service layer shall guarantee that no two invocations of iterate
+ * will happen simultaneously/concurrently for the same iterator id.
+ *
+ * Upon a successful invocation of iterate, the persistence provider shall
+ * update its internal state to account for the progress made so that new
+ * invocations will cover a new subset of the document space. When an
+ * IterateResult contains the final documents for the iteration, i.e. the
+ * iterator has reached its end, setCompleted() must be set on the result
+ * to indicate this to the caller. Calling iterate on an already completed
+ * iterator must only set this flag on the result and return without any
+ * documents.
+ *
+ * @param id An iterator ID returned by a previous call to createIterator
+ * @param maxByteSize An indication of the maximum number of bytes that
+ * should be returned.
+ */
+ virtual IterateResult iterate(IteratorId id,
+ uint64_t maxByteSize,
+ Context&) const = 0;
+
+ /**
+ * Destroys the iterator specified by the given id.
+ * <p/>
+ * IMPORTANT: this method has different invocation semantics than
+ * the other provider methods! It may be called from the context of
+ * ANY service layer thread, NOT just from the thread in which
+     * createIterator was invoked! The reason for this is that internal
+ * iterator destroy messages aren't mapped to partition threads in the
+ * way other messages are due to their need for guaranteed execution.
+ * <p/>
+ * This in turn implies that iterator states must be shared between
+ * partitions (and thus protected against cross-partition concurrent
+ * access).
+ * <p/>
+ * @param id The iterator id previously returned by createIterator.
+ */
+ virtual Result destroyIterator(IteratorId id, Context&) = 0;
+
+ /**
+ * Tells the provider that the given bucket has been created in the
+ * service layer. There is no requirement to do anything here.
+ */
+ virtual Result createBucket(const Bucket&, Context&) = 0;
+
+ /**
+ * Deletes the given bucket and all entries contained in that bucket.
+ * After this operation has succeeded, a restart of the provider should
+     * not yield the bucket in listBuckets().
+ */
+ virtual Result deleteBucket(const Bucket&, Context&) = 0;
+
+ /**
+ * This function is called continuously by the service layer. It allows the
+ * provider to signify whether it has done any out-of-band changes to
+ * buckets that need to be recognized by the rest of the system. The
+ * service layer will proceed to call getBucketInfo() on each of the
+ * returned buckets. After a call to getModifiedBuckets(), the provider
+     * should clear its list of modified buckets, so that the next call does
+ * not return the same buckets.
+ */
+ virtual BucketIdListResult getModifiedBuckets() const = 0;
+
+ /**
+ * Allows the provider to do periodic maintenance and verification.
+ *
+ * @param level The level of maintenance to do. LOW maintenance is
+ * scheduled more often than HIGH maintenance, allowing costly operations
+     * to be run less often.
+ */
+ virtual Result maintain(const Bucket&,
+ MaintenanceLevel level) = 0;
+
+ /**
+ * Splits the source bucket into the two target buckets.
+     * After the split, all documents belonging to target1 should be
+     * in that bucket, and likewise all documents belonging to target2
+     * should be in target2. The information in SplitResult should
+     * reflect this.
+ * <p/>
+ * Before calling this function, the service layer will iterate the bucket
+ * to figure out which buckets the source should be split into. This may
+ * result in splitting more than one bucket bit at a time.
+ * <p/>
+     * In some cases, we might want to just increase the used-bit count of the
+     * bucket, as we don't want to split far enough to split the content in
+     * two. In these cases, target2 will specify the invalid bucket 0 (with 0
+     * used bits).
+ */
+ virtual Result split(const Bucket& source,
+ const Bucket& target1,
+ const Bucket& target2,
+ Context&) = 0;
+
+ /**
+ * Joins two buckets into one. After the join, all documents from
+ * source1 and source2 should be stored in the target bucket.
+ */
+ virtual Result join(const Bucket& source1,
+ const Bucket& source2,
+ const Bucket& target,
+ Context&) = 0;
+
+ /**
+ * Moves a bucket from one partition to another.
+ *
+ * @param target The partition to move to. (From partition is in bucket)
+ */
+ virtual Result move(const Bucket&, PartitionId target, Context&) = 0;
+};
+
+} // spi
+} // storage
+
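A sketch of the createIterator/iterate/destroyIterator contract described above; "provider", "bucket", "fields", "selection" and "context" are assumed to be supplied by the caller, and destroyIterator is only owed after a successful createIterator.

    #include <vespa/persistence/spi/persistenceprovider.h>

    using namespace storage::spi;

    // Sketch only: drain a bucket through the iterator API.
    void drainBucket(PersistenceProvider& provider, const Bucket& bucket,
                     const document::FieldSet& fields, const Selection& selection,
                     Context& context)
    {
        CreateIteratorResult created = provider.createIterator(
                bucket, fields, selection, NEWEST_DOCUMENT_ONLY, context);
        if (created.hasError()) {
            return; // no iterator state was created; destroyIterator must not be called
        }
        IteratorId id(created.getIteratorId());
        bool completed = false;
        while (!completed) {
            // Soft cap of 1 MiB per chunk; every chunk contains at least one entry.
            IterateResult chunk = provider.iterate(id, 1024 * 1024, context);
            if (chunk.hasError()) {
                break;
            }
            for (size_t i = 0; i < chunk.getEntries().size(); ++i) {
                // Each DocEntry is a document, a remove entry, or metadata only.
            }
            completed = chunk.isCompleted();
        }
        provider.destroyIterator(id, context); // always paired with createIterator
    }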
diff --git a/persistence/src/vespa/persistence/spi/providerfactory.h b/persistence/src/vespa/persistence/spi/providerfactory.h
new file mode 100644
index 00000000000..118d03c33db
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/providerfactory.h
@@ -0,0 +1,30 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * \class storage::spi::ProviderFactory
+ * \ingroup spi
+ *
+ * \brief Factory class to generate a persistence provider interface
+ */
+
+#pragma once
+
+#include <vespa/persistence/spi/persistenceprovider.h>
+
+namespace document {
+ class DocumentTypeRepo;
+}
+
+namespace storage {
+namespace spi {
+
+struct ProviderFactory {
+ virtual ~ProviderFactory() {}
+
+ virtual PersistenceProvider::UP createProviderInstance(
+ document::DocumentTypeRepo&) = 0;
+};
+
+} // spi
+} // storage
+
+
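A hypothetical factory implementation; MyProvider is assumed to be a concrete PersistenceProvider constructible from the type repo.

    #include <vespa/persistence/spi/providerfactory.h>

    namespace storage {
    namespace spi {

    // Hypothetical factory handing out instances of a concrete provider.
    struct MyProviderFactory : ProviderFactory {
        PersistenceProvider::UP createProviderInstance(
                document::DocumentTypeRepo& repo) override {
            return PersistenceProvider::UP(new MyProvider(repo)); // MyProvider is assumed
        }
    };

    } // spi
    } // storage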
diff --git a/persistence/src/vespa/persistence/spi/read_consistency.cpp b/persistence/src/vespa/persistence/spi/read_consistency.cpp
new file mode 100644
index 00000000000..cd22a4740d7
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/read_consistency.cpp
@@ -0,0 +1,27 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include "read_consistency.h"
+#include <iostream>
+#include <cassert>
+
+namespace storage {
+namespace spi {
+
+std::ostream&
+operator<<(std::ostream& os, ReadConsistency consistency)
+{
+ switch (consistency) {
+ case ReadConsistency::STRONG:
+ os << "STRONG";
+ break;
+ case ReadConsistency::WEAK:
+ os << "WEAK";
+ break;
+ default:
+ assert(false);
+ }
+ return os;
+}
+
+} // spi
+} // storage
+
diff --git a/persistence/src/vespa/persistence/spi/read_consistency.h b/persistence/src/vespa/persistence/spi/read_consistency.h
new file mode 100644
index 00000000000..51a6ea3f1c8
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/read_consistency.h
@@ -0,0 +1,36 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <iosfwd>
+#include <stdint.h>
+
+namespace storage {
+namespace spi {
+
+enum class ReadConsistency : uint8_t {
+ /**
+ * A read operation with a strong consistency requirement requires that
+ * any ACKed write operations must be visible to the operation.
+ *
+ * Formally, STRONG implies that read operations are linearizable with
+ * regards to their corresponding writes.
+ */
+ STRONG,
+ /**
+ * A read operation with a weak consistency requirement implies that
+ * visibility of recently ACKed operations is allowed to be on a best-
+ * effort basis. This means it's possible to read stale data for operations
+ * that have not yet been applied to the visible state.
+ *
+ * Formally, WEAK implies that read operations are NOT linearizable with
+ * regards to their corresponding writes.
+ */
+ WEAK
+};
+
+std::ostream&
+operator<<(std::ostream&, ReadConsistency);
+
+} // spi
+} // storage
+
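A tiny sketch of the stream operator above, handy when logging which consistency level a read was issued with:

    #include <vespa/persistence/spi/read_consistency.h>
    #include <sstream>
    #include <string>

    std::string consistencyName(storage::spi::ReadConsistency rc) {
        std::ostringstream os;
        os << rc; // prints "STRONG" or "WEAK"
        return os.str();
    }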
diff --git a/persistence/src/vespa/persistence/spi/result.h b/persistence/src/vespa/persistence/spi/result.h
new file mode 100644
index 00000000000..80d5ff585d9
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/result.h
@@ -0,0 +1,307 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include "partitionstate.h"
+#include <vespa/persistence/spi/bucketinfo.h>
+#include <vespa/persistence/spi/docentry.h>
+#include <persistence/spi/types.h>
+
+namespace storage {
+
+namespace spi {
+
+class Result : public document::Printable {
+public:
+ typedef std::unique_ptr<Result> UP;
+
+ enum ErrorType {
+ NONE,
+ TRANSIENT_ERROR,
+ PERMANENT_ERROR,
+ TIMESTAMP_EXISTS,
+ FATAL_ERROR,
+ RESOURCE_EXHAUSTED,
+ ERROR_COUNT
+ };
+
+ /**
+ * Constructor to use for a result where there is no error.
+ */
+ Result() : _errorCode(NONE), _errorMessage() {}
+
+ /**
+ * Constructor to use when an error has been detected.
+ */
+ Result(ErrorType error, const vespalib::string& errorMessage)
+ : _errorCode(error),
+ _errorMessage(errorMessage) {}
+
+ bool operator==(const Result& o) const {
+ return _errorCode == o._errorCode
+ && _errorMessage == o._errorMessage;
+ }
+
+ bool hasError() const {
+ return _errorCode != NONE;
+ }
+
+ ErrorType getErrorCode() const {
+ return _errorCode;
+ }
+
+ const vespalib::string& getErrorMessage() const {
+ return _errorMessage;
+ }
+
+ void print(std::ostream& out, bool, const std::string&) const
+ {
+ out << "Result(" << _errorCode << ", " << _errorMessage << ")";
+ }
+
+private:
+ ErrorType _errorCode;
+ vespalib::string _errorMessage;
+};
+
+class BucketInfoResult : public Result {
+public:
+ /**
+ * Constructor to use for a result where an error has been detected.
+ * The service layer will not update the bucket information in this case,
+ * so it should not be returned either.
+ */
+ BucketInfoResult(ErrorType error, const vespalib::string& errorMessage)
+ : Result(error, errorMessage) {};
+
+ /**
+ * Constructor to use when the write operation was successful,
+ * and the bucket info was modified.
+ */
+ BucketInfoResult(const BucketInfo& info) : _info(info) {}
+
+ const BucketInfo& getBucketInfo() const {
+ return _info;
+ }
+
+private:
+ BucketInfo _info;
+};
+
+class UpdateResult : public Result
+{
+public:
+ /**
+ * Constructor to use for a result where an error has been detected.
+ * The service layer will not update the bucket information in this case,
+ * so it should not be returned either.
+ */
+ UpdateResult(ErrorType error, const vespalib::string& errorMessage)
+ : Result(error, errorMessage),
+ _existingTimestamp(0) {}
+
+ /**
+ * Constructor to use when no document to update was found.
+ */
+ UpdateResult()
+ : _existingTimestamp(0) {}
+
+ /**
+ * Constructor to use when the update was successful.
+ */
+ UpdateResult(Timestamp existingTimestamp)
+ : _existingTimestamp(existingTimestamp) {}
+
+ Timestamp getExistingTimestamp() const { return _existingTimestamp; }
+
+private:
+ // Set to 0 if non-existing.
+ Timestamp _existingTimestamp;
+};
+
+class RemoveResult : public Result
+{
+public:
+ /**
+ * Constructor to use for a result where an error has been detected.
+ * The service layer will not update the bucket information in this case,
+ * so it should not be returned either.
+ */
+ RemoveResult(ErrorType error, const vespalib::string& errorMessage)
+ : Result(error, errorMessage),
+ _wasFound(false)
+ {}
+
+ /**
+ * Constructor to use when the remove was successful.
+ */
+ RemoveResult(bool foundDocument)
+ : _wasFound(foundDocument) {};
+
+ bool wasFound() const {
+ return _wasFound;
+ }
+
+private:
+ bool _wasFound;
+};
+
+class GetResult : public Result {
+public:
+ /**
+ * Constructor to use when there was an error retrieving the document.
+ * Not finding the document is not an error in this context.
+ */
+ GetResult(ErrorType error, const vespalib::string& errorMessage)
+ : Result(error, errorMessage),
+ _timestamp(0) {};
+
+ /**
+ * Constructor to use when we didn't find the document in question.
+ */
+ GetResult()
+ : _timestamp(0) {};
+
+ /**
+ * Constructor to use when we found the document asked for.
+ *
+ * @param doc The document we found
+ * @param timestamp The timestamp with which the document was stored.
+ */
+ GetResult(Document::UP doc, Timestamp timestamp)
+ : Result(),
+ _timestamp(timestamp),
+ _doc(std::move(doc))
+ {}
+
+ Timestamp getTimestamp() const { return _timestamp; }
+
+ bool hasDocument() const {
+ return _doc.get() != NULL;
+ }
+
+ const Document& getDocument() const {
+ return *_doc;
+ }
+
+ Document& getDocument() {
+ return *_doc;
+ }
+
+ const Document::SP & getDocumentPtr() {
+ return _doc;
+ }
+
+private:
+ Timestamp _timestamp;
+ Document::SP _doc;
+};
+
+class BucketIdListResult : public Result {
+public:
+ typedef document::BucketId::List List;
+
+ /**
+ * Constructor used when there was an error listing the buckets.
+ */
+ BucketIdListResult(ErrorType error, const vespalib::string& errorMessage)
+ : Result(error, errorMessage) {};
+
+ /**
+ * Constructor used when the bucket listing was successful.
+ *
+ * @param list The list of bucket ids this partition has. Is swapped with
+ * the list internal to this object.
+ */
+ BucketIdListResult(List& list)
+ : Result()
+ {
+ _info.swap(list);
+ };
+
+ const List& getList() const { return _info; }
+ List& getList() { return _info; }
+
+private:
+ List _info;
+};
+
+class CreateIteratorResult : public Result {
+public:
+ /**
+ * Constructor used when there was an error creating the iterator.
+ */
+ CreateIteratorResult(ErrorType error, const vespalib::string& errorMessage)
+ : Result(error, errorMessage),
+ _iterator(0) {};
+
+ /**
+ * Constructor used when the iterator state was successfully created.
+ */
+ CreateIteratorResult(const IteratorId& id)
+ : _iterator(id)
+ {}
+
+ const IteratorId& getIteratorId() const { return _iterator; }
+
+private:
+ IteratorId _iterator;
+};
+
+class IterateResult : public Result {
+public:
+ typedef std::vector<DocEntry::LP> List;
+
+ /**
+ * Constructor used when there was an error creating the iterator.
+ */
+ IterateResult(ErrorType error, const vespalib::string& errorMessage)
+ : Result(error, errorMessage),
+ _completed(false)
+ {}
+
+ /**
+ * Constructor used when the iteration was successful.
+ * For performance concerns, the entries in the input vector
+ * are swapped with the internal vector.
+ *
+ * @param completed Set to true if iteration has been completed.
+ */
+ IterateResult(List entries, bool completed)
+ : _completed(completed),
+ _entries(std::move(entries))
+ { }
+
+ const List& getEntries() const { return _entries; }
+
+ bool isCompleted() const { return _completed; }
+
+private:
+ bool _completed;
+ std::vector<DocEntry::LP> _entries;
+};
+
+class PartitionStateListResult : public Result
+{
+public:
+ /**
+ * Constructor to use for a result where an error has been detected.
+ */
+ PartitionStateListResult(ErrorType error, const vespalib::string& msg)
+ : Result(error, msg),
+ _list(0)
+ {}
+
+ /**
+ * Constructor to use when the operation was successful.
+ */
+ PartitionStateListResult(PartitionStateList list) : _list(list) {};
+
+ PartitionStateList getList() const { return _list; }
+
+private:
+ PartitionStateList _list;
+};
+
+} // namespace spi
+} // namespace storage
+
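A sketch of typical call-site handling of the error taxonomy above (transient, permanent, fatal); how each branch reacts is up to the caller.

    #include <vespa/persistence/spi/result.h>

    using storage::spi::Result;

    void handle(const Result& r) {
        if (!r.hasError()) {
            return;
        }
        switch (r.getErrorCode()) {
        case Result::TRANSIENT_ERROR:
            // Retrying may succeed, on this data copy or on another.
            break;
        case Result::FATAL_ERROR:
            // This provider instance is broken and wants the process shut down.
            break;
        default:
            // Permanent and other errors: the request itself is faulty.
            break;
        }
    }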
diff --git a/persistence/src/vespa/persistence/spi/selection.h b/persistence/src/vespa/persistence/spi/selection.h
new file mode 100644
index 00000000000..260a85992f7
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/selection.h
@@ -0,0 +1,93 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * \class storage::spi::Selection
+ * \ingroup spi
+ *
+ * \brief Specifies which subset of a bucket's documents one is interested in.
+ */
+
+#pragma once
+
+#include <map>
+#include <vector>
+#include <persistence/spi/types.h>
+#include <vespa/persistence/spi/documentselection.h>
+
+namespace storage {
+namespace spi {
+
+class MetaData {
+ Timestamp timestamp;
+};
+
+class Selection {
+public:
+ typedef std::vector<Timestamp> TimestampSubset;
+private:
+ DocumentSelection _documentSelection;
+ Timestamp _fromTimestamp;
+ Timestamp _toTimestamp;
+ TimestampSubset _timestampSubset;
+
+public:
+ Selection(const DocumentSelection& docSel)
+ : _documentSelection(docSel),
+ _fromTimestamp(0),
+ _toTimestamp(INT64_MAX),
+ _timestampSubset()
+ {}
+
+ const DocumentSelection& getDocumentSelection() const {
+ return _documentSelection;
+ }
+
+ /**
+ * All the timestamp stuff will disappear when we rewrite selection.
+ */
+ /**
+ * Specifies that only documents with a timestamp newer than or equal
+ * to the given value shall be included in the result.
+ */
+ void setFromTimestamp(Timestamp fromTimestamp) {
+ _fromTimestamp = fromTimestamp;
+ }
+ /**
+ * Specifies that only documents with a timestamp older than or equal
+ * to the given value shall be included in the result.
+ */
+ void setToTimestamp(Timestamp toTimestamp) {
+ _toTimestamp = toTimestamp;
+ }
+
+ /**
+ * Assign an explicit subset of timestamps to iterate over.
+ * If non-empty, document selection, timestamp range and include removes
+ * will be ignored; all specified entries are returned if they exist.
+ * Timestamps MUST be in strictly increasing order.
+ */
+ void setTimestampSubset(const TimestampSubset& timestampSubset) {
+ _timestampSubset = timestampSubset;
+ }
+ const TimestampSubset& getTimestampSubset() const {
+ return _timestampSubset;
+ }
+
+ Timestamp getFromTimestamp() const { return _fromTimestamp; }
+ Timestamp getToTimestamp() const { return _toTimestamp; }
+
+ std::string requiredFields();
+
+ /**
+ * Regular usage.
+ */
+ bool match(const Document& doc, const MetaData& metaData) const;
+
+ /**
+ * Can be used if requiredFields is empty.
+ */
+ bool match(const MetaData& metaData) const;
+};
+
+} // spi
+} // storage
+
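A sketch of building a Selection restricted to a timestamp window, assuming DocumentSelection is constructible from a document-selection string (the expression shown is hypothetical):

    #include <vespa/persistence/spi/selection.h>

    storage::spi::Selection makeWindowedSelection() {
        // "music.year > 2000" is a hypothetical document selection expression.
        storage::spi::Selection sel(
                storage::spi::DocumentSelection("music.year > 2000"));
        sel.setFromTimestamp(storage::spi::Timestamp(1000)); // inclusive lower bound
        sel.setToTimestamp(storage::spi::Timestamp(2000));   // inclusive upper bound
        return sel;
    }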
diff --git a/persistence/testrun/.gitignore b/persistence/testrun/.gitignore
new file mode 100644
index 00000000000..c6773b6c086
--- /dev/null
+++ b/persistence/testrun/.gitignore
@@ -0,0 +1,6 @@
+/test-report.html
+/test-report.html.*
+/test.*.*.*
+/tmp.*
+/test.*.*.result
+Makefile