summaryrefslogtreecommitdiffstats
path: root/eval/src/tests/tensor
diff options
context:
space:
mode:
authorHaavard <havardpe@yahoo-inc.com>2017-01-23 12:14:40 +0000
committerHaavard <havardpe@yahoo-inc.com>2017-01-23 12:14:40 +0000
commit145659f1d677face587b710726285df872a319c0 (patch)
tree074eafbf9d3b9ee030ff2ec584667b0386f37618 /eval/src/tests/tensor
parent31690a1baa64d046d7ba25510b4570aa20792134 (diff)
move code
Diffstat (limited to 'eval/src/tests/tensor')
-rw-r--r--eval/src/tests/tensor/dense_dot_product_function/CMakeLists.txt9
-rw-r--r--eval/src/tests/tensor/dense_dot_product_function/FILES1
-rw-r--r--eval/src/tests/tensor/dense_dot_product_function/dense_dot_product_function_test.cpp177
-rw-r--r--eval/src/tests/tensor/dense_tensor_address_combiner/CMakeLists.txt9
-rw-r--r--eval/src/tests/tensor/dense_tensor_address_combiner/FILES1
-rw-r--r--eval/src/tests/tensor/dense_tensor_address_combiner/dense_tensor_address_combiner_test.cpp32
-rw-r--r--eval/src/tests/tensor/dense_tensor_builder/.gitignore1
-rw-r--r--eval/src/tests/tensor/dense_tensor_builder/CMakeLists.txt9
-rw-r--r--eval/src/tests/tensor/dense_tensor_builder/FILES1
-rw-r--r--eval/src/tests/tensor/dense_tensor_builder/dense_tensor_builder_test.cpp251
-rw-r--r--eval/src/tests/tensor/dense_tensor_function_compiler/CMakeLists.txt9
-rw-r--r--eval/src/tests/tensor/dense_tensor_function_compiler/FILES1
-rw-r--r--eval/src/tests/tensor/dense_tensor_function_compiler/dense_tensor_function_compiler_test.cpp65
-rw-r--r--eval/src/tests/tensor/sparse_tensor_builder/.gitignore1
-rw-r--r--eval/src/tests/tensor/sparse_tensor_builder/CMakeLists.txt9
-rw-r--r--eval/src/tests/tensor/sparse_tensor_builder/FILES1
-rw-r--r--eval/src/tests/tensor/sparse_tensor_builder/sparse_tensor_builder_test.cpp97
-rw-r--r--eval/src/tests/tensor/tensor_address/.gitignore1
-rw-r--r--eval/src/tests/tensor/tensor_address/CMakeLists.txt9
-rw-r--r--eval/src/tests/tensor/tensor_address/FILES1
-rw-r--r--eval/src/tests/tensor/tensor_address/tensor_address_test.cpp39
-rw-r--r--eval/src/tests/tensor/tensor_conformance/CMakeLists.txt9
-rw-r--r--eval/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp19
-rw-r--r--eval/src/tests/tensor/tensor_mapper/.gitignore1
-rw-r--r--eval/src/tests/tensor/tensor_mapper/CMakeLists.txt9
-rw-r--r--eval/src/tests/tensor/tensor_mapper/FILES1
-rw-r--r--eval/src/tests/tensor/tensor_mapper/tensor_mapper_test.cpp245
-rw-r--r--eval/src/tests/tensor/tensor_performance/.gitignore1
-rw-r--r--eval/src/tests/tensor/tensor_performance/CMakeLists.txt13
-rw-r--r--eval/src/tests/tensor/tensor_performance/FILES1
-rw-r--r--eval/src/tests/tensor/tensor_performance/tensor_performance_test.cpp378
-rw-r--r--eval/src/tests/tensor/tensor_serialization/.gitignore1
-rw-r--r--eval/src/tests/tensor/tensor_serialization/CMakeLists.txt9
-rw-r--r--eval/src/tests/tensor/tensor_serialization/FILES1
-rw-r--r--eval/src/tests/tensor/tensor_serialization/tensor_serialization_test.cpp255
-rw-r--r--eval/src/tests/tensor/tensor_slime_serialization/.gitignore1
-rw-r--r--eval/src/tests/tensor/tensor_slime_serialization/CMakeLists.txt9
-rw-r--r--eval/src/tests/tensor/tensor_slime_serialization/FILES1
-rw-r--r--eval/src/tests/tensor/tensor_slime_serialization/tensor_slime_serialization_test.cpp185
39 files changed, 1863 insertions, 0 deletions
diff --git a/eval/src/tests/tensor/dense_dot_product_function/CMakeLists.txt b/eval/src/tests/tensor/dense_dot_product_function/CMakeLists.txt
new file mode 100644
index 00000000000..d02f2cf7646
--- /dev/null
+++ b/eval/src/tests/tensor/dense_dot_product_function/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_dense_dot_product_function_test_app TEST
+ SOURCES
+ dense_dot_product_function_test.cpp
+ DEPENDS
+ vespalib
+ vespalib_vespalib_tensor
+)
+vespa_add_test(NAME vespalib_dense_dot_product_function_test_app COMMAND vespalib_dense_dot_product_function_test_app)
diff --git a/eval/src/tests/tensor/dense_dot_product_function/FILES b/eval/src/tests/tensor/dense_dot_product_function/FILES
new file mode 100644
index 00000000000..c79d4ae29de
--- /dev/null
+++ b/eval/src/tests/tensor/dense_dot_product_function/FILES
@@ -0,0 +1 @@
+dense_dot_product_function_test.cpp
diff --git a/eval/src/tests/tensor/dense_dot_product_function/dense_dot_product_function_test.cpp b/eval/src/tests/tensor/dense_dot_product_function/dense_dot_product_function_test.cpp
new file mode 100644
index 00000000000..3ffcdd7a567
--- /dev/null
+++ b/eval/src/tests/tensor/dense_dot_product_function/dense_dot_product_function_test.cpp
@@ -0,0 +1,177 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/log/log.h>
+LOG_SETUP("dense_dot_product_function_test");
+
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/vespalib/eval/tensor_function.h>
+#include <vespa/vespalib/tensor/dense/dense_dot_product_function.h>
+#include <vespa/vespalib/tensor/dense/dense_tensor.h>
+#include <vespa/vespalib/tensor/dense/dense_tensor_builder.h>
+#include <vespa/vespalib/tensor/dense/dense_tensor_view.h>
+#include <vespa/vespalib/util/stringfmt.h>
+#include <vespa/vespalib/util/stash.h>
+
+using namespace vespalib;
+using namespace vespalib::eval;
+using namespace vespalib::tensor;
+
+ValueType
+makeType(size_t numCells)
+{
+ return ValueType::tensor_type({{"x", numCells}});
+}
+
+tensor::Tensor::UP
+makeTensor(size_t numCells, double cellBias)
+{
+ DenseTensorBuilder builder;
+ DenseTensorBuilder::Dimension dim = builder.defineDimension("x", numCells);
+ for (size_t i = 0; i < numCells; ++i) {
+ builder.addLabel(dim, i).addCell(i + cellBias);
+ }
+ return builder.build();
+}
+
+double
+calcDotProduct(const DenseTensor &lhs, const DenseTensor &rhs)
+{
+ size_t numCells = std::min(lhs.cells().size(), rhs.cells().size());
+ double result = 0;
+ for (size_t i = 0; i < numCells; ++i) {
+ result += (lhs.cells()[i] * rhs.cells()[i]);
+ }
+ return result;
+}
+
+const DenseTensor &
+asDenseTensor(const tensor::Tensor &tensor)
+{
+ return dynamic_cast<const DenseTensor &>(tensor);
+}
+
+class FunctionInput : public TensorFunction::Input
+{
+private:
+ tensor::Tensor::UP _lhsTensor;
+ tensor::Tensor::UP _rhsTensor;
+ const DenseTensor &_lhsDenseTensor;
+ const DenseTensor &_rhsDenseTensor;
+ TensorValue _lhsValue;
+ TensorValue _rhsValue;
+
+public:
+ FunctionInput(size_t lhsNumCells, size_t rhsNumCells)
+ : _lhsTensor(makeTensor(lhsNumCells, 3.0)),
+ _rhsTensor(makeTensor(rhsNumCells, 5.0)),
+ _lhsDenseTensor(asDenseTensor(*_lhsTensor)),
+ _rhsDenseTensor(asDenseTensor(*_rhsTensor)),
+ _lhsValue(std::make_unique<DenseTensor>(_lhsDenseTensor.type(),
+ _lhsDenseTensor.cells())),
+ _rhsValue(std::make_unique<DenseTensor>(_rhsDenseTensor.type(),
+ _rhsDenseTensor.cells()))
+ {}
+ virtual const Value &get_tensor(size_t id) const override {
+ if (id == 0) {
+ return _lhsValue;
+ } else {
+ return _rhsValue;
+ }
+ }
+ virtual const UnaryOperation &get_map_operation(size_t) const override {
+ abort();
+ }
+ double expectedDotProduct() const {
+ return calcDotProduct(_lhsDenseTensor, _rhsDenseTensor);
+ }
+};
+
+struct Fixture
+{
+ DenseDotProductFunction function;
+ FunctionInput input;
+ Fixture(size_t lhsNumCells, size_t rhsNumCells)
+ : function(0, 1),
+ input(lhsNumCells, rhsNumCells)
+ {
+ }
+ double eval() const {
+ Stash stash;
+ const Value &result = function.eval(input, stash);
+ ASSERT_TRUE(result.is_double());
+ LOG(info, "eval(): (%s) * (%s) = %f",
+ input.get_tensor(0).type().to_spec().c_str(),
+ input.get_tensor(1).type().to_spec().c_str(),
+ result.as_double());
+ return result.as_double();
+ }
+};
+
+void
+assertDotProduct(size_t numCells)
+{
+ Fixture f(numCells, numCells);
+ EXPECT_EQUAL(f.input.expectedDotProduct(), f.eval());
+}
+
+void
+assertDotProduct(size_t lhsNumCells, size_t rhsNumCells)
+{
+ Fixture f(lhsNumCells, rhsNumCells);
+ EXPECT_EQUAL(f.input.expectedDotProduct(), f.eval());
+}
+
+TEST_F("require that empty dot product is correct", Fixture(0, 0))
+{
+ EXPECT_EQUAL(0.0, f.eval());
+}
+
+TEST_F("require that basic dot product with equal sizes is correct", Fixture(2, 2))
+{
+ EXPECT_EQUAL((3.0 * 5.0) + (4.0 * 6.0), f.eval());
+}
+
+TEST_F("require that basic dot product with un-equal sizes is correct", Fixture(2, 3))
+{
+ EXPECT_EQUAL((3.0 * 5.0) + (4.0 * 6.0), f.eval());
+}
+
+TEST_F("require that basic dot product with un-equal sizes (lhs larger than rhs) is correct", Fixture(3, 2))
+{
+ EXPECT_EQUAL((3.0 * 5.0) + (4.0 * 6.0), f.eval());
+}
+
+TEST("require that dot product with equal sizes is correct")
+{
+ TEST_DO(assertDotProduct(8));
+ TEST_DO(assertDotProduct(16));
+ TEST_DO(assertDotProduct(32));
+ TEST_DO(assertDotProduct(64));
+ TEST_DO(assertDotProduct(128));
+ TEST_DO(assertDotProduct(256));
+ TEST_DO(assertDotProduct(512));
+ TEST_DO(assertDotProduct(1024));
+
+ TEST_DO(assertDotProduct(8 + 3));
+ TEST_DO(assertDotProduct(16 + 3));
+ TEST_DO(assertDotProduct(32 + 3));
+ TEST_DO(assertDotProduct(64 + 3));
+ TEST_DO(assertDotProduct(128 + 3));
+ TEST_DO(assertDotProduct(256 + 3));
+ TEST_DO(assertDotProduct(512 + 3));
+ TEST_DO(assertDotProduct(1024 + 3));
+}
+
+TEST("require that dot product with un-equal sizes is correct")
+{
+ TEST_DO(assertDotProduct(8, 8 + 3));
+ TEST_DO(assertDotProduct(16, 16 + 3));
+ TEST_DO(assertDotProduct(32, 32 + 3));
+ TEST_DO(assertDotProduct(64, 64 + 3));
+ TEST_DO(assertDotProduct(128, 128 + 3));
+ TEST_DO(assertDotProduct(256, 256 + 3));
+ TEST_DO(assertDotProduct(512, 512 + 3));
+ TEST_DO(assertDotProduct(1024, 1024 + 3));
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/tests/tensor/dense_tensor_address_combiner/CMakeLists.txt b/eval/src/tests/tensor/dense_tensor_address_combiner/CMakeLists.txt
new file mode 100644
index 00000000000..65e7c711b19
--- /dev/null
+++ b/eval/src/tests/tensor/dense_tensor_address_combiner/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_dense_tensor_address_combiner_test_app TEST
+ SOURCES
+ dense_tensor_address_combiner_test.cpp
+ DEPENDS
+ vespalib
+ vespalib_vespalib_tensor
+)
+vespa_add_test(NAME vespalib_dense_tensor_address_combiner_test_app COMMAND vespalib_dense_tensor_address_combiner_test_app)
diff --git a/eval/src/tests/tensor/dense_tensor_address_combiner/FILES b/eval/src/tests/tensor/dense_tensor_address_combiner/FILES
new file mode 100644
index 00000000000..0a49bd4647b
--- /dev/null
+++ b/eval/src/tests/tensor/dense_tensor_address_combiner/FILES
@@ -0,0 +1 @@
+dense_tensor_address_combiner_test.cpp
diff --git a/eval/src/tests/tensor/dense_tensor_address_combiner/dense_tensor_address_combiner_test.cpp b/eval/src/tests/tensor/dense_tensor_address_combiner/dense_tensor_address_combiner_test.cpp
new file mode 100644
index 00000000000..37f95172251
--- /dev/null
+++ b/eval/src/tests/tensor/dense_tensor_address_combiner/dense_tensor_address_combiner_test.cpp
@@ -0,0 +1,32 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/vespalib/tensor/dense/dense_tensor_address_combiner.h>
+#include <vespa/vespalib/test/insertion_operators.h>
+
+using namespace vespalib::tensor;
+using vespalib::eval::ValueType;
+
+ValueType
+combine(const std::vector<ValueType::Dimension> &lhs,
+ const std::vector<ValueType::Dimension> &rhs)
+{
+ return DenseTensorAddressCombiner::combineDimensions(
+ ValueType::tensor_type(lhs),
+ ValueType::tensor_type(rhs));
+}
+
+TEST("require that dimensions can be combined")
+{
+ EXPECT_EQUAL(ValueType::tensor_type({{"a", 3}, {"b", 5}}), combine({{"a", 3}}, {{"b", 5}}));
+ EXPECT_EQUAL(ValueType::tensor_type({{"a", 3}, {"b", 5}}), combine({{"a", 3}, {"b", 5}}, {{"b", 5}}));
+ EXPECT_EQUAL(ValueType::tensor_type({{"a", 3}, {"b", 5}}), combine({{"a", 3}, {"b", 7}}, {{"b", 5}}));
+ EXPECT_EQUAL(ValueType::tensor_type({{"a", 3}, {"b", 11}, {"c", 5}, {"d", 7}, {"e", 17}}),
+ combine({{"a", 3}, {"c", 5}, {"d", 7}},
+ {{"b", 11}, {"c", 13}, {"e", 17}}));
+ EXPECT_EQUAL(ValueType::tensor_type({{"a", 3}, {"b", 11}, {"c", 5}, {"d", 7}, {"e", 17}}),
+ combine({{"b", 11}, {"c", 13}, {"e", 17}},
+ {{"a", 3}, {"c", 5}, {"d", 7}}));
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/tests/tensor/dense_tensor_builder/.gitignore b/eval/src/tests/tensor/dense_tensor_builder/.gitignore
new file mode 100644
index 00000000000..5b3598a205d
--- /dev/null
+++ b/eval/src/tests/tensor/dense_tensor_builder/.gitignore
@@ -0,0 +1 @@
+vespalib_dense_tensor_builder_test_app
diff --git a/eval/src/tests/tensor/dense_tensor_builder/CMakeLists.txt b/eval/src/tests/tensor/dense_tensor_builder/CMakeLists.txt
new file mode 100644
index 00000000000..9028138ab87
--- /dev/null
+++ b/eval/src/tests/tensor/dense_tensor_builder/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_dense_tensor_builder_test_app TEST
+ SOURCES
+ dense_tensor_builder_test.cpp
+ DEPENDS
+ vespalib
+ vespalib_vespalib_tensor
+)
+vespa_add_test(NAME vespalib_dense_tensor_builder_test_app COMMAND vespalib_dense_tensor_builder_test_app)
diff --git a/eval/src/tests/tensor/dense_tensor_builder/FILES b/eval/src/tests/tensor/dense_tensor_builder/FILES
new file mode 100644
index 00000000000..448dd3c1e3c
--- /dev/null
+++ b/eval/src/tests/tensor/dense_tensor_builder/FILES
@@ -0,0 +1 @@
+dense_tensor_builder_test.cpp
diff --git a/eval/src/tests/tensor/dense_tensor_builder/dense_tensor_builder_test.cpp b/eval/src/tests/tensor/dense_tensor_builder/dense_tensor_builder_test.cpp
new file mode 100644
index 00000000000..5036f247db3
--- /dev/null
+++ b/eval/src/tests/tensor/dense_tensor_builder/dense_tensor_builder_test.cpp
@@ -0,0 +1,251 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/test/insertion_operators.h>
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/vespalib/tensor/dense/dense_tensor_builder.h>
+#include <vespa/vespalib/util/exceptions.h>
+
+using namespace vespalib::tensor;
+using vespalib::IllegalArgumentException;
+using Builder = DenseTensorBuilder;
+using vespalib::eval::TensorSpec;
+using vespalib::eval::ValueType;
+
+void
+assertTensor(const std::vector<ValueType::Dimension> &expDims,
+ const DenseTensor::Cells &expCells,
+ const Tensor &tensor)
+{
+ const DenseTensor &realTensor = dynamic_cast<const DenseTensor &>(tensor);
+ EXPECT_EQUAL(ValueType::tensor_type(expDims), realTensor.type());
+ EXPECT_EQUAL(expCells, realTensor.cells());
+}
+
+void
+assertTensorSpec(const TensorSpec &expSpec, const Tensor &tensor)
+{
+ TensorSpec actSpec = tensor.toSpec();
+ EXPECT_EQUAL(expSpec, actSpec);
+}
+
+struct Fixture
+{
+ Builder builder;
+};
+
+Tensor::UP
+build1DTensor(Builder &builder)
+{
+ Builder::Dimension dimX = builder.defineDimension("x", 3);
+ builder.addLabel(dimX, 0).addCell(10).
+ addLabel(dimX, 1).addCell(11).
+ addLabel(dimX, 2).addCell(12);
+ return builder.build();
+}
+
+TEST_F("require that 1d tensor can be constructed", Fixture)
+{
+ assertTensor({{"x",3}}, {10,11,12}, *build1DTensor(f.builder));
+}
+
+TEST_F("require that 1d tensor can be converted to tensor spec", Fixture)
+{
+ assertTensorSpec(TensorSpec("tensor(x[3])").
+ add({{"x", 0}}, 10).
+ add({{"x", 1}}, 11).
+ add({{"x", 2}}, 12),
+ *build1DTensor(f.builder));
+}
+
+Tensor::UP
+build2DTensor(Builder &builder)
+{
+ Builder::Dimension dimX = builder.defineDimension("x", 3);
+ Builder::Dimension dimY = builder.defineDimension("y", 2);
+ builder.addLabel(dimX, 0).addLabel(dimY, 0).addCell(10).
+ addLabel(dimX, 0).addLabel(dimY, 1).addCell(11).
+ addLabel(dimX, 1).addLabel(dimY, 0).addCell(12).
+ addLabel(dimX, 1).addLabel(dimY, 1).addCell(13).
+ addLabel(dimX, 2).addLabel(dimY, 0).addCell(14).
+ addLabel(dimX, 2).addLabel(dimY, 1).addCell(15);
+ return builder.build();
+}
+
+TEST_F("require that 2d tensor can be constructed", Fixture)
+{
+ assertTensor({{"x",3},{"y",2}}, {10,11,12,13,14,15}, *build2DTensor(f.builder));
+}
+
+TEST_F("require that 2d tensor can be converted to tensor spec", Fixture)
+{
+ assertTensorSpec(TensorSpec("tensor(x[3],y[2])").
+ add({{"x", 0},{"y", 0}}, 10).
+ add({{"x", 0},{"y", 1}}, 11).
+ add({{"x", 1},{"y", 0}}, 12).
+ add({{"x", 1},{"y", 1}}, 13).
+ add({{"x", 2},{"y", 0}}, 14).
+ add({{"x", 2},{"y", 1}}, 15),
+ *build2DTensor(f.builder));
+}
+
+TEST_F("require that 3d tensor can be constructed", Fixture)
+{
+ Builder::Dimension dimX = f.builder.defineDimension("x", 3);
+ Builder::Dimension dimY = f.builder.defineDimension("y", 2);
+ Builder::Dimension dimZ = f.builder.defineDimension("z", 2);
+ f.builder.addLabel(dimX, 0).addLabel(dimY, 0).addLabel(dimZ, 0).addCell(10).
+ addLabel(dimX, 0).addLabel(dimY, 0).addLabel(dimZ, 1).addCell(11).
+ addLabel(dimX, 0).addLabel(dimY, 1).addLabel(dimZ, 0).addCell(12).
+ addLabel(dimX, 0).addLabel(dimY, 1).addLabel(dimZ, 1).addCell(13).
+ addLabel(dimX, 1).addLabel(dimY, 0).addLabel(dimZ, 0).addCell(14).
+ addLabel(dimX, 1).addLabel(dimY, 0).addLabel(dimZ, 1).addCell(15).
+ addLabel(dimX, 1).addLabel(dimY, 1).addLabel(dimZ, 0).addCell(16).
+ addLabel(dimX, 1).addLabel(dimY, 1).addLabel(dimZ, 1).addCell(17).
+ addLabel(dimX, 2).addLabel(dimY, 0).addLabel(dimZ, 0).addCell(18).
+ addLabel(dimX, 2).addLabel(dimY, 0).addLabel(dimZ, 1).addCell(19).
+ addLabel(dimX, 2).addLabel(dimY, 1).addLabel(dimZ, 0).addCell(20).
+ addLabel(dimX, 2).addLabel(dimY, 1).addLabel(dimZ, 1).addCell(21);
+ assertTensor({{"x",3},{"y",2},{"z",2}},
+ {10,11,12,13,14,15,16,17,18,19,20,21},
+ *f.builder.build());
+}
+
+TEST_F("require that cells get default value 0 if not specified", Fixture)
+{
+ Builder::Dimension dimX = f.builder.defineDimension("x", 3);
+ f.builder.addLabel(dimX, 1).addCell(11);
+ assertTensor({{"x",3}}, {0,11,0},
+ *f.builder.build());
+}
+
+TEST_F("require that labels can be added in arbitrarily order", Fixture)
+{
+ Builder::Dimension dimX = f.builder.defineDimension("x", 2);
+ Builder::Dimension dimY = f.builder.defineDimension("y", 3);
+ f.builder.addLabel(dimY, 0).addLabel(dimX, 1).addCell(10);
+ assertTensor({{"x",2},{"y",3}}, {0,0,0,10,0,0},
+ *f.builder.build());
+}
+
+TEST_F("require that builder can be re-used", Fixture)
+{
+ {
+ Builder::Dimension dimX = f.builder.defineDimension("x", 2);
+ f.builder.addLabel(dimX, 0).addCell(10).
+ addLabel(dimX, 1).addCell(11);
+ assertTensor({{"x",2}}, {10,11},
+ *f.builder.build());
+ }
+ {
+ Builder::Dimension dimY = f.builder.defineDimension("y", 3);
+ f.builder.addLabel(dimY, 0).addCell(20).
+ addLabel(dimY, 1).addCell(21).
+ addLabel(dimY, 2).addCell(22);
+ assertTensor({{"y",3}}, {20,21,22},
+ *f.builder.build());
+ }
+}
+
+void
+assertTensorCell(const std::vector<size_t> &expAddress,
+ double expCell,
+ const DenseTensor::CellsIterator &itr)
+{
+ EXPECT_TRUE(itr.valid());
+ EXPECT_EQUAL(expAddress, itr.address());
+ EXPECT_EQUAL(expCell, itr.cell());
+}
+
+TEST_F("require that dense tensor cells iterator works for 1d tensor", Fixture)
+{
+ Tensor::UP tensor;
+ {
+ Builder::Dimension dimX = f.builder.defineDimension("x", 2);
+ f.builder.addLabel(dimX, 0).addCell(2).
+ addLabel(dimX, 1).addCell(3);
+ tensor = f.builder.build();
+ }
+
+ const DenseTensor &denseTensor = dynamic_cast<const DenseTensor &>(*tensor);
+ DenseTensor::CellsIterator itr = denseTensor.cellsIterator();
+
+ assertTensorCell({0}, 2, itr);
+ itr.next();
+ assertTensorCell({1}, 3, itr);
+ itr.next();
+ EXPECT_FALSE(itr.valid());
+}
+
+TEST_F("require that dense tensor cells iterator works for 2d tensor", Fixture)
+{
+ Tensor::UP tensor;
+ {
+ Builder::Dimension dimX = f.builder.defineDimension("x", 2);
+ Builder::Dimension dimY = f.builder.defineDimension("y", 2);
+ f.builder.addLabel(dimX, 0).addLabel(dimY, 0).addCell(2).
+ addLabel(dimX, 0).addLabel(dimY, 1).addCell(3).
+ addLabel(dimX, 1).addLabel(dimY, 0).addCell(5).
+ addLabel(dimX, 1).addLabel(dimY, 1).addCell(7);
+ tensor = f.builder.build();
+ }
+
+ const DenseTensor &denseTensor = dynamic_cast<const DenseTensor &>(*tensor);
+ DenseTensor::CellsIterator itr = denseTensor.cellsIterator();
+
+ assertTensorCell({0,0}, 2, itr);
+ itr.next();
+ assertTensorCell({0,1}, 3, itr);
+ itr.next();
+ assertTensorCell({1,0}, 5, itr);
+ itr.next();
+ assertTensorCell({1,1}, 7, itr);
+ itr.next();
+ EXPECT_FALSE(itr.valid());
+}
+
+TEST_F("require that undefined label for a dimension throws exception", Fixture)
+{
+ Builder::Dimension dimX = f.builder.defineDimension("x", 2);
+ f.builder.defineDimension("y", 3);
+ EXPECT_EXCEPTION(f.builder.addLabel(dimX, 0).addCell(10),
+ IllegalArgumentException,
+ "Label for dimension 'y' is undefined. Expected a value in the range [0, 3>");
+}
+
+TEST_F("require that label outside range throws exception", Fixture)
+{
+ Builder::Dimension dimX = f.builder.defineDimension("x", 2);
+ EXPECT_EXCEPTION(f.builder.addLabel(dimX, 2).addCell(10),
+ IllegalArgumentException,
+ "Label '2' for dimension 'x' is outside range [0, 2>");
+}
+
+TEST_F("require that already specified label throws exception", Fixture)
+{
+ Builder::Dimension dimX = f.builder.defineDimension("x", 2);
+ EXPECT_EXCEPTION(f.builder.addLabel(dimX, 0).addLabel(dimX, 1).addCell(10),
+ IllegalArgumentException,
+ "Label for dimension 'x' is already specified with value '0'");
+}
+
+TEST_F("require that dimensions are sorted", Fixture)
+{
+ Builder::Dimension dimY = f.builder.defineDimension("y", 3);
+ Builder::Dimension dimX = f.builder.defineDimension("x", 5);
+ f.builder.addLabel(dimX, 0).addLabel(dimY, 0).addCell(10);
+ f.builder.addLabel(dimX, 0).addLabel(dimY, 1).addCell(11);
+ f.builder.addLabel(dimX, 1).addLabel(dimY, 0).addCell(12);
+ std::unique_ptr<Tensor> tensor = f.builder.build();
+ const DenseTensor &denseTensor = dynamic_cast<const DenseTensor &>(*tensor);
+ assertTensor({{"x", 5}, {"y", 3}},
+ {10, 11, 0, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ denseTensor);
+ EXPECT_EQUAL("tensor(x[5],y[3])", denseTensor.getType().to_spec());
+}
+
+
+
+
+
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/tests/tensor/dense_tensor_function_compiler/CMakeLists.txt b/eval/src/tests/tensor/dense_tensor_function_compiler/CMakeLists.txt
new file mode 100644
index 00000000000..a34b39abb70
--- /dev/null
+++ b/eval/src/tests/tensor/dense_tensor_function_compiler/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_dense_tensor_function_compiler_test_app TEST
+ SOURCES
+ dense_tensor_function_compiler_test.cpp
+ DEPENDS
+ vespalib
+ vespalib_vespalib_tensor
+)
+vespa_add_test(NAME vespalib_dense_tensor_function_compiler_test_app COMMAND vespalib_dense_tensor_function_compiler_test_app)
diff --git a/eval/src/tests/tensor/dense_tensor_function_compiler/FILES b/eval/src/tests/tensor/dense_tensor_function_compiler/FILES
new file mode 100644
index 00000000000..3c4ec2f1753
--- /dev/null
+++ b/eval/src/tests/tensor/dense_tensor_function_compiler/FILES
@@ -0,0 +1 @@
+dense_tensor_function_compiler_test.cpp
diff --git a/eval/src/tests/tensor/dense_tensor_function_compiler/dense_tensor_function_compiler_test.cpp b/eval/src/tests/tensor/dense_tensor_function_compiler/dense_tensor_function_compiler_test.cpp
new file mode 100644
index 00000000000..c1420f2b8d2
--- /dev/null
+++ b/eval/src/tests/tensor/dense_tensor_function_compiler/dense_tensor_function_compiler_test.cpp
@@ -0,0 +1,65 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/vespalib/tensor/dense/dense_dot_product_function.h>
+#include <vespa/vespalib/tensor/dense/dense_tensor_function_compiler.h>
+
+using namespace vespalib::eval;
+using namespace vespalib::eval::operation;
+using namespace vespalib::eval::tensor_function;
+using namespace vespalib::tensor;
+
+template <typename T>
+const T *as(const TensorFunction &function) { return dynamic_cast<const T *>(&function); }
+
+TensorFunction::UP
+compileDotProduct(const vespalib::string &lhsType,
+ const vespalib::string &rhsType)
+{
+ Node_UP reduceNode = reduce(apply(Mul(),
+ inject(ValueType::from_spec(lhsType), 1),
+ inject(ValueType::from_spec(rhsType), 3)),
+ Add(), {});
+ return DenseTensorFunctionCompiler::compile(std::move(reduceNode));
+}
+
+void
+assertCompiledDotProduct(const vespalib::string &lhsType,
+ const vespalib::string &rhsType)
+{
+ TensorFunction::UP func = compileDotProduct(lhsType, rhsType);
+ const DenseDotProductFunction *dotProduct = as<DenseDotProductFunction>(*func);
+ ASSERT_TRUE(dotProduct);
+ EXPECT_EQUAL(1u, dotProduct->lhsTensorId());
+ EXPECT_EQUAL(3u, dotProduct->rhsTensorId());
+}
+
+void
+assertNotCompiledDotProduct(const vespalib::string &lhsType,
+ const vespalib::string &rhsType)
+{
+ TensorFunction::UP func = compileDotProduct(lhsType, rhsType);
+ const Reduce *reduce = as<Reduce>(*func);
+ EXPECT_TRUE(reduce);
+}
+
+TEST("require that dot product with compatible dimensions is compiled")
+{
+ TEST_DO(assertCompiledDotProduct("tensor(x[5])", "tensor(x[5])"));
+ TEST_DO(assertCompiledDotProduct("tensor(x[3])", "tensor(x[5])"));
+ TEST_DO(assertCompiledDotProduct("tensor(x[5])", "tensor(x[3])"));
+ TEST_DO(assertCompiledDotProduct("tensor(x[])", "tensor(x[5])"));
+ TEST_DO(assertCompiledDotProduct("tensor(x[5])", "tensor(x[])"));
+ TEST_DO(assertCompiledDotProduct("tensor(x[])", "tensor(x[])"));
+}
+
+TEST("require that dot product with incompatible dimensions is NOT compiled")
+{
+ TEST_DO(assertNotCompiledDotProduct("tensor(x[5])", "tensor(y[5])"));
+ TEST_DO(assertNotCompiledDotProduct("tensor(y[5])", "tensor(x[5])"));
+ TEST_DO(assertNotCompiledDotProduct("tensor(y[])", "tensor(x[])"));
+ TEST_DO(assertNotCompiledDotProduct("tensor(x[5])", "tensor(x[5],y[7])"));
+ TEST_DO(assertNotCompiledDotProduct("tensor(x[5],y[7])", "tensor(x[5],y[7])"));
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/tests/tensor/sparse_tensor_builder/.gitignore b/eval/src/tests/tensor/sparse_tensor_builder/.gitignore
new file mode 100644
index 00000000000..e0316d190bb
--- /dev/null
+++ b/eval/src/tests/tensor/sparse_tensor_builder/.gitignore
@@ -0,0 +1 @@
+vespalib_sparse_tensor_builder_test_app
diff --git a/eval/src/tests/tensor/sparse_tensor_builder/CMakeLists.txt b/eval/src/tests/tensor/sparse_tensor_builder/CMakeLists.txt
new file mode 100644
index 00000000000..c8ae7ece908
--- /dev/null
+++ b/eval/src/tests/tensor/sparse_tensor_builder/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_sparse_tensor_builder_test_app TEST
+ SOURCES
+ sparse_tensor_builder_test.cpp
+ DEPENDS
+ vespalib
+ vespalib_vespalib_tensor
+)
+vespa_add_test(NAME vespalib_sparse_tensor_builder_test_app COMMAND vespalib_sparse_tensor_builder_test_app)
diff --git a/eval/src/tests/tensor/sparse_tensor_builder/FILES b/eval/src/tests/tensor/sparse_tensor_builder/FILES
new file mode 100644
index 00000000000..ad47666278e
--- /dev/null
+++ b/eval/src/tests/tensor/sparse_tensor_builder/FILES
@@ -0,0 +1 @@
+sparse_tensor_builder_test.cpp
diff --git a/eval/src/tests/tensor/sparse_tensor_builder/sparse_tensor_builder_test.cpp b/eval/src/tests/tensor/sparse_tensor_builder/sparse_tensor_builder_test.cpp
new file mode 100644
index 00000000000..d1ad41e8a7e
--- /dev/null
+++ b/eval/src/tests/tensor/sparse_tensor_builder/sparse_tensor_builder_test.cpp
@@ -0,0 +1,97 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/vespalib/tensor/sparse/sparse_tensor_builder.h>
+#include <vespa/vespalib/test/insertion_operators.h>
+
+using namespace vespalib::tensor;
+using vespalib::eval::TensorSpec;
+using vespalib::eval::ValueType;
+
+void
+assertCellValue(double expValue, const TensorAddress &address,
+ const ValueType &type,
+ const SparseTensor::Cells &cells)
+{
+ SparseTensorAddressBuilder addressBuilder;
+ auto dimsItr = type.dimensions().cbegin();
+ auto dimsItrEnd = type.dimensions().cend();
+ for (const auto &element : address.elements()) {
+ while ((dimsItr < dimsItrEnd) && (dimsItr->name < element.dimension())) {
+ addressBuilder.add("");
+ ++dimsItr;
+ }
+ assert((dimsItr != dimsItrEnd) && (dimsItr->name == element.dimension()));
+ addressBuilder.add(element.label());
+ ++dimsItr;
+ }
+ while (dimsItr < dimsItrEnd) {
+ addressBuilder.add("");
+ ++dimsItr;
+ }
+ SparseTensorAddressRef addressRef(addressBuilder.getAddressRef());
+ auto itr = cells.find(addressRef);
+ EXPECT_FALSE(itr == cells.end());
+ EXPECT_EQUAL(expValue, itr->second);
+}
+
+Tensor::UP
+buildTensor()
+{
+ SparseTensorBuilder builder;
+ builder.define_dimension("c");
+ builder.define_dimension("d");
+ builder.define_dimension("a");
+ builder.define_dimension("b");
+ builder.add_label(builder.define_dimension("a"), "1").
+ add_label(builder.define_dimension("b"), "2").add_cell(10).
+ add_label(builder.define_dimension("c"), "3").
+ add_label(builder.define_dimension("d"), "4").add_cell(20);
+ return builder.build();
+}
+
+TEST("require that tensor can be constructed")
+{
+ Tensor::UP tensor = buildTensor();
+ const SparseTensor &sparseTensor = dynamic_cast<const SparseTensor &>(*tensor);
+ const ValueType &type = sparseTensor.type();
+ const SparseTensor::Cells &cells = sparseTensor.cells();
+ EXPECT_EQUAL(2u, cells.size());
+ assertCellValue(10, TensorAddress({{"a","1"},{"b","2"}}),
+ type, cells);
+ assertCellValue(20, TensorAddress({{"c","3"},{"d","4"}}),
+ type, cells);
+}
+
+TEST("require that tensor can be converted to tensor spec")
+{
+ Tensor::UP tensor = buildTensor();
+ TensorSpec expSpec("tensor(a{},b{},c{},d{})");
+ expSpec.add({{"a", "1"}, {"b", "2"}, {"c", ""}, {"d", ""}}, 10).
+ add({{"a", ""},{"b",""},{"c", "3"}, {"d", "4"}}, 20);
+ TensorSpec actSpec = tensor->toSpec();
+ EXPECT_EQUAL(expSpec, actSpec);
+}
+
+TEST("require that dimensions are extracted")
+{
+ SparseTensorBuilder builder;
+ builder.define_dimension("c");
+ builder.define_dimension("a");
+ builder.define_dimension("b");
+ builder.
+ add_label(builder.define_dimension("a"), "1").
+ add_label(builder.define_dimension("b"), "2").add_cell(10).
+ add_label(builder.define_dimension("b"), "3").
+ add_label(builder.define_dimension("c"), "4").add_cell(20);
+ Tensor::UP tensor = builder.build();
+ const SparseTensor &sparseTensor = dynamic_cast<const SparseTensor &>(*tensor);
+ const auto &dims = sparseTensor.type().dimensions();
+ EXPECT_EQUAL(3u, dims.size());
+ EXPECT_EQUAL("a", dims[0].name);
+ EXPECT_EQUAL("b", dims[1].name);
+ EXPECT_EQUAL("c", dims[2].name);
+ EXPECT_EQUAL("tensor(a{},b{},c{})", sparseTensor.getType().to_spec());
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/tests/tensor/tensor_address/.gitignore b/eval/src/tests/tensor/tensor_address/.gitignore
new file mode 100644
index 00000000000..189adb8710b
--- /dev/null
+++ b/eval/src/tests/tensor/tensor_address/.gitignore
@@ -0,0 +1 @@
+vespalib_tensor_address_test_app
diff --git a/eval/src/tests/tensor/tensor_address/CMakeLists.txt b/eval/src/tests/tensor/tensor_address/CMakeLists.txt
new file mode 100644
index 00000000000..43c45f913a5
--- /dev/null
+++ b/eval/src/tests/tensor/tensor_address/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_tensor_address_test_app TEST
+ SOURCES
+ tensor_address_test.cpp
+ DEPENDS
+ vespalib
+ vespalib_vespalib_tensor
+)
+vespa_add_test(NAME vespalib_tensor_address_test_app COMMAND vespalib_tensor_address_test_app)
diff --git a/eval/src/tests/tensor/tensor_address/FILES b/eval/src/tests/tensor/tensor_address/FILES
new file mode 100644
index 00000000000..1d7d1c533a0
--- /dev/null
+++ b/eval/src/tests/tensor/tensor_address/FILES
@@ -0,0 +1 @@
+tensor_address_test.cpp
diff --git a/eval/src/tests/tensor/tensor_address/tensor_address_test.cpp b/eval/src/tests/tensor/tensor_address/tensor_address_test.cpp
new file mode 100644
index 00000000000..70f33bdf0c4
--- /dev/null
+++ b/eval/src/tests/tensor/tensor_address/tensor_address_test.cpp
@@ -0,0 +1,39 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/vespalib/tensor/tensor_address.h>
+
+using namespace vespalib::tensor;
+
+void
+assertSortOrder(const TensorAddress::Elements &exp,
+ const TensorAddress::Elements &input)
+{
+ TensorAddress address(input); // helper: constructing from 'input' must yield exactly 'exp' -- i.e. the ctor canonicalizes element order
+ EXPECT_EQUAL(exp, address.elements());
+}
+
+TEST("require that elements are sorted in constructor")
+{
+ assertSortOrder({{"a","1"},{"b","1"},{"c","1"}},
+ {{"c","1"},{"a","1"},{"b","1"}}); // unsorted {c,a,b} input must come back sorted by dimension name
+}
+
+TEST("require that we can check whether a dimension is present")
+{
+ TensorAddress address({{"a","1"},{"b","1"}});
+ EXPECT_TRUE(address.hasDimension("a"));
+ EXPECT_TRUE(address.hasDimension("b"));
+ EXPECT_FALSE(address.hasDimension("c")); // dimension not part of the address
+}
+
+TEST("require that tensor address sort order is defined")
+{
+ TensorAddress::Elements single = {{"a","1"}};
+ EXPECT_LESS(TensorAddress(single), // a strict prefix sorts before the longer address
+ TensorAddress({{"a","1"},{"b","1"}}));
+ EXPECT_LESS(TensorAddress({{"a","1"},{"b","1"}}), // otherwise ordering follows the first differing element ("b" < "c")
+ TensorAddress({{"a","1"},{"c","1"}}));
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/tests/tensor/tensor_conformance/CMakeLists.txt b/eval/src/tests/tensor/tensor_conformance/CMakeLists.txt
new file mode 100644
index 00000000000..0aaddb481cc
--- /dev/null
+++ b/eval/src/tests/tensor/tensor_conformance/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_tensor_tensor_conformance_test_app TEST
+ SOURCES
+ tensor_conformance_test.cpp
+ DEPENDS
+ vespalib
+ vespalib_vespalib_tensor
+)
+vespa_add_test(NAME vespalib_tensor_tensor_conformance_test_app COMMAND vespalib_tensor_tensor_conformance_test_app)
diff --git a/eval/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp b/eval/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp
new file mode 100644
index 00000000000..238d0604ee7
--- /dev/null
+++ b/eval/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp
@@ -0,0 +1,19 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/vespalib/eval/test/tensor_conformance.h>
+#include <vespa/vespalib/eval/simple_tensor_engine.h>
+#include <vespa/vespalib/tensor/default_tensor_engine.h>
+
+using vespalib::eval::SimpleTensorEngine;
+using vespalib::eval::test::TensorConformance;
+using vespalib::tensor::DefaultTensorEngine;
+
+TEST("require that reference tensor implementation passes all conformance tests") {
+ TEST_DO(TensorConformance::run_tests(SimpleTensorEngine::ref(), true)); // 'true' presumably enables the mixed-tensor cases too (test name says "all") -- confirm flag semantics
+}
+
+IGNORE_TEST("require that production tensor implementation passes non-mixed conformance tests") {
+ TEST_DO(TensorConformance::run_tests(DefaultTensorEngine::ref(), false)); // disabled via IGNORE_TEST; 'false' presumably skips the mixed-tensor subset -- confirm
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/tests/tensor/tensor_mapper/.gitignore b/eval/src/tests/tensor/tensor_mapper/.gitignore
new file mode 100644
index 00000000000..8a312ff3157
--- /dev/null
+++ b/eval/src/tests/tensor/tensor_mapper/.gitignore
@@ -0,0 +1 @@
+vespalib_tensor_mapper_test_app
diff --git a/eval/src/tests/tensor/tensor_mapper/CMakeLists.txt b/eval/src/tests/tensor/tensor_mapper/CMakeLists.txt
new file mode 100644
index 00000000000..fb18883f7ef
--- /dev/null
+++ b/eval/src/tests/tensor/tensor_mapper/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_tensor_mapper_test_app TEST
+ SOURCES
+ tensor_mapper_test.cpp
+ DEPENDS
+ vespalib
+ vespalib_vespalib_tensor
+)
+vespa_add_test(NAME vespalib_tensor_mapper_test_app COMMAND vespalib_tensor_mapper_test_app)
diff --git a/eval/src/tests/tensor/tensor_mapper/FILES b/eval/src/tests/tensor/tensor_mapper/FILES
new file mode 100644
index 00000000000..8678f175be1
--- /dev/null
+++ b/eval/src/tests/tensor/tensor_mapper/FILES
@@ -0,0 +1 @@
+tensor_mapper_test.cpp
diff --git a/eval/src/tests/tensor/tensor_mapper/tensor_mapper_test.cpp b/eval/src/tests/tensor/tensor_mapper/tensor_mapper_test.cpp
new file mode 100644
index 00000000000..f4edd8901e4
--- /dev/null
+++ b/eval/src/tests/tensor/tensor_mapper/tensor_mapper_test.cpp
@@ -0,0 +1,245 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/vespalib/util/stringfmt.h>
+#include <vespa/vespalib/tensor/sparse/sparse_tensor.h>
+#include <vespa/vespalib/tensor/sparse/sparse_tensor_builder.h>
+#include <vespa/vespalib/tensor/dense/dense_tensor.h>
+#include <vespa/vespalib/tensor/dense/dense_tensor_builder.h>
+#include <vespa/vespalib/tensor/types.h>
+#include <vespa/vespalib/tensor/tensor_factory.h>
+#include <vespa/vespalib/tensor/tensor_mapper.h>
+#include <vespa/vespalib/tensor/default_tensor.h>
+#include <ostream>
+
+using vespalib::eval::ValueType;
+using namespace vespalib::tensor;
+
+namespace vespalib {
+namespace tensor {
+
+static bool operator==(const Tensor &lhs, const Tensor &rhs)
+{
+ return lhs.equals(rhs);
+}
+
+}
+}
+
+template <typename BuilderType>
+bool defaultBuilder() { return false; } // compile-time probe: reports whether BuilderType is the DefaultTensor builder
+
+template <>
+bool defaultBuilder<DefaultTensor::builder>() { return true; } // only the default builder answers true
+
+template <typename BuilderType>
+struct TensorTFromBuilder; // trait: maps a builder type to the concrete tensor type it produces (specializations only, no primary definition)
+
+template <>
+struct TensorTFromBuilder<SparseTensorBuilder> {
+ using TensorT = SparseTensor;
+};
+
+template <typename BuilderType>
+using TensorTFromBuilder_t = typename TensorTFromBuilder<BuilderType>::TensorT; // convenience alias, mirrors the std::*_t naming convention
+
+struct FixtureBase
+{
+ Tensor::UP createDenseTensor(const DenseTensorCells &cells) {
+ return TensorFactory::createDense(cells);
+ }
+};
+
+template <typename BuilderType>
+struct Fixture : public FixtureBase
+{
+ BuilderType _builder;
+ using TensorT = TensorTFromBuilder_t<BuilderType>;
+ Fixture() : FixtureBase(), _builder() {}
+
+ Tensor::UP createTensor(const TensorCells &cells,
+ const TensorDimensions &dimensions) {
+ return TensorFactory::create(cells, dimensions, _builder);
+ }
+
+ void assertSparseMapImpl(const Tensor &exp,
+ const ValueType &tensorType,
+ const Tensor &rhs, bool isDefaultBuilder)
+ {
+ EXPECT_TRUE(tensorType.is_sparse());
+ if (isDefaultBuilder) {
+ TensorMapper mapper(tensorType);
+ std::unique_ptr<Tensor> mapped = mapper.map(rhs);
+ EXPECT_TRUE(!!mapped);
+ EXPECT_EQUAL(exp, *mapped);
+ }
+ std::unique_ptr<Tensor> mapped =
+ TensorMapper::mapToSparse<TensorT>(rhs, tensorType);
+ EXPECT_TRUE(!!mapped);
+ EXPECT_EQUAL(exp, *mapped);
+ }
+
+ void assertDenseMapImpl(const Tensor &exp,
+ const ValueType &tensorType,
+ const Tensor &rhs)
+ {
+ EXPECT_TRUE(tensorType.is_dense());
+ TensorMapper mapper(tensorType);
+ std::unique_ptr<Tensor> mapped = mapper.map(rhs);
+ EXPECT_TRUE(!!mapped);
+ EXPECT_EQUAL(exp, *mapped);
+ }
+
+ void
+ assertSparseMap(const TensorCells &expTensor,
+ const TensorDimensions &expDimensions,
+ const vespalib::string &typeSpec,
+ const TensorCells &rhsTensor,
+ const TensorDimensions &rhsDimensions)
+ {
+ assertSparseMapImpl(*createTensor(expTensor, expDimensions),
+ ValueType::from_spec(typeSpec),
+ *createTensor(rhsTensor, rhsDimensions),
+ defaultBuilder<BuilderType>());
+ }
+
+ void
+ assertDenseMap(const DenseTensorCells &expTensor,
+ const vespalib::string &typeSpec,
+ const TensorCells &rhsTensor,
+ const TensorDimensions &rhsDimensions)
+ {
+ assertDenseMapImpl(*createDenseTensor(expTensor),
+ ValueType::from_spec(typeSpec),
+ *createTensor(rhsTensor, rhsDimensions));
+ }
+};
+
+using SparseFixture = Fixture<SparseTensorBuilder>;
+
+template <typename FixtureType>
+void
+testTensorMapper(FixtureType &f)
+{
+ TEST_DO(f.assertSparseMap({
+ {{{"y","1"}}, 4},
+ {{{"y","2"}}, 12}
+ },
+ { "y" },
+ "tensor(y{})",
+ {
+ {{{"x","1"},{"y","1"}}, 1},
+ {{{"x","2"},{"y","1"}}, 3},
+ {{{"x","1"},{"y","2"}}, 5},
+ {{{"x","2"},{"y","2"}}, 7}
+ },
+ { "x", "y" }));
+ TEST_DO(f.assertSparseMap({
+ {{{"x","1"}}, 6},
+ {{{"x","2"}}, 10}
+ },
+ { "x" },
+ "tensor(x{})",
+ {
+ {{{"x","1"},{"y","1"}}, 1},
+ {{{"x","2"},{"y","1"}}, 3},
+ {{{"x","1"},{"y","2"}}, 5},
+ {{{"x","2"},{"y","2"}}, 7}
+ },
+ { "x", "y" }));
+ TEST_DO(f.assertDenseMap({
+ {{{"y",0}}, 4},
+ {{{"y",1}}, 12},
+ {{{"y",2}}, 0}
+ },
+ "tensor(y[3])",
+ {
+ {{{"x","1"},{"y","0"}}, 1},
+ {{{"x","2"},{"y","0"}}, 3},
+ {{{"x","1"},{"y","1"}}, 5},
+ {{{"x","2"},{"y","1"}}, 7}
+ },
+ { "x", "y" }));
+ TEST_DO(f.assertDenseMap({
+ {{{"y",0}}, 3},
+ {{{"y",1}}, 5},
+ {{{"y",2}}, 0}
+ },
+ "tensor(y[3])",
+ {
+ {{{"x","1"},{"y","0x"}}, 1},
+ {{{"x","2"},{"y",""}}, 3},
+ {{{"x","1"},{"y","1"}}, 5},
+ {{{"x","2"},{"y","10"}}, 7}
+ },
+ { "x", "y" }));
+ TEST_DO(f.assertDenseMap({
+ {{{"x",0},{"y",0}}, 1},
+ {{{"x",0},{"y",1}}, 5},
+ {{{"x",0},{"y",2}}, 0},
+ {{{"x",1},{"y",0}}, 3},
+ {{{"x",1},{"y",1}}, 0},
+ {{{"x",1},{"y",2}}, 0}
+ },
+ "tensor(x[2], y[3])",
+ {
+ {{{"x","0"},{"y","0"}}, 1},
+ {{{"x","1"},{"y","0"}}, 3},
+ {{{"x","0"},{"y","1"}}, 5},
+ {{{"x","10"},{"y","1"}}, 7}
+ },
+ { "x", "y" }));
+ TEST_DO(f.assertDenseMap({
+ {{{"x",0},{"y",0}}, 1},
+ {{{"x",0},{"y",1}}, 5},
+ {{{"x",1},{"y",0}}, 3},
+ {{{"x",1},{"y",1}}, 0}
+ },
+ "tensor(x[2], y[])",
+ {
+ {{{"x","0"},{"y","0"}}, 1},
+ {{{"x","1"},{"y","0"}}, 3},
+ {{{"x","0"},{"y","1"}}, 5},
+ {{{"x","10"},{"y","1"}}, 7}
+ },
+ { "x", "y" }));
+ TEST_DO(f.assertDenseMap({
+ {{{"x",0},{"y",0}}, 1},
+ {{{"x",0},{"y",1}}, 5},
+ {{{"x",1},{"y",0}}, 3},
+ {{{"x",1},{"y",1}}, 0},
+ {{{"x",2},{"y",0}}, 7},
+ {{{"x",2},{"y",1}}, 0}
+ },
+ "tensor(x[], y[])",
+ {
+ {{{"x","0"},{"y","0"}}, 1},
+ {{{"x","1"},{"y","0"}}, 3},
+ {{{"x","0"},{"y","1"}}, 5},
+ {{{"x","2"},{"y","0"}}, 7}
+ },
+ { "x", "y" }));
+ TEST_DO(f.assertDenseMap({
+ {{{"x",0},{"y",0}}, 1},
+ {{{"x",0},{"y",1}}, 5},
+ {{{"x",0},{"y",2}}, 0},
+ {{{"x",1},{"y",0}}, 3},
+ {{{"x",1},{"y",1}}, 0},
+ {{{"x",1},{"y",2}}, 0}
+ },
+ "tensor(x[], y[3])",
+ {
+ {{{"x","0"},{"y","0"}}, 1},
+ {{{"x","1"},{"y","0"}}, 3},
+ {{{"x","0"},{"y","1"}}, 5},
+ {{{"x","10"},{"y","3"}}, 7}
+ },
+ { "x", "y" }));
+}
+
+TEST_F("test tensor mapper for SparseTensor", SparseFixture)
+{
+ testTensorMapper(f);
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/tests/tensor/tensor_performance/.gitignore b/eval/src/tests/tensor/tensor_performance/.gitignore
new file mode 100644
index 00000000000..c9401246324
--- /dev/null
+++ b/eval/src/tests/tensor/tensor_performance/.gitignore
@@ -0,0 +1 @@
+vespalib_tensor_performance_test_app
diff --git a/eval/src/tests/tensor/tensor_performance/CMakeLists.txt b/eval/src/tests/tensor/tensor_performance/CMakeLists.txt
new file mode 100644
index 00000000000..a2f041db265
--- /dev/null
+++ b/eval/src/tests/tensor/tensor_performance/CMakeLists.txt
@@ -0,0 +1,13 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_tensor_performance_test_app TEST
+ SOURCES
+ tensor_performance_test.cpp
+ DEPENDS
+ vespalib
+ vespalib_vespalib_tensor
+)
+vespa_add_test(
+ NAME vespalib_tensor_performance_test_app
+ COMMAND vespalib_tensor_performance_test_app
+ ENVIRONMENT "TEST_SUBSET=SMOKETEST"
+)
diff --git a/eval/src/tests/tensor/tensor_performance/FILES b/eval/src/tests/tensor/tensor_performance/FILES
new file mode 100644
index 00000000000..4cec89055e5
--- /dev/null
+++ b/eval/src/tests/tensor/tensor_performance/FILES
@@ -0,0 +1 @@
+tensor_performance_test.cpp
diff --git a/eval/src/tests/tensor/tensor_performance/tensor_performance_test.cpp b/eval/src/tests/tensor/tensor_performance/tensor_performance_test.cpp
new file mode 100644
index 00000000000..8dc57bd0f71
--- /dev/null
+++ b/eval/src/tests/tensor/tensor_performance/tensor_performance_test.cpp
@@ -0,0 +1,378 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/vespalib/eval/function.h>
+#include <vespa/vespalib/eval/interpreted_function.h>
+#include <vespa/vespalib/eval/tensor_nodes.h>
+#include <vespa/vespalib/eval/tensor_spec.h>
+#include <vespa/vespalib/tensor/sparse/sparse_tensor.h>
+#include <vespa/vespalib/tensor/sparse/sparse_tensor_builder.h>
+#include <vespa/vespalib/tensor/dense/dense_tensor_builder.h>
+#include <vespa/vespalib/tensor/tensor.h>
+#include <vespa/vespalib/tensor/tensor_builder.h>
+#include <vespa/vespalib/util/benchmark_timer.h>
+#include <vespa/vespalib/tensor/default_tensor_engine.h>
+
+using namespace vespalib;
+using namespace vespalib::eval;
+using namespace vespalib::tensor;
+
+//-----------------------------------------------------------------------------
+
+const vespalib::string dot_product_match_expr = "sum(query*document)"; // NOTE(review): byte-identical to dot_product_multiply_expr although the benchmarks claim to compare "match" vs "multiply" strategies -- confirm this duplication is intentional
+const vespalib::string dot_product_multiply_expr = "sum(query*document)";
+const vespalib::string model_match_expr = "sum((query*document)*model)";
+const vespalib::string matrix_product_expr = "sum(sum((query+document)*model,x))";
+
+//-----------------------------------------------------------------------------
+
+Value::UP wrap(std::unique_ptr<eval::Tensor> tensor) {
+ return Value::UP(new TensorValue(std::move(tensor))); // take ownership of the tensor and expose it as an eval Value
+}
+
+//-----------------------------------------------------------------------------
+
+struct Params {
+ std::map<vespalib::string, Value::UP> map; // parameter name -> owned value
+ Params &add(const vespalib::string &name, Value::UP value) { // returns *this so add() calls can be chained
+ map.emplace(name, std::move(value));
+ return *this;
+ }
+ Params &add(const vespalib::string &name, std::unique_ptr<eval::Tensor> value) { // convenience overload: wraps the raw tensor in a TensorValue first
+ return add(name, wrap(std::move(value)));
+ }
+};
+
+void inject_params(const Function &function, const Params &params,
+ InterpretedFunction::Context &ctx)
+{
+ ctx.clear_params(); // start from a clean context so repeated injections do not accumulate
+ EXPECT_EQUAL(params.map.size(), function.num_params()); // every declared parameter must be supplied, and none extra
+ for (size_t i = 0; i < function.num_params(); ++i) {
+ auto param = params.map.find(function.param_name(i)); // bind by name, in the function's declaration order
+ ASSERT_TRUE(param != params.map.end());
+ ctx.add_param(*(param->second));
+ }
+}
+
+std::vector<ValueType> extract_param_types(const Function &function, const Params &params) {
+ std::vector<ValueType> result; // value types collected in the function's parameter declaration order
+ EXPECT_EQUAL(params.map.size(), function.num_params()); // parameter sets must match exactly
+ for (size_t i = 0; i < function.num_params(); ++i) {
+ auto param = params.map.find(function.param_name(i));
+ ASSERT_TRUE(param != params.map.end());
+ result.push_back(param->second->type());
+ }
+ return result;
+}
+
+double calculate_expression(const vespalib::string &expression, const Params &params) {
+ const Function function = Function::parse(expression);
+ const NodeTypes types(function, extract_param_types(function, params)); // resolve node types from the actual parameter types
+ const InterpretedFunction interpreted(tensor::DefaultTensorEngine::ref(), function, types);
+ InterpretedFunction::Context context;
+ inject_params(function, params, context);
+ const Value &result = interpreted.eval(context);
+ EXPECT_TRUE(result.is_double()); // all benchmark expressions sum-reduce to a scalar
+ return result.as_double();
+}
+
+DoubleValue dummy_result(0.0);
+const Value &dummy_ranking(InterpretedFunction::Context &) { return dummy_result; }
+
+double benchmark_expression_us(const vespalib::string &expression, const Params &params) {
+ const Function function = Function::parse(expression);
+ const NodeTypes types(function, extract_param_types(function, params));
+ const InterpretedFunction interpreted(tensor::DefaultTensorEngine::ref(), function, types);
+ InterpretedFunction::Context context;
+ inject_params(function, params, context);
+ auto ranking = [&](){ interpreted.eval(context); }; // the work being measured
+ auto baseline = [&](){ dummy_ranking(context); }; // no-op call; presumably subtracted by BenchmarkTimer to remove call overhead -- confirm
+ return BenchmarkTimer::benchmark(ranking, baseline, 5.0) * 1000.0 * 1000.0; // 5 s budget; convert seconds -> microseconds
+}
+
+//-----------------------------------------------------------------------------
+
+tensor::Tensor::UP make_tensor(const TensorSpec &spec) {
+ auto tensor = DefaultTensorEngine::ref().create(spec);
+ return tensor::Tensor::UP(dynamic_cast<tensor::Tensor*>(tensor.release())); // downcast eval::Tensor -> tensor::Tensor; NOTE(review): a failed cast would leak the released pointer and return an empty UP
+}
+
+//-----------------------------------------------------------------------------
+
+TEST("SMOKETEST - require that dot product benchmark expressions produce expected results") {
+ Params params;
+ params.add("query", make_tensor(TensorSpec("tensor(x{})")
+ .add({{"x","0"}}, 1.0)
+ .add({{"x","1"}}, 2.0)
+ .add({{"x","2"}}, 3.0)));
+ params.add("document", make_tensor(TensorSpec("tensor(x{})")
+ .add({{"x","0"}}, 2.0)
+ .add({{"x","1"}}, 2.0)
+ .add({{"x","2"}}, 2.0)));
+ EXPECT_EQUAL(calculate_expression(dot_product_match_expr, params), 12.0);
+ EXPECT_EQUAL(calculate_expression(dot_product_multiply_expr, params), 12.0);
+}
+
+TEST("SMOKETEST - require that model match benchmark expression produces expected result") {
+ Params params;
+ params.add("query", make_tensor(TensorSpec("tensor(x{})")
+ .add({{"x","0"}}, 1.0)
+ .add({{"x","1"}}, 2.0)));
+ params.add("document", make_tensor(TensorSpec("tensor(y{})")
+ .add({{"y","0"}}, 3.0)
+ .add({{"y","1"}}, 4.0)));
+ params.add("model", make_tensor(TensorSpec("tensor(x{},y{})")
+ .add({{"x","0"},{"y","0"}}, 2.0)
+ .add({{"x","0"},{"y","1"}}, 2.0)
+ .add({{"x","1"},{"y","0"}}, 2.0)
+ .add({{"x","1"},{"y","1"}}, 2.0)));
+ EXPECT_EQUAL(calculate_expression(model_match_expr, params), 42.0);
+}
+
+TEST("SMOKETEST - require that matrix product benchmark expression produces expected result") {
+ Params params;
+ params.add("query", make_tensor(TensorSpec("tensor(x{})")
+ .add({{"x","0"}}, 1.0)
+ .add({{"x","1"}}, 0.0)));
+ params.add("document", make_tensor(TensorSpec("tensor(x{})")
+ .add({{"x","0"}}, 0.0)
+ .add({{"x","1"}}, 2.0)));
+ params.add("model", make_tensor(TensorSpec("tensor(x{},y{})")
+ .add({{"x","0"},{"y","0"}}, 1.0)
+ .add({{"x","0"},{"y","1"}}, 2.0)
+ .add({{"x","1"},{"y","0"}}, 3.0)
+ .add({{"x","1"},{"y","1"}}, 4.0)));
+ EXPECT_EQUAL(calculate_expression(matrix_product_expr, params), 17.0);
+}
+
+//-----------------------------------------------------------------------------
+
+struct DummyBuilder : TensorBuilder {
+ Dimension define_dimension(const vespalib::string &) override { return 0; }
+ TensorBuilder &add_label(Dimension, const vespalib::string &) override { return *this; }
+ TensorBuilder &add_cell(double) override { return *this; }
+ tensor::Tensor::UP build() override { return tensor::Tensor::UP(); }
+};
+
+
+struct DummyDenseTensorBuilder
+{
+ using Dimension = TensorBuilder::Dimension;
+ Dimension defineDimension(const vespalib::string &, size_t) { return 0; }
+ DummyDenseTensorBuilder &addLabel(Dimension, size_t) { return *this; }
+ DummyDenseTensorBuilder &addCell(double) { return *this; }
+ tensor::Tensor::UP build() { return tensor::Tensor::UP(); }
+};
+
+struct DimensionSpec {
+ vespalib::string name;
+ size_t count;
+ size_t offset;
+ DimensionSpec(const vespalib::string &name_in, size_t count_in, size_t offset_in = 0)
+ : name(name_in), count(count_in), offset(offset_in) {}
+};
+
+struct StringBinding {
+ TensorBuilder::Dimension dimension;
+ vespalib::string label;
+ StringBinding(TensorBuilder &builder, const DimensionSpec &dimension_in)
+ : dimension(builder.define_dimension(dimension_in.name)),
+ label()
+ {
+ }
+ void set_label(size_t id) {
+ label = vespalib::make_string("%zu", id);
+ }
+ static void add_cell(TensorBuilder &builder, double value) {
+ builder.add_cell(value);
+ }
+ void add_label(TensorBuilder &builder) const {
+ builder.add_label(dimension, label);
+ }
+};
+
+struct NumberBinding {
+ TensorBuilder::Dimension dimension;
+ size_t label;
+ template <typename Builder>
+ NumberBinding(Builder &builder, const DimensionSpec &dimension_in)
+ : dimension(builder.defineDimension(dimension_in.name,
+ dimension_in.offset +
+ dimension_in.count)),
+ label()
+ {
+ }
+ void set_label(size_t id) {
+ label = id;
+ }
+ template <typename Builder>
+ static void add_cell(Builder &builder, double value) {
+ builder.addCell(value);
+ }
+ template <typename Builder>
+ void add_label(Builder &builder) const {
+ builder.addLabel(dimension, label);
+ }
+};
+
+
+template <typename Builder, typename Binding>
+void build_tensor(Builder &builder, const std::vector<DimensionSpec> &dimensions,
+ std::vector<Binding> &bindings)
+{
+ if (bindings.size() == dimensions.size()) { // leaf: all dimensions bound -> emit one cell
+ for (const auto &bound: bindings) {
+ bound.add_label(builder);
+ }
+ Binding::add_cell(builder, 42); // constant cell value; only construction cost is of interest
+ } else { // recurse: bind the next dimension and enumerate its labels
+ const auto &spec = dimensions[bindings.size()];
+ bindings.emplace_back(builder, spec);
+ for (size_t i = 0; i < spec.count; ++i) {
+ bindings.back().set_label(spec.offset + i);
+ build_tensor(builder, dimensions, bindings);
+ }
+ bindings.pop_back(); // restore state for the caller's iteration
+ }
+}
+
+template <typename Builder, typename IBuilder, typename Binding>
+tensor::Tensor::UP make_tensor_impl(const std::vector<DimensionSpec> &dimensions) {
+ Builder builder;
+ std::vector<Binding> bindings;
+ bindings.reserve(dimensions.size()); // at most one binding per dimension during the recursion
+ build_tensor<IBuilder, Binding>(builder, dimensions, bindings); // IBuilder selects the builder interface the bindings operate on
+ return builder.build();
+}
+
+//-----------------------------------------------------------------------------
+
+enum class BuilderType { DUMMY, SPARSE, NUMBERDUMMY,
+ DENSE };
+
+const BuilderType DUMMY = BuilderType::DUMMY;
+const BuilderType SPARSE = BuilderType::SPARSE;
+const BuilderType NUMBERDUMMY = BuilderType::NUMBERDUMMY;
+const BuilderType DENSE = BuilderType::DENSE;
+
+const char *name(BuilderType type) { // display name used in benchmark log lines
+ switch (type) {
+ case BuilderType::DUMMY: return " dummy"; // leading space aligns with "sparse" in output
+ case BuilderType::SPARSE: return "sparse";
+ case BuilderType::NUMBERDUMMY: return "numberdummy";
+ case BuilderType::DENSE: return "dense";
+ }
+ abort(); // unreachable for valid enum values
+}
+
+tensor::Tensor::UP make_tensor(BuilderType type, const std::vector<DimensionSpec> &dimensions) {
+ switch (type) {
+ case BuilderType::DUMMY:
+ return make_tensor_impl<DummyBuilder, TensorBuilder, StringBinding>
+ (dimensions);
+ case BuilderType::SPARSE:
+ return make_tensor_impl<SparseTensorBuilder, TensorBuilder,
+ StringBinding>(dimensions);
+ case BuilderType::NUMBERDUMMY:
+ return make_tensor_impl<DummyDenseTensorBuilder,
+ DummyDenseTensorBuilder, NumberBinding>(dimensions);
+ case BuilderType::DENSE:
+ return make_tensor_impl<DenseTensorBuilder, DenseTensorBuilder,
+ NumberBinding>(dimensions);
+ }
+ abort();
+}
+
+//-----------------------------------------------------------------------------
+
+struct BuildTask {
+ BuilderType type;
+ std::vector<DimensionSpec> spec;
+ BuildTask(BuilderType type_in, const std::vector<DimensionSpec> &spec_in) : type(type_in), spec(spec_in) {}
+ void operator()() { tensor::Tensor::UP tensor = make_tensor(type, spec); }
+};
+
+double benchmark_build_us(BuilderType type, const std::vector<DimensionSpec> &spec) {
+ BuildTask build_task(type, spec);
+ BuildTask dummy_task((type == DENSE) ? NUMBERDUMMY : DUMMY, spec); // matching no-op builder as baseline, isolating tensor construction from label generation
+ return BenchmarkTimer::benchmark(build_task, dummy_task, 5.0) * 1000.0 * 1000.0; // 5 s budget; seconds -> microseconds
+}
+
+TEST("benchmark create/destroy time for 1d tensors") {
+ for (size_t size: {5, 10, 25, 50, 100, 250, 500}) {
+ for (auto type: {SPARSE, DENSE}) {
+ double time_us = benchmark_build_us(type, {DimensionSpec("x", size)});
+ fprintf(stderr, "-- 1d tensor create/destroy (%s) with size %zu: %g us\n", name(type), size, time_us);
+ }
+ }
+}
+
+TEST("benchmark create/destroy time for 2d tensors") {
+ for (size_t size: {5, 10, 25, 50, 100}) {
+ for (auto type: {SPARSE, DENSE}) {
+ double time_us = benchmark_build_us(type, {DimensionSpec("x", size), DimensionSpec("y", size)});
+ fprintf(stderr, "-- 2d tensor create/destroy (%s) with size %zux%zu: %g us\n", name(type), size, size, time_us);
+ }
+ }
+}
+
+//-----------------------------------------------------------------------------
+
+TEST("benchmark dot product using match") {
+ for (size_t size: {10, 25, 50, 100, 250}) {
+ for (auto type: {SPARSE, DENSE}) {
+ Params params;
+ params.add("query", make_tensor(type, {DimensionSpec("x", size)}));
+ params.add("document", make_tensor(type, {DimensionSpec("x", size)}));
+ double time_us = benchmark_expression_us(dot_product_match_expr, params);
+ fprintf(stderr, "-- dot product (%s) using match %zu vs %zu: %g us\n", name(type), size, size, time_us);
+ }
+ }
+}
+
+TEST("benchmark dot product using multiply") {
+ for (size_t size: {10, 25, 50, 100, 250}) {
+ for (auto type: {SPARSE, DENSE}) {
+ Params params;
+ params.add("query", make_tensor(type, {DimensionSpec("x", size)}));
+ params.add("document", make_tensor(type, {DimensionSpec("x", size)}));
+ double time_us = benchmark_expression_us(dot_product_multiply_expr, params);
+ fprintf(stderr, "-- dot product (%s) using multiply %zu vs %zu: %g us\n", name(type), size, size, time_us);
+ }
+ }
+}
+
+TEST("benchmark model match") {
+ for (size_t model_size: {25, 50, 100}) {
+ for (size_t vector_size: {5, 10, 25, 50, 100}) {
+ if (vector_size <= model_size) {
+ for (auto type: {SPARSE}) {
+ Params params;
+ params.add("query", make_tensor(type, {DimensionSpec("x", vector_size)}));
+ params.add("document", make_tensor(type, {DimensionSpec("y", vector_size)}));
+ params.add("model", make_tensor(type, {DimensionSpec("x", model_size), DimensionSpec("y", model_size)}));
+ double time_us = benchmark_expression_us(model_match_expr, params);
+ fprintf(stderr, "-- model match (%s) %zu * %zu vs %zux%zu: %g us\n", name(type), vector_size, vector_size, model_size, model_size, time_us);
+ }
+ }
+ }
+ }
+}
+
+TEST("benchmark matrix product") {
+ for (size_t vector_size: {5, 10, 25, 50}) {
+ size_t matrix_size = vector_size * 2;
+ for (auto type: {SPARSE, DENSE}) {
+ Params params;
+ params.add("query", make_tensor(type, {DimensionSpec("x", matrix_size)}));
+ params.add("document", make_tensor(type, {DimensionSpec("x", matrix_size)}));
+ params.add("model", make_tensor(type, {DimensionSpec("x", matrix_size), DimensionSpec("y", matrix_size)}));
+ double time_us = benchmark_expression_us(matrix_product_expr, params);
+ fprintf(stderr, "-- matrix product (%s) %zu + %zu vs %zux%zu: %g us\n", name(type), vector_size, vector_size, matrix_size, matrix_size, time_us);
+ }
+ }
+}
+
+//-----------------------------------------------------------------------------
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/tests/tensor/tensor_serialization/.gitignore b/eval/src/tests/tensor/tensor_serialization/.gitignore
new file mode 100644
index 00000000000..f8525561c6b
--- /dev/null
+++ b/eval/src/tests/tensor/tensor_serialization/.gitignore
@@ -0,0 +1 @@
+vespalib_tensor_serialization_test_app
diff --git a/eval/src/tests/tensor/tensor_serialization/CMakeLists.txt b/eval/src/tests/tensor/tensor_serialization/CMakeLists.txt
new file mode 100644
index 00000000000..2fdf47d4738
--- /dev/null
+++ b/eval/src/tests/tensor/tensor_serialization/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_tensor_serialization_test_app TEST
+ SOURCES
+ tensor_serialization_test.cpp
+ DEPENDS
+ vespalib
+ vespalib_vespalib_tensor
+)
+vespa_add_test(NAME vespalib_tensor_serialization_test_app COMMAND vespalib_tensor_serialization_test_app)
diff --git a/eval/src/tests/tensor/tensor_serialization/FILES b/eval/src/tests/tensor/tensor_serialization/FILES
new file mode 100644
index 00000000000..882dd368f5c
--- /dev/null
+++ b/eval/src/tests/tensor/tensor_serialization/FILES
@@ -0,0 +1 @@
+tensor_serialization_test.cpp
diff --git a/eval/src/tests/tensor/tensor_serialization/tensor_serialization_test.cpp b/eval/src/tests/tensor/tensor_serialization/tensor_serialization_test.cpp
new file mode 100644
index 00000000000..95d6a45f196
--- /dev/null
+++ b/eval/src/tests/tensor/tensor_serialization/tensor_serialization_test.cpp
@@ -0,0 +1,255 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/vespalib/tensor/sparse/sparse_tensor.h>
+#include <vespa/vespalib/tensor/sparse/sparse_tensor_builder.h>
+#include <vespa/vespalib/tensor/types.h>
+#include <vespa/vespalib/tensor/default_tensor.h>
+#include <vespa/vespalib/tensor/tensor_factory.h>
+#include <vespa/vespalib/tensor/serialization/typed_binary_format.h>
+#include <vespa/vespalib/tensor/serialization/sparse_binary_format.h>
+#include <vespa/vespalib/objects/nbostream.h>
+#include <vespa/vespalib/objects/hexdump.h>
+#include <ostream>
+
+using namespace vespalib::tensor;
+using vespalib::nbostream;
+using ExpBuffer = std::vector<uint8_t>;
+
+namespace std {
+
+bool operator==(const std::vector<uint8_t> &exp, const nbostream &stream)
+{
+ return ((exp.size() == stream.size()) &&
+ (memcmp(&exp[0], stream.peek(), exp.size()) == 0));
+}
+
+std::ostream &operator<<(std::ostream &out, const std::vector<uint8_t> &rhs)
+{
+ out << vespalib::HexDump(&rhs[0], rhs.size());
+ return out;
+}
+
+}
+
+namespace vespalib {
+
+namespace tensor {
+
+static bool operator==(const Tensor &lhs, const Tensor &rhs)
+{
+ return lhs.equals(rhs);
+}
+
+}
+}
+
+template <class BuilderType>
+void
+checkDeserialize(vespalib::nbostream &stream, const Tensor &rhs)
+{
+ (void) stream;
+ (void) rhs;
+}
+
+template <>
+void
+checkDeserialize<DefaultTensor::builder>(nbostream &stream, const Tensor &rhs)
+{
+ nbostream wrapStream(stream.peek(), stream.size());
+ auto chk = TypedBinaryFormat::deserialize(wrapStream);
+ EXPECT_EQUAL(0u, wrapStream.size());
+ EXPECT_EQUAL(*chk, rhs);
+}
+
+template <typename BuilderType>
+struct Fixture
+{
+ BuilderType _builder;
+ Fixture() : _builder() {}
+
+ Tensor::UP createTensor(const TensorCells &cells) {
+ return vespalib::tensor::TensorFactory::create(cells, _builder);
+ }
+ Tensor::UP createTensor(const TensorCells &cells, const TensorDimensions &dimensions) {
+ return TensorFactory::create(cells, dimensions, _builder);
+ }
+
+ void serialize(nbostream &stream, const Tensor &tensor) {
+ TypedBinaryFormat::serialize(stream, tensor);
+ }
+ Tensor::UP deserialize(nbostream &stream) {
+ BuilderType builder;
+ nbostream wrapStream(stream.peek(), stream.size());
+ auto formatId = wrapStream.getInt1_4Bytes();
+ ASSERT_EQUAL(formatId, 1); // sparse format
+ SparseBinaryFormat::deserialize(wrapStream, builder);
+ EXPECT_TRUE(wrapStream.size() == 0);
+ auto ret = builder.build();
+ checkDeserialize<BuilderType>(stream, *ret);
+ stream.adjustReadPos(stream.size());
+ return ret;
+ }
+ void assertSerialized(const ExpBuffer &exp, const TensorCells &rhs,
+ const TensorDimensions &rhsDimensions) {
+ Tensor::UP rhsTensor(createTensor(rhs, rhsDimensions));
+ nbostream rhsStream;
+ serialize(rhsStream, *rhsTensor);
+ EXPECT_EQUAL(exp, rhsStream);
+ auto rhs2 = deserialize(rhsStream);
+ EXPECT_EQUAL(*rhs2, *rhsTensor);
+ }
+};
+
+using SparseFixture = Fixture<SparseTensorBuilder>;
+
+
+template <typename FixtureType>
+void
+testTensorSerialization(FixtureType &f)
+{
+ TEST_DO(f.assertSerialized({ 0x01, 0x00, 0x00 }, {}, {}));
+ TEST_DO(f.assertSerialized({ 0x01, 0x01, 0x01, 0x78, 0x00 },
+ {}, { "x" }));
+ TEST_DO(f.assertSerialized({ 0x01, 0x02, 0x01, 0x78, 0x01, 0x79, 0x00 },
+ {}, { "x", "y" }));
+ TEST_DO(f.assertSerialized({ 0x01, 0x01, 0x01, 0x78, 0x01, 0x01, 0x31, 0x40,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { {{{"x","1"}}, 3} }, { "x" }));
+ TEST_DO(f.assertSerialized({ 0x01, 0x02, 0x01, 0x78, 0x01, 0x79, 0x01, 0x00,
+ 0x00, 0x40, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00 },
+ { {{}, 3} }, { "x", "y"}));
+ TEST_DO(f.assertSerialized({ 0x01, 0x02, 0x01, 0x78, 0x01, 0x79, 0x01, 0x01,
+ 0x31, 0x00, 0x40, 0x08, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { {{{"x","1"}}, 3} }, { "x", "y" }));
+ TEST_DO(f.assertSerialized({ 0x01, 0x02, 0x01, 0x78, 0x01, 0x79, 0x01, 0x00,
+ 0x01, 0x33, 0x40, 0x08, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { {{{"y","3"}}, 3} }, { "x", "y" }));
+ TEST_DO(f.assertSerialized({ 0x01, 0x02, 0x01, 0x78, 0x01, 0x79, 0x01, 0x01,
+ 0x32, 0x01, 0x34, 0x40, 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00 },
+ { {{{"x","2"}, {"y", "4"}}, 3} }, { "x", "y" }));
+ TEST_DO(f.assertSerialized({ 0x01, 0x02, 0x01, 0x78, 0x01, 0x79,
+ 0x01, 0x01, 0x31, 0x00, 0x40, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { {{{"x","1"}}, 3} }, {"x", "y"}));
+}
+
+TEST_F("test tensor serialization for SparseTensor", SparseFixture)
+{
+ testTensorSerialization(f);
+}
+
+
+struct DenseFixture
+{
+ Tensor::UP createTensor(const DenseTensorCells &cells) {
+ return TensorFactory::createDense(cells);
+ }
+
+ void serialize(nbostream &stream, const Tensor &tensor) {
+ TypedBinaryFormat::serialize(stream, tensor);
+ }
+
+ Tensor::UP deserialize(nbostream &stream) {
+ nbostream wrapStream(stream.peek(), stream.size());
+ auto ret = TypedBinaryFormat::deserialize(wrapStream);
+ EXPECT_TRUE(wrapStream.size() == 0);
+ stream.adjustReadPos(stream.size());
+ return ret;
+ }
+ void assertSerialized(const ExpBuffer &exp, const DenseTensorCells &rhs) {
+ Tensor::UP rhsTensor(createTensor(rhs));
+ nbostream rhsStream;
+ serialize(rhsStream, *rhsTensor);
+ EXPECT_EQUAL(exp, rhsStream);
+ auto rhs2 = deserialize(rhsStream);
+ EXPECT_EQUAL(*rhs2, *rhsTensor);
+ }
+};
+
+
+TEST_F("test tensor serialization for DenseTensor", DenseFixture)
+{
+ TEST_DO(f.assertSerialized({ 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00},
+ {}));
+ TEST_DO(f.assertSerialized({ 0x02, 0x01, 0x01, 0x78, 0x01,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00},
+ { {{{"x",0}}, 0} }));
+ TEST_DO(f.assertSerialized({ 0x02, 0x02, 0x01, 0x78, 0x01,
+ 0x01, 0x79, 0x01,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00 },
+ { {{{"x",0},{"y", 0}}, 0} }));
+ TEST_DO(f.assertSerialized({ 0x02, 0x01, 0x01, 0x78, 0x02,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00 },
+ { {{{"x",1}}, 3} }));
+ TEST_DO(f.assertSerialized({ 0x02, 0x02, 0x01, 0x78, 0x01,
+ 0x01, 0x79, 0x01,
+ 0x40, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00 },
+ { {{{"x",0},{"y",0}}, 3} }));
+ TEST_DO(f.assertSerialized({ 0x02, 0x02, 0x01, 0x78, 0x02,
+ 0x01, 0x79, 0x01,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00 },
+ { {{{"x",1},{"y",0}}, 3} }));
+ TEST_DO(f.assertSerialized({ 0x02, 0x02, 0x01, 0x78, 0x01,
+ 0x01, 0x79, 0x04,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00 },
+ { {{{"x",0},{"y",3}}, 3} }));
+ TEST_DO(f.assertSerialized({ 0x02, 0x02, 0x01, 0x78, 0x03,
+ 0x01, 0x79, 0x05,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00 },
+ { {{{"x",2}, {"y",4}}, 3} }));
+}
+
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/tests/tensor/tensor_slime_serialization/.gitignore b/eval/src/tests/tensor/tensor_slime_serialization/.gitignore
new file mode 100644
index 00000000000..9cb3b664d58
--- /dev/null
+++ b/eval/src/tests/tensor/tensor_slime_serialization/.gitignore
@@ -0,0 +1 @@
+vespalib_tensor_slime_serialization_test_app
diff --git a/eval/src/tests/tensor/tensor_slime_serialization/CMakeLists.txt b/eval/src/tests/tensor/tensor_slime_serialization/CMakeLists.txt
new file mode 100644
index 00000000000..a0323928fd3
--- /dev/null
+++ b/eval/src/tests/tensor/tensor_slime_serialization/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_tensor_slime_serialization_test_app TEST
+ SOURCES
+ tensor_slime_serialization_test.cpp
+ DEPENDS
+ vespalib
+ vespalib_vespalib_tensor
+)
+vespa_add_test(NAME vespalib_tensor_slime_serialization_test_app COMMAND vespalib_tensor_slime_serialization_test_app)
diff --git a/eval/src/tests/tensor/tensor_slime_serialization/FILES b/eval/src/tests/tensor/tensor_slime_serialization/FILES
new file mode 100644
index 00000000000..874f951beb5
--- /dev/null
+++ b/eval/src/tests/tensor/tensor_slime_serialization/FILES
@@ -0,0 +1 @@
+tensor_slime_serialization_test.cpp
diff --git a/eval/src/tests/tensor/tensor_slime_serialization/tensor_slime_serialization_test.cpp b/eval/src/tests/tensor/tensor_slime_serialization/tensor_slime_serialization_test.cpp
new file mode 100644
index 00000000000..f3005a21730
--- /dev/null
+++ b/eval/src/tests/tensor/tensor_slime_serialization/tensor_slime_serialization_test.cpp
@@ -0,0 +1,185 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/vespalib/tensor/sparse/sparse_tensor.h>
+#include <vespa/vespalib/tensor/sparse/sparse_tensor_builder.h>
+#include <vespa/vespalib/tensor/types.h>
+#include <vespa/vespalib/tensor/default_tensor.h>
+#include <vespa/vespalib/tensor/tensor_factory.h>
+#include <vespa/vespalib/tensor/serialization/typed_binary_format.h>
+#include <vespa/vespalib/tensor/serialization/slime_binary_format.h>
+#include <vespa/vespalib/data/slime/slime.h>
+#include <iostream>
+
+using namespace vespalib::tensor;
+
+template <typename BuilderType>
+struct Fixture
+{
+ BuilderType _builder;
+ Fixture() : _builder() {}
+
+ Tensor::UP createTensor(const TensorCells &cells) {
+ return vespalib::tensor::TensorFactory::create(cells, _builder);
+ }
+ Tensor::UP createTensor(const TensorCells &cells, const TensorDimensions &dimensions) {
+ return TensorFactory::create(cells, dimensions, _builder);
+ }
+
+ static inline uint32_t getTensorTypeId();
+
+ void assertSerialized(const vespalib::string &exp, const TensorCells &rhs,
+ const TensorDimensions &rhsDimensions) {
+ Tensor::UP rhsTensor(createTensor(rhs, rhsDimensions));
+ auto slime = SlimeBinaryFormat::serialize(*rhsTensor);
+ vespalib::slime::Memory memory_exp(exp);
+ vespalib::Slime expSlime;
+ size_t used = vespalib::slime::JsonFormat::decode(memory_exp, expSlime);
+ EXPECT_EQUAL(used, memory_exp.size);
+ EXPECT_EQUAL(expSlime, *slime);
+ }
+};
+
+template <>
+uint32_t
+Fixture<SparseTensorBuilder>::getTensorTypeId() { return 2u; }
+
+
+using SparseFixture = Fixture<SparseTensorBuilder>;
+
+
+namespace {
+vespalib::string twoCellsJson[3] =
+{
+ "{ dimensions: [ 'x', 'y' ],"
+ " cells: ["
+ "{ address: { y:'3'}, value: 4.0 },"
+ "{ address: { x:'1'}, value: 3.0 }"
+ "] }",
+ "{ dimensions: [ 'x', 'y' ],"
+ " cells: ["
+ "{ address: { x:'1'}, value: 3.0 },"
+ "{ address: { y:'3'}, value: 4.0 }"
+ "] }",
+ "{ dimensions: [ 'x', 'y' ],"
+ " cells: ["
+ "{ address: { x:'1'}, value: 3.0 },"
+ "{ address: { y:'3'}, value: 4.0 }"
+ "] }",
+};
+}
+
+
+template <typename FixtureType>
+void
+testTensorSlimeSerialization(FixtureType &f)
+{
+ TEST_DO(f.assertSerialized("{ dimensions: [], cells: [] }", {}, {}));
+ TEST_DO(f.assertSerialized("{ dimensions: [ 'x' ], cells: [] }",
+ {}, { "x" }));
+ TEST_DO(f.assertSerialized("{ dimensions: [ 'x', 'y' ], cells: [] }",
+ {}, { "x", "y" }));
+ TEST_DO(f.assertSerialized("{ dimensions: [ 'x' ],"
+ "cells: ["
+ "{ address: { x: '1' }, value: 3.0 }"
+ "] }",
+ { {{{"x","1"}}, 3} }, { "x" }));
+ TEST_DO(f.assertSerialized("{ dimensions: [ 'x', 'y' ],"
+ " cells: ["
+ "{ address: { }, value: 3.0 }"
+ "] }",
+ { {{}, 3} }, { "x", "y"}));
+ TEST_DO(f.assertSerialized("{ dimensions: [ 'x', 'y' ],"
+ " cells: ["
+ "{ address: { x: '1' }, value: 3.0 }"
+ "] }",
+ { {{{"x","1"}}, 3} }, { "x", "y" }));
+ TEST_DO(f.assertSerialized("{ dimensions: [ 'x', 'y' ],"
+ " cells: ["
+ "{ address: { y: '3' }, value: 3.0 }"
+ "] }",
+ { {{{"y","3"}}, 3} }, { "x", "y" }));
+ TEST_DO(f.assertSerialized("{ dimensions: [ 'x', 'y' ],"
+ " cells: ["
+ "{ address: { x:'2', y:'4'}, value: 3.0 }"
+ "] }",
+ { {{{"x","2"}, {"y", "4"}}, 3} }, { "x", "y" }));
+ TEST_DO(f.assertSerialized("{ dimensions: [ 'x', 'y' ],"
+ " cells: ["
+ "{ address: { x:'1'}, value: 3.0 }"
+ "] }",
+ { {{{"x","1"}}, 3} }, {"x", "y"}));
+ TEST_DO(f.assertSerialized(twoCellsJson[FixtureType::getTensorTypeId()],
+ { {{{"x","1"}}, 3}, {{{"y","3"}}, 4} },
+ {"x", "y"}));
+}
+
+TEST_F("test tensor slime serialization for SparseTensor", SparseFixture)
+{
+ testTensorSlimeSerialization(f);
+}
+
+
+struct DenseFixture
+{
+ DenseFixture() {}
+
+ Tensor::UP createTensor(const DenseTensorCells &cells) {
+ return vespalib::tensor::TensorFactory::createDense(cells);
+ }
+
+ void assertSerialized(const vespalib::string &exp,
+ const DenseTensorCells &rhs) {
+ Tensor::UP rhsTensor(createTensor(rhs));
+ auto slime = SlimeBinaryFormat::serialize(*rhsTensor);
+ vespalib::slime::Memory memory_exp(exp);
+ vespalib::Slime expSlime;
+ size_t used = vespalib::slime::JsonFormat::decode(memory_exp, expSlime);
+ EXPECT_EQUAL(used, memory_exp.size);
+ EXPECT_EQUAL(expSlime, *slime);
+ }
+};
+
+
+TEST_F("test tensor slime serialization for DenseTensor", DenseFixture)
+{
+ TEST_DO(f.assertSerialized("{ dimensions: [], cells: ["
+ "{ address: { }, value: 0.0 }"
+ "] }", {}));
+ TEST_DO(f.assertSerialized("{ dimensions: [ 'x' ], cells: ["
+ "{ address: { x: '0' }, value: 0.0 }"
+ "] }",
+ { {{{"x",0}}, 0} }));
+ TEST_DO(f.assertSerialized("{ dimensions: [ 'x', 'y' ], cells: ["
+ "{ address: { x: '0', y: '0' }, value: 0.0 }"
+ "] }",
+ { {{{"x",0},{"y",0}}, 0} }));
+ TEST_DO(f.assertSerialized("{ dimensions: [ 'x' ],"
+ "cells: ["
+ "{ address: { x: '0' }, value: 0.0 },"
+ "{ address: { x: '1' }, value: 3.0 }"
+ "] }",
+ { {{{"x",1}}, 3} }));
+ TEST_DO(f.assertSerialized("{ dimensions: [ 'x', 'y' ],"
+ " cells: ["
+ "{ address: { x: '0', y: '0' }, value: 3.0 }"
+ "] }",
+ { {{{"x",0},{"y",0}}, 3} }));
+ TEST_DO(f.assertSerialized("{ dimensions: [ 'x', 'y' ],"
+ " cells: ["
+ "{ address: { x: '0', y: '0' }, value: 0.0 },"
+ "{ address: { x: '1', y: '0' }, value: 3.0 }"
+ "] }",
+ { {{{"x",1},{"y", 0}}, 3} }));
+ TEST_DO(f.assertSerialized("{ dimensions: [ 'x', 'y' ],"
+ " cells: ["
+ "{ address: { x: '0', y: '0' }, value: 0.0 },"
+ "{ address: { x: '0', y: '1' }, value: 0.0 },"
+ "{ address: { x: '0', y: '2' }, value: 0.0 },"
+ "{ address: { x: '0', y: '3' }, value: 3.0 }"
+ "] }",
+ { {{{"x",0},{"y",3}}, 3} }));
+}
+
+
+TEST_MAIN() { TEST_RUN_ALL(); }