Diffstat (limited to 'vespalib/src')
-rw-r--r--  vespalib/src/testlist.txt | 3
-rw-r--r--  vespalib/src/tests/tensor/dense_tensor_address_combiner/dense_tensor_address_combiner_test.cpp | 32
-rw-r--r--  vespalib/src/tests/tensor/dense_tensor_builder/dense_tensor_builder_test.cpp | 5
-rw-r--r--  vespalib/src/tests/tensor/dense_tensor_operations/.gitignore | 1
-rw-r--r--  vespalib/src/tests/tensor/dense_tensor_operations/CMakeLists.txt | 9
-rw-r--r--  vespalib/src/tests/tensor/dense_tensor_operations/FILES | 1
-rw-r--r--  vespalib/src/tests/tensor/dense_tensor_operations/dense_tensor_operations_test.cpp | 484
-rw-r--r--  vespalib/src/tests/tensor/sparse_tensor_builder/sparse_tensor_builder_test.cpp | 4
-rw-r--r--  vespalib/src/tests/tensor/tensor_function/.gitignore | 1
-rw-r--r--  vespalib/src/tests/tensor/tensor_function/CMakeLists.txt | 9
-rw-r--r--  vespalib/src/tests/tensor/tensor_function/tensor_function_test.cpp | 168
-rw-r--r--  vespalib/src/tests/tensor/tensor_mapper/tensor_mapper_test.cpp | 1
-rw-r--r--  vespalib/src/tests/tensor/tensor_operations/.gitignore | 1
-rw-r--r--  vespalib/src/tests/tensor/tensor_operations/CMakeLists.txt | 9
-rw-r--r--  vespalib/src/tests/tensor/tensor_operations/FILES | 1
-rw-r--r--  vespalib/src/tests/tensor/tensor_operations/tensor_operations_test.cpp | 631
-rw-r--r--  vespalib/src/vespa/vespalib/eval/tensor_spec.h | 7
-rw-r--r--  vespalib/src/vespa/vespalib/eval/test/tensor_conformance.cpp | 221
-rw-r--r--  vespalib/src/vespa/vespalib/tensor/CMakeLists.txt | 1
-rw-r--r--  vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.cpp | 120
-rw-r--r--  vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.h | 57
-rw-r--r--  vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.cpp | 45
-rw-r--r--  vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.h | 7
-rw-r--r--  vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_apply.hpp | 4
-rw-r--r--  vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_builder.cpp | 52
-rw-r--r--  vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_builder.h | 2
-rw-r--r--  vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_reduce.hpp | 61
-rw-r--r--  vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.cpp | 24
-rw-r--r--  vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.h | 5
-rw-r--r--  vespalib/src/vespa/vespalib/tensor/serialization/dense_binary_format.cpp | 28
-rw-r--r--  vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.cpp | 4
-rw-r--r--  vespalib/src/vespa/vespalib/tensor/tensor_function.cpp | 360
-rw-r--r--  vespalib/src/vespa/vespalib/tensor/tensor_function.h | 110
-rw-r--r--  vespalib/src/vespa/vespalib/tensor/tensor_mapper.cpp | 18
34 files changed, 418 insertions(+), 2068 deletions(-)
diff --git a/vespalib/src/testlist.txt b/vespalib/src/testlist.txt
index 67982805df7..02e66607eeb 100644
--- a/vespalib/src/testlist.txt
+++ b/vespalib/src/testlist.txt
@@ -64,13 +64,10 @@ tests/stringfmt
tests/sync
tests/tensor/sparse_tensor_builder
tests/tensor/dense_tensor_builder
-tests/tensor/dense_tensor_operations
tests/tensor/simple_tensor_builder
tests/tensor/tensor
tests/tensor/tensor_address
-tests/tensor/tensor_function
tests/tensor/tensor_mapper
-tests/tensor/tensor_operations
tests/tensor/tensor_performance
tests/tensor/tensor_serialization
tests/tensor/tensor_slime_serialization
diff --git a/vespalib/src/tests/tensor/dense_tensor_address_combiner/dense_tensor_address_combiner_test.cpp b/vespalib/src/tests/tensor/dense_tensor_address_combiner/dense_tensor_address_combiner_test.cpp
index 1192469e006..37f95172251 100644
--- a/vespalib/src/tests/tensor/dense_tensor_address_combiner/dense_tensor_address_combiner_test.cpp
+++ b/vespalib/src/tests/tensor/dense_tensor_address_combiner/dense_tensor_address_combiner_test.cpp
@@ -5,32 +5,28 @@
#include <vespa/vespalib/test/insertion_operators.h>
using namespace vespalib::tensor;
-using DimensionsMeta = DenseTensor::DimensionsMeta;
+using vespalib::eval::ValueType;
-std::ostream &
-operator<<(std::ostream &out, const DenseTensor::DimensionMeta &dimMeta)
+ValueType
+combine(const std::vector<ValueType::Dimension> &lhs,
+ const std::vector<ValueType::Dimension> &rhs)
{
- out << dimMeta.dimension() << "[" << dimMeta.size() << "]";
- return out;
-}
-
-DimensionsMeta
-combine(const DimensionsMeta &lhs, const DimensionsMeta &rhs)
-{
- return DenseTensorAddressCombiner::combineDimensions(lhs, rhs);
+ return DenseTensorAddressCombiner::combineDimensions(
+ ValueType::tensor_type(lhs),
+ ValueType::tensor_type(rhs));
}
TEST("require that dimensions can be combined")
{
- EXPECT_EQUAL(DimensionsMeta({{"a", 3}, {"b", 5}}), combine({{"a", 3}}, {{"b", 5}}));
- EXPECT_EQUAL(DimensionsMeta({{"a", 3}, {"b", 5}}), combine({{"a", 3}, {"b", 5}}, {{"b", 5}}));
- EXPECT_EQUAL(DimensionsMeta({{"a", 3}, {"b", 5}}), combine({{"a", 3}, {"b", 7}}, {{"b", 5}}));
- EXPECT_EQUAL(DimensionsMeta({{"a", 3}, {"b", 11}, {"c", 5}, {"d", 7}, {"e", 17}}),
+ EXPECT_EQUAL(ValueType::tensor_type({{"a", 3}, {"b", 5}}), combine({{"a", 3}}, {{"b", 5}}));
+ EXPECT_EQUAL(ValueType::tensor_type({{"a", 3}, {"b", 5}}), combine({{"a", 3}, {"b", 5}}, {{"b", 5}}));
+ EXPECT_EQUAL(ValueType::tensor_type({{"a", 3}, {"b", 5}}), combine({{"a", 3}, {"b", 7}}, {{"b", 5}}));
+ EXPECT_EQUAL(ValueType::tensor_type({{"a", 3}, {"b", 11}, {"c", 5}, {"d", 7}, {"e", 17}}),
combine({{"a", 3}, {"c", 5}, {"d", 7}},
{{"b", 11}, {"c", 13}, {"e", 17}}));
- EXPECT_EQUAL(DimensionsMeta({{"a", 3}, {"b", 11}, {"c", 5}, {"d", 7}, {"e", 17}}),
- combine({{"b", 11}, {"c", 13}, {"e", 17}},
- {{"a", 3}, {"c", 5}, {"d", 7}}));
+ EXPECT_EQUAL(ValueType::tensor_type({{"a", 3}, {"b", 11}, {"c", 5}, {"d", 7}, {"e", 17}}),
+ combine({{"b", 11}, {"c", 13}, {"e", 17}},
+ {{"a", 3}, {"c", 5}, {"d", 7}}));
}
TEST_MAIN() { TEST_RUN_ALL(); }
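
For reference, a sketch of the combining rule the expectations above encode: the result type carries the union of the input dimension names (sorted), and a dimension present in both inputs keeps the smaller of the two sizes. This is a hypothetical standalone illustration inferred from the EXPECT_EQUAL lines, not the vespalib implementation of DenseTensorAddressCombiner::combineDimensions.

    // Hypothetical sketch (not vespalib code): union of dimension names,
    // smaller size wins for dimensions present in both inputs.
    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <map>
    #include <string>
    #include <vector>

    struct Dim { std::string name; size_t size; };

    std::vector<Dim> combine_dims(const std::vector<Dim> &lhs, const std::vector<Dim> &rhs) {
        std::map<std::string, size_t> merged;                 // std::map keeps names sorted
        for (const auto &d : lhs) { merged[d.name] = d.size; }
        for (const auto &d : rhs) {
            auto it = merged.find(d.name);
            if (it == merged.end()) {
                merged[d.name] = d.size;                      // dimension only in rhs
            } else {
                it->second = std::min(it->second, d.size);    // shared dimension: smaller size
            }
        }
        std::vector<Dim> result;
        for (const auto &entry : merged) { result.push_back({entry.first, entry.second}); }
        return result;
    }

    int main() {
        auto r = combine_dims({{"a", 3}, {"b", 7}}, {{"b", 5}});
        assert(r.size() == 2 && r[1].name == "b" && r[1].size == 5);  // mirrors the test above
        return 0;
    }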
diff --git a/vespalib/src/tests/tensor/dense_tensor_builder/dense_tensor_builder_test.cpp b/vespalib/src/tests/tensor/dense_tensor_builder/dense_tensor_builder_test.cpp
index 595b3743625..5036f247db3 100644
--- a/vespalib/src/tests/tensor/dense_tensor_builder/dense_tensor_builder_test.cpp
+++ b/vespalib/src/tests/tensor/dense_tensor_builder/dense_tensor_builder_test.cpp
@@ -9,14 +9,15 @@ using namespace vespalib::tensor;
using vespalib::IllegalArgumentException;
using Builder = DenseTensorBuilder;
using vespalib::eval::TensorSpec;
+using vespalib::eval::ValueType;
void
-assertTensor(const DenseTensor::DimensionsMeta &expDims,
+assertTensor(const std::vector<ValueType::Dimension> &expDims,
const DenseTensor::Cells &expCells,
const Tensor &tensor)
{
const DenseTensor &realTensor = dynamic_cast<const DenseTensor &>(tensor);
- EXPECT_EQUAL(expDims, realTensor.dimensionsMeta());
+ EXPECT_EQUAL(ValueType::tensor_type(expDims), realTensor.type());
EXPECT_EQUAL(expCells, realTensor.cells());
}
diff --git a/vespalib/src/tests/tensor/dense_tensor_operations/.gitignore b/vespalib/src/tests/tensor/dense_tensor_operations/.gitignore
deleted file mode 100644
index 1180a740209..00000000000
--- a/vespalib/src/tests/tensor/dense_tensor_operations/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-vespalib_dense_tensor_operations_test_app
diff --git a/vespalib/src/tests/tensor/dense_tensor_operations/CMakeLists.txt b/vespalib/src/tests/tensor/dense_tensor_operations/CMakeLists.txt
deleted file mode 100644
index 4805b8dcf22..00000000000
--- a/vespalib/src/tests/tensor/dense_tensor_operations/CMakeLists.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-vespa_add_executable(vespalib_dense_tensor_operations_test_app TEST
- SOURCES
- dense_tensor_operations_test.cpp
- DEPENDS
- vespalib
- vespalib_vespalib_tensor
-)
-vespa_add_test(NAME vespalib_dense_tensor_operations_test_app COMMAND vespalib_dense_tensor_operations_test_app)
diff --git a/vespalib/src/tests/tensor/dense_tensor_operations/FILES b/vespalib/src/tests/tensor/dense_tensor_operations/FILES
deleted file mode 100644
index e9ba06f506a..00000000000
--- a/vespalib/src/tests/tensor/dense_tensor_operations/FILES
+++ /dev/null
@@ -1 +0,0 @@
-dense_tensor_operations_test.cpp
diff --git a/vespalib/src/tests/tensor/dense_tensor_operations/dense_tensor_operations_test.cpp b/vespalib/src/tests/tensor/dense_tensor_operations/dense_tensor_operations_test.cpp
deleted file mode 100644
index 69642fb1658..00000000000
--- a/vespalib/src/tests/tensor/dense_tensor_operations/dense_tensor_operations_test.cpp
+++ /dev/null
@@ -1,484 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/vespalib/testkit/test_kit.h>
-#include <vespa/vespalib/tensor/dense/dense_tensor.h>
-#include <vespa/vespalib/tensor/dense/dense_tensor_builder.h>
-#include <vespa/vespalib/tensor/tensor_function.h>
-#include <vespa/vespalib/tensor/tensor_visitor.h>
-
-using namespace vespalib::tensor;
-
-using DenseTensorCells = std::map<std::map<vespalib::string, size_t>, double>;
-
-namespace vespalib {
-namespace tensor {
-
-static bool operator==(const Tensor &lhs, const Tensor &rhs)
-{
- return lhs.equals(rhs);
-}
-
-}
-}
-
-//-----------------------------------------------------------------------------
-
-class MyInput : public TensorFunction::Input
-{
-private:
- std::vector<Tensor::CREF> tensors;
- std::vector<CellFunction::CREF> cell_functions;
- const Tensor &get_tensor(size_t id) const override {
- ASSERT_GREATER(tensors.size(), id);
- return tensors[id];
- }
- virtual const CellFunction &get_cell_function(size_t id) const override {
- ASSERT_GREATER(cell_functions.size(), id);
- return cell_functions[id];
- }
-public:
- size_t add(const Tensor &tensor) {
- size_t id = tensors.size();
- tensors.push_back(tensor);
- return id;
- }
- size_t add(const CellFunction &cell_function) {
- size_t id = cell_functions.size();
- cell_functions.push_back(cell_function);
- return id;
- }
-};
-
-const Tensor &eval_tensor_checked(function::Node &function_ir, const TensorFunction::Input &input) {
- ASSERT_TRUE(function_ir.type().is_tensor());
- TensorFunction &function = function_ir; // compile step
- const Tensor &result = function.eval(input).as_tensor;
- EXPECT_EQUAL(result.getType(), function_ir.type());
- return result;
-}
-
-const Tensor &eval_tensor_unchecked(function::Node &function_ir, const TensorFunction::Input &input) {
- TensorFunction &function = function_ir; // compile step
- return function.eval(input).as_tensor;
-}
-
-const Tensor &eval_tensor(function::Node &function_ir, const TensorFunction::Input &input, bool check_types) {
- if (check_types) {
- return eval_tensor_checked(function_ir, input);
- } else {
- return eval_tensor_unchecked(function_ir, input);
- }
-}
-
-double eval_number(function::Node &function_ir, const TensorFunction::Input &input) {
- ASSERT_TRUE(function_ir.type().is_double());
- TensorFunction &function = function_ir; // compile step
- return function.eval(input).as_double;
-}
-
-//-----------------------------------------------------------------------------
-
-template <typename BuilderType>
-struct Fixture
-{
- BuilderType _builder;
- Fixture() : _builder() {}
-
- Tensor::UP createTensor(const DenseTensorCells &cells) {
- std::map<std::string, size_t> dimensionSizes;
- for (const auto &cell : cells) {
- for (const auto &addressElem : cell.first) {
- dimensionSizes[addressElem.first] = std::max(dimensionSizes[addressElem.first],
- (addressElem.second + 1));
- }
- }
- std::map<std::string, typename BuilderType::Dimension> dimensionEnums;
- for (const auto &dimensionElem : dimensionSizes) {
- dimensionEnums[dimensionElem.first] =
- _builder.defineDimension(dimensionElem.first, dimensionElem.second);
- }
- for (const auto &cell : cells) {
- for (const auto &addressElem : cell.first) {
- const auto &dimension = addressElem.first;
- size_t label = addressElem.second;
- _builder.addLabel(dimensionEnums[dimension], label);
- }
- _builder.addCell(cell.second);
- }
- return _builder.build();
- }
- void assertAddImpl(const Tensor &exp, const Tensor &lhs, const Tensor &rhs, bool check_types) {
- MyInput input;
- function::Node_UP ir = function::add(function::input(lhs.getType(), input.add(lhs)),
- function::input(rhs.getType(), input.add(rhs)));
- EXPECT_EQUAL(exp, eval_tensor(*ir, input, check_types));
- }
- void assertAdd(const DenseTensorCells &exp,
- const DenseTensorCells &lhs, const DenseTensorCells &rhs, bool check_types = true) {
- assertAddImpl(*createTensor(exp), *createTensor(lhs), *createTensor(rhs), check_types);
- }
- void assertSubtractImpl(const Tensor &exp, const Tensor &lhs, const Tensor &rhs, bool check_types) {
- MyInput input;
- function::Node_UP ir = function::subtract(function::input(lhs.getType(), input.add(lhs)),
- function::input(rhs.getType(), input.add(rhs)));
- EXPECT_EQUAL(exp, eval_tensor(*ir, input, check_types));
- }
- void assertSubtract(const DenseTensorCells &exp,
- const DenseTensorCells &lhs,
- const DenseTensorCells &rhs, bool check_types = true) {
- assertSubtractImpl(*createTensor(exp), *createTensor(lhs), *createTensor(rhs), check_types);
- }
- void assertMinImpl(const Tensor &exp, const Tensor &lhs, const Tensor &rhs, bool check_types) {
- MyInput input;
- function::Node_UP ir = function::min(function::input(lhs.getType(), input.add(lhs)),
- function::input(rhs.getType(), input.add(rhs)));
- EXPECT_EQUAL(exp, eval_tensor(*ir, input, check_types));
- }
- void assertMin(const DenseTensorCells &exp, const DenseTensorCells &lhs,
- const DenseTensorCells &rhs, bool check_types = true) {
- assertMinImpl(*createTensor(exp), *createTensor(lhs), *createTensor(rhs), check_types);
- }
- void assertMaxImpl(const Tensor &exp, const Tensor &lhs, const Tensor &rhs, bool check_types) {
- MyInput input;
- function::Node_UP ir = function::max(function::input(lhs.getType(), input.add(lhs)),
- function::input(rhs.getType(), input.add(rhs)));
- EXPECT_EQUAL(exp, eval_tensor(*ir, input, check_types));
- }
- void assertMax(const DenseTensorCells &exp, const DenseTensorCells &lhs,
- const DenseTensorCells &rhs, bool check_types = true) {
- assertMaxImpl(*createTensor(exp), *createTensor(lhs), *createTensor(rhs), check_types);
- }
- void assertSumImpl(double exp, const Tensor &tensor) {
- MyInput input;
- function::Node_UP ir = function::sum(function::input(tensor.getType(), input.add(tensor)));
- EXPECT_EQUAL(exp, eval_number(*ir, input));
- }
- void assertSum(double exp, const DenseTensorCells &cells) {
- assertSumImpl(exp, *createTensor(cells));
- }
- void assertMatchImpl(const Tensor &exp, const Tensor &lhs, const Tensor &rhs, bool check_types) {
- MyInput input;
- function::Node_UP ir = function::match(function::input(lhs.getType(), input.add(lhs)),
- function::input(rhs.getType(), input.add(rhs)));
- EXPECT_EQUAL(exp, eval_tensor(*ir, input, check_types));
- }
- void assertMatch(const DenseTensorCells &exp, const DenseTensorCells &lhs,
- const DenseTensorCells &rhs, bool check_types = true) {
- assertMatchImpl(*createTensor(exp), *createTensor(lhs), *createTensor(rhs), check_types);
- }
- void assertApplyImpl(const Tensor &exp, const Tensor &tensor, const CellFunction &func) {
- MyInput input;
- function::Node_UP ir = function::apply(function::input(tensor.getType(), input.add(tensor)), input.add(func));
- EXPECT_EQUAL(exp, eval_tensor_checked(*ir, input));
- }
- void assertApply(const DenseTensorCells &exp, const DenseTensorCells &arg,
- const CellFunction &func) {
- assertApplyImpl(*createTensor(exp), *createTensor(arg), func);
- }
- void assertDimensionSumImpl(const Tensor &exp, const Tensor &tensor, const vespalib::string &dimension, bool check_types) {
- MyInput input;
- function::Node_UP ir = function::dimension_sum(function::input(tensor.getType(), input.add(tensor)), dimension);
- if (ir->type().is_error()) {
- // According to the ir, it is not allowed to sum over a
- // non-existing dimension. The current implementation
- // allows this, resulting in a tensor with no cells and
- // with all dimensions not sliced.
- EXPECT_EQUAL(exp, eval_tensor_unchecked(*ir, input));
- } else {
- EXPECT_EQUAL(exp, eval_tensor(*ir, input, check_types));
- }
- }
- void assertDimensionSum(const DenseTensorCells &exp,
- const DenseTensorCells &arg,
- const vespalib::string &dimension, bool check_types = true) {
- assertDimensionSumImpl(*createTensor(exp), *createTensor(arg), dimension, check_types);
- }
- void assertMultiplyImpl(const Tensor &exp, const Tensor &lhs, const Tensor &rhs, bool check_types) {
- MyInput input;
- function::Node_UP ir = function::multiply(function::input(lhs.getType(), input.add(lhs)),
- function::input(rhs.getType(), input.add(rhs)));
- EXPECT_EQUAL(exp, eval_tensor(*ir, input, check_types));
- }
- void assertMultiply(const DenseTensorCells &exp,
- const DenseTensorCells &lhs, const DenseTensorCells &rhs, bool check_types = true) {
- assertMultiplyImpl(*createTensor(exp), *createTensor(lhs), *createTensor(rhs), check_types);
- }
-};
-
-using DenseFixture = Fixture<DenseTensorBuilder>;
-
-
-template <typename FixtureType>
-void
-testTensorAdd(FixtureType &f)
-{
- TEST_DO(f.assertAdd({},{},{}, false));
- TEST_DO(f.assertAdd({ {{{"x",0}}, 8} },
- { {{{"x",0}}, 3} },
- { {{{"x",0}}, 5} }));
- TEST_DO(f.assertAdd({ {{{"x",0}}, -2} },
- { {{{"x",0}}, 3} },
- { {{{"x",0}}, -5} }));
- TEST_DO(f.assertAdd({ {{{"x",0}}, 10}, {{{"x",1}}, 16} },
- { {{{"x",0}}, 3}, {{{"x",1}}, 5} },
- { {{{"x",0}}, 7}, {{{"x",1}}, 11} }));
- TEST_DO(f.assertAdd({ {{{"x",0},{"y",0}}, 8} },
- { {{{"x",0},{"y",0}}, 3} },
- { {{{"x",0},{"y",0}}, 5} }));
- TEST_DO(f.assertAdd({ {{{"x",0}}, 3} },
- { {{{"x",0}}, 3} },
- { {{{"x",1}}, 5} }));
-}
-
-template <typename FixtureType>
-void
-testTensorSubtract(FixtureType &f)
-{
- TEST_DO(f.assertSubtract({},{},{}, false));
- TEST_DO(f.assertSubtract({ {{{"x",0}}, -2} },
- { {{{"x",0}}, 3} },
- { {{{"x",0}}, 5} }));
- TEST_DO(f.assertSubtract({ {{{"x",0}}, 8} },
- { {{{"x",0}}, 3} },
- { {{{"x",0}}, -5} }));
- TEST_DO(f.assertSubtract({ {{{"x",0}}, -4}, {{{"x",1}}, -6} },
- { {{{"x",0}}, 3}, {{{"x",1}}, 5} },
- { {{{"x",0}}, 7}, {{{"x",1}}, 11} }));
- TEST_DO(f.assertSubtract({ {{{"x",0},{"y",0}}, -2} },
- { {{{"x",0},{"y",0}}, 3} },
- { {{{"x",0},{"y",0}}, 5} }));
- TEST_DO(f.assertSubtract({ {{{"x",0}}, -5} },
- { {{{"x",1}}, 3} },
- { {{{"x",0}}, 5} }));
-}
-
-template <typename FixtureType>
-void
-testTensorMin(FixtureType &f)
-{
- TEST_DO(f.assertMin({},{},{}, false));
- TEST_DO(f.assertMin({ {{{"x",0}}, 3} },
- { {{{"x",0}}, 3} },
- { {{{"x",0}}, 5} }));
- TEST_DO(f.assertMin({ {{{"x",0}}, -5} },
- { {{{"x",0}}, 3} },
- { {{{"x",0}}, -5} }));
- TEST_DO(f.assertMin({ {{{"x",0}}, 3}, {{{"x",1}}, 5} },
- { {{{"x",0}}, 3}, {{{"x",1}}, 5} },
- { {{{"x",0}}, 7}, {{{"x",1}}, 11} }));
- TEST_DO(f.assertMin({ {{{"x",0},{"y",0}}, 3} },
- { {{{"x",0},{"y",0}}, 3} },
- { {{{"x",0},{"y",0}}, 5} }));
- TEST_DO(f.assertMin({ {{{"x",0}}, 0} },
- { {{{"x",1}}, 3} },
- { {{{"x",0}}, 5} }));
-}
-
-template <typename FixtureType>
-void
-testTensorMax(FixtureType &f)
-{
- TEST_DO(f.assertMax({},{},{}, false));
- TEST_DO(f.assertMax({ {{{"x",0}}, 5} },
- { {{{"x",0}}, 3} },
- { {{{"x",0}}, 5} }));
- TEST_DO(f.assertMax({ {{{"x",0}}, 3} },
- { {{{"x",0}}, 3} },
- { {{{"x",0}}, -5} }));
- TEST_DO(f.assertMax({ {{{"x",0}}, 7}, {{{"x",1}}, 11} },
- { {{{"x",0}}, 3}, {{{"x",1}}, 5} },
- { {{{"x",0}}, 7}, {{{"x",1}}, 11} }));
- TEST_DO(f.assertMax({ {{{"x",0},{"y",0}}, 5} },
- { {{{"x",0},{"y",0}}, 3} },
- { {{{"x",0},{"y",0}}, 5} }));
- TEST_DO(f.assertMax({ {{{"x",0}}, 5} },
- { {{{"x",1}}, 3} },
- { {{{"x",0}}, 5} }));
-}
-
-template <typename FixtureType>
-void
-testTensorSum(FixtureType &f)
-{
- TEST_DO(f.assertSum(0.0, {}));
- TEST_DO(f.assertSum(0.0, { {{{"x",0}}, 0} }));
- TEST_DO(f.assertSum(3.0, { {{{"x",0}}, 3} }));
- TEST_DO(f.assertSum(8.0, { {{{"x",0}}, 3}, {{{"x",1}}, 5} }));
- TEST_DO(f.assertSum(-2.0, { {{{"x",0}}, 3}, {{{"x",1}}, -5} }));
-}
-
-template <typename FixtureType>
-void
-testTensorMatch(FixtureType &f)
-{
- f.assertMatch({}, {}, {}, false);
- f.assertMatch({ {{{"x",0}}, 15} },
- { {{{"x",0}}, 3} },
- { {{{"x",0}}, 5} });
- f.assertMatch({ {{{"x",0}}, 0} },
- { {{{"x",0}}, 3} },
- { {{{"x",0}}, 0} });
- f.assertMatch({ {{{"x",0}}, -15} },
- { {{{"x",0}}, 3} },
- { {{{"x",0}}, -5} });
- f.assertMatch({ {{{"x",0}, {"y",0}}, 39},
- {{{"x",1}, {"y",0}}, 85},
- {{{"x",0}, {"y",1}}, 133},
- {{{"x",1}, {"y",1}}, 253} },
- { {{{"x",0}, {"y",0}}, 3},
- {{{"x",1}, {"y",0}}, 5},
- {{{"x",0}, {"y",1}}, 7},
- {{{"x",1}, {"y",1}}, 11} },
- { {{{"x",0}, {"y",0}}, 13},
- {{{"x",1}, {"y",0}}, 17},
- {{{"x",0}, {"y",1}}, 19},
- {{{"x",1}, {"y",1}}, 23} });
-}
-
-template <typename FixtureType>
-void
-testTensorMultiply(FixtureType &f)
-{
- f.assertMultiply({}, {}, {}, false);
- f.assertMultiply({ {{{"x",0}}, 15} },
- { {{{"x",0}}, 3} },
- { {{{"x",0}}, 5} });
- f.assertMultiply({ {{{"x",0}}, 21},
- {{{"x",1}}, 55} },
- { {{{"x",0}}, 3},
- {{{"x",1}}, 5} },
- { {{{"x",0}}, 7},
- {{{"x",1}}, 11} });
- f.assertMultiply({ {{{"x",0},{"y",0}}, 15} },
- { {{{"x",0}}, 3} },
- { {{{"y",0}}, 5} });
- f.assertMultiply({ {{{"x",0},{"y",0}}, 21},
- {{{"x",0},{"y",1}}, 33},
- {{{"x",1},{"y",0}}, 35},
- {{{"x",1},{"y",1}}, 55} },
- { {{{"x",0}}, 3},
- {{{"x",1}}, 5} },
- { {{{"y",0}}, 7},
- {{{"y",1}}, 11} });
- f.assertMultiply({ {{{"x",0},{"y",0},{"z",0}}, 7},
- {{{"x",0},{"y",0},{"z",1}}, 11},
- {{{"x",0},{"y",1},{"z",0}}, 26},
- {{{"x",0},{"y",1},{"z",1}}, 34},
- {{{"x",1},{"y",0},{"z",0}}, 21},
- {{{"x",1},{"y",0},{"z",1}}, 33},
- {{{"x",1},{"y",1},{"z",0}}, 65},
- {{{"x",1},{"y",1},{"z",1}}, 85} },
- { {{{"x",0},{"y",0}}, 1},
- {{{"x",0},{"y",1}}, 2},
- {{{"x",1},{"y",0}}, 3},
- {{{"x",1},{"y",1}}, 5} },
- { {{{"y",0},{"z",0}}, 7},
- {{{"y",0},{"z",1}}, 11},
- {{{"y",1},{"z",0}}, 13},
- {{{"y",1},{"z",1}}, 17} });
-}
-
-template <typename FixtureType>
-void
-testTensorMultiplePreservationOfDimensions(FixtureType &f)
-{
- (void) f;
-}
-
-struct MyFunction : public CellFunction
-{
- virtual double apply(double value) const override {
- return value + 5;
- }
-};
-
-template <typename FixtureType>
-void
-testTensorApply(FixtureType &f)
-{
- f.assertApply({ {{{"x",0}}, 6}, {{{"x",1}}, 2} },
- { {{{"x",0}}, 1}, {{{"x",1}}, -3} },
- MyFunction());
-}
-
-template <typename FixtureType>
-void
-testTensorSumDimension(FixtureType &f)
-{
- f.assertDimensionSum({ {{{"y",0}}, 4}, {{{"y",1}}, 12} },
- { {{{"x",0},{"y",0}}, 1},
- {{{"x",1},{"y",0}}, 3},
- {{{"x",0},{"y",1}}, 5},
- {{{"x",1},{"y",1}}, 7} }, "x");
-
- f.assertDimensionSum({ {{{"x",0}}, 6}, {{{"x",1}}, 10} },
- { {{{"x",0},{"y",0}}, 1},
- {{{"x",1},{"y",0}}, 3},
- {{{"x",0},{"y",1}}, 5},
- {{{"x",1},{"y",1}}, 7} }, "y");
- f.assertDimensionSum({ {{{"y",0}, {"z",0}}, 4},
- {{{"y",1}, {"z",0}}, 12},
- {{{"y",0}, {"z",1}}, 24},
- {{{"y",1}, {"z",1}}, 36} },
- { {{{"x",0},{"y",0}, {"z",0}}, 1},
- {{{"x",1},{"y",0}, {"z",0}}, 3},
- {{{"x",0},{"y",1}, {"z",0}}, 5},
- {{{"x",1},{"y",1}, {"z",0}}, 7},
- {{{"x",0},{"y",0}, {"z",1}}, 11},
- {{{"x",1},{"y",0}, {"z",1}}, 13},
- {{{"x",0},{"y",1}, {"z",1}}, 17},
- {{{"x",1},{"y",1}, {"z",1}}, 19} }, "x");
- f.assertDimensionSum({ {{{"x",0}, {"z",0}}, 6},
- {{{"x",1}, {"z",0}}, 10},
- {{{"x",0}, {"z",1}}, 28},
- {{{"x",1}, {"z",1}}, 32} },
- { {{{"x",0},{"y",0}, {"z",0}}, 1},
- {{{"x",1},{"y",0}, {"z",0}}, 3},
- {{{"x",0},{"y",1}, {"z",0}}, 5},
- {{{"x",1},{"y",1}, {"z",0}}, 7},
- {{{"x",0},{"y",0}, {"z",1}}, 11},
- {{{"x",1},{"y",0}, {"z",1}}, 13},
- {{{"x",0},{"y",1}, {"z",1}}, 17},
- {{{"x",1},{"y",1}, {"z",1}}, 19} }, "y");
- f.assertDimensionSum({ {{{"x",0}, {"y",0}}, 12},
- {{{"x",1}, {"y",0}}, 16},
- {{{"x",0}, {"y",1}}, 22},
- {{{"x",1}, {"y",1}}, 26} },
- { {{{"x",0},{"y",0}, {"z",0}}, 1},
- {{{"x",1},{"y",0}, {"z",0}}, 3},
- {{{"x",0},{"y",1}, {"z",0}}, 5},
- {{{"x",1},{"y",1}, {"z",0}}, 7},
- {{{"x",0},{"y",0}, {"z",1}}, 11},
- {{{"x",1},{"y",0}, {"z",1}}, 13},
- {{{"x",0},{"y",1}, {"z",1}}, 17},
- {{{"x",1},{"y",1}, {"z",1}}, 19} }, "z");
- f.assertDimensionSum({ {{{"x",0}}, 3} },
- { {{{"x",0}}, 3} },
- "y");
- f.assertDimensionSum({ {{}, 3} },
- { {{{"x",0}}, 3} },
- "x", false);
-}
-
-template <typename FixtureType>
-void
-testAllTensorOperations(FixtureType &f)
-{
- TEST_DO(testTensorAdd(f));
- TEST_DO(testTensorSubtract(f));
- TEST_DO(testTensorMin(f));
- TEST_DO(testTensorMax(f));
- TEST_DO(testTensorSum(f));
- TEST_DO(testTensorMatch(f));
- TEST_DO(testTensorMultiply(f));
- TEST_DO(testTensorMultiplePreservationOfDimensions(f));
- TEST_DO(testTensorApply(f));
- TEST_DO(testTensorSumDimension(f));
-}
-
-TEST_F("test tensor operations for DenseTensor", DenseFixture)
-{
- testAllTensorOperations(f);
-}
-
-TEST_MAIN() { TEST_RUN_ALL(); }
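
The deleted fixture above models dense cells as std::map<std::map<vespalib::string, size_t>, double>. Its dimension-sum cases, including the sum over a non-existing dimension discussed in the comment inside assertDimensionSumImpl, boil down to: drop the named dimension from every cell address and accumulate the values, and an address that never contained the dimension passes through unchanged. A minimal hypothetical sketch of that rule, not vespalib code:

    #include <cstddef>
    #include <map>
    #include <string>

    using Cells = std::map<std::map<std::string, size_t>, double>;

    // Sum out 'dimension': erase it from every address and accumulate values
    // that now share the same (reduced) address. Summing over a dimension the
    // tensor does not have leaves every address, and thus the tensor, unchanged.
    Cells dimension_sum(const Cells &cells, const std::string &dimension) {
        Cells result;
        for (const auto &cell : cells) {
            std::map<std::string, size_t> address = cell.first;
            address.erase(dimension);
            result[address] += cell.second;
        }
        return result;
    }

With the 2x2 input from testTensorSumDimension, dimension_sum(cells, "x") yields {y=0: 4, y=1: 12}, matching the first assertDimensionSum expectation above.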
diff --git a/vespalib/src/tests/tensor/sparse_tensor_builder/sparse_tensor_builder_test.cpp b/vespalib/src/tests/tensor/sparse_tensor_builder/sparse_tensor_builder_test.cpp
index 39e82abec7d..0d157012a90 100644
--- a/vespalib/src/tests/tensor/sparse_tensor_builder/sparse_tensor_builder_test.cpp
+++ b/vespalib/src/tests/tensor/sparse_tensor_builder/sparse_tensor_builder_test.cpp
@@ -66,8 +66,8 @@ TEST("require that tensor can be converted to tensor spec")
{
Tensor::UP tensor = buildTensor();
TensorSpec expSpec("tensor(a{},b{},c{},d{})");
- expSpec.add({{"a", "1"}, {"b", "2"}}, 10).
- add({{"c", "3"}, {"d", "4"}}, 20);
+ expSpec.add({{"a", "1"}, {"b", "2"}, {"c", ""}, {"d", ""}}, 10).
+ add({{"a", ""},{"b",""},{"c", "3"}, {"d", "4"}}, 20);
TensorSpec actSpec = tensor->toSpec();
EXPECT_EQUAL(expSpec, actSpec);
}
diff --git a/vespalib/src/tests/tensor/tensor_function/.gitignore b/vespalib/src/tests/tensor/tensor_function/.gitignore
deleted file mode 100644
index 9dff11e518c..00000000000
--- a/vespalib/src/tests/tensor/tensor_function/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-vespalib_tensor_function_test_app
diff --git a/vespalib/src/tests/tensor/tensor_function/CMakeLists.txt b/vespalib/src/tests/tensor/tensor_function/CMakeLists.txt
deleted file mode 100644
index cca15f932c9..00000000000
--- a/vespalib/src/tests/tensor/tensor_function/CMakeLists.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-vespa_add_executable(vespalib_tensor_function_test_app TEST
- SOURCES
- tensor_function_test.cpp
- DEPENDS
- vespalib
- vespalib_vespalib_tensor
-)
-vespa_add_test(NAME vespalib_tensor_function_test_app COMMAND vespalib_tensor_function_test_app)
diff --git a/vespalib/src/tests/tensor/tensor_function/tensor_function_test.cpp b/vespalib/src/tests/tensor/tensor_function/tensor_function_test.cpp
deleted file mode 100644
index 7a005a6aec4..00000000000
--- a/vespalib/src/tests/tensor/tensor_function/tensor_function_test.cpp
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vespalib/testkit/test_kit.h>
-#include <vespa/vespalib/tensor/tensor_function.h>
-
-using namespace vespalib::tensor;
-using vespalib::eval::ValueType;
-
-// Evaluation of tensor functions is tested in the 'tensor operations'
-// test. This test checks type resolving and will be extended with
-// inspectability of tensor functions when the implementation is
-// extended to support it.
-
-// Note: The 'tensor type' test verifies how tensor type dimensions
-// may be combined. Specifically the fact that common dense dimensions
-// must have the same size.
-
-function::Node_UP invalid_value() {
- return function::input(ValueType::error_type(), 0);
-}
-
-function::Node_UP number_value() {
- return function::sum(function::input(ValueType::tensor_type({}), 0));
-}
-
-ValueType sparse_type(const std::vector<vespalib::string> &dimensions_in) {
- std::vector<ValueType::Dimension> dimensions;
- std::copy(dimensions_in.begin(), dimensions_in.end(), std::back_inserter(dimensions));
- return ValueType::tensor_type(dimensions);
-}
-
-ValueType dense_type(const std::vector<ValueType::Dimension> &dimensions_in) {
- return ValueType::tensor_type(dimensions_in);
-}
-
-function::Node_UP sparse_value(const std::vector<vespalib::string> &arg) {
- return function::input(sparse_type(arg), 0);
-}
-
-function::Node_UP dense_value(std::vector<ValueType::Dimension> arg) {
- return function::input(dense_type(arg), 0);
-}
-
-TensorAddress address(const TensorAddress::Elements &elems) {
- return TensorAddress(elems);
-}
-
-
-TEST("require that helper functions produce appropriate types") {
- EXPECT_TRUE(invalid_value()->type().is_error());
- EXPECT_EQUAL(number_value()->type(), ValueType::double_type());
- EXPECT_EQUAL(sparse_value({"x", "y"})->type(), sparse_type({"x", "y"}));
- EXPECT_EQUAL(dense_value({{"x", 10}})->type(), dense_type({{"x", 10}}));
-}
-
-TEST("require that input tensors preserves type") {
- EXPECT_EQUAL(sparse_type({"x", "y"}),
- function::input(sparse_type({"x", "y"}), 0)->type());
- EXPECT_EQUAL(dense_type({{"x", 10}}),
- function::input(dense_type({{"x", 10}}), 0)->type());
-}
-
-TEST("require that input tensors with non-tensor types are invalid") {
- EXPECT_TRUE(function::input(ValueType::error_type(), 0)->type().is_error());
-}
-
-TEST("require that sum of tensor gives number as result") {
- EXPECT_EQUAL(ValueType::double_type(), function::sum(sparse_value({}))->type());
- EXPECT_EQUAL(ValueType::double_type(), function::sum(dense_value({}))->type());
-}
-
-TEST("require that sum of number gives number as result") {
- EXPECT_EQUAL(ValueType::double_type(), function::sum(number_value())->type());
-}
-
-TEST("require that dimension sum removes the summed dimension") {
- EXPECT_EQUAL(sparse_type({"x", "y"}),
- function::dimension_sum(sparse_value({"x", "y", "z"}), "z")->type());
- EXPECT_EQUAL(dense_type({{"y", 10}}),
- function::dimension_sum(dense_value({{"x", 10}, {"y", 10}}), "x")->type());
-}
-
-TEST("require that dimension sum over non-existing dimension is invalid") {
- EXPECT_TRUE(function::dimension_sum(sparse_value({"x", "y", "z"}), "w")->type().is_error());
- EXPECT_TRUE(function::dimension_sum(dense_value({{"x", 10}, {"y", 10}}), "z")->type().is_error());
-}
-
-TEST("require that apply preserves tensor type") {
- EXPECT_EQUAL(sparse_type({"x", "y"}),
- function::apply(sparse_value({"x", "y"}), 0)->type());
- EXPECT_EQUAL(dense_type({{"x", 10}}),
- function::apply(dense_value({{"x", 10}}), 0)->type());
-}
-
-TEST("require that tensor add result has union of input dimensions") {
- EXPECT_EQUAL(sparse_type({"x", "y", "z"}),
- function::add(sparse_value({"x", "y"}),
- sparse_value({"y", "z"}))->type());
- EXPECT_EQUAL(dense_type({{"x", 10}, {"y", 10}, {"z", 10}}),
- function::add(dense_value({{"x", 10}, {"y", 10}}),
- dense_value({{"y", 10}, {"z", 10}}))->type());
-}
-
-TEST("require that tensor subtract result has union of input dimensions") {
- EXPECT_EQUAL(sparse_type({"x", "y", "z"}),
- function::subtract(sparse_value({"x", "y"}),
- sparse_value({"y", "z"}))->type());
- EXPECT_EQUAL(dense_type({{"x", 10}, {"y", 10}, {"z", 10}}),
- function::subtract(dense_value({{"x", 10}, {"y", 10}}),
- dense_value({{"y", 10}, {"z", 10}}))->type());
-}
-
-TEST("require that tensor multiply result has union of input dimensions") {
- EXPECT_EQUAL(sparse_type({"x", "y", "z"}),
- function::multiply(sparse_value({"x", "y"}),
- sparse_value({"y", "z"}))->type());
- EXPECT_EQUAL(dense_type({{"x", 10}, {"y", 10}, {"z", 10}}),
- function::multiply(dense_value({{"x", 10}, {"y", 10}}),
- dense_value({{"y", 10}, {"z", 10}}))->type());
-}
-
-TEST("require that tensor min result has union of input dimensions") {
- EXPECT_EQUAL(sparse_type({"x", "y", "z"}),
- function::min(sparse_value({"x", "y"}),
- sparse_value({"y", "z"}))->type());
- EXPECT_EQUAL(dense_type({{"x", 10}, {"y", 10}, {"z", 10}}),
- function::min(dense_value({{"x", 10}, {"y", 10}}),
- dense_value({{"y", 10}, {"z", 10}}))->type());
-}
-
-TEST("require that tensor max result has union of input dimensions") {
- EXPECT_EQUAL(sparse_type({"x", "y", "z"}),
- function::max(sparse_value({"x", "y"}),
- sparse_value({"y", "z"}))->type());
- EXPECT_EQUAL(dense_type({{"x", 10}, {"y", 10}, {"z", 10}}),
- function::max(dense_value({{"x", 10}, {"y", 10}}),
- dense_value({{"y", 10}, {"z", 10}}))->type());
-}
-
-TEST("require that tensor match result has intersection of input dimensions") {
- EXPECT_EQUAL(sparse_type({"y"}),
- function::match(sparse_value({"x", "y"}),
- sparse_value({"y", "z"}))->type());
- EXPECT_EQUAL(dense_type({{"y", 10}}),
- function::match(dense_value({{"x", 10}, {"y", 10}}),
- dense_value({{"y", 10}, {"z", 10}}))->type());
-}
-
-TEST("require that tensor operations on non-tensor types are invalid") {
- EXPECT_TRUE(function::sum(invalid_value())->type().is_error());
- EXPECT_TRUE(function::dimension_sum(invalid_value(), "x")->type().is_error());
- EXPECT_TRUE(function::dimension_sum(number_value(), "x")->type().is_error());
- EXPECT_TRUE(function::apply(invalid_value(), 0)->type().is_error());
- EXPECT_TRUE(function::apply(number_value(), 0)->type().is_error());
- EXPECT_TRUE(function::add(invalid_value(), invalid_value())->type().is_error());
- EXPECT_TRUE(function::add(number_value(), number_value())->type().is_error());
- EXPECT_TRUE(function::subtract(invalid_value(), invalid_value())->type().is_error());
- EXPECT_TRUE(function::subtract(number_value(), number_value())->type().is_error());
- EXPECT_TRUE(function::multiply(invalid_value(), invalid_value())->type().is_error());
- EXPECT_TRUE(function::multiply(number_value(), number_value())->type().is_error());
- EXPECT_TRUE(function::min(invalid_value(), invalid_value())->type().is_error());
- EXPECT_TRUE(function::min(number_value(), number_value())->type().is_error());
- EXPECT_TRUE(function::max(invalid_value(), invalid_value())->type().is_error());
- EXPECT_TRUE(function::max(number_value(), number_value())->type().is_error());
- EXPECT_TRUE(function::match(invalid_value(), invalid_value())->type().is_error());
- EXPECT_TRUE(function::match(number_value(), number_value())->type().is_error());
-}
-
-TEST_MAIN() { TEST_RUN_ALL(); }
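
The deleted test above documents the type-resolution rules for the tensor function IR: sum gives a double, dimension_sum removes the named dimension, apply preserves the input type, add/subtract/multiply/min/max produce the union of the input dimensions, and match produces the intersection. As a hypothetical standalone sketch of the union/intersection part over dimension-name sets (not the vespalib ValueType API):

    #include <algorithm>
    #include <cassert>
    #include <iterator>
    #include <set>
    #include <string>

    using Dims = std::set<std::string>;   // dimension names, kept sorted

    // add/subtract/multiply/min/max: the result spans the union of input dimensions.
    Dims union_dims(const Dims &lhs, const Dims &rhs) {
        Dims out;
        std::set_union(lhs.begin(), lhs.end(), rhs.begin(), rhs.end(),
                       std::inserter(out, out.begin()));
        return out;
    }

    // match: the result spans the intersection of input dimensions.
    Dims intersect_dims(const Dims &lhs, const Dims &rhs) {
        Dims out;
        std::set_intersection(lhs.begin(), lhs.end(), rhs.begin(), rhs.end(),
                              std::inserter(out, out.begin()));
        return out;
    }

    int main() {
        assert(union_dims({"x", "y"}, {"y", "z"}) == Dims({"x", "y", "z"}));  // as in the add/subtract/multiply/min/max tests
        assert(intersect_dims({"x", "y"}, {"y", "z"}) == Dims({"y"}));        // as in the match test
        return 0;
    }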
diff --git a/vespalib/src/tests/tensor/tensor_mapper/tensor_mapper_test.cpp b/vespalib/src/tests/tensor/tensor_mapper/tensor_mapper_test.cpp
index 6977a857944..18d8a8dd508 100644
--- a/vespalib/src/tests/tensor/tensor_mapper/tensor_mapper_test.cpp
+++ b/vespalib/src/tests/tensor/tensor_mapper/tensor_mapper_test.cpp
@@ -8,7 +8,6 @@
#include <vespa/vespalib/tensor/dense/dense_tensor_builder.h>
#include <vespa/vespalib/tensor/types.h>
#include <vespa/vespalib/tensor/tensor_factory.h>
-#include <vespa/vespalib/tensor/tensor_function.h>
#include <vespa/vespalib/tensor/tensor_mapper.h>
#include <vespa/vespalib/tensor/default_tensor.h>
#include <ostream>
diff --git a/vespalib/src/tests/tensor/tensor_operations/.gitignore b/vespalib/src/tests/tensor/tensor_operations/.gitignore
deleted file mode 100644
index 2b54654dfb3..00000000000
--- a/vespalib/src/tests/tensor/tensor_operations/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-vespalib_tensor_operations_test_app
diff --git a/vespalib/src/tests/tensor/tensor_operations/CMakeLists.txt b/vespalib/src/tests/tensor/tensor_operations/CMakeLists.txt
deleted file mode 100644
index 9b18e5384e6..00000000000
--- a/vespalib/src/tests/tensor/tensor_operations/CMakeLists.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-vespa_add_executable(vespalib_tensor_operations_test_app TEST
- SOURCES
- tensor_operations_test.cpp
- DEPENDS
- vespalib
- vespalib_vespalib_tensor
-)
-vespa_add_test(NAME vespalib_tensor_operations_test_app COMMAND vespalib_tensor_operations_test_app)
diff --git a/vespalib/src/tests/tensor/tensor_operations/FILES b/vespalib/src/tests/tensor/tensor_operations/FILES
deleted file mode 100644
index b3cc5fa2a92..00000000000
--- a/vespalib/src/tests/tensor/tensor_operations/FILES
+++ /dev/null
@@ -1 +0,0 @@
-tensor_operations_test.cpp
diff --git a/vespalib/src/tests/tensor/tensor_operations/tensor_operations_test.cpp b/vespalib/src/tests/tensor/tensor_operations/tensor_operations_test.cpp
deleted file mode 100644
index 5ad26e979c5..00000000000
--- a/vespalib/src/tests/tensor/tensor_operations/tensor_operations_test.cpp
+++ /dev/null
@@ -1,631 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/vespalib/testkit/test_kit.h>
-#include <vespa/vespalib/tensor/sparse/sparse_tensor.h>
-#include <vespa/vespalib/tensor/sparse/sparse_tensor_builder.h>
-#include <vespa/vespalib/tensor/types.h>
-#include <vespa/vespalib/tensor/tensor_factory.h>
-#include <vespa/vespalib/tensor/tensor_function.h>
-#include <vespa/vespalib/tensor/tensor_visitor.h>
-#include <iostream>
-
-using namespace vespalib::tensor;
-
-namespace vespalib {
-namespace tensor {
-
-static bool operator==(const Tensor &lhs, const Tensor &rhs)
-{
- return lhs.equals(rhs);
-}
-
-}
-}
-
-//-----------------------------------------------------------------------------
-
-class MyInput : public TensorFunction::Input
-{
-private:
- std::vector<Tensor::CREF> tensors;
- std::vector<CellFunction::CREF> cell_functions;
- const Tensor &get_tensor(size_t id) const override {
- ASSERT_GREATER(tensors.size(), id);
- return tensors[id];
- }
- virtual const CellFunction &get_cell_function(size_t id) const override {
- ASSERT_GREATER(cell_functions.size(), id);
- return cell_functions[id];
- }
-public:
- size_t add(const Tensor &tensor) {
- size_t id = tensors.size();
- tensors.push_back(tensor);
- return id;
- }
- size_t add(const CellFunction &cell_function) {
- size_t id = cell_functions.size();
- cell_functions.push_back(cell_function);
- return id;
- }
-};
-
-const Tensor &eval_tensor_checked(function::Node &function_ir, const TensorFunction::Input &input) {
- ASSERT_TRUE(function_ir.type().is_tensor());
- TensorFunction &function = function_ir; // compile step
- const Tensor &result = function.eval(input).as_tensor;
- EXPECT_EQUAL(result.getType(), function_ir.type());
- return result;
-}
-
-const Tensor &eval_tensor_unchecked(function::Node &function_ir, const TensorFunction::Input &input) {
- TensorFunction &function = function_ir; // compile step
- return function.eval(input).as_tensor;
-}
-
-const Tensor &eval_tensor(function::Node &function_ir, const TensorFunction::Input &input, bool check_types) {
- if (check_types) {
- return eval_tensor_checked(function_ir, input);
- } else {
- return eval_tensor_unchecked(function_ir, input);
- }
-}
-
-double eval_number(function::Node &function_ir, const TensorFunction::Input &input) {
- ASSERT_TRUE(function_ir.type().is_double());
- TensorFunction &function = function_ir; // compile step
- return function.eval(input).as_double;
-}
-
-//-----------------------------------------------------------------------------
-
-template <typename BuilderType>
-struct Fixture
-{
- BuilderType _builder;
- Fixture() : _builder() {}
-
- Tensor::UP createTensor(const TensorCells &cells) {
- return TensorFactory::create(cells, _builder);
- }
- Tensor::UP createTensor(const TensorCells &cells, const TensorDimensions &dimensions) {
- return TensorFactory::create(cells, dimensions, _builder);
- }
- void assertEquals(const TensorCells &lhs,
- const TensorDimensions &lhsDimensions,
- const TensorCells &rhs,
- const TensorDimensions &rhsDimensions) {
- EXPECT_EQUAL(*createTensor(lhs, lhsDimensions),
- *createTensor(rhs, rhsDimensions));
- }
- void assertEquals(const TensorCells &lhs, const TensorCells &rhs) {
- EXPECT_EQUAL(*createTensor(lhs), *createTensor(rhs));
- }
- void assertNotEquals(const TensorCells &lhs, const TensorCells &rhs) {
- EXPECT_NOT_EQUAL(*createTensor(lhs), *createTensor(rhs));
- }
- void assertNotEquals(const TensorCells &lhs,
- const TensorDimensions &lhsDimensions,
- const TensorCells &rhs,
- const TensorDimensions &rhsDimensions) {
- EXPECT_NOT_EQUAL(*createTensor(lhs, lhsDimensions),
- *createTensor(rhs, rhsDimensions));
- }
- void assertAddImpl(const Tensor &exp, const Tensor &lhs, const Tensor &rhs, bool check_types) {
- MyInput input;
- function::Node_UP ir = function::add(function::input(lhs.getType(), input.add(lhs)),
- function::input(rhs.getType(), input.add(rhs)));
- EXPECT_EQUAL(exp, eval_tensor(*ir, input, check_types));
- }
- void assertAdd(const TensorCells &exp, const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) {
- assertAddImpl(*createTensor(exp), *createTensor(lhs), *createTensor(rhs), check_types);
- }
- void assertAdd(const TensorCells &exp, const TensorDimensions &expDimensions,
- const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) {
- assertAddImpl(*createTensor(exp, expDimensions), *createTensor(lhs), *createTensor(rhs), check_types);
- }
- void assertSubtractImpl(const Tensor &exp, const Tensor &lhs, const Tensor &rhs, bool check_types) {
- MyInput input;
- function::Node_UP ir = function::subtract(function::input(lhs.getType(), input.add(lhs)),
- function::input(rhs.getType(), input.add(rhs)));
- EXPECT_EQUAL(exp, eval_tensor(*ir, input, check_types));
- }
- void assertSubtract(const TensorCells &exp, const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) {
- assertSubtractImpl(*createTensor(exp), *createTensor(lhs), *createTensor(rhs), check_types);
- }
- void assertSubtract(const TensorCells &exp, const TensorDimensions &expDimensions, const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) {
- assertSubtractImpl(*createTensor(exp, expDimensions), *createTensor(lhs), *createTensor(rhs), check_types);
- }
- void assertMinImpl(const Tensor &exp, const Tensor &lhs, const Tensor &rhs, bool check_types) {
- MyInput input;
- function::Node_UP ir = function::min(function::input(lhs.getType(), input.add(lhs)),
- function::input(rhs.getType(), input.add(rhs)));
- EXPECT_EQUAL(exp, eval_tensor(*ir, input, check_types));
- }
- void assertMin(const TensorCells &exp, const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) {
- assertMinImpl(*createTensor(exp), *createTensor(lhs), *createTensor(rhs), check_types);
- }
- void assertMin(const TensorCells &exp, const TensorDimensions &expDimensions, const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) {
- assertMinImpl(*createTensor(exp, expDimensions), *createTensor(lhs), *createTensor(rhs), check_types);
- }
- void assertMaxImpl(const Tensor &exp, const Tensor &lhs, const Tensor &rhs, bool check_types) {
- MyInput input;
- function::Node_UP ir = function::max(function::input(lhs.getType(), input.add(lhs)),
- function::input(rhs.getType(), input.add(rhs)));
- EXPECT_EQUAL(exp, eval_tensor(*ir, input, check_types));
- }
- void assertMax(const TensorCells &exp, const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) {
- assertMaxImpl(*createTensor(exp), *createTensor(lhs), *createTensor(rhs), check_types);
- }
- void assertMax(const TensorCells &exp, const TensorDimensions &expDimensions, const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) {
- assertMaxImpl(*createTensor(exp, expDimensions), *createTensor(lhs), *createTensor(rhs), check_types);
- }
- void assertSumImpl(double exp, const Tensor &tensor) {
- MyInput input;
- function::Node_UP ir = function::sum(function::input(tensor.getType(), input.add(tensor)));
- EXPECT_EQUAL(exp, eval_number(*ir, input));
- }
- void assertSum(double exp, const TensorCells &cells) {
- assertSumImpl(exp, *createTensor(cells));
- }
- void assertMatchImpl(const Tensor &exp, const Tensor &lhs, const Tensor &rhs) {
- MyInput input;
- function::Node_UP ir = function::match(function::input(lhs.getType(), input.add(lhs)),
- function::input(rhs.getType(), input.add(rhs)));
- // The match operation currently ends up the union of input
- // dimensions. It should be the intersection of input
- // dimensions as claimed by the intermediate
- // representation. The tensor result type checking is disabled
- // until the corresponding bug is fixed.
- EXPECT_EQUAL(exp, eval_tensor_unchecked(*ir, input)); // UNCHECKED (ref VESPA-1868)
- }
- void assertMatch(const TensorCells &exp, const TensorCells &lhs, const TensorCells &rhs) {
- assertMatchImpl(*createTensor(exp), *createTensor(lhs), *createTensor(rhs));
- }
- void assertMatch(const TensorCells &expTensor, const TensorDimensions &expDimensions,
- const TensorCells &lhs, const TensorCells &rhs) {
- assertMatchImpl(*createTensor(expTensor, expDimensions), *createTensor(lhs), *createTensor(rhs));
- }
- void assertMultiplyImpl(const Tensor &exp, const Tensor &lhs, const Tensor &rhs, bool check_types) {
- MyInput input;
- function::Node_UP ir = function::multiply(function::input(lhs.getType(), input.add(lhs)),
- function::input(rhs.getType(), input.add(rhs)));
- EXPECT_EQUAL(exp, eval_tensor(*ir, input, check_types));
- }
- void assertMultiply(const TensorCells &exp, const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) {
- assertMultiplyImpl(*createTensor(exp), *createTensor(lhs), *createTensor(rhs), check_types);
- }
- void assertMultiply(const TensorCells &expTensor, const TensorDimensions &expDimensions,
- const TensorCells &lhs, const TensorCells &rhs) {
- assertMultiplyImpl(*createTensor(expTensor, expDimensions), *createTensor(lhs), *createTensor(rhs), true);
- }
- void assertMultiplyImpl(const Tensor &exp, const Tensor &arg1, const Tensor &arg2, const Tensor &arg3) {
- MyInput input;
- function::Node_UP ir = function::multiply(
- function::multiply(function::input(arg1.getType(), input.add(arg1)),
- function::input(arg2.getType(), input.add(arg2))),
- function::input(arg3.getType(), input.add(arg3)));
- EXPECT_EQUAL(exp, eval_tensor_checked(*ir, input));
- }
- void assertMultiply(const TensorCells &expTensor, const TensorDimensions &expDimensions,
- const TensorCells &arg1, const TensorCells &arg2, const TensorCells &arg3) {
- assertMultiplyImpl(*createTensor(expTensor, expDimensions), *createTensor(arg1), *createTensor(arg2), *createTensor(arg3));
- }
- void assertApplyImpl(const Tensor &exp, const Tensor &tensor, const CellFunction &func) {
- MyInput input;
- function::Node_UP ir = function::apply(function::input(tensor.getType(), input.add(tensor)), input.add(func));
- EXPECT_EQUAL(exp, eval_tensor_checked(*ir, input));
- }
- void assertApply(const TensorCells &exp, const TensorCells &arg, const CellFunction &func) {
- assertApplyImpl(*createTensor(exp), *createTensor(arg), func);
- }
- void assertDimensionSumImpl(const Tensor &exp, const Tensor &tensor, const vespalib::string &dimension) {
- MyInput input;
- function::Node_UP ir = function::dimension_sum(function::input(tensor.getType(), input.add(tensor)), dimension);
- EXPECT_EQUAL(exp, eval_tensor_checked(*ir, input));
- }
- void assertDimensionSum(const TensorCells &exp, const TensorCells &arg,
- const vespalib::string &dimension) {
- assertDimensionSumImpl(*createTensor(exp), *createTensor(arg), dimension);
- }
-};
-
-using SparseFixture = Fixture<SparseTensorBuilder>;
-
-
-template <typename FixtureType>
-void
-testTensorEquals(FixtureType &f)
-{
- TEST_DO(f.assertEquals({}, {}));
- TEST_DO(f.assertNotEquals({ {{{"x","1"}}, 3} }, {}));
- TEST_DO(f.assertNotEquals({}, { {{{"x","1"}}, 3} }));
- TEST_DO(f.assertEquals({ {{{"x","1"}}, 3} }, { {{{"x","1"}}, 3} }));
- TEST_DO(f.assertNotEquals({ {{{"x","1"}}, 3} }, { {{{"x","1"}}, 4} }));
- TEST_DO(f.assertNotEquals({ {{{"x","1"}}, 3} }, { {{{"x","2"}}, 3} }));
- TEST_DO(f.assertNotEquals({ {{{"x","1"}}, 3} }, { {{{"y","1"}}, 3} }));
- TEST_DO(f.assertEquals({ {{{"x","1"}}, 3} }, {"x"},
- { {{{"x","1"}}, 3} }, {"x"}));
- TEST_DO(f.assertNotEquals({ {{{"x","1"}}, 3} }, {"x"},
- { {{{"x","1"}}, 4} }, {"x"}));
- TEST_DO(f.assertNotEquals({ {{{"x","1"}}, 3} }, {"x"},
- { {{{"x","2"}}, 3} }, {"x"}));
- TEST_DO(f.assertNotEquals({ {{{"x","1"}}, 3} }, {"x"},
- { {{{"x","2"}}, 3} }, {"x"}));
- TEST_DO(f.assertEquals({ {{{"x","1"}}, 3} }, {"x", "y"},
- { {{{"x","1"}}, 3} }, {"x", "y"}));
- TEST_DO(f.assertNotEquals({ {{{"x","1"}}, 3} }, {"x", "y"},
- { {{{"x","1"}}, 3} }, {"x", "z"}));
- TEST_DO(f.assertNotEquals({ {{{"x","1"}}, 3} }, {"x", "y"},
- { {{{"y","1"}}, 3} }, {"y", "z"}));
-}
-
-template <typename FixtureType>
-void
-testTensorAdd(FixtureType &f)
-{
- f.assertAdd({},{},{}, false);
- TEST_DO(f.assertAdd({}, { "x" },
- { {{{"x","1"}}, 3} },
- { {{{"x","2"}}, 5} }));
- TEST_DO(f.assertAdd({ {{{"x","1"}}, 8} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, 5} }));
- TEST_DO(f.assertAdd({ {{{"x","1"}}, -2} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, -5} }));
- TEST_DO(f.assertAdd({ {{{"x","1"}}, 0} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, -3} }));
- TEST_DO(f.assertAdd({ {{{"x","1"},{"z","3"}}, 14}, {{{"y","2"}}, 12} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }));
- TEST_DO(f.assertAdd({ {{{"x","1"},{"z","3"}}, 14}, {{{"y","2"}}, 12} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }));
- TEST_DO(f.assertAdd({ {{{"y","2"}}, 12} }, { "y", "z" },
- { {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }));
- TEST_DO(f.assertAdd({ {{{"y","2"}}, 12} }, { "y", "z" },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 5} }));
- TEST_DO(f.assertAdd({ {{{"y","2"}}, 12} }, { "x", "y" },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7} }));
- TEST_DO(f.assertAdd({ {{{"y","2"}}, 12} }, { "x", "y" },
- { {{{"y","2"}}, 7} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }));
- TEST_DO(f.assertAdd({ {{{"x","1"},{"z","3"}}, 14} },
- { {{{"x","1"}}, 3} },
- { {{{"z","3"}}, 11} }));
- TEST_DO(f.assertAdd({ {{{"x","1"},{"z","3"}}, 14} },
- { {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3} }));
-}
-
-template <typename FixtureType>
-void
-testTensorSubtract(FixtureType &f)
-{
- f.assertSubtract({},{},{}, false);
- TEST_DO(f.assertSubtract({}, { "x" },
- { {{{"x","1"}}, 3} },
- { {{{"x","2"}}, 5} }));
- TEST_DO(f.assertSubtract({ {{{"x","1"}}, -2} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, 5} }));
- TEST_DO(f.assertSubtract({ {{{"x","1"}}, 8} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, -5} }));
- TEST_DO(f.assertSubtract({ {{{"x","1"}}, 0} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, 3} }));
- TEST_DO(f.assertSubtract({ {{{"x","1"},{"z","3"}}, -8}, {{{"y","2"}},-2} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }));
- TEST_DO(f.assertSubtract({ {{{"x","1"},{"z","3"}}, 8}, {{{"y","2"}}, 2} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }));
- TEST_DO(f.assertSubtract({ {{{"y","2"}},-2} }, { "y", "z" },
- { {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }));
- TEST_DO(f.assertSubtract({ {{{"y","2"}}, 2} }, { "y", "z" },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 5} }));
- TEST_DO(f.assertSubtract({ {{{"y","2"}},-2} }, { "x", "y" },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7} }));
- TEST_DO(f.assertSubtract({ {{{"y","2"}}, 2} }, { "x", "y" },
- { {{{"y","2"}}, 7} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }));
- TEST_DO(f.assertSubtract({ {{{"x","1"},{"z","3"}}, -8} },
- { {{{"x","1"}}, 3} },
- { {{{"z","3"}}, 11} }));
- TEST_DO(f.assertSubtract({ {{{"x","1"},{"z","3"}}, 8} },
- { {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3} }));
-}
-
-template <typename FixtureType>
-void
-testTensorMin(FixtureType &f)
-{
- f.assertMin({},{},{}, false);
- TEST_DO(f.assertMin({}, { "x" },
- { {{{"x","1"}}, 3} },
- { {{{"x","2"}}, 5} }));
- TEST_DO(f.assertMin({ {{{"x","1"}}, 3} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, 5} }));
- TEST_DO(f.assertMin({ {{{"x","1"}}, -5} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, -5} }));
- TEST_DO(f.assertMin({}, { "x" },
- { {{{"x","1"}}, 3} },
- { {{{"x","2"}}, 0} }));
- TEST_DO(f.assertMin({ {{{"x","1"},{"z","3"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }));
- TEST_DO(f.assertMin({ {{{"x","1"},{"z","3"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }));
- TEST_DO(f.assertMin({ {{{"y","2"}}, 5} }, { "y", "z" },
- { {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }));
- TEST_DO(f.assertMin({ {{{"y","2"}}, 5} }, { "y", "z" },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 5} }));
- TEST_DO(f.assertMin({ {{{"y","2"}}, 5} }, { "x", "y" },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7} }));
- TEST_DO(f.assertMin({ {{{"y","2"}}, 5} }, { "x", "y" },
- { {{{"y","2"}}, 7} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }));
- TEST_DO(f.assertMin({ {{{"x","1"},{"z","3"}}, 3} },
- { {{{"x","1"}}, 3} },
- { {{{"z","3"}}, 11} }));
- TEST_DO(f.assertMin({ {{{"x","1"},{"z","3"}}, 3} },
- { {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3} }));
-}
-
-template <typename FixtureType>
-void
-testTensorMax(FixtureType &f)
-{
- f.assertMax({},{},{}, false);
- TEST_DO(f.assertMax({}, { "x" },
- { {{{"x","1"}}, 3} },
- { {{{"x","2"}}, 5} }));
- TEST_DO(f.assertMax({ {{{"x","1"}}, 5} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, 5} }));
- TEST_DO(f.assertMax({ {{{"x","1"}}, 3} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, -5} }));
- TEST_DO(f.assertMax({}, { "x" },
- { {{{"x","1"}}, 3} },
- { {{{"x","2"}}, 0} }));
- TEST_DO(f.assertMax({}, { "x" },
- { {{{"x","1"}}, 3} },
- { {{{"x","2"}}, -5} }));
- TEST_DO(f.assertMax({ {{{"x","1"},{"z","3"}}, 11}, {{{"y","2"}}, 7} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }));
- TEST_DO(f.assertMax({ {{{"x","1"},{"z","3"}}, 11}, {{{"y","2"}}, 7} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }));
- TEST_DO(f.assertMax({ {{{"y","2"}}, 7} }, { "y", "z" },
- { {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }));
- TEST_DO(f.assertMax({ {{{"y","2"}}, 7} }, { "y", "z" },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 5} }));
- TEST_DO(f.assertMax({ {{{"y","2"}}, 7} }, { "x", "y" },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7} }));
- TEST_DO(f.assertMax({ {{{"y","2"}}, 7} }, { "x", "y" },
- { {{{"y","2"}}, 7} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }));
- TEST_DO(f.assertMax({ {{{"x","1"},{"z","3"}}, 11} },
- { {{{"x","1"}}, 3} },
- { {{{"z","3"}}, 11} }));
- TEST_DO(f.assertMax({ {{{"x","1"},{"z","3"}}, 11} },
- { {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3} }));
-}
-
-template <typename FixtureType>
-void
-testTensorSum(FixtureType &f)
-{
- f.assertSum(0.0, {});
- f.assertSum(0.0, { {{{"x","1"}}, 0} });
- f.assertSum(3.0, { {{{"x","1"}}, 3} });
- f.assertSum(8.0, { {{{"x","1"}}, 3}, {{{"x","2"}}, 5} });
- f.assertSum(-2.0, { {{{"x","1"}}, 3}, {{{"x","2"}}, -5} });
-}
-
-template <typename FixtureType>
-void
-testTensorMatch(FixtureType &f)
-{
- TEST_DO(f.assertMatch({}, {}, {}));
- TEST_DO(f.assertMatch({}, {"x"},
- { {{{"x","1"}}, 3} },
- { {{{"x","2"}}, 5} }));
- TEST_DO(f.assertMatch({ {{{"x","1"}}, 15} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, 5} }));
- TEST_DO(f.assertMatch({ {{{"x","1"}}, 0} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, 0} }));
- TEST_DO(f.assertMatch({ {{{"x","1"}}, -15} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, -5} }));
- TEST_DO(f.assertMatch({ {{{"x","1"}}, 15},
- {{{"x","1"}, {"y","1"}}, 7} }, {"x","y","z"},
- { {{{"x","1"}}, 3},
- {{{"x","2"}}, 3},
- {{{"x","1"},{"y","1"}}, 1},
- {{{"x","1"},{"y","2"}}, 6} },
- { {{{"x","1"}}, 5},
- {{{"x","1"},{"y","1"}}, 7},
- {{{"x","1"},{"y","1"},{"z","1"}}, 6} }));
- TEST_DO(f.assertMatch({ {{{"y","2"}}, 35} }, {"x", "y", "z"},
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }));
- TEST_DO(f.assertMatch({ {{{"y","2"}}, 35} }, {"x", "y", "z"},
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }));
- TEST_DO(f.assertMatch({ {{{"y","2"}}, 35} }, {"y", "z"},
- { {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }));
- TEST_DO(f.assertMatch({ {{{"y","2"}}, 35} }, {"y", "z"},
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 5} }));
- TEST_DO(f.assertMatch({ {{{"y","2"}}, 35} }, {"x", "y"},
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7} }));
- TEST_DO(f.assertMatch({ {{{"y","2"}}, 35} }, {"x", "y"},
- { {{{"y","2"}}, 7} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }));
- TEST_DO(f.assertMatch({ }, {"x", "z"},
- { {{{"x","1"}}, 3} },
- { {{{"z","3"}}, 11} }));
- TEST_DO(f.assertMatch({ }, {"x", "z"},
- { {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3} }));
-}
-
-template <typename FixtureType>
-void
-testTensorMultiply(FixtureType &f)
-{
- f.assertMultiply({}, {}, {}, false);
- f.assertMultiply({}, {"x"},
- { {{{"x","1"}}, 3} },
- { {{{"x","2"}}, 5} });
- f.assertMultiply({ {{{"x","1"}}, 15} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, 5} });
- f.assertMultiply({ {{{"x","1"},{"y","1"}}, 15} },
- { {{{"x","1"}}, 3} },
- { {{{"y","1"}}, 5} });
- f.assertMultiply({ {{{"x","1"},{"y","1"}}, 15}, {{{"x","2"},{"y","1"}}, 35} },
- { {{{"x","1"}}, 3}, {{{"x","2"}}, 7} },
- { {{{"y","1"}}, 5} });
- f.assertMultiply({ {{{"x","1"},{"y","1"},{"z","1"}}, 7},
- {{{"x","1"},{"y","1"},{"z","2"}}, 13},
- {{{"x","2"},{"y","1"},{"z","1"}}, 21},
- {{{"x","2"},{"y","1"},{"z","2"}}, 39},
- {{{"x","1"},{"y","2"},{"z","1"}}, 55} },
- { {{{"x","1"},{"y","1"}}, 1},
- {{{"x","2"},{"y","1"}}, 3},
- {{{"x","1"},{"y","2"}}, 5} },
- { {{{"y","1"},{"z","1"}}, 7},
- {{{"y","2"},{"z","1"}}, 11},
- {{{"y","1"},{"z","2"}}, 13} });
- f.assertMultiply({ {{{"x","1"},{"y","1"},{"z","1"}}, 7} },
- { {{{"x","1"}}, 5}, {{{"x","1"},{"y","1"}}, 1} },
- { {{{"y","1"},{"z","1"}}, 7} });
- f.assertMultiply({ {{{"x","1"},{"y","1"},{"z","1"}}, 7}, {{{"x","1"},{"z","1"}}, 55} },
- { {{{"x","1"}}, 5}, {{{"x","1"},{"y","1"}}, 1} },
- { {{{"z","1"}}, 11}, {{{"y","1"},{"z","1"}}, 7} });
- f.assertMultiply({ {{{"x","1"},{"y","1"},{"z","1"}}, 7} },
- { {{}, 5}, {{{"x","1"},{"y","1"}}, 1} },
- { {{{"y","1"},{"z","1"}}, 7} });
- f.assertMultiply({ {{{"x","1"},{"y","1"},{"z","1"}}, 7}, {{}, 55} },
- { {{}, 5}, {{{"x","1"},{"y","1"}}, 1} },
- { {{}, 11}, {{{"y","1"},{"z","1"}}, 7} });
-}
-
-template <typename FixtureType>
-void
-testTensorMultiplePreservationOfDimensions(FixtureType &f)
-{
- f.assertMultiply({}, {"x"},
- { {{{"x","1"}}, 1} },
- { {{{"x","2"}}, 1} });
- f.assertMultiply({ {{{"x","1"}}, 1} }, {"x","y"},
- { {{{"x","1"}}, 1} },
- { {{{"x","2"},{"y","1"}}, 1}, {{{"x","1"}}, 1} });
- f.assertMultiply({}, {"x","y"},
- { {{{"x","1"}}, 1} },
- { {{{"x","2"},{"y","1"}}, 1}, {{{"x","1"}}, 1} },
- { {{{"x","1"},{"y","1"}}, 1} });
- f.assertMultiply({ {{{"x","1"},{"y","1"}}, 1} }, {"x","y"},
- { {{{"x","1"}}, 1} },
- { {{{"x","1"},{"y","1"}}, 1} });
-}
-
-struct MyFunction : public CellFunction
-{
- virtual double apply(double value) const override {
- return value + 5;
- }
-};
-
-template <typename FixtureType>
-void
-testTensorApply(FixtureType &f)
-{
- f.assertApply({ {{{"x","1"}}, 6}, {{{"y","1"}}, 2} },
- { {{{"x","1"}}, 1}, {{{"y","1"}}, -3} },
- MyFunction());
-}
-
-template <typename FixtureType>
-void
-testTensorSumDimension(FixtureType &f)
-{
- f.assertDimensionSum({ {{{"y","1"}}, 4}, {{{"y","2"}}, 12} },
- { {{{"x","1"},{"y","1"}}, 1},
- {{{"x","2"},{"y","1"}}, 3},
- {{{"x","1"},{"y","2"}}, 5},
- {{{"x","2"},{"y","2"}}, 7} }, "x");
- f.assertDimensionSum({ {{{"x","1"}}, 6}, {{{"x","2"}}, 10} },
- { {{{"x","1"},{"y","1"}}, 1},
- {{{"x","2"},{"y","1"}}, 3},
- {{{"x","1"},{"y","2"}}, 5},
- {{{"x","2"},{"y","2"}}, 7} }, "y");
- f.assertDimensionSum({ {{}, 13}, {{{"x","1"}}, 17}, {{{"x","2"}}, 10} },
- { {{{"x","1"},{"y","1"}}, 1},
- {{{"x","2"},{"y","1"}}, 3},
- {{{"x","1"},{"y","2"}}, 5},
- {{{"x","2"},{"y","2"}}, 7},
- {{{"x","1"}}, 11},
- {{{"y","2"}}, 13} }, "y");
- f.assertDimensionSum({ {{}, 11}, {{{"y","1"}}, 4}, {{{"y","2"}}, 25}, {{{"z","1"}}, 19} },
- { {{{"x","1"},{"y","1"}}, 1},
- {{{"x","2"},{"y","1"}}, 3},
- {{{"x","1"},{"y","2"}}, 5},
- {{{"x","2"},{"y","2"}}, 7},
- {{{"x","1"}}, 11},
- {{{"y","2"}}, 13},
- {{{"z","1"}}, 19}, }, "x");
-}
-
-template <typename FixtureType>
-void
-testAllTensorOperations(FixtureType &f)
-{
- TEST_DO(testTensorEquals(f));
- TEST_DO(testTensorAdd(f));
- TEST_DO(testTensorSubtract(f));
- TEST_DO(testTensorMin(f));
- TEST_DO(testTensorMax(f));
- TEST_DO(testTensorSum(f));
- TEST_DO(testTensorMatch(f));
- TEST_DO(testTensorMultiply(f));
- TEST_DO(testTensorMultiplePreservationOfDimensions(f));
- TEST_DO(testTensorApply(f));
- TEST_DO(testTensorSumDimension(f));
-}
-
-TEST_F("test tensor operations for SparseTensor", SparseFixture)
-{
- testAllTensorOperations(f);
-}
-
-TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/vespalib/src/vespa/vespalib/eval/tensor_spec.h b/vespalib/src/vespa/vespalib/eval/tensor_spec.h
index 41c1f8d4f3c..06a9a3a2825 100644
--- a/vespalib/src/vespa/vespalib/eval/tensor_spec.h
+++ b/vespalib/src/vespa/vespalib/eval/tensor_spec.h
@@ -41,7 +41,12 @@ public:
double value;
Value(double value_in) : value(value_in) {}
operator double() const { return value; }
- bool operator==(const Value &rhs) const { return approx_equal(value, rhs.value); }
+ static bool both_nan(double a, double b) {
+ return (std::isnan(a) && std::isnan(b));
+ }
+ bool operator==(const Value &rhs) const {
+ return (both_nan(value, rhs.value) || approx_equal(value, rhs.value));
+ }
};
using Address = std::map<vespalib::string,Label>;
using Cells = std::map<Address,Value>;
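
For reference, a minimal standalone sketch of the NaN-aware cell comparison added to TensorSpec::Value above. Here approx_equal and its tolerance are stand-ins invented for this sketch; the real code uses the vespalib approx_equal helper:

#include <algorithm>
#include <cassert>
#include <cmath>

// Stand-in for the vespalib approx_equal helper; the tolerance is an
// assumption made for this sketch only.
static bool approx_equal(double a, double b) {
    return std::fabs(a - b) <= 1e-9 * std::max({1.0, std::fabs(a), std::fabs(b)});
}

struct Value {
    double value;
    static bool both_nan(double a, double b) {
        return std::isnan(a) && std::isnan(b);
    }
    bool operator==(const Value &rhs) const {
        // IEEE NaN never compares equal to itself, so "both NaN" is
        // handled explicitly before falling back to approximate equality.
        return both_nan(value, rhs.value) || approx_equal(value, rhs.value);
    }
};

int main() {
    assert((Value{std::nan("")} == Value{std::nan("")})); // now compares equal
    assert((Value{1.0} == Value{1.0 + 1e-12}));
    assert(!(Value{1.0} == Value{2.0}));
    return 0;
}
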
diff --git a/vespalib/src/vespa/vespalib/eval/test/tensor_conformance.cpp b/vespalib/src/vespa/vespalib/eval/test/tensor_conformance.cpp
index 00aee32b99a..3be8cb048a0 100644
--- a/vespalib/src/vespa/vespalib/eval/test/tensor_conformance.cpp
+++ b/vespalib/src/vespa/vespalib/eval/test/tensor_conformance.cpp
@@ -231,6 +231,16 @@ TensorSpec spec() {
return spec(Layout({}));
}
+TensorSpec spec(const vespalib::string &type,
+ const std::vector<std::pair<TensorSpec::Address, TensorSpec::Value>> &cells) {
+ TensorSpec spec("tensor(" + type + ")");
+
+ for (const auto &cell : cells) {
+ spec.add(cell.first, cell.second);
+ }
+ return spec;
+}
+
// abstract evaluation wrapper
struct Eval {
// typed result wrapper
@@ -711,6 +721,215 @@ struct TestContext {
//-------------------------------------------------------------------------
+ void test_apply_op(const Eval &eval,
+ const TensorSpec &expect,
+ const TensorSpec &lhs,
+ const TensorSpec &rhs) {
+ EXPECT_EQUAL(safe(eval).eval(engine, lhs, rhs).tensor(), expect);
+ }
+
+ void test_fixed_sparse_cases_apply_op(const Eval &eval,
+ const BinaryOperation &op)
+ {
+ TEST_DO(test_apply_op(eval,
+ spec("x{}", {}),
+ spec("x{}", { { {{"x","1"}}, 3 } }),
+ spec("x{}", { { {{"x","2"}}, 5 } })));
+ TEST_DO(test_apply_op(eval,
+ spec("x{}", { { {{"x","1"}}, op.eval(3,5) } }),
+ spec("x{}", { { {{"x","1"}}, 3 } }),
+ spec("x{}", { { {{"x","1"}}, 5 } })));
+ TEST_DO(test_apply_op(eval,
+ spec("x{}", { { {{"x","1"}}, op.eval(3,-5) } }),
+ spec("x{}", { { {{"x","1"}}, 3 } }),
+ spec("x{}", { { {{"x","1"}}, -5 } })));
+ TEST_DO(test_apply_op(eval,
+ spec("x{},y{},z{}",
+ { { {{"x","-"},{"y","2"},{"z","-"}},
+ op.eval(5,7) },
+ { {{"x","1"},{"y","-"},{"z","3"}},
+ op.eval(3,11) } }),
+ spec("x{},y{}",
+ { { {{"x","-"},{"y","2"}}, 5 },
+ { {{"x","1"},{"y","-"}}, 3 } }),
+ spec("y{},z{}",
+ { { {{"y","-"},{"z","3"}}, 11 },
+ { {{"y","2"},{"z","-"}}, 7 } })));
+ TEST_DO(test_apply_op(eval,
+ spec("x{},y{},z{}",
+ { { {{"x","-"},{"y","2"},{"z","-"}},
+ op.eval(7,5) },
+ { {{"x","1"},{"y","-"},{"z","3"}},
+ op.eval(11,3) } }),
+ spec("y{},z{}",
+ { { {{"y","-"},{"z","3"}}, 11 },
+ { {{"y","2"},{"z","-"}}, 7 } }),
+ spec("x{},y{}",
+ { { {{"x","-"},{"y","2"}}, 5 },
+ { {{"x","1"},{"y","-"}}, 3 } })));
+ TEST_DO(test_apply_op(eval,
+ spec("y{},z{}",
+ { { {{"y","2"},{"z","-"}},
+ op.eval(5,7) } }),
+ spec("y{}", { { {{"y","2"}}, 5 } }),
+ spec("y{},z{}",
+ { { {{"y","-"},{"z","3"}}, 11 },
+ { {{"y","2"},{"z","-"}}, 7 } })));
+ TEST_DO(test_apply_op(eval,
+ spec("y{},z{}",
+ { { {{"y","2"},{"z","-"}},
+ op.eval(7,5) } }),
+ spec("y{},z{}",
+ { { {{"y","-"},{"z","3"}}, 11 },
+ { {{"y","2"},{"z","-"}}, 7 } }),
+ spec("y{}", { { {{"y","2"}}, 5 } })));
+ TEST_DO(test_apply_op(eval,
+ spec("x{},y{}",
+ { { {{"x","-"},{"y","2"}},
+ op.eval(5,7) } }),
+ spec("x{},y{}",
+ { { {{"x","-"},{"y","2"}}, 5 },
+ { {{"x","1"},{"y","-"}}, 3 } }),
+ spec("y{}", { { {{"y","2"}}, 7 } })));
+ TEST_DO(test_apply_op(eval,
+ spec("x{},y{}",
+ { { {{"x","-"},{"y","2"}},
+ op.eval(7,5) } }),
+ spec("y{}", { { {{"y","2"}}, 7 } }),
+ spec("x{},y{}",
+ { { {{"x","-"},{"y","2"}}, 5 },
+ { {{"x","1"},{"y","-"}}, 3 } })));
+ TEST_DO(test_apply_op(eval,
+ spec("x{},z{}",
+ { { {{"x","1"},{"z","3"}},
+ op.eval(3,11) } }),
+ spec("x{}", { { {{"x","1"}}, 3 } }),
+ spec("z{}", { { {{"z","3"}}, 11 } })));
+ TEST_DO(test_apply_op(eval,
+ spec("x{},z{}",
+ { { {{"x","1"},{"z","3"}},
+ op.eval(11,3) } }),
+ spec("z{}",{ { {{"z","3"}}, 11 } }),
+ spec("x{}",{ { {{"x","1"}}, 3 } })));
+ TEST_DO(test_apply_op(eval,
+ spec("x{},y{}",
+ { { {{"x","1"},{"y","1"}},
+ op.eval(3,5) },
+ { {{"x","2"},{"y","1"}},
+ op.eval(7,5) } }),
+ spec("x{}",
+ { { {{"x","1"}}, 3 },
+ { {{"x","2"}}, 7 } }),
+ spec("y{}",
+ { { {{"y","1"}}, 5 } })));
+ TEST_DO(test_apply_op(eval,
+ spec("x{},y{},z{}",
+ { { {{"x","1"},{"y","1"},{"z","1"}},
+ op.eval(1,7) },
+ { {{"x","1"},{"y","1"},{"z","2"}},
+ op.eval(1,13) },
+ { {{"x","1"},{"y","2"},{"z","1"}},
+ op.eval(5,11) },
+ { {{"x","2"},{"y","1"},{"z","1"}},
+ op.eval(3,7) },
+ { {{"x","2"},{"y","1"},{"z","2"}},
+ op.eval(3,13) } }),
+ spec("x{},y{}",
+ { { {{"x","1"},{"y","1"}}, 1 },
+ { {{"x","1"},{"y","2"}}, 5 },
+ { {{"x","2"},{"y","1"}}, 3 } }),
+ spec("y{},z{}",
+ { { {{"y","1"},{"z","1"}}, 7 },
+ { {{"y","1"},{"z","2"}}, 13 },
+ { {{"y","2"},{"z","1"}}, 11 } })));
+ TEST_DO(test_apply_op(eval,
+ spec("x{},y{},z{}",
+ { { {{"x","1"},{"y","1"},{"z","1"}},
+ op.eval(1,7) } }),
+ spec("x{},y{}",
+ { { {{"x","1"},{"y","-"}}, 5 },
+ { {{"x","1"},{"y","1"}}, 1 } }),
+ spec("y{},z{}",
+ { { {{"y","1"},{"z","1"}}, 7 } })));
+ TEST_DO(test_apply_op(eval,
+ spec("x{},y{},z{}",
+ { { {{"x","1"},{"y","-"},{"z","1"}},
+ op.eval(5,11) },
+ { {{"x","1"},{"y","1"},{"z","1"}},
+ op.eval(1,7) } }),
+ spec("x{},y{}",
+ { { {{"x","1"},{"y","-"}}, 5 },
+ { {{"x","1"},{"y","1"}}, 1 } }),
+ spec("y{},z{}",
+ { { {{"y","-"},{"z","1"}}, 11 },
+ { {{"y","1"},{"z","1"}}, 7 } })));
+ TEST_DO(test_apply_op(eval,
+ spec("x{},y{},z{}",
+ { { {{"x","1"},{"y","1"},{"z","1"}},
+ op.eval(1,7) } }),
+ spec("x{},y{}",
+ { { {{"x","-"},{"y","-"}}, 5 },
+ { {{"x","1"},{"y","1"}}, 1 } }),
+ spec("y{},z{}",
+ { { {{"y","1"},{"z","1"}}, 7 } })));
+ TEST_DO(test_apply_op(eval,
+ spec("x{},y{},z{}",
+ { { {{"x","-"},{"y","-"},{"z", "-"}},
+ op.eval(5,11) },
+ { {{"x","1"},{"y","1"},{"z","1"}},
+ op.eval(1,7) } }),
+ spec("x{},y{}",
+ { { {{"x","-"},{"y","-"}}, 5 },
+ { {{"x","1"},{"y","1"}}, 1 } }),
+ spec("y{},z{}",
+ { { {{"y","-"},{"z","-"}}, 11 },
+ { {{"y","1"},{"z","1"}}, 7 } })));
+ }
+
+ void test_fixed_dense_cases_apply_op(const Eval &eval,
+ const BinaryOperation &op)
+ {
+ TEST_DO(test_apply_op(eval,
+ spec(op.eval(0,0)), spec(0.0), spec(0.0)));
+ TEST_DO(test_apply_op(eval,
+ spec(x(1), Seq({ op.eval(3,5) })),
+ spec(x(1), Seq({ 3 })),
+ spec(x(1), Seq({ 5 }))));
+ TEST_DO(test_apply_op(eval,
+ spec(x(1), Seq({ op.eval(3,-5) })),
+ spec(x(1), Seq({ 3 })),
+ spec(x(1), Seq({ -5 }))));
+ TEST_DO(test_apply_op(eval,
+ spec(x(2), Seq({ op.eval(3,7), op.eval(5,11) })),
+ spec(x(2), Seq({ 3, 5 })),
+ spec(x(2), Seq({ 7, 11 }))));
+ TEST_DO(test_apply_op(eval,
+ spec({x(1),y(1)}, Seq({ op.eval(3,5) })),
+ spec({x(1),y(1)}, Seq({ 3 })),
+ spec({x(1),y(1)}, Seq({ 5 }))));
+ TEST_DO(test_apply_op(eval,
+ spec(x(1), Seq({ op.eval(3, 0) })),
+ spec(x(1), Seq({ 3 })),
+ spec(x(2), Seq({ 0, 7 }))));
+ TEST_DO(test_apply_op(eval,
+ spec(x(1), Seq({ op.eval(0, 5) })),
+ spec(x(2), Seq({ 0, 3 })),
+ spec(x(1), Seq({ 5 }))));
+ TEST_DO(test_apply_op(eval,
+ spec({x(2),y(2),z(2)},
+ Seq({ op.eval(1, 7), op.eval(1, 11),
+ op.eval(2, 13), op.eval(2, 17),
+ op.eval(3, 7), op.eval(3, 11),
+ op.eval(5, 13), op.eval(5, 17)
+ })),
+ spec({x(2),y(2)},
+ Seq({ 1, 2,
+ 3, 5 })),
+ spec({y(2),z(2)},
+ Seq({ 7, 11,
+ 13, 17 }))));
+ }
+
void test_apply_op(const Eval &eval, const BinaryOperation &op, const Sequence &seq) {
std::vector<Layout> layouts = {
{}, {},
@@ -743,6 +962,8 @@ struct TestContext {
TensorSpec expect = ImmediateApply(op).eval(ref_engine, lhs_input, rhs_input).tensor();
EXPECT_EQUAL(safe(eval).eval(engine, lhs_input, rhs_input).tensor(), expect);
}
+ TEST_DO(test_fixed_sparse_cases_apply_op(eval, op));
+ TEST_DO(test_fixed_dense_cases_apply_op(eval, op));
}
void test_apply_op(const vespalib::string &expr, const BinaryOperation &op, const Sequence &seq) {
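
The new spec(type, cells) overload lets the fixed sparse cases above be written as literal (address, value) lists. A reduced sketch of the same idea, with TensorSpec shrunk to a bare map so the example is self-contained (Address, Cells and the spec helper below are simplified stand-ins, not the real classes):

#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

// Reduced stand-ins for TensorSpec::Address / TensorSpec::Value.
using Address = std::map<std::string, std::string>;
using Cells   = std::map<Address, double>;

struct TensorSpec {
    std::string type;
    Cells cells;
    explicit TensorSpec(std::string type_in) : type(std::move(type_in)), cells() {}
    TensorSpec &add(const Address &addr, double value) {
        cells[addr] = value;
        return *this;
    }
};

// Mirrors the helper added above: build a spec from a type string and a
// list of (address, value) pairs.
TensorSpec spec(const std::string &type,
                const std::vector<std::pair<Address, double>> &cells) {
    TensorSpec result("tensor(" + type + ")");
    for (const auto &cell : cells) {
        result.add(cell.first, cell.second);
    }
    return result;
}

int main() {
    auto t = spec("x{},y{}", { { {{"x","1"},{"y","-"}}, 3 },
                               { {{"x","-"},{"y","2"}}, 5 } });
    std::cout << t.type << " has " << t.cells.size() << " cells\n"; // 2 cells
    return 0;
}
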
diff --git a/vespalib/src/vespa/vespalib/tensor/CMakeLists.txt b/vespalib/src/vespa/vespalib/tensor/CMakeLists.txt
index 8bd25747b35..7ed5e4d60d5 100644
--- a/vespalib/src/vespa/vespalib/tensor/CMakeLists.txt
+++ b/vespalib/src/vespa/vespalib/tensor/CMakeLists.txt
@@ -6,7 +6,6 @@ vespa_add_library(vespalib_vespalib_tensor
tensor_address.cpp
tensor_apply.cpp
tensor_factory.cpp
- tensor_function.cpp
tensor_mapper.cpp
$<TARGET_OBJECTS:vespalib_vespalib_tensor_sparse>
$<TARGET_OBJECTS:vespalib_vespalib_tensor_dense>
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.cpp b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.cpp
index 5e81e9cb05d..b8cb0838fee 100644
--- a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.cpp
+++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.cpp
@@ -20,28 +20,28 @@ namespace tensor {
namespace {
string
-dimensionsMetaAsString(const DenseTensor::DimensionsMeta &dimensionsMeta)
+dimensionsAsString(const eval::ValueType &type)
{
std::ostringstream oss;
bool first = true;
oss << "[";
- for (const auto &dimMeta : dimensionsMeta) {
+ for (const auto &dim : type.dimensions()) {
if (!first) {
oss << ",";
}
first = false;
- oss << dimMeta;
+ oss << dim.name << ":" << dim.size;
}
oss << "]";
return oss.str();
}
size_t
-calcCellsSize(const DenseTensor::DimensionsMeta &dimensionsMeta)
+calcCellsSize(const eval::ValueType &type)
{
size_t cellsSize = 1;
- for (const auto &dimMeta : dimensionsMeta) {
- cellsSize *= dimMeta.size();
+ for (const auto &dim : type.dimensions()) {
+ cellsSize *= dim.size;
}
return cellsSize;
}
@@ -50,7 +50,7 @@ calcCellsSize(const DenseTensor::DimensionsMeta &dimensionsMeta)
void
checkCellsSize(const DenseTensor &arg)
{
- auto cellsSize = calcCellsSize(arg.dimensionsMeta());
+ auto cellsSize = calcCellsSize(arg.type());
if (arg.cells().size() != cellsSize) {
throw IllegalStateException(make_string("wrong cell size, "
"expected=%zu, "
@@ -64,14 +64,14 @@ void
checkDimensions(const DenseTensor &lhs, const DenseTensor &rhs,
vespalib::stringref operation)
{
- if (lhs.dimensionsMeta() != rhs.dimensionsMeta()) {
- throw IllegalStateException(make_string("mismatching dimensions meta for "
+ if (lhs.type() != rhs.type()) {
+ throw IllegalStateException(make_string("mismatching dimensions for "
"dense tensor %s, "
- "lhs dimensions meta = '%s', "
- "rhs dimensions meta = '%s'",
+ "lhs dimensions = '%s', "
+ "rhs dimensions = '%s'",
operation.c_str(),
- dimensionsMetaAsString(lhs.dimensionsMeta()).c_str(),
- dimensionsMetaAsString(rhs.dimensionsMeta()).c_str()));
+ dimensionsAsString(lhs.type()).c_str(),
+ dimensionsAsString(rhs.type()).c_str()));
}
checkCellsSize(lhs);
checkCellsSize(rhs);
@@ -87,7 +87,7 @@ checkDimensions(const DenseTensor &lhs, const DenseTensor &rhs,
template <typename Function>
Tensor::UP
joinDenseTensors(const DenseTensor &lhs, const DenseTensor &rhs,
- Function &&func)
+ Function &&func)
{
DenseTensor::Cells cells;
cells.reserve(lhs.cells().size());
@@ -97,42 +97,10 @@ joinDenseTensors(const DenseTensor &lhs, const DenseTensor &rhs,
++rhsCellItr;
}
assert(rhsCellItr == rhs.cells().cend());
- return std::make_unique<DenseTensor>(lhs.dimensionsMeta(),
+ return std::make_unique<DenseTensor>(lhs.type(),
std::move(cells));
}
-/*
- * Join the cells of two tensors, where the rhs values are treated as negated values.
- * The given function is used to calculate the resulting cell value for overlapping cells.
- */
-template <typename Function>
-Tensor::UP
-joinDenseTensorsNegated(const DenseTensor &lhs,
- const DenseTensor &rhs,
- Function &&func)
-{
- DenseTensor::Cells cells;
- cells.reserve(lhs.cells().size());
- auto rhsCellItr = rhs.cells().cbegin();
- for (const auto &lhsCell : lhs.cells()) {
- cells.push_back(func(lhsCell, - *rhsCellItr));
- ++rhsCellItr;
- }
- assert(rhsCellItr == rhs.cells().cend());
- return std::make_unique<DenseTensor>(lhs.dimensionsMeta(),
- std::move(cells));
-}
-
-std::vector<vespalib::string>
-getDimensions(const DenseTensor &tensor)
-{
- std::vector<vespalib::string> dimensions;
- for (const auto &dimMeta : tensor.dimensionsMeta()) {
- dimensions.emplace_back(dimMeta.dimension());
- }
- return dimensions;
-}
-
}
@@ -142,7 +110,7 @@ DenseTensor::CellsIterator::next()
++_cellIdx;
if (valid()) {
for (int64_t i = (_address.size() - 1); i >= 0; --i) {
- _address[i] = (_address[i] + 1) % _dimensionsMeta[i].size();
+ _address[i] = (_address[i] + 1) % _type.dimensions()[i].size;
if (_address[i] != 0) {
// Outer dimension labels can only be increased when this label wraps around.
break;
@@ -152,31 +120,31 @@ DenseTensor::CellsIterator::next()
}
DenseTensor::DenseTensor()
- : _dimensionsMeta(),
+ : _type(eval::ValueType::double_type()),
_cells(1)
{
}
-DenseTensor::DenseTensor(const DimensionsMeta &dimensionsMeta_in,
+DenseTensor::DenseTensor(const eval::ValueType &type_in,
const Cells &cells_in)
- : _dimensionsMeta(dimensionsMeta_in),
+ : _type(type_in),
_cells(cells_in)
{
checkCellsSize(*this);
}
-DenseTensor::DenseTensor(const DimensionsMeta &dimensionsMeta_in,
+DenseTensor::DenseTensor(const eval::ValueType &type_in,
Cells &&cells_in)
- : _dimensionsMeta(dimensionsMeta_in),
+ : _type(type_in),
_cells(std::move(cells_in))
{
checkCellsSize(*this);
}
-DenseTensor::DenseTensor(DimensionsMeta &&dimensionsMeta_in,
+DenseTensor::DenseTensor(eval::ValueType &&type_in,
Cells &&cells_in)
- : _dimensionsMeta(std::move(dimensionsMeta_in)),
+ : _type(std::move(type_in)),
_cells(std::move(cells_in))
{
checkCellsSize(*this);
@@ -185,23 +153,14 @@ DenseTensor::DenseTensor(DimensionsMeta &&dimensionsMeta_in,
bool
DenseTensor::operator==(const DenseTensor &rhs) const
{
- return (_dimensionsMeta == rhs._dimensionsMeta) &&
+ return (_type == rhs._type) &&
(_cells == rhs._cells);
}
eval::ValueType
DenseTensor::getType() const
{
- if (_dimensionsMeta.empty()) {
- return eval::ValueType::double_type();
- }
- std::vector<eval::ValueType::Dimension> dimensions;
- dimensions.reserve(_dimensionsMeta.size());
- for (const auto &dimensionMeta : _dimensionsMeta) {
- dimensions.emplace_back(dimensionMeta.dimension(),
- dimensionMeta.size());
- }
- return eval::ValueType::tensor_type(dimensions);
+ return _type;
}
double
@@ -296,7 +255,7 @@ DenseTensor::apply(const CellFunction &func) const
++itr;
}
assert(itr == newCells.end());
- return std::make_unique<DenseTensor>(_dimensionsMeta,
+ return std::make_unique<DenseTensor>(_type,
std::move(newCells));
}
@@ -329,7 +288,7 @@ DenseTensor::toString() const
Tensor::UP
DenseTensor::clone() const
{
- return std::make_unique<DenseTensor>(_dimensionsMeta, _cells);
+ return std::make_unique<DenseTensor>(_type, _cells);
}
namespace {
@@ -338,8 +297,8 @@ void
buildAddress(const DenseTensor::CellsIterator &itr, TensorSpec::Address &address)
{
auto addressItr = itr.address().begin();
- for (const auto &dim : itr.dimensions()) {
- address.emplace(std::make_pair(dim.dimension(), TensorSpec::Label(*addressItr++)));
+ for (const auto &dim : itr.type().dimensions()) {
+ address.emplace(std::make_pair(dim.name, TensorSpec::Label(*addressItr++)));
}
assert(addressItr == itr.address().end());
}
@@ -351,7 +310,7 @@ DenseTensor::toSpec() const
{
TensorSpec result(getType().to_spec());
TensorSpec::Address address;
- for (CellsIterator itr(_dimensionsMeta, _cells); itr.valid(); itr.next()) {
+ for (CellsIterator itr(_type, _cells); itr.valid(); itr.next()) {
buildAddress(itr, address);
result.add(address, itr.cell());
address.clear();
@@ -365,11 +324,11 @@ DenseTensor::print(std::ostream &out) const
// TODO (geirst): print on common format.
out << "[ ";
bool first = true;
- for (const auto &dimMeta : _dimensionsMeta) {
+ for (const auto &dim : _type.dimensions()) {
if (!first) {
out << ", ";
}
- out << dimMeta;
+ out << dim.name << ":" << dim.size;
first = false;
}
out << " ] { ";
@@ -387,16 +346,16 @@ DenseTensor::print(std::ostream &out) const
void
DenseTensor::accept(TensorVisitor &visitor) const
{
- DenseTensor::CellsIterator iterator(_dimensionsMeta, _cells);
+ DenseTensor::CellsIterator iterator(_type, _cells);
TensorAddressBuilder addressBuilder;
TensorAddress address;
vespalib::string label;
while (iterator.valid()) {
addressBuilder.clear();
auto rawIndex = iterator.address().begin();
- for (const auto &dimension : _dimensionsMeta) {
+ for (const auto &dimension : _type.dimensions()) {
label = vespalib::make_string("%zu", *rawIndex);
- addressBuilder.add(dimension.dimension(), label);
+ addressBuilder.add(dimension.name, label);
++rawIndex;
}
address = addressBuilder.build();
@@ -405,13 +364,6 @@ DenseTensor::accept(TensorVisitor &visitor) const
}
}
-std::ostream &
-operator<<(std::ostream &out, const DenseTensor::DimensionMeta &value)
-{
- out << value.dimension() << ":" << value.size();
- return out;
-}
-
Tensor::UP
DenseTensor::apply(const eval::BinaryOperation &op, const Tensor &arg) const
{
@@ -429,7 +381,7 @@ DenseTensor::reduce(const eval::BinaryOperation &op,
const std::vector<vespalib::string> &dimensions) const
{
return dense::reduce(*this,
- (dimensions.empty() ? getDimensions(*this) : dimensions),
+ (dimensions.empty() ? _type.dimension_names() : dimensions),
[&op](double lhsValue, double rhsValue)
{ return op.eval(lhsValue, rhsValue); });
}
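
CellsIterator::next() keeps its odometer-style address stepping; the only change above is that dimension sizes are now read from eval::ValueType. A self-contained sketch of that stepping logic, with Dimension as a local stand-in for eval::ValueType::Dimension:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

// Local stand-in for eval::ValueType::Dimension (name + size).
struct Dimension {
    std::string name;
    size_t size;
};

// Step a dense cell address forward in row-major order: bump the
// innermost label and carry outwards on wrap-around, like an odometer.
void next_address(std::vector<size_t> &address,
                  const std::vector<Dimension> &dims)
{
    for (int64_t i = static_cast<int64_t>(address.size()) - 1; i >= 0; --i) {
        address[i] = (address[i] + 1) % dims[i].size;
        if (address[i] != 0) {
            break; // outer labels only advance when an inner label wraps
        }
    }
}

int main() {
    std::vector<Dimension> dims = { {"x", 2}, {"y", 3} };
    std::vector<size_t> addr = {0, 0};
    next_address(addr, dims); // {0,1}
    next_address(addr, dims); // {0,2}
    next_address(addr, dims); // {1,0}: y wrapped, carry into x
    assert(addr[0] == 1 && addr[1] == 0);
    return 0;
}
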
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.h b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.h
index 104fddeee7e..0a253f398b2 100644
--- a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.h
+++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.h
@@ -4,6 +4,7 @@
#include <vespa/vespalib/tensor/tensor.h>
#include <vespa/vespalib/tensor/types.h>
+#include <vespa/vespalib/eval/value_type.h>
namespace vespalib {
namespace tensor {
@@ -18,78 +19,46 @@ public:
typedef std::unique_ptr<DenseTensor> UP;
using Cells = std::vector<double>;
- class DimensionMeta
- {
- vespalib::string _dimension;
- size_t _size;
-
- public:
- DimensionMeta(const vespalib::string & dimension_in, size_t size_in)
- : _dimension(dimension_in),
- _size(size_in)
- {
- }
-
- const vespalib::string &dimension() const { return _dimension; }
- size_t size() const { return _size; }
-
- bool operator==(const DimensionMeta &rhs) const {
- return (_dimension == rhs._dimension) &&
- (_size == rhs._size);
- }
- bool operator!=(const DimensionMeta &rhs) const {
- return !(*this == rhs);
- }
- bool operator<(const DimensionMeta &rhs) const {
- if (_dimension == rhs._dimension) {
- return _size < rhs._size;
- }
- return _dimension < rhs._dimension;
- }
- };
-
- using DimensionsMeta = std::vector<DimensionMeta>;
-
class CellsIterator
{
private:
- const DimensionsMeta &_dimensionsMeta;
+ const eval::ValueType &_type;
const Cells &_cells;
size_t _cellIdx;
std::vector<size_t> _address;
public:
- CellsIterator(const DimensionsMeta &dimensionsMeta,
+ CellsIterator(const eval::ValueType &type_in,
const Cells &cells)
- : _dimensionsMeta(dimensionsMeta),
+ : _type(type_in),
_cells(cells),
_cellIdx(0),
- _address(dimensionsMeta.size(), 0)
+ _address(type_in.dimensions().size(), 0)
{}
bool valid() const { return _cellIdx < _cells.size(); }
void next();
double cell() const { return _cells[_cellIdx]; }
const std::vector<size_t> &address() const { return _address; }
- const DimensionsMeta &dimensions() const { return _dimensionsMeta; }
+ const eval::ValueType &type() const { return _type; }
};
private:
- DimensionsMeta _dimensionsMeta;
+ eval::ValueType _type;
Cells _cells;
public:
DenseTensor();
- DenseTensor(const DimensionsMeta &dimensionsMeta_in,
+ DenseTensor(const eval::ValueType &type_in,
const Cells &cells_in);
- DenseTensor(const DimensionsMeta &dimensionsMeta_in,
+ DenseTensor(const eval::ValueType &type_in,
Cells &&cells_in);
- DenseTensor(DimensionsMeta &&dimensionsMeta_in,
+ DenseTensor(eval::ValueType &&type_in,
Cells &&cells_in);
- const DimensionsMeta &dimensionsMeta() const { return _dimensionsMeta; }
+ const eval::ValueType &type() const { return _type; }
const Cells &cells() const { return _cells; }
bool operator==(const DenseTensor &rhs) const;
- CellsIterator cellsIterator() const { return CellsIterator(_dimensionsMeta, _cells); }
+ CellsIterator cellsIterator() const { return CellsIterator(_type, _cells); }
virtual eval::ValueType getType() const override;
virtual double sum() const override;
@@ -114,7 +83,5 @@ public:
virtual void accept(TensorVisitor &visitor) const override;
};
-std::ostream &operator<<(std::ostream &out, const DenseTensor::DimensionMeta &value);
-
} // namespace vespalib::tensor
} // namespace vespalib
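
Both the implementation and the header above now derive the dense cell count straight from the value type: the flat cell vector must hold one entry per point in the dimension grid, and an empty dimension list means a single scalar cell. A tiny sketch of that invariant, again using a local Dimension stand-in:

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

// Local stand-in for eval::ValueType::Dimension.
struct Dimension {
    std::string name;
    size_t size;
};

// The number of cells in a dense tensor is the product of its dimension
// sizes; this is the invariant checkCellsSize() enforces against _type.
size_t calc_cells_size(const std::vector<Dimension> &dims)
{
    size_t cells = 1;
    for (const auto &dim : dims) {
        cells *= dim.size;
    }
    return cells;
}

int main() {
    assert(calc_cells_size({}) == 1);                   // scalar: one cell
    assert(calc_cells_size({{"x", 2}, {"y", 3}}) == 6); // tensor(x[2],y[3])
    return 0;
}
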
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.cpp b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.cpp
index 88fe86ca9e6..1a3780b8f66 100644
--- a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.cpp
+++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.cpp
@@ -9,8 +9,6 @@ namespace vespalib {
namespace tensor {
using Address = DenseTensorAddressCombiner::Address;
-using DimensionMeta = DenseTensor::DimensionMeta;
-using DimensionsMeta = DenseTensorAddressCombiner::DimensionsMeta;
namespace {
@@ -35,19 +33,19 @@ public:
}
-DenseTensorAddressCombiner::DenseTensorAddressCombiner(const DimensionsMeta &lhs,
- const DimensionsMeta &rhs)
+DenseTensorAddressCombiner::DenseTensorAddressCombiner(const eval::ValueType &lhs,
+ const eval::ValueType &rhs)
: _ops(),
_combinedAddress()
{
- auto rhsItr = rhs.cbegin();
- auto rhsItrEnd = rhs.cend();
- for (const auto &lhsDim : lhs) {
- while ((rhsItr != rhsItrEnd) && (rhsItr->dimension() < lhsDim.dimension())) {
+ auto rhsItr = rhs.dimensions().cbegin();
+ auto rhsItrEnd = rhs.dimensions().cend();
+ for (const auto &lhsDim : lhs.dimensions()) {
+ while ((rhsItr != rhsItrEnd) && (rhsItr->name < lhsDim.name)) {
_ops.push_back(AddressOp::RHS);
++rhsItr;
}
- if ((rhsItr != rhsItrEnd) && (rhsItr->dimension() == lhsDim.dimension())) {
+ if ((rhsItr != rhsItrEnd) && (rhsItr->name == lhsDim.name)) {
_ops.push_back(AddressOp::BOTH);
++rhsItr;
} else {
@@ -89,31 +87,36 @@ DenseTensorAddressCombiner::combine(const CellsIterator &lhsItr,
return true;
}
-DimensionsMeta
-DenseTensorAddressCombiner::combineDimensions(const DimensionsMeta &lhs, const DimensionsMeta &rhs)
+eval::ValueType
+DenseTensorAddressCombiner::combineDimensions(const eval::ValueType &lhs,
+ const eval::ValueType &rhs)
{
// NOTE: both lhs and rhs are sorted according to dimension names.
- DimensionsMeta result;
- auto lhsItr = lhs.cbegin();
- auto rhsItr = rhs.cbegin();
- while (lhsItr != lhs.end() && rhsItr != rhs.end()) {
- if (lhsItr->dimension() == rhsItr->dimension()) {
- result.emplace_back(DimensionMeta(lhsItr->dimension(), std::min(lhsItr->size(), rhsItr->size())));
+ std::vector<eval::ValueType::Dimension> result;
+ auto lhsItr = lhs.dimensions().cbegin();
+ auto rhsItr = rhs.dimensions().cbegin();
+ while (lhsItr != lhs.dimensions().end() &&
+ rhsItr != rhs.dimensions().end()) {
+ if (lhsItr->name == rhsItr->name) {
+ result.emplace_back(lhsItr->name,
+ std::min(lhsItr->size, rhsItr->size));
++lhsItr;
++rhsItr;
- } else if (lhsItr->dimension() < rhsItr->dimension()) {
+ } else if (lhsItr->name < rhsItr->name) {
result.emplace_back(*lhsItr++);
} else {
result.emplace_back(*rhsItr++);
}
}
- while (lhsItr != lhs.end()) {
+ while (lhsItr != lhs.dimensions().end()) {
result.emplace_back(*lhsItr++);
}
- while (rhsItr != rhs.end()) {
+ while (rhsItr != rhs.dimensions().end()) {
result.emplace_back(*rhsItr++);
}
- return result;
+ return (result.empty() ?
+ eval::ValueType::double_type() :
+ eval::ValueType::tensor_type(result));
}
} // namespace vespalib::tensor
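
combineDimensions above is a merge of two dimension lists that are both sorted by name: shared dimensions keep the smaller size, unique dimensions are copied through, and an empty result falls back to the double type. A sketch of the merge itself, with Dimension as a local stand-in and the double-type fallback left out:

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

// Local stand-in for eval::ValueType::Dimension.
struct Dimension {
    std::string name;
    size_t size;
};

// Merge two dimension lists sorted by name: a shared dimension keeps the
// smaller of the two sizes, one-sided dimensions are copied unchanged.
std::vector<Dimension>
combine(const std::vector<Dimension> &lhs, const std::vector<Dimension> &rhs)
{
    std::vector<Dimension> result;
    auto l = lhs.cbegin();
    auto r = rhs.cbegin();
    while (l != lhs.cend() && r != rhs.cend()) {
        if (l->name == r->name) {
            result.push_back({l->name, std::min(l->size, r->size)});
            ++l;
            ++r;
        } else if (l->name < r->name) {
            result.push_back(*l++);
        } else {
            result.push_back(*r++);
        }
    }
    result.insert(result.end(), l, lhs.cend());
    result.insert(result.end(), r, rhs.cend());
    return result;
}

int main() {
    auto combined = combine({{"x", 3}, {"y", 5}}, {{"y", 2}, {"z", 4}});
    assert(combined.size() == 3);   // x, y, z
    assert(combined[1].size == 2);  // shared 'y' keeps the smaller size
    return 0;
}
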
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.h b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.h
index 2c7f9e61223..89168e038bc 100644
--- a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.h
+++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.h
@@ -16,7 +16,6 @@ class DenseTensorAddressCombiner
{
public:
using Address = std::vector<size_t>;
- using DimensionsMeta = DenseTensor::DimensionsMeta;
private:
enum class AddressOp {
@@ -31,14 +30,14 @@ private:
Address _combinedAddress;
public:
- DenseTensorAddressCombiner(const DimensionsMeta &lhs,
- const DimensionsMeta &rhs);
+ DenseTensorAddressCombiner(const eval::ValueType &lhs,
+ const eval::ValueType &rhs);
bool combine(const CellsIterator &lhsItr,
const CellsIterator &rhsItr);
const Address &address() const { return _combinedAddress; }
- static DimensionsMeta combineDimensions(const DimensionsMeta &lhs, const DimensionsMeta &rhs);
+ static eval::ValueType combineDimensions(const eval::ValueType &lhs, const eval::ValueType &rhs);
};
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_apply.hpp b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_apply.hpp
index 3168089b941..270539f72d8 100644
--- a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_apply.hpp
+++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_apply.hpp
@@ -14,8 +14,8 @@ template <typename Function>
std::unique_ptr<Tensor>
apply(const DenseTensor &lhs, const DenseTensor &rhs, Function &&func)
{
- DenseTensorAddressCombiner combiner(lhs.dimensionsMeta(), rhs.dimensionsMeta());
- DirectDenseTensorBuilder builder(DenseTensorAddressCombiner::combineDimensions(lhs.dimensionsMeta(), rhs.dimensionsMeta()));
+ DenseTensorAddressCombiner combiner(lhs.type(), rhs.type());
+ DirectDenseTensorBuilder builder(DenseTensorAddressCombiner::combineDimensions(lhs.type(), rhs.type()));
for (DenseTensor::CellsIterator lhsItr = lhs.cellsIterator(); lhsItr.valid(); lhsItr.next()) {
for (DenseTensor::CellsIterator rhsItr = rhs.cellsIterator(); rhsItr.valid(); rhsItr.next()) {
bool combineSuccess = combiner.combine(lhsItr, rhsItr);
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_builder.cpp b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_builder.cpp
index ab0404f320b..d5e6feb2135 100644
--- a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_builder.cpp
+++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_builder.cpp
@@ -13,8 +13,6 @@ using vespalib::make_string;
namespace vespalib {
namespace tensor {
-using DimensionMeta = DenseTensor::DimensionMeta;
-
namespace {
constexpr size_t UNDEFINED_LABEL = std::numeric_limits<size_t>::max();
@@ -39,14 +37,21 @@ validateLabelNotSpecified(size_t oldLabel, const vespalib::string &dimension)
}
}
+eval::ValueType
+makeValueType(const std::vector<eval::ValueType::Dimension> &&dimensions) {
+ return (dimensions.empty() ?
+ eval::ValueType::double_type() :
+ eval::ValueType::tensor_type(std::move(dimensions)));
+}
+
}
void
DenseTensorBuilder::allocateCellsStorage()
{
size_t cellsSize = 1;
- for (const auto &dimensionMeta : _dimensionsMeta) {
- cellsSize *= dimensionMeta.size();
+ for (const auto &dimension : _dimensions) {
+ cellsSize *= dimension.size;
}
_cells.resize(cellsSize, 0);
}
@@ -55,13 +60,14 @@ DenseTensorBuilder::allocateCellsStorage()
void
DenseTensorBuilder::sortDimensions()
{
- std::sort(_dimensionsMeta.begin(), _dimensionsMeta.end(),
- [](const DimensionMeta &lhs, const DimensionMeta &rhs)
- { return lhs.dimension() < rhs.dimension(); });
- _dimensionsMapping.resize(_dimensionsMeta.size());
+ std::sort(_dimensions.begin(), _dimensions.end(),
+ [](const eval::ValueType::Dimension &lhs,
+ const eval::ValueType::Dimension &rhs)
+ { return lhs.name < rhs.name; });
+ _dimensionsMapping.resize(_dimensions.size());
Dimension dim = 0;
- for (const auto &dimension : _dimensionsMeta) {
- auto itr = _dimensionsEnum.find(dimension.dimension());
+ for (const auto &dimension : _dimensions) {
+ auto itr = _dimensionsEnum.find(dimension.name);
assert(itr != _dimensionsEnum.end());
_dimensionsMapping[itr->second] = dim;
++dim;
@@ -75,14 +81,14 @@ DenseTensorBuilder::calculateCellAddress()
size_t multiplier = 1;
for (int64_t i = (_addressBuilder.size() - 1); i >= 0; --i) {
const size_t label = _addressBuilder[i];
- const auto &dimMeta = _dimensionsMeta[i];
+ const auto &dim = _dimensions[i];
if (label == UNDEFINED_LABEL) {
throw IllegalArgumentException(make_string("Label for dimension '%s' is undefined. "
"Expected a value in the range [0, %zu>",
- dimMeta.dimension().c_str(), dimMeta.size()));
+ dim.name.c_str(), dim.size));
}
result += (label * multiplier);
- multiplier *= dimMeta.size();
+ multiplier *= dim.size;
_addressBuilder[i] = UNDEFINED_LABEL;
}
return result;
@@ -90,7 +96,7 @@ DenseTensorBuilder::calculateCellAddress()
DenseTensorBuilder::DenseTensorBuilder()
: _dimensionsEnum(),
- _dimensionsMeta(),
+ _dimensions(),
_cells(),
_addressBuilder(),
_dimensionsMapping()
@@ -108,9 +114,9 @@ DenseTensorBuilder::defineDimension(const vespalib::string &dimension,
assert(_cells.empty());
Dimension result = _dimensionsEnum.size();
_dimensionsEnum.insert(std::make_pair(dimension, result));
- _dimensionsMeta.emplace_back(dimension, dimensionSize);
+ _dimensions.emplace_back(dimension, dimensionSize);
_addressBuilder.push_back(UNDEFINED_LABEL);
- assert(_dimensionsMeta.size() == (result + 1));
+ assert(_dimensions.size() == (result + 1));
assert(_addressBuilder.size() == (result + 1));
return result;
}
@@ -122,13 +128,13 @@ DenseTensorBuilder::addLabel(Dimension dimension, size_t label)
sortDimensions();
allocateCellsStorage();
}
- assert(dimension < _dimensionsMeta.size());
+ assert(dimension < _dimensions.size());
assert(dimension < _addressBuilder.size());
Dimension mappedDimension = _dimensionsMapping[dimension];
- const auto &dimMeta = _dimensionsMeta[mappedDimension];
- validateLabelInRange(label, dimMeta.size(), dimMeta.dimension());
+ const auto &dim = _dimensions[mappedDimension];
+ validateLabelInRange(label, dim.size, dim.name);
validateLabelNotSpecified(_addressBuilder[mappedDimension],
- dimMeta.dimension());
+ dim.name);
_addressBuilder[mappedDimension] = label;
return *this;
}
@@ -152,10 +158,10 @@ DenseTensorBuilder::build()
if (_cells.empty()) {
allocateCellsStorage();
}
- Tensor::UP result = std::make_unique<DenseTensor>(std::move(_dimensionsMeta),
- std::move(_cells));
+ Tensor::UP result = std::make_unique<DenseTensor>(makeValueType(std::move(_dimensions)),
+ std::move(_cells));
_dimensionsEnum.clear();
- _dimensionsMeta.clear();
+ _dimensions.clear();
DenseTensor::Cells().swap(_cells);
_addressBuilder.clear();
_dimensionsMapping.clear();
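
calculateCellAddress flattens the sorted label tuple into an index into the row-major cell vector by multiplying up from the innermost dimension. A minimal sketch of that mapping, with the undefined-label exception replaced by a plain assert:

#include <cassert>
#include <cstddef>
#include <vector>

// Flatten a label tuple into its position in the row-major cell vector,
// given the dimension sizes in the same (name-sorted) order.
size_t cell_index(const std::vector<size_t> &labels,
                  const std::vector<size_t> &sizes)
{
    size_t result = 0;
    size_t multiplier = 1;
    for (size_t i = labels.size(); i-- > 0; ) {
        assert(labels[i] < sizes[i]); // label must lie inside the dimension
        result += labels[i] * multiplier;
        multiplier *= sizes[i];
    }
    return result;
}

int main() {
    // tensor(x[2],y[3]): cell (x=1, y=2) maps to index 1*3 + 2 = 5
    assert(cell_index({1, 2}, {2, 3}) == 5);
    return 0;
}
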
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_builder.h b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_builder.h
index 1533ff3ba61..31e3b7cf451 100644
--- a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_builder.h
+++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_builder.h
@@ -19,7 +19,7 @@ public:
private:
vespalib::hash_map<vespalib::string, size_t> _dimensionsEnum;
- DenseTensor::DimensionsMeta _dimensionsMeta;
+ std::vector<eval::ValueType::Dimension> _dimensions;
DenseTensor::Cells _cells;
std::vector<size_t> _addressBuilder;
std::vector<Dimension> _dimensionsMapping;
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_reduce.hpp b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_reduce.hpp
index e2af832f068..b072b7ef206 100644
--- a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_reduce.hpp
+++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_reduce.hpp
@@ -8,31 +8,15 @@ namespace tensor {
namespace dense {
using Cells = DenseTensor::Cells;
-using DimensionsMeta = DenseTensor::DimensionsMeta;
namespace {
-DimensionsMeta
-removeDimension(const DimensionsMeta &dimensionsMeta,
- const string &dimensionToRemove)
-{
- DimensionsMeta result = dimensionsMeta;
- auto itr = std::lower_bound(result.begin(), result.end(), dimensionToRemove,
- [](const auto &dimMeta, const auto &dimension_in) {
- return dimMeta.dimension() < dimension_in;
- });
- if ((itr != result.end()) && (itr->dimension() == dimensionToRemove)) {
- result.erase(itr);
- }
- return result;
-}
-
size_t
-calcCellsSize(const DimensionsMeta &dimensionsMeta)
+calcCellsSize(const eval::ValueType &type)
{
size_t cellsSize = 1;
- for (const auto &dimMeta : dimensionsMeta) {
- cellsSize *= dimMeta.size();
+ for (const auto &dim : type.dimensions()) {
+ cellsSize *= dim.size;
}
return cellsSize;
}
@@ -41,41 +25,42 @@ calcCellsSize(const DimensionsMeta &dimensionsMeta)
class DimensionReducer
{
private:
- DimensionsMeta _dimensionsResult;
+ eval::ValueType _type;
Cells _cellsResult;
size_t _innerDimSize;
size_t _sumDimSize;
size_t _outerDimSize;
- void setup(const DimensionsMeta &dimensions,
+ void setup(const eval::ValueType &oldType,
const vespalib::string &dimensionToRemove) {
- auto itr = std::lower_bound(dimensions.cbegin(), dimensions.cend(), dimensionToRemove,
- [](const auto &dimMeta, const auto &dimension) {
- return dimMeta.dimension() < dimension;
- });
- if ((itr != dimensions.end()) && (itr->dimension() == dimensionToRemove)) {
- for (auto outerItr = dimensions.cbegin(); outerItr != itr; ++outerItr) {
- _outerDimSize *= outerItr->size();
+ auto itr = std::lower_bound(oldType.dimensions().cbegin(),
+ oldType.dimensions().cend(),
+ dimensionToRemove,
+ [](const auto &dim, const auto &dimension)
+ { return dim.name < dimension; });
+ if ((itr != oldType.dimensions().end()) && (itr->name == dimensionToRemove)) {
+ for (auto outerItr = oldType.dimensions().cbegin(); outerItr != itr; ++outerItr) {
+ _outerDimSize *= outerItr->size;
}
- _sumDimSize = itr->size();
- for (++itr; itr != dimensions.cend(); ++itr) {
- _innerDimSize *= itr->size();
+ _sumDimSize = itr->size;
+ for (++itr; itr != oldType.dimensions().cend(); ++itr) {
+ _innerDimSize *= itr->size;
}
} else {
- _outerDimSize = calcCellsSize(dimensions);
+ _outerDimSize = calcCellsSize(oldType);
}
}
public:
- DimensionReducer(const DimensionsMeta &dimensions,
+ DimensionReducer(const eval::ValueType &oldType,
const string &dimensionToRemove)
- : _dimensionsResult(removeDimension(dimensions, dimensionToRemove)),
- _cellsResult(calcCellsSize(_dimensionsResult)),
+ : _type(oldType.remove_dimensions({ dimensionToRemove })),
+ _cellsResult(calcCellsSize(_type)),
_innerDimSize(1),
_sumDimSize(1),
_outerDimSize(1)
{
- setup(dimensions, dimensionToRemove);
+ setup(oldType, dimensionToRemove);
}
template <typename Function>
@@ -101,7 +86,7 @@ public:
}
assert(itr_out == _cellsResult.end());
assert(itr_in == cellsIn.cend());
- return std::make_unique<DenseTensor>(std::move(_dimensionsResult), std::move(_cellsResult));
+ return std::make_unique<DenseTensor>(std::move(_type), std::move(_cellsResult));
}
};
@@ -109,7 +94,7 @@ template <typename Function>
DenseTensor::UP
reduce(const DenseTensor &tensor, const vespalib::string &dimensionToRemove, Function &&func)
{
- DimensionReducer reducer(tensor.dimensionsMeta(), dimensionToRemove);
+ DimensionReducer reducer(tensor.type(), dimensionToRemove);
return reducer.reduceCells(tensor.cells(), func);
}
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.cpp b/vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.cpp
index dd1682fb451..8a7ed1928ef 100644
--- a/vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.cpp
+++ b/vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.cpp
@@ -7,27 +7,27 @@ namespace vespalib {
namespace tensor {
using Address = DirectDenseTensorBuilder::Address;
-using DimensionsMeta = DirectDenseTensorBuilder::DimensionsMeta;
+using eval::ValueType;
namespace {
size_t
-calculateCellsSize(const DimensionsMeta &dimensionsMeta)
+calculateCellsSize(const ValueType &type)
{
size_t cellsSize = 1;
- for (const auto &dimMeta : dimensionsMeta) {
- cellsSize *= dimMeta.size();
+ for (const auto &dim : type.dimensions()) {
+ cellsSize *= dim.size;
}
return cellsSize;
}
size_t
-calculateCellAddress(const Address &address, const DimensionsMeta &dimensionsMeta)
+calculateCellAddress(const Address &address, const ValueType &type)
{
- assert(address.size() == dimensionsMeta.size());
+ assert(address.size() == type.dimensions().size());
size_t result = 0;
for (size_t i = 0; i < address.size(); ++i) {
- result *= dimensionsMeta[i].size();
+ result *= type.dimensions()[i].size;
result += address[i];
}
return result;
@@ -35,16 +35,16 @@ calculateCellAddress(const Address &address, const DimensionsMeta &dimensionsMet
}
-DirectDenseTensorBuilder::DirectDenseTensorBuilder(const DimensionsMeta &dimensionsMeta)
- : _dimensionsMeta(dimensionsMeta),
- _cells(calculateCellsSize(_dimensionsMeta))
+DirectDenseTensorBuilder::DirectDenseTensorBuilder(const ValueType &type_in)
+ : _type(type_in),
+ _cells(calculateCellsSize(_type))
{
}
void
DirectDenseTensorBuilder::insertCell(const Address &address, double cellValue)
{
- size_t cellAddress = calculateCellAddress(address, _dimensionsMeta);
+ size_t cellAddress = calculateCellAddress(address, _type);
assert(cellAddress < _cells.size());
_cells[cellAddress] = cellValue;
}
@@ -52,7 +52,7 @@ DirectDenseTensorBuilder::insertCell(const Address &address, double cellValue)
Tensor::UP
DirectDenseTensorBuilder::build()
{
- return std::make_unique<DenseTensor>(std::move(_dimensionsMeta), std::move(_cells));
+ return std::make_unique<DenseTensor>(std::move(_type), std::move(_cells));
}
} // namespace tensor
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.h b/vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.h
index 74234f1cabe..b5329860e86 100644
--- a/vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.h
+++ b/vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.h
@@ -13,16 +13,15 @@ namespace tensor {
class DirectDenseTensorBuilder
{
public:
- using DimensionsMeta = DenseTensor::DimensionsMeta;
using Cells = DenseTensor::Cells;
using Address = std::vector<size_t>;
private:
- DimensionsMeta _dimensionsMeta;
+ eval::ValueType _type;
Cells _cells;
public:
- DirectDenseTensorBuilder(const DimensionsMeta &dimensionsMeta);
+ DirectDenseTensorBuilder(const eval::ValueType &type_in);
void insertCell(const Address &address, double cellValue);
Tensor::UP build();
};
diff --git a/vespalib/src/vespa/vespalib/tensor/serialization/dense_binary_format.cpp b/vespalib/src/vespa/vespalib/tensor/serialization/dense_binary_format.cpp
index f074f8d4335..0f6d1b3b2a5 100644
--- a/vespalib/src/vespa/vespalib/tensor/serialization/dense_binary_format.cpp
+++ b/vespalib/src/vespa/vespalib/tensor/serialization/dense_binary_format.cpp
@@ -11,16 +11,26 @@ using vespalib::nbostream;
namespace vespalib {
namespace tensor {
+namespace {
+
+eval::ValueType
+makeValueType(const std::vector<eval::ValueType::Dimension> &&dimensions) {
+ return (dimensions.empty() ?
+ eval::ValueType::double_type() :
+ eval::ValueType::tensor_type(std::move(dimensions)));
+}
+
+}
void
DenseBinaryFormat::serialize(nbostream &stream, const DenseTensor &tensor)
{
- stream.putInt1_4Bytes(tensor.dimensionsMeta().size());
+ stream.putInt1_4Bytes(tensor.type().dimensions().size());
size_t cellsSize = 1;
- for (const auto &dimension : tensor.dimensionsMeta()) {
- stream.writeSmallString(dimension.dimension());
- stream.putInt1_4Bytes(dimension.size());
- cellsSize *= dimension.size();
+ for (const auto &dimension : tensor.type().dimensions()) {
+ stream.writeSmallString(dimension.name);
+ stream.putInt1_4Bytes(dimension.size);
+ cellsSize *= dimension.size;
}
const DenseTensor::Cells &cells = tensor.cells();
assert(cells.size() == cellsSize);
@@ -34,15 +44,15 @@ std::unique_ptr<DenseTensor>
DenseBinaryFormat::deserialize(nbostream &stream)
{
vespalib::string dimensionName;
- DenseTensor::DimensionsMeta dimensionsMeta;
+ std::vector<eval::ValueType::Dimension> dimensions;
DenseTensor::Cells cells;
size_t dimensionsSize = stream.getInt1_4Bytes();
size_t dimensionSize;
size_t cellsSize = 1;
- while (dimensionsMeta.size() < dimensionsSize) {
+ while (dimensions.size() < dimensionsSize) {
stream.readSmallString(dimensionName);
dimensionSize = stream.getInt1_4Bytes();
- dimensionsMeta.emplace_back(dimensionName, dimensionSize);
+ dimensions.emplace_back(dimensionName, dimensionSize);
cellsSize *= dimensionSize;
}
cells.reserve(cellsSize);
@@ -51,7 +61,7 @@ DenseBinaryFormat::deserialize(nbostream &stream)
stream >> cellValue;
cells.emplace_back(cellValue);
}
- return std::make_unique<DenseTensor>(std::move(dimensionsMeta),
+ return std::make_unique<DenseTensor>(makeValueType(std::move(dimensions)),
std::move(cells));
}
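
The serialized dense format above is: number of dimensions, then a (name, size) pair per dimension, then the raw cell values, with the cell count implied by the product of the sizes. A rough sketch of that layout; it writes fixed 32-bit counts and host-order doubles purely for illustration, whereas the real code uses nbostream's compressed integers and its own byte order:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

// Simplified stand-in for the dense binary layout: dimension count,
// then (name, size) per dimension, then the cells.
struct Dim { std::string name; uint32_t size; };

static void put_u32(std::vector<uint8_t> &buf, uint32_t v) {
    const uint8_t *p = reinterpret_cast<const uint8_t *>(&v);
    buf.insert(buf.end(), p, p + sizeof(v));
}

std::vector<uint8_t>
serialize(const std::vector<Dim> &dims, const std::vector<double> &cells)
{
    std::vector<uint8_t> buf;
    put_u32(buf, static_cast<uint32_t>(dims.size()));
    size_t expected_cells = 1;
    for (const auto &dim : dims) {
        put_u32(buf, static_cast<uint32_t>(dim.name.size()));
        buf.insert(buf.end(), dim.name.begin(), dim.name.end());
        put_u32(buf, dim.size);
        expected_cells *= dim.size;
    }
    assert(cells.size() == expected_cells); // same invariant as the real code
    const uint8_t *raw = reinterpret_cast<const uint8_t *>(cells.data());
    buf.insert(buf.end(), raw, raw + cells.size() * sizeof(double));
    return buf;
}

int main() {
    // tensor(x[2],y[2]) with four cells
    auto buf = serialize({{"x", 2}, {"y", 2}}, {1.0, 2.0, 3.0, 4.0});
    assert(buf.size() == 4 + (4 + 1 + 4) * 2 + 4 * 8);
    return 0;
}
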
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.cpp b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.cpp
index 5e7ec5b1db3..024d63572c6 100644
--- a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.cpp
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.cpp
@@ -223,9 +223,7 @@ buildAddress(const SparseTensor::Dimensions &dimensions,
{
for (const auto &dimension : dimensions) {
auto label = decoder.decodeLabel();
- if (!label.empty()) {
- address.emplace(std::make_pair(dimension, TensorSpec::Label(label)));
- }
+ address.emplace(std::make_pair(dimension, TensorSpec::Label(label)));
}
assert(!decoder.valid());
}
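
The sparse change above makes buildAddress emit an entry for every dimension, including those whose decoded label is empty, so a cell address always has one label per dimension of the tensor. A reduced sketch of the old versus new behaviour (Address is a bare map here, and the decoder is replaced by a plain label vector):

#include <cassert>
#include <cstddef>
#include <map>
#include <string>
#include <vector>

using Address = std::map<std::string, std::string>;

// Old behaviour: dimensions whose decoded label was empty were left out
// of the address. New behaviour (matching the change above): every
// dimension gets an entry, empty label or not.
Address build_address(const std::vector<std::string> &dimensions,
                      const std::vector<std::string> &labels,
                      bool keep_empty)
{
    Address address;
    for (size_t i = 0; i < dimensions.size(); ++i) {
        if (keep_empty || !labels[i].empty()) {
            address.emplace(dimensions[i], labels[i]);
        }
    }
    return address;
}

int main() {
    std::vector<std::string> dims   = {"x", "y"};
    std::vector<std::string> labels = {"1", ""};
    assert(build_address(dims, labels, false).size() == 1); // old: 'y' dropped
    assert(build_address(dims, labels, true).size() == 2);  // new: 'y' kept with ""
    return 0;
}
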
diff --git a/vespalib/src/vespa/vespalib/tensor/tensor_function.cpp b/vespalib/src/vespa/vespalib/tensor/tensor_function.cpp
deleted file mode 100644
index 180f5f321cd..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/tensor_function.cpp
+++ /dev/null
@@ -1,360 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/fastos/fastos.h>
-#include "tensor_function.h"
-#include <vespa/vespalib/eval/value_type.h>
-
-namespace vespalib {
-namespace tensor {
-namespace function {
-namespace {
-
-//-----------------------------------------------------------------------------
-
-/**
- * Base function class keeping track of result type.
- **/
-class FunctionBase : public Node
-{
-private:
- eval::ValueType _type;
-protected:
- explicit FunctionBase(const eval::ValueType &type_in) : _type(type_in) {}
- const eval::ValueType &type() const override { return _type; }
-
- // helper function used to unwrap tensor value from eval result
- static const Tensor &eval_tensor(Node &node, const Input &input) {
- return node.eval(input).as_tensor;
- }
-};
-
-//-----------------------------------------------------------------------------
-
-/**
- * Function mixin class used to keep tensor results alive.
- **/
-class TensorCache : public FunctionBase
-{
-private:
- Tensor::UP _my_result;
-protected:
- explicit TensorCache(const eval::ValueType &type_in)
- : FunctionBase(type_in), _my_result() {}
- const Tensor &store_tensor(Tensor::UP result) {
- _my_result = std::move(result);
- return *_my_result;
- }
-};
-
-//-----------------------------------------------------------------------------
-
-/**
- * Resolve an input tensor value.
- **/
-class InputTensor : public FunctionBase
-{
-private:
- size_t _tensor_id;
-
- static eval::ValueType infer_type(const eval::ValueType &type_in) {
- if (type_in.is_tensor() || type_in.is_double()) {
- return type_in;
- } else {
- return eval::ValueType::error_type();
- }
- }
-
-public:
- InputTensor(const eval::ValueType &type_in, size_t tensor_id)
- : FunctionBase(infer_type(type_in)), _tensor_id(tensor_id) {}
- Result eval(const Input &input) override {
- return input.get_tensor(_tensor_id);
- }
-};
-
-//-----------------------------------------------------------------------------
-
-/**
- * Sum all the cells in a tensor.
- **/
-class Sum : public FunctionBase
-{
-private:
- Node_UP _child;
-
- static eval::ValueType infer_type(const eval::ValueType &child_type) {
- if (child_type.is_tensor() || child_type.is_double()) {
- return eval::ValueType::double_type();
- } else {
- return eval::ValueType::error_type();
- }
- }
-
-public:
- explicit Sum(Node_UP child)
- : FunctionBase(infer_type(child->type())),
- _child(std::move(child)) {}
-
- Result eval(const Input &input) override {
- return eval_tensor(*_child, input).sum();
- }
-};
-
-//-----------------------------------------------------------------------------
-
-/**
- * Perform sum over a tensor dimension.
- **/
-class DimensionSum : public TensorCache
-{
-private:
- Node_UP _child;
- vespalib::string _dimension;
-
- static eval::ValueType infer_type(const eval::ValueType &child_type, const vespalib::string &dimension) {
- return child_type.remove_dimensions({dimension});
- }
-
-public:
- DimensionSum(Node_UP child, const vespalib::string &dimension)
- : TensorCache(infer_type(child->type(), dimension)),
- _child(std::move(child)), _dimension(dimension) {}
-
- Result eval(const Input &input) override {
- return store_tensor(eval_tensor(*_child, input).sum(_dimension));
- }
-};
-
-//-----------------------------------------------------------------------------
-
-/**
- * Apply a cell function to all cells in a tensor.
- **/
-class Apply : public TensorCache
-{
-private:
- Node_UP _child;
- size_t _cell_function_id;
-
- static eval::ValueType infer_type(const eval::ValueType &child_type) {
- if (child_type.is_tensor()) {
- return child_type;
- } else {
- return eval::ValueType::error_type();
- }
- }
-
-public:
- Apply(Node_UP child, size_t cell_function_id)
- : TensorCache(infer_type(child->type())),
- _child(std::move(child)), _cell_function_id(cell_function_id) {}
-
- Result eval(const Input &input) override {
- const auto &cell_function = input.get_cell_function(_cell_function_id);
- return store_tensor(eval_tensor(*_child, input).apply(cell_function));
- }
-};
-
-//-----------------------------------------------------------------------------
-
-/**
- * Add two tensors.
- **/
-class Add : public TensorCache
-{
-private:
- Node_UP _lhs;
- Node_UP _rhs;
-
- static eval::ValueType infer_type(const eval::ValueType &lhs_type, const eval::ValueType &rhs_type) {
- return lhs_type.add_dimensions_from(rhs_type);
- }
-
-public:
- Add(Node_UP lhs, Node_UP rhs)
- : TensorCache(infer_type(lhs->type(), rhs->type())),
- _lhs(std::move(lhs)), _rhs(std::move(rhs)) {}
-
- Result eval(const Input &input) override {
- return store_tensor(eval_tensor(*_lhs, input)
- .add(eval_tensor(*_rhs, input)));
- }
-};
-
-//-----------------------------------------------------------------------------
-
-/**
- * Subtract two tensors.
- **/
-class Subtract : public TensorCache
-{
-private:
- Node_UP _lhs;
- Node_UP _rhs;
-
- static eval::ValueType infer_type(const eval::ValueType &lhs_type, const eval::ValueType &rhs_type) {
- return lhs_type.add_dimensions_from(rhs_type);
- }
-
-public:
- Subtract(Node_UP lhs, Node_UP rhs)
- : TensorCache(infer_type(lhs->type(), rhs->type())),
- _lhs(std::move(lhs)), _rhs(std::move(rhs)) {}
-
- Result eval(const Input &input) override {
- return store_tensor(eval_tensor(*_lhs, input)
- .subtract(eval_tensor(*_rhs, input)));
- }
-};
-
-//-----------------------------------------------------------------------------
-
-/**
- * Multiply two tensors.
- **/
-class Multiply : public TensorCache
-{
-private:
- Node_UP _lhs;
- Node_UP _rhs;
-
- static eval::ValueType infer_type(const eval::ValueType &lhs_type, const eval::ValueType &rhs_type) {
- return lhs_type.add_dimensions_from(rhs_type);
- }
-
-public:
- Multiply(Node_UP lhs, Node_UP rhs)
- : TensorCache(infer_type(lhs->type(), rhs->type())),
- _lhs(std::move(lhs)), _rhs(std::move(rhs)) {}
-
- Result eval(const Input &input) override {
- return store_tensor(eval_tensor(*_lhs, input)
- .multiply(eval_tensor(*_rhs, input)));
- }
-};
-
-//-----------------------------------------------------------------------------
-
-/**
- * Cellwise min between two tensors.
- **/
-class Min : public TensorCache
-{
-private:
- Node_UP _lhs;
- Node_UP _rhs;
-
- static eval::ValueType infer_type(const eval::ValueType &lhs_type, const eval::ValueType &rhs_type) {
- return lhs_type.add_dimensions_from(rhs_type);
- }
-
-public:
- Min(Node_UP lhs, Node_UP rhs)
- : TensorCache(infer_type(lhs->type(), rhs->type())),
- _lhs(std::move(lhs)), _rhs(std::move(rhs)) {}
-
- Result eval(const Input &input) override {
- return store_tensor(eval_tensor(*_lhs, input)
- .min(eval_tensor(*_rhs, input)));
- }
-};
-
-//-----------------------------------------------------------------------------
-
-/**
- * Cellwise max between two tensors.
- **/
-class Max : public TensorCache
-{
-private:
- Node_UP _lhs;
- Node_UP _rhs;
-
- static eval::ValueType infer_type(const eval::ValueType &lhs_type, const eval::ValueType &rhs_type) {
- return lhs_type.add_dimensions_from(rhs_type);
- }
-
-public:
- Max(Node_UP lhs, Node_UP rhs)
- : TensorCache(infer_type(lhs->type(), rhs->type())),
- _lhs(std::move(lhs)), _rhs(std::move(rhs)) {}
-
- Result eval(const Input &input) override {
- return store_tensor(eval_tensor(*_lhs, input)
- .max(eval_tensor(*_rhs, input)));
- }
-};
-
-//-----------------------------------------------------------------------------
-
-/**
- * Match two tensors.
- **/
-class Match : public TensorCache
-{
-private:
- Node_UP _lhs;
- Node_UP _rhs;
-
- static eval::ValueType infer_type(const eval::ValueType &lhs_type, const eval::ValueType &rhs_type) {
- return lhs_type.keep_dimensions_in(rhs_type);
- }
-
-public:
- Match(Node_UP lhs, Node_UP rhs)
- : TensorCache(infer_type(lhs->type(), rhs->type())),
- _lhs(std::move(lhs)), _rhs(std::move(rhs)) {}
-
- Result eval(const Input &input) override {
- return store_tensor(eval_tensor(*_lhs, input)
- .match(eval_tensor(*_rhs, input)));
- }
-};
-
-//-----------------------------------------------------------------------------
-
-} // namespace vespalib::tensor::function::<unnamed>
-
-Node_UP input(const eval::ValueType &type, size_t tensor_id) {
- return std::make_unique<InputTensor>(type, tensor_id);
-}
-
-Node_UP sum(Node_UP child) {
- return std::make_unique<Sum>(std::move(child));
-}
-
-Node_UP dimension_sum(Node_UP child, const vespalib::string &dimension) {
- return std::make_unique<DimensionSum>(std::move(child), dimension);
-}
-
-Node_UP apply(Node_UP child, size_t cell_function_id) {
- return std::make_unique<Apply>(std::move(child), cell_function_id);
-}
-
-Node_UP add(Node_UP lhs, Node_UP rhs) {
- return std::make_unique<Add>(std::move(lhs), std::move(rhs));
-}
-
-Node_UP subtract(Node_UP lhs, Node_UP rhs) {
- return std::make_unique<Subtract>(std::move(lhs), std::move(rhs));
-}
-
-Node_UP multiply(Node_UP lhs, Node_UP rhs) {
- return std::make_unique<Multiply>(std::move(lhs), std::move(rhs));
-}
-
-Node_UP min(Node_UP lhs, Node_UP rhs) {
- return std::make_unique<Min>(std::move(lhs), std::move(rhs));
-}
-
-Node_UP max(Node_UP lhs, Node_UP rhs) {
- return std::make_unique<Max>(std::move(lhs), std::move(rhs));
-}
-
-Node_UP match(Node_UP lhs, Node_UP rhs) {
- return std::make_unique<Match>(std::move(lhs), std::move(rhs));
-}
-
-} // namespace vespalib::tensor::function
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/tensor_function.h b/vespalib/src/vespa/vespalib/tensor/tensor_function.h
deleted file mode 100644
index f47c33adcbe..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/tensor_function.h
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include "tensor.h"
-#include <vespa/vespalib/eval/value_type.h>
-#include <memory>
-
-namespace vespalib {
-namespace tensor {
-
-//-----------------------------------------------------------------------------
-
-/**
- * A tensor function that can be evaluated. A TensorFunction will
- * typically be produced by an implementation-specific compile step
- * that takes an implementation-independent intermediate
- * representation of the tensor function as input (tree of
- * function::Node objects).
- **/
-struct TensorFunction
-{
- typedef std::unique_ptr<TensorFunction> UP;
-
- /**
- * A tensor function will evaluate to either a tensor or a double
- * value. The result type indicated by the tensor function
- * intermediate representation will indicate which form is valid.
- **/
- union Result {
- double as_double;
- Tensor::CREF as_tensor;
- Result(const Result &rhs) { memcpy(this, &rhs, sizeof(Result)); }
- Result(double value) : as_double(value) {}
- Result(const Tensor &value) : as_tensor(value) {}
- ~Result() {}
- };
-
- /**
- * Interface used to obtain input to a tensor function.
- **/
- struct Input {
- virtual const Tensor &get_tensor(size_t id) const = 0;
- virtual const CellFunction &get_cell_function(size_t id) const = 0;
- virtual ~Input() {}
- };
-
- /**
- * Evaluate this tensor function based on the given input. This
- * function is defined as non-const because it will return tensors
- * by reference. Intermediate results are typically kept alive
- * until the next time eval is called. The return value must
- * conform to the result type indicated by the intermediate
- * representation describing this tensor function.
- *
- * @return result of evaluating this tensor function
- * @param input external stuff needed to evaluate this function
- **/
- virtual Result eval(const Input &input) = 0;
-
- virtual ~TensorFunction() {}
-};
-
-//-----------------------------------------------------------------------------
-
-namespace function {
-
-/**
- * Interface used to describe a tensor function as a tree of nodes
- * with information about operation sequencing and intermediate result
- * types. Each node in the tree will describe a single tensor
- * operation. This is the intermediate representation of a tensor
- * function.
- *
- * Since tensor operations currently are part of the tensor interface,
- * the intermediate representation of a tensor function can also be
- * used to evaluate the tensor function by performing the appropriate
- * operations directly on the input tensors. In other words, the
- * intermediate representation 'compiles to itself'.
- **/
-struct Node : public TensorFunction
-{
- /**
- * The result type of the tensor operation represented by this
- * Node.
- *
- * @return tensor operation result type.
- **/
- virtual const eval::ValueType &type() const = 0;
-};
-
-using Node_UP = std::unique_ptr<Node>;
-
-Node_UP input(const eval::ValueType &type, size_t tensor_id);
-Node_UP sum(Node_UP child);
-Node_UP dimension_sum(Node_UP child, const vespalib::string &dimension);
-Node_UP apply(Node_UP child, size_t cell_function_id);
-Node_UP add(Node_UP lhs, Node_UP rhs);
-Node_UP subtract(Node_UP lhs, Node_UP rhs);
-Node_UP multiply(Node_UP lhs, Node_UP rhs);
-Node_UP min(Node_UP lhs, Node_UP rhs);
-Node_UP max(Node_UP lhs, Node_UP rhs);
-Node_UP match(Node_UP lhs, Node_UP rhs);
-
-} // namespace vespalib::tensor::function
-
-//-----------------------------------------------------------------------------
-
-} // namespace vespalib::tensor
-} // namespace vespalib
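
As the deleted comments note, the Node tree "compiles to itself", so it could be evaluated directly once a TensorFunction::Input supplied the operand tensors. A hedged sketch of what that wiring might have looked like; SimpleInput and eval_as_double are made up for illustration and were never part of the codebase:

    // Sketch only: a minimal TensorFunction::Input plus a direct eval() call
    // on the removed IR, assuming the interfaces shown in the deleted header.
    #include <vespa/vespalib/tensor/tensor_function.h>
    #include <stdexcept>
    #include <vector>

    struct SimpleInput : vespalib::tensor::TensorFunction::Input {
        std::vector<const vespalib::tensor::Tensor *> tensors;
        const vespalib::tensor::Tensor &get_tensor(size_t id) const override {
            return *tensors.at(id);
        }
        const vespalib::tensor::CellFunction &get_cell_function(size_t) const override {
            throw std::logic_error("no cell functions in this sketch");
        }
    };

    double eval_as_double(vespalib::tensor::function::Node &ir,
                          const vespalib::tensor::Tensor &a,
                          const vespalib::tensor::Tensor &b) {
        SimpleInput in;
        in.tensors = {&a, &b};
        // valid only when the IR's result type is a double (e.g. a full sum)
        return ir.eval(in).as_double;
    }
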
diff --git a/vespalib/src/vespa/vespalib/tensor/tensor_mapper.cpp b/vespalib/src/vespa/vespalib/tensor/tensor_mapper.cpp
index a527627d786..d7bec94548e 100644
--- a/vespalib/src/vespa/vespalib/tensor/tensor_mapper.cpp
+++ b/vespalib/src/vespa/vespalib/tensor/tensor_mapper.cpp
@@ -118,7 +118,7 @@ SparseTensorMapper<TensorT>::map(const Tensor &tensor,
class DenseTensorMapper : public TensorVisitor
{
- DenseTensor::DimensionsMeta _dimensionsMeta;
+ eval::ValueType _type;
DenseTensor::Cells _cells;
static constexpr uint32_t BAD_LABEL = std::numeric_limits<uint32_t>::max();
static constexpr uint32_t BAD_ADDRESS =
@@ -138,14 +138,12 @@ public:
};
DenseTensorMapper::DenseTensorMapper(const ValueType &type)
- : _dimensionsMeta(),
+ : _type(type),
_cells()
{
- _dimensionsMeta.reserve(type.dimensions().size());
size_t size = 1;
for (const auto &dimension : type.dimensions()) {
size *= dimension.size;
- _dimensionsMeta.emplace_back(dimension.name, dimension.size);
}
_cells.resize(size);
}
@@ -157,7 +155,7 @@ DenseTensorMapper::~DenseTensorMapper()
std::unique_ptr<Tensor>
DenseTensorMapper::build()
{
- return std::make_unique<DenseTensor>(std::move(_dimensionsMeta),
+ return std::make_unique<DenseTensor>(std::move(_type),
std::move(_cells));
}
@@ -182,17 +180,17 @@ DenseTensorMapper::mapAddressToIndex(const TensorAddress &address)
{
uint32_t idx = 0;
TensorAddressElementIterator<TensorAddress> addressIterator(address);
- for (const auto &dimension : _dimensionsMeta) {
- if (addressIterator.skipToDimension(dimension.dimension())) {
+ for (const auto &dimension : _type.dimensions()) {
+ if (addressIterator.skipToDimension(dimension.name)) {
uint32_t label = mapLabelToNumber(addressIterator.label());
- if (label == BAD_LABEL || label >= dimension.size()) {
+ if (label == BAD_LABEL || label >= dimension.size) {
return BAD_ADDRESS;
}
- idx = idx * dimension.size() + label;
+ idx = idx * dimension.size + label;
addressIterator.next();
} else {
// output dimension not in input
- idx = idx * dimension.size();
+ idx = idx * dimension.size;
}
}
return idx;
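
The loop above folds a mixed-radix (row-major) address into a flat cell index, now driven directly by the dimensions of the eval::ValueType instead of the removed DimensionsMeta. A standalone illustration of the same accumulation; the struct, dimension sizes and labels are example values, not taken from the patch:

    // Sketch only: the same idx = idx * size + label accumulation as
    // DenseTensorMapper::mapAddressToIndex, on hard-coded example data.
    #include <cstdint>
    #include <vector>

    struct DimExample { uint32_t size; uint32_t label; };

    uint32_t map_to_index(const std::vector<DimExample> &dims) {
        uint32_t idx = 0;
        for (const auto &d : dims) {
            // in mapAddressToIndex, a dimension missing from the input
            // address contributes label 0 (the idx * size branch)
            idx = idx * d.size + d.label;
        }
        return idx;
    }

    // e.g. dimensions x[3], y[4] with labels x=2, y=1:
    //   idx = (0 * 3 + 2) * 4 + 1 = 9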